file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.rs | use crate::argon2id13::Salt;
use actix_web::{get, post, web, HttpRequest, HttpResponse};
use aes_gcm::aead::{Aead, NewAead};
use aes_gcm::{Aes256Gcm, Key, Nonce};
use futures::StreamExt;
use google_authenticator::{ErrorCorrectionLevel, GoogleAuthenticator};
use hmac::{Hmac, Mac, NewMac};
use lazy_static::lazy_static;
use rand_core::{OsRng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json;
use sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
use std::collections::HashMap;
use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// Cette partie se fait normalement sur le client mais elle est volontairement
// mise sur le serveur pour simplifié l'architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
// regarde si l'utilisateur est dans la DB, si oui on lui envoie un challenge à résoudre
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// lecture du body pour avoir le challenge envoyé
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
// on désérialise le challenge envoyé
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
// récupération du challenge envoyé au client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
}
// Fait le mac à partir de la kdf dans la DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
// on teste si les valeurs sont identiques
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// création du code QR
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish(); |
// récupère le code dans le header
let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
if !auth.verify_code(&user.secret, &input_code, 0, 0) {
println!("Mauvais code.");
return HttpResponse::Unauthorized().finish();
}
// si ok, un token est envoyé à l'utilisateur pour les prochains échanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
// lire le body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
// écriture des données dans un fichier
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
// ouvrir et lire le fichier
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
// préparation des clés pour AES-GCM et du nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
// on lit le contenu du répertoire
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
// pour tous les fichiers est de type metadonnée
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file");
let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
file_list.push('\n');
}
}
}
let ciphertext = aead
.encrypt(nonce, file_list.as_bytes())
.expect("encryption failure!");
HttpResponse::Ok().body(ciphertext)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
println!("Le serveur est prêt à recevoir des requêtes");
use actix_web::{App, HttpServer};
HttpServer::new(|| {
App::new()
.service(username)
.service(username_post)
.service(get_code)
.service(validate_code)
.service(upload)
.service(download)
.service(get_list)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
// vérification double facteur
pub fn verifiy_2fa(user_secret: &str, token: String) -> bool {
let auth = GoogleAuthenticator::new();
if !auth.verify_code(user_secret, &token, 0, 0) {
println!("Mauvais code.");
return false;
}
true
}
// vérifie si le token existe et appartient au bon utilisateur
fn check_token(req: &HttpRequest) -> bool {
let token: &str = req.headers().get("Token").unwrap().to_str().unwrap();
let user: &str = req.headers().get("Username").unwrap().to_str().unwrap();
unsafe {
for pair in USER_TOKEN.iter() {
if pair.0 == user && pair.1 == token {
return true;
}
}
}
return false;
} | }
}; | random_line_split |
main.rs | use crate::argon2id13::Salt;
use actix_web::{get, post, web, HttpRequest, HttpResponse};
use aes_gcm::aead::{Aead, NewAead};
use aes_gcm::{Aes256Gcm, Key, Nonce};
use futures::StreamExt;
use google_authenticator::{ErrorCorrectionLevel, GoogleAuthenticator};
use hmac::{Hmac, Mac, NewMac};
use lazy_static::lazy_static;
use rand_core::{OsRng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json;
use sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
use std::collections::HashMap;
use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// Cette partie se fait normalement sur le client mais elle est volontairement
// mise sur le serveur pour simplifié l'architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
// regarde si l'utilisateur est dans la DB, si oui on lui envoie un challenge à résoudre
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// lecture du body pour avoir le challenge envoyé
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
// on désérialise le challenge envoyé
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
// récupération du challenge envoyé au client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
}
// Fait le mac à partir de la kdf dans la DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
// on teste si les valeurs sont identiques
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// création du code QR
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// récupère le code dans le header
let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
if !auth.verify_code(&user.secret, &input_code, 0, 0) {
println!("Mauvais code.");
return HttpResponse::Unauthorized().finish();
}
// si ok, un token est envoyé à l'utilisateur pour les prochains échanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web | oad, req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
// lire le body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
// écriture des données dans un fichier
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
// ouvrir et lire le fichier
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
// préparation des clés pour AES-GCM et du nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
// on lit le contenu du répertoire
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
// pour tous les fichiers est de type metadonnée
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file");
let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
file_list.push('\n');
}
}
}
let ciphertext = aead
.encrypt(nonce, file_list.as_bytes())
.expect("encryption failure!");
HttpResponse::Ok().body(ciphertext)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
println!("Le serveur est prêt à recevoir des requêtes");
use actix_web::{App, HttpServer};
HttpServer::new(|| {
App::new()
.service(username)
.service(username_post)
.service(get_code)
.service(validate_code)
.service(upload)
.service(download)
.service(get_list)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
// vérification double facteur
pub fn verifiy_2fa(user_secret: &str, token: String) -> bool {
let auth = GoogleAuthenticator::new();
if !auth.verify_code(user_secret, &token, 0, 0) {
println!("Mauvais code.");
return false;
}
true
}
// vérifie si le token existe et appartient au bon utilisateur
fn check_token(req: &HttpRequest) -> bool {
let token: &str = req.headers().get("Token").unwrap().to_str().unwrap();
let user: &str = req.headers().get("Username").unwrap().to_str().unwrap();
unsafe {
for pair in USER_TOKEN.iter() {
if pair.0 == user && pair.1 == token {
return true;
}
}
}
return false;
}
| ::Payl | identifier_name |
main.rs | use crate::argon2id13::Salt;
use actix_web::{get, post, web, HttpRequest, HttpResponse};
use aes_gcm::aead::{Aead, NewAead};
use aes_gcm::{Aes256Gcm, Key, Nonce};
use futures::StreamExt;
use google_authenticator::{ErrorCorrectionLevel, GoogleAuthenticator};
use hmac::{Hmac, Mac, NewMac};
use lazy_static::lazy_static;
use rand_core::{OsRng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json;
use sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
use std::collections::HashMap;
use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// Cette partie se fait normalement sur le client mais elle est volontairement
// mise sur le serveur pour simplifié l'architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
// regarde si l'utilisateur est dans la DB, si oui on lui envoie un challenge à résoudre
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// lecture du body pour avoir le challenge envoyé
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
// on désérialise le challenge envoyé
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
// récupération du challenge envoyé au client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
}
// Fait le mac à partir de la kdf dans la DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
// on teste si les valeurs sont identiques
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// création du code QR
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// récupère le code dans le header
let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
if !auth.verify_code(&user.secret, &input_code, 0, 0) {
println!("Mauvais code.");
return HttpResponse::Unauthorized().finish();
}
// si ok, un token est envoyé à l'utilisateur pour les prochains échanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
// lire le body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
// écriture des données dans un fichier
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
// ouvrir et lire le fichier
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
// préparation des clés pour AES-GCM et du nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
// on lit le contenu du répertoire
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
// pour tous les fichiers est de type metadonnée
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file");
let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
file_list.push('\n');
}
}
}
let ciphertext = aead
.encrypt(nonce, file_list.as_bytes())
.expect("encryption failure!");
HttpResponse::Ok().body(ciphertext)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
println!("Le serveur est prêt à recevoir des requêtes");
use actix_web::{App, HttpServer};
HttpServer::new(|| {
App::new()
.service(username)
.service(username_post)
.service(get_code)
.service(validate_code)
.service(upload)
.service(download)
.service(get_list)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
// vérification double facteur
pub fn verifiy_2fa(user_secret: &str, token: String) -> bool {
let auth = GoogleAuthenticator::new();
if !auth.verify_code(user_secret, &token, 0, 0) {
println!("Mauvais code.");
return false;
}
true
}
// vérifie si le token existe et appartient au bon utilisateur
fn check_token(req: &HttpRequest) -> bool {
let token: &str = req.headers().get("Token").unwrap().to_str().unwrap();
let user: &str = req.headers().get("Username").unwrap().to_str().unwrap();
unsafe {
for pair in USER_TOKEN.iter() {
if pair.0 == user && pair.1 == token {
return true;
|
}
| }
}
}
return false; | conditional_block |
main.rs | use crate::argon2id13::Salt;
use actix_web::{get, post, web, HttpRequest, HttpResponse};
use aes_gcm::aead::{Aead, NewAead};
use aes_gcm::{Aes256Gcm, Key, Nonce};
use futures::StreamExt;
use google_authenticator::{ErrorCorrectionLevel, GoogleAuthenticator};
use hmac::{Hmac, Mac, NewMac};
use lazy_static::lazy_static;
use rand_core::{OsRng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json;
use sha2::Sha256;
use sodiumoxide::crypto::pwhash::argon2id13;
use std::collections::HashMap;
use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// Cette partie se fait normalement sur le client mais elle est volontairement
// mise sur le serveur pour simplifié l'architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
// regarde si l'utilisateur est dans la DB, si oui on lui envoie un challenge à résoudre
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// lecture du body pour avoir le challenge envoyé
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
// on désérialise le challenge envoyé
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
// récupération du challenge envoyé au client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
}
// Fait le mac à partir de la kdf dans la DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
// on teste si les valeurs sont identiques
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// création du code QR
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
// configuration google authenticator
let auth = GoogleAuthenticator::new();
// check dans la DB si l'utilisateur est présent
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// récupère le code dans le header
let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
if !auth.verify_code(&user.secret, &input_code, 0, 0) {
println!("Mauvais code.");
return HttpResponse::Unauthorized().finish();
}
// si ok, un token est envoyé à l'utilisateur pour les prochains échanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
// lire le body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
// écriture des données dans un fichier
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
// lire et vérifier le Token
let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
// ouvrir et lire le fichier
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
// lire et vérifie | main() -> std::io::Result<()> {
println!("Le serveur est prêt à recevoir des requêtes");
use actix_web::{App, HttpServer};
HttpServer::new(|| {
App::new()
.service(username)
.service(username_post)
.service(get_code)
.service(validate_code)
.service(upload)
.service(download)
.service(get_list)
})
.bind("127.0.0.1:8080")?
.run()
.await
}
// vérification double facteur
pub fn verifiy_2fa(user_secret: &str, token: String) -> bool {
let auth = GoogleAuthenticator::new();
if !auth.verify_code(user_secret, &token, 0, 0) {
println!("Mauvais code.");
return false;
}
true
}
// vérifie si le token existe et appartient au bon utilisateur
fn check_token(req: &HttpRequest) -> bool {
let token: &str = req.headers().get("Token").unwrap().to_str().unwrap();
let user: &str = req.headers().get("Username").unwrap().to_str().unwrap();
unsafe {
for pair in USER_TOKEN.iter() {
if pair.0 == user && pair.1 == token {
return true;
}
}
}
return false;
}
| r le Token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
// préparation des clés pour AES-GCM et du nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
// on lit le contenu du répertoire
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
// pour tous les fichiers est de type metadonnée
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file");
let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
file_list.push('\n');
}
}
}
let ciphertext = aead
.encrypt(nonce, file_list.as_bytes())
.expect("encryption failure!");
HttpResponse::Ok().body(ciphertext)
}
#[actix_web::main]
async fn | identifier_body |
4_orient_grasping.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import sys
import rospy
import copy, math
import threading
import time
import tf
from math import pi, radians, degrees, atan2, sqrt
from moveit_commander import MoveGroupCommander, RobotCommander
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from moveit_commander.conversions import pose_to_list
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion, Twist
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation, MoveItErrorCodes, DisplayTrajectory
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from math import pi
#GROUP_NAME_GRIPPER = "NAME OF GRIPPER"
roscpp_initialize(sys.argv)
rospy.init_node('control_Husky_UR3', anonymous=True)
robot = RobotCommander()
scene = PlanningSceneInterface()
##모바일 파트 관련 변수 선언
x = 0.0
y = 0.0
theta = 0.0
## 매니퓰레이터 변수 선언
group_name = "ur3_manipulator"
move_group = MoveGroupCommander(group_name)
FIXED_FRAME = 'world'
display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
DisplayTrajectory,
queue_size=20)
def newOdom(msg):
global x
global y
global theta
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
rot_q = msg.pose.pose.orientation
(roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])
def move_Joint(q1,q2,q3,q4,q5,q6):
joint_goal = move_group.get_current_joint_values()
mobile_joints = [-pi/3, 0.5]
joint_goal_list = [q1,q2,q3,q4,q5,q6]
#매니퓰레이터 관절 value 설정
joint_goal[0] = joint_goal_list[0]
joint_goal[1] = joint_goal_list[1]
joint_goal[2] = joint_goal_list[2]
joint_goal[3] = joint_goal_list[3]
joint_goal[4] = joint_goal_list[4]
joint_goal[5] = joint_goal_list[5]
#제어시작
move_group.go(joint_goal, wait=True)
def get_TF(a,b):
end_flag = 0
listener = tf.TransformListener()
while end_flag ==0:
try:
(trans,rot) = listener.lookupTransform(a,b, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
end_flag = 1
return trans,rot
def move_ee(Px,Py,Pz,Rx,Ry,Rz,Rw):
trans,rot = get_TF('/odom','/base_link')
print('TF from odom to base link :',trans)
x = Px-trans[0]
y = Py-trans[1]
z = Pz-trans[2]
Ox = Rx
Oy = Ry
Oz = Rz-rot[2]
Ow = Rw
print 'real_planning_pose',x,y,z,Ox,Oy,Oz,Ow
print "============ Generating plan 1"
pose_target = Pose()
pose_target.position.x = x
pose_target.position.y = y
pose_target.position.z = z
pose_target.orientation.x = Ox
pose_target.orientation.y = Oy
pose_target.orientation.z = Oz
pose_target.orientation.w = Ow
move_group.set_pose_target(pose_target)
move_group.go(True)
print "============ plan 1 complete!"
trans_1,rot_1 = get_TF('odom','/ee_link')
print "============ ee pose : "
print move_group.get_current_pose()
print move_group.get_planning_frame()
print 'odom_TF',trans_1,rot_1
print "============"
def move_base(a,b):
sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
speed = Twist()
r = rospy.Rate(4)
goal = Point()
goal.x = a
goal.y = b
arrival_radius = 0.1
while (goal.x-x)**2 + (goal.y-y)**2 >= arrival_radius**2 :
#while abs(goal.x-x) >0.1 or abs(goal.y-y) >0.1 or abs(angle_to_goal-theta) >0.1 : #가까의 범위가 0.3이내로 들어오면 break.
inc_x = goal.x -x
inc_y = goal.y -y
angle_to_goal = atan2(inc_y,inc_x)
| (wpose))
wpose = move_group.get_current_pose().pose
wpose.position.x += b # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.y += c # First move up (z)
waypoints.append(copy.deepcopy(wpose))
# We want the Cartesian path to be interpolated at a resolution of 1 cm
# which is why we will specify 0.01 as the eef_step in Cartesian
# translation. We will disable the jump threshold by setting it to 0.0 disabling:
(plan, fraction) = move_group.compute_cartesian_path(
waypoints, # waypoints to follow
0.1, # eef_step
0.0) # jump_threshold
def x_path_planner(a):
pose_target = move_group.get_current_pose().pose
pose_target.position.x += a # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def y_path_planner(c):
pose_target = move_group.get_current_pose().pose
pose_target.position.y += c # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def z_path_planner(b):
pose_target = move_group.get_current_pose().pose
pose_target.position.z += b # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def down_demo():
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #down pose
print "Down demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
z_path_planner(0.1)
print "go down..!"
z_path_planner(-0.1)
rospy.sleep(2)
print "Down demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def up_demo():
move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
print "Up demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
rospy.sleep(1)
z_path_planner(-0.05)
print "go down..!"
z_path_planner(0.1)
rospy.sleep(1)
print "up demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def left_demo():
#move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
move_Joint(1.57,-2.27,1.93,-1.19,3.14,0) #left pose
print "Left demo is ready to start!, press enter..!"
raw_input()
print "go left..!"
#y_path_planner(0.1)
print "go more left..!"
y_path_planner(-0.2)
rospy.sleep(2)
print "left demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def right_demo():
move_Joint(1.57,-2.27,1.93,-1.19,0,0) #left pose
print "right demo is ready to start!, press enter..!"
raw_input()
print "go right..!"
y_path_planner(-0.1)
print "go more right..!"
y_path_planner(0.2)
rospy.sleep(2)
print "right demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
if __name__=='__main__':
down_demo()
up_demo()
left_demo()
right_demo()
#move_Joint(1.57079632679490 ,-1.57079632679490, 0, 0, 1.57079632679490, 0)
#move_Joint(1.38187901932325 ,0.594965748224829 ,-1.84587120888068 ,-0.259201159280024 ,1.87922844334536 ,-2.94403460825812)
#move_Joint(1.49234992746732 ,0.505575183819339 ,-1.77749928330972 ,-0.242572378864612 ,2.19692733555951 ,-3.04571339173395)
#move_Joint(1.57882340366397 ,0.392747674943758 ,-1.68144316751832 ,-0.294244456380595 ,2.51322054731526 ,3.12658213006687)
#move_Joint(1.63317876240784 ,0.285981577941942 ,-1.57592439233013 ,-0.472776683731581 ,2.82134996965689 ,2.93965970526083)
#move_Joint(1.65163641729639 ,0.202937041530315 ,-1.05397766677144 ,-2.29055198297394 ,3.05995622779418 ,1.57079637373908)
#move_Joint(1.63317874347654 ,-0.210429202660752 ,-1.10151162936461 ,-0.0669323613463442 ,-2.82134998229054 ,-2.93965974902066)
#move_Joint(1.57882340096811 ,-0.312808525716150 ,-1.00858459420407 ,-0.237259591096372 ,-2.51322054719748 ,-3.12658213005787)
#move_Joint(1.49234990741608 ,-1.25023043364197 ,0.884986270249331 ,-1.26185202847346 ,-2.19692733997135 ,3.04571337230654)
#move_Joint(1.38187898379176 ,-1.24138218321594 ,0.770320266997513 ,-1.16042411723514 ,-1.87922844910760 ,2.94403457922216)
#move_Joint(1.26044305157603 ,-1.22046262619007 ,0.674930245153225 ,-1.02526396256805 ,-1.57079631704406 ,2.83203570141689)
#move_Joint(1.14354220642811 ,-1.18364979561972 ,0.595829299678309 ,-0.849373924600574 ,-1.28560567284022 ,2.69592568050081)
#move_Joint(1.04448751501302 ,-1.11991084380463 ,0.510072952763684 ,-0.611447209983393 ,-1.03738511473967 ,2.51967923264632)
#move_Joint(0.971282836139718 ,-0.999323999902423 ,0.355694706708534 ,-0.267416174488388 ,-0.838880551835716 ,2.28194780565803)
#move_Joint(-1.63477141196796 ,-1.91133122955846 ,-1.10151156835317 ,-1.89146935483124 ,2.82134997161739 ,2.93965969942271)
#move_Joint(-1.65322906921999 ,-2.36794446930605 ,-1.05397760826863 ,0.280329388719014 ,-3.05995623288959 ,-1.57079636143505)
#move_Joint(-1.63477141766021 ,-1.98520583742980 ,-1.57592434796521 ,-0.959335599677850 ,-2.82134996914004 ,-2.93965967867828)
#move_Joint(-1.58041604836938 ,-2.00129787870086 ,-1.68144318432848 ,-1.01750430999134 ,-2.51322054568872 ,-3.12658213472640)
#move_Joint(-1.49394257050708 ,2.63601745038978 ,1.77749927268549 ,-2.89902024472062 ,-2.19692733754341 ,3.04571339154885)
#move_Joint(-1.38347163064872 ,2.54662686543240 ,1.84587116794395 ,-2.88239141905799 ,-1.87922844943615 ,2.94403456872028)
#move_Joint(-1.26203572966623 ,2.49325324698662 ,1.87618006889619 ,-2.79863698534015 ,-1.57079632703325 ,2.83203572396531)
| if abs(angle_to_goal - theta) > 2*pi/180:
speed.linear.x = 0.0
speed.angular.z = 0.3
if abs(angle_to_goal - theta) < 5*pi/180: # 0.5이내로 들어오면 속도를 매우 줄여서 목표점을 지나쳐버리는 일이 없도록함.
speed.angular.z = 0.03
speed.linear.x = 0.0
else:
speed.linear.x = 0.2
speed.angular.z = 0.0
if abs(goal.x-x) <0.3 and abs(goal.y-y)<0.3: #x,y val이 0.3이내로 들어오면 속도 매우 줄임.
speed.angular.x = 0.05
speed.angular.z = 0.0
print goal.x-x, goal.y-y, angle_to_goal-theta
pub.publish(speed)
r.sleep()
def cartesian_path_planner(a,b,c):
waypoints = []
wpose = move_group.get_current_pose().pose
wpose.position.z += a # First move up (z)
waypoints.append(copy.deepcopy | conditional_block |
4_orient_grasping.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import sys
import rospy
import copy, math
import threading
import time
import tf
from math import pi, radians, degrees, atan2, sqrt
from moveit_commander import MoveGroupCommander, RobotCommander
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from moveit_commander.conversions import pose_to_list
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion, Twist
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation, MoveItErrorCodes, DisplayTrajectory
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from math import pi
#GROUP_NAME_GRIPPER = "NAME OF GRIPPER"
roscpp_initialize(sys.argv)
rospy.init_node('control_Husky_UR3', anonymous=True)
robot = RobotCommander()
scene = PlanningSceneInterface()
##모바일 파트 관련 변수 선언
x = 0.0
y = 0.0
theta = 0.0
## 매니퓰레이터 변수 선언
group_name = "ur3_manipulator"
move_group = MoveGroupCommander(group_name)
FIXED_FRAME = 'world'
display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
DisplayTrajectory,
queue_size=20)
def newOdom(msg):
global x
global y
global theta
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
rot_q = msg.pose.pose.orientation
(roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])
def move_Joint(q1,q2,q3,q4,q5,q6):
joint_goal = move_group.get_current_joint_values()
mobile_joints = [-pi/3, 0.5]
joint_goal_list = [q1,q2,q3,q4,q5,q6]
#매니퓰레이터 관절 value 설정
joint_goal[0] = joint_goal_list[0]
joint_goal[1] = joint_goal_list[1]
joint_goal[2] = joint_goal_list[2]
joint_goal[3] = joint_goal_list[3]
joint_goal[4] = joint_goal_list[4]
joint_goal[5] = joint_goal_list[5]
#제어시작
move_group.go(joint_goal, wait=True)
def get_TF(a,b):
end_flag = 0
listener = tf.TransformListener()
while end_flag ==0:
try:
(trans,rot) = listener.lookupTransform(a,b, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
end_flag = 1
return trans,rot
def move_ee(Px,Py,Pz,Rx,Ry,Rz,Rw):
trans,rot = get_TF('/odom','/base_link')
print('TF from odom to base link :',trans)
x = Px-trans[0]
y = Py-trans[1]
z = Pz-trans[2]
Ox = Rx
Oy = Ry
Oz = Rz-rot[2]
Ow = Rw
print 'real_planning_pose',x,y,z,Ox,Oy,Oz,Ow
print "============ Generating plan 1"
pose_target = Pose()
pose_target.position.x = x
pose_target.position.y = y
pose_target.position.z = z
pose_target.orientation.x = Ox
pose_target.orientation.y = Oy
pose_target.orientation.z = Oz
pose_target.orientation.w = Ow
move_group.set_pose_target(pose_target)
move_group.go(True)
print "============ plan 1 complete!"
trans_1,rot_1 = get_TF('odom','/ee_link')
print "============ ee pose : "
print move_group.get_current_pose()
print move_group.get_planning_frame()
print 'odom_TF',trans_1,rot_1
print "============"
def move_base(a,b):
sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
speed = Twist()
r = rospy.Rate(4)
goal = Point()
goal.x = a
goal.y = b
arrival_radius = 0.1
while (goal.x-x)**2 + (goal.y-y)**2 >= arrival_radius**2 :
#while abs(goal.x-x) >0.1 or abs(goal.y-y) >0.1 or abs(angle_to_goal-theta) >0.1 : #가까의 범위가 0.3이내로 들어오면 break.
inc_x = goal.x -x
inc_y = goal.y -y
angle_to_goal = atan2(inc_y,inc_x)
if abs(angle_to_goal - theta) > 2*pi/180:
speed.linear.x = 0.0
speed.angular.z = 0.3
if abs(angle_to_goal - theta) < 5*pi/180: # 0.5이내로 들어오면 속도를 매우 줄여서 목표점을 지나쳐버리는 일이 없도록함.
speed.angular.z = 0.03
speed.linear.x = 0.0
else:
speed.linear.x = 0.2
speed.angular.z = 0.0
if abs(goal.x-x) <0.3 and abs(goal.y-y)<0.3: #x,y val이 0.3이내로 들어오면 속도 매우 줄임.
speed.angular.x = 0.05
speed.angular.z = 0.0
print goal.x-x, goal.y-y, angle_to_goal-theta
pub.publish(speed)
r.sleep()
def cartesian_path_planner(a,b,c):
waypoints = []
wpose = move_group.get_current_pose().pose
wpose.position.z += a # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.x += b # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.y += c # First move up (z)
waypoints.append(copy.deepcopy(wpose))
# We want the Cartesian path to be interpolated at a resolution of 1 cm
# which is why we will specify 0.01 as the eef_step in Cartesian
# translation. We will disable the jump threshold by setting it to 0.0 disabling:
(plan, fraction) = move_group.compute_cartesian_path(
waypoints, # waypoints to follow
0.1, # eef_step
0.0) # jump_threshold
def x_path_planner(a):
pose_target = move_group.get_current_pose().pose
pose_target.position.x += a # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def y_path_planner(c):
pose_target = move_group.get_current_pose().pose
pose_target.position.y += c # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def z_path_planner(b):
pose_target = move_group.get_current_pose().pose
pose_target.position.z += b # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def down_demo():
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #down pose
print "Down demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
z_path_planner(0.1)
print "go down..!"
z_path_planner(-0.1)
rospy.sleep(2)
print "Down demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def up_demo():
move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
print "Up demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
rospy.sleep(1)
z_path_planner(-0.05)
print "go down..!"
z_path_planner(0.1)
rospy.sleep(1)
print "up demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def left_demo():
#move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
move_Joint(1.57,-2.27,1.93,-1.19,3.14,0) #left pose
print "Left demo is ready to start!, press enter..!"
raw_input()
print "go left..!"
#y_path_planner(0.1)
print "go more left..!"
y_path_planner(-0.2)
rospy.sleep(2)
print "left demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def right_demo():
move_Joint(1.57,-2.27,1.93,-1.19,0,0) #left pose
print "right demo is ready to start!, press enter..!"
raw_input()
print "go right..!"
y_path_planner(-0.1)
print "go more right..!"
y_path_planner(0.2)
rospy.sleep(2)
print "right demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
if __name__=='__main__':
down_demo()
up_demo()
left_demo()
right_demo()
#move_Joint(1.57079632679490 ,-1.57079632679490, 0, 0, 1.57079632679490, 0)
#move_Joint(1.38187901932325 ,0.594965748224829 ,-1.84587120888068 ,-0.259201159280024 ,1.87922844334536 ,-2.94403460825812)
#move_Joint(1.49234992746732 ,0.505575183819339 ,-1.77749928330972 ,-0.242572378864612 ,2.19692733555951 ,-3.04571339173395)
#move_Joint(1.57882340366397 ,0.392747674943758 ,-1.68144316751832 ,-0.294244456380595 ,2.51322054731526 ,3.12658213006687)
#move_Joint(1.63317876240784 ,0.285981577941942 ,-1.57592439233013 ,-0.472776683731581 ,2.82134996965689 ,2.93965970526083) | #move_Joint(1.57882340096811 ,-0.312808525716150 ,-1.00858459420407 ,-0.237259591096372 ,-2.51322054719748 ,-3.12658213005787)
#move_Joint(1.49234990741608 ,-1.25023043364197 ,0.884986270249331 ,-1.26185202847346 ,-2.19692733997135 ,3.04571337230654)
#move_Joint(1.38187898379176 ,-1.24138218321594 ,0.770320266997513 ,-1.16042411723514 ,-1.87922844910760 ,2.94403457922216)
#move_Joint(1.26044305157603 ,-1.22046262619007 ,0.674930245153225 ,-1.02526396256805 ,-1.57079631704406 ,2.83203570141689)
#move_Joint(1.14354220642811 ,-1.18364979561972 ,0.595829299678309 ,-0.849373924600574 ,-1.28560567284022 ,2.69592568050081)
#move_Joint(1.04448751501302 ,-1.11991084380463 ,0.510072952763684 ,-0.611447209983393 ,-1.03738511473967 ,2.51967923264632)
#move_Joint(0.971282836139718 ,-0.999323999902423 ,0.355694706708534 ,-0.267416174488388 ,-0.838880551835716 ,2.28194780565803)
#move_Joint(-1.63477141196796 ,-1.91133122955846 ,-1.10151156835317 ,-1.89146935483124 ,2.82134997161739 ,2.93965969942271)
#move_Joint(-1.65322906921999 ,-2.36794446930605 ,-1.05397760826863 ,0.280329388719014 ,-3.05995623288959 ,-1.57079636143505)
#move_Joint(-1.63477141766021 ,-1.98520583742980 ,-1.57592434796521 ,-0.959335599677850 ,-2.82134996914004 ,-2.93965967867828)
#move_Joint(-1.58041604836938 ,-2.00129787870086 ,-1.68144318432848 ,-1.01750430999134 ,-2.51322054568872 ,-3.12658213472640)
#move_Joint(-1.49394257050708 ,2.63601745038978 ,1.77749927268549 ,-2.89902024472062 ,-2.19692733754341 ,3.04571339154885)
#move_Joint(-1.38347163064872 ,2.54662686543240 ,1.84587116794395 ,-2.88239141905799 ,-1.87922844943615 ,2.94403456872028)
#move_Joint(-1.26203572966623 ,2.49325324698662 ,1.87618006889619 ,-2.79863698534015 ,-1.57079632703325 ,2.83203572396531) | #move_Joint(1.65163641729639 ,0.202937041530315 ,-1.05397766677144 ,-2.29055198297394 ,3.05995622779418 ,1.57079637373908)
#move_Joint(1.63317874347654 ,-0.210429202660752 ,-1.10151162936461 ,-0.0669323613463442 ,-2.82134998229054 ,-2.93965974902066) | random_line_split |
4_orient_grasping.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import sys
import rospy
import copy, math
import threading
import time
import tf
from math import pi, radians, degrees, atan2, sqrt
from moveit_commander import MoveGroupCommander, RobotCommander
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from moveit_commander.conversions import pose_to_list
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion, Twist
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation, MoveItErrorCodes, DisplayTrajectory
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from math import pi
#GROUP_NAME_GRIPPER = "NAME OF GRIPPER"
roscpp_initialize(sys.argv)
rospy.init_node('control_Husky_UR3', anonymous=True)
robot = RobotCommander()
scene = PlanningSceneInterface()
##모바일 파트 관련 변수 선언
x = 0.0
y = 0.0
theta = 0.0
## 매니퓰레이터 변수 선언
group_name = "ur3_manipulator"
move_group = MoveGroupCommander(group_name)
FIXED_FRAME = 'world'
display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
DisplayTrajectory,
queue_size=20)
def newOdom(msg):
global x
global y
global theta
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
rot_q = msg.pose.pose.orientation
(roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])
def move_Joint(q1,q2,q3,q4,q5,q6):
joint_goal = move_group.get_current_joint_values()
mobile_joints = [-pi/3, 0.5]
joint_goal_list = [q1,q2,q3,q4,q5,q6]
#매니퓰레이터 관절 value 설정
joint_goal[0] = joint_goal_list[0]
joint_goal[1] = joint_goal_list[1]
joint_goal[2] = joint_goal_list[2]
joint_goal[3] = joint_goal_list[3]
joint_goal[4] = joint_goal_list[4]
joint_goal[5] = joint_goal_list[5]
#제어시작
move_group.go(joint_goal, wait=True)
def get_TF(a,b):
end_flag = 0
listener = tf.TransformListener()
while end_flag ==0:
try:
(trans,rot) = listener.lookupTransform(a,b, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
end_flag = 1
return trans,rot
def move_ee(Px,Py,Pz,Rx,Ry,Rz,Rw):
trans,rot = get_TF('/odom','/base_link')
print('TF from odom to base link :',trans)
x = Px-trans[0]
y = Py-trans[1]
z = Pz-trans[2]
Ox = Rx
Oy = Ry
Oz = Rz-rot[2]
Ow = Rw
print 'real_planning_pose',x,y,z,Ox,Oy,Oz,Ow
print "============ Generating plan 1"
pose_target = Pose()
pose_target.position.x = x
pose_target.position.y = y
pose_target.position.z = z
pose_target.orientation.x = Ox
pose_target.orientation.y = Oy
pose_target.orientation.z = Oz
pose_target.orientation.w = Ow
move_group.set_pose_target(pose_target)
move_group.go(True)
print "============ plan 1 complete!"
trans_1,rot_1 = get_TF('odom','/ee_link')
print "============ ee pose : "
print move_group.get_current_pose()
print move_group.get_planning_frame()
print 'odom_TF',trans_1,rot_1
print "============"
def move_base(a,b):
sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
speed = Twist()
r = rospy.Rate(4)
goal = Point()
goal.x = a
goal.y = b
arrival_radius = 0.1
while (goal.x-x)**2 + (goal.y-y)**2 >= arrival_radius**2 :
#while abs(goal.x-x) >0.1 or abs(goal.y-y) >0.1 or abs(angle_to_goal-theta) >0.1 : #가까의 범위가 0.3이내로 들어오면 break.
inc_x = goal.x -x
inc_y = goal.y -y
angle_to_goal = atan2(inc_y,inc_x)
if abs(angle_to_goal - theta) > 2*pi/180:
speed.linear.x = 0.0
speed.angular.z = 0.3
if abs(angle_to_goal - theta) < 5*pi/180: # 0.5이내로 들어오면 속도를 매우 줄여서 목표점을 지나쳐버리는 일이 없도록함.
speed.angular.z = 0.03
speed.linear.x = 0.0
else:
speed.linear.x = 0.2
speed.angular.z = 0.0
if abs(goal.x-x) <0.3 and abs(goal.y-y)<0.3: #x,y val이 0.3이내로 들어오면 속도 매우 줄임.
speed.angular.x = 0.05
speed.angular.z = 0.0
print goal.x-x, goal.y-y, angle_to_goal-theta
pub.publish(speed)
r.sleep()
def cartesian_path_planner(a,b,c):
waypoints = []
wpose = move_group.get_current_pose().pose
wpose.position.z += a # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.x += b # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.y += c # First move up (z)
waypoints.append(copy.deepcopy(wpose))
# We want the Cartesian path to be interpolated at a resolution of 1 cm
# which is why we will specify 0.01 as the eef_step in Cartesian
# translation. We will disable the jump threshold by setting it to 0.0 disabling:
(plan, fraction) = move_group.compute_cartesian_path(
waypoints, # waypoints to follow
0.1, # eef_step
0.0) # jump_threshold
def x_path_planner(a):
pose_target = move_group.get_current_pose().pose
pose_target.position.x += a # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def y_path_planner(c):
pose_target = move_group.get_current_pose().pose
pose_target.position.y += c # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def z_path_planner(b):
pose_target = move_group.get_current_pose().pose
pose_target.position.z += b # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def down_demo():
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #down pose
print "Down demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
z_path_planner(0.1)
print "go down..!"
z_path_planner(-0.1)
rospy.sleep(2)
print "Down demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def up_demo():
move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
print "Up demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
rospy.sleep(1)
z_path_planner(-0.05)
print "go down..!"
z_path_planner(0.1)
rospy.sleep(1)
print "up demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def left_demo():
#move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
move_Joint(1.57,-2.27,1.93,-1.19,3.14,0) #left pose
print "Left demo is ready to start!, press enter..!"
raw_input()
print "go left..!"
#y_path_planner(0.1)
print "go more left..!"
y_path_planner(-0.2)
rospy.sleep(2)
print "left demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def right_demo():
move_Joint(1.57,-2.27,1.93,-1.19,0,0) #left pose
print "right demo is ready to start!, press enter..!"
raw_input()
print "go right..!"
y_path_planner(-0.1)
print |
#move_Joint(1.38187901932325 ,0.594965748224829 ,-1.84587120888068 ,-0.259201159280024 ,1.87922844334536 ,-2.94403460825812)
#move_Joint(1.49234992746732 ,0.505575183819339 ,-1.77749928330972 ,-0.242572378864612 ,2.19692733555951 ,-3.04571339173395)
#move_Joint(1.57882340366397 ,0.392747674943758 ,-1.68144316751832 ,-0.294244456380595 ,2.51322054731526 ,3.12658213006687)
#move_Joint(1.63317876240784 ,0.285981577941942 ,-1.57592439233013 ,-0.472776683731581 ,2.82134996965689 ,2.93965970526083)
#move_Joint(1.65163641729639 ,0.202937041530315 ,-1.05397766677144 ,-2.29055198297394 ,3.05995622779418 ,1.57079637373908)
#move_Joint(1.63317874347654 ,-0.210429202660752 ,-1.10151162936461 ,-0.0669323613463442 ,-2.82134998229054 ,-2.93965974902066)
#move_Joint(1.57882340096811 ,-0.312808525716150 ,-1.00858459420407 ,-0.237259591096372 ,-2.51322054719748 ,-3.12658213005787)
#move_Joint(1.49234990741608 ,-1.25023043364197 ,0.884986270249331 ,-1.26185202847346 ,-2.19692733997135 ,3.04571337230654)
#move_Joint(1.38187898379176 ,-1.24138218321594 ,0.770320266997513 ,-1.16042411723514 ,-1.87922844910760 ,2.94403457922216)
#move_Joint(1.26044305157603 ,-1.22046262619007 ,0.674930245153225 ,-1.02526396256805 ,-1.57079631704406 ,2.83203570141689)
#move_Joint(1.14354220642811 ,-1.18364979561972 ,0.595829299678309 ,-0.849373924600574 ,-1.28560567284022 ,2.69592568050081)
#move_Joint(1.04448751501302 ,-1.11991084380463 ,0.510072952763684 ,-0.611447209983393 ,-1.03738511473967 ,2.51967923264632)
#move_Joint(0.971282836139718 ,-0.999323999902423 ,0.355694706708534 ,-0.267416174488388 ,-0.838880551835716 ,2.28194780565803)
#move_Joint(-1.63477141196796 ,-1.91133122955846 ,-1.10151156835317 ,-1.89146935483124 ,2.82134997161739 ,2.93965969942271)
#move_Joint(-1.65322906921999 ,-2.36794446930605 ,-1.05397760826863 ,0.280329388719014 ,-3.05995623288959 ,-1.57079636143505)
#move_Joint(-1.63477141766021 ,-1.98520583742980 ,-1.57592434796521 ,-0.959335599677850 ,-2.82134996914004 ,-2.93965967867828)
#move_Joint(-1.58041604836938 ,-2.00129787870086 ,-1.68144318432848 ,-1.01750430999134 ,-2.51322054568872 ,-3.12658213472640)
#move_Joint(-1.49394257050708 ,2.63601745038978 ,1.77749927268549 ,-2.89902024472062 ,-2.19692733754341 ,3.04571339154885)
#move_Joint(-1.38347163064872 ,2.54662686543240 ,1.84587116794395 ,-2.88239141905799 ,-1.87922844943615 ,2.94403456872028)
#move_Joint(-1.26203572966623 ,2.49325324698662 ,1.87618006889619 ,-2.79863698534015 ,-1.57079632703325 ,2.83203572396531)
| "go more right..!"
y_path_planner(0.2)
rospy.sleep(2)
print "right demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
if __name__=='__main__':
down_demo()
up_demo()
left_demo()
right_demo()
#move_Joint(1.57079632679490 ,-1.57079632679490, 0, 0, 1.57079632679490, 0) | identifier_body |
4_orient_grasping.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import sys
import rospy
import copy, math
import threading
import time
import tf
from math import pi, radians, degrees, atan2, sqrt
from moveit_commander import MoveGroupCommander, RobotCommander
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from moveit_commander.conversions import pose_to_list
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion, Twist
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation, MoveItErrorCodes, DisplayTrajectory
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from math import pi
#GROUP_NAME_GRIPPER = "NAME OF GRIPPER"
roscpp_initialize(sys.argv)
rospy.init_node('control_Husky_UR3', anonymous=True)
robot = RobotCommander()
scene = PlanningSceneInterface()
##모바일 파트 관련 변수 선언
x = 0.0
y = 0.0
theta = 0.0
## 매니퓰레이터 변수 선언
group_name = "ur3_manipulator"
move_group = MoveGroupCommander(group_name)
FIXED_FRAME = 'world'
display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
DisplayTrajectory,
queue_size=20)
def newOdom(msg):
global x
global y
global theta
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
rot_q = msg.pose.pose.orientation
(roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])
def move_Joint(q1,q2,q3,q4,q5,q6):
joint_goal = move_group.get_current_joint_values()
mobile_joints = [-pi/3, 0.5]
joint_goal_list = [q1,q2,q3,q4,q5,q6]
#매니퓰레이터 관절 value 설정
joint_goal[0] = joint_goal_list[0]
joint_goal[1] = joint_goal_list[1]
joint_goal[2] = joint_goal_list[2]
joint_goal[3] = joint_goal_list[3]
joint_goal[4] = joint_goal_list[4]
joint_goal[5] = joint_goal_list[5]
#제어시작
move_group.go(joint_goal, wait=True)
def get_TF(a,b):
end_flag = 0
listener = tf.TransformListener()
while end_flag ==0:
try:
(trans,rot) = listener.lookupTransform(a,b, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
end_flag = 1
return trans,rot
def move_ee(Px,Py,Pz,Rx,Ry,Rz,Rw):
trans,rot = get_TF('/odom','/base_link')
print('TF from odom to base link :',trans)
x = Px-trans[0]
y = Py-trans[1]
z = Pz-trans[2]
Ox = Rx
Oy = Ry
Oz = Rz-rot[2]
Ow = Rw
print 'real_planning_pose',x,y,z,Ox,Oy,Oz,Ow
print "============ Generating plan 1"
pose_target = Pose()
pose_target.position.x = x
pose_target.position.y = y
pose_target.position.z = z
pose_target.orientation.x = Ox
pose_target.orientation.y = Oy
pose_target.orientation.z = Oz
pose_target.orientation.w = Ow
move_group.set_pose_target(pose_target)
move_group.go(True)
print "============ plan 1 complete!"
trans_1,rot_1 = get_TF('odom','/ee_link')
print "============ ee pose : "
print move_group.get_current_pose()
print move_group.get_planning_frame()
print 'odom_TF',trans_1,rot_1
print "============"
def move_base(a,b):
sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
speed = Twist()
r = rospy.Rate(4)
goal = Point()
goal.x = a
goal.y = b
arrival_radius = 0.1
while (goal.x-x)**2 + (goal.y-y)**2 >= arrival_radius**2 :
#while abs(goal.x-x) >0.1 or abs(goal.y-y) >0.1 or abs(angle_to_goal-theta) >0.1 : #가까의 범위가 0.3이내로 들어오면 break.
inc_x = goal.x -x
inc_y = goal.y -y
angle_to_goal = atan2(inc_y,inc_x)
if abs(angle_to_goal - theta) > 2*pi/180:
speed.linear.x = 0.0
speed.angular.z = 0.3
if abs(angle_to_goal - theta) < 5*pi/180: # 0.5이내로 들어오면 속도를 매우 줄여서 목표점을 지나쳐버리는 일이 없도록함.
speed.angular.z = 0.03
speed.linear.x = 0.0
else:
speed.linear.x = 0.2
speed.angular.z = 0.0
if abs(goal.x-x) <0.3 and abs(goal.y-y)<0.3: #x,y val이 0.3이내로 들어오면 속도 매우 줄임.
speed.angular.x = 0.05
speed.angular.z = 0.0
print goal.x-x, goal.y-y, angle_to_goal-theta
pub.publish(speed)
r.sleep()
def cartesian_path_planner(a,b,c):
waypoints = []
wpose = move_group.get_current_pose().pose
wpose.position.z += a # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.x += b # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.y += c # First move up (z)
waypoints.append(copy.deepcopy(wpose))
# We want the Cartesian path to be interpolated at a resolution of 1 cm
# which is why we will specify 0.01 as the eef_step in Cartesian
# translation. We will disable the jump threshold by setting it to 0.0 disabling:
(plan, fraction) = move_group.compute_cartesian_path(
waypoints, # waypoints to follow
0.1, # eef_step
0.0) # jump_threshold
def x_path_planner(a):
pose_target = move_group.get_current_pose().pose
pose_target.position.x += a # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def y_path_planner(c):
pose_target = move_group.get_current_pose().pose
pose_target.position.y += c # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def z_path_planner(b):
pose_target = move_group.get_current_pose().pose
pose_target.position.z += b # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def down_demo():
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #down pose
print "Down demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
z_path_planner(0.1)
print "go down..!"
z_path_planner(-0.1)
rospy.sleep(2)
print "Down demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def up_demo():
move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
print "Up demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
rospy.sleep(1)
z_ | anner(-0.05)
print "go down..!"
z_path_planner(0.1)
rospy.sleep(1)
print "up demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def left_demo():
#move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
move_Joint(1.57,-2.27,1.93,-1.19,3.14,0) #left pose
print "Left demo is ready to start!, press enter..!"
raw_input()
print "go left..!"
#y_path_planner(0.1)
print "go more left..!"
y_path_planner(-0.2)
rospy.sleep(2)
print "left demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def right_demo():
move_Joint(1.57,-2.27,1.93,-1.19,0,0) #left pose
print "right demo is ready to start!, press enter..!"
raw_input()
print "go right..!"
y_path_planner(-0.1)
print "go more right..!"
y_path_planner(0.2)
rospy.sleep(2)
print "right demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
if __name__=='__main__':
down_demo()
up_demo()
left_demo()
right_demo()
#move_Joint(1.57079632679490 ,-1.57079632679490, 0, 0, 1.57079632679490, 0)
#move_Joint(1.38187901932325 ,0.594965748224829 ,-1.84587120888068 ,-0.259201159280024 ,1.87922844334536 ,-2.94403460825812)
#move_Joint(1.49234992746732 ,0.505575183819339 ,-1.77749928330972 ,-0.242572378864612 ,2.19692733555951 ,-3.04571339173395)
#move_Joint(1.57882340366397 ,0.392747674943758 ,-1.68144316751832 ,-0.294244456380595 ,2.51322054731526 ,3.12658213006687)
#move_Joint(1.63317876240784 ,0.285981577941942 ,-1.57592439233013 ,-0.472776683731581 ,2.82134996965689 ,2.93965970526083)
#move_Joint(1.65163641729639 ,0.202937041530315 ,-1.05397766677144 ,-2.29055198297394 ,3.05995622779418 ,1.57079637373908)
#move_Joint(1.63317874347654 ,-0.210429202660752 ,-1.10151162936461 ,-0.0669323613463442 ,-2.82134998229054 ,-2.93965974902066)
#move_Joint(1.57882340096811 ,-0.312808525716150 ,-1.00858459420407 ,-0.237259591096372 ,-2.51322054719748 ,-3.12658213005787)
#move_Joint(1.49234990741608 ,-1.25023043364197 ,0.884986270249331 ,-1.26185202847346 ,-2.19692733997135 ,3.04571337230654)
#move_Joint(1.38187898379176 ,-1.24138218321594 ,0.770320266997513 ,-1.16042411723514 ,-1.87922844910760 ,2.94403457922216)
#move_Joint(1.26044305157603 ,-1.22046262619007 ,0.674930245153225 ,-1.02526396256805 ,-1.57079631704406 ,2.83203570141689)
#move_Joint(1.14354220642811 ,-1.18364979561972 ,0.595829299678309 ,-0.849373924600574 ,-1.28560567284022 ,2.69592568050081)
#move_Joint(1.04448751501302 ,-1.11991084380463 ,0.510072952763684 ,-0.611447209983393 ,-1.03738511473967 ,2.51967923264632)
#move_Joint(0.971282836139718 ,-0.999323999902423 ,0.355694706708534 ,-0.267416174488388 ,-0.838880551835716 ,2.28194780565803)
#move_Joint(-1.63477141196796 ,-1.91133122955846 ,-1.10151156835317 ,-1.89146935483124 ,2.82134997161739 ,2.93965969942271)
#move_Joint(-1.65322906921999 ,-2.36794446930605 ,-1.05397760826863 ,0.280329388719014 ,-3.05995623288959 ,-1.57079636143505)
#move_Joint(-1.63477141766021 ,-1.98520583742980 ,-1.57592434796521 ,-0.959335599677850 ,-2.82134996914004 ,-2.93965967867828)
#move_Joint(-1.58041604836938 ,-2.00129787870086 ,-1.68144318432848 ,-1.01750430999134 ,-2.51322054568872 ,-3.12658213472640)
#move_Joint(-1.49394257050708 ,2.63601745038978 ,1.77749927268549 ,-2.89902024472062 ,-2.19692733754341 ,3.04571339154885)
#move_Joint(-1.38347163064872 ,2.54662686543240 ,1.84587116794395 ,-2.88239141905799 ,-1.87922844943615 ,2.94403456872028)
#move_Joint(-1.26203572966623 ,2.49325324698662 ,1.87618006889619 ,-2.79863698534015 ,-1.57079632703325 ,2.83203572396531)
| path_pl | identifier_name |
lib.register_lints.rs | // This file was generated by `cargo dev update_lints`.
// Use that command to update this file and do not edit by hand.
// Manual edits will be overwritten.
store.register_lints(&[
#[cfg(feature = "internal")]
utils::internal_lints::CLIPPY_LINTS_INTERNAL,
#[cfg(feature = "internal")]
utils::internal_lints::COLLAPSIBLE_SPAN_LINT_CALLS,
#[cfg(feature = "internal")]
utils::internal_lints::COMPILER_LINT_FUNCTIONS,
#[cfg(feature = "internal")]
utils::internal_lints::DEFAULT_LINT,
#[cfg(feature = "internal")]
utils::internal_lints::IF_CHAIN_STYLE,
#[cfg(feature = "internal")]
utils::internal_lints::INTERNING_DEFINED_SYMBOL,
#[cfg(feature = "internal")]
utils::internal_lints::INVALID_CLIPPY_VERSION_ATTRIBUTE,
#[cfg(feature = "internal")]
utils::internal_lints::INVALID_PATHS,
#[cfg(feature = "internal")]
utils::internal_lints::LINT_WITHOUT_LINT_PASS,
#[cfg(feature = "internal")]
utils::internal_lints::MATCH_TYPE_ON_DIAGNOSTIC_ITEM,
#[cfg(feature = "internal")]
utils::internal_lints::MISSING_CLIPPY_VERSION_ATTRIBUTE,
#[cfg(feature = "internal")]
utils::internal_lints::MISSING_MSRV_ATTR_IMPL,
#[cfg(feature = "internal")]
utils::internal_lints::OUTER_EXPN_EXPN_DATA,
#[cfg(feature = "internal")]
utils::internal_lints::PRODUCE_ICE,
#[cfg(feature = "internal")]
utils::internal_lints::UNNECESSARY_SYMBOL_STR,
absurd_extreme_comparisons::ABSURD_EXTREME_COMPARISONS,
approx_const::APPROX_CONSTANT,
arithmetic::FLOAT_ARITHMETIC,
arithmetic::INTEGER_ARITHMETIC,
as_conversions::AS_CONVERSIONS,
asm_syntax::INLINE_ASM_X86_ATT_SYNTAX,
asm_syntax::INLINE_ASM_X86_INTEL_SYNTAX,
assertions_on_constants::ASSERTIONS_ON_CONSTANTS,
assign_ops::ASSIGN_OP_PATTERN,
assign_ops::MISREFACTORED_ASSIGN_OP,
async_yields_async::ASYNC_YIELDS_ASYNC,
attrs::ALLOW_ATTRIBUTES_WITHOUT_REASON,
attrs::BLANKET_CLIPPY_RESTRICTION_LINTS,
attrs::DEPRECATED_CFG_ATTR,
attrs::DEPRECATED_SEMVER,
attrs::EMPTY_LINE_AFTER_OUTER_ATTR,
attrs::INLINE_ALWAYS,
attrs::MISMATCHED_TARGET_OS,
attrs::USELESS_ATTRIBUTE,
await_holding_invalid::AWAIT_HOLDING_LOCK,
await_holding_invalid::AWAIT_HOLDING_REFCELL_REF,
bit_mask::BAD_BIT_MASK,
bit_mask::INEFFECTIVE_BIT_MASK,
bit_mask::VERBOSE_BIT_MASK,
blacklisted_name::BLACKLISTED_NAME,
blocks_in_if_conditions::BLOCKS_IN_IF_CONDITIONS,
bool_assert_comparison::BOOL_ASSERT_COMPARISON,
booleans::LOGIC_BUG,
booleans::NONMINIMAL_BOOL,
borrow_as_ptr::BORROW_AS_PTR,
bytecount::NAIVE_BYTECOUNT,
cargo::CARGO_COMMON_METADATA,
cargo::MULTIPLE_CRATE_VERSIONS,
cargo::NEGATIVE_FEATURE_NAMES,
cargo::REDUNDANT_FEATURE_NAMES,
cargo::WILDCARD_DEPENDENCIES,
case_sensitive_file_extension_comparisons::CASE_SENSITIVE_FILE_EXTENSION_COMPARISONS,
casts::CAST_ENUM_CONSTRUCTOR,
casts::CAST_ENUM_TRUNCATION,
casts::CAST_LOSSLESS,
casts::CAST_POSSIBLE_TRUNCATION,
casts::CAST_POSSIBLE_WRAP,
casts::CAST_PRECISION_LOSS,
casts::CAST_PTR_ALIGNMENT,
casts::CAST_REF_TO_MUT,
casts::CAST_SIGN_LOSS,
casts::CAST_SLICE_DIFFERENT_SIZES,
casts::CHAR_LIT_AS_U8,
casts::FN_TO_NUMERIC_CAST,
casts::FN_TO_NUMERIC_CAST_ANY,
casts::FN_TO_NUMERIC_CAST_WITH_TRUNCATION,
casts::PTR_AS_PTR,
casts::UNNECESSARY_CAST,
checked_conversions::CHECKED_CONVERSIONS,
cognitive_complexity::COGNITIVE_COMPLEXITY, | comparison_chain::COMPARISON_CHAIN,
copies::BRANCHES_SHARING_CODE,
copies::IFS_SAME_COND,
copies::IF_SAME_THEN_ELSE,
copies::SAME_FUNCTIONS_IN_IF_CONDITION,
copy_iterator::COPY_ITERATOR,
create_dir::CREATE_DIR,
dbg_macro::DBG_MACRO,
default::DEFAULT_TRAIT_ACCESS,
default::FIELD_REASSIGN_WITH_DEFAULT,
default_numeric_fallback::DEFAULT_NUMERIC_FALLBACK,
default_union_representation::DEFAULT_UNION_REPRESENTATION,
dereference::EXPLICIT_DEREF_METHODS,
dereference::NEEDLESS_BORROW,
dereference::REF_BINDING_TO_REFERENCE,
derivable_impls::DERIVABLE_IMPLS,
derive::DERIVE_HASH_XOR_EQ,
derive::DERIVE_ORD_XOR_PARTIAL_ORD,
derive::EXPL_IMPL_CLONE_ON_COPY,
derive::UNSAFE_DERIVE_DESERIALIZE,
disallowed_methods::DISALLOWED_METHODS,
disallowed_script_idents::DISALLOWED_SCRIPT_IDENTS,
disallowed_types::DISALLOWED_TYPES,
doc::DOC_MARKDOWN,
doc::MISSING_ERRORS_DOC,
doc::MISSING_PANICS_DOC,
doc::MISSING_SAFETY_DOC,
doc::NEEDLESS_DOCTEST_MAIN,
double_comparison::DOUBLE_COMPARISONS,
double_parens::DOUBLE_PARENS,
drop_forget_ref::DROP_COPY,
drop_forget_ref::DROP_REF,
drop_forget_ref::FORGET_COPY,
drop_forget_ref::FORGET_REF,
duration_subsec::DURATION_SUBSEC,
else_if_without_else::ELSE_IF_WITHOUT_ELSE,
empty_enum::EMPTY_ENUM,
entry::MAP_ENTRY,
enum_clike::ENUM_CLIKE_UNPORTABLE_VARIANT,
enum_variants::ENUM_VARIANT_NAMES,
enum_variants::MODULE_INCEPTION,
enum_variants::MODULE_NAME_REPETITIONS,
eq_op::EQ_OP,
eq_op::OP_REF,
equatable_if_let::EQUATABLE_IF_LET,
erasing_op::ERASING_OP,
escape::BOXED_LOCAL,
eta_reduction::REDUNDANT_CLOSURE,
eta_reduction::REDUNDANT_CLOSURE_FOR_METHOD_CALLS,
eval_order_dependence::DIVERGING_SUB_EXPRESSION,
eval_order_dependence::EVAL_ORDER_DEPENDENCE,
excessive_bools::FN_PARAMS_EXCESSIVE_BOOLS,
excessive_bools::STRUCT_EXCESSIVE_BOOLS,
exhaustive_items::EXHAUSTIVE_ENUMS,
exhaustive_items::EXHAUSTIVE_STRUCTS,
exit::EXIT,
explicit_write::EXPLICIT_WRITE,
fallible_impl_from::FALLIBLE_IMPL_FROM,
float_equality_without_abs::FLOAT_EQUALITY_WITHOUT_ABS,
float_literal::EXCESSIVE_PRECISION,
float_literal::LOSSY_FLOAT_LITERAL,
floating_point_arithmetic::IMPRECISE_FLOPS,
floating_point_arithmetic::SUBOPTIMAL_FLOPS,
format::USELESS_FORMAT,
format_args::FORMAT_IN_FORMAT_ARGS,
format_args::TO_STRING_IN_FORMAT_ARGS,
format_impl::PRINT_IN_FORMAT_IMPL,
format_impl::RECURSIVE_FORMAT_IMPL,
formatting::POSSIBLE_MISSING_COMMA,
formatting::SUSPICIOUS_ASSIGNMENT_FORMATTING,
formatting::SUSPICIOUS_ELSE_FORMATTING,
formatting::SUSPICIOUS_UNARY_OP_FORMATTING,
from_over_into::FROM_OVER_INTO,
from_str_radix_10::FROM_STR_RADIX_10,
functions::DOUBLE_MUST_USE,
functions::MUST_USE_CANDIDATE,
functions::MUST_USE_UNIT,
functions::NOT_UNSAFE_PTR_ARG_DEREF,
functions::RESULT_UNIT_ERR,
functions::TOO_MANY_ARGUMENTS,
functions::TOO_MANY_LINES,
future_not_send::FUTURE_NOT_SEND,
get_last_with_len::GET_LAST_WITH_LEN,
identity_op::IDENTITY_OP,
if_let_mutex::IF_LET_MUTEX,
if_not_else::IF_NOT_ELSE,
if_then_some_else_none::IF_THEN_SOME_ELSE_NONE,
implicit_hasher::IMPLICIT_HASHER,
implicit_return::IMPLICIT_RETURN,
implicit_saturating_sub::IMPLICIT_SATURATING_SUB,
inconsistent_struct_constructor::INCONSISTENT_STRUCT_CONSTRUCTOR,
index_refutable_slice::INDEX_REFUTABLE_SLICE,
indexing_slicing::INDEXING_SLICING,
indexing_slicing::OUT_OF_BOUNDS_INDEXING,
infinite_iter::INFINITE_ITER,
infinite_iter::MAYBE_INFINITE_ITER,
inherent_impl::MULTIPLE_INHERENT_IMPL,
inherent_to_string::INHERENT_TO_STRING,
inherent_to_string::INHERENT_TO_STRING_SHADOW_DISPLAY,
init_numbered_fields::INIT_NUMBERED_FIELDS,
inline_fn_without_body::INLINE_FN_WITHOUT_BODY,
int_plus_one::INT_PLUS_ONE,
integer_division::INTEGER_DIVISION,
invalid_upcast_comparisons::INVALID_UPCAST_COMPARISONS,
items_after_statements::ITEMS_AFTER_STATEMENTS,
iter_not_returning_iterator::ITER_NOT_RETURNING_ITERATOR,
large_const_arrays::LARGE_CONST_ARRAYS,
large_enum_variant::LARGE_ENUM_VARIANT,
large_stack_arrays::LARGE_STACK_ARRAYS,
len_zero::COMPARISON_TO_EMPTY,
len_zero::LEN_WITHOUT_IS_EMPTY,
len_zero::LEN_ZERO,
let_if_seq::USELESS_LET_IF_SEQ,
let_underscore::LET_UNDERSCORE_DROP,
let_underscore::LET_UNDERSCORE_LOCK,
let_underscore::LET_UNDERSCORE_MUST_USE,
lifetimes::EXTRA_UNUSED_LIFETIMES,
lifetimes::NEEDLESS_LIFETIMES,
literal_representation::DECIMAL_LITERAL_REPRESENTATION,
literal_representation::INCONSISTENT_DIGIT_GROUPING,
literal_representation::LARGE_DIGIT_GROUPS,
literal_representation::MISTYPED_LITERAL_SUFFIXES,
literal_representation::UNREADABLE_LITERAL,
literal_representation::UNUSUAL_BYTE_GROUPINGS,
loops::EMPTY_LOOP,
loops::EXPLICIT_COUNTER_LOOP,
loops::EXPLICIT_INTO_ITER_LOOP,
loops::EXPLICIT_ITER_LOOP,
loops::FOR_KV_MAP,
loops::FOR_LOOPS_OVER_FALLIBLES,
loops::ITER_NEXT_LOOP,
loops::MANUAL_FLATTEN,
loops::MANUAL_MEMCPY,
loops::MISSING_SPIN_LOOP,
loops::MUT_RANGE_BOUND,
loops::NEEDLESS_COLLECT,
loops::NEEDLESS_RANGE_LOOP,
loops::NEVER_LOOP,
loops::SAME_ITEM_PUSH,
loops::SINGLE_ELEMENT_LOOP,
loops::WHILE_IMMUTABLE_CONDITION,
loops::WHILE_LET_LOOP,
loops::WHILE_LET_ON_ITERATOR,
macro_use::MACRO_USE_IMPORTS,
main_recursion::MAIN_RECURSION,
manual_assert::MANUAL_ASSERT,
manual_async_fn::MANUAL_ASYNC_FN,
manual_bits::MANUAL_BITS,
manual_map::MANUAL_MAP,
manual_non_exhaustive::MANUAL_NON_EXHAUSTIVE,
manual_ok_or::MANUAL_OK_OR,
manual_strip::MANUAL_STRIP,
manual_unwrap_or::MANUAL_UNWRAP_OR,
map_clone::MAP_CLONE,
map_err_ignore::MAP_ERR_IGNORE,
map_unit_fn::OPTION_MAP_UNIT_FN,
map_unit_fn::RESULT_MAP_UNIT_FN,
match_on_vec_items::MATCH_ON_VEC_ITEMS,
match_result_ok::MATCH_RESULT_OK,
match_str_case_mismatch::MATCH_STR_CASE_MISMATCH,
matches::INFALLIBLE_DESTRUCTURING_MATCH,
matches::MATCH_AS_REF,
matches::MATCH_BOOL,
matches::MATCH_LIKE_MATCHES_MACRO,
matches::MATCH_OVERLAPPING_ARM,
matches::MATCH_REF_PATS,
matches::MATCH_SAME_ARMS,
matches::MATCH_SINGLE_BINDING,
matches::MATCH_WILDCARD_FOR_SINGLE_VARIANTS,
matches::MATCH_WILD_ERR_ARM,
matches::NEEDLESS_MATCH,
matches::REDUNDANT_PATTERN_MATCHING,
matches::REST_PAT_IN_FULLY_BOUND_STRUCTS,
matches::SINGLE_MATCH,
matches::SINGLE_MATCH_ELSE,
matches::WILDCARD_ENUM_MATCH_ARM,
matches::WILDCARD_IN_OR_PATTERNS,
mem_forget::MEM_FORGET,
mem_replace::MEM_REPLACE_OPTION_WITH_NONE,
mem_replace::MEM_REPLACE_WITH_DEFAULT,
mem_replace::MEM_REPLACE_WITH_UNINIT,
methods::BIND_INSTEAD_OF_MAP,
methods::BYTES_NTH,
methods::CHARS_LAST_CMP,
methods::CHARS_NEXT_CMP,
methods::CLONED_INSTEAD_OF_COPIED,
methods::CLONE_DOUBLE_REF,
methods::CLONE_ON_COPY,
methods::CLONE_ON_REF_PTR,
methods::EXPECT_FUN_CALL,
methods::EXPECT_USED,
methods::EXTEND_WITH_DRAIN,
methods::FILETYPE_IS_FILE,
methods::FILTER_MAP_IDENTITY,
methods::FILTER_MAP_NEXT,
methods::FILTER_NEXT,
methods::FLAT_MAP_IDENTITY,
methods::FLAT_MAP_OPTION,
methods::FROM_ITER_INSTEAD_OF_COLLECT,
methods::GET_UNWRAP,
methods::IMPLICIT_CLONE,
methods::INEFFICIENT_TO_STRING,
methods::INSPECT_FOR_EACH,
methods::INTO_ITER_ON_REF,
methods::ITERATOR_STEP_BY_ZERO,
methods::ITER_CLONED_COLLECT,
methods::ITER_COUNT,
methods::ITER_NEXT_SLICE,
methods::ITER_NTH,
methods::ITER_NTH_ZERO,
methods::ITER_OVEREAGER_CLONED,
methods::ITER_SKIP_NEXT,
methods::ITER_WITH_DRAIN,
methods::MANUAL_FILTER_MAP,
methods::MANUAL_FIND_MAP,
methods::MANUAL_SATURATING_ARITHMETIC,
methods::MANUAL_SPLIT_ONCE,
methods::MANUAL_STR_REPEAT,
methods::MAP_COLLECT_RESULT_UNIT,
methods::MAP_FLATTEN,
methods::MAP_IDENTITY,
methods::MAP_UNWRAP_OR,
methods::NEEDLESS_SPLITN,
methods::NEW_RET_NO_SELF,
methods::OK_EXPECT,
methods::OPTION_AS_REF_DEREF,
methods::OPTION_FILTER_MAP,
methods::OPTION_MAP_OR_NONE,
methods::OR_FUN_CALL,
methods::OR_THEN_UNWRAP,
methods::RESULT_MAP_OR_INTO_OPTION,
methods::SEARCH_IS_SOME,
methods::SHOULD_IMPLEMENT_TRAIT,
methods::SINGLE_CHAR_ADD_STR,
methods::SINGLE_CHAR_PATTERN,
methods::SKIP_WHILE_NEXT,
methods::STRING_EXTEND_CHARS,
methods::SUSPICIOUS_MAP,
methods::SUSPICIOUS_SPLITN,
methods::UNINIT_ASSUMED_INIT,
methods::UNNECESSARY_FILTER_MAP,
methods::UNNECESSARY_FIND_MAP,
methods::UNNECESSARY_FOLD,
methods::UNNECESSARY_LAZY_EVALUATIONS,
methods::UNNECESSARY_TO_OWNED,
methods::UNWRAP_OR_ELSE_DEFAULT,
methods::UNWRAP_USED,
methods::USELESS_ASREF,
methods::WRONG_SELF_CONVENTION,
methods::ZST_OFFSET,
minmax::MIN_MAX,
misc::CMP_NAN,
misc::CMP_OWNED,
misc::FLOAT_CMP,
misc::FLOAT_CMP_CONST,
misc::MODULO_ONE,
misc::SHORT_CIRCUIT_STATEMENT,
misc::TOPLEVEL_REF_ARG,
misc::USED_UNDERSCORE_BINDING,
misc::ZERO_PTR,
misc_early::BUILTIN_TYPE_SHADOW,
misc_early::DOUBLE_NEG,
misc_early::DUPLICATE_UNDERSCORE_ARGUMENT,
misc_early::MIXED_CASE_HEX_LITERALS,
misc_early::REDUNDANT_PATTERN,
misc_early::SEPARATED_LITERAL_SUFFIX,
misc_early::UNNEEDED_FIELD_PATTERN,
misc_early::UNNEEDED_WILDCARD_PATTERN,
misc_early::UNSEPARATED_LITERAL_SUFFIX,
misc_early::ZERO_PREFIXED_LITERAL,
missing_const_for_fn::MISSING_CONST_FOR_FN,
missing_doc::MISSING_DOCS_IN_PRIVATE_ITEMS,
missing_enforced_import_rename::MISSING_ENFORCED_IMPORT_RENAMES,
missing_inline::MISSING_INLINE_IN_PUBLIC_ITEMS,
module_style::MOD_MODULE_FILES,
module_style::SELF_NAMED_MODULE_FILES,
modulo_arithmetic::MODULO_ARITHMETIC,
mut_key::MUTABLE_KEY_TYPE,
mut_mut::MUT_MUT,
mut_mutex_lock::MUT_MUTEX_LOCK,
mut_reference::UNNECESSARY_MUT_PASSED,
mutable_debug_assertion::DEBUG_ASSERT_WITH_MUT_CALL,
mutex_atomic::MUTEX_ATOMIC,
mutex_atomic::MUTEX_INTEGER,
needless_arbitrary_self_type::NEEDLESS_ARBITRARY_SELF_TYPE,
needless_bitwise_bool::NEEDLESS_BITWISE_BOOL,
needless_bool::BOOL_COMPARISON,
needless_bool::NEEDLESS_BOOL,
needless_borrowed_ref::NEEDLESS_BORROWED_REFERENCE,
needless_continue::NEEDLESS_CONTINUE,
needless_for_each::NEEDLESS_FOR_EACH,
needless_late_init::NEEDLESS_LATE_INIT,
needless_option_as_deref::NEEDLESS_OPTION_AS_DEREF,
needless_pass_by_value::NEEDLESS_PASS_BY_VALUE,
needless_question_mark::NEEDLESS_QUESTION_MARK,
needless_update::NEEDLESS_UPDATE,
neg_cmp_op_on_partial_ord::NEG_CMP_OP_ON_PARTIAL_ORD,
neg_multiply::NEG_MULTIPLY,
new_without_default::NEW_WITHOUT_DEFAULT,
no_effect::NO_EFFECT,
no_effect::NO_EFFECT_UNDERSCORE_BINDING,
no_effect::UNNECESSARY_OPERATION,
non_copy_const::BORROW_INTERIOR_MUTABLE_CONST,
non_copy_const::DECLARE_INTERIOR_MUTABLE_CONST,
non_expressive_names::JUST_UNDERSCORES_AND_DIGITS,
non_expressive_names::MANY_SINGLE_CHAR_NAMES,
non_expressive_names::SIMILAR_NAMES,
non_octal_unix_permissions::NON_OCTAL_UNIX_PERMISSIONS,
non_send_fields_in_send_ty::NON_SEND_FIELDS_IN_SEND_TY,
nonstandard_macro_braces::NONSTANDARD_MACRO_BRACES,
octal_escapes::OCTAL_ESCAPES,
only_used_in_recursion::ONLY_USED_IN_RECURSION,
open_options::NONSENSICAL_OPEN_OPTIONS,
option_env_unwrap::OPTION_ENV_UNWRAP,
option_if_let_else::OPTION_IF_LET_ELSE,
overflow_check_conditional::OVERFLOW_CHECK_CONDITIONAL,
panic_in_result_fn::PANIC_IN_RESULT_FN,
panic_unimplemented::PANIC,
panic_unimplemented::TODO,
panic_unimplemented::UNIMPLEMENTED,
panic_unimplemented::UNREACHABLE,
partialeq_ne_impl::PARTIALEQ_NE_IMPL,
pass_by_ref_or_value::LARGE_TYPES_PASSED_BY_VALUE,
pass_by_ref_or_value::TRIVIALLY_COPY_PASS_BY_REF,
path_buf_push_overwrite::PATH_BUF_PUSH_OVERWRITE,
pattern_type_mismatch::PATTERN_TYPE_MISMATCH,
precedence::PRECEDENCE,
ptr::CMP_NULL,
ptr::INVALID_NULL_PTR_USAGE,
ptr::MUT_FROM_REF,
ptr::PTR_ARG,
ptr_eq::PTR_EQ,
ptr_offset_with_cast::PTR_OFFSET_WITH_CAST,
question_mark::QUESTION_MARK,
ranges::MANUAL_RANGE_CONTAINS,
ranges::RANGE_MINUS_ONE,
ranges::RANGE_PLUS_ONE,
ranges::RANGE_ZIP_WITH_LEN,
ranges::REVERSED_EMPTY_RANGES,
redundant_clone::REDUNDANT_CLONE,
redundant_closure_call::REDUNDANT_CLOSURE_CALL,
redundant_else::REDUNDANT_ELSE,
redundant_field_names::REDUNDANT_FIELD_NAMES,
redundant_pub_crate::REDUNDANT_PUB_CRATE,
redundant_slicing::DEREF_BY_SLICING,
redundant_slicing::REDUNDANT_SLICING,
redundant_static_lifetimes::REDUNDANT_STATIC_LIFETIMES,
ref_option_ref::REF_OPTION_REF,
reference::DEREF_ADDROF,
regex::INVALID_REGEX,
regex::TRIVIAL_REGEX,
repeat_once::REPEAT_ONCE,
return_self_not_must_use::RETURN_SELF_NOT_MUST_USE,
returns::LET_AND_RETURN,
returns::NEEDLESS_RETURN,
same_name_method::SAME_NAME_METHOD,
self_assignment::SELF_ASSIGNMENT,
self_named_constructors::SELF_NAMED_CONSTRUCTORS,
semicolon_if_nothing_returned::SEMICOLON_IF_NOTHING_RETURNED,
serde_api::SERDE_API_MISUSE,
shadow::SHADOW_REUSE,
shadow::SHADOW_SAME,
shadow::SHADOW_UNRELATED,
single_char_lifetime_names::SINGLE_CHAR_LIFETIME_NAMES,
single_component_path_imports::SINGLE_COMPONENT_PATH_IMPORTS,
size_of_in_element_count::SIZE_OF_IN_ELEMENT_COUNT,
slow_vector_initialization::SLOW_VECTOR_INITIALIZATION,
stable_sort_primitive::STABLE_SORT_PRIMITIVE,
strings::STRING_ADD,
strings::STRING_ADD_ASSIGN,
strings::STRING_FROM_UTF8_AS_BYTES,
strings::STRING_LIT_AS_BYTES,
strings::STRING_SLICE,
strings::STRING_TO_STRING,
strings::STR_TO_STRING,
strlen_on_c_strings::STRLEN_ON_C_STRINGS,
suspicious_operation_groupings::SUSPICIOUS_OPERATION_GROUPINGS,
suspicious_trait_impl::SUSPICIOUS_ARITHMETIC_IMPL,
suspicious_trait_impl::SUSPICIOUS_OP_ASSIGN_IMPL,
swap::ALMOST_SWAPPED,
swap::MANUAL_SWAP,
tabs_in_doc_comments::TABS_IN_DOC_COMMENTS,
temporary_assignment::TEMPORARY_ASSIGNMENT,
to_digit_is_some::TO_DIGIT_IS_SOME,
trailing_empty_array::TRAILING_EMPTY_ARRAY,
trait_bounds::TRAIT_DUPLICATION_IN_BOUNDS,
trait_bounds::TYPE_REPETITION_IN_BOUNDS,
transmute::CROSSPOINTER_TRANSMUTE,
transmute::TRANSMUTES_EXPRESSIBLE_AS_PTR_CASTS,
transmute::TRANSMUTE_BYTES_TO_STR,
transmute::TRANSMUTE_FLOAT_TO_INT,
transmute::TRANSMUTE_INT_TO_BOOL,
transmute::TRANSMUTE_INT_TO_CHAR,
transmute::TRANSMUTE_INT_TO_FLOAT,
transmute::TRANSMUTE_NUM_TO_BYTES,
transmute::TRANSMUTE_PTR_TO_PTR,
transmute::TRANSMUTE_PTR_TO_REF,
transmute::TRANSMUTE_UNDEFINED_REPR,
transmute::UNSOUND_COLLECTION_TRANSMUTE,
transmute::USELESS_TRANSMUTE,
transmute::WRONG_TRANSMUTE,
transmuting_null::TRANSMUTING_NULL,
try_err::TRY_ERR,
types::BORROWED_BOX,
types::BOX_COLLECTION,
types::LINKEDLIST,
types::OPTION_OPTION,
types::RC_BUFFER,
types::RC_MUTEX,
types::REDUNDANT_ALLOCATION,
types::TYPE_COMPLEXITY,
types::VEC_BOX,
undocumented_unsafe_blocks::UNDOCUMENTED_UNSAFE_BLOCKS,
undropped_manually_drops::UNDROPPED_MANUALLY_DROPS,
unicode::INVISIBLE_CHARACTERS,
unicode::NON_ASCII_LITERAL,
unicode::UNICODE_NOT_NFC,
uninit_vec::UNINIT_VEC,
unit_hash::UNIT_HASH,
unit_return_expecting_ord::UNIT_RETURN_EXPECTING_ORD,
unit_types::LET_UNIT_VALUE,
unit_types::UNIT_ARG,
unit_types::UNIT_CMP,
unnamed_address::FN_ADDRESS_COMPARISONS,
unnamed_address::VTABLE_ADDRESS_COMPARISONS,
unnecessary_self_imports::UNNECESSARY_SELF_IMPORTS,
unnecessary_sort_by::UNNECESSARY_SORT_BY,
unnecessary_wraps::UNNECESSARY_WRAPS,
unnested_or_patterns::UNNESTED_OR_PATTERNS,
unsafe_removed_from_name::UNSAFE_REMOVED_FROM_NAME,
unused_async::UNUSED_ASYNC,
unused_io_amount::UNUSED_IO_AMOUNT,
unused_self::UNUSED_SELF,
unused_unit::UNUSED_UNIT,
unwrap::PANICKING_UNWRAP,
unwrap::UNNECESSARY_UNWRAP,
unwrap_in_result::UNWRAP_IN_RESULT,
upper_case_acronyms::UPPER_CASE_ACRONYMS,
use_self::USE_SELF,
useless_conversion::USELESS_CONVERSION,
vec::USELESS_VEC,
vec_init_then_push::VEC_INIT_THEN_PUSH,
vec_resize_to_zero::VEC_RESIZE_TO_ZERO,
verbose_file_reads::VERBOSE_FILE_READS,
wildcard_imports::ENUM_GLOB_USE,
wildcard_imports::WILDCARD_IMPORTS,
write::PRINTLN_EMPTY_STRING,
write::PRINT_LITERAL,
write::PRINT_STDERR,
write::PRINT_STDOUT,
write::PRINT_WITH_NEWLINE,
write::USE_DEBUG,
write::WRITELN_EMPTY_STRING,
write::WRITE_LITERAL,
write::WRITE_WITH_NEWLINE,
zero_div_zero::ZERO_DIVIDED_BY_ZERO,
zero_sized_map_values::ZERO_SIZED_MAP_VALUES,
]) | collapsible_if::COLLAPSIBLE_ELSE_IF,
collapsible_if::COLLAPSIBLE_IF,
collapsible_match::COLLAPSIBLE_MATCH, | random_line_split |
tracesegment.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awsxray // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray"
import (
"encoding/json"
"errors"
"fmt"
)
type CauseType int
const (
// CauseTypeExceptionID indicates that the type of the `cause`
// field is a string
CauseTypeExceptionID CauseType = iota + 1
// CauseTypeObject indicates that the type of the `cause`
// field is an object
CauseTypeObject
)
// Segment schema is documented in xray-segmentdocument-schema-v1.0.0 listed
// on https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
type Segment struct {
// Required fields for both segment and subsegments
Name *string `json:"name"`
ID *string `json:"id"`
StartTime *float64 `json:"start_time"`
// Segment-only optional fields
Service *ServiceData `json:"service,omitempty"`
Origin *string `json:"origin,omitempty"`
User *string `json:"user,omitempty"`
ResourceARN *string `json:"resource_arn,omitempty"`
Links []SpanLinkData `json:"links,omitempty"`
// Optional fields for both Segment and subsegments
TraceID *string `json:"trace_id,omitempty"`
EndTime *float64 `json:"end_time,omitempty"`
InProgress *bool `json:"in_progress,omitempty"`
HTTP *HTTPData `json:"http,omitempty"`
Fault *bool `json:"fault,omitempty"`
Error *bool `json:"error,omitempty"`
Throttle *bool `json:"throttle,omitempty"`
Cause *CauseData `json:"cause,omitempty"`
AWS *AWSData `json:"aws,omitempty"`
Annotations map[string]interface{} `json:"annotations,omitempty"`
Metadata map[string]map[string]interface{} `json:"metadata,omitempty"`
Subsegments []Segment `json:"subsegments,omitempty"`
// (for both embedded and independent) subsegment-only (optional) fields.
// Please refer to https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments
// for more information on subsegment.
Namespace *string `json:"namespace,omitempty"`
ParentID *string `json:"parent_id,omitempty"`
Type *string `json:"type,omitempty"`
PrecursorIDs []string `json:"precursor_ids,omitempty"`
Traced *bool `json:"traced,omitempty"`
SQL *SQLData `json:"sql,omitempty"`
}
// Validate checks whether the segment is valid or not
func (s *Segment) Validate() error {
if s.Name == nil {
return errors.New(`segment "name" can not be nil`)
}
if s.ID == nil {
return errors.New(`segment "id" can not be nil`)
}
if s.StartTime == nil {
return errors.New(`segment "start_time" can not be nil`)
}
// it's ok for embedded subsegments to not have trace_id
// but the root segment and independent subsegments must all
// have trace_id.
if s.TraceID == nil {
return errors.New(`segment "trace_id" can not be nil`)
}
return nil
}
// AWSData represents the aws resource that this segment
// originates from
type AWSData struct {
// Segment-only
Beanstalk *BeanstalkMetadata `json:"elastic_beanstalk,omitempty"`
CWLogs []LogGroupMetadata `json:"cloudwatch_logs,omitempty"`
ECS *ECSMetadata `json:"ecs,omitempty"`
EC2 *EC2Metadata `json:"ec2,omitempty"`
EKS *EKSMetadata `json:"eks,omitempty"`
XRay *XRayMetaData `json:"xray,omitempty"`
// For both segment and subsegments
AccountID *string `json:"account_id,omitempty"`
Operation *string `json:"operation,omitempty"`
RemoteRegion *string `json:"region,omitempty"`
RequestID *string `json:"request_id,omitempty"`
QueueURL *string `json:"queue_url,omitempty"`
TableName *string `json:"table_name,omitempty"`
TableNames []string `json:"table_names,omitempty"`
Retries *int64 `json:"retries,omitempty"`
}
// EC2Metadata represents the EC2 metadata field
type EC2Metadata struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
InstanceSize *string `json:"instance_size"`
AmiID *string `json:"ami_id"`
}
// ECSMetadata represents the ECS metadata field. All must be omitempty b/c they come from two different detectors:
// Docker and ECS, so it's possible one is present and not the other
type ECSMetadata struct {
ContainerName *string `json:"container,omitempty"`
ContainerID *string `json:"container_id,omitempty"`
TaskArn *string `json:"task_arn,omitempty"`
TaskFamily *string `json:"task_family,omitempty"`
ClusterArn *string `json:"cluster_arn,omitempty"`
ContainerArn *string `json:"container_arn,omitempty"`
AvailabilityZone *string `json:"availability_zone,omitempty"`
LaunchType *string `json:"launch_type,omitempty"`
}
// BeanstalkMetadata represents the Elastic Beanstalk environment metadata field
type BeanstalkMetadata struct {
Environment *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int64 `json:"deployment_id"`
}
// EKSMetadata represents the EKS metadata field
type EKSMetadata struct {
ClusterName *string `json:"cluster_name"`
Pod *string `json:"pod"`
ContainerID *string `json:"container_id"`
}
// LogGroupMetadata represents a single CloudWatch Log Group
type LogGroupMetadata struct {
LogGroup *string `json:"log_group"`
Arn *string `json:"arn,omitempty"`
}
// CauseData is the container that contains the `cause` field
type CauseData struct {
Type CauseType `json:"-"`
// it will contain one of ExceptionID or (WorkingDirectory, Paths, Exceptions)
ExceptionID *string `json:"-"`
CauseObject
}
type CauseObject struct {
WorkingDirectory *string `json:"working_directory,omitempty"`
Paths []string `json:"paths,omitempty"`
Exceptions []Exception `json:"exceptions,omitempty"`
}
// UnmarshalJSON is the custom unmarshaller for the cause field
func (c *CauseData) | (data []byte) error {
err := json.Unmarshal(data, &c.CauseObject)
if err == nil {
c.Type = CauseTypeObject
return nil
}
rawStr := string(data)
if len(rawStr) > 0 && (rawStr[0] != '"' || rawStr[len(rawStr)-1] != '"') {
return fmt.Errorf("the value assigned to the `cause` field does not appear to be a string: %v", data)
}
exceptionID := rawStr[1 : len(rawStr)-1]
c.Type = CauseTypeExceptionID
c.ExceptionID = &exceptionID
return nil
}
// Exception represents an exception occurred
type Exception struct {
ID *string `json:"id,omitempty"`
Message *string `json:"message,omitempty"`
Type *string `json:"type,omitempty"`
Remote *bool `json:"remote,omitempty"`
Truncated *int64 `json:"truncated,omitempty"`
Skipped *int64 `json:"skipped,omitempty"`
Cause *string `json:"cause,omitempty"`
Stack []StackFrame `json:"stack,omitempty"`
}
// StackFrame represents a frame in the stack when an exception occurred
type StackFrame struct {
Path *string `json:"path,omitempty"`
Line *int `json:"line,omitempty"`
Label *string `json:"label,omitempty"`
}
// HTTPData provides the shape for unmarshalling request and response fields.
type HTTPData struct {
Request *RequestData `json:"request,omitempty"`
Response *ResponseData `json:"response,omitempty"`
}
// RequestData provides the shape for unmarshalling the request field.
type RequestData struct {
// Available in segment
XForwardedFor *bool `json:"x_forwarded_for,omitempty"`
// Available in both segment and subsegments
Method *string `json:"method,omitempty"`
URL *string `json:"url,omitempty"`
UserAgent *string `json:"user_agent,omitempty"`
ClientIP *string `json:"client_ip,omitempty"`
}
// ResponseData provides the shape for unmarshalling the response field.
type ResponseData struct {
Status *int64 `json:"status,omitempty"`
ContentLength interface{} `json:"content_length,omitempty"`
}
// ECSData provides the shape for unmarshalling the ecs field.
type ECSData struct {
Container *string `json:"container"`
}
// EC2Data provides the shape for unmarshalling the ec2 field.
type EC2Data struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
}
// ElasticBeanstalkData provides the shape for unmarshalling the elastic_beanstalk field.
type ElasticBeanstalkData struct {
EnvironmentName *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int `json:"deployment_id"`
}
// XRayMetaData provides the shape for unmarshalling the xray field
type XRayMetaData struct {
SDK *string `json:"sdk,omitempty"`
SDKVersion *string `json:"sdk_version,omitempty"`
AutoInstrumentation *bool `json:"auto_instrumentation"`
}
// SQLData provides the shape for unmarshalling the sql field.
type SQLData struct {
ConnectionString *string `json:"connection_string,omitempty"`
URL *string `json:"url,omitempty"` // protocol://host[:port]/database
SanitizedQuery *string `json:"sanitized_query,omitempty"`
DatabaseType *string `json:"database_type,omitempty"`
DatabaseVersion *string `json:"database_version,omitempty"`
DriverVersion *string `json:"driver_version,omitempty"`
User *string `json:"user,omitempty"`
Preparation *string `json:"preparation,omitempty"` // "statement" / "call"
}
// ServiceData provides the shape for unmarshalling the service field.
type ServiceData struct {
Version *string `json:"version,omitempty"`
CompilerVersion *string `json:"compiler_version,omitempty"`
Compiler *string `json:"compiler,omitempty"`
}
// SpanLinkData provides the shape for unmarshalling the span links in the span link field.
type SpanLinkData struct {
TraceID *string `json:"trace_id"`
SpanID *string `json:"id"`
Attributes map[string]interface{} `json:"attributes,omitempty"`
}
| UnmarshalJSON | identifier_name |
tracesegment.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awsxray // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray"
import (
"encoding/json"
"errors"
"fmt"
)
type CauseType int
const (
// CauseTypeExceptionID indicates that the type of the `cause`
// field is a string
CauseTypeExceptionID CauseType = iota + 1
// CauseTypeObject indicates that the type of the `cause`
// field is an object
CauseTypeObject
)
// Segment schema is documented in xray-segmentdocument-schema-v1.0.0 listed
// on https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
type Segment struct {
// Required fields for both segment and subsegments
Name *string `json:"name"`
ID *string `json:"id"`
StartTime *float64 `json:"start_time"`
// Segment-only optional fields
Service *ServiceData `json:"service,omitempty"`
Origin *string `json:"origin,omitempty"`
User *string `json:"user,omitempty"`
ResourceARN *string `json:"resource_arn,omitempty"`
Links []SpanLinkData `json:"links,omitempty"`
// Optional fields for both Segment and subsegments
TraceID *string `json:"trace_id,omitempty"`
EndTime *float64 `json:"end_time,omitempty"`
InProgress *bool `json:"in_progress,omitempty"`
HTTP *HTTPData `json:"http,omitempty"`
Fault *bool `json:"fault,omitempty"`
Error *bool `json:"error,omitempty"`
Throttle *bool `json:"throttle,omitempty"`
Cause *CauseData `json:"cause,omitempty"`
AWS *AWSData `json:"aws,omitempty"`
Annotations map[string]interface{} `json:"annotations,omitempty"`
Metadata map[string]map[string]interface{} `json:"metadata,omitempty"`
Subsegments []Segment `json:"subsegments,omitempty"`
// (for both embedded and independent) subsegment-only (optional) fields.
// Please refer to https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments
// for more information on subsegment.
Namespace *string `json:"namespace,omitempty"`
ParentID *string `json:"parent_id,omitempty"`
Type *string `json:"type,omitempty"`
PrecursorIDs []string `json:"precursor_ids,omitempty"`
Traced *bool `json:"traced,omitempty"`
SQL *SQLData `json:"sql,omitempty"`
}
// Validate checks whether the segment is valid or not
func (s *Segment) Validate() error {
if s.Name == nil {
return errors.New(`segment "name" can not be nil`)
}
if s.ID == nil {
return errors.New(`segment "id" can not be nil`)
}
if s.StartTime == nil |
// it's ok for embedded subsegments to not have trace_id
// but the root segment and independent subsegments must all
// have trace_id.
if s.TraceID == nil {
return errors.New(`segment "trace_id" can not be nil`)
}
return nil
}
// AWSData represents the aws resource that this segment
// originates from
type AWSData struct {
// Segment-only
Beanstalk *BeanstalkMetadata `json:"elastic_beanstalk,omitempty"`
CWLogs []LogGroupMetadata `json:"cloudwatch_logs,omitempty"`
ECS *ECSMetadata `json:"ecs,omitempty"`
EC2 *EC2Metadata `json:"ec2,omitempty"`
EKS *EKSMetadata `json:"eks,omitempty"`
XRay *XRayMetaData `json:"xray,omitempty"`
// For both segment and subsegments
AccountID *string `json:"account_id,omitempty"`
Operation *string `json:"operation,omitempty"`
RemoteRegion *string `json:"region,omitempty"`
RequestID *string `json:"request_id,omitempty"`
QueueURL *string `json:"queue_url,omitempty"`
TableName *string `json:"table_name,omitempty"`
TableNames []string `json:"table_names,omitempty"`
Retries *int64 `json:"retries,omitempty"`
}
// EC2Metadata represents the EC2 metadata field
type EC2Metadata struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
InstanceSize *string `json:"instance_size"`
AmiID *string `json:"ami_id"`
}
// ECSMetadata represents the ECS metadata field. All must be omitempty b/c they come from two different detectors:
// Docker and ECS, so it's possible one is present and not the other
type ECSMetadata struct {
ContainerName *string `json:"container,omitempty"`
ContainerID *string `json:"container_id,omitempty"`
TaskArn *string `json:"task_arn,omitempty"`
TaskFamily *string `json:"task_family,omitempty"`
ClusterArn *string `json:"cluster_arn,omitempty"`
ContainerArn *string `json:"container_arn,omitempty"`
AvailabilityZone *string `json:"availability_zone,omitempty"`
LaunchType *string `json:"launch_type,omitempty"`
}
// BeanstalkMetadata represents the Elastic Beanstalk environment metadata field
type BeanstalkMetadata struct {
Environment *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int64 `json:"deployment_id"`
}
// EKSMetadata represents the EKS metadata field
type EKSMetadata struct {
ClusterName *string `json:"cluster_name"`
Pod *string `json:"pod"`
ContainerID *string `json:"container_id"`
}
// LogGroupMetadata represents a single CloudWatch Log Group
type LogGroupMetadata struct {
LogGroup *string `json:"log_group"`
Arn *string `json:"arn,omitempty"`
}
// CauseData is the container that contains the `cause` field
type CauseData struct {
Type CauseType `json:"-"`
// it will contain one of ExceptionID or (WorkingDirectory, Paths, Exceptions)
ExceptionID *string `json:"-"`
CauseObject
}
type CauseObject struct {
WorkingDirectory *string `json:"working_directory,omitempty"`
Paths []string `json:"paths,omitempty"`
Exceptions []Exception `json:"exceptions,omitempty"`
}
// UnmarshalJSON is the custom unmarshaller for the cause field
func (c *CauseData) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, &c.CauseObject)
if err == nil {
c.Type = CauseTypeObject
return nil
}
rawStr := string(data)
if len(rawStr) > 0 && (rawStr[0] != '"' || rawStr[len(rawStr)-1] != '"') {
return fmt.Errorf("the value assigned to the `cause` field does not appear to be a string: %v", data)
}
exceptionID := rawStr[1 : len(rawStr)-1]
c.Type = CauseTypeExceptionID
c.ExceptionID = &exceptionID
return nil
}
// Exception represents an exception occurred
type Exception struct {
ID *string `json:"id,omitempty"`
Message *string `json:"message,omitempty"`
Type *string `json:"type,omitempty"`
Remote *bool `json:"remote,omitempty"`
Truncated *int64 `json:"truncated,omitempty"`
Skipped *int64 `json:"skipped,omitempty"`
Cause *string `json:"cause,omitempty"`
Stack []StackFrame `json:"stack,omitempty"`
}
// StackFrame represents a frame in the stack when an exception occurred
type StackFrame struct {
Path *string `json:"path,omitempty"`
Line *int `json:"line,omitempty"`
Label *string `json:"label,omitempty"`
}
// HTTPData provides the shape for unmarshalling request and response fields.
type HTTPData struct {
Request *RequestData `json:"request,omitempty"`
Response *ResponseData `json:"response,omitempty"`
}
// RequestData provides the shape for unmarshalling the request field.
type RequestData struct {
// Available in segment
XForwardedFor *bool `json:"x_forwarded_for,omitempty"`
// Available in both segment and subsegments
Method *string `json:"method,omitempty"`
URL *string `json:"url,omitempty"`
UserAgent *string `json:"user_agent,omitempty"`
ClientIP *string `json:"client_ip,omitempty"`
}
// ResponseData provides the shape for unmarshalling the response field.
type ResponseData struct {
Status *int64 `json:"status,omitempty"`
ContentLength interface{} `json:"content_length,omitempty"`
}
// ECSData provides the shape for unmarshalling the ecs field.
type ECSData struct {
Container *string `json:"container"`
}
// EC2Data provides the shape for unmarshalling the ec2 field.
type EC2Data struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
}
// ElasticBeanstalkData provides the shape for unmarshalling the elastic_beanstalk field.
type ElasticBeanstalkData struct {
EnvironmentName *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int `json:"deployment_id"`
}
// XRayMetaData provides the shape for unmarshalling the xray field
type XRayMetaData struct {
SDK *string `json:"sdk,omitempty"`
SDKVersion *string `json:"sdk_version,omitempty"`
AutoInstrumentation *bool `json:"auto_instrumentation"`
}
// SQLData provides the shape for unmarshalling the sql field.
type SQLData struct {
ConnectionString *string `json:"connection_string,omitempty"`
URL *string `json:"url,omitempty"` // protocol://host[:port]/database
SanitizedQuery *string `json:"sanitized_query,omitempty"`
DatabaseType *string `json:"database_type,omitempty"`
DatabaseVersion *string `json:"database_version,omitempty"`
DriverVersion *string `json:"driver_version,omitempty"`
User *string `json:"user,omitempty"`
Preparation *string `json:"preparation,omitempty"` // "statement" / "call"
}
// ServiceData provides the shape for unmarshalling the service field.
type ServiceData struct {
Version *string `json:"version,omitempty"`
CompilerVersion *string `json:"compiler_version,omitempty"`
Compiler *string `json:"compiler,omitempty"`
}
// SpanLinkData provides the shape for unmarshalling the span links in the span link field.
type SpanLinkData struct {
TraceID *string `json:"trace_id"`
SpanID *string `json:"id"`
Attributes map[string]interface{} `json:"attributes,omitempty"`
}
| {
return errors.New(`segment "start_time" can not be nil`)
} | conditional_block |
tracesegment.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awsxray // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray"
import (
"encoding/json"
"errors"
"fmt"
)
type CauseType int
const (
// CauseTypeExceptionID indicates that the type of the `cause`
// field is a string
CauseTypeExceptionID CauseType = iota + 1
// CauseTypeObject indicates that the type of the `cause`
// field is an object
CauseTypeObject
)
// Segment schema is documented in xray-segmentdocument-schema-v1.0.0 listed
// on https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
type Segment struct {
// Required fields for both segment and subsegments
Name *string `json:"name"`
ID *string `json:"id"`
StartTime *float64 `json:"start_time"`
// Segment-only optional fields
Service *ServiceData `json:"service,omitempty"`
Origin *string `json:"origin,omitempty"`
User *string `json:"user,omitempty"`
ResourceARN *string `json:"resource_arn,omitempty"`
Links []SpanLinkData `json:"links,omitempty"`
// Optional fields for both Segment and subsegments
TraceID *string `json:"trace_id,omitempty"`
EndTime *float64 `json:"end_time,omitempty"`
InProgress *bool `json:"in_progress,omitempty"`
HTTP *HTTPData `json:"http,omitempty"`
Fault *bool `json:"fault,omitempty"`
Error *bool `json:"error,omitempty"`
Throttle *bool `json:"throttle,omitempty"`
Cause *CauseData `json:"cause,omitempty"`
AWS *AWSData `json:"aws,omitempty"`
Annotations map[string]interface{} `json:"annotations,omitempty"`
Metadata map[string]map[string]interface{} `json:"metadata,omitempty"`
Subsegments []Segment `json:"subsegments,omitempty"`
// (for both embedded and independent) subsegment-only (optional) fields.
// Please refer to https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments
// for more information on subsegment.
Namespace *string `json:"namespace,omitempty"`
ParentID *string `json:"parent_id,omitempty"`
Type *string `json:"type,omitempty"`
PrecursorIDs []string `json:"precursor_ids,omitempty"`
Traced *bool `json:"traced,omitempty"`
SQL *SQLData `json:"sql,omitempty"`
}
// Validate checks whether the segment is valid or not
func (s *Segment) Validate() error {
if s.Name == nil {
return errors.New(`segment "name" can not be nil`)
}
if s.ID == nil {
return errors.New(`segment "id" can not be nil`)
}
if s.StartTime == nil {
return errors.New(`segment "start_time" can not be nil`)
}
// it's ok for embedded subsegments to not have trace_id
// but the root segment and independent subsegments must all
// have trace_id.
if s.TraceID == nil {
return errors.New(`segment "trace_id" can not be nil`)
}
return nil
}
// AWSData represents the aws resource that this segment
// originates from
type AWSData struct {
// Segment-only
Beanstalk *BeanstalkMetadata `json:"elastic_beanstalk,omitempty"`
CWLogs []LogGroupMetadata `json:"cloudwatch_logs,omitempty"`
ECS *ECSMetadata `json:"ecs,omitempty"`
EC2 *EC2Metadata `json:"ec2,omitempty"`
EKS *EKSMetadata `json:"eks,omitempty"`
XRay *XRayMetaData `json:"xray,omitempty"`
// For both segment and subsegments
AccountID *string `json:"account_id,omitempty"`
Operation *string `json:"operation,omitempty"`
RemoteRegion *string `json:"region,omitempty"`
RequestID *string `json:"request_id,omitempty"`
QueueURL *string `json:"queue_url,omitempty"`
TableName *string `json:"table_name,omitempty"`
TableNames []string `json:"table_names,omitempty"`
Retries *int64 `json:"retries,omitempty"`
}
// EC2Metadata represents the EC2 metadata field
type EC2Metadata struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
InstanceSize *string `json:"instance_size"`
AmiID *string `json:"ami_id"`
}
// ECSMetadata represents the ECS metadata field. All must be omitempty b/c they come from two different detectors:
// Docker and ECS, so it's possible one is present and not the other
type ECSMetadata struct {
ContainerName *string `json:"container,omitempty"`
ContainerID *string `json:"container_id,omitempty"`
TaskArn *string `json:"task_arn,omitempty"`
TaskFamily *string `json:"task_family,omitempty"`
ClusterArn *string `json:"cluster_arn,omitempty"`
ContainerArn *string `json:"container_arn,omitempty"`
AvailabilityZone *string `json:"availability_zone,omitempty"`
LaunchType *string `json:"launch_type,omitempty"` | }
// BeanstalkMetadata represents the Elastic Beanstalk environment metadata field
type BeanstalkMetadata struct {
Environment *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int64 `json:"deployment_id"`
}
// EKSMetadata represents the EKS metadata field
type EKSMetadata struct {
ClusterName *string `json:"cluster_name"`
Pod *string `json:"pod"`
ContainerID *string `json:"container_id"`
}
// LogGroupMetadata represents a single CloudWatch Log Group
type LogGroupMetadata struct {
LogGroup *string `json:"log_group"`
Arn *string `json:"arn,omitempty"`
}
// CauseData is the container that contains the `cause` field
type CauseData struct {
Type CauseType `json:"-"`
// it will contain one of ExceptionID or (WorkingDirectory, Paths, Exceptions)
ExceptionID *string `json:"-"`
CauseObject
}
type CauseObject struct {
WorkingDirectory *string `json:"working_directory,omitempty"`
Paths []string `json:"paths,omitempty"`
Exceptions []Exception `json:"exceptions,omitempty"`
}
// UnmarshalJSON is the custom unmarshaller for the cause field
func (c *CauseData) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, &c.CauseObject)
if err == nil {
c.Type = CauseTypeObject
return nil
}
rawStr := string(data)
if len(rawStr) > 0 && (rawStr[0] != '"' || rawStr[len(rawStr)-1] != '"') {
return fmt.Errorf("the value assigned to the `cause` field does not appear to be a string: %v", data)
}
exceptionID := rawStr[1 : len(rawStr)-1]
c.Type = CauseTypeExceptionID
c.ExceptionID = &exceptionID
return nil
}
// Exception represents an exception occurred
type Exception struct {
ID *string `json:"id,omitempty"`
Message *string `json:"message,omitempty"`
Type *string `json:"type,omitempty"`
Remote *bool `json:"remote,omitempty"`
Truncated *int64 `json:"truncated,omitempty"`
Skipped *int64 `json:"skipped,omitempty"`
Cause *string `json:"cause,omitempty"`
Stack []StackFrame `json:"stack,omitempty"`
}
// StackFrame represents a frame in the stack when an exception occurred
type StackFrame struct {
Path *string `json:"path,omitempty"`
Line *int `json:"line,omitempty"`
Label *string `json:"label,omitempty"`
}
// HTTPData provides the shape for unmarshalling request and response fields.
type HTTPData struct {
Request *RequestData `json:"request,omitempty"`
Response *ResponseData `json:"response,omitempty"`
}
// RequestData provides the shape for unmarshalling the request field.
type RequestData struct {
// Available in segment
XForwardedFor *bool `json:"x_forwarded_for,omitempty"`
// Available in both segment and subsegments
Method *string `json:"method,omitempty"`
URL *string `json:"url,omitempty"`
UserAgent *string `json:"user_agent,omitempty"`
ClientIP *string `json:"client_ip,omitempty"`
}
// ResponseData provides the shape for unmarshalling the response field.
type ResponseData struct {
Status *int64 `json:"status,omitempty"`
ContentLength interface{} `json:"content_length,omitempty"`
}
// ECSData provides the shape for unmarshalling the ecs field.
type ECSData struct {
Container *string `json:"container"`
}
// EC2Data provides the shape for unmarshalling the ec2 field.
type EC2Data struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
}
// ElasticBeanstalkData provides the shape for unmarshalling the elastic_beanstalk field.
type ElasticBeanstalkData struct {
EnvironmentName *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int `json:"deployment_id"`
}
// XRayMetaData provides the shape for unmarshalling the xray field
type XRayMetaData struct {
SDK *string `json:"sdk,omitempty"`
SDKVersion *string `json:"sdk_version,omitempty"`
AutoInstrumentation *bool `json:"auto_instrumentation"`
}
// SQLData provides the shape for unmarshalling the sql field.
type SQLData struct {
ConnectionString *string `json:"connection_string,omitempty"`
URL *string `json:"url,omitempty"` // protocol://host[:port]/database
SanitizedQuery *string `json:"sanitized_query,omitempty"`
DatabaseType *string `json:"database_type,omitempty"`
DatabaseVersion *string `json:"database_version,omitempty"`
DriverVersion *string `json:"driver_version,omitempty"`
User *string `json:"user,omitempty"`
Preparation *string `json:"preparation,omitempty"` // "statement" / "call"
}
// ServiceData provides the shape for unmarshalling the service field.
type ServiceData struct {
Version *string `json:"version,omitempty"`
CompilerVersion *string `json:"compiler_version,omitempty"`
Compiler *string `json:"compiler,omitempty"`
}
// SpanLinkData provides the shape for unmarshalling the span links in the span link field.
type SpanLinkData struct {
TraceID *string `json:"trace_id"`
SpanID *string `json:"id"`
Attributes map[string]interface{} `json:"attributes,omitempty"`
} | random_line_split | |
tracesegment.go | // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package awsxray // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray"
import (
"encoding/json"
"errors"
"fmt"
)
type CauseType int
const (
// CauseTypeExceptionID indicates that the type of the `cause`
// field is a string
CauseTypeExceptionID CauseType = iota + 1
// CauseTypeObject indicates that the type of the `cause`
// field is an object
CauseTypeObject
)
// Segment schema is documented in xray-segmentdocument-schema-v1.0.0 listed
// on https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
type Segment struct {
// Required fields for both segment and subsegments
Name *string `json:"name"`
ID *string `json:"id"`
StartTime *float64 `json:"start_time"`
// Segment-only optional fields
Service *ServiceData `json:"service,omitempty"`
Origin *string `json:"origin,omitempty"`
User *string `json:"user,omitempty"`
ResourceARN *string `json:"resource_arn,omitempty"`
Links []SpanLinkData `json:"links,omitempty"`
// Optional fields for both Segment and subsegments
TraceID *string `json:"trace_id,omitempty"`
EndTime *float64 `json:"end_time,omitempty"`
InProgress *bool `json:"in_progress,omitempty"`
HTTP *HTTPData `json:"http,omitempty"`
Fault *bool `json:"fault,omitempty"`
Error *bool `json:"error,omitempty"`
Throttle *bool `json:"throttle,omitempty"`
Cause *CauseData `json:"cause,omitempty"`
AWS *AWSData `json:"aws,omitempty"`
Annotations map[string]interface{} `json:"annotations,omitempty"`
Metadata map[string]map[string]interface{} `json:"metadata,omitempty"`
Subsegments []Segment `json:"subsegments,omitempty"`
// (for both embedded and independent) subsegment-only (optional) fields.
// Please refer to https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments
// for more information on subsegment.
Namespace *string `json:"namespace,omitempty"`
ParentID *string `json:"parent_id,omitempty"`
Type *string `json:"type,omitempty"`
PrecursorIDs []string `json:"precursor_ids,omitempty"`
Traced *bool `json:"traced,omitempty"`
SQL *SQLData `json:"sql,omitempty"`
}
// Validate checks whether the segment is valid or not
func (s *Segment) Validate() error {
if s.Name == nil {
return errors.New(`segment "name" can not be nil`)
}
if s.ID == nil {
return errors.New(`segment "id" can not be nil`)
}
if s.StartTime == nil {
return errors.New(`segment "start_time" can not be nil`)
}
// it's ok for embedded subsegments to not have trace_id
// but the root segment and independent subsegments must all
// have trace_id.
if s.TraceID == nil {
return errors.New(`segment "trace_id" can not be nil`)
}
return nil
}
// AWSData represents the aws resource that this segment
// originates from
type AWSData struct {
// Segment-only
Beanstalk *BeanstalkMetadata `json:"elastic_beanstalk,omitempty"`
CWLogs []LogGroupMetadata `json:"cloudwatch_logs,omitempty"`
ECS *ECSMetadata `json:"ecs,omitempty"`
EC2 *EC2Metadata `json:"ec2,omitempty"`
EKS *EKSMetadata `json:"eks,omitempty"`
XRay *XRayMetaData `json:"xray,omitempty"`
// For both segment and subsegments
AccountID *string `json:"account_id,omitempty"`
Operation *string `json:"operation,omitempty"`
RemoteRegion *string `json:"region,omitempty"`
RequestID *string `json:"request_id,omitempty"`
QueueURL *string `json:"queue_url,omitempty"`
TableName *string `json:"table_name,omitempty"`
TableNames []string `json:"table_names,omitempty"`
Retries *int64 `json:"retries,omitempty"`
}
// EC2Metadata represents the EC2 metadata field
type EC2Metadata struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
InstanceSize *string `json:"instance_size"`
AmiID *string `json:"ami_id"`
}
// ECSMetadata represents the ECS metadata field. All must be omitempty b/c they come from two different detectors:
// Docker and ECS, so it's possible one is present and not the other
type ECSMetadata struct {
ContainerName *string `json:"container,omitempty"`
ContainerID *string `json:"container_id,omitempty"`
TaskArn *string `json:"task_arn,omitempty"`
TaskFamily *string `json:"task_family,omitempty"`
ClusterArn *string `json:"cluster_arn,omitempty"`
ContainerArn *string `json:"container_arn,omitempty"`
AvailabilityZone *string `json:"availability_zone,omitempty"`
LaunchType *string `json:"launch_type,omitempty"`
}
// BeanstalkMetadata represents the Elastic Beanstalk environment metadata field
type BeanstalkMetadata struct {
Environment *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int64 `json:"deployment_id"`
}
// EKSMetadata represents the EKS metadata field
type EKSMetadata struct {
ClusterName *string `json:"cluster_name"`
Pod *string `json:"pod"`
ContainerID *string `json:"container_id"`
}
// LogGroupMetadata represents a single CloudWatch Log Group
type LogGroupMetadata struct {
LogGroup *string `json:"log_group"`
Arn *string `json:"arn,omitempty"`
}
// CauseData is the container that contains the `cause` field
type CauseData struct {
Type CauseType `json:"-"`
// it will contain one of ExceptionID or (WorkingDirectory, Paths, Exceptions)
ExceptionID *string `json:"-"`
CauseObject
}
type CauseObject struct {
WorkingDirectory *string `json:"working_directory,omitempty"`
Paths []string `json:"paths,omitempty"`
Exceptions []Exception `json:"exceptions,omitempty"`
}
// UnmarshalJSON is the custom unmarshaller for the cause field
func (c *CauseData) UnmarshalJSON(data []byte) error |
// Exception represents an exception occurred
type Exception struct {
ID *string `json:"id,omitempty"`
Message *string `json:"message,omitempty"`
Type *string `json:"type,omitempty"`
Remote *bool `json:"remote,omitempty"`
Truncated *int64 `json:"truncated,omitempty"`
Skipped *int64 `json:"skipped,omitempty"`
Cause *string `json:"cause,omitempty"`
Stack []StackFrame `json:"stack,omitempty"`
}
// StackFrame represents a frame in the stack when an exception occurred
type StackFrame struct {
Path *string `json:"path,omitempty"`
Line *int `json:"line,omitempty"`
Label *string `json:"label,omitempty"`
}
// HTTPData provides the shape for unmarshalling request and response fields.
type HTTPData struct {
Request *RequestData `json:"request,omitempty"`
Response *ResponseData `json:"response,omitempty"`
}
// RequestData provides the shape for unmarshalling the request field.
type RequestData struct {
// Available in segment
XForwardedFor *bool `json:"x_forwarded_for,omitempty"`
// Available in both segment and subsegments
Method *string `json:"method,omitempty"`
URL *string `json:"url,omitempty"`
UserAgent *string `json:"user_agent,omitempty"`
ClientIP *string `json:"client_ip,omitempty"`
}
// ResponseData provides the shape for unmarshalling the response field.
type ResponseData struct {
Status *int64 `json:"status,omitempty"`
ContentLength interface{} `json:"content_length,omitempty"`
}
// ECSData provides the shape for unmarshalling the ecs field.
type ECSData struct {
Container *string `json:"container"`
}
// EC2Data provides the shape for unmarshalling the ec2 field.
type EC2Data struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
}
// ElasticBeanstalkData provides the shape for unmarshalling the elastic_beanstalk field.
type ElasticBeanstalkData struct {
EnvironmentName *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int `json:"deployment_id"`
}
// XRayMetaData provides the shape for unmarshalling the xray field
type XRayMetaData struct {
SDK *string `json:"sdk,omitempty"`
SDKVersion *string `json:"sdk_version,omitempty"`
AutoInstrumentation *bool `json:"auto_instrumentation"`
}
// SQLData provides the shape for unmarshalling the sql field.
type SQLData struct {
ConnectionString *string `json:"connection_string,omitempty"`
URL *string `json:"url,omitempty"` // protocol://host[:port]/database
SanitizedQuery *string `json:"sanitized_query,omitempty"`
DatabaseType *string `json:"database_type,omitempty"`
DatabaseVersion *string `json:"database_version,omitempty"`
DriverVersion *string `json:"driver_version,omitempty"`
User *string `json:"user,omitempty"`
Preparation *string `json:"preparation,omitempty"` // "statement" / "call"
}
// ServiceData provides the shape for unmarshalling the service field.
type ServiceData struct {
Version *string `json:"version,omitempty"`
CompilerVersion *string `json:"compiler_version,omitempty"`
Compiler *string `json:"compiler,omitempty"`
}
// SpanLinkData provides the shape for unmarshalling the span links in the span link field.
type SpanLinkData struct {
TraceID *string `json:"trace_id"`
SpanID *string `json:"id"`
Attributes map[string]interface{} `json:"attributes,omitempty"`
}
| {
err := json.Unmarshal(data, &c.CauseObject)
if err == nil {
c.Type = CauseTypeObject
return nil
}
rawStr := string(data)
if len(rawStr) > 0 && (rawStr[0] != '"' || rawStr[len(rawStr)-1] != '"') {
return fmt.Errorf("the value assigned to the `cause` field does not appear to be a string: %v", data)
}
exceptionID := rawStr[1 : len(rawStr)-1]
c.Type = CauseTypeExceptionID
c.ExceptionID = &exceptionID
return nil
} | identifier_body |
gateway.go | // Copyright Project Contour Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"fmt"
"time"
"github.com/projectcontour/contour/internal/k8s"
"github.com/projectcontour/contour/internal/leadership"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
)
type gatewayReconciler struct {
client client.Client
eventHandler cache.ResourceEventHandler
statusUpdater k8s.StatusUpdater
log logrus.FieldLogger
// gatewayClassControllerName is the configured controller of managed gatewayclasses.
gatewayClassControllerName gatewayapi_v1beta1.GatewayController
eventSource chan event.GenericEvent
}
// RegisterGatewayController creates the gateway controller from mgr. The controller will be pre-configured
// to watch for Gateway objects across all namespaces and reconcile those that match class.
func RegisterGatewayController(
log logrus.FieldLogger,
mgr manager.Manager,
eventHandler cache.ResourceEventHandler,
statusUpdater k8s.StatusUpdater,
gatewayClassControllerName string,
) (leadership.NeedLeaderElectionNotification, error) {
r := &gatewayReconciler{
log: log,
client: mgr.GetClient(),
eventHandler: eventHandler,
statusUpdater: statusUpdater,
gatewayClassControllerName: gatewayapi_v1beta1.GatewayController(gatewayClassControllerName),
// Set up a source.Channel that will trigger reconciles
// for all GatewayClasses when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
eventSource: make(chan event.GenericEvent),
}
c, err := controller.NewUnmanaged("gateway-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return nil, err
}
if err := mgr.Add(&noLeaderElectionController{c}); err != nil {
return nil, err
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.Gateway{}),
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
// Watch GatewayClasses and reconcile their associated Gateways
// to handle changes in the GatewayClasses' "Accepted" conditions.
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.GatewayClass{}),
handler.EnqueueRequestsFromMapFunc(r.mapGatewayClassToGateways),
predicate.NewPredicateFuncs(r.gatewayClassHasMatchingController),
); err != nil {
return nil, err
}
// Set up a source.Channel that will trigger reconciles
// for all Gateways when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
if err := c.Watch(
&source.Channel{Source: r.eventSource},
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
return r, nil
}
func (r *gatewayReconciler) OnElectedLeader() {
r.log.Info("elected leader, triggering reconciles for all gateways")
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return
}
for i := range gateways.Items {
r.eventSource <- event.GenericEvent{Object: &gateways.Items[i]}
}
}
func (r *gatewayReconciler) mapGatewayClassToGateways(ctx context.Context, gatewayClass client.Object) []reconcile.Request {
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(ctx, &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return nil
}
var reconciles []reconcile.Request
for _, gw := range gateways.Items {
if string(gw.Spec.GatewayClassName) == gatewayClass.GetName() {
reconciles = append(reconciles, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: gw.Namespace,
Name: gw.Name,
},
})
}
}
return reconciles
}
// hasMatchingController returns true if the provided object is a Gateway
// using a GatewayClass with a Spec.Controller string matching this Contour's
// controller string, or false otherwise.
func (r *gatewayReconciler) hasMatchingController(obj client.Object) bool {
log := r.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
})
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
log.Debugf("unexpected object type %T, bypassing reconciliation.", obj)
return false
}
gc := &gatewayapi_v1beta1.GatewayClass{}
if err := r.client.Get(context.Background(), types.NamespacedName{Name: string(gw.Spec.GatewayClassName)}, gc); err != nil {
log.WithError(err).Errorf("failed to get gatewayclass %s", gw.Spec.GatewayClassName)
return false
}
if gc.Spec.ControllerName != r.gatewayClassControllerName {
log.Debugf("gateway's class controller is not %s; bypassing reconciliation", r.gatewayClassControllerName)
return false
}
return true
}
func (r *gatewayReconciler) gatewayClassHasMatchingController(obj client.Object) bool {
gc, ok := obj.(*gatewayapi_v1beta1.GatewayClass)
if !ok {
r.log.Infof("expected GatewayClass, got %T", obj)
return false
}
return gc.Spec.ControllerName == r.gatewayClassControllerName
}
// Reconcile finds all the Gateways for the GatewayClass with an "Accepted: true" condition.
// It passes the oldest such Gateway to the DAG for processing, and sets an "Accepted: false"
// condition on all other Gateways for the accepted GatewayClass.
func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
var gatewayClasses gatewayapi_v1beta1.GatewayClassList
if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
}
// Find the GatewayClass for this controller with Accepted=true.
var acceptedGatewayClass *gatewayapi_v1beta1.GatewayClass
for i := range gatewayClasses.Items {
gatewayClass := &gatewayClasses.Items[i]
if gatewayClass.Spec.ControllerName != r.gatewayClassControllerName {
continue
}
if !isAccepted(gatewayClass) {
continue
}
acceptedGatewayClass = gatewayClass
break
}
if acceptedGatewayClass == nil {
r.log.Info("No accepted gateway class found")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
var allGateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &allGateways); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateways")
}
// Get all the Gateways for the Accepted=true GatewayClass.
var gatewaysForClass []*gatewayapi_v1beta1.Gateway
for i := range allGateways.Items {
if string(allGateways.Items[i].Spec.GatewayClassName) == acceptedGatewayClass.Name {
gatewaysForClass = append(gatewaysForClass, &allGateways.Items[i])
}
}
if len(gatewaysForClass) == 0 {
r.log.Info("No gateways found for accepted gateway class")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
// Find the oldest Gateway, using alphabetical order
// as a tiebreaker.
var oldest *gatewayapi_v1beta1.Gateway
for _, gw := range gatewaysForClass {
switch {
case oldest == nil:
oldest = gw
case gw.CreationTimestamp.Before(&oldest.CreationTimestamp):
oldest = gw
case gw.CreationTimestamp.Equal(&oldest.CreationTimestamp):
if fmt.Sprintf("%s/%s", gw.Namespace, gw.Name) < fmt.Sprintf("%s/%s", oldest.Namespace, oldest.Name) {
oldest = gw
}
}
}
// Set the "Accepted" condition to false for all gateways
// except the oldest. The oldest will have its status set
// by the DAG processor, so don't set it here.
for _, gw := range gatewaysForClass |
// TODO: Ensure the gateway by creating manage infrastructure, i.e. the Envoy service.
// xref: https://github.com/projectcontour/contour/issues/3545
r.log.WithField("namespace", oldest.Namespace).WithField("name", oldest.Name).Info("assigning gateway to DAG")
r.eventHandler.OnAdd(oldest, false)
return reconcile.Result{}, nil
}
func isAccepted(gatewayClass *gatewayapi_v1beta1.GatewayClass) bool {
for _, cond := range gatewayClass.Status.Conditions {
if cond.Type == string(gatewayapi_v1beta1.GatewayClassConditionStatusAccepted) && cond.Status == metav1.ConditionTrue {
return true
}
}
return false
}
func setGatewayNotAccepted(gateway *gatewayapi_v1beta1.Gateway) *gatewayapi_v1beta1.Gateway {
newCond := metav1.Condition{
Type: string(gatewayapi_v1beta1.GatewayConditionAccepted),
Status: metav1.ConditionFalse,
Reason: "OlderGatewayExists",
Message: "An older Gateway exists for the accepted GatewayClass",
LastTransitionTime: metav1.NewTime(time.Now()),
ObservedGeneration: gateway.Generation,
}
for i := range gateway.Status.Conditions {
cond := &gateway.Status.Conditions[i]
if cond.Type != string(gatewayapi_v1beta1.GatewayConditionAccepted) {
continue
}
// Update only if something has changed.
if cond.Status != newCond.Status || cond.Reason != newCond.Reason || cond.Message != newCond.Message {
cond.Status = newCond.Status
cond.Reason = newCond.Reason
cond.Message = newCond.Message
cond.LastTransitionTime = newCond.LastTransitionTime
cond.ObservedGeneration = newCond.ObservedGeneration
}
return gateway
}
gateway.Status.Conditions = append(gateway.Status.Conditions, newCond)
return gateway
}
| {
if gw == oldest {
continue
}
if r.statusUpdater != nil {
r.statusUpdater.Send(k8s.StatusUpdate{
NamespacedName: k8s.NamespacedNameOf(gw),
Resource: &gatewayapi_v1beta1.Gateway{},
Mutator: k8s.StatusMutatorFunc(func(obj client.Object) client.Object {
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
panic(fmt.Sprintf("unsupported object type %T", obj))
}
return setGatewayNotAccepted(gw.DeepCopy())
}),
})
} else {
// this branch makes testing easier by not going through the StatusUpdater.
copy := setGatewayNotAccepted(gw.DeepCopy())
if err := r.client.Status().Update(context.Background(), copy); err != nil {
r.log.WithError(err).Error("error updating gateway status")
return reconcile.Result{}, fmt.Errorf("error updating status of gateway %s/%s: %v", gw.Namespace, gw.Name, err)
}
}
} | conditional_block |
gateway.go | // Copyright Project Contour Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"fmt"
"time"
"github.com/projectcontour/contour/internal/k8s"
"github.com/projectcontour/contour/internal/leadership"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
)
type gatewayReconciler struct {
client client.Client
eventHandler cache.ResourceEventHandler
statusUpdater k8s.StatusUpdater
log logrus.FieldLogger
// gatewayClassControllerName is the configured controller of managed gatewayclasses.
gatewayClassControllerName gatewayapi_v1beta1.GatewayController
eventSource chan event.GenericEvent
}
// RegisterGatewayController creates the gateway controller from mgr. The controller will be pre-configured
// to watch for Gateway objects across all namespaces and reconcile those that match class.
func RegisterGatewayController(
log logrus.FieldLogger,
mgr manager.Manager,
eventHandler cache.ResourceEventHandler,
statusUpdater k8s.StatusUpdater,
gatewayClassControllerName string,
) (leadership.NeedLeaderElectionNotification, error) {
r := &gatewayReconciler{
log: log,
client: mgr.GetClient(),
eventHandler: eventHandler,
statusUpdater: statusUpdater,
gatewayClassControllerName: gatewayapi_v1beta1.GatewayController(gatewayClassControllerName),
// Set up a source.Channel that will trigger reconciles
// for all GatewayClasses when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
eventSource: make(chan event.GenericEvent),
}
c, err := controller.NewUnmanaged("gateway-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return nil, err
}
if err := mgr.Add(&noLeaderElectionController{c}); err != nil {
return nil, err
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.Gateway{}),
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
// Watch GatewayClasses and reconcile their associated Gateways
// to handle changes in the GatewayClasses' "Accepted" conditions.
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.GatewayClass{}),
handler.EnqueueRequestsFromMapFunc(r.mapGatewayClassToGateways),
predicate.NewPredicateFuncs(r.gatewayClassHasMatchingController),
); err != nil {
return nil, err
}
// Set up a source.Channel that will trigger reconciles
// for all Gateways when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
if err := c.Watch(
&source.Channel{Source: r.eventSource},
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
return r, nil
}
func (r *gatewayReconciler) OnElectedLeader() {
r.log.Info("elected leader, triggering reconciles for all gateways")
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return
}
for i := range gateways.Items {
r.eventSource <- event.GenericEvent{Object: &gateways.Items[i]}
}
}
func (r *gatewayReconciler) mapGatewayClassToGateways(ctx context.Context, gatewayClass client.Object) []reconcile.Request {
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(ctx, &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return nil
}
var reconciles []reconcile.Request
for _, gw := range gateways.Items {
if string(gw.Spec.GatewayClassName) == gatewayClass.GetName() {
reconciles = append(reconciles, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: gw.Namespace,
Name: gw.Name,
},
})
}
}
return reconciles
}
// hasMatchingController returns true if the provided object is a Gateway
// using a GatewayClass with a Spec.Controller string matching this Contour's
// controller string, or false otherwise.
func (r *gatewayReconciler) | (obj client.Object) bool {
log := r.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
})
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
log.Debugf("unexpected object type %T, bypassing reconciliation.", obj)
return false
}
gc := &gatewayapi_v1beta1.GatewayClass{}
if err := r.client.Get(context.Background(), types.NamespacedName{Name: string(gw.Spec.GatewayClassName)}, gc); err != nil {
log.WithError(err).Errorf("failed to get gatewayclass %s", gw.Spec.GatewayClassName)
return false
}
if gc.Spec.ControllerName != r.gatewayClassControllerName {
log.Debugf("gateway's class controller is not %s; bypassing reconciliation", r.gatewayClassControllerName)
return false
}
return true
}
func (r *gatewayReconciler) gatewayClassHasMatchingController(obj client.Object) bool {
gc, ok := obj.(*gatewayapi_v1beta1.GatewayClass)
if !ok {
r.log.Infof("expected GatewayClass, got %T", obj)
return false
}
return gc.Spec.ControllerName == r.gatewayClassControllerName
}
// Reconcile finds all the Gateways for the GatewayClass with an "Accepted: true" condition.
// It passes the oldest such Gateway to the DAG for processing, and sets an "Accepted: false"
// condition on all other Gateways for the accepted GatewayClass.
func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
var gatewayClasses gatewayapi_v1beta1.GatewayClassList
if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
}
// Find the GatewayClass for this controller with Accepted=true.
var acceptedGatewayClass *gatewayapi_v1beta1.GatewayClass
for i := range gatewayClasses.Items {
gatewayClass := &gatewayClasses.Items[i]
if gatewayClass.Spec.ControllerName != r.gatewayClassControllerName {
continue
}
if !isAccepted(gatewayClass) {
continue
}
acceptedGatewayClass = gatewayClass
break
}
if acceptedGatewayClass == nil {
r.log.Info("No accepted gateway class found")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
var allGateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &allGateways); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateways")
}
// Get all the Gateways for the Accepted=true GatewayClass.
var gatewaysForClass []*gatewayapi_v1beta1.Gateway
for i := range allGateways.Items {
if string(allGateways.Items[i].Spec.GatewayClassName) == acceptedGatewayClass.Name {
gatewaysForClass = append(gatewaysForClass, &allGateways.Items[i])
}
}
if len(gatewaysForClass) == 0 {
r.log.Info("No gateways found for accepted gateway class")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
// Find the oldest Gateway, using alphabetical order
// as a tiebreaker.
var oldest *gatewayapi_v1beta1.Gateway
for _, gw := range gatewaysForClass {
switch {
case oldest == nil:
oldest = gw
case gw.CreationTimestamp.Before(&oldest.CreationTimestamp):
oldest = gw
case gw.CreationTimestamp.Equal(&oldest.CreationTimestamp):
if fmt.Sprintf("%s/%s", gw.Namespace, gw.Name) < fmt.Sprintf("%s/%s", oldest.Namespace, oldest.Name) {
oldest = gw
}
}
}
// Set the "Accepted" condition to false for all gateways
// except the oldest. The oldest will have its status set
// by the DAG processor, so don't set it here.
for _, gw := range gatewaysForClass {
if gw == oldest {
continue
}
if r.statusUpdater != nil {
r.statusUpdater.Send(k8s.StatusUpdate{
NamespacedName: k8s.NamespacedNameOf(gw),
Resource: &gatewayapi_v1beta1.Gateway{},
Mutator: k8s.StatusMutatorFunc(func(obj client.Object) client.Object {
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
panic(fmt.Sprintf("unsupported object type %T", obj))
}
return setGatewayNotAccepted(gw.DeepCopy())
}),
})
} else {
// this branch makes testing easier by not going through the StatusUpdater.
copy := setGatewayNotAccepted(gw.DeepCopy())
if err := r.client.Status().Update(context.Background(), copy); err != nil {
r.log.WithError(err).Error("error updating gateway status")
return reconcile.Result{}, fmt.Errorf("error updating status of gateway %s/%s: %v", gw.Namespace, gw.Name, err)
}
}
}
// TODO: Ensure the gateway by creating manage infrastructure, i.e. the Envoy service.
// xref: https://github.com/projectcontour/contour/issues/3545
r.log.WithField("namespace", oldest.Namespace).WithField("name", oldest.Name).Info("assigning gateway to DAG")
r.eventHandler.OnAdd(oldest, false)
return reconcile.Result{}, nil
}
func isAccepted(gatewayClass *gatewayapi_v1beta1.GatewayClass) bool {
for _, cond := range gatewayClass.Status.Conditions {
if cond.Type == string(gatewayapi_v1beta1.GatewayClassConditionStatusAccepted) && cond.Status == metav1.ConditionTrue {
return true
}
}
return false
}
func setGatewayNotAccepted(gateway *gatewayapi_v1beta1.Gateway) *gatewayapi_v1beta1.Gateway {
newCond := metav1.Condition{
Type: string(gatewayapi_v1beta1.GatewayConditionAccepted),
Status: metav1.ConditionFalse,
Reason: "OlderGatewayExists",
Message: "An older Gateway exists for the accepted GatewayClass",
LastTransitionTime: metav1.NewTime(time.Now()),
ObservedGeneration: gateway.Generation,
}
for i := range gateway.Status.Conditions {
cond := &gateway.Status.Conditions[i]
if cond.Type != string(gatewayapi_v1beta1.GatewayConditionAccepted) {
continue
}
// Update only if something has changed.
if cond.Status != newCond.Status || cond.Reason != newCond.Reason || cond.Message != newCond.Message {
cond.Status = newCond.Status
cond.Reason = newCond.Reason
cond.Message = newCond.Message
cond.LastTransitionTime = newCond.LastTransitionTime
cond.ObservedGeneration = newCond.ObservedGeneration
}
return gateway
}
gateway.Status.Conditions = append(gateway.Status.Conditions, newCond)
return gateway
}
| hasMatchingController | identifier_name |
gateway.go | // Copyright Project Contour Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"fmt"
"time"
"github.com/projectcontour/contour/internal/k8s"
"github.com/projectcontour/contour/internal/leadership"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
)
type gatewayReconciler struct {
client client.Client
eventHandler cache.ResourceEventHandler
statusUpdater k8s.StatusUpdater
log logrus.FieldLogger
// gatewayClassControllerName is the configured controller of managed gatewayclasses.
gatewayClassControllerName gatewayapi_v1beta1.GatewayController
eventSource chan event.GenericEvent
}
// RegisterGatewayController creates the gateway controller from mgr. The controller will be pre-configured
// to watch for Gateway objects across all namespaces and reconcile those that match class.
func RegisterGatewayController(
log logrus.FieldLogger,
mgr manager.Manager,
eventHandler cache.ResourceEventHandler,
statusUpdater k8s.StatusUpdater,
gatewayClassControllerName string,
) (leadership.NeedLeaderElectionNotification, error) {
r := &gatewayReconciler{
log: log,
client: mgr.GetClient(),
eventHandler: eventHandler,
statusUpdater: statusUpdater,
gatewayClassControllerName: gatewayapi_v1beta1.GatewayController(gatewayClassControllerName),
// Set up a source.Channel that will trigger reconciles
// for all GatewayClasses when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
eventSource: make(chan event.GenericEvent),
}
c, err := controller.NewUnmanaged("gateway-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return nil, err
}
if err := mgr.Add(&noLeaderElectionController{c}); err != nil {
return nil, err
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.Gateway{}),
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
// Watch GatewayClasses and reconcile their associated Gateways
// to handle changes in the GatewayClasses' "Accepted" conditions.
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.GatewayClass{}),
handler.EnqueueRequestsFromMapFunc(r.mapGatewayClassToGateways),
predicate.NewPredicateFuncs(r.gatewayClassHasMatchingController),
); err != nil {
return nil, err
}
// Set up a source.Channel that will trigger reconciles
// for all Gateways when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
if err := c.Watch(
&source.Channel{Source: r.eventSource},
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
return r, nil
}
func (r *gatewayReconciler) OnElectedLeader() {
r.log.Info("elected leader, triggering reconciles for all gateways")
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return
}
for i := range gateways.Items {
r.eventSource <- event.GenericEvent{Object: &gateways.Items[i]}
}
}
func (r *gatewayReconciler) mapGatewayClassToGateways(ctx context.Context, gatewayClass client.Object) []reconcile.Request {
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(ctx, &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return nil
}
var reconciles []reconcile.Request
for _, gw := range gateways.Items {
if string(gw.Spec.GatewayClassName) == gatewayClass.GetName() {
reconciles = append(reconciles, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: gw.Namespace,
Name: gw.Name,
},
})
}
}
return reconciles
}
// hasMatchingController returns true if the provided object is a Gateway
// using a GatewayClass with a Spec.Controller string matching this Contour's
// controller string, or false otherwise.
func (r *gatewayReconciler) hasMatchingController(obj client.Object) bool {
log := r.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
})
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
log.Debugf("unexpected object type %T, bypassing reconciliation.", obj)
return false
}
gc := &gatewayapi_v1beta1.GatewayClass{}
if err := r.client.Get(context.Background(), types.NamespacedName{Name: string(gw.Spec.GatewayClassName)}, gc); err != nil {
log.WithError(err).Errorf("failed to get gatewayclass %s", gw.Spec.GatewayClassName)
return false
}
if gc.Spec.ControllerName != r.gatewayClassControllerName {
log.Debugf("gateway's class controller is not %s; bypassing reconciliation", r.gatewayClassControllerName)
return false
}
return true
}
func (r *gatewayReconciler) gatewayClassHasMatchingController(obj client.Object) bool {
gc, ok := obj.(*gatewayapi_v1beta1.GatewayClass)
if !ok {
r.log.Infof("expected GatewayClass, got %T", obj)
return false
}
return gc.Spec.ControllerName == r.gatewayClassControllerName
}
// Reconcile finds all the Gateways for the GatewayClass with an "Accepted: true" condition.
// It passes the oldest such Gateway to the DAG for processing, and sets an "Accepted: false"
// condition on all other Gateways for the accepted GatewayClass.
func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
var gatewayClasses gatewayapi_v1beta1.GatewayClassList
if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
}
// Find the GatewayClass for this controller with Accepted=true.
var acceptedGatewayClass *gatewayapi_v1beta1.GatewayClass
for i := range gatewayClasses.Items {
gatewayClass := &gatewayClasses.Items[i]
if gatewayClass.Spec.ControllerName != r.gatewayClassControllerName {
continue
}
if !isAccepted(gatewayClass) {
continue
}
acceptedGatewayClass = gatewayClass
break
}
if acceptedGatewayClass == nil {
r.log.Info("No accepted gateway class found")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
var allGateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &allGateways); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateways")
}
// Get all the Gateways for the Accepted=true GatewayClass.
var gatewaysForClass []*gatewayapi_v1beta1.Gateway
for i := range allGateways.Items {
if string(allGateways.Items[i].Spec.GatewayClassName) == acceptedGatewayClass.Name {
gatewaysForClass = append(gatewaysForClass, &allGateways.Items[i])
}
}
if len(gatewaysForClass) == 0 {
r.log.Info("No gateways found for accepted gateway class")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
// Find the oldest Gateway, using alphabetical order
// as a tiebreaker.
var oldest *gatewayapi_v1beta1.Gateway
for _, gw := range gatewaysForClass {
switch {
case oldest == nil:
oldest = gw
case gw.CreationTimestamp.Before(&oldest.CreationTimestamp):
oldest = gw
case gw.CreationTimestamp.Equal(&oldest.CreationTimestamp):
if fmt.Sprintf("%s/%s", gw.Namespace, gw.Name) < fmt.Sprintf("%s/%s", oldest.Namespace, oldest.Name) {
oldest = gw
}
}
}
// Set the "Accepted" condition to false for all gateways
// except the oldest. The oldest will have its status set
// by the DAG processor, so don't set it here.
for _, gw := range gatewaysForClass {
if gw == oldest {
continue
}
if r.statusUpdater != nil {
r.statusUpdater.Send(k8s.StatusUpdate{
NamespacedName: k8s.NamespacedNameOf(gw),
Resource: &gatewayapi_v1beta1.Gateway{},
Mutator: k8s.StatusMutatorFunc(func(obj client.Object) client.Object {
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
panic(fmt.Sprintf("unsupported object type %T", obj))
}
return setGatewayNotAccepted(gw.DeepCopy())
}),
})
} else {
// this branch makes testing easier by not going through the StatusUpdater.
copy := setGatewayNotAccepted(gw.DeepCopy())
if err := r.client.Status().Update(context.Background(), copy); err != nil {
r.log.WithError(err).Error("error updating gateway status")
return reconcile.Result{}, fmt.Errorf("error updating status of gateway %s/%s: %v", gw.Namespace, gw.Name, err)
}
}
}
// TODO: Ensure the gateway by creating manage infrastructure, i.e. the Envoy service.
// xref: https://github.com/projectcontour/contour/issues/3545
r.log.WithField("namespace", oldest.Namespace).WithField("name", oldest.Name).Info("assigning gateway to DAG")
r.eventHandler.OnAdd(oldest, false)
return reconcile.Result{}, nil
}
| if cond.Type == string(gatewayapi_v1beta1.GatewayClassConditionStatusAccepted) && cond.Status == metav1.ConditionTrue {
return true
}
}
return false
}
func setGatewayNotAccepted(gateway *gatewayapi_v1beta1.Gateway) *gatewayapi_v1beta1.Gateway {
newCond := metav1.Condition{
Type: string(gatewayapi_v1beta1.GatewayConditionAccepted),
Status: metav1.ConditionFalse,
Reason: "OlderGatewayExists",
Message: "An older Gateway exists for the accepted GatewayClass",
LastTransitionTime: metav1.NewTime(time.Now()),
ObservedGeneration: gateway.Generation,
}
for i := range gateway.Status.Conditions {
cond := &gateway.Status.Conditions[i]
if cond.Type != string(gatewayapi_v1beta1.GatewayConditionAccepted) {
continue
}
// Update only if something has changed.
if cond.Status != newCond.Status || cond.Reason != newCond.Reason || cond.Message != newCond.Message {
cond.Status = newCond.Status
cond.Reason = newCond.Reason
cond.Message = newCond.Message
cond.LastTransitionTime = newCond.LastTransitionTime
cond.ObservedGeneration = newCond.ObservedGeneration
}
return gateway
}
gateway.Status.Conditions = append(gateway.Status.Conditions, newCond)
return gateway
} | func isAccepted(gatewayClass *gatewayapi_v1beta1.GatewayClass) bool {
for _, cond := range gatewayClass.Status.Conditions { | random_line_split |
gateway.go | // Copyright Project Contour Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"fmt"
"time"
"github.com/projectcontour/contour/internal/k8s"
"github.com/projectcontour/contour/internal/leadership"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
)
type gatewayReconciler struct {
client client.Client
eventHandler cache.ResourceEventHandler
statusUpdater k8s.StatusUpdater
log logrus.FieldLogger
// gatewayClassControllerName is the configured controller of managed gatewayclasses.
gatewayClassControllerName gatewayapi_v1beta1.GatewayController
eventSource chan event.GenericEvent
}
// RegisterGatewayController creates the gateway controller from mgr. The controller will be pre-configured
// to watch for Gateway objects across all namespaces and reconcile those that match class.
func RegisterGatewayController(
log logrus.FieldLogger,
mgr manager.Manager,
eventHandler cache.ResourceEventHandler,
statusUpdater k8s.StatusUpdater,
gatewayClassControllerName string,
) (leadership.NeedLeaderElectionNotification, error) |
func (r *gatewayReconciler) OnElectedLeader() {
r.log.Info("elected leader, triggering reconciles for all gateways")
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return
}
for i := range gateways.Items {
r.eventSource <- event.GenericEvent{Object: &gateways.Items[i]}
}
}
func (r *gatewayReconciler) mapGatewayClassToGateways(ctx context.Context, gatewayClass client.Object) []reconcile.Request {
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(ctx, &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return nil
}
var reconciles []reconcile.Request
for _, gw := range gateways.Items {
if string(gw.Spec.GatewayClassName) == gatewayClass.GetName() {
reconciles = append(reconciles, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: gw.Namespace,
Name: gw.Name,
},
})
}
}
return reconciles
}
// hasMatchingController returns true if the provided object is a Gateway
// using a GatewayClass with a Spec.Controller string matching this Contour's
// controller string, or false otherwise.
func (r *gatewayReconciler) hasMatchingController(obj client.Object) bool {
log := r.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
})
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
log.Debugf("unexpected object type %T, bypassing reconciliation.", obj)
return false
}
gc := &gatewayapi_v1beta1.GatewayClass{}
if err := r.client.Get(context.Background(), types.NamespacedName{Name: string(gw.Spec.GatewayClassName)}, gc); err != nil {
log.WithError(err).Errorf("failed to get gatewayclass %s", gw.Spec.GatewayClassName)
return false
}
if gc.Spec.ControllerName != r.gatewayClassControllerName {
log.Debugf("gateway's class controller is not %s; bypassing reconciliation", r.gatewayClassControllerName)
return false
}
return true
}
func (r *gatewayReconciler) gatewayClassHasMatchingController(obj client.Object) bool {
gc, ok := obj.(*gatewayapi_v1beta1.GatewayClass)
if !ok {
r.log.Infof("expected GatewayClass, got %T", obj)
return false
}
return gc.Spec.ControllerName == r.gatewayClassControllerName
}
// Reconcile finds all the Gateways for the GatewayClass with an "Accepted: true" condition.
// It passes the oldest such Gateway to the DAG for processing, and sets an "Accepted: false"
// condition on all other Gateways for the accepted GatewayClass.
func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
var gatewayClasses gatewayapi_v1beta1.GatewayClassList
if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
}
// Find the GatewayClass for this controller with Accepted=true.
var acceptedGatewayClass *gatewayapi_v1beta1.GatewayClass
for i := range gatewayClasses.Items {
gatewayClass := &gatewayClasses.Items[i]
if gatewayClass.Spec.ControllerName != r.gatewayClassControllerName {
continue
}
if !isAccepted(gatewayClass) {
continue
}
acceptedGatewayClass = gatewayClass
break
}
if acceptedGatewayClass == nil {
r.log.Info("No accepted gateway class found")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
var allGateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &allGateways); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateways")
}
// Get all the Gateways for the Accepted=true GatewayClass.
var gatewaysForClass []*gatewayapi_v1beta1.Gateway
for i := range allGateways.Items {
if string(allGateways.Items[i].Spec.GatewayClassName) == acceptedGatewayClass.Name {
gatewaysForClass = append(gatewaysForClass, &allGateways.Items[i])
}
}
if len(gatewaysForClass) == 0 {
r.log.Info("No gateways found for accepted gateway class")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
// Find the oldest Gateway, using alphabetical order
// as a tiebreaker.
var oldest *gatewayapi_v1beta1.Gateway
for _, gw := range gatewaysForClass {
switch {
case oldest == nil:
oldest = gw
case gw.CreationTimestamp.Before(&oldest.CreationTimestamp):
oldest = gw
case gw.CreationTimestamp.Equal(&oldest.CreationTimestamp):
if fmt.Sprintf("%s/%s", gw.Namespace, gw.Name) < fmt.Sprintf("%s/%s", oldest.Namespace, oldest.Name) {
oldest = gw
}
}
}
// Set the "Accepted" condition to false for all gateways
// except the oldest. The oldest will have its status set
// by the DAG processor, so don't set it here.
for _, gw := range gatewaysForClass {
if gw == oldest {
continue
}
if r.statusUpdater != nil {
r.statusUpdater.Send(k8s.StatusUpdate{
NamespacedName: k8s.NamespacedNameOf(gw),
Resource: &gatewayapi_v1beta1.Gateway{},
Mutator: k8s.StatusMutatorFunc(func(obj client.Object) client.Object {
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
panic(fmt.Sprintf("unsupported object type %T", obj))
}
return setGatewayNotAccepted(gw.DeepCopy())
}),
})
} else {
// this branch makes testing easier by not going through the StatusUpdater.
copy := setGatewayNotAccepted(gw.DeepCopy())
if err := r.client.Status().Update(context.Background(), copy); err != nil {
r.log.WithError(err).Error("error updating gateway status")
return reconcile.Result{}, fmt.Errorf("error updating status of gateway %s/%s: %v", gw.Namespace, gw.Name, err)
}
}
}
// TODO: Ensure the gateway by creating manage infrastructure, i.e. the Envoy service.
// xref: https://github.com/projectcontour/contour/issues/3545
r.log.WithField("namespace", oldest.Namespace).WithField("name", oldest.Name).Info("assigning gateway to DAG")
r.eventHandler.OnAdd(oldest, false)
return reconcile.Result{}, nil
}
func isAccepted(gatewayClass *gatewayapi_v1beta1.GatewayClass) bool {
for _, cond := range gatewayClass.Status.Conditions {
if cond.Type == string(gatewayapi_v1beta1.GatewayClassConditionStatusAccepted) && cond.Status == metav1.ConditionTrue {
return true
}
}
return false
}
func setGatewayNotAccepted(gateway *gatewayapi_v1beta1.Gateway) *gatewayapi_v1beta1.Gateway {
newCond := metav1.Condition{
Type: string(gatewayapi_v1beta1.GatewayConditionAccepted),
Status: metav1.ConditionFalse,
Reason: "OlderGatewayExists",
Message: "An older Gateway exists for the accepted GatewayClass",
LastTransitionTime: metav1.NewTime(time.Now()),
ObservedGeneration: gateway.Generation,
}
for i := range gateway.Status.Conditions {
cond := &gateway.Status.Conditions[i]
if cond.Type != string(gatewayapi_v1beta1.GatewayConditionAccepted) {
continue
}
// Update only if something has changed.
if cond.Status != newCond.Status || cond.Reason != newCond.Reason || cond.Message != newCond.Message {
cond.Status = newCond.Status
cond.Reason = newCond.Reason
cond.Message = newCond.Message
cond.LastTransitionTime = newCond.LastTransitionTime
cond.ObservedGeneration = newCond.ObservedGeneration
}
return gateway
}
gateway.Status.Conditions = append(gateway.Status.Conditions, newCond)
return gateway
}
| {
r := &gatewayReconciler{
log: log,
client: mgr.GetClient(),
eventHandler: eventHandler,
statusUpdater: statusUpdater,
gatewayClassControllerName: gatewayapi_v1beta1.GatewayController(gatewayClassControllerName),
// Set up a source.Channel that will trigger reconciles
// for all GatewayClasses when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
eventSource: make(chan event.GenericEvent),
}
c, err := controller.NewUnmanaged("gateway-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return nil, err
}
if err := mgr.Add(&noLeaderElectionController{c}); err != nil {
return nil, err
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.Gateway{}),
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
// Watch GatewayClasses and reconcile their associated Gateways
// to handle changes in the GatewayClasses' "Accepted" conditions.
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.GatewayClass{}),
handler.EnqueueRequestsFromMapFunc(r.mapGatewayClassToGateways),
predicate.NewPredicateFuncs(r.gatewayClassHasMatchingController),
); err != nil {
return nil, err
}
// Set up a source.Channel that will trigger reconciles
// for all Gateways when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
if err := c.Watch(
&source.Channel{Source: r.eventSource},
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
return r, nil
} | identifier_body |
FieldClassifierAndKeywords.py | #coding=utf-8
import sys
import os
import subprocess
import jieba
import string
import re
import math
import codecs
import time
import json
class FieldClassifierAndKeywords:
def __init__(self):
words = jieba.cut("我是谁", cut_all=False)
def FieldClassifierAndKeywords(self,question):
##读入问题,调用分词工具分词,同时去除标点符号
delset = string.punctuation
question = question.translate(None, delset)
questionTag = self.typeClassify(question)
f = open("input.txt","w")
words = jieba.cut(question, cut_all = False)
s = ""
for i in words:
s = s+i.encode('utf-8')+" "
f.write(s)
f.close()
command = ["stanford-postagger-full-2015-12-09/stanford-postagger.sh",
'stanford-postagger-full-2015-12-09/models/chinese-distsim.tagger', "input.txt"]
pos_file = open("output.txt", 'w')
p = subprocess.Popen(command, stdout=pos_file, shell=False)
p.wait()
##s就是pos后的question
pos_file.close()
f = codecs.open("output.txt","r")
s = f.readline().strip()
Keywords = self.extract(s)
#KeywordsWithWeight = keywordWeight(s)
kw = wordWithWeight2()
return [questionTag,Keywords,kw]
pattern_person = re.compile(ur"谁|哪位", re.UNICODE)
pattern_time = re.compile(ur"什么时候|(哪|几.*(年|月|日|天|朝代))", re.UNICODE)
pattern_loc = re.compile(ur"哪.*(地|国|省|市|城|岛|山|湖|洋|河|海)", re.UNICODE)
pattern_integer = re.compile(ur"几任", re.UNICODE)
pattern_decimal = re.compile(ur"率|比例", re.UNICODE)
# question types: Name, Location, Time, Other
def typeClassify(self,question):
# Use regex to classify
result = self.regexClassify(question)
if result is not None:
return result
words = jieba.cut(question, cut_all = False)
ques=[]
for i in words:
ques.append(i)
t1 = time.time()
result = self.nbClassifier(ques)
t2 = time.time() - t1
print t2
return result
def tagQues(self,que,wordSet):
tag =[0,0,0,0]
for i in que:
i = i.encode("utf-8")
if wordSet.has_key(i):
tag[0] = tag[0] + wordSet[i][0]
tag[1] = tag[1] + wordSet[i][1]
tag[2] = tag[2] + wordSet[i][2]
tag[3] = tag[3] + wordSet[i][3]
inx = tag.index(max(tag))
if inx == 0:
tg = "人"
return tg
elif inx ==1:
tg = "时间"
return tg
elif inx == 2:
tg = "地点"
return tg
else:
tg = "名词"
return tg
def nbClassifier(self,question):
f1 = open("out-put.txt", "r")
f2 = open("ques_classifier_training.txt","r")
wordSet = {}
c1 = 0
c2 = 0
c3 = 0
c4 = 0
while True:
s1 = f1.readline()
s2 = f2.readline()
if len(s1) == 0:
break
else:
l1 = s1.split()
l2 = s2.split(':')
type = l2[1]
type = type.strip('\n')
if type == "人":
for w in l1:
c1 = c1 + 1
if wordSet.has_key(w):
wordSet[w][0] = wordSet[w][0]+1
else:
wordSet[w] = [1,0,0,0]
elif type == "时间":
for w in l1:
c2 = c2 + 1
if wordSet.has_key(w):
wordSet[w][1] = wordSet[w][1] + 1
else:
wordSet[w] = [0, 1, 0, 0]
elif type == "地点":
for w in l1:
c3 = c3 + 1
if wordSet.has_key(w):
wordSet[w][2] = wordSet[w][2] + 1
else:
wordSet[w] = [0, 0, 1, 0]
elif type == "名词":
for w in l1:
c4 = c4 +1
if wordSet.has_key(w):
wordSet[w][3] = wordSet[w][3] + 1
else:
wordSet[w] = [0, 0, 0, 1]
for i in wordSet:
wordSet[i] = [wordSet[i][0]+1,wordSet[i][1]+1,wordSet[i][2]+1,wordSet[i][3]+1]
for i in wordSet:
wordSet[i] = [math.log(wordSet[i][0]/float(c1+len(wordSet))),math.log(wordSet[i][1]/float(c2+len(wordSet))),math.log(wordSet[i][2]/float(c3+len(wordSet))),math.log(wordSet[i][3]/float(c4+len(wordSet)))]
tag=self.tagQues(question,wordSet)
return tag
def regexClassify(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None:
return "person"
elif self.pattern_loc.search(question.decode('utf8')) is not None:
return "loc"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
elif self.pattern_integer.search(question.decode('utf8')) is not None:
return "integer"
elif self.pattern_decimal.search(question.decode('utf8')) is not None:
return "decimal"
else:
return None
def target(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None\
or self.pattern_loc.search(question.decode('utf8')) is not None:
return "name"
elif self.pattern_integer.search(question.decode('utf8')) is not None \
or self.pattern_decimal.search(question.decode('utf8')) is not None:
return "quantity"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
else:
return None
def nbClassify(self,question, model_dict):
from operator import add
classifyArray = [0,0,0,0]
for word in question.spilt(' '):
if model_dict.has_key(word):
classifyArray = map(add, classifyArray, model_dict[word])
summation = sum(classifyArray)
classifyArray = [x - summation/4 for x in classifyArray]
if classifyArray[0] == max(classifyArray):
return "person"
elif classifyArray[1] == max(classifyArray):
return "loc"
elif classifyArray[2] == max(classifyArray):
return "time"
elif classifyArray[3] == max(classifyArray):
return "other"
def extract(self,question):
keywords = set()
for word in question.split():
sep = word.split('#')
word = sep[0]
tag = sep[1]
| while True:
s = f.readline()
if len(s) ==0:
break
else:
s= s.strip("\r\n")
stopWord[s] = 1
for word in question.split():
sep = word.split('#')
word = sep[0].decode("utf-8")
tag = sep[1]
if stopWord.has_key(word):
continue
else:
if tag[0] =='N':
keyword.append(word)
else:
keyword.append(word)
keyword.append(word)
return keyword
def keyweight(self,question):
words = []
tag = []
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(sep[1])
f = open("tagwithweight.txt","r")
pairs = json.loads(f.read())
finaltagWeights = []
for i in pairs:
f =False
if len(i[0]) != len(tag):
continue
for n in range(0,len(i[0])):
if i[0][n] == tag[n]:
f = True
else:
f = False
break
if f == True:
finaltagWeights = i[1]
break
key = {}
for i in range(0,len(finaltagWeights)):
if finaltagWeights[i] == 0:
continue
else:
key[words[i]] = finaltagWeights[i]
return key
def wordWithWeight2():
words = []
tag = []
f = codecs.open("output.txt", "r")
question = f.readline().strip()
f.close()
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(unicode(sep[1],'unicode-escape'))
f = open("tagwithweight.txt", "r")
pairs = json.loads(f.read())
maxSimilarity = 0
maxtag = []
maxweight = []
f.close()
for p in pairs:
s = SimilarityComparison(tag, p[0])
if s >maxSimilarity:
maxSimilarity = s
maxtag = p[0]
maxweight = p[1]
sm =""
st =""
s = LCSsequence(tag,maxtag,sm,st)
print s
t1 = s[1].split()
t2 = s[2].split()
dict = {}
for i in range(0,len(t1)):
dict[words[int(t2[i])]] = maxweight[int(t1[i])]
return dict
def LCSsequence(List1, List2, s1,s2):
if len(List1) == 0 or len(List2) == 0:
return (0,s1,s2)
if List1[-1:] == List2[-1:]:
i = str(len(List2[:-1]))
j = str(len(List1[:-1]))
s =LCSsequence(List1[:-1], List2[:-1],s1,s2)
return (s[0]+1, s[1]+" "+ i, s[2] + " " +j)
else:
ss1 = LCSsequence(List1[:-1], List2,s1,s2)
ss2 = LCSsequence(List1, List2[:-1],s1,s2)
if ss1[0]>ss2[0]:
return ss1
else:
return ss2
def SimilarityComparison( targetList, MatchingList):
# targetList is a list of pos tag from query, MatchingSet is a list of list
c = LCS(targetList,MatchingList)
similarity = (float(c)/len(MatchingList))
return similarity
def LCS(List1, List2):
if len(List1) == 0 or len(List2) ==0:
return 0
if List1[-1:] == List2[-1:]:
return LCS(List1[:-1], List2[:-1])+1
else:
return max(LCS(List1[:-1], List2),LCS(List1, List2[:-1]))
if __name__ == '__main__':
l1 = ["NN","NR","AS","NN","NR","SD"]
l2 = [["NN","NR","QW","AS","WE","SD","AS"],["NN","QW","QS","SD"],["NN","NR","AS","NR","AS","QW","NN","QS","SD"],["NN","NR","AS","QW","QS","SD"]]
#SimilarityComparison(l1,l2)
seq=""
s2= ""
s = wordWithWeight2()
print s
#S = LCSsequence(l1,l2[2],seq,s2)
| if tag[0] == 'N':
keywords.add(word)
return keywords
def keywordWeight(self,question):
keyword = []
f = codecs.open("chinese_stopwords.txt","r","utf-8")
stopWord ={}
| identifier_body |
FieldClassifierAndKeywords.py | #coding=utf-8
import sys
import os
import subprocess
import jieba
import string
import re
import math
import codecs
import time
import json
class FieldClassifierAndKeywords:
def __init__(self):
words = jieba.cut("我是谁", cut_all=False)
def FieldClassifierAndKeywords(self,question):
##读入问题,调用分词工具分词,同时去除标点符号
delset = string.punctuation
question = question.translate(None, delset)
questionTag = self.typeClassify(question)
f = open("input.txt","w")
words = jieba.cut(question, cut_all = False)
s = ""
for i in words:
s = s+i.encode('utf-8')+" "
f.write(s)
f.close()
command = ["stanford-postagger-full-2015-12-09/stanford-postagger.sh",
'stanford-postagger-full-2015-12-09/models/chinese-distsim.tagger', "input.txt"]
pos_file = open("output.txt", 'w')
p = subprocess.Popen(command, stdout=pos_file, shell=False)
p.wait()
##s就是pos后的question
pos_file.close()
f = codecs.open("output.txt","r")
s = f.readline().strip()
Keywords = self.extract(s)
#KeywordsWithWeight = keywordWeight(s)
kw = wordWithWeight2()
return [questionTag,Keywords,kw]
pattern_person = re.compile(ur"谁|哪位", re.UNICODE)
pattern_time = re.compile(ur"什么时候|(哪|几.*(年|月|日|天|朝代))", re.UNICODE)
pattern_loc = re.compile(ur"哪.*(地|国|省|市|城|岛|山|湖|洋|河|海)", re.UNICODE)
pattern_integer = re.compile(ur"几任", re.UNICODE)
pattern_decimal = re.compile(ur"率|比例", re.UNICODE)
# question types: Name, Location, Time, Other
def typeClassify(self,question):
# Use regex to classify
result = self.regexClassify(question)
if result is not None:
return result
words = jieba.cut(question, cut_all = False)
ques=[]
for i in words:
ques.append(i)
t1 = time.time()
result = self.nbClassifier(ques)
t2 = time.time() - t1
print t2
return result
def tagQues(self,que,wordSet):
tag =[0,0,0,0]
for i in que:
i = i.encode("utf-8")
if wordSet.has_key(i):
tag[0] = tag[0] + wordSet[i][0]
tag[1] = tag[1] + wordSet[i][1]
tag[2] = tag[2] + wordSet[i][2]
tag[3] = tag[3] + wordSet[i][3]
inx = tag.index(max(tag))
if inx == 0:
tg = "人"
return tg
elif inx ==1:
tg = "时间"
return tg
elif inx == 2:
tg = "地点"
return tg
else:
tg = "名词"
return tg
def nbClassifier(self,question):
f1 = open("out-put.txt", "r")
f2 = open("ques_classifier_training.txt","r")
wordSet = {}
c1 = 0
c2 = 0
c3 = 0
c4 = 0
while True:
s1 = f1.readline()
s2 = f2.readline()
if len(s1) == 0:
break
else:
l1 = s1.split()
l2 = s2.split(':')
type = l2[1]
type = type.strip('\n')
if type == "人":
for w in l1:
c1 = c1 + 1
if wordSet.has_key(w):
wordSet[w][0] = wordSet[w][0]+1
else:
wordSet[w] = [1,0,0,0]
elif type == "时间":
for w in l1:
c2 = c2 + 1
if wordSet.has_key(w):
wordSet[w][1] = wordSet[w][1] + 1
else:
wordSet[w] = [0, 1, 0, 0]
elif type == "地点":
for w in l1:
c3 = c3 + 1
if wordSet.has_key(w):
wordSet[w][2] = wordSet[w][2] + 1 | else:
wordSet[w] = [0, 0, 1, 0]
elif type == "名词":
for w in l1:
c4 = c4 +1
if wordSet.has_key(w):
wordSet[w][3] = wordSet[w][3] + 1
else:
wordSet[w] = [0, 0, 0, 1]
for i in wordSet:
wordSet[i] = [wordSet[i][0]+1,wordSet[i][1]+1,wordSet[i][2]+1,wordSet[i][3]+1]
for i in wordSet:
wordSet[i] = [math.log(wordSet[i][0]/float(c1+len(wordSet))),math.log(wordSet[i][1]/float(c2+len(wordSet))),math.log(wordSet[i][2]/float(c3+len(wordSet))),math.log(wordSet[i][3]/float(c4+len(wordSet)))]
tag=self.tagQues(question,wordSet)
return tag
def regexClassify(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None:
return "person"
elif self.pattern_loc.search(question.decode('utf8')) is not None:
return "loc"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
elif self.pattern_integer.search(question.decode('utf8')) is not None:
return "integer"
elif self.pattern_decimal.search(question.decode('utf8')) is not None:
return "decimal"
else:
return None
def target(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None\
or self.pattern_loc.search(question.decode('utf8')) is not None:
return "name"
elif self.pattern_integer.search(question.decode('utf8')) is not None \
or self.pattern_decimal.search(question.decode('utf8')) is not None:
return "quantity"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
else:
return None
def nbClassify(self,question, model_dict):
from operator import add
classifyArray = [0,0,0,0]
for word in question.spilt(' '):
if model_dict.has_key(word):
classifyArray = map(add, classifyArray, model_dict[word])
summation = sum(classifyArray)
classifyArray = [x - summation/4 for x in classifyArray]
if classifyArray[0] == max(classifyArray):
return "person"
elif classifyArray[1] == max(classifyArray):
return "loc"
elif classifyArray[2] == max(classifyArray):
return "time"
elif classifyArray[3] == max(classifyArray):
return "other"
def extract(self,question):
keywords = set()
for word in question.split():
sep = word.split('#')
word = sep[0]
tag = sep[1]
if tag[0] == 'N':
keywords.add(word)
return keywords
def keywordWeight(self,question):
keyword = []
f = codecs.open("chinese_stopwords.txt","r","utf-8")
stopWord ={}
while True:
s = f.readline()
if len(s) ==0:
break
else:
s= s.strip("\r\n")
stopWord[s] = 1
for word in question.split():
sep = word.split('#')
word = sep[0].decode("utf-8")
tag = sep[1]
if stopWord.has_key(word):
continue
else:
if tag[0] =='N':
keyword.append(word)
else:
keyword.append(word)
keyword.append(word)
return keyword
def keyweight(self,question):
words = []
tag = []
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(sep[1])
f = open("tagwithweight.txt","r")
pairs = json.loads(f.read())
finaltagWeights = []
for i in pairs:
f =False
if len(i[0]) != len(tag):
continue
for n in range(0,len(i[0])):
if i[0][n] == tag[n]:
f = True
else:
f = False
break
if f == True:
finaltagWeights = i[1]
break
key = {}
for i in range(0,len(finaltagWeights)):
if finaltagWeights[i] == 0:
continue
else:
key[words[i]] = finaltagWeights[i]
return key
def wordWithWeight2():
words = []
tag = []
f = codecs.open("output.txt", "r")
question = f.readline().strip()
f.close()
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(unicode(sep[1],'unicode-escape'))
f = open("tagwithweight.txt", "r")
pairs = json.loads(f.read())
maxSimilarity = 0
maxtag = []
maxweight = []
f.close()
for p in pairs:
s = SimilarityComparison(tag, p[0])
if s >maxSimilarity:
maxSimilarity = s
maxtag = p[0]
maxweight = p[1]
sm =""
st =""
s = LCSsequence(tag,maxtag,sm,st)
print s
t1 = s[1].split()
t2 = s[2].split()
dict = {}
for i in range(0,len(t1)):
dict[words[int(t2[i])]] = maxweight[int(t1[i])]
return dict
def LCSsequence(List1, List2, s1,s2):
if len(List1) == 0 or len(List2) == 0:
return (0,s1,s2)
if List1[-1:] == List2[-1:]:
i = str(len(List2[:-1]))
j = str(len(List1[:-1]))
s =LCSsequence(List1[:-1], List2[:-1],s1,s2)
return (s[0]+1, s[1]+" "+ i, s[2] + " " +j)
else:
ss1 = LCSsequence(List1[:-1], List2,s1,s2)
ss2 = LCSsequence(List1, List2[:-1],s1,s2)
if ss1[0]>ss2[0]:
return ss1
else:
return ss2
def SimilarityComparison( targetList, MatchingList):
# targetList is a list of pos tag from query, MatchingSet is a list of list
c = LCS(targetList,MatchingList)
similarity = (float(c)/len(MatchingList))
return similarity
def LCS(List1, List2):
if len(List1) == 0 or len(List2) ==0:
return 0
if List1[-1:] == List2[-1:]:
return LCS(List1[:-1], List2[:-1])+1
else:
return max(LCS(List1[:-1], List2),LCS(List1, List2[:-1]))
if __name__ == '__main__':
l1 = ["NN","NR","AS","NN","NR","SD"]
l2 = [["NN","NR","QW","AS","WE","SD","AS"],["NN","QW","QS","SD"],["NN","NR","AS","NR","AS","QW","NN","QS","SD"],["NN","NR","AS","QW","QS","SD"]]
#SimilarityComparison(l1,l2)
seq=""
s2= ""
s = wordWithWeight2()
print s
#S = LCSsequence(l1,l2[2],seq,s2) | random_line_split | |
FieldClassifierAndKeywords.py | #coding=utf-8
import sys
import os
import subprocess
import jieba
import string
import re
import math
import codecs
import time
import json
class FieldClassifierAndKeywords:
def __init__(self):
words = jieba.cut("我是谁", cut_all=False)
def FieldClassifierAndKeywords(self,question):
##读入问题,调用分词工具分词,同时去除标点符号
delset = string.punctuation
question = question.translate(None, delset)
questionTag = self.typeClassify(question)
f = open("input.txt","w")
words = jieba.cut(question, cut_all = False)
s = ""
for i in words:
s = s+i.encode('utf-8')+" "
f.write(s)
f.close()
command = ["stanford-postagger-full-2015-12-09/stanford-postagger.sh",
'stanford-postagger-full-2015-12-09/models/chinese-distsim.tagger', "input.txt"]
pos_file = open("output.txt", 'w')
p = subprocess.Popen(command, stdout=pos_file, shell=False)
p.wait()
##s就是pos后的question
pos_file.close()
f = codecs.open("output.txt","r")
s = f.readline().strip()
Keywords = self.extract(s)
#KeywordsWithWeight = keywordWeight(s)
kw = wordWithWeight2()
return [questionTag,Keywords,kw]
pattern_person = re.compile(ur"谁|哪位", re.UNICODE)
pattern_time = re.compile(ur"什么时候|(哪|几.*(年|月|日|天|朝代))", re.UNICODE)
pattern_loc = re.compile(ur"哪.*(地|国|省|市|城|岛|山|湖|洋|河|海)", re.UNICODE)
pattern_integer = re.compile(ur"几任", re.UNICODE)
pattern_decimal = re.compile(ur"率|比例", re.UNICODE)
# question types: Name, Location, Time, Other
def typeClassify(self,question):
# Use regex to classify
result = self.regexClassify(question)
if result is not None:
return result
words = jieba.cut(question, cut_all = False)
ques=[]
for i in words:
ques.append(i)
t1 = time.time()
result = self.nbClassifier(ques)
t2 = time.time() - t1
print t2
return result
def tagQues(self,que,wordSet):
tag =[0,0,0,0]
for i in que:
i = i.encode("utf-8")
if wordSet.has_key(i):
tag[0] = tag[0] + wordSet[i][0]
tag[1] = tag[1] + wordSet[i][1]
tag[2] = tag[2] + wordSet[i][2]
tag[3] = tag[3] + wordSet[i][3]
inx = tag.index(max(tag))
if inx == 0:
tg = "人"
return tg
elif inx ==1:
tg = "时间"
return tg
elif inx == 2:
tg = "地点"
return tg
else:
tg = "名词"
return tg
def nbClassifier(self,question):
f1 = open("out-put.txt", "r")
f2 = open("ques_classifier_training.txt","r")
wordSet = {}
c1 = 0
c2 = 0
c3 = 0
c4 = 0
while True:
s1 = f1.readline()
s2 = f2.readline()
if len(s1) == 0:
break
else:
l1 = s1.split()
l2 = s2.split(':')
type = l2[1]
type = type.strip('\n')
if type == "人":
for w in l1:
c1 = c1 + 1
if wordSet.has_key(w):
wordSet[w][0] = wordSet[w][0]+1
else:
wordSet[w] = [1,0,0,0]
elif type == "时间":
for w in l1:
c2 = c2 + 1
if wordSet.has_key(w):
wordSet[w][1] = wordSet[w][1] + 1
else:
wordSet[w] = [0, 1, 0, 0]
elif type == "地点":
for w in l1:
c3 = c3 + 1
if wordSet.has_key(w):
wordSet[w][2] = wordSet[w][2] + 1
else:
wordSet[w] = [0, 0, 1, 0]
elif type == "名词":
for w in l1:
c4 = c4 +1
if wordSet.has_key(w):
wordSet[w][3] = wordSet[w][3] + 1
else:
wordSet[w] = [0, 0, 0, 1]
for i in wordSet:
wordSet[i] = [wordSet[i][0]+1,wordSet[i][1]+1,wordSet[i][2]+1,wordSet[i][3]+1]
for i in wordSet:
wordSet[i] = [math.log(wordSet[i][0]/float(c1+len(wordSet))),math.log(wordSet[i][1]/float(c2+len(wordSet))),math.log(wordSet[i][2]/float(c3+len(wordSet))),math.log(wordSet[i][3]/float(c4+len(wordSet)))]
tag=self.tagQues(question,wordSet)
return tag
def regexClassify(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None:
return "person"
elif self.pattern_loc.search(question.decode('utf8')) is not None:
return "loc"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
elif self.pattern_integer.search(question.decode('utf8')) is not None:
return "integer"
elif self.pattern_decimal.search(question.decode('utf8')) is not None:
return "decimal"
else:
return None
def target(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None\
or self.pattern_loc.search(question.decode('utf8')) is not None:
return "name"
elif self.pattern_integer.search(question.decode('utf8')) is not None \
or self.pattern_decimal.search(question.decode('utf8')) is not None:
return "quantity"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
else:
return None
def nbClassify(self,question, model_dict):
from operator import add
classifyArray = [0,0,0,0]
for word in question.spilt(' '):
if model_dict.has_key(word):
classifyArray = map(add, classifyArray, model_dict[word])
summation = sum(classifyArray)
classifyArray = [x - summat | urn "loc"
elif classifyArray[2] == max(classifyArray):
return "time"
elif classifyArray[3] == max(classifyArray):
return "other"
def extract(self,question):
keywords = set()
for word in question.split():
sep = word.split('#')
word = sep[0]
tag = sep[1]
if tag[0] == 'N':
keywords.add(word)
return keywords
def keywordWeight(self,question):
keyword = []
f = codecs.open("chinese_stopwords.txt","r","utf-8")
stopWord ={}
while True:
s = f.readline()
if len(s) ==0:
break
else:
s= s.strip("\r\n")
stopWord[s] = 1
for word in question.split():
sep = word.split('#')
word = sep[0].decode("utf-8")
tag = sep[1]
if stopWord.has_key(word):
continue
else:
if tag[0] =='N':
keyword.append(word)
else:
keyword.append(word)
keyword.append(word)
return keyword
def keyweight(self,question):
words = []
tag = []
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(sep[1])
f = open("tagwithweight.txt","r")
pairs = json.loads(f.read())
finaltagWeights = []
for i in pairs:
f =False
if len(i[0]) != len(tag):
continue
for n in range(0,len(i[0])):
if i[0][n] == tag[n]:
f = True
else:
f = False
break
if f == True:
finaltagWeights = i[1]
break
key = {}
for i in range(0,len(finaltagWeights)):
if finaltagWeights[i] == 0:
continue
else:
key[words[i]] = finaltagWeights[i]
return key
def wordWithWeight2():
words = []
tag = []
f = codecs.open("output.txt", "r")
question = f.readline().strip()
f.close()
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(unicode(sep[1],'unicode-escape'))
f = open("tagwithweight.txt", "r")
pairs = json.loads(f.read())
maxSimilarity = 0
maxtag = []
maxweight = []
f.close()
for p in pairs:
s = SimilarityComparison(tag, p[0])
if s >maxSimilarity:
maxSimilarity = s
maxtag = p[0]
maxweight = p[1]
sm =""
st =""
s = LCSsequence(tag,maxtag,sm,st)
print s
t1 = s[1].split()
t2 = s[2].split()
dict = {}
for i in range(0,len(t1)):
dict[words[int(t2[i])]] = maxweight[int(t1[i])]
return dict
def LCSsequence(List1, List2, s1,s2):
if len(List1) == 0 or len(List2) == 0:
return (0,s1,s2)
if List1[-1:] == List2[-1:]:
i = str(len(List2[:-1]))
j = str(len(List1[:-1]))
s =LCSsequence(List1[:-1], List2[:-1],s1,s2)
return (s[0]+1, s[1]+" "+ i, s[2] + " " +j)
else:
ss1 = LCSsequence(List1[:-1], List2,s1,s2)
ss2 = LCSsequence(List1, List2[:-1],s1,s2)
if ss1[0]>ss2[0]:
return ss1
else:
return ss2
def SimilarityComparison( targetList, MatchingList):
# targetList is a list of pos tag from query, MatchingSet is a list of list
c = LCS(targetList,MatchingList)
similarity = (float(c)/len(MatchingList))
return similarity
def LCS(List1, List2):
if len(List1) == 0 or len(List2) ==0:
return 0
if List1[-1:] == List2[-1:]:
return LCS(List1[:-1], List2[:-1])+1
else:
return max(LCS(List1[:-1], List2),LCS(List1, List2[:-1]))
if __name__ == '__main__':
l1 = ["NN","NR","AS","NN","NR","SD"]
l2 = [["NN","NR","QW","AS","WE","SD","AS"],["NN","QW","QS","SD"],["NN","NR","AS","NR","AS","QW","NN","QS","SD"],["NN","NR","AS","QW","QS","SD"]]
#SimilarityComparison(l1,l2)
seq=""
s2= ""
s = wordWithWeight2()
print s
#S = LCSsequence(l1,l2[2],seq,s2)
| ion/4 for x in classifyArray]
if classifyArray[0] == max(classifyArray):
return "person"
elif classifyArray[1] == max(classifyArray):
ret | conditional_block |
FieldClassifierAndKeywords.py | #coding=utf-8
import sys
import os
import subprocess
import jieba
import string
import re
import math
import codecs
import time
import json
class | :
def __init__(self):
words = jieba.cut("我是谁", cut_all=False)
def FieldClassifierAndKeywords(self,question):
##读入问题,调用分词工具分词,同时去除标点符号
delset = string.punctuation
question = question.translate(None, delset)
questionTag = self.typeClassify(question)
f = open("input.txt","w")
words = jieba.cut(question, cut_all = False)
s = ""
for i in words:
s = s+i.encode('utf-8')+" "
f.write(s)
f.close()
command = ["stanford-postagger-full-2015-12-09/stanford-postagger.sh",
'stanford-postagger-full-2015-12-09/models/chinese-distsim.tagger', "input.txt"]
pos_file = open("output.txt", 'w')
p = subprocess.Popen(command, stdout=pos_file, shell=False)
p.wait()
##s就是pos后的question
pos_file.close()
f = codecs.open("output.txt","r")
s = f.readline().strip()
Keywords = self.extract(s)
#KeywordsWithWeight = keywordWeight(s)
kw = wordWithWeight2()
return [questionTag,Keywords,kw]
pattern_person = re.compile(ur"谁|哪位", re.UNICODE)
pattern_time = re.compile(ur"什么时候|(哪|几.*(年|月|日|天|朝代))", re.UNICODE)
pattern_loc = re.compile(ur"哪.*(地|国|省|市|城|岛|山|湖|洋|河|海)", re.UNICODE)
pattern_integer = re.compile(ur"几任", re.UNICODE)
pattern_decimal = re.compile(ur"率|比例", re.UNICODE)
# question types: Name, Location, Time, Other
def typeClassify(self,question):
# Use regex to classify
result = self.regexClassify(question)
if result is not None:
return result
words = jieba.cut(question, cut_all = False)
ques=[]
for i in words:
ques.append(i)
t1 = time.time()
result = self.nbClassifier(ques)
t2 = time.time() - t1
print t2
return result
def tagQues(self,que,wordSet):
tag =[0,0,0,0]
for i in que:
i = i.encode("utf-8")
if wordSet.has_key(i):
tag[0] = tag[0] + wordSet[i][0]
tag[1] = tag[1] + wordSet[i][1]
tag[2] = tag[2] + wordSet[i][2]
tag[3] = tag[3] + wordSet[i][3]
inx = tag.index(max(tag))
if inx == 0:
tg = "人"
return tg
elif inx ==1:
tg = "时间"
return tg
elif inx == 2:
tg = "地点"
return tg
else:
tg = "名词"
return tg
def nbClassifier(self,question):
f1 = open("out-put.txt", "r")
f2 = open("ques_classifier_training.txt","r")
wordSet = {}
c1 = 0
c2 = 0
c3 = 0
c4 = 0
while True:
s1 = f1.readline()
s2 = f2.readline()
if len(s1) == 0:
break
else:
l1 = s1.split()
l2 = s2.split(':')
type = l2[1]
type = type.strip('\n')
if type == "人":
for w in l1:
c1 = c1 + 1
if wordSet.has_key(w):
wordSet[w][0] = wordSet[w][0]+1
else:
wordSet[w] = [1,0,0,0]
elif type == "时间":
for w in l1:
c2 = c2 + 1
if wordSet.has_key(w):
wordSet[w][1] = wordSet[w][1] + 1
else:
wordSet[w] = [0, 1, 0, 0]
elif type == "地点":
for w in l1:
c3 = c3 + 1
if wordSet.has_key(w):
wordSet[w][2] = wordSet[w][2] + 1
else:
wordSet[w] = [0, 0, 1, 0]
elif type == "名词":
for w in l1:
c4 = c4 +1
if wordSet.has_key(w):
wordSet[w][3] = wordSet[w][3] + 1
else:
wordSet[w] = [0, 0, 0, 1]
for i in wordSet:
wordSet[i] = [wordSet[i][0]+1,wordSet[i][1]+1,wordSet[i][2]+1,wordSet[i][3]+1]
for i in wordSet:
wordSet[i] = [math.log(wordSet[i][0]/float(c1+len(wordSet))),math.log(wordSet[i][1]/float(c2+len(wordSet))),math.log(wordSet[i][2]/float(c3+len(wordSet))),math.log(wordSet[i][3]/float(c4+len(wordSet)))]
tag=self.tagQues(question,wordSet)
return tag
def regexClassify(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None:
return "person"
elif self.pattern_loc.search(question.decode('utf8')) is not None:
return "loc"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
elif self.pattern_integer.search(question.decode('utf8')) is not None:
return "integer"
elif self.pattern_decimal.search(question.decode('utf8')) is not None:
return "decimal"
else:
return None
def target(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None\
or self.pattern_loc.search(question.decode('utf8')) is not None:
return "name"
elif self.pattern_integer.search(question.decode('utf8')) is not None \
or self.pattern_decimal.search(question.decode('utf8')) is not None:
return "quantity"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
else:
return None
def nbClassify(self,question, model_dict):
from operator import add
classifyArray = [0,0,0,0]
for word in question.spilt(' '):
if model_dict.has_key(word):
classifyArray = map(add, classifyArray, model_dict[word])
summation = sum(classifyArray)
classifyArray = [x - summation/4 for x in classifyArray]
if classifyArray[0] == max(classifyArray):
return "person"
elif classifyArray[1] == max(classifyArray):
return "loc"
elif classifyArray[2] == max(classifyArray):
return "time"
elif classifyArray[3] == max(classifyArray):
return "other"
def extract(self,question):
keywords = set()
for word in question.split():
sep = word.split('#')
word = sep[0]
tag = sep[1]
if tag[0] == 'N':
keywords.add(word)
return keywords
def keywordWeight(self,question):
keyword = []
f = codecs.open("chinese_stopwords.txt","r","utf-8")
stopWord ={}
while True:
s = f.readline()
if len(s) ==0:
break
else:
s= s.strip("\r\n")
stopWord[s] = 1
for word in question.split():
sep = word.split('#')
word = sep[0].decode("utf-8")
tag = sep[1]
if stopWord.has_key(word):
continue
else:
if tag[0] =='N':
keyword.append(word)
else:
keyword.append(word)
keyword.append(word)
return keyword
def keyweight(self,question):
words = []
tag = []
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(sep[1])
f = open("tagwithweight.txt","r")
pairs = json.loads(f.read())
finaltagWeights = []
for i in pairs:
f =False
if len(i[0]) != len(tag):
continue
for n in range(0,len(i[0])):
if i[0][n] == tag[n]:
f = True
else:
f = False
break
if f == True:
finaltagWeights = i[1]
break
key = {}
for i in range(0,len(finaltagWeights)):
if finaltagWeights[i] == 0:
continue
else:
key[words[i]] = finaltagWeights[i]
return key
def wordWithWeight2():
words = []
tag = []
f = codecs.open("output.txt", "r")
question = f.readline().strip()
f.close()
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(unicode(sep[1],'unicode-escape'))
f = open("tagwithweight.txt", "r")
pairs = json.loads(f.read())
maxSimilarity = 0
maxtag = []
maxweight = []
f.close()
for p in pairs:
s = SimilarityComparison(tag, p[0])
if s >maxSimilarity:
maxSimilarity = s
maxtag = p[0]
maxweight = p[1]
sm =""
st =""
s = LCSsequence(tag,maxtag,sm,st)
print s
t1 = s[1].split()
t2 = s[2].split()
dict = {}
for i in range(0,len(t1)):
dict[words[int(t2[i])]] = maxweight[int(t1[i])]
return dict
def LCSsequence(List1, List2, s1,s2):
if len(List1) == 0 or len(List2) == 0:
return (0,s1,s2)
if List1[-1:] == List2[-1:]:
i = str(len(List2[:-1]))
j = str(len(List1[:-1]))
s =LCSsequence(List1[:-1], List2[:-1],s1,s2)
return (s[0]+1, s[1]+" "+ i, s[2] + " " +j)
else:
ss1 = LCSsequence(List1[:-1], List2,s1,s2)
ss2 = LCSsequence(List1, List2[:-1],s1,s2)
if ss1[0]>ss2[0]:
return ss1
else:
return ss2
def SimilarityComparison( targetList, MatchingList):
# targetList is a list of pos tag from query, MatchingSet is a list of list
c = LCS(targetList,MatchingList)
similarity = (float(c)/len(MatchingList))
return similarity
def LCS(List1, List2):
if len(List1) == 0 or len(List2) ==0:
return 0
if List1[-1:] == List2[-1:]:
return LCS(List1[:-1], List2[:-1])+1
else:
return max(LCS(List1[:-1], List2),LCS(List1, List2[:-1]))
if __name__ == '__main__':
l1 = ["NN","NR","AS","NN","NR","SD"]
l2 = [["NN","NR","QW","AS","WE","SD","AS"],["NN","QW","QS","SD"],["NN","NR","AS","NR","AS","QW","NN","QS","SD"],["NN","NR","AS","QW","QS","SD"]]
#SimilarityComparison(l1,l2)
seq=""
s2= ""
s = wordWithWeight2()
print s
#S = LCSsequence(l1,l2[2],seq,s2)
| FieldClassifierAndKeywords | identifier_name |
index.js | import React, { Component } from 'react'
import { Link } from 'react-router'
import { connect } from 'react-redux'
import api from 'api/api'
import Tip from 'component/pagemsg/tip'
import Tipshowend from 'component/pagemsg/tipshowend'
import css from 'css/indexpage'
import companylogo from 'images/companylogo.png'
import jijiangkaibo from 'images/jijiangkaibo.png'
import zhibo from 'images/zhibo.jpg'
import chongbo from 'images/chongbo.jpg'
import loadingimg2 from 'images/loading2.gif'
class IndexPage extends Component{
constructor(props){
super(props);
this.state = {
list : [],
liststatus : 'pending',
chuangzuolist : {},
translateY : 0
}
this.page = 1;
this.total = '';
this.token = this.props.userstate.token || window.localStorage.getItem('token');
this.datatype = 2;
this.create_time = '';
this.remain = true;
this.touchY = 0;
this.translateY = 0;
this.istouchmove = false;
this.updatamsgshow = false;
this.componentStatus = true;
}
componentDidMount(){
/*获取app 所需要信息*/
let token = api.getLocalStorage('token'), mobile = api.getLocalStorage('mobile');
api.FetchGet('/hyb-stu/stu_my/base',{
token : token
}).then((resapp)=>{
let nickname = resapp.data.mobile ? resapp.data.mobile : '游客'+api.setmd5(token).substring(5 ,10),
bbs_icon = resapp.data.bbs_icon ? resapp.data.bbs_icon : '',
stu_id = resapp.data.im_id ? resapp.data.im_id.split('#')[0] : '';
nickname = nickname.substring(0,3)+'xxxx'+nickname.substring(7);
api.webview("getlogindata?param={\"stu_name\":\""+resapp.data.nick_name+"\" ,\"stu_id\":\""+stu_id+"\" ,\"interview_im_sig\":\""+resapp.data.interview_im_sig+"\" ,\"interview_im_id\":\""+resapp.data.interview_im_id+"\" ,\"token\":\""+token+"\" ,\"login_id\":\""+mobile+"\" ,\"nick_name\":\""+nickname+"\" ,\"im_sign\":\""+resapp.data.im_sig+"\" ,\"user_icon_url\":\""+bbs_icon+"\" ,\"txy_sign\":\""+resapp.data.file_sig+"\" ,\"im_identifier\":\""+resapp.data.im_id+"\"}");
});
this.getlist();
window.scrollTo(0,0);
}
componentWillUnmou | entStatus = false;
document.removeEventListener('scroll',this.scroll);
}
appzhibocallback=()=>{
this.page = 1;
this.getlist();
}
getlist=()=>{
this.page==1 ? this.setState({ "liststatus" : "pending" ,"list" : [] }) : this.setState({ "liststatus" : "pending" });
setTimeout(()=>{
if(this.datatype==1){ /*创作*/
api.FetchPost('/hyb-stu/stu_user_hot_point/find_main_article',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page_size : this.page ,create_time : this.create_time})
}).then(({res})=>{
if(this.page>1 && api.isEmptyObject(res.data.article_map)){
return false;
}
this.remain = res.data.remain;
if(!api.isEmptyObject(res.data.article_map)){
if(api.isEmptyObject(this.state.chuangzuolist)){
this.setState({ "chuangzuolist" : res.data.article_map ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
let list = Object.assign({}, this.state.chuangzuolist);
let newlist = {};
Object.keys(list).map((key)=>{
Object.keys(res.data.article_map).map((key2)=>{
if(!list[key2]){
if(newlist[key2]){
newlist[key2] = [];
}
newlist[key2] = res.data.article_map[key2];
}else{
if(!newlist[key]){
newlist[key] = [];
}
if(res.data.article_map[key]!=void 0){
newlist[key] = list[key].concat(res.data.article_map[key]);
}else{
newlist[key] = list[key];
}
}
});
});
this.setState({ "chuangzuolist" : Object.assign(newlist, list) ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
let last = Object.keys(res.data.article_map)[Object.keys(res.data.article_map).length-1];
last = res.data.article_map[last];
last = last[last.length-1];
this.create_time = last.create_time;
}else{
this.setState({ "liststatus" : 'nodata' });
}
});
}else if(this.datatype==2){ /*直播*/
api.FetchPost('/hyb-stu/stu_talk/list',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page : this.page ,size : 10 })
}).then(({res})=>{
this.total = res.data.total;
if(this.page==1){
if(res.data.list.length){
this.componentStatus && this.setState({ "list" : res.data.list ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
this.componentStatus && this.setState({ "list" : [] ,"liststatus" : "nodata" });
}
}else{
this.componentStatus && this.setState({ "list" : this.state.list.concat(res.data.list) ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
});
}
},400);
}
scroll=(event)=>{
let scrolltop = document.documentElement.scrollTop || document.body.scrollTop;
let el = '';
if(this.datatype==2){
el = document.querySelectorAll('ul.livelist li:last-child')[0];
}
if(this.datatype==1){
el = document.querySelectorAll('.box2:last-child')[0];
}
if(!el){
return;
}
if(this.datatype==2 && this.page>=this.total){ /*直播*/
return;
}
if(this.datatype==1 && !this.remain){ /*创作*/
return;
}
if(this.state.liststatus!='pending'){
if(scrolltop + window.innerHeight + 10 >= Math.ceil(document.body.scrollHeight)){
++this.page;
this.getlist();
}
}
}
openNavOnnLive=(event)=>{
const id = api.closest(event.target ,'li').getAttribute('data-id');
const actiontype = "looklivejump?param={\"token\":\""+this.token+"\" ,\"meeting_id\":\""+id+"\" }";
api.webview(actiontype);
}
changetype=(event)=>{
if(!event.target.classList.contains('on')){
this.datatype = event.target.getAttribute('data-type');
let li = api.closest(event.target ,'ul').querySelectorAll('li');
for(let item of li){
item.classList.remove('on');
}
event.target.classList.add('on');
this.page = 1;
this.create_time = '';
this.getlist();
}
}
actionzan=(event)=>{
const el = api.closest(event.target ,'span') ,el_i = el.querySelectorAll('i')[0] ,div = api.closest(event.target ,'div.box');
let praise_type = 0;
if(el_i.classList.contains('on')){
praise_type = 1;
}
api.FetchPost('/hyb-stu/stu_user_hot_point/praise_count_inc',{
UserKey : this.props.userstate.userKey,
token : this.props.userstate.token,
body : JSON.stringify({
praise_type : praise_type,
hot_point_id : el.getAttribute('data-id'),
hot_point_user_id : el.getAttribute('data-userid')
})
}).then(({res})=>{
if(praise_type==1){
el.querySelectorAll('em')[0].textContent = --el.querySelectorAll('em')[0].textContent;
el_i.classList.remove('on');
}else{
el.querySelectorAll('em')[0].textContent = ++el.querySelectorAll('em')[0].textContent;
el_i.classList.add('on');
}
})
}
chuangzuo=()=>{
return <div className="indexdynamic">
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" />
: this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" />
: this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" />
: Object.keys(this.state.chuangzuolist).sort().reverse().map((key ,index)=>{
return <div key={key} className="dynamic">
<h1>{key}</h1>
{
this.state.chuangzuolist[key].map((item ,index)=>{
return <div className="box2" key={index} data-key={key} data-id={item.hot_point_id}>
<div className="boxhd2">
<img src={item.icon ? item.icon : usericonimg} />
<p>
<Link to={{ "pathname" : "/creationsocial" ,state : { "pointid" : item.hot_point_id } }} >
<span>{item.name}</span>
<span>{item.collection_time}</span>
</Link>
</p>
</div>
<div className="boxbd2">
<ul className="imglist">
<li className="long">
{
item.cover_picture ? <Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}><img src={item.cover_picture} /></Link> : ''
}
<Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}>
<p>
<em>{item.summary.length > 30 ? api.substring(item.summary ,30 ,'...') : item.summary}</em>
{item.summary.length > 30 ? <span>【长文】</span> : ''}
</p>
</Link>
</li>
</ul>
</div>
<div className="ft2">
<span><i className="icon b"></i>{item.comment_count}</span>
<span onClick={this.actionzan} data-status={item.praise_status} data-id={item.hot_point_id} data-userid={item.user_id}><i className={item.praise_status==1 ? "icon a on" : "icon a"}></i><em>{item.praise_count}</em></span>
</div>
</div>
})
}
</div>
})
}
{ this.page > 1 && this.state.liststatus=='pending' ? <Tipshowend text="加载中请稍等"/> : '' }
</div>
}
handleBind = (event)=>{
if(event.type=='touchstart'){
this.touchStart(event);
}else if(event.type=='touchmove'){
this.touchMove(event);
}
}
touchStart = (event)=>{
this.touchY = event.targetTouches[0].pageY;
}
touchMove = (event)=>{
let dir = event.targetTouches[0].pageY - this.touchY ,translateY = 0 ,direction = dir > 0 ? 1 : -1;
const scrollY = document.documentElement.scrollTop || document.body.scrollTop;
const end = ()=>{
if(this.state.translateY>20){
this.appzhibocallback();
setTimeout(()=>{
this.refs.updatamsg.innerHTML = '下拉即可刷新';
},320);
}
this.setState({ "translateY" : 0 });
this.istouchmove = false;
this.updatamsgshow = false;
window.removeEventListener('touchend' ,end);
}
if(direction>0 && scrollY<=0){
translateY = Math.min(dir, 35) / 2 + Math.max(0, dir - 35);
if(translateY>10){
this.updatamsgshow = true;
}
if(translateY>23){
this.refs.updatamsg.innerHTML = '释放即可刷新';
}
if(!this.istouchmove){
window.addEventListener('touchend' ,end ,false);
}
this.setState({ "translateY" : api.damping(translateY) });
this.istouchmove = true;
}
}
zhibo=()=>{
let style = { transform : `translateY(${this.state.translateY}px)` },
style1 = this.updatamsgshow ? { visibility : "visible" ,transform : `translateY(${this.state.translateY/6}px)` } : { transform : `translateY(${this.state.translateY/6}px)` };
return <div className="box" onTouchStart={this.handleBind} onTouchMove={this.handleBind}>
<div className="updatamsg" style={style1}><img src={loadingimg2} /><b ref="updatamsg" >下拉即可刷新</b></div>
<ul className="livelist" style={style}>
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" /> : '' }
{ this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" /> : '' }
{ this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" /> : '' }
{
this.state.list.map((item ,index)=>{
return item.type==0 ? '' : <li data-channel={item.channel_id} data-id={item.id} key={index} onClick={this.openNavOnnLive}>
<p className="title">
<label><img src={item.logo ? item.logo : companylogo} /></label>
<span><em>{item.full_name}</em><em><i className="icon3"></i>开播时间:{item.start_time}</em></span>
</p>
<p className="bo">
{
item.live_status==0
? <label><img src={jijiangkaibo} /><em>即将开播</em></label>
: item.live_status==1 ? <label><img src={item.preview_url ? item.preview_url : zhibo} /><em><i className="onlive"></i>直播中</em></label>
: item.live_status==2 ? <label><img src={item.preview_url ? item.preview_url : chongbo} /><em>观看重播</em></label>
: ''
}
<b className="detail"><i className="icon3"></i>{item.theme}</b>
</p>
</li>
})
}
</ul>
{ this.page > 1 && this.state.liststatus=='pending' ? <Tipshowend text="加载中请稍等"/> : '' }
</div>
}
/*<li onClick={this.changetype} data-type="0">热点</li> <i className="icon jia"></i>*/
/* <li onClick={this.changetype} data-type="1" className="on">创作</li> */
render(){
return(
<div className="indexPage">
<div className="hd">
<ul>
<li onClick={this.changetype} data-type="2">直播</li>
</ul>
</div>
{
this.datatype==1 ? this.chuangzuo() : this.zhibo()
}
</div>
)
}
}
const mapStateToProps = (state ,ownProps) =>{
return {
userstate : state.UserState
}
}
IndexPage = connect(mapStateToProps)(IndexPage)
export default IndexPage | nt(){
this.compon | identifier_name |
index.js | import React, { Component } from 'react'
import { Link } from 'react-router'
import { connect } from 'react-redux'
import api from 'api/api'
import Tip from 'component/pagemsg/tip'
import Tipshowend from 'component/pagemsg/tipshowend'
import css from 'css/indexpage'
import companylogo from 'images/companylogo.png'
import jijiangkaibo from 'images/jijiangkaibo.png'
import zhibo from 'images/zhibo.jpg'
import chongbo from 'images/chongbo.jpg'
import loadingimg2 from 'images/loading2.gif'
class IndexPage extends Component{
constructor(props){
super(props);
this.state = {
list : [],
liststatus : 'pending',
chuangzuolist : {},
translateY : 0
}
this.page = 1;
this.total = '';
this.token = this.props.userstate.token || window.localStorage.getItem('token');
this.datatype = 2;
this.create_time = '';
this.remain = true;
this.touchY = 0;
this.translateY = 0;
this.istouchmove = false;
this.updatamsgshow = false;
this.componentStatus = true;
}
componentDidMount(){
/*获取app 所需要信息*/
let token = api.getLocalStorage('token'), mobile = api.getLocalStorage('mobile');
api.FetchGet('/hyb-stu/stu_my/base',{
token : token
}).then((resapp)=>{
let nickname = resapp.data.mobile ? resapp.data.mobile : '游客'+api.setmd5(token).substring(5 ,10),
bbs_icon = resapp.data.bbs_icon ? resapp.data.bbs_icon : '',
stu_id = resapp.data.im_id ? resapp.data.im_id.split('#')[0] : '';
nickname = nickname.substring(0,3)+'xxxx'+nickname.substring(7);
api.webview("getlogindata?param={\"stu_name\":\""+resapp.data.nick_name+"\" ,\"stu_id\":\""+stu_id+"\" ,\"interview_im_sig\":\""+resapp.data.interview_im_sig+"\" ,\"interview_im_id\":\""+resapp.data.interview_im_id+"\" ,\"token\":\""+token+"\" ,\"login_id\":\""+mobile+"\" ,\"nick_name\":\""+nickname+"\" ,\"im_sign\":\""+resapp.data.im_sig+"\" ,\"user_icon_url\":\""+bbs_icon+"\" ,\"txy_sign\":\""+resapp.data.file_sig+"\" ,\"im_identifier\":\""+resapp.data.im_id+"\"}");
});
this.getlist();
window.scrollTo(0,0);
}
componentWillUnmount(){
this.componentStatus = false;
document.removeEventListener('scroll',this.scroll);
}
appzhibocallback=()=>{
this.page = 1;
this.getlist();
}
getlist=()=>{
this.page==1 ? this.setState({ "liststatus" : "pending" ,"list" : [] }) : this.setState({ "liststatus" : "pending" });
setTimeout(()=>{
if(this.datatype==1){ /*创作*/
api.FetchPost('/hyb-stu/stu_user_hot_point/find_main_article',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page_size : this.page ,create_time : this.create_time})
}).then(({res})=>{
if(this.page>1 && api.isEmptyObject(res.data.article_map)){
return false;
}
this.remain = res.data.remain;
if(!api.isEmptyObject(res.data.article_map)){
if(api.isEmptyObject(this.state.chuangzuolist)){
this.setState({ "chuangzuolist" : res.data.article_map ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
let list = Object.assign({}, this.state.chuangzuolist);
let newlist = {};
Object.keys(list).map((key)=>{
Object.keys(res.data.article_map).map((key2)=>{
if(!list[key2]){
if(newlist[key2]){
newlist[key2] = [];
}
newlist[key2] = res.data.article_map[key2];
}else{
if(!newlist[key]){
newlist[key] = [];
}
if(res.data.article_map[key]!=void 0){
newlist[key] = list[key].concat(res.data.article_map[key]);
}else{
newlist[key] = list[key];
}
}
});
});
this.setState({ "chuangzuolist" : Object.assign(newlist, list) ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
let last = Object.keys(res.data.article_map)[Object.keys(res.data.article_map).length-1];
last = res.data.article_map[last];
last = last[last.length-1];
this.create_time = last.create_time;
}else{
this.setState({ "liststatus" : 'nodata' });
}
});
}else if(this.datatype==2){ /*直播*/
api.Fetc | event)=>{
let scrolltop = document.documentElement.scrollTop || document.body.scrollTop;
let el = '';
if(this.datatype==2){
el = document.querySelectorAll('ul.livelist li:last-child')[0];
}
if(this.datatype==1){
el = document.querySelectorAll('.box2:last-child')[0];
}
if(!el){
return;
}
if(this.datatype==2 && this.page>=this.total){ /*直播*/
return;
}
if(this.datatype==1 && !this.remain){ /*创作*/
return;
}
if(this.state.liststatus!='pending'){
if(scrolltop + window.innerHeight + 10 >= Math.ceil(document.body.scrollHeight)){
++this.page;
this.getlist();
}
}
}
openNavOnnLive=(event)=>{
const id = api.closest(event.target ,'li').getAttribute('data-id');
const actiontype = "looklivejump?param={\"token\":\""+this.token+"\" ,\"meeting_id\":\""+id+"\" }";
api.webview(actiontype);
}
changetype=(event)=>{
if(!event.target.classList.contains('on')){
this.datatype = event.target.getAttribute('data-type');
let li = api.closest(event.target ,'ul').querySelectorAll('li');
for(let item of li){
item.classList.remove('on');
}
event.target.classList.add('on');
this.page = 1;
this.create_time = '';
this.getlist();
}
}
actionzan=(event)=>{
const el = api.closest(event.target ,'span') ,el_i = el.querySelectorAll('i')[0] ,div = api.closest(event.target ,'div.box');
let praise_type = 0;
if(el_i.classList.contains('on')){
praise_type = 1;
}
api.FetchPost('/hyb-stu/stu_user_hot_point/praise_count_inc',{
UserKey : this.props.userstate.userKey,
token : this.props.userstate.token,
body : JSON.stringify({
praise_type : praise_type,
hot_point_id : el.getAttribute('data-id'),
hot_point_user_id : el.getAttribute('data-userid')
})
}).then(({res})=>{
if(praise_type==1){
el.querySelectorAll('em')[0].textContent = --el.querySelectorAll('em')[0].textContent;
el_i.classList.remove('on');
}else{
el.querySelectorAll('em')[0].textContent = ++el.querySelectorAll('em')[0].textContent;
el_i.classList.add('on');
}
})
}
chuangzuo=()=>{
return <div className="indexdynamic">
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" />
: this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" />
: this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" />
: Object.keys(this.state.chuangzuolist).sort().reverse().map((key ,index)=>{
return <div key={key} className="dynamic">
<h1>{key}</h1>
{
this.state.chuangzuolist[key].map((item ,index)=>{
return <div className="box2" key={index} data-key={key} data-id={item.hot_point_id}>
<div className="boxhd2">
<img src={item.icon ? item.icon : usericonimg} />
<p>
<Link to={{ "pathname" : "/creationsocial" ,state : { "pointid" : item.hot_point_id } }} >
<span>{item.name}</span>
<span>{item.collection_time}</span>
</Link>
</p>
</div>
<div className="boxbd2">
<ul className="imglist">
<li className="long">
{
item.cover_picture ? <Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}><img src={item.cover_picture} /></Link> : ''
}
<Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}>
<p>
<em>{item.summary.length > 30 ? api.substring(item.summary ,30 ,'...') : item.summary}</em>
{item.summary.length > 30 ? <span>【长文】</span> : ''}
</p>
</Link>
</li>
</ul>
</div>
<div className="ft2">
<span><i className="icon b"></i>{item.comment_count}</span>
<span onClick={this.actionzan} data-status={item.praise_status} data-id={item.hot_point_id} data-userid={item.user_id}><i className={item.praise_status==1 ? "icon a on" : "icon a"}></i><em>{item.praise_count}</em></span>
</div>
</div>
})
}
</div>
})
}
{ this.page > 1 && this.state.liststatus=='pending' ? <Tipshowend text="加载中请稍等"/> : '' }
</div>
}
handleBind = (event)=>{
if(event.type=='touchstart'){
this.touchStart(event);
}else if(event.type=='touchmove'){
this.touchMove(event);
}
}
touchStart = (event)=>{
this.touchY = event.targetTouches[0].pageY;
}
touchMove = (event)=>{
let dir = event.targetTouches[0].pageY - this.touchY ,translateY = 0 ,direction = dir > 0 ? 1 : -1;
const scrollY = document.documentElement.scrollTop || document.body.scrollTop;
const end = ()=>{
if(this.state.translateY>20){
this.appzhibocallback();
setTimeout(()=>{
this.refs.updatamsg.innerHTML = '下拉即可刷新';
},320);
}
this.setState({ "translateY" : 0 });
this.istouchmove = false;
this.updatamsgshow = false;
window.removeEventListener('touchend' ,end);
}
if(direction>0 && scrollY<=0){
translateY = Math.min(dir, 35) / 2 + Math.max(0, dir - 35);
if(translateY>10){
this.updatamsgshow = true;
}
if(translateY>23){
this.refs.updatamsg.innerHTML = '释放即可刷新';
}
if(!this.istouchmove){
window.addEventListener('touchend' ,end ,false);
}
this.setState({ "translateY" : api.damping(translateY) });
this.istouchmove = true;
}
}
zhibo=()=>{
let style = { transform : `translateY(${this.state.translateY}px)` },
style1 = this.updatamsgshow ? { visibility : "visible" ,transform : `translateY(${this.state.translateY/6}px)` } : { transform : `translateY(${this.state.translateY/6}px)` };
return <div className="box" onTouchStart={this.handleBind} onTouchMove={this.handleBind}>
<div className="updatamsg" style={style1}><img src={loadingimg2} /><b ref="updatamsg" >下拉即可刷新</b></div>
<ul className="livelist" style={style}>
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" /> : '' }
{ this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" /> : '' }
{ this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" /> : '' }
{
this.state.list.map((item ,index)=>{
return item.type==0 ? '' : <li data-channel={item.channel_id} data-id={item.id} key={index} onClick={this.openNavOnnLive}>
<p className="title">
<label><img src={item.logo ? item.logo : companylogo} /></label>
<span><em>{item.full_name}</em><em><i className="icon3"></i>开播时间:{item.start_time}</em></span>
</p>
<p className="bo">
{
item.live_status==0
? <label><img src={jijiangkaibo} /><em>即将开播</em></label>
: item.live_status==1 ? <label><img src={item.preview_url ? item.preview_url : zhibo} /><em><i className="onlive"></i>直播中</em></label>
: item.live_status==2 ? <label><img src={item.preview_url ? item.preview_url : chongbo} /><em>观看重播</em></label>
: ''
}
<b className="detail"><i className="icon3"></i>{item.theme}</b>
</p>
</li>
})
}
</ul>
{ this.page > 1 && this.state.liststatus=='pending' ? <Tipshowend text="加载中请稍等"/> : '' }
</div>
}
/*<li onClick={this.changetype} data-type="0">热点</li> <i className="icon jia"></i>*/
/* <li onClick={this.changetype} data-type="1" className="on">创作</li> */
render(){
return(
<div className="indexPage">
<div className="hd">
<ul>
<li onClick={this.changetype} data-type="2">直播</li>
</ul>
</div>
{
this.datatype==1 ? this.chuangzuo() : this.zhibo()
}
</div>
)
}
}
const mapStateToProps = (state ,ownProps) =>{
return {
userstate : state.UserState
}
}
IndexPage = connect(mapStateToProps)(IndexPage)
export default IndexPage | hPost('/hyb-stu/stu_talk/list',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page : this.page ,size : 10 })
}).then(({res})=>{
this.total = res.data.total;
if(this.page==1){
if(res.data.list.length){
this.componentStatus && this.setState({ "list" : res.data.list ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
this.componentStatus && this.setState({ "list" : [] ,"liststatus" : "nodata" });
}
}else{
this.componentStatus && this.setState({ "list" : this.state.list.concat(res.data.list) ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
});
}
},400);
}
scroll=( | conditional_block |
index.js | import React, { Component } from 'react'
import { Link } from 'react-router'
import { connect } from 'react-redux'
import api from 'api/api'
import Tip from 'component/pagemsg/tip'
import Tipshowend from 'component/pagemsg/tipshowend'
import css from 'css/indexpage'
import companylogo from 'images/companylogo.png'
import jijiangkaibo from 'images/jijiangkaibo.png'
import zhibo from 'images/zhibo.jpg'
import chongbo from 'images/chongbo.jpg'
import loadingimg2 from 'images/loading2.gif'
class IndexPage extends Component{
constructor(props){
super(props);
this.state = {
list : [],
liststatus : 'pending',
chuangzuolist : {},
translateY : 0
}
this.page = 1;
this.total = '';
this.token = this.props.userstate.token || window.localStorage.getItem('token');
this.datatype = 2;
this.create_time = '';
this.remain = true;
this.touchY = 0;
this.translateY = 0;
this.istouchmove = false;
this.updatamsgshow = false;
this.componentStatus = true;
}
componentDidMount(){
/*获取app 所需要信息*/
let token = api.getLocalStorage('token'), mobile = api.getLocalStorage('mobile');
api.FetchGet('/hyb-stu/stu_my/base',{
token : token
}).then((resapp)=>{
let nickname = resapp.data.mobile ? resapp.data.mobile : '游客'+api.setmd5(token).substring(5 ,10),
bbs_icon = resapp.data.bbs_icon ? resapp.data.bbs_icon : '',
stu_id = resapp.data.im_id ? resapp.data.im_id.split('#')[0] : '';
nickname = nickname.substring(0,3)+'xxxx'+nickname.substring(7);
api.webview("getlogindata?param={\"stu_name\":\""+resapp.data.nick_name+"\" ,\"stu_id\":\""+stu_id+"\" ,\"interview_im_sig\":\""+resapp.data.interview_im_sig+"\" ,\"interview_im_id\":\""+resapp.data.interview_im_id+"\" ,\"token\":\""+token+"\" ,\"login_id\":\""+mobile+"\" ,\"nick_name\":\""+nickname+"\" ,\"im_sign\":\""+resapp.data.im_sig+"\" ,\"user_icon_url\":\""+bbs_icon+"\" ,\"txy_sign\":\""+resapp.data.file_sig+"\" ,\"im_identifier\":\""+resapp.data.im_id+"\"}");
});
this.getlist();
window.scrollTo(0,0);
}
componentWillUnmount(){
this.componentStatus = false;
document.removeEventListener('scroll',this.scroll);
}
appzhibocallback=()=>{
this.page = 1;
this.getlist();
}
getlist=()=>{
this.page==1 ? this.setState({ "liststatus" : "pending" ,"list" : [] }) : this.setState({ "liststatus" : "pending" });
setTimeout(()=>{
if(this.datatype==1){ /*创作*/
api.FetchPost('/hyb-stu/stu_user_hot_point/find_main_article',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page_size : this.page ,create_time : this.create_time})
}).then(({res})=>{
if(this.page>1 && api.isEmptyObject(res.data.article_map)){
return false;
}
this.remain = res.data.remain;
if(!api.isEmptyObject(res.data.article_map)){
if(api.isEmptyObject(this.state.chuangzuolist)){
this.setState({ "chuangzuolist" : res.data.article_map ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
let list = Object.assign({}, this.state.chuangzuolist);
let newlist = {};
Object.keys(list).map((key)=>{
Object.keys(res.data.article_map).map((key2)=>{
if(!list[key2]){
if(newlist[key2]){
newlist[key2] = [];
}
newlist[key2] = res.data.article_map[key2];
}else{
if(!newlist[key]){
newlist[key] = [];
}
if(res.data.article_map[key]!=void 0){
newlist[key] = list[key].concat(res.data.article_map[key]);
}else{
newlist[key] = list[key];
}
}
});
});
this.setState({ "chuangzuolist" : Object.assign(newlist, list) ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
let last = Object.keys(res.data.article_map)[Object.keys(res.data.article_map).length-1];
last = res.data.article_map[last];
last = last[last.length-1];
this.create_time = last.create_time;
}else{
this.setState({ "liststatus" : 'nodata' });
}
});
}else if(this.datatype==2){ /*直播*/
api.FetchPost('/hyb-stu/stu_talk/list',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page : this.page ,size : 10 })
}).then(({res})=>{
this.total = res.data.total;
if(this.page==1){
if(res.data.list.length){
this.componentStatus && this.setState({ "list" : res.data.list ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
this.componentStatus && this.setState({ "list" : [] ,"liststatus" : "nodata" });
}
}else{
this.componentStatus && this.setState({ "list" : this.state.list.concat(res.data.list) ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
});
}
},400);
}
scroll=(event)=>{
let scrolltop = document.documentElement.scrollTop || document.body.scrollTop;
let el = '';
if(this.datatype==2){
el = document.querySelectorAll('ul.livelist li:last-child')[0];
}
if(this.datatype==1){
el = document.querySelectorAll('.box2:last-child')[0];
}
if(!el){
return;
}
if(this.datatype==2 && this.page>=this.total){ /*直播*/
return;
}
if(this.datatype==1 && !this.remain){ /*创作*/
return;
}
if(this.state.liststatus!='pending'){
if(scrolltop + window.innerHeight + 10 >= Math.ceil(document.body.scrollHeight)){
++this.page;
this.getlist();
}
}
}
openNavOnnLive=(event)=>{
const id = api.closest(event.target ,'li').getAttribute('data-id');
const actiontype = "looklivejump?param={\"token\":\""+this.token+"\" ,\"meeting_id\":\""+id+"\" }";
api.webview(actiontype);
}
changetype=(event)=>{
if(!event.target.classList.contains('on')){
this.datatype = event.target.getAttribute('data-type');
let li = api.closest(event.target ,'ul').querySelectorAll('li');
for(let item of li){
item.classList.remove('on');
}
event.target.classList.add('on');
this.page = 1;
this.create_time = '';
this.getlist();
}
}
actionzan=(event)=>{
const el = api.closest(event.target ,'span') ,el_i = el.querySelectorAll('i')[0] ,div = api.closest(event.target ,'div.box');
let praise_type = 0;
if(el_i.classList.contains('on')){
praise_type = 1;
}
api.FetchPost('/hyb-stu/stu_user_hot_point/praise_count_inc',{
UserKey : this.props.userstate.userKey,
token : this.props.userstate.token,
body : JSON.stringify({
praise_type : praise_type,
hot_point_id : el.getAttribute('data-id'),
hot_point_user_id : el.getAttribute('data-userid')
})
}).then(({res})=>{
if(praise_type==1){
el.querySelectorAll('em')[0].textContent = --el.querySelectorAll('em')[0].textContent;
el_i.classList.remove('on');
}else{
el.querySelectorAll('em')[0].textContent = ++el.querySelectorAll('em')[0].textContent;
el_i.classList.add('on');
}
})
}
chuangzuo=()=>{
return <div className="indexdynamic">
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" />
: this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" />
: this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" />
: Object.keys(this.state.chuangzuolist).sort().reverse().map((key ,index)=>{
return <div key={key} className="dynamic">
<h1>{key}</h1>
{
this.state.chuangzuolist[key].map((item ,index)=>{
return <div className="box2" key={index} data-key={key} data-id={item.hot_point_id}>
<div className="boxhd2">
<img src={item.icon ? item.icon : usericonimg} />
<p>
<Link to={{ "pathname" : "/creationsocial" ,state : { "pointid" : item.hot_point_id } }} >
<span>{item.name}</span>
<span>{item.collection_time}</span>
</Link>
</p>
</div>
<div className="boxbd2">
<ul className="imglist">
<li className="long">
{
item.cover_picture ? <Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}><img src={item.cover_picture} /></Link> : ''
}
<Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}>
<p>
<em>{item.summary.length > 30 ? api.substring(item.summary ,30 ,'...') : item.summary}</em>
{item.summary.length > 30 ? <span>【长文】</span> : ''}
</p>
</Link>
</li>
</ul>
</div>
<div className="ft2">
<span><i className="icon b"></i>{item.comment_count}</span>
<span onClick={this.actionzan} data-status={item.praise_status} data-id={item.hot_point_id} data-userid={item.user_id}><i className={item.praise_status==1 ? "icon a on" : "icon a"}></i><em>{item.praise_count}</em></span>
</div>
</div>
})
}
</div>
})
}
{ this.page > 1 && this.state.liststatus=='pending' ? <Tipshowend text="加载中请稍等"/> : '' }
</div>
}
handleBind = (event)=>{
if(event.type=='touchstart'){
this.touchStart(event);
}else if(event.type=='touchmove'){
this.touchMove(event);
}
}
touchStart = (event)=>{
this.touchY = event.targetTouches[0].pageY;
}
touchMove = (event)=>{
let dir = event.targetTouches[0].pageY - this.touchY ,translateY = 0 ,direction = dir > 0 ? 1 : -1;
const scrollY = document.documentElement.scrollTop || document.body.scrollTop;
const end = ()=>{
if(this.state.translateY>20){
this.appzhibocallback();
setTimeout(()=>{
this.refs.updatamsg.innerHTML = '下拉即可刷新';
},320);
}
this.setState({ "translateY" : 0 });
this.istouchmove = false;
this.updatamsgshow = false;
window.removeEventListener('touchend' ,end);
}
if(direction>0 && scrollY<=0){
translateY = Math.min(dir, 35) / 2 + Math.max(0, dir - 35);
if(translateY>10){
this.updatamsgshow = true;
}
if(translateY>23){
this.refs.updatamsg.innerHTML = '释放即可刷新';
}
if(!this.istouchmove){
window.addEventListener('touchend' ,end ,false);
}
this.setState({ "translateY" : api.damping(translateY) });
this.istouchmove = true;
}
}
zhibo=()=>{
let style = { transform : `translateY(${this.state.translateY}px)` },
style1 = this.updatamsgshow ? { visibility : "visible" ,transform : `translateY(${this.state.translateY/6}px)` } : { transform : `translateY(${this.state.translateY/6}px)` };
return <div className="box" onTouchStart={this.handleBind} onTouchMove={this.handleBind}>
<div className="updatamsg" style={style1}><img src={loadingimg2} /><b ref="updatamsg" >下拉即可刷新</b></div>
<ul className="livelist" style={style}>
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" /> : '' }
{ this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" /> : '' }
{ this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" /> : '' }
{
this.state.list.map((item ,index)=>{
return item.type==0 ? '' : <li data-channel={item.channel_id} data-id={item.id} key={index} onClick={this.openNavOnnLive}>
<p className="title">
<label><img src={item.logo ? item.logo : companylogo} /></label>
<span><em>{item.full_name}</em><em><i className="icon3"></i>开播时间:{item.start_time}</em></span>
</p>
<p className="bo">
{
item.live_status==0
? <label><img src={jijiangkaibo} /><em>即将开播</em></label>
: item.live_status==1 ? <label><img src={item.preview_url ? item.preview_url : zhibo} /><em><i className="onlive"></i>直播中</em></label>
: item.live_status==2 ? <label><img src={item.preview_url ? item.preview_url : chongbo} /><em>观看重播</em></label>
: ''
}
<b className="detail"><i className="icon3"></i>{item.theme}</b>
</p>
</li>
})
}
</ul>
{ this.page > 1 && this.state.liststatus=='pending' ? <Tipshowend text="加载中请稍等"/> : '' }
</div>
}
/*<li onClick={this.changetype} data-type="0">热点</li> <i className="icon jia"></i>*/
/* <li onClick={this.changetype} data-type="1" className="on">创作</li> */
render(){
return(
<div className="indexPage">
<div className="hd">
<ul>
<li onClick={this.changetype} data-type="2">直播</li>
</ul>
</div>
{
this.datatype==1 ? this.chuangzuo() : this.zhibo()
}
</div>
)
}
}
const mapStateToProps = (state ,ownProps) =>{
return {
| }
}
IndexPage = connect(mapStateToProps)(IndexPage)
export default IndexPage | userstate : state.UserState
| random_line_split |
manager.go | package task
import (
"container/heap"
"fmt"
"sync"
"time"
"github.com/alecthomas/log4go"
"github.com/jinzhu/gorm"
)
const (
TASK_TYPE_PERIODIC = TaskType(1)
TASK_TYPE_ONESHOT = TaskType(2)
TASK_SOURCE_PUSH = TaskSource(0)
STATUS_UNKNOWN = TaskStatus(-1)
STATUS_INIT = TaskStatus(0) //init but may editing
STATUS_PENDING = TaskStatus(1) //added to pending Queue
STATUS_EXEC = TaskStatus(2)
STATUS_SUCC = TaskStatus(3)
STATUS_FAIL = TaskStatus(4)
STATUS_CANCEL = TaskStatus(5)
TICK = time.Minute
)
type TaskSource int
type TaskStatus int
type TaskType int
type TaskHandler interface {
DoTask(identifier string, context interface{}) error
Sync(uid string) (interface{}, error)
}
type TaskKey struct {
Source TaskSource
Uid string
}
type Task struct {
ID uint `gorm:"column:id;primary_key"`
CreatedAt time.Time `gorm:"column:created_at"`
UpdatedAt time.Time `gorm:"column:updated_at"`
CanceledAt time.Time `gorm:"column:canceled_at"`
UserIdentifier string `gorm:"column:uid;type:varchar(32);not null;index"`
Type TaskType `gorm:"column:type;type:tinyint(4)"`
Source TaskSource `gorm:"column:source;type:tinyint(4)"`
Period int `gorm:"column:period;type:int(11)"`
LastExecutionTime time.Time `gorm:"column:last_execution_time"`
NextExecutionTime time.Time `gorm:"column:next_execution_time"`
Status TaskStatus `gorm:"column:status;type:tinyint(4);index"`
Click int `gorm:"column:click"`
Reach int `gorm:"column:reach"`
ClickRate float32 `gorm:"column:click_rate"`
Retry int `gorm:"-"`
RetryInterval int `gorm:"-"`
Timeout int `gorm:"-"`
Handler TaskHandler `gorm:"-" json:"-"`
Context interface{} `gorm:"-"`
}
type TaskLog struct {
TaskId int
Status int
Start time.Time
End time.Time
}
type TaskManager struct {
TaskMap struct {
sync.RWMutex
inner map[TaskKey]*Task
}
PendingQueue struct {
sync.RWMutex
inner PriorityQueue
}
stop chan bool
wake chan bool
wdb *gorm.DB
rdb *gorm.DB
handlers map[TaskSource]TaskHandler
}
type PriorityQueue []*Task
var (
GlobalTaskManager *TaskManager
)
func (Task) TableName() string {
return "tb_task"
}
func (t *Task) Equal(other *Task) bool {
return t.UserIdentifier == other.UserIdentifier && t.Source == other.Source
}
func NewTaskManager(rdb, wdb *gorm.DB) (*TaskManager, error) {
m := &TaskManager{
TaskMap: struct {
sync.RWMutex
inner map[TaskKey]*Task
}{
inner: make(map[TaskKey]*Task),
},
PendingQueue: struct {
sync.RWMutex
inner PriorityQueue
}{
inner: make(PriorityQueue, 0),
},
stop: make(chan bool),
wake: make(chan bool),
wdb: wdb,
rdb: rdb,
handlers: make(map[TaskSource]TaskHandler),
}
heap.Init(&m.PendingQueue.inner)
return m, nil
}
func (q *PriorityQueue) Swap(i, j int) {
(*q)[i], (*q)[j] = (*q)[j], (*q)[i]
}
func (q *PriorityQueue) Len() int {
return len(*q)
}
func (q *PriorityQueue) Less(i, j int) bool {
return (*q)[i].NextExecutionTime.Before((*q)[j].NextExecutionTime)
}
func (q *PriorityQueue) Pop() interface{} {
old := *q
n := len(*q)
item := (*q)[n-1]
*q = old[0 : n-1]
return item
}
func (q *PriorityQueue) Push(x interface{}) {
*q = append(*q, x.(*Task))
}
func (taskManager *TaskManager) RegisterTaskSourceHandler(source TaskSource, handler TaskHandler) {
taskManager.handlers[source] = handler
}
func (taskManager *TaskManager) internalRemoveTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if !ok {
return fmt.Errorf("task not exists: %v", key)
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if !ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks not exists: %v", key)
}
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) internalAddTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if ok {
return fmt.Errorf("task exists")
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks exists")
}
taskManager.TaskMap.inner[key] = task
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) getNextWakeupTime() time.Time {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
if taskManager.PendingQueue.inner.Len() == 0 {
return time.Now().Add(TICK)
} else {
return taskManager.PendingQueue.inner[0].NextExecutionTime
}
}
func (taskManager *TaskManager) popAvaliableTasks(deadline time.Time) []*Task {
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
ret := make([]*Task, 0)
for len(taskManager.PendingQueue.inner) > 0 {
next := taskManager.PendingQueue.inner[0].NextExecutionTime
if next.Before(deadline) || next.Equal(deadline) {
p := heap.Pop(&taskManager.PendingQueue.inner)
ret = append(ret, p.(*Task))
} else {
break
}
}
return ret
}
func (*TaskManager) GetTaskLog(id int) (*TaskLog, error) {
return nil, nil
}
func (taskManager *TaskManager) NewOneshotTask(at time.Time,
identifier string,
source TaskSource,
retry, retryInterval int,
context interface{}) *Task {
if _, ok := taskManager.handlers[source]; !ok {
panic("please register your type first")
}
return &Task{
UserIdentifier: identifier,
Type: TASK_TYPE_ONESHOT,
Source: source,
NextExecutionTime: at,
Context: context,
Retry: retry,
RetryInterval: retryInterval,
LastExecutionTime: time.Time{},
Handler: taskManager.handlers[source],
}
}
func (taskManager *TaskManager) addTaskToPendingQueue(task *Task) {
taskManager.updateTaskStatus(task, STATUS_PENDING)
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
heap.Push(&taskManager.PendingQueue.inner, task)
select {
case taskManager.wake <- true:
default:
}
}
func (taskManager *TaskManager) CancelTask(uid string, source TaskSource) error {
task := &Task{}
if err := taskManager.rdb.Where("uid = ? and source = ?", uid, source).First(task).Error; err != nil {
return err
}
if err := taskManager.saveCancelTask(task); err != nil {
return err
}
taskManager.PendingQueue.Lock()
for idx, iter := range taskManager.PendingQueue.inner {
if task.Equal(iter) {
//remove element
taskManager.PendingQueue.inner = append(taskManager.PendingQueue.inner[:idx], taskManager.PendingQueue.inner[idx+1:]...)
break
}
}
taskManager.PendingQueue.Unlock()
if err := taskManager.internalRemoveTask(task); err != nil {
return err
}
select {
case taskManager.wake <- true:
default:
}
return nil
}
func (taskManager *TaskManager) GetTasks(pn, ps int) ([]*Task, int) {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
var tmp []*Task
offset := pn * ps
if offset < len(taskManager.PendingQueue.inner) {
if offset+pn >= len(taskManager.PendingQueue.inner) {
tmp = taskManager.PendingQueue.inner[offset:]
} else {
tmp = taskManager.PendingQueue.inner[offset : offset+pn]
}
}
ret := make([]*Task, len(tmp))
for idx, t := range tmp {
task := *t
ret[idx] = &task
}
return ret, len(taskManager.PendingQueue.inner)/pn + 1
}
func (taskManager *TaskManager) AddAndScheduleTask(task *Task) error {
now := time.Now()
if task.NextExecutionTime.Before(now) {
return fmt.Errorf("can't add task than now: %v < %v", task.NextExecutionTime, now)
}
task.Status = STATUS_INIT
if err := taskManager.saveTaskToDB(task); err != nil {
return fmt.Errorf("save task to db error : %v", err)
}
if err := taskManager.internalAddTask(task); err != nil {
return fmt.Errorf("add internal task error: %v", err)
}
log4go.Info("new task %v added type:%v next execaution time %s", task.UserIdentifier, task.Type, task.NextExecutionTime)
taskManager.addTaskToPendingQueue(task)
return nil
}
func (taskManager *TaskManager) doneTask(task *Task, status TaskStatus) {
key := TaskKey{
Uid: task.UserIdentifier,
Source: task.Source,
}
switch task.Type {
case TASK_TYPE_ONESHOT:
switch status {
case STATUS_SUCC:
taskManager.saveSuccessTask(task)
fallthrough
case STATUS_FAIL:
taskManager.updateTaskStatus(task, STATUS_FAIL)
taskManager.TaskMap.Lock()
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
}
default:
panic("not support task type yet")
}
}
func (taskManager *TaskManager) runTasks(tasks []*Task) {
var wg sync.WaitGroup
for _, task := range tasks {
wg.Add(1)
go func() {
defer wg.Done()
b := task.Retry
for {
taskManager.updateTaskStatus(task, STATUS_EXEC)
err := task.Handler.DoTask(task.UserIdentifier, task.Context)
if err != nil {
if task.Retry > 0 {
log4go.Global.Info("task %v-%v fails, retry (%v/%v)", task.Type, task.UserIdentifier, task.Retry, b)
task.Retry--
time.Sleep(time.Second * time.Duration(task.RetryInterval))
} else {
break
}
} else {
taskManager.saveSuccessTask(task)
return
}
}
taskManager.doneTask(task, STATUS_FAIL)
}()
}
wg.Wait()
}
func (taskManager *TaskManager) Run() {
for {
now := time.Now()
next := taskManager.getNextWakeupTime()
var duration time.Duration
if now.After(next) {
duration = time.Duration(0)
} else {
duration = next.Sub(now)
}
log4go.Global.Debug("wait for duration %v next:%v now:%v", duration, next, now)
select {
case <-taskManager.stop:
log4go.Global.Info("taskmanager closed")
return
case <-time.After(duration):
tasks := taskManager.popAvaliableTasks(now)
if len(tasks) > 0 {
log4go.Global.Debug("run tasks [%d]", len(tasks))
go taskManager.runTasks(tasks)
}
case <-taskManager.wake:
log4go.Global.Debug("taskmanager waked")
continue
}
}
}
func (taskManager *TaskManager) Stop() {
taskManager.stop <- true
}
func (taskManager *TaskManager) SyncTask() error {
tasks := []*Task{}
if err := taskManager.rdb.Where("status in (?)", []TaskStatus{STATUS_PENDING, STATUS_EXEC, STATUS_INIT}).Find(&tasks).Error; err != nil {
return err
}
for _, task := range tasks {
var context interface{}
var err error
if _, ok := taskManager.handlers[task.Source]; !ok | else {
task.Handler = taskManager.handlers[task.Source]
context, err = task.Handler.Sync(task.UserIdentifier)
if err != nil {
log4go.Warn("task context sync error: %v", err)
continue
} else {
task.Context = context
}
}
now := time.Now()
if task.NextExecutionTime.Before(now) {
log4go.Warn("next execution time is to early, just set it to failure")
taskManager.updateTaskStatus(task, STATUS_FAIL)
} else {
taskManager.addTaskToPendingQueue(task)
log4go.Warn("schedule task : [%v]", task.UserIdentifier)
}
}
return nil
}
func (taskManager *TaskManager) updateTaskStatus(task *Task, status TaskStatus) error {
if err := taskManager.wdb.Model(task).Update("status", status).Error; err != nil {
return fmt.Errorf("update taks error : %v", status)
}
log4go.Info("update task [%v] status [%v] ", task.UserIdentifier, status)
return nil
}
func (taskManager *TaskManager) saveSuccessTask(task *Task) error {
log4go.Info("update task [%v] status SUCCESS", task.UserIdentifier)
task.LastExecutionTime = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_SUCC,
"last_execution_time": task.LastExecutionTime}).Error; err != nil {
return fmt.Errorf("update delivery time and status error")
}
task.Status = STATUS_SUCC
return nil
}
func (taskManager *TaskManager) saveCancelTask(task *Task) error {
log4go.Info("update task [%v] status canceld", task.UserIdentifier)
task.CanceledAt = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_CANCEL,
"canceled_at": task.CanceledAt}).Error; err != nil {
return fmt.Errorf("update canceld time and status error")
}
task.Status = STATUS_CANCEL
return nil
}
func (taskManager *TaskManager) saveTaskLog(tasklog *TaskLog) {
panic("error")
}
func (taskManager *TaskManager) saveTaskToDB(task *Task) error {
var err error
if err = taskManager.wdb.Create(task).Error; err != nil {
return err
}
log4go.Info("saved task %d to db", task.ID)
return nil
}
| {
log4go.Warn("unknown task source :%v", task.Source)
continue
} | conditional_block |
manager.go | package task
import (
"container/heap"
"fmt"
"sync"
"time"
"github.com/alecthomas/log4go"
"github.com/jinzhu/gorm"
)
const (
TASK_TYPE_PERIODIC = TaskType(1)
TASK_TYPE_ONESHOT = TaskType(2)
TASK_SOURCE_PUSH = TaskSource(0)
STATUS_UNKNOWN = TaskStatus(-1)
STATUS_INIT = TaskStatus(0) //init but may editing
STATUS_PENDING = TaskStatus(1) //added to pending Queue
STATUS_EXEC = TaskStatus(2)
STATUS_SUCC = TaskStatus(3)
STATUS_FAIL = TaskStatus(4)
STATUS_CANCEL = TaskStatus(5)
TICK = time.Minute
)
type TaskSource int
type TaskStatus int
type TaskType int
type TaskHandler interface {
DoTask(identifier string, context interface{}) error
Sync(uid string) (interface{}, error)
}
type TaskKey struct {
Source TaskSource
Uid string
}
type Task struct {
ID uint `gorm:"column:id;primary_key"`
CreatedAt time.Time `gorm:"column:created_at"`
UpdatedAt time.Time `gorm:"column:updated_at"`
CanceledAt time.Time `gorm:"column:canceled_at"`
UserIdentifier string `gorm:"column:uid;type:varchar(32);not null;index"`
Type TaskType `gorm:"column:type;type:tinyint(4)"`
Source TaskSource `gorm:"column:source;type:tinyint(4)"`
Period int `gorm:"column:period;type:int(11)"`
LastExecutionTime time.Time `gorm:"column:last_execution_time"`
NextExecutionTime time.Time `gorm:"column:next_execution_time"`
Status TaskStatus `gorm:"column:status;type:tinyint(4);index"`
Click int `gorm:"column:click"`
Reach int `gorm:"column:reach"`
ClickRate float32 `gorm:"column:click_rate"`
Retry int `gorm:"-"`
RetryInterval int `gorm:"-"`
Timeout int `gorm:"-"`
Handler TaskHandler `gorm:"-" json:"-"`
Context interface{} `gorm:"-"`
}
type TaskLog struct {
TaskId int
Status int
Start time.Time
End time.Time
}
type TaskManager struct {
TaskMap struct {
sync.RWMutex
inner map[TaskKey]*Task
}
PendingQueue struct {
sync.RWMutex
inner PriorityQueue
}
stop chan bool
wake chan bool
wdb *gorm.DB
rdb *gorm.DB
handlers map[TaskSource]TaskHandler
}
type PriorityQueue []*Task
var (
GlobalTaskManager *TaskManager
)
func (Task) TableName() string {
return "tb_task"
}
func (t *Task) Equal(other *Task) bool {
return t.UserIdentifier == other.UserIdentifier && t.Source == other.Source
}
func NewTaskManager(rdb, wdb *gorm.DB) (*TaskManager, error) {
m := &TaskManager{
TaskMap: struct {
sync.RWMutex
inner map[TaskKey]*Task
}{
inner: make(map[TaskKey]*Task),
},
PendingQueue: struct {
sync.RWMutex
inner PriorityQueue
}{
inner: make(PriorityQueue, 0),
},
stop: make(chan bool),
wake: make(chan bool),
wdb: wdb,
rdb: rdb,
handlers: make(map[TaskSource]TaskHandler),
}
heap.Init(&m.PendingQueue.inner)
return m, nil
}
func (q *PriorityQueue) Swap(i, j int) {
(*q)[i], (*q)[j] = (*q)[j], (*q)[i]
}
func (q *PriorityQueue) Len() int {
return len(*q)
}
func (q *PriorityQueue) Less(i, j int) bool {
return (*q)[i].NextExecutionTime.Before((*q)[j].NextExecutionTime)
}
func (q *PriorityQueue) Pop() interface{} {
old := *q
n := len(*q)
item := (*q)[n-1]
*q = old[0 : n-1]
return item
}
func (q *PriorityQueue) | (x interface{}) {
*q = append(*q, x.(*Task))
}
func (taskManager *TaskManager) RegisterTaskSourceHandler(source TaskSource, handler TaskHandler) {
taskManager.handlers[source] = handler
}
func (taskManager *TaskManager) internalRemoveTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if !ok {
return fmt.Errorf("task not exists: %v", key)
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if !ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks not exists: %v", key)
}
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) internalAddTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if ok {
return fmt.Errorf("task exists")
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks exists")
}
taskManager.TaskMap.inner[key] = task
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) getNextWakeupTime() time.Time {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
if taskManager.PendingQueue.inner.Len() == 0 {
return time.Now().Add(TICK)
} else {
return taskManager.PendingQueue.inner[0].NextExecutionTime
}
}
func (taskManager *TaskManager) popAvaliableTasks(deadline time.Time) []*Task {
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
ret := make([]*Task, 0)
for len(taskManager.PendingQueue.inner) > 0 {
next := taskManager.PendingQueue.inner[0].NextExecutionTime
if next.Before(deadline) || next.Equal(deadline) {
p := heap.Pop(&taskManager.PendingQueue.inner)
ret = append(ret, p.(*Task))
} else {
break
}
}
return ret
}
func (*TaskManager) GetTaskLog(id int) (*TaskLog, error) {
return nil, nil
}
func (taskManager *TaskManager) NewOneshotTask(at time.Time,
identifier string,
source TaskSource,
retry, retryInterval int,
context interface{}) *Task {
if _, ok := taskManager.handlers[source]; !ok {
panic("please register your type first")
}
return &Task{
UserIdentifier: identifier,
Type: TASK_TYPE_ONESHOT,
Source: source,
NextExecutionTime: at,
Context: context,
Retry: retry,
RetryInterval: retryInterval,
LastExecutionTime: time.Time{},
Handler: taskManager.handlers[source],
}
}
func (taskManager *TaskManager) addTaskToPendingQueue(task *Task) {
taskManager.updateTaskStatus(task, STATUS_PENDING)
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
heap.Push(&taskManager.PendingQueue.inner, task)
select {
case taskManager.wake <- true:
default:
}
}
func (taskManager *TaskManager) CancelTask(uid string, source TaskSource) error {
task := &Task{}
if err := taskManager.rdb.Where("uid = ? and source = ?", uid, source).First(task).Error; err != nil {
return err
}
if err := taskManager.saveCancelTask(task); err != nil {
return err
}
taskManager.PendingQueue.Lock()
for idx, iter := range taskManager.PendingQueue.inner {
if task.Equal(iter) {
//remove element
taskManager.PendingQueue.inner = append(taskManager.PendingQueue.inner[:idx], taskManager.PendingQueue.inner[idx+1:]...)
break
}
}
taskManager.PendingQueue.Unlock()
if err := taskManager.internalRemoveTask(task); err != nil {
return err
}
select {
case taskManager.wake <- true:
default:
}
return nil
}
func (taskManager *TaskManager) GetTasks(pn, ps int) ([]*Task, int) {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
var tmp []*Task
offset := pn * ps
if offset < len(taskManager.PendingQueue.inner) {
if offset+pn >= len(taskManager.PendingQueue.inner) {
tmp = taskManager.PendingQueue.inner[offset:]
} else {
tmp = taskManager.PendingQueue.inner[offset : offset+pn]
}
}
ret := make([]*Task, len(tmp))
for idx, t := range tmp {
task := *t
ret[idx] = &task
}
return ret, len(taskManager.PendingQueue.inner)/pn + 1
}
func (taskManager *TaskManager) AddAndScheduleTask(task *Task) error {
now := time.Now()
if task.NextExecutionTime.Before(now) {
return fmt.Errorf("can't add task than now: %v < %v", task.NextExecutionTime, now)
}
task.Status = STATUS_INIT
if err := taskManager.saveTaskToDB(task); err != nil {
return fmt.Errorf("save task to db error : %v", err)
}
if err := taskManager.internalAddTask(task); err != nil {
return fmt.Errorf("add internal task error: %v", err)
}
log4go.Info("new task %v added type:%v next execaution time %s", task.UserIdentifier, task.Type, task.NextExecutionTime)
taskManager.addTaskToPendingQueue(task)
return nil
}
func (taskManager *TaskManager) doneTask(task *Task, status TaskStatus) {
key := TaskKey{
Uid: task.UserIdentifier,
Source: task.Source,
}
switch task.Type {
case TASK_TYPE_ONESHOT:
switch status {
case STATUS_SUCC:
taskManager.saveSuccessTask(task)
fallthrough
case STATUS_FAIL:
taskManager.updateTaskStatus(task, STATUS_FAIL)
taskManager.TaskMap.Lock()
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
}
default:
panic("not support task type yet")
}
}
func (taskManager *TaskManager) runTasks(tasks []*Task) {
var wg sync.WaitGroup
for _, task := range tasks {
wg.Add(1)
go func() {
defer wg.Done()
b := task.Retry
for {
taskManager.updateTaskStatus(task, STATUS_EXEC)
err := task.Handler.DoTask(task.UserIdentifier, task.Context)
if err != nil {
if task.Retry > 0 {
log4go.Global.Info("task %v-%v fails, retry (%v/%v)", task.Type, task.UserIdentifier, task.Retry, b)
task.Retry--
time.Sleep(time.Second * time.Duration(task.RetryInterval))
} else {
break
}
} else {
taskManager.saveSuccessTask(task)
return
}
}
taskManager.doneTask(task, STATUS_FAIL)
}()
}
wg.Wait()
}
func (taskManager *TaskManager) Run() {
for {
now := time.Now()
next := taskManager.getNextWakeupTime()
var duration time.Duration
if now.After(next) {
duration = time.Duration(0)
} else {
duration = next.Sub(now)
}
log4go.Global.Debug("wait for duration %v next:%v now:%v", duration, next, now)
select {
case <-taskManager.stop:
log4go.Global.Info("taskmanager closed")
return
case <-time.After(duration):
tasks := taskManager.popAvaliableTasks(now)
if len(tasks) > 0 {
log4go.Global.Debug("run tasks [%d]", len(tasks))
go taskManager.runTasks(tasks)
}
case <-taskManager.wake:
log4go.Global.Debug("taskmanager waked")
continue
}
}
}
func (taskManager *TaskManager) Stop() {
taskManager.stop <- true
}
func (taskManager *TaskManager) SyncTask() error {
tasks := []*Task{}
if err := taskManager.rdb.Where("status in (?)", []TaskStatus{STATUS_PENDING, STATUS_EXEC, STATUS_INIT}).Find(&tasks).Error; err != nil {
return err
}
for _, task := range tasks {
var context interface{}
var err error
if _, ok := taskManager.handlers[task.Source]; !ok {
log4go.Warn("unknown task source :%v", task.Source)
continue
} else {
task.Handler = taskManager.handlers[task.Source]
context, err = task.Handler.Sync(task.UserIdentifier)
if err != nil {
log4go.Warn("task context sync error: %v", err)
continue
} else {
task.Context = context
}
}
now := time.Now()
if task.NextExecutionTime.Before(now) {
log4go.Warn("next execution time is to early, just set it to failure")
taskManager.updateTaskStatus(task, STATUS_FAIL)
} else {
taskManager.addTaskToPendingQueue(task)
log4go.Warn("schedule task : [%v]", task.UserIdentifier)
}
}
return nil
}
func (taskManager *TaskManager) updateTaskStatus(task *Task, status TaskStatus) error {
if err := taskManager.wdb.Model(task).Update("status", status).Error; err != nil {
return fmt.Errorf("update taks error : %v", status)
}
log4go.Info("update task [%v] status [%v] ", task.UserIdentifier, status)
return nil
}
func (taskManager *TaskManager) saveSuccessTask(task *Task) error {
log4go.Info("update task [%v] status SUCCESS", task.UserIdentifier)
task.LastExecutionTime = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_SUCC,
"last_execution_time": task.LastExecutionTime}).Error; err != nil {
return fmt.Errorf("update delivery time and status error")
}
task.Status = STATUS_SUCC
return nil
}
func (taskManager *TaskManager) saveCancelTask(task *Task) error {
log4go.Info("update task [%v] status canceld", task.UserIdentifier)
task.CanceledAt = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_CANCEL,
"canceled_at": task.CanceledAt}).Error; err != nil {
return fmt.Errorf("update canceld time and status error")
}
task.Status = STATUS_CANCEL
return nil
}
func (taskManager *TaskManager) saveTaskLog(tasklog *TaskLog) {
panic("error")
}
func (taskManager *TaskManager) saveTaskToDB(task *Task) error {
var err error
if err = taskManager.wdb.Create(task).Error; err != nil {
return err
}
log4go.Info("saved task %d to db", task.ID)
return nil
}
| Push | identifier_name |
manager.go | package task
import (
"container/heap"
"fmt"
"sync"
"time"
"github.com/alecthomas/log4go"
"github.com/jinzhu/gorm"
)
const (
TASK_TYPE_PERIODIC = TaskType(1)
TASK_TYPE_ONESHOT = TaskType(2)
TASK_SOURCE_PUSH = TaskSource(0)
STATUS_UNKNOWN = TaskStatus(-1)
STATUS_INIT = TaskStatus(0) //init but may editing
STATUS_PENDING = TaskStatus(1) //added to pending Queue
STATUS_EXEC = TaskStatus(2)
STATUS_SUCC = TaskStatus(3)
STATUS_FAIL = TaskStatus(4)
STATUS_CANCEL = TaskStatus(5)
TICK = time.Minute
)
type TaskSource int
type TaskStatus int
type TaskType int
type TaskHandler interface {
DoTask(identifier string, context interface{}) error
Sync(uid string) (interface{}, error)
}
type TaskKey struct {
Source TaskSource
Uid string
}
type Task struct {
ID uint `gorm:"column:id;primary_key"`
CreatedAt time.Time `gorm:"column:created_at"`
UpdatedAt time.Time `gorm:"column:updated_at"`
CanceledAt time.Time `gorm:"column:canceled_at"`
UserIdentifier string `gorm:"column:uid;type:varchar(32);not null;index"`
Type TaskType `gorm:"column:type;type:tinyint(4)"`
Source TaskSource `gorm:"column:source;type:tinyint(4)"`
Period int `gorm:"column:period;type:int(11)"`
LastExecutionTime time.Time `gorm:"column:last_execution_time"`
NextExecutionTime time.Time `gorm:"column:next_execution_time"`
Status TaskStatus `gorm:"column:status;type:tinyint(4);index"`
Click int `gorm:"column:click"`
Reach int `gorm:"column:reach"`
ClickRate float32 `gorm:"column:click_rate"`
Retry int `gorm:"-"`
RetryInterval int `gorm:"-"`
Timeout int `gorm:"-"`
Handler TaskHandler `gorm:"-" json:"-"`
Context interface{} `gorm:"-"`
}
type TaskLog struct {
TaskId int
Status int
Start time.Time
End time.Time
}
type TaskManager struct {
TaskMap struct {
sync.RWMutex
inner map[TaskKey]*Task
}
PendingQueue struct {
sync.RWMutex
inner PriorityQueue
}
stop chan bool
wake chan bool
wdb *gorm.DB
rdb *gorm.DB
handlers map[TaskSource]TaskHandler
}
type PriorityQueue []*Task
var (
GlobalTaskManager *TaskManager
)
func (Task) TableName() string {
return "tb_task"
}
func (t *Task) Equal(other *Task) bool {
return t.UserIdentifier == other.UserIdentifier && t.Source == other.Source
}
func NewTaskManager(rdb, wdb *gorm.DB) (*TaskManager, error) {
m := &TaskManager{
TaskMap: struct {
sync.RWMutex
inner map[TaskKey]*Task
}{
inner: make(map[TaskKey]*Task),
},
PendingQueue: struct {
sync.RWMutex
inner PriorityQueue
}{
inner: make(PriorityQueue, 0),
},
stop: make(chan bool),
wake: make(chan bool),
wdb: wdb,
rdb: rdb,
handlers: make(map[TaskSource]TaskHandler),
}
heap.Init(&m.PendingQueue.inner)
return m, nil
}
func (q *PriorityQueue) Swap(i, j int) {
(*q)[i], (*q)[j] = (*q)[j], (*q)[i]
}
func (q *PriorityQueue) Len() int {
return len(*q)
}
func (q *PriorityQueue) Less(i, j int) bool {
return (*q)[i].NextExecutionTime.Before((*q)[j].NextExecutionTime)
}
func (q *PriorityQueue) Pop() interface{} {
old := *q
n := len(*q)
item := (*q)[n-1]
*q = old[0 : n-1]
return item
}
func (q *PriorityQueue) Push(x interface{}) {
*q = append(*q, x.(*Task))
}
func (taskManager *TaskManager) RegisterTaskSourceHandler(source TaskSource, handler TaskHandler) {
taskManager.handlers[source] = handler
}
func (taskManager *TaskManager) internalRemoveTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if !ok {
return fmt.Errorf("task not exists: %v", key)
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if !ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks not exists: %v", key)
}
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) internalAddTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if ok {
return fmt.Errorf("task exists")
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks exists")
}
taskManager.TaskMap.inner[key] = task
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) getNextWakeupTime() time.Time {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
if taskManager.PendingQueue.inner.Len() == 0 {
return time.Now().Add(TICK)
} else {
return taskManager.PendingQueue.inner[0].NextExecutionTime
}
}
func (taskManager *TaskManager) popAvaliableTasks(deadline time.Time) []*Task {
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
ret := make([]*Task, 0)
for len(taskManager.PendingQueue.inner) > 0 {
next := taskManager.PendingQueue.inner[0].NextExecutionTime
if next.Before(deadline) || next.Equal(deadline) {
p := heap.Pop(&taskManager.PendingQueue.inner)
ret = append(ret, p.(*Task))
} else {
break
}
}
return ret
}
func (*TaskManager) GetTaskLog(id int) (*TaskLog, error) {
return nil, nil
}
func (taskManager *TaskManager) NewOneshotTask(at time.Time,
identifier string,
source TaskSource,
retry, retryInterval int,
context interface{}) *Task {
if _, ok := taskManager.handlers[source]; !ok {
panic("please register your type first")
}
return &Task{
UserIdentifier: identifier,
Type: TASK_TYPE_ONESHOT,
Source: source,
NextExecutionTime: at,
Context: context,
Retry: retry,
RetryInterval: retryInterval,
LastExecutionTime: time.Time{},
Handler: taskManager.handlers[source],
}
}
func (taskManager *TaskManager) addTaskToPendingQueue(task *Task) {
taskManager.updateTaskStatus(task, STATUS_PENDING)
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
heap.Push(&taskManager.PendingQueue.inner, task)
select {
case taskManager.wake <- true:
default:
}
}
func (taskManager *TaskManager) CancelTask(uid string, source TaskSource) error {
task := &Task{}
if err := taskManager.rdb.Where("uid = ? and source = ?", uid, source).First(task).Error; err != nil {
return err
}
if err := taskManager.saveCancelTask(task); err != nil {
return err
}
taskManager.PendingQueue.Lock()
for idx, iter := range taskManager.PendingQueue.inner {
if task.Equal(iter) {
//remove element
taskManager.PendingQueue.inner = append(taskManager.PendingQueue.inner[:idx], taskManager.PendingQueue.inner[idx+1:]...)
break
}
}
taskManager.PendingQueue.Unlock()
if err := taskManager.internalRemoveTask(task); err != nil {
return err
}
select {
case taskManager.wake <- true:
default:
}
return nil
}
func (taskManager *TaskManager) GetTasks(pn, ps int) ([]*Task, int) {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
var tmp []*Task
offset := pn * ps
if offset < len(taskManager.PendingQueue.inner) {
if offset+pn >= len(taskManager.PendingQueue.inner) {
tmp = taskManager.PendingQueue.inner[offset:]
} else {
tmp = taskManager.PendingQueue.inner[offset : offset+pn]
}
}
ret := make([]*Task, len(tmp))
for idx, t := range tmp {
task := *t
ret[idx] = &task
}
return ret, len(taskManager.PendingQueue.inner)/pn + 1
}
func (taskManager *TaskManager) AddAndScheduleTask(task *Task) error {
now := time.Now()
if task.NextExecutionTime.Before(now) {
return fmt.Errorf("can't add task than now: %v < %v", task.NextExecutionTime, now)
}
task.Status = STATUS_INIT
if err := taskManager.saveTaskToDB(task); err != nil {
return fmt.Errorf("save task to db error : %v", err)
}
if err := taskManager.internalAddTask(task); err != nil {
return fmt.Errorf("add internal task error: %v", err)
}
log4go.Info("new task %v added type:%v next execaution time %s", task.UserIdentifier, task.Type, task.NextExecutionTime)
taskManager.addTaskToPendingQueue(task)
return nil
}
func (taskManager *TaskManager) doneTask(task *Task, status TaskStatus) {
key := TaskKey{
Uid: task.UserIdentifier,
Source: task.Source,
}
switch task.Type {
case TASK_TYPE_ONESHOT:
switch status {
case STATUS_SUCC:
taskManager.saveSuccessTask(task)
fallthrough
case STATUS_FAIL:
taskManager.updateTaskStatus(task, STATUS_FAIL)
taskManager.TaskMap.Lock()
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
}
default:
panic("not support task type yet")
}
}
func (taskManager *TaskManager) runTasks(tasks []*Task) {
var wg sync.WaitGroup
for _, task := range tasks {
wg.Add(1)
go func() {
defer wg.Done()
b := task.Retry
for {
taskManager.updateTaskStatus(task, STATUS_EXEC)
err := task.Handler.DoTask(task.UserIdentifier, task.Context)
if err != nil {
if task.Retry > 0 {
log4go.Global.Info("task %v-%v fails, retry (%v/%v)", task.Type, task.UserIdentifier, task.Retry, b)
task.Retry--
time.Sleep(time.Second * time.Duration(task.RetryInterval))
} else {
break
}
} else {
taskManager.saveSuccessTask(task)
return
}
}
taskManager.doneTask(task, STATUS_FAIL)
}()
}
wg.Wait()
}
func (taskManager *TaskManager) Run() {
for {
now := time.Now()
next := taskManager.getNextWakeupTime()
var duration time.Duration
if now.After(next) {
duration = time.Duration(0)
} else {
duration = next.Sub(now)
}
log4go.Global.Debug("wait for duration %v next:%v now:%v", duration, next, now)
select {
case <-taskManager.stop:
log4go.Global.Info("taskmanager closed")
return
case <-time.After(duration):
tasks := taskManager.popAvaliableTasks(now)
if len(tasks) > 0 {
log4go.Global.Debug("run tasks [%d]", len(tasks))
go taskManager.runTasks(tasks)
}
case <-taskManager.wake:
log4go.Global.Debug("taskmanager waked")
continue
}
}
}
func (taskManager *TaskManager) Stop() {
taskManager.stop <- true
}
func (taskManager *TaskManager) SyncTask() error {
tasks := []*Task{}
if err := taskManager.rdb.Where("status in (?)", []TaskStatus{STATUS_PENDING, STATUS_EXEC, STATUS_INIT}).Find(&tasks).Error; err != nil {
return err
}
for _, task := range tasks {
var context interface{}
var err error
if _, ok := taskManager.handlers[task.Source]; !ok {
log4go.Warn("unknown task source :%v", task.Source)
continue
} else {
task.Handler = taskManager.handlers[task.Source]
context, err = task.Handler.Sync(task.UserIdentifier)
if err != nil {
log4go.Warn("task context sync error: %v", err)
continue
} else {
task.Context = context
}
}
now := time.Now()
if task.NextExecutionTime.Before(now) {
log4go.Warn("next execution time is to early, just set it to failure")
taskManager.updateTaskStatus(task, STATUS_FAIL)
} else {
taskManager.addTaskToPendingQueue(task)
log4go.Warn("schedule task : [%v]", task.UserIdentifier)
}
}
return nil
}
func (taskManager *TaskManager) updateTaskStatus(task *Task, status TaskStatus) error {
if err := taskManager.wdb.Model(task).Update("status", status).Error; err != nil {
return fmt.Errorf("update taks error : %v", status)
}
log4go.Info("update task [%v] status [%v] ", task.UserIdentifier, status)
return nil
}
func (taskManager *TaskManager) saveSuccessTask(task *Task) error {
log4go.Info("update task [%v] status SUCCESS", task.UserIdentifier)
task.LastExecutionTime = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_SUCC,
"last_execution_time": task.LastExecutionTime}).Error; err != nil {
return fmt.Errorf("update delivery time and status error")
}
task.Status = STATUS_SUCC
return nil
}
func (taskManager *TaskManager) saveCancelTask(task *Task) error {
log4go.Info("update task [%v] status canceld", task.UserIdentifier)
task.CanceledAt = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_CANCEL,
"canceled_at": task.CanceledAt}).Error; err != nil {
return fmt.Errorf("update canceld time and status error")
}
task.Status = STATUS_CANCEL
return nil
}
func (taskManager *TaskManager) saveTaskLog(tasklog *TaskLog) {
panic("error")
}
func (taskManager *TaskManager) saveTaskToDB(task *Task) error | {
var err error
if err = taskManager.wdb.Create(task).Error; err != nil {
return err
}
log4go.Info("saved task %d to db", task.ID)
return nil
} | identifier_body | |
manager.go | package task
import (
"container/heap"
"fmt"
"sync"
"time"
"github.com/alecthomas/log4go"
"github.com/jinzhu/gorm"
)
const (
TASK_TYPE_PERIODIC = TaskType(1)
TASK_TYPE_ONESHOT = TaskType(2)
TASK_SOURCE_PUSH = TaskSource(0)
STATUS_UNKNOWN = TaskStatus(-1)
STATUS_INIT = TaskStatus(0) //init but may editing
STATUS_PENDING = TaskStatus(1) //added to pending Queue
STATUS_EXEC = TaskStatus(2)
STATUS_SUCC = TaskStatus(3)
STATUS_FAIL = TaskStatus(4)
STATUS_CANCEL = TaskStatus(5)
TICK = time.Minute
)
type TaskSource int
type TaskStatus int
type TaskType int
type TaskHandler interface { |
type TaskKey struct {
Source TaskSource
Uid string
}
type Task struct {
ID uint `gorm:"column:id;primary_key"`
CreatedAt time.Time `gorm:"column:created_at"`
UpdatedAt time.Time `gorm:"column:updated_at"`
CanceledAt time.Time `gorm:"column:canceled_at"`
UserIdentifier string `gorm:"column:uid;type:varchar(32);not null;index"`
Type TaskType `gorm:"column:type;type:tinyint(4)"`
Source TaskSource `gorm:"column:source;type:tinyint(4)"`
Period int `gorm:"column:period;type:int(11)"`
LastExecutionTime time.Time `gorm:"column:last_execution_time"`
NextExecutionTime time.Time `gorm:"column:next_execution_time"`
Status TaskStatus `gorm:"column:status;type:tinyint(4);index"`
Click int `gorm:"column:click"`
Reach int `gorm:"column:reach"`
ClickRate float32 `gorm:"column:click_rate"`
Retry int `gorm:"-"`
RetryInterval int `gorm:"-"`
Timeout int `gorm:"-"`
Handler TaskHandler `gorm:"-" json:"-"`
Context interface{} `gorm:"-"`
}
type TaskLog struct {
TaskId int
Status int
Start time.Time
End time.Time
}
type TaskManager struct {
TaskMap struct {
sync.RWMutex
inner map[TaskKey]*Task
}
PendingQueue struct {
sync.RWMutex
inner PriorityQueue
}
stop chan bool
wake chan bool
wdb *gorm.DB
rdb *gorm.DB
handlers map[TaskSource]TaskHandler
}
type PriorityQueue []*Task
var (
GlobalTaskManager *TaskManager
)
func (Task) TableName() string {
return "tb_task"
}
func (t *Task) Equal(other *Task) bool {
return t.UserIdentifier == other.UserIdentifier && t.Source == other.Source
}
func NewTaskManager(rdb, wdb *gorm.DB) (*TaskManager, error) {
m := &TaskManager{
TaskMap: struct {
sync.RWMutex
inner map[TaskKey]*Task
}{
inner: make(map[TaskKey]*Task),
},
PendingQueue: struct {
sync.RWMutex
inner PriorityQueue
}{
inner: make(PriorityQueue, 0),
},
stop: make(chan bool),
wake: make(chan bool),
wdb: wdb,
rdb: rdb,
handlers: make(map[TaskSource]TaskHandler),
}
heap.Init(&m.PendingQueue.inner)
return m, nil
}
func (q *PriorityQueue) Swap(i, j int) {
(*q)[i], (*q)[j] = (*q)[j], (*q)[i]
}
func (q *PriorityQueue) Len() int {
return len(*q)
}
func (q *PriorityQueue) Less(i, j int) bool {
return (*q)[i].NextExecutionTime.Before((*q)[j].NextExecutionTime)
}
func (q *PriorityQueue) Pop() interface{} {
old := *q
n := len(*q)
item := (*q)[n-1]
*q = old[0 : n-1]
return item
}
func (q *PriorityQueue) Push(x interface{}) {
*q = append(*q, x.(*Task))
}
func (taskManager *TaskManager) RegisterTaskSourceHandler(source TaskSource, handler TaskHandler) {
taskManager.handlers[source] = handler
}
func (taskManager *TaskManager) internalRemoveTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if !ok {
return fmt.Errorf("task not exists: %v", key)
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if !ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks not exists: %v", key)
}
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) internalAddTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if ok {
return fmt.Errorf("task exists")
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks exists")
}
taskManager.TaskMap.inner[key] = task
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) getNextWakeupTime() time.Time {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
if taskManager.PendingQueue.inner.Len() == 0 {
return time.Now().Add(TICK)
} else {
return taskManager.PendingQueue.inner[0].NextExecutionTime
}
}
func (taskManager *TaskManager) popAvaliableTasks(deadline time.Time) []*Task {
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
ret := make([]*Task, 0)
for len(taskManager.PendingQueue.inner) > 0 {
next := taskManager.PendingQueue.inner[0].NextExecutionTime
if next.Before(deadline) || next.Equal(deadline) {
p := heap.Pop(&taskManager.PendingQueue.inner)
ret = append(ret, p.(*Task))
} else {
break
}
}
return ret
}
func (*TaskManager) GetTaskLog(id int) (*TaskLog, error) {
return nil, nil
}
func (taskManager *TaskManager) NewOneshotTask(at time.Time,
identifier string,
source TaskSource,
retry, retryInterval int,
context interface{}) *Task {
if _, ok := taskManager.handlers[source]; !ok {
panic("please register your type first")
}
return &Task{
UserIdentifier: identifier,
Type: TASK_TYPE_ONESHOT,
Source: source,
NextExecutionTime: at,
Context: context,
Retry: retry,
RetryInterval: retryInterval,
LastExecutionTime: time.Time{},
Handler: taskManager.handlers[source],
}
}
func (taskManager *TaskManager) addTaskToPendingQueue(task *Task) {
taskManager.updateTaskStatus(task, STATUS_PENDING)
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
heap.Push(&taskManager.PendingQueue.inner, task)
select {
case taskManager.wake <- true:
default:
}
}
func (taskManager *TaskManager) CancelTask(uid string, source TaskSource) error {
task := &Task{}
if err := taskManager.rdb.Where("uid = ? and source = ?", uid, source).First(task).Error; err != nil {
return err
}
if err := taskManager.saveCancelTask(task); err != nil {
return err
}
taskManager.PendingQueue.Lock()
for idx, iter := range taskManager.PendingQueue.inner {
if task.Equal(iter) {
//remove element
taskManager.PendingQueue.inner = append(taskManager.PendingQueue.inner[:idx], taskManager.PendingQueue.inner[idx+1:]...)
break
}
}
taskManager.PendingQueue.Unlock()
if err := taskManager.internalRemoveTask(task); err != nil {
return err
}
select {
case taskManager.wake <- true:
default:
}
return nil
}
func (taskManager *TaskManager) GetTasks(pn, ps int) ([]*Task, int) {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
var tmp []*Task
offset := pn * ps
if offset < len(taskManager.PendingQueue.inner) {
if offset+pn >= len(taskManager.PendingQueue.inner) {
tmp = taskManager.PendingQueue.inner[offset:]
} else {
tmp = taskManager.PendingQueue.inner[offset : offset+pn]
}
}
ret := make([]*Task, len(tmp))
for idx, t := range tmp {
task := *t
ret[idx] = &task
}
return ret, len(taskManager.PendingQueue.inner)/pn + 1
}
func (taskManager *TaskManager) AddAndScheduleTask(task *Task) error {
now := time.Now()
if task.NextExecutionTime.Before(now) {
return fmt.Errorf("can't add task than now: %v < %v", task.NextExecutionTime, now)
}
task.Status = STATUS_INIT
if err := taskManager.saveTaskToDB(task); err != nil {
return fmt.Errorf("save task to db error : %v", err)
}
if err := taskManager.internalAddTask(task); err != nil {
return fmt.Errorf("add internal task error: %v", err)
}
log4go.Info("new task %v added type:%v next execaution time %s", task.UserIdentifier, task.Type, task.NextExecutionTime)
taskManager.addTaskToPendingQueue(task)
return nil
}
func (taskManager *TaskManager) doneTask(task *Task, status TaskStatus) {
key := TaskKey{
Uid: task.UserIdentifier,
Source: task.Source,
}
switch task.Type {
case TASK_TYPE_ONESHOT:
switch status {
case STATUS_SUCC:
taskManager.saveSuccessTask(task)
fallthrough
case STATUS_FAIL:
taskManager.updateTaskStatus(task, STATUS_FAIL)
taskManager.TaskMap.Lock()
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
}
default:
panic("not support task type yet")
}
}
func (taskManager *TaskManager) runTasks(tasks []*Task) {
var wg sync.WaitGroup
for _, task := range tasks {
wg.Add(1)
go func() {
defer wg.Done()
b := task.Retry
for {
taskManager.updateTaskStatus(task, STATUS_EXEC)
err := task.Handler.DoTask(task.UserIdentifier, task.Context)
if err != nil {
if task.Retry > 0 {
log4go.Global.Info("task %v-%v fails, retry (%v/%v)", task.Type, task.UserIdentifier, task.Retry, b)
task.Retry--
time.Sleep(time.Second * time.Duration(task.RetryInterval))
} else {
break
}
} else {
taskManager.saveSuccessTask(task)
return
}
}
taskManager.doneTask(task, STATUS_FAIL)
}()
}
wg.Wait()
}
func (taskManager *TaskManager) Run() {
for {
now := time.Now()
next := taskManager.getNextWakeupTime()
var duration time.Duration
if now.After(next) {
duration = time.Duration(0)
} else {
duration = next.Sub(now)
}
log4go.Global.Debug("wait for duration %v next:%v now:%v", duration, next, now)
select {
case <-taskManager.stop:
log4go.Global.Info("taskmanager closed")
return
case <-time.After(duration):
tasks := taskManager.popAvaliableTasks(now)
if len(tasks) > 0 {
log4go.Global.Debug("run tasks [%d]", len(tasks))
go taskManager.runTasks(tasks)
}
case <-taskManager.wake:
log4go.Global.Debug("taskmanager waked")
continue
}
}
}
func (taskManager *TaskManager) Stop() {
taskManager.stop <- true
}
func (taskManager *TaskManager) SyncTask() error {
tasks := []*Task{}
if err := taskManager.rdb.Where("status in (?)", []TaskStatus{STATUS_PENDING, STATUS_EXEC, STATUS_INIT}).Find(&tasks).Error; err != nil {
return err
}
for _, task := range tasks {
var context interface{}
var err error
if _, ok := taskManager.handlers[task.Source]; !ok {
log4go.Warn("unknown task source :%v", task.Source)
continue
} else {
task.Handler = taskManager.handlers[task.Source]
context, err = task.Handler.Sync(task.UserIdentifier)
if err != nil {
log4go.Warn("task context sync error: %v", err)
continue
} else {
task.Context = context
}
}
now := time.Now()
if task.NextExecutionTime.Before(now) {
log4go.Warn("next execution time is to early, just set it to failure")
taskManager.updateTaskStatus(task, STATUS_FAIL)
} else {
taskManager.addTaskToPendingQueue(task)
log4go.Warn("schedule task : [%v]", task.UserIdentifier)
}
}
return nil
}
func (taskManager *TaskManager) updateTaskStatus(task *Task, status TaskStatus) error {
if err := taskManager.wdb.Model(task).Update("status", status).Error; err != nil {
return fmt.Errorf("update taks error : %v", status)
}
log4go.Info("update task [%v] status [%v] ", task.UserIdentifier, status)
return nil
}
func (taskManager *TaskManager) saveSuccessTask(task *Task) error {
log4go.Info("update task [%v] status SUCCESS", task.UserIdentifier)
task.LastExecutionTime = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_SUCC,
"last_execution_time": task.LastExecutionTime}).Error; err != nil {
return fmt.Errorf("update delivery time and status error")
}
task.Status = STATUS_SUCC
return nil
}
func (taskManager *TaskManager) saveCancelTask(task *Task) error {
log4go.Info("update task [%v] status canceld", task.UserIdentifier)
task.CanceledAt = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_CANCEL,
"canceled_at": task.CanceledAt}).Error; err != nil {
return fmt.Errorf("update canceld time and status error")
}
task.Status = STATUS_CANCEL
return nil
}
func (taskManager *TaskManager) saveTaskLog(tasklog *TaskLog) {
panic("error")
}
func (taskManager *TaskManager) saveTaskToDB(task *Task) error {
var err error
if err = taskManager.wdb.Create(task).Error; err != nil {
return err
}
log4go.Info("saved task %d to db", task.ID)
return nil
} | DoTask(identifier string, context interface{}) error
Sync(uid string) (interface{}, error)
} | random_line_split |
retrieval_eval_bleu.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import os
import torch
from fairseq import bleu
from tqdm import tqdm
from empchat.datasets.dailydialog import DDDataset
from empchat.datasets.empchat import EmpDataset
from empchat.datasets.reddit import RedditDataset
from empchat.datasets.parlai_dictionary import ParlAIDictionary
from empchat.datasets.tokens import tokenize, PAD_TOKEN, START_OF_COMMENT, UNK_TOKEN
from empchat.models import load as load_model, score_candidates
from empchat.util import get_opt
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter("%(asctime)s: [ %(message)s ]", "%m/%d/%Y %I:%M:%S %p")
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# ------------------------------------------------------------------------------
# Commandline arguments & init
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument(
"--bleu-dict",
type=str,
default=None,
help=(
"Path to dictionary to use for BLEU calculation (if "
"not the same as the dictionary to use for retrieval)"
),
)
parser.add_argument(
"--candidates", type=str, default=None, help="Path to candidates to use"
)
parser.add_argument(
"--dailydialog-cands", action="store_true", help="Include DailyDialog candidates"
)
parser.add_argument(
"--dailydialog-folder", type=str, help="Path to DailyDialog data folder"
)
parser.add_argument(
"--empchat-cands",
action="store_true",
help="Include EmpatheticDialogues candidates",
)
parser.add_argument(
"--empchat-folder", type=str, help="Path to EmpatheticDialogues data folder"
)
parser.add_argument(
"--fasttext", type=int, default=None, help="Number of fastText labels to prepend"
)
parser.add_argument(
"--fasttext-path", type=str, default=None, help="Path to fastText classifier"
)
parser.add_argument(
"--fasttext-type",
type=str,
default=None,
help="Specifies labels of fastText classifier",
)
parser.add_argument("--gpu", type=int, default=-1, help="Specify GPU device id to use")
parser.add_argument(
"--max-cand-length",
type=int,
default=20,
help="Max candidate length in number of tokens",
)
parser.add_argument(
"--max-hist-len",
type=int,
default=1,
help="Max num conversation turns to use in context",
)
parser.add_argument(
"--model", "--pretrained", type=str, default=None, help="Path to model to use"
)
parser.add_argument(
"--n-candidates", type=int, default=int(1e6), help="Max number of candidates"
)
parser.add_argument("--name", type=str, help="Part of name of response output file")
parser.add_argument("--no-cuda", action="store_true", help="Use CPU only")
parser.add_argument(
"--normalize-cands", action="store_true", help="Normalize encoded candidates"
)
parser.add_argument(
"--output-folder", type=str, default=None, help="Path to output folder"
)
parser.add_argument(
"--reactonly",
action="store_true",
help="EmpatheticDialogues: only consider Listener responses",
)
parser.add_argument(
"--reddit-cands", action="store_true", help="Include Reddit candidates"
)
parser.add_argument("--reddit-folder", type=str, help="Path to Reddit data folder")
parser.add_argument(
"--save-candidates", action="store_true", help="If true, save candidate files"
)
parser.add_argument(
"--task",
type=str,
choices=["dailydialog", "empchat", "reddit"],
default="empchat",
help="Dataset for context/target-response pairs",
)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
if args.fasttext is not None:
args.max_cand_length += args.fasttext
net, net_dictionary = load_model(args.model, get_opt(existing_opt=args))
if "bert_tokenizer" in net_dictionary:
if args.task == "dailydialog":
raise NotImplementedError("BERT model currently incompatible with DailyDialog!")
if args.bleu_dict is not None:
_, bleu_dictionary = load_model(args.bleu_dict, get_opt(existing_opt=args))
else:
bleu_dictionary = net_dictionary
paramnum = 0
trainable = 0
for parameter in net.parameters():
if parameter.requires_grad:
trainable += parameter.numel()
paramnum += parameter.numel()
print(paramnum, trainable)
print(type(net_dictionary))
NET_PAD_IDX = net_dictionary["words"][PAD_TOKEN]
NET_UNK_IDX = net_dictionary["words"][UNK_TOKEN]
print(type(bleu_dictionary))
BLEU_PAD_IDX = bleu_dictionary["words"][PAD_TOKEN]
BLEU_UNK_IDX = bleu_dictionary["words"][UNK_TOKEN]
BLEU_EOS_IDX = bleu_dictionary["words"][START_OF_COMMENT]
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
actual_ct = [0, 0, 0]
if args.cuda:
net = torch.nn.DataParallel(net)
net.cuda()
net.eval()
def pad(items):
max_len = max(len(i) for i in items)
tensor = torch.LongTensor(len(items), max_len).fill_(NET_PAD_IDX)
for i, sentence in enumerate(items):
tensor[i, : sentence.size(0)] = sentence
return tensor
def build_candidates(
max_cand_length, n_cands=int(1e7), rm_duplicates=True, rm_starting_gt=True
):
global actual_ct
global args
tensor = torch.LongTensor(n_cands, max_cand_length).fill_(NET_PAD_IDX)
i = 0
chunk = 422
if "bert_tokenizer" in net_dictionary:
gt_tokens = torch.LongTensor(
net_dictionary["bert_tokenizer"].convert_tokens_to_ids(["&", "g", "##t"])
)
else:
gt_index = net_dictionary["words"][">"]
lt_index = net_dictionary["words"]["<"]
unk_index = net_dictionary["words"]["<UNK>"]
n_duplicates = n_start_gt = 0
if rm_duplicates:
all_sent = set()
def _has_lts(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
tokens = net_dictionary["bert_tokenizer"].convert_ids_to_tokens(
sentence_.tolist()
)
return "& l ##t" in " ".join(tokens)
else:
return torch.sum(sentence_ == lt_index).gt(0)
def _starts_with_gt(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
if sentence_.size(0) < 3:
return False
else:
return torch.eq(sentence_[:3], gt_tokens).all()
else:
return sentence_[0].item == gt_index
parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
if args.empchat_cands:
dataset = EmpDataset(
"train",
parlai_dict,
data_folder=args.empchat_folder,
reactonly=False,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence, _ = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
breakpoint_ = i
actual_ct[1] = i
if args.dailydialog_cands:
dataset = DDDataset("train", parlai_dict, data_folder=args.dailydialog_folder)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
bp2 = i
actual_ct[2] = i - breakpoint_
if args.reddit_cands:
while i < n_cands:
chunk += 1
logging.info(f"Loaded {i} / {n_cands} candidates")
dataset = RedditDataset(args.reddit_folder, chunk, net_dictionary)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if sent_length == 0:
print(f"Reddit sentence {data_idx} is of length 0.")
continue
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
actual_ct[0] = i - bp2
logging.info(
f"Loaded {i} candidates, {n_start_gt} start with >, {n_duplicates} duplicates"
)
args.n_candidates = i
return tensor[:i, :], breakpoint_, bp2
def embed_candidates(candidates):
out_tensor = None
i = 0
# ch = candidates.split(2048, dim=0)
ch = candidates.split(1024, dim=0)
for chunk in tqdm(range(len(ch))):
_, encoded_cand = net(None, ch[chunk])
if out_tensor is None:
out_tensor = torch.FloatTensor(candidates.size(0), encoded_cand.size(1))
if args.cuda:
out_tensor = out_tensor.cuda()
if args.normalize_cands:
encoded_cand /= encoded_cand.norm(2, dim=1, keepdim=True)
batch_size = encoded_cand.size(0)
out_tensor[i : i + batch_size] = encoded_cand
i += batch_size
return out_tensor
def get_token_tensor(sentence):
words = net_dictionary["words"]
tokenized = tokenize(sentence, split_sep=None)
return torch.LongTensor([words.get(w, NET_UNK_IDX) for w in tokenized])
def stringify(tensor):
|
if args.candidates:
fixed_candidates = torch.load(args.candidates)
if args.n_candidates < fixed_candidates.size(0):
logging.warning(
f"Keeping only {args.n_candidates} / {fixed_candidates.size(0)} candidates"
)
fixed_candidates = fixed_candidates[: args.n_candidates]
else:
fixed_candidates, breakingpt, breakingpt2 = build_candidates(
args.max_cand_length, args.n_candidates
)
if args.cuda:
fixed_candidates = fixed_candidates.cuda(non_blocking=True)
logging.warning("Embedding candidates")
with torch.no_grad():
cand_embs = embed_candidates(fixed_candidates)
logging.warning("Done with candidates")
if args.save_candidates:
cand_path = os.path.join(args.output_folder, "reddit_cands_tokens.bin")
logging.warning(f"Saving candidates in {cand_path}")
torch.save(fixed_candidates, cand_path)
emb_path = os.path.join(args.output_folder, "reddit_cands.bin")
logging.warning(f"Saving candidate embs in {emb_path}")
torch.save(cand_embs, emb_path)
txt_path = os.path.join(args.output_folder, "reddit_cands.txt")
logging.warning(f"Saving candidate texts in {txt_path}")
with open(txt_path, "w") as f:
for candidate in fixed_candidates:
f.write(stringify(candidate))
f.write("\n")
logging.warning("Done saving files")
# ------------------------------------------------------------------------------
# Drop in to interactive mode
# ------------------------------------------------------------------------------
def predict(context, top_n=5, normalize=False):
"""
returns a list of top_n tuples ("sentence", "score")
"""
with torch.no_grad():
context = context.unsqueeze(0)
candidates = fixed_candidates
if args.cuda:
context = context.cuda(non_blocking=True)
ctx, _ = net(context, None)
scores, index = score_candidates(ctx, cand_embs, top_n, normalize)
response = []
outputs = []
for i, (score, index) in enumerate(zip(scores.squeeze(0), index.squeeze(0)), 1):
response.append((stringify(candidates[index]), float(score)))
if index < breakingpt:
outputs.append("EmpChat")
elif index < breakingpt2:
outputs.append("DailyDialog")
else:
outputs.append("Reddit")
return response, outputs
def get_bleu4(split, history_len=1):
"""
Print BLEU scores and output contexts and retrieved responses.
"""
if history_len < 1:
history_len = 1
source_ct = [0, 0, 0]
net_parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
bleu_parlai_dict = ParlAIDictionary.create_from_reddit_style(bleu_dictionary)
scorer = bleu.Scorer(BLEU_PAD_IDX, BLEU_EOS_IDX, BLEU_UNK_IDX)
outf = open("retrieved_split_" + args.name + "_" + split + ".txt", "w")
def _get_dataset(reddit_dict, parlai_dict):
if args.task == "dailydialog":
return DDDataset(
split,
parlai_dict,
data_folder=args.dailydialog_folder,
history_len=history_len,
)
elif args.task == "empchat":
return EmpDataset(
split,
parlai_dict,
data_folder=args.empchat_folder,
history_len=history_len,
reactonly=args.reactonly,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
elif args.task == "reddit":
return RedditDataset(
data_folder=args.reddit_folder,
chunk_id=999,
dict_=reddit_dict,
max_hist_len=history_len,
rm_blank_sentences=True,
)
else:
raise ValueError("Task unrecognized!")
net_dataset = _get_dataset(net_dictionary, net_parlai_dict)
bleu_dataset = _get_dataset(bleu_dictionary, bleu_parlai_dict)
sample_index = range(len(bleu_dataset))
for data_idx in sample_index:
net_context, _ = net_dataset[data_idx][:2]
bleu_context, bleu_sentence = bleu_dataset[data_idx][:2]
target_tokens = bleu_sentence
if args.fasttext is not None:
target_tokens = target_tokens[args.fasttext :]
context = bleu_parlai_dict.vec2txt(bleu_context.numpy().tolist())
responses, sources = predict(net_context)
response = responses[0][0]
source = sources[0]
if source == "Reddit":
source_ct[0] += 1
elif source == "EmpChat":
source_ct[1] += 1
else:
source_ct[2] += 1
if args.task == "empchat":
cid, sid = bleu_dataset.getid(data_idx)
else:
cid = sid = -1
# This is a hack, because the other datasets have no .getid() method
if args.fasttext is not None:
response = " ".join(response.split()[args.fasttext :])
outf.write("\t".join([str(cid), str(sid), context, response, source]) + "\n")
hypo_tokens = torch.IntTensor(bleu_parlai_dict.txt2vec(response))
# Use this tokenization even if a BERT tokenizer exists, to match the BLEU
# calculation when not using BERT
scorer.add(target_tokens.type(torch.IntTensor), hypo_tokens)
print(scorer.result_string(order=1))
print(scorer.result_string(order=2))
print(scorer.result_string(order=3))
print(scorer.result_string(order=4))
print(actual_ct)
print(
f"EmpatheticDialogues {int(source_ct[1]):d}: selected "
f"{float(source_ct[1]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[1]) / sum(actual_ct)}"
)
print(
f"DailyDialog {int(source_ct[2]):d}: selected "
f"{float(source_ct[2]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[2]) / sum(actual_ct)}"
)
print(
f"Reddit {int(source_ct[0]):d}: selected "
f"{float(source_ct[0]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[0]) / sum(actual_ct)}"
)
get_bleu4("valid", history_len=args.max_hist_len)
get_bleu4("test", history_len=args.max_hist_len)
| iwords = net_dictionary["iwords"]
assert tensor.squeeze().dim() == 1, "Wrong tensor size!"
return " ".join(
iwords[i] for i in tensor.squeeze().cpu().numpy() if i != NET_PAD_IDX
).replace("@@ ", "")
# Remove any BPE tokenization | identifier_body |
retrieval_eval_bleu.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import os
import torch
from fairseq import bleu
from tqdm import tqdm
from empchat.datasets.dailydialog import DDDataset
from empchat.datasets.empchat import EmpDataset
from empchat.datasets.reddit import RedditDataset
from empchat.datasets.parlai_dictionary import ParlAIDictionary
from empchat.datasets.tokens import tokenize, PAD_TOKEN, START_OF_COMMENT, UNK_TOKEN
from empchat.models import load as load_model, score_candidates
from empchat.util import get_opt
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter("%(asctime)s: [ %(message)s ]", "%m/%d/%Y %I:%M:%S %p")
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# ------------------------------------------------------------------------------
# Commandline arguments & init
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument(
"--bleu-dict",
type=str,
default=None,
help=(
"Path to dictionary to use for BLEU calculation (if "
"not the same as the dictionary to use for retrieval)"
),
)
parser.add_argument(
"--candidates", type=str, default=None, help="Path to candidates to use"
)
parser.add_argument(
"--dailydialog-cands", action="store_true", help="Include DailyDialog candidates"
)
parser.add_argument(
"--dailydialog-folder", type=str, help="Path to DailyDialog data folder"
)
parser.add_argument(
"--empchat-cands",
action="store_true",
help="Include EmpatheticDialogues candidates",
)
parser.add_argument(
"--empchat-folder", type=str, help="Path to EmpatheticDialogues data folder"
)
parser.add_argument(
"--fasttext", type=int, default=None, help="Number of fastText labels to prepend"
)
parser.add_argument(
"--fasttext-path", type=str, default=None, help="Path to fastText classifier"
)
parser.add_argument(
"--fasttext-type",
type=str,
default=None,
help="Specifies labels of fastText classifier",
)
parser.add_argument("--gpu", type=int, default=-1, help="Specify GPU device id to use")
parser.add_argument(
"--max-cand-length",
type=int,
default=20,
help="Max candidate length in number of tokens",
)
parser.add_argument(
"--max-hist-len",
type=int,
default=1,
help="Max num conversation turns to use in context", | parser.add_argument(
"--model", "--pretrained", type=str, default=None, help="Path to model to use"
)
parser.add_argument(
"--n-candidates", type=int, default=int(1e6), help="Max number of candidates"
)
parser.add_argument("--name", type=str, help="Part of name of response output file")
parser.add_argument("--no-cuda", action="store_true", help="Use CPU only")
parser.add_argument(
"--normalize-cands", action="store_true", help="Normalize encoded candidates"
)
parser.add_argument(
"--output-folder", type=str, default=None, help="Path to output folder"
)
parser.add_argument(
"--reactonly",
action="store_true",
help="EmpatheticDialogues: only consider Listener responses",
)
parser.add_argument(
"--reddit-cands", action="store_true", help="Include Reddit candidates"
)
parser.add_argument("--reddit-folder", type=str, help="Path to Reddit data folder")
parser.add_argument(
"--save-candidates", action="store_true", help="If true, save candidate files"
)
parser.add_argument(
"--task",
type=str,
choices=["dailydialog", "empchat", "reddit"],
default="empchat",
help="Dataset for context/target-response pairs",
)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
if args.fasttext is not None:
args.max_cand_length += args.fasttext
net, net_dictionary = load_model(args.model, get_opt(existing_opt=args))
if "bert_tokenizer" in net_dictionary:
if args.task == "dailydialog":
raise NotImplementedError("BERT model currently incompatible with DailyDialog!")
if args.bleu_dict is not None:
_, bleu_dictionary = load_model(args.bleu_dict, get_opt(existing_opt=args))
else:
bleu_dictionary = net_dictionary
paramnum = 0
trainable = 0
for parameter in net.parameters():
if parameter.requires_grad:
trainable += parameter.numel()
paramnum += parameter.numel()
print(paramnum, trainable)
print(type(net_dictionary))
NET_PAD_IDX = net_dictionary["words"][PAD_TOKEN]
NET_UNK_IDX = net_dictionary["words"][UNK_TOKEN]
print(type(bleu_dictionary))
BLEU_PAD_IDX = bleu_dictionary["words"][PAD_TOKEN]
BLEU_UNK_IDX = bleu_dictionary["words"][UNK_TOKEN]
BLEU_EOS_IDX = bleu_dictionary["words"][START_OF_COMMENT]
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
actual_ct = [0, 0, 0]
if args.cuda:
net = torch.nn.DataParallel(net)
net.cuda()
net.eval()
def pad(items):
max_len = max(len(i) for i in items)
tensor = torch.LongTensor(len(items), max_len).fill_(NET_PAD_IDX)
for i, sentence in enumerate(items):
tensor[i, : sentence.size(0)] = sentence
return tensor
def build_candidates(
max_cand_length, n_cands=int(1e7), rm_duplicates=True, rm_starting_gt=True
):
global actual_ct
global args
tensor = torch.LongTensor(n_cands, max_cand_length).fill_(NET_PAD_IDX)
i = 0
chunk = 422
if "bert_tokenizer" in net_dictionary:
gt_tokens = torch.LongTensor(
net_dictionary["bert_tokenizer"].convert_tokens_to_ids(["&", "g", "##t"])
)
else:
gt_index = net_dictionary["words"][">"]
lt_index = net_dictionary["words"]["<"]
unk_index = net_dictionary["words"]["<UNK>"]
n_duplicates = n_start_gt = 0
if rm_duplicates:
all_sent = set()
def _has_lts(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
tokens = net_dictionary["bert_tokenizer"].convert_ids_to_tokens(
sentence_.tolist()
)
return "& l ##t" in " ".join(tokens)
else:
return torch.sum(sentence_ == lt_index).gt(0)
def _starts_with_gt(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
if sentence_.size(0) < 3:
return False
else:
return torch.eq(sentence_[:3], gt_tokens).all()
else:
return sentence_[0].item == gt_index
parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
if args.empchat_cands:
dataset = EmpDataset(
"train",
parlai_dict,
data_folder=args.empchat_folder,
reactonly=False,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence, _ = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
breakpoint_ = i
actual_ct[1] = i
if args.dailydialog_cands:
dataset = DDDataset("train", parlai_dict, data_folder=args.dailydialog_folder)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
bp2 = i
actual_ct[2] = i - breakpoint_
if args.reddit_cands:
while i < n_cands:
chunk += 1
logging.info(f"Loaded {i} / {n_cands} candidates")
dataset = RedditDataset(args.reddit_folder, chunk, net_dictionary)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if sent_length == 0:
print(f"Reddit sentence {data_idx} is of length 0.")
continue
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
actual_ct[0] = i - bp2
logging.info(
f"Loaded {i} candidates, {n_start_gt} start with >, {n_duplicates} duplicates"
)
args.n_candidates = i
return tensor[:i, :], breakpoint_, bp2
def embed_candidates(candidates):
out_tensor = None
i = 0
# ch = candidates.split(2048, dim=0)
ch = candidates.split(1024, dim=0)
for chunk in tqdm(range(len(ch))):
_, encoded_cand = net(None, ch[chunk])
if out_tensor is None:
out_tensor = torch.FloatTensor(candidates.size(0), encoded_cand.size(1))
if args.cuda:
out_tensor = out_tensor.cuda()
if args.normalize_cands:
encoded_cand /= encoded_cand.norm(2, dim=1, keepdim=True)
batch_size = encoded_cand.size(0)
out_tensor[i : i + batch_size] = encoded_cand
i += batch_size
return out_tensor
def get_token_tensor(sentence):
words = net_dictionary["words"]
tokenized = tokenize(sentence, split_sep=None)
return torch.LongTensor([words.get(w, NET_UNK_IDX) for w in tokenized])
def stringify(tensor):
iwords = net_dictionary["iwords"]
assert tensor.squeeze().dim() == 1, "Wrong tensor size!"
return " ".join(
iwords[i] for i in tensor.squeeze().cpu().numpy() if i != NET_PAD_IDX
).replace("@@ ", "")
# Remove any BPE tokenization
if args.candidates:
fixed_candidates = torch.load(args.candidates)
if args.n_candidates < fixed_candidates.size(0):
logging.warning(
f"Keeping only {args.n_candidates} / {fixed_candidates.size(0)} candidates"
)
fixed_candidates = fixed_candidates[: args.n_candidates]
else:
fixed_candidates, breakingpt, breakingpt2 = build_candidates(
args.max_cand_length, args.n_candidates
)
if args.cuda:
fixed_candidates = fixed_candidates.cuda(non_blocking=True)
logging.warning("Embedding candidates")
with torch.no_grad():
cand_embs = embed_candidates(fixed_candidates)
logging.warning("Done with candidates")
if args.save_candidates:
cand_path = os.path.join(args.output_folder, "reddit_cands_tokens.bin")
logging.warning(f"Saving candidates in {cand_path}")
torch.save(fixed_candidates, cand_path)
emb_path = os.path.join(args.output_folder, "reddit_cands.bin")
logging.warning(f"Saving candidate embs in {emb_path}")
torch.save(cand_embs, emb_path)
txt_path = os.path.join(args.output_folder, "reddit_cands.txt")
logging.warning(f"Saving candidate texts in {txt_path}")
with open(txt_path, "w") as f:
for candidate in fixed_candidates:
f.write(stringify(candidate))
f.write("\n")
logging.warning("Done saving files")
# ------------------------------------------------------------------------------
# Drop in to interactive mode
# ------------------------------------------------------------------------------
def predict(context, top_n=5, normalize=False):
"""
returns a list of top_n tuples ("sentence", "score")
"""
with torch.no_grad():
context = context.unsqueeze(0)
candidates = fixed_candidates
if args.cuda:
context = context.cuda(non_blocking=True)
ctx, _ = net(context, None)
scores, index = score_candidates(ctx, cand_embs, top_n, normalize)
response = []
outputs = []
for i, (score, index) in enumerate(zip(scores.squeeze(0), index.squeeze(0)), 1):
response.append((stringify(candidates[index]), float(score)))
if index < breakingpt:
outputs.append("EmpChat")
elif index < breakingpt2:
outputs.append("DailyDialog")
else:
outputs.append("Reddit")
return response, outputs
def get_bleu4(split, history_len=1):
"""
Print BLEU scores and output contexts and retrieved responses.
"""
if history_len < 1:
history_len = 1
source_ct = [0, 0, 0]
net_parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
bleu_parlai_dict = ParlAIDictionary.create_from_reddit_style(bleu_dictionary)
scorer = bleu.Scorer(BLEU_PAD_IDX, BLEU_EOS_IDX, BLEU_UNK_IDX)
outf = open("retrieved_split_" + args.name + "_" + split + ".txt", "w")
def _get_dataset(reddit_dict, parlai_dict):
if args.task == "dailydialog":
return DDDataset(
split,
parlai_dict,
data_folder=args.dailydialog_folder,
history_len=history_len,
)
elif args.task == "empchat":
return EmpDataset(
split,
parlai_dict,
data_folder=args.empchat_folder,
history_len=history_len,
reactonly=args.reactonly,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
elif args.task == "reddit":
return RedditDataset(
data_folder=args.reddit_folder,
chunk_id=999,
dict_=reddit_dict,
max_hist_len=history_len,
rm_blank_sentences=True,
)
else:
raise ValueError("Task unrecognized!")
net_dataset = _get_dataset(net_dictionary, net_parlai_dict)
bleu_dataset = _get_dataset(bleu_dictionary, bleu_parlai_dict)
sample_index = range(len(bleu_dataset))
for data_idx in sample_index:
net_context, _ = net_dataset[data_idx][:2]
bleu_context, bleu_sentence = bleu_dataset[data_idx][:2]
target_tokens = bleu_sentence
if args.fasttext is not None:
target_tokens = target_tokens[args.fasttext :]
context = bleu_parlai_dict.vec2txt(bleu_context.numpy().tolist())
responses, sources = predict(net_context)
response = responses[0][0]
source = sources[0]
if source == "Reddit":
source_ct[0] += 1
elif source == "EmpChat":
source_ct[1] += 1
else:
source_ct[2] += 1
if args.task == "empchat":
cid, sid = bleu_dataset.getid(data_idx)
else:
cid = sid = -1
# This is a hack, because the other datasets have no .getid() method
if args.fasttext is not None:
response = " ".join(response.split()[args.fasttext :])
outf.write("\t".join([str(cid), str(sid), context, response, source]) + "\n")
hypo_tokens = torch.IntTensor(bleu_parlai_dict.txt2vec(response))
# Use this tokenization even if a BERT tokenizer exists, to match the BLEU
# calculation when not using BERT
scorer.add(target_tokens.type(torch.IntTensor), hypo_tokens)
print(scorer.result_string(order=1))
print(scorer.result_string(order=2))
print(scorer.result_string(order=3))
print(scorer.result_string(order=4))
print(actual_ct)
print(
f"EmpatheticDialogues {int(source_ct[1]):d}: selected "
f"{float(source_ct[1]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[1]) / sum(actual_ct)}"
)
print(
f"DailyDialog {int(source_ct[2]):d}: selected "
f"{float(source_ct[2]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[2]) / sum(actual_ct)}"
)
print(
f"Reddit {int(source_ct[0]):d}: selected "
f"{float(source_ct[0]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[0]) / sum(actual_ct)}"
)
get_bleu4("valid", history_len=args.max_hist_len)
get_bleu4("test", history_len=args.max_hist_len) | ) | random_line_split |
retrieval_eval_bleu.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import os
import torch
from fairseq import bleu
from tqdm import tqdm
from empchat.datasets.dailydialog import DDDataset
from empchat.datasets.empchat import EmpDataset
from empchat.datasets.reddit import RedditDataset
from empchat.datasets.parlai_dictionary import ParlAIDictionary
from empchat.datasets.tokens import tokenize, PAD_TOKEN, START_OF_COMMENT, UNK_TOKEN
from empchat.models import load as load_model, score_candidates
from empchat.util import get_opt
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter("%(asctime)s: [ %(message)s ]", "%m/%d/%Y %I:%M:%S %p")
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# ------------------------------------------------------------------------------
# Commandline arguments & init
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument(
"--bleu-dict",
type=str,
default=None,
help=(
"Path to dictionary to use for BLEU calculation (if "
"not the same as the dictionary to use for retrieval)"
),
)
parser.add_argument(
"--candidates", type=str, default=None, help="Path to candidates to use"
)
parser.add_argument(
"--dailydialog-cands", action="store_true", help="Include DailyDialog candidates"
)
parser.add_argument(
"--dailydialog-folder", type=str, help="Path to DailyDialog data folder"
)
parser.add_argument(
"--empchat-cands",
action="store_true",
help="Include EmpatheticDialogues candidates",
)
parser.add_argument(
"--empchat-folder", type=str, help="Path to EmpatheticDialogues data folder"
)
parser.add_argument(
"--fasttext", type=int, default=None, help="Number of fastText labels to prepend"
)
parser.add_argument(
"--fasttext-path", type=str, default=None, help="Path to fastText classifier"
)
parser.add_argument(
"--fasttext-type",
type=str,
default=None,
help="Specifies labels of fastText classifier",
)
parser.add_argument("--gpu", type=int, default=-1, help="Specify GPU device id to use")
parser.add_argument(
"--max-cand-length",
type=int,
default=20,
help="Max candidate length in number of tokens",
)
parser.add_argument(
"--max-hist-len",
type=int,
default=1,
help="Max num conversation turns to use in context",
)
parser.add_argument(
"--model", "--pretrained", type=str, default=None, help="Path to model to use"
)
parser.add_argument(
"--n-candidates", type=int, default=int(1e6), help="Max number of candidates"
)
parser.add_argument("--name", type=str, help="Part of name of response output file")
parser.add_argument("--no-cuda", action="store_true", help="Use CPU only")
parser.add_argument(
"--normalize-cands", action="store_true", help="Normalize encoded candidates"
)
parser.add_argument(
"--output-folder", type=str, default=None, help="Path to output folder"
)
parser.add_argument(
"--reactonly",
action="store_true",
help="EmpatheticDialogues: only consider Listener responses",
)
parser.add_argument(
"--reddit-cands", action="store_true", help="Include Reddit candidates"
)
parser.add_argument("--reddit-folder", type=str, help="Path to Reddit data folder")
parser.add_argument(
"--save-candidates", action="store_true", help="If true, save candidate files"
)
parser.add_argument(
"--task",
type=str,
choices=["dailydialog", "empchat", "reddit"],
default="empchat",
help="Dataset for context/target-response pairs",
)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
if args.fasttext is not None:
args.max_cand_length += args.fasttext
net, net_dictionary = load_model(args.model, get_opt(existing_opt=args))
if "bert_tokenizer" in net_dictionary:
if args.task == "dailydialog":
raise NotImplementedError("BERT model currently incompatible with DailyDialog!")
if args.bleu_dict is not None:
_, bleu_dictionary = load_model(args.bleu_dict, get_opt(existing_opt=args))
else:
bleu_dictionary = net_dictionary
paramnum = 0
trainable = 0
for parameter in net.parameters():
if parameter.requires_grad:
trainable += parameter.numel()
paramnum += parameter.numel()
print(paramnum, trainable)
print(type(net_dictionary))
NET_PAD_IDX = net_dictionary["words"][PAD_TOKEN]
NET_UNK_IDX = net_dictionary["words"][UNK_TOKEN]
print(type(bleu_dictionary))
BLEU_PAD_IDX = bleu_dictionary["words"][PAD_TOKEN]
BLEU_UNK_IDX = bleu_dictionary["words"][UNK_TOKEN]
BLEU_EOS_IDX = bleu_dictionary["words"][START_OF_COMMENT]
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
actual_ct = [0, 0, 0]
if args.cuda:
net = torch.nn.DataParallel(net)
net.cuda()
net.eval()
def pad(items):
max_len = max(len(i) for i in items)
tensor = torch.LongTensor(len(items), max_len).fill_(NET_PAD_IDX)
for i, sentence in enumerate(items):
tensor[i, : sentence.size(0)] = sentence
return tensor
def build_candidates(
max_cand_length, n_cands=int(1e7), rm_duplicates=True, rm_starting_gt=True
):
global actual_ct
global args
tensor = torch.LongTensor(n_cands, max_cand_length).fill_(NET_PAD_IDX)
i = 0
chunk = 422
if "bert_tokenizer" in net_dictionary:
gt_tokens = torch.LongTensor(
net_dictionary["bert_tokenizer"].convert_tokens_to_ids(["&", "g", "##t"])
)
else:
gt_index = net_dictionary["words"][">"]
lt_index = net_dictionary["words"]["<"]
unk_index = net_dictionary["words"]["<UNK>"]
n_duplicates = n_start_gt = 0
if rm_duplicates:
|
def _has_lts(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
tokens = net_dictionary["bert_tokenizer"].convert_ids_to_tokens(
sentence_.tolist()
)
return "& l ##t" in " ".join(tokens)
else:
return torch.sum(sentence_ == lt_index).gt(0)
def _starts_with_gt(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
if sentence_.size(0) < 3:
return False
else:
return torch.eq(sentence_[:3], gt_tokens).all()
else:
return sentence_[0].item == gt_index
parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
if args.empchat_cands:
dataset = EmpDataset(
"train",
parlai_dict,
data_folder=args.empchat_folder,
reactonly=False,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence, _ = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
breakpoint_ = i
actual_ct[1] = i
if args.dailydialog_cands:
dataset = DDDataset("train", parlai_dict, data_folder=args.dailydialog_folder)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
bp2 = i
actual_ct[2] = i - breakpoint_
if args.reddit_cands:
while i < n_cands:
chunk += 1
logging.info(f"Loaded {i} / {n_cands} candidates")
dataset = RedditDataset(args.reddit_folder, chunk, net_dictionary)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if sent_length == 0:
print(f"Reddit sentence {data_idx} is of length 0.")
continue
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
actual_ct[0] = i - bp2
logging.info(
f"Loaded {i} candidates, {n_start_gt} start with >, {n_duplicates} duplicates"
)
args.n_candidates = i
return tensor[:i, :], breakpoint_, bp2
def embed_candidates(candidates):
out_tensor = None
i = 0
# ch = candidates.split(2048, dim=0)
ch = candidates.split(1024, dim=0)
for chunk in tqdm(range(len(ch))):
_, encoded_cand = net(None, ch[chunk])
if out_tensor is None:
out_tensor = torch.FloatTensor(candidates.size(0), encoded_cand.size(1))
if args.cuda:
out_tensor = out_tensor.cuda()
if args.normalize_cands:
encoded_cand /= encoded_cand.norm(2, dim=1, keepdim=True)
batch_size = encoded_cand.size(0)
out_tensor[i : i + batch_size] = encoded_cand
i += batch_size
return out_tensor
def get_token_tensor(sentence):
words = net_dictionary["words"]
tokenized = tokenize(sentence, split_sep=None)
return torch.LongTensor([words.get(w, NET_UNK_IDX) for w in tokenized])
def stringify(tensor):
iwords = net_dictionary["iwords"]
assert tensor.squeeze().dim() == 1, "Wrong tensor size!"
return " ".join(
iwords[i] for i in tensor.squeeze().cpu().numpy() if i != NET_PAD_IDX
).replace("@@ ", "")
# Remove any BPE tokenization
if args.candidates:
fixed_candidates = torch.load(args.candidates)
if args.n_candidates < fixed_candidates.size(0):
logging.warning(
f"Keeping only {args.n_candidates} / {fixed_candidates.size(0)} candidates"
)
fixed_candidates = fixed_candidates[: args.n_candidates]
else:
fixed_candidates, breakingpt, breakingpt2 = build_candidates(
args.max_cand_length, args.n_candidates
)
if args.cuda:
fixed_candidates = fixed_candidates.cuda(non_blocking=True)
logging.warning("Embedding candidates")
with torch.no_grad():
cand_embs = embed_candidates(fixed_candidates)
logging.warning("Done with candidates")
if args.save_candidates:
cand_path = os.path.join(args.output_folder, "reddit_cands_tokens.bin")
logging.warning(f"Saving candidates in {cand_path}")
torch.save(fixed_candidates, cand_path)
emb_path = os.path.join(args.output_folder, "reddit_cands.bin")
logging.warning(f"Saving candidate embs in {emb_path}")
torch.save(cand_embs, emb_path)
txt_path = os.path.join(args.output_folder, "reddit_cands.txt")
logging.warning(f"Saving candidate texts in {txt_path}")
with open(txt_path, "w") as f:
for candidate in fixed_candidates:
f.write(stringify(candidate))
f.write("\n")
logging.warning("Done saving files")
# ------------------------------------------------------------------------------
# Drop in to interactive mode
# ------------------------------------------------------------------------------
def predict(context, top_n=5, normalize=False):
"""
returns a list of top_n tuples ("sentence", "score")
"""
with torch.no_grad():
context = context.unsqueeze(0)
candidates = fixed_candidates
if args.cuda:
context = context.cuda(non_blocking=True)
ctx, _ = net(context, None)
scores, index = score_candidates(ctx, cand_embs, top_n, normalize)
response = []
outputs = []
for i, (score, index) in enumerate(zip(scores.squeeze(0), index.squeeze(0)), 1):
response.append((stringify(candidates[index]), float(score)))
if index < breakingpt:
outputs.append("EmpChat")
elif index < breakingpt2:
outputs.append("DailyDialog")
else:
outputs.append("Reddit")
return response, outputs
def get_bleu4(split, history_len=1):
"""
Print BLEU scores and output contexts and retrieved responses.
"""
if history_len < 1:
history_len = 1
source_ct = [0, 0, 0]
net_parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
bleu_parlai_dict = ParlAIDictionary.create_from_reddit_style(bleu_dictionary)
scorer = bleu.Scorer(BLEU_PAD_IDX, BLEU_EOS_IDX, BLEU_UNK_IDX)
outf = open("retrieved_split_" + args.name + "_" + split + ".txt", "w")
def _get_dataset(reddit_dict, parlai_dict):
if args.task == "dailydialog":
return DDDataset(
split,
parlai_dict,
data_folder=args.dailydialog_folder,
history_len=history_len,
)
elif args.task == "empchat":
return EmpDataset(
split,
parlai_dict,
data_folder=args.empchat_folder,
history_len=history_len,
reactonly=args.reactonly,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
elif args.task == "reddit":
return RedditDataset(
data_folder=args.reddit_folder,
chunk_id=999,
dict_=reddit_dict,
max_hist_len=history_len,
rm_blank_sentences=True,
)
else:
raise ValueError("Task unrecognized!")
net_dataset = _get_dataset(net_dictionary, net_parlai_dict)
bleu_dataset = _get_dataset(bleu_dictionary, bleu_parlai_dict)
sample_index = range(len(bleu_dataset))
for data_idx in sample_index:
net_context, _ = net_dataset[data_idx][:2]
bleu_context, bleu_sentence = bleu_dataset[data_idx][:2]
target_tokens = bleu_sentence
if args.fasttext is not None:
target_tokens = target_tokens[args.fasttext :]
context = bleu_parlai_dict.vec2txt(bleu_context.numpy().tolist())
responses, sources = predict(net_context)
response = responses[0][0]
source = sources[0]
if source == "Reddit":
source_ct[0] += 1
elif source == "EmpChat":
source_ct[1] += 1
else:
source_ct[2] += 1
if args.task == "empchat":
cid, sid = bleu_dataset.getid(data_idx)
else:
cid = sid = -1
# This is a hack, because the other datasets have no .getid() method
if args.fasttext is not None:
response = " ".join(response.split()[args.fasttext :])
outf.write("\t".join([str(cid), str(sid), context, response, source]) + "\n")
hypo_tokens = torch.IntTensor(bleu_parlai_dict.txt2vec(response))
# Use this tokenization even if a BERT tokenizer exists, to match the BLEU
# calculation when not using BERT
scorer.add(target_tokens.type(torch.IntTensor), hypo_tokens)
print(scorer.result_string(order=1))
print(scorer.result_string(order=2))
print(scorer.result_string(order=3))
print(scorer.result_string(order=4))
print(actual_ct)
print(
f"EmpatheticDialogues {int(source_ct[1]):d}: selected "
f"{float(source_ct[1]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[1]) / sum(actual_ct)}"
)
print(
f"DailyDialog {int(source_ct[2]):d}: selected "
f"{float(source_ct[2]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[2]) / sum(actual_ct)}"
)
print(
f"Reddit {int(source_ct[0]):d}: selected "
f"{float(source_ct[0]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[0]) / sum(actual_ct)}"
)
get_bleu4("valid", history_len=args.max_hist_len)
get_bleu4("test", history_len=args.max_hist_len)
| all_sent = set() | conditional_block |
retrieval_eval_bleu.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import os
import torch
from fairseq import bleu
from tqdm import tqdm
from empchat.datasets.dailydialog import DDDataset
from empchat.datasets.empchat import EmpDataset
from empchat.datasets.reddit import RedditDataset
from empchat.datasets.parlai_dictionary import ParlAIDictionary
from empchat.datasets.tokens import tokenize, PAD_TOKEN, START_OF_COMMENT, UNK_TOKEN
from empchat.models import load as load_model, score_candidates
from empchat.util import get_opt
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter("%(asctime)s: [ %(message)s ]", "%m/%d/%Y %I:%M:%S %p")
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# ------------------------------------------------------------------------------
# Commandline arguments & init
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument(
"--bleu-dict",
type=str,
default=None,
help=(
"Path to dictionary to use for BLEU calculation (if "
"not the same as the dictionary to use for retrieval)"
),
)
parser.add_argument(
"--candidates", type=str, default=None, help="Path to candidates to use"
)
parser.add_argument(
"--dailydialog-cands", action="store_true", help="Include DailyDialog candidates"
)
parser.add_argument(
"--dailydialog-folder", type=str, help="Path to DailyDialog data folder"
)
parser.add_argument(
"--empchat-cands",
action="store_true",
help="Include EmpatheticDialogues candidates",
)
parser.add_argument(
"--empchat-folder", type=str, help="Path to EmpatheticDialogues data folder"
)
parser.add_argument(
"--fasttext", type=int, default=None, help="Number of fastText labels to prepend"
)
parser.add_argument(
"--fasttext-path", type=str, default=None, help="Path to fastText classifier"
)
parser.add_argument(
"--fasttext-type",
type=str,
default=None,
help="Specifies labels of fastText classifier",
)
parser.add_argument("--gpu", type=int, default=-1, help="Specify GPU device id to use")
parser.add_argument(
"--max-cand-length",
type=int,
default=20,
help="Max candidate length in number of tokens",
)
parser.add_argument(
"--max-hist-len",
type=int,
default=1,
help="Max num conversation turns to use in context",
)
parser.add_argument(
"--model", "--pretrained", type=str, default=None, help="Path to model to use"
)
parser.add_argument(
"--n-candidates", type=int, default=int(1e6), help="Max number of candidates"
)
parser.add_argument("--name", type=str, help="Part of name of response output file")
parser.add_argument("--no-cuda", action="store_true", help="Use CPU only")
parser.add_argument(
"--normalize-cands", action="store_true", help="Normalize encoded candidates"
)
parser.add_argument(
"--output-folder", type=str, default=None, help="Path to output folder"
)
parser.add_argument(
"--reactonly",
action="store_true",
help="EmpatheticDialogues: only consider Listener responses",
)
parser.add_argument(
"--reddit-cands", action="store_true", help="Include Reddit candidates"
)
parser.add_argument("--reddit-folder", type=str, help="Path to Reddit data folder")
parser.add_argument(
"--save-candidates", action="store_true", help="If true, save candidate files"
)
parser.add_argument(
"--task",
type=str,
choices=["dailydialog", "empchat", "reddit"],
default="empchat",
help="Dataset for context/target-response pairs",
)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
if args.fasttext is not None:
args.max_cand_length += args.fasttext
net, net_dictionary = load_model(args.model, get_opt(existing_opt=args))
if "bert_tokenizer" in net_dictionary:
if args.task == "dailydialog":
raise NotImplementedError("BERT model currently incompatible with DailyDialog!")
if args.bleu_dict is not None:
_, bleu_dictionary = load_model(args.bleu_dict, get_opt(existing_opt=args))
else:
bleu_dictionary = net_dictionary
paramnum = 0
trainable = 0
for parameter in net.parameters():
if parameter.requires_grad:
trainable += parameter.numel()
paramnum += parameter.numel()
print(paramnum, trainable)
print(type(net_dictionary))
NET_PAD_IDX = net_dictionary["words"][PAD_TOKEN]
NET_UNK_IDX = net_dictionary["words"][UNK_TOKEN]
print(type(bleu_dictionary))
BLEU_PAD_IDX = bleu_dictionary["words"][PAD_TOKEN]
BLEU_UNK_IDX = bleu_dictionary["words"][UNK_TOKEN]
BLEU_EOS_IDX = bleu_dictionary["words"][START_OF_COMMENT]
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
actual_ct = [0, 0, 0]
if args.cuda:
net = torch.nn.DataParallel(net)
net.cuda()
net.eval()
def pad(items):
max_len = max(len(i) for i in items)
tensor = torch.LongTensor(len(items), max_len).fill_(NET_PAD_IDX)
for i, sentence in enumerate(items):
tensor[i, : sentence.size(0)] = sentence
return tensor
def build_candidates(
max_cand_length, n_cands=int(1e7), rm_duplicates=True, rm_starting_gt=True
):
global actual_ct
global args
tensor = torch.LongTensor(n_cands, max_cand_length).fill_(NET_PAD_IDX)
i = 0
chunk = 422
if "bert_tokenizer" in net_dictionary:
gt_tokens = torch.LongTensor(
net_dictionary["bert_tokenizer"].convert_tokens_to_ids(["&", "g", "##t"])
)
else:
gt_index = net_dictionary["words"][">"]
lt_index = net_dictionary["words"]["<"]
unk_index = net_dictionary["words"]["<UNK>"]
n_duplicates = n_start_gt = 0
if rm_duplicates:
all_sent = set()
def _has_lts(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
tokens = net_dictionary["bert_tokenizer"].convert_ids_to_tokens(
sentence_.tolist()
)
return "& l ##t" in " ".join(tokens)
else:
return torch.sum(sentence_ == lt_index).gt(0)
def | (sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
if sentence_.size(0) < 3:
return False
else:
return torch.eq(sentence_[:3], gt_tokens).all()
else:
return sentence_[0].item == gt_index
parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
if args.empchat_cands:
dataset = EmpDataset(
"train",
parlai_dict,
data_folder=args.empchat_folder,
reactonly=False,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence, _ = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
breakpoint_ = i
actual_ct[1] = i
if args.dailydialog_cands:
dataset = DDDataset("train", parlai_dict, data_folder=args.dailydialog_folder)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
bp2 = i
actual_ct[2] = i - breakpoint_
if args.reddit_cands:
while i < n_cands:
chunk += 1
logging.info(f"Loaded {i} / {n_cands} candidates")
dataset = RedditDataset(args.reddit_folder, chunk, net_dictionary)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if sent_length == 0:
print(f"Reddit sentence {data_idx} is of length 0.")
continue
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
actual_ct[0] = i - bp2
logging.info(
f"Loaded {i} candidates, {n_start_gt} start with >, {n_duplicates} duplicates"
)
args.n_candidates = i
return tensor[:i, :], breakpoint_, bp2
def embed_candidates(candidates):
out_tensor = None
i = 0
# ch = candidates.split(2048, dim=0)
ch = candidates.split(1024, dim=0)
for chunk in tqdm(range(len(ch))):
_, encoded_cand = net(None, ch[chunk])
if out_tensor is None:
out_tensor = torch.FloatTensor(candidates.size(0), encoded_cand.size(1))
if args.cuda:
out_tensor = out_tensor.cuda()
if args.normalize_cands:
encoded_cand /= encoded_cand.norm(2, dim=1, keepdim=True)
batch_size = encoded_cand.size(0)
out_tensor[i : i + batch_size] = encoded_cand
i += batch_size
return out_tensor
def get_token_tensor(sentence):
words = net_dictionary["words"]
tokenized = tokenize(sentence, split_sep=None)
return torch.LongTensor([words.get(w, NET_UNK_IDX) for w in tokenized])
def stringify(tensor):
iwords = net_dictionary["iwords"]
assert tensor.squeeze().dim() == 1, "Wrong tensor size!"
return " ".join(
iwords[i] for i in tensor.squeeze().cpu().numpy() if i != NET_PAD_IDX
).replace("@@ ", "")
# Remove any BPE tokenization
if args.candidates:
fixed_candidates = torch.load(args.candidates)
if args.n_candidates < fixed_candidates.size(0):
logging.warning(
f"Keeping only {args.n_candidates} / {fixed_candidates.size(0)} candidates"
)
fixed_candidates = fixed_candidates[: args.n_candidates]
else:
fixed_candidates, breakingpt, breakingpt2 = build_candidates(
args.max_cand_length, args.n_candidates
)
if args.cuda:
fixed_candidates = fixed_candidates.cuda(non_blocking=True)
logging.warning("Embedding candidates")
with torch.no_grad():
cand_embs = embed_candidates(fixed_candidates)
logging.warning("Done with candidates")
if args.save_candidates:
cand_path = os.path.join(args.output_folder, "reddit_cands_tokens.bin")
logging.warning(f"Saving candidates in {cand_path}")
torch.save(fixed_candidates, cand_path)
emb_path = os.path.join(args.output_folder, "reddit_cands.bin")
logging.warning(f"Saving candidate embs in {emb_path}")
torch.save(cand_embs, emb_path)
txt_path = os.path.join(args.output_folder, "reddit_cands.txt")
logging.warning(f"Saving candidate texts in {txt_path}")
with open(txt_path, "w") as f:
for candidate in fixed_candidates:
f.write(stringify(candidate))
f.write("\n")
logging.warning("Done saving files")
# ------------------------------------------------------------------------------
# Drop in to interactive mode
# ------------------------------------------------------------------------------
def predict(context, top_n=5, normalize=False):
"""
returns a list of top_n tuples ("sentence", "score")
"""
with torch.no_grad():
context = context.unsqueeze(0)
candidates = fixed_candidates
if args.cuda:
context = context.cuda(non_blocking=True)
ctx, _ = net(context, None)
scores, index = score_candidates(ctx, cand_embs, top_n, normalize)
response = []
outputs = []
for i, (score, index) in enumerate(zip(scores.squeeze(0), index.squeeze(0)), 1):
response.append((stringify(candidates[index]), float(score)))
if index < breakingpt:
outputs.append("EmpChat")
elif index < breakingpt2:
outputs.append("DailyDialog")
else:
outputs.append("Reddit")
return response, outputs
def get_bleu4(split, history_len=1):
"""
Print BLEU scores and output contexts and retrieved responses.
"""
if history_len < 1:
history_len = 1
source_ct = [0, 0, 0]
net_parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
bleu_parlai_dict = ParlAIDictionary.create_from_reddit_style(bleu_dictionary)
scorer = bleu.Scorer(BLEU_PAD_IDX, BLEU_EOS_IDX, BLEU_UNK_IDX)
outf = open("retrieved_split_" + args.name + "_" + split + ".txt", "w")
def _get_dataset(reddit_dict, parlai_dict):
if args.task == "dailydialog":
return DDDataset(
split,
parlai_dict,
data_folder=args.dailydialog_folder,
history_len=history_len,
)
elif args.task == "empchat":
return EmpDataset(
split,
parlai_dict,
data_folder=args.empchat_folder,
history_len=history_len,
reactonly=args.reactonly,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
elif args.task == "reddit":
return RedditDataset(
data_folder=args.reddit_folder,
chunk_id=999,
dict_=reddit_dict,
max_hist_len=history_len,
rm_blank_sentences=True,
)
else:
raise ValueError("Task unrecognized!")
net_dataset = _get_dataset(net_dictionary, net_parlai_dict)
bleu_dataset = _get_dataset(bleu_dictionary, bleu_parlai_dict)
sample_index = range(len(bleu_dataset))
for data_idx in sample_index:
net_context, _ = net_dataset[data_idx][:2]
bleu_context, bleu_sentence = bleu_dataset[data_idx][:2]
target_tokens = bleu_sentence
if args.fasttext is not None:
target_tokens = target_tokens[args.fasttext :]
context = bleu_parlai_dict.vec2txt(bleu_context.numpy().tolist())
responses, sources = predict(net_context)
response = responses[0][0]
source = sources[0]
if source == "Reddit":
source_ct[0] += 1
elif source == "EmpChat":
source_ct[1] += 1
else:
source_ct[2] += 1
if args.task == "empchat":
cid, sid = bleu_dataset.getid(data_idx)
else:
cid = sid = -1
# This is a hack, because the other datasets have no .getid() method
if args.fasttext is not None:
response = " ".join(response.split()[args.fasttext :])
outf.write("\t".join([str(cid), str(sid), context, response, source]) + "\n")
hypo_tokens = torch.IntTensor(bleu_parlai_dict.txt2vec(response))
# Use this tokenization even if a BERT tokenizer exists, to match the BLEU
# calculation when not using BERT
scorer.add(target_tokens.type(torch.IntTensor), hypo_tokens)
print(scorer.result_string(order=1))
print(scorer.result_string(order=2))
print(scorer.result_string(order=3))
print(scorer.result_string(order=4))
print(actual_ct)
print(
f"EmpatheticDialogues {int(source_ct[1]):d}: selected "
f"{float(source_ct[1]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[1]) / sum(actual_ct)}"
)
print(
f"DailyDialog {int(source_ct[2]):d}: selected "
f"{float(source_ct[2]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[2]) / sum(actual_ct)}"
)
print(
f"Reddit {int(source_ct[0]):d}: selected "
f"{float(source_ct[0]) / sum(source_ct)}%, but total: "
f"{float(actual_ct[0]) / sum(actual_ct)}"
)
get_bleu4("valid", history_len=args.max_hist_len)
get_bleu4("test", history_len=args.max_hist_len)
| _starts_with_gt | identifier_name |
debugger-script.js | /*
* Copyright (C) 2010 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"use strict";
(function () {
var DebuggerScript = {};
/** @enum */
const PauseOnExceptionsState = {
DontPauseOnExceptions: 0,
PauseOnAllExceptions: 1,
PauseOnUncaughtExceptions: 2
};
DebuggerScript.PauseOnExceptionsState = PauseOnExceptionsState;
DebuggerScript._pauseOnExceptionsState = DebuggerScript.PauseOnExceptionsState.DontPauseOnExceptions;
Debug.clearBreakOnException();
Debug.clearBreakOnUncaughtException();
/**
* @param {?CompileEvent} eventData
*/
DebuggerScript.getAfterCompileScript = function(eventData)
{
var script = eventData.script().value();
if (!script.is_debugger_script)
return DebuggerScript._formatScript(eventData.script().value());
return null;
}
/** @type {!Map<!ScopeType, string>} */
DebuggerScript._scopeTypeNames = new Map();
DebuggerScript._scopeTypeNames.set(ScopeType.Global, "global");
DebuggerScript._scopeTypeNames.set(ScopeType.Local, "local");
DebuggerScript._scopeTypeNames.set(ScopeType.With, "with");
DebuggerScript._scopeTypeNames.set(ScopeType.Closure, "closure");
DebuggerScript._scopeTypeNames.set(ScopeType.Catch, "catch");
DebuggerScript._scopeTypeNames.set(ScopeType.Block, "block");
DebuggerScript._scopeTypeNames.set(ScopeType.Script, "script");
/**
* @param {function()} fun
* @return {?Array<!Scope>}
*/
DebuggerScript.getFunctionScopes = function(fun)
{
var mirror = MakeMirror(fun);
if (!mirror.isFunction())
return null;
var functionMirror = /** @type {!FunctionMirror} */(mirror);
var count = functionMirror.scopeCount();
if (count == 0)
return null;
var result = [];
for (var i = 0; i < count; i++) {
var scopeDetails = functionMirror.scope(i).details();
var scopeObject = DebuggerScript._buildScopeObject(scopeDetails.type(), scopeDetails.object());
if (!scopeObject)
continue;
result.push({
type: /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeDetails.type())),
object: scopeObject,
name: scopeDetails.name() || ""
});
}
return result;
}
/**
* @param {Object} object
* @return {?RawLocation}
*/
DebuggerScript.getGeneratorObjectLocation = function(object)
{
var mirror = MakeMirror(object, true /* transient */);
if (!mirror.isGenerator())
return null;
var generatorMirror = /** @type {!GeneratorMirror} */(mirror);
var funcMirror = generatorMirror.func();
if (!funcMirror.resolved())
return null;
var location = generatorMirror.sourceLocation() || funcMirror.sourceLocation();
var script = funcMirror.script();
if (script && location) {
return {
scriptId: "" + script.id(),
lineNumber: location.line,
columnNumber: location.column
};
}
return null;
}
/**
* @param {Object} object
* @return {!Array<!{value: *}>|undefined}
*/
DebuggerScript.getCollectionEntries = function(object)
{
var mirror = MakeMirror(object, true /* transient */);
if (mirror.isMap())
return /** @type {!MapMirror} */(mirror).entries();
if (mirror.isSet() || mirror.isIterator()) {
var result = [];
var values = mirror.isSet() ? /** @type {!SetMirror} */(mirror).values() : /** @type {!IteratorMirror} */(mirror).preview();
for (var i = 0; i < values.length; ++i)
result.push({ value: values[i] });
return result;
}
}
/**
* @param {string|undefined} contextData
* @return {number}
*/
DebuggerScript._executionContextId = function(contextData)
{
if (!contextData)
return 0;
var match = contextData.match(/^[^,]*,([^,]*),.*$/);
if (!match)
return 0;
return parseInt(match[1], 10) || 0;
}
/**
* @param {string|undefined} contextData
* @return {string}
*/
DebuggerScript._executionContextAuxData = function(contextData)
{
if (!contextData)
return "";
var match = contextData.match(/^[^,]*,[^,]*,(.*)$/);
return match ? match[1] : "";
}
/**
* @param {string} contextGroupId
* @return {!Array<!FormattedScript>}
*/
DebuggerScript.getScripts = function(contextGroupId)
{
var result = [];
var scripts = Debug.scripts();
var contextDataPrefix = null;
if (contextGroupId)
contextDataPrefix = contextGroupId + ",";
for (var i = 0; i < scripts.length; ++i) {
var script = scripts[i];
if (contextDataPrefix) {
if (!script.context_data)
continue;
// Context data is a string in the following format:
// <contextGroupId>,<contextId>,<auxData>
if (script.context_data.indexOf(contextDataPrefix) !== 0)
continue;
}
if (script.is_debugger_script)
continue;
result.push(DebuggerScript._formatScript(script));
}
return result;
}
/**
* @param {!Script} script
* @return {!FormattedScript}
*/
DebuggerScript._formatScript = function(script)
{
var lineEnds = script.line_ends;
var lineCount = lineEnds.length;
var endLine = script.line_offset + lineCount - 1;
var endColumn; | endLine += 1;
endColumn = 0;
} else {
if (lineCount === 1)
endColumn = script.source.length + script.column_offset;
else
endColumn = script.source.length - (lineEnds[lineCount - 2] + 1);
}
return {
id: script.id,
name: script.nameOrSourceURL(),
sourceURL: script.source_url,
sourceMappingURL: script.source_mapping_url,
source: script.source,
startLine: script.line_offset,
startColumn: script.column_offset,
endLine: endLine,
endColumn: endColumn,
executionContextId: DebuggerScript._executionContextId(script.context_data),
// Note that we cannot derive aux data from context id because of compilation cache.
executionContextAuxData: DebuggerScript._executionContextAuxData(script.context_data)
};
}
/**
* @param {!ExecutionState} execState
* @param {!BreakpointInfo} info
* @return {string|undefined}
*/
DebuggerScript.setBreakpoint = function(execState, info)
{
var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.Statement);
var locations = Debug.findBreakPointActualLocations(breakId);
if (!locations.length)
return undefined;
info.lineNumber = locations[0].line;
info.columnNumber = locations[0].column;
return breakId.toString();
}
/**
* @param {!ExecutionState} execState
* @param {!{breakpointId: number}} info
*/
DebuggerScript.removeBreakpoint = function(execState, info)
{
Debug.findBreakPoint(info.breakpointId, true);
}
/**
* @return {number}
*/
DebuggerScript.pauseOnExceptionsState = function()
{
return DebuggerScript._pauseOnExceptionsState;
}
/**
* @param {number} newState
*/
DebuggerScript.setPauseOnExceptionsState = function(newState)
{
DebuggerScript._pauseOnExceptionsState = newState;
if (DebuggerScript.PauseOnExceptionsState.PauseOnAllExceptions === newState)
Debug.setBreakOnException();
else
Debug.clearBreakOnException();
if (DebuggerScript.PauseOnExceptionsState.PauseOnUncaughtExceptions === newState)
Debug.setBreakOnUncaughtException();
else
Debug.clearBreakOnUncaughtException();
}
/**
* @param {!ExecutionState} execState
* @param {number} limit
* @return {!Array<!JavaScriptCallFrame>}
*/
DebuggerScript.currentCallFrames = function(execState, limit)
{
var frames = [];
for (var i = 0; i < execState.frameCount() && (!limit || i < limit); ++i)
frames.push(DebuggerScript._frameMirrorToJSCallFrame(execState.frame(i)));
return frames;
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepIntoStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepIn);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepFrameStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepFrame);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOverStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepNext);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOutOfFunction = function(execState)
{
execState.prepareStep(Debug.StepAction.StepOut);
}
DebuggerScript.clearStepping = function()
{
Debug.clearStepping();
}
// Returns array in form:
// [ 0, <v8_result_report> ] in case of success
// or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
// or throws exception with message.
/**
* @param {number} scriptId
* @param {string} newSource
* @param {boolean} preview
* @return {!Array<*>}
*/
DebuggerScript.liveEditScriptSource = function(scriptId, newSource, preview)
{
var scripts = Debug.scripts();
var scriptToEdit = null;
for (var i = 0; i < scripts.length; i++) {
if (scripts[i].id == scriptId) {
scriptToEdit = scripts[i];
break;
}
}
if (!scriptToEdit)
throw("Script not found");
var changeLog = [];
try {
var result = Debug.LiveEdit.SetScriptSource(scriptToEdit, newSource, preview, changeLog);
return [0, result.stack_modified];
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
var details = /** @type {!LiveEditErrorDetails} */(e.details);
if (details.type === "liveedit_compile_error") {
var startPosition = details.position.start;
return [1, String(e), String(details.syntaxErrorMessage), Number(startPosition.line), Number(startPosition.column)];
}
}
throw e;
}
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.clearBreakpoints = function(execState)
{
Debug.clearAllBreakPoints();
}
/**
* @param {!ExecutionState} execState
* @param {!{enabled: boolean}} info
*/
DebuggerScript.setBreakpointsActivated = function(execState, info)
{
Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
}
/**
* @param {!BreakEvent} eventData
*/
DebuggerScript.getBreakpointNumbers = function(eventData)
{
var breakpoints = eventData.breakPointsHit();
var numbers = [];
if (!breakpoints)
return numbers;
for (var i = 0; i < breakpoints.length; i++) {
var breakpoint = breakpoints[i];
var scriptBreakPoint = breakpoint.script_break_point();
numbers.push(scriptBreakPoint ? scriptBreakPoint.number() : breakpoint.number());
}
return numbers;
}
// NOTE: This function is performance critical, as it can be run on every
// statement that generates an async event (like addEventListener) to support
// asynchronous call stacks. Thus, when possible, initialize the data lazily.
/**
* @param {!FrameMirror} frameMirror
* @return {!JavaScriptCallFrame}
*/
DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
{
// Stuff that can not be initialized lazily (i.e. valid while paused with a valid break_id).
// The frameMirror and scopeMirror can be accessed only while paused on the debugger.
var frameDetails = frameMirror.details();
var funcObject = frameDetails.func();
var sourcePosition = frameDetails.sourcePosition();
var thisObject = frameDetails.receiver();
var isAtReturn = !!frameDetails.isAtReturn();
var returnValue = isAtReturn ? frameDetails.returnValue() : undefined;
var scopeMirrors = frameMirror.allScopes(false);
/** @type {!Array<number>} */
var scopeTypes = new Array(scopeMirrors.length);
/** @type {?Array<!Object>} */
var scopeObjects = new Array(scopeMirrors.length);
/** @type {!Array<string|undefined>} */
var scopeNames = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeStartPositions = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeEndPositions = new Array(scopeMirrors.length);
/** @type {?Array<function()|null>} */
var scopeFunctions = new Array(scopeMirrors.length);
for (var i = 0; i < scopeMirrors.length; ++i) {
var scopeDetails = scopeMirrors[i].details();
scopeTypes[i] = scopeDetails.type();
scopeObjects[i] = scopeDetails.object();
scopeNames[i] = scopeDetails.name();
scopeStartPositions[i] = scopeDetails.startPosition ? scopeDetails.startPosition() : 0;
scopeEndPositions[i] = scopeDetails.endPosition ? scopeDetails.endPosition() : 0;
scopeFunctions[i] = scopeDetails.func ? scopeDetails.func() : null;
}
// Calculated lazily.
var scopeChain;
var funcMirror;
var location;
/** @type {!Array<?RawLocation>} */
var scopeStartLocations;
/** @type {!Array<?RawLocation>} */
var scopeEndLocations;
var details;
/**
* @param {!ScriptMirror|undefined} script
* @param {number} pos
* @return {?RawLocation}
*/
function createLocation(script, pos)
{
if (!script)
return null;
var location = script.locationFromPosition(pos, true);
return {
"lineNumber": location.line,
"columnNumber": location.column,
"scriptId": String(script.id())
}
}
/**
* @return {!Array<!Object>}
*/
function ensureScopeChain()
{
if (!scopeChain) {
scopeChain = [];
scopeStartLocations = [];
scopeEndLocations = [];
for (var i = 0, j = 0; i < scopeObjects.length; ++i) {
var scopeObject = DebuggerScript._buildScopeObject(scopeTypes[i], scopeObjects[i]);
if (scopeObject) {
scopeTypes[j] = scopeTypes[i];
scopeNames[j] = scopeNames[i];
scopeChain[j] = scopeObject;
var funcMirror = scopeFunctions ? MakeMirror(scopeFunctions[i]) : null;
if (!funcMirror || !funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
var script = /** @type {!FunctionMirror} */(funcMirror).script();
scopeStartLocations[j] = createLocation(script, scopeStartPositions[i]);
scopeEndLocations[j] = createLocation(script, scopeEndPositions[i]);
++j;
}
}
scopeTypes.length = scopeChain.length;
scopeNames.length = scopeChain.length;
scopeObjects = null; // Free for GC.
scopeFunctions = null;
scopeStartPositions = null;
scopeEndPositions = null;
}
return scopeChain;
}
/**
* @return {!JavaScriptCallFrameDetails}
*/
function lazyDetails()
{
if (!details) {
var scopeObjects = ensureScopeChain();
var script = ensureFuncMirror().script();
/** @type {!Array<Scope>} */
var scopes = [];
for (var i = 0; i < scopeObjects.length; ++i) {
var scope = {
"type": /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeTypes[i])),
"object": scopeObjects[i],
};
if (scopeNames[i])
scope.name = scopeNames[i];
if (scopeStartLocations[i])
scope.startLocation = /** @type {!RawLocation} */(scopeStartLocations[i]);
if (scopeEndLocations[i])
scope.endLocation = /** @type {!RawLocation} */(scopeEndLocations[i]);
scopes.push(scope);
}
details = {
"functionName": ensureFuncMirror().debugName(),
"location": {
"lineNumber": line(),
"columnNumber": column(),
"scriptId": String(script.id())
},
"this": thisObject,
"scopeChain": scopes
};
var functionLocation = ensureFuncMirror().sourceLocation();
if (functionLocation) {
details.functionLocation = {
"lineNumber": functionLocation.line,
"columnNumber": functionLocation.column,
"scriptId": String(script.id())
};
}
if (isAtReturn)
details.returnValue = returnValue;
}
return details;
}
/**
* @return {!FunctionMirror}
*/
function ensureFuncMirror()
{
if (!funcMirror) {
funcMirror = MakeMirror(funcObject);
if (!funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
}
return /** @type {!FunctionMirror} */(funcMirror);
}
/**
* @return {!{line: number, column: number}}
*/
function ensureLocation()
{
if (!location) {
var script = ensureFuncMirror().script();
if (script)
location = script.locationFromPosition(sourcePosition, true);
if (!location)
location = { line: 0, column: 0 };
}
return location;
}
/**
* @return {number}
*/
function line()
{
return ensureLocation().line;
}
/**
* @return {number}
*/
function column()
{
return ensureLocation().column;
}
/**
* @return {number}
*/
function contextId()
{
var mirror = ensureFuncMirror();
// Old V8 do not have context() function on these objects
if (!mirror.context)
return DebuggerScript._executionContextId(mirror.script().value().context_data);
var context = mirror.context();
if (context)
return DebuggerScript._executionContextId(context.data());
return 0;
}
/**
* @return {number|undefined}
*/
function sourceID()
{
var script = ensureFuncMirror().script();
return script && script.id();
}
/**
* @param {string} expression
* @return {*}
*/
function evaluate(expression)
{
return frameMirror.evaluate(expression, false).value();
}
/** @return {undefined} */
function restart()
{
return frameMirror.restart();
}
/**
* @param {number} scopeNumber
* @param {string} variableName
* @param {*} newValue
*/
function setVariableValue(scopeNumber, variableName, newValue)
{
var scopeMirror = frameMirror.scope(scopeNumber);
if (!scopeMirror)
throw new Error("Incorrect scope index");
scopeMirror.setVariableValue(variableName, newValue);
}
return {
"sourceID": sourceID,
"line": line,
"column": column,
"contextId": contextId,
"thisObject": thisObject,
"evaluate": evaluate,
"restart": restart,
"setVariableValue": setVariableValue,
"isAtReturn": isAtReturn,
"details": lazyDetails
};
}
/**
* @param {number} scopeType
* @param {!Object} scopeObject
* @return {!Object|undefined}
*/
DebuggerScript._buildScopeObject = function(scopeType, scopeObject)
{
var result;
switch (scopeType) {
case ScopeType.Local:
case ScopeType.Closure:
case ScopeType.Catch:
case ScopeType.Block:
case ScopeType.Script:
// For transient objects we create a "persistent" copy that contains
// the same properties.
// Reset scope object prototype to null so that the proto properties
// don't appear in the local scope section.
var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject, true /* transient */)).properties();
// Almost always Script scope will be empty, so just filter out that noise.
// Also drop empty Block scopes, should we get any.
if (!properties.length && (scopeType === ScopeType.Script || scopeType === ScopeType.Block))
break;
result = { __proto__: null };
for (var j = 0; j < properties.length; j++) {
var name = properties[j].name();
if (name.length === 0 || name.charAt(0) === ".")
continue; // Skip internal variables like ".arguments" and variables with empty name
result[name] = properties[j].value_;
}
break;
case ScopeType.Global:
case ScopeType.With:
result = scopeObject;
break;
}
return result;
}
// We never resolve Mirror by its handle so to avoid memory leaks caused by Mirrors in the cache we disable it.
ToggleMirrorCache(false);
return DebuggerScript;
})(); | // V8 will not count last line if script source ends with \n.
if (script.source[script.source.length - 1] === '\n') { | random_line_split |
debugger-script.js | /*
* Copyright (C) 2010 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"use strict";
(function () {
var DebuggerScript = {};
/** @enum */
const PauseOnExceptionsState = {
DontPauseOnExceptions: 0,
PauseOnAllExceptions: 1,
PauseOnUncaughtExceptions: 2
};
DebuggerScript.PauseOnExceptionsState = PauseOnExceptionsState;
DebuggerScript._pauseOnExceptionsState = DebuggerScript.PauseOnExceptionsState.DontPauseOnExceptions;
Debug.clearBreakOnException();
Debug.clearBreakOnUncaughtException();
/**
* @param {?CompileEvent} eventData
*/
DebuggerScript.getAfterCompileScript = function(eventData)
{
var script = eventData.script().value();
if (!script.is_debugger_script)
return DebuggerScript._formatScript(eventData.script().value());
return null;
}
/** @type {!Map<!ScopeType, string>} */
DebuggerScript._scopeTypeNames = new Map();
DebuggerScript._scopeTypeNames.set(ScopeType.Global, "global");
DebuggerScript._scopeTypeNames.set(ScopeType.Local, "local");
DebuggerScript._scopeTypeNames.set(ScopeType.With, "with");
DebuggerScript._scopeTypeNames.set(ScopeType.Closure, "closure");
DebuggerScript._scopeTypeNames.set(ScopeType.Catch, "catch");
DebuggerScript._scopeTypeNames.set(ScopeType.Block, "block");
DebuggerScript._scopeTypeNames.set(ScopeType.Script, "script");
/**
* @param {function()} fun
* @return {?Array<!Scope>}
*/
DebuggerScript.getFunctionScopes = function(fun)
{
var mirror = MakeMirror(fun);
if (!mirror.isFunction())
return null;
var functionMirror = /** @type {!FunctionMirror} */(mirror);
var count = functionMirror.scopeCount();
if (count == 0)
return null;
var result = [];
for (var i = 0; i < count; i++) {
var scopeDetails = functionMirror.scope(i).details();
var scopeObject = DebuggerScript._buildScopeObject(scopeDetails.type(), scopeDetails.object());
if (!scopeObject)
continue;
result.push({
type: /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeDetails.type())),
object: scopeObject,
name: scopeDetails.name() || ""
});
}
return result;
}
/**
* @param {Object} object
* @return {?RawLocation}
*/
DebuggerScript.getGeneratorObjectLocation = function(object)
{
var mirror = MakeMirror(object, true /* transient */);
if (!mirror.isGenerator())
return null;
var generatorMirror = /** @type {!GeneratorMirror} */(mirror);
var funcMirror = generatorMirror.func();
if (!funcMirror.resolved())
return null;
var location = generatorMirror.sourceLocation() || funcMirror.sourceLocation();
var script = funcMirror.script();
if (script && location) {
return {
scriptId: "" + script.id(),
lineNumber: location.line,
columnNumber: location.column
};
}
return null;
}
/**
* @param {Object} object
* @return {!Array<!{value: *}>|undefined}
*/
DebuggerScript.getCollectionEntries = function(object)
{
var mirror = MakeMirror(object, true /* transient */);
if (mirror.isMap())
return /** @type {!MapMirror} */(mirror).entries();
if (mirror.isSet() || mirror.isIterator()) {
var result = [];
var values = mirror.isSet() ? /** @type {!SetMirror} */(mirror).values() : /** @type {!IteratorMirror} */(mirror).preview();
for (var i = 0; i < values.length; ++i)
result.push({ value: values[i] });
return result;
}
}
/**
* @param {string|undefined} contextData
* @return {number}
*/
DebuggerScript._executionContextId = function(contextData)
{
if (!contextData)
return 0;
var match = contextData.match(/^[^,]*,([^,]*),.*$/);
if (!match)
return 0;
return parseInt(match[1], 10) || 0;
}
/**
* @param {string|undefined} contextData
* @return {string}
*/
DebuggerScript._executionContextAuxData = function(contextData)
{
if (!contextData)
return "";
var match = contextData.match(/^[^,]*,[^,]*,(.*)$/);
return match ? match[1] : "";
}
/**
* @param {string} contextGroupId
* @return {!Array<!FormattedScript>}
*/
DebuggerScript.getScripts = function(contextGroupId)
{
var result = [];
var scripts = Debug.scripts();
var contextDataPrefix = null;
if (contextGroupId)
contextDataPrefix = contextGroupId + ",";
for (var i = 0; i < scripts.length; ++i) {
var script = scripts[i];
if (contextDataPrefix) {
if (!script.context_data)
continue;
// Context data is a string in the following format:
// <contextGroupId>,<contextId>,<auxData>
if (script.context_data.indexOf(contextDataPrefix) !== 0)
continue;
}
if (script.is_debugger_script)
continue;
result.push(DebuggerScript._formatScript(script));
}
return result;
}
/**
* @param {!Script} script
* @return {!FormattedScript}
*/
DebuggerScript._formatScript = function(script)
{
var lineEnds = script.line_ends;
var lineCount = lineEnds.length;
var endLine = script.line_offset + lineCount - 1;
var endColumn;
// V8 will not count last line if script source ends with \n.
if (script.source[script.source.length - 1] === '\n') {
endLine += 1;
endColumn = 0;
} else {
if (lineCount === 1)
endColumn = script.source.length + script.column_offset;
else
endColumn = script.source.length - (lineEnds[lineCount - 2] + 1);
}
return {
id: script.id,
name: script.nameOrSourceURL(),
sourceURL: script.source_url,
sourceMappingURL: script.source_mapping_url,
source: script.source,
startLine: script.line_offset,
startColumn: script.column_offset,
endLine: endLine,
endColumn: endColumn,
executionContextId: DebuggerScript._executionContextId(script.context_data),
// Note that we cannot derive aux data from context id because of compilation cache.
executionContextAuxData: DebuggerScript._executionContextAuxData(script.context_data)
};
}
/**
* @param {!ExecutionState} execState
* @param {!BreakpointInfo} info
* @return {string|undefined}
*/
DebuggerScript.setBreakpoint = function(execState, info)
{
var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.Statement);
var locations = Debug.findBreakPointActualLocations(breakId);
if (!locations.length)
return undefined;
info.lineNumber = locations[0].line;
info.columnNumber = locations[0].column;
return breakId.toString();
}
/**
* @param {!ExecutionState} execState
* @param {!{breakpointId: number}} info
*/
DebuggerScript.removeBreakpoint = function(execState, info)
{
Debug.findBreakPoint(info.breakpointId, true);
}
/**
* @return {number}
*/
DebuggerScript.pauseOnExceptionsState = function()
{
return DebuggerScript._pauseOnExceptionsState;
}
/**
* @param {number} newState
*/
DebuggerScript.setPauseOnExceptionsState = function(newState)
{
DebuggerScript._pauseOnExceptionsState = newState;
if (DebuggerScript.PauseOnExceptionsState.PauseOnAllExceptions === newState)
Debug.setBreakOnException();
else
Debug.clearBreakOnException();
if (DebuggerScript.PauseOnExceptionsState.PauseOnUncaughtExceptions === newState)
Debug.setBreakOnUncaughtException();
else
Debug.clearBreakOnUncaughtException();
}
/**
* @param {!ExecutionState} execState
* @param {number} limit
* @return {!Array<!JavaScriptCallFrame>}
*/
DebuggerScript.currentCallFrames = function(execState, limit)
{
var frames = [];
for (var i = 0; i < execState.frameCount() && (!limit || i < limit); ++i)
frames.push(DebuggerScript._frameMirrorToJSCallFrame(execState.frame(i)));
return frames;
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepIntoStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepIn);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepFrameStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepFrame);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOverStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepNext);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOutOfFunction = function(execState)
{
execState.prepareStep(Debug.StepAction.StepOut);
}
DebuggerScript.clearStepping = function()
{
Debug.clearStepping();
}
// Returns array in form:
// [ 0, <v8_result_report> ] in case of success
// or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
// or throws exception with message.
/**
* @param {number} scriptId
* @param {string} newSource
* @param {boolean} preview
* @return {!Array<*>}
*/
DebuggerScript.liveEditScriptSource = function(scriptId, newSource, preview)
{
var scripts = Debug.scripts();
var scriptToEdit = null;
for (var i = 0; i < scripts.length; i++) {
if (scripts[i].id == scriptId) {
scriptToEdit = scripts[i];
break;
}
}
if (!scriptToEdit)
throw("Script not found");
var changeLog = [];
try {
var result = Debug.LiveEdit.SetScriptSource(scriptToEdit, newSource, preview, changeLog);
return [0, result.stack_modified];
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
var details = /** @type {!LiveEditErrorDetails} */(e.details);
if (details.type === "liveedit_compile_error") {
var startPosition = details.position.start;
return [1, String(e), String(details.syntaxErrorMessage), Number(startPosition.line), Number(startPosition.column)];
}
}
throw e;
}
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.clearBreakpoints = function(execState)
{
Debug.clearAllBreakPoints();
}
/**
* @param {!ExecutionState} execState
* @param {!{enabled: boolean}} info
*/
DebuggerScript.setBreakpointsActivated = function(execState, info)
{
Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
}
/**
* @param {!BreakEvent} eventData
*/
DebuggerScript.getBreakpointNumbers = function(eventData)
{
var breakpoints = eventData.breakPointsHit();
var numbers = [];
if (!breakpoints)
return numbers;
for (var i = 0; i < breakpoints.length; i++) {
var breakpoint = breakpoints[i];
var scriptBreakPoint = breakpoint.script_break_point();
numbers.push(scriptBreakPoint ? scriptBreakPoint.number() : breakpoint.number());
}
return numbers;
}
// NOTE: This function is performance critical, as it can be run on every
// statement that generates an async event (like addEventListener) to support
// asynchronous call stacks. Thus, when possible, initialize the data lazily.
/**
* @param {!FrameMirror} frameMirror
* @return {!JavaScriptCallFrame}
*/
DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
{
// Stuff that can not be initialized lazily (i.e. valid while paused with a valid break_id).
// The frameMirror and scopeMirror can be accessed only while paused on the debugger.
var frameDetails = frameMirror.details();
var funcObject = frameDetails.func();
var sourcePosition = frameDetails.sourcePosition();
var thisObject = frameDetails.receiver();
var isAtReturn = !!frameDetails.isAtReturn();
var returnValue = isAtReturn ? frameDetails.returnValue() : undefined;
var scopeMirrors = frameMirror.allScopes(false);
/** @type {!Array<number>} */
var scopeTypes = new Array(scopeMirrors.length);
/** @type {?Array<!Object>} */
var scopeObjects = new Array(scopeMirrors.length);
/** @type {!Array<string|undefined>} */
var scopeNames = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeStartPositions = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeEndPositions = new Array(scopeMirrors.length);
/** @type {?Array<function()|null>} */
var scopeFunctions = new Array(scopeMirrors.length);
for (var i = 0; i < scopeMirrors.length; ++i) {
var scopeDetails = scopeMirrors[i].details();
scopeTypes[i] = scopeDetails.type();
scopeObjects[i] = scopeDetails.object();
scopeNames[i] = scopeDetails.name();
scopeStartPositions[i] = scopeDetails.startPosition ? scopeDetails.startPosition() : 0;
scopeEndPositions[i] = scopeDetails.endPosition ? scopeDetails.endPosition() : 0;
scopeFunctions[i] = scopeDetails.func ? scopeDetails.func() : null;
}
// Calculated lazily.
var scopeChain;
var funcMirror;
var location;
/** @type {!Array<?RawLocation>} */
var scopeStartLocations;
/** @type {!Array<?RawLocation>} */
var scopeEndLocations;
var details;
/**
* @param {!ScriptMirror|undefined} script
* @param {number} pos
* @return {?RawLocation}
*/
function createLocation(script, pos)
{
if (!script)
return null;
var location = script.locationFromPosition(pos, true);
return {
"lineNumber": location.line,
"columnNumber": location.column,
"scriptId": String(script.id())
}
}
/**
* @return {!Array<!Object>}
*/
function ensureScopeChain()
{
if (!scopeChain) {
scopeChain = [];
scopeStartLocations = [];
scopeEndLocations = [];
for (var i = 0, j = 0; i < scopeObjects.length; ++i) {
var scopeObject = DebuggerScript._buildScopeObject(scopeTypes[i], scopeObjects[i]);
if (scopeObject) {
scopeTypes[j] = scopeTypes[i];
scopeNames[j] = scopeNames[i];
scopeChain[j] = scopeObject;
var funcMirror = scopeFunctions ? MakeMirror(scopeFunctions[i]) : null;
if (!funcMirror || !funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
var script = /** @type {!FunctionMirror} */(funcMirror).script();
scopeStartLocations[j] = createLocation(script, scopeStartPositions[i]);
scopeEndLocations[j] = createLocation(script, scopeEndPositions[i]);
++j;
}
}
scopeTypes.length = scopeChain.length;
scopeNames.length = scopeChain.length;
scopeObjects = null; // Free for GC.
scopeFunctions = null;
scopeStartPositions = null;
scopeEndPositions = null;
}
return scopeChain;
}
/**
* @return {!JavaScriptCallFrameDetails}
*/
function lazyDetails()
{
if (!details) {
var scopeObjects = ensureScopeChain();
var script = ensureFuncMirror().script();
/** @type {!Array<Scope>} */
var scopes = [];
for (var i = 0; i < scopeObjects.length; ++i) {
var scope = {
"type": /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeTypes[i])),
"object": scopeObjects[i],
};
if (scopeNames[i])
scope.name = scopeNames[i];
if (scopeStartLocations[i])
scope.startLocation = /** @type {!RawLocation} */(scopeStartLocations[i]);
if (scopeEndLocations[i])
scope.endLocation = /** @type {!RawLocation} */(scopeEndLocations[i]);
scopes.push(scope);
}
details = {
"functionName": ensureFuncMirror().debugName(),
"location": {
"lineNumber": line(),
"columnNumber": column(),
"scriptId": String(script.id())
},
"this": thisObject,
"scopeChain": scopes
};
var functionLocation = ensureFuncMirror().sourceLocation();
if (functionLocation) {
details.functionLocation = {
"lineNumber": functionLocation.line,
"columnNumber": functionLocation.column,
"scriptId": String(script.id())
};
}
if (isAtReturn)
details.returnValue = returnValue;
}
return details;
}
/**
* @return {!FunctionMirror}
*/
function ensureFuncMirror()
{
if (!funcMirror) {
funcMirror = MakeMirror(funcObject);
if (!funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
}
return /** @type {!FunctionMirror} */(funcMirror);
}
/**
* @return {!{line: number, column: number}}
*/
function ensureLocation()
|
/**
* @return {number}
*/
function line()
{
return ensureLocation().line;
}
/**
* @return {number}
*/
function column()
{
return ensureLocation().column;
}
/**
* @return {number}
*/
function contextId()
{
var mirror = ensureFuncMirror();
// Old V8 do not have context() function on these objects
if (!mirror.context)
return DebuggerScript._executionContextId(mirror.script().value().context_data);
var context = mirror.context();
if (context)
return DebuggerScript._executionContextId(context.data());
return 0;
}
/**
* @return {number|undefined}
*/
function sourceID()
{
var script = ensureFuncMirror().script();
return script && script.id();
}
/**
* @param {string} expression
* @return {*}
*/
function evaluate(expression)
{
return frameMirror.evaluate(expression, false).value();
}
/** @return {undefined} */
function restart()
{
return frameMirror.restart();
}
/**
* @param {number} scopeNumber
* @param {string} variableName
* @param {*} newValue
*/
function setVariableValue(scopeNumber, variableName, newValue)
{
var scopeMirror = frameMirror.scope(scopeNumber);
if (!scopeMirror)
throw new Error("Incorrect scope index");
scopeMirror.setVariableValue(variableName, newValue);
}
return {
"sourceID": sourceID,
"line": line,
"column": column,
"contextId": contextId,
"thisObject": thisObject,
"evaluate": evaluate,
"restart": restart,
"setVariableValue": setVariableValue,
"isAtReturn": isAtReturn,
"details": lazyDetails
};
}
/**
* @param {number} scopeType
* @param {!Object} scopeObject
* @return {!Object|undefined}
*/
DebuggerScript._buildScopeObject = function(scopeType, scopeObject)
{
var result;
switch (scopeType) {
case ScopeType.Local:
case ScopeType.Closure:
case ScopeType.Catch:
case ScopeType.Block:
case ScopeType.Script:
// For transient objects we create a "persistent" copy that contains
// the same properties.
// Reset scope object prototype to null so that the proto properties
// don't appear in the local scope section.
var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject, true /* transient */)).properties();
// Almost always Script scope will be empty, so just filter out that noise.
// Also drop empty Block scopes, should we get any.
if (!properties.length && (scopeType === ScopeType.Script || scopeType === ScopeType.Block))
break;
result = { __proto__: null };
for (var j = 0; j < properties.length; j++) {
var name = properties[j].name();
if (name.length === 0 || name.charAt(0) === ".")
continue; // Skip internal variables like ".arguments" and variables with empty name
result[name] = properties[j].value_;
}
break;
case ScopeType.Global:
case ScopeType.With:
result = scopeObject;
break;
}
return result;
}
// We never resolve Mirror by its handle so to avoid memory leaks caused by Mirrors in the cache we disable it.
ToggleMirrorCache(false);
return DebuggerScript;
})();
| {
if (!location) {
var script = ensureFuncMirror().script();
if (script)
location = script.locationFromPosition(sourcePosition, true);
if (!location)
location = { line: 0, column: 0 };
}
return location;
} | identifier_body |
debugger-script.js | /*
* Copyright (C) 2010 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"use strict";
(function () {
var DebuggerScript = {};
/** @enum */
const PauseOnExceptionsState = {
DontPauseOnExceptions: 0,
PauseOnAllExceptions: 1,
PauseOnUncaughtExceptions: 2
};
DebuggerScript.PauseOnExceptionsState = PauseOnExceptionsState;
DebuggerScript._pauseOnExceptionsState = DebuggerScript.PauseOnExceptionsState.DontPauseOnExceptions;
Debug.clearBreakOnException();
Debug.clearBreakOnUncaughtException();
/**
* @param {?CompileEvent} eventData
*/
DebuggerScript.getAfterCompileScript = function(eventData)
{
var script = eventData.script().value();
if (!script.is_debugger_script)
return DebuggerScript._formatScript(eventData.script().value());
return null;
}
/** @type {!Map<!ScopeType, string>} */
DebuggerScript._scopeTypeNames = new Map();
DebuggerScript._scopeTypeNames.set(ScopeType.Global, "global");
DebuggerScript._scopeTypeNames.set(ScopeType.Local, "local");
DebuggerScript._scopeTypeNames.set(ScopeType.With, "with");
DebuggerScript._scopeTypeNames.set(ScopeType.Closure, "closure");
DebuggerScript._scopeTypeNames.set(ScopeType.Catch, "catch");
DebuggerScript._scopeTypeNames.set(ScopeType.Block, "block");
DebuggerScript._scopeTypeNames.set(ScopeType.Script, "script");
/**
* @param {function()} fun
* @return {?Array<!Scope>}
*/
DebuggerScript.getFunctionScopes = function(fun)
{
var mirror = MakeMirror(fun);
if (!mirror.isFunction())
return null;
var functionMirror = /** @type {!FunctionMirror} */(mirror);
var count = functionMirror.scopeCount();
if (count == 0)
return null;
var result = [];
for (var i = 0; i < count; i++) {
var scopeDetails = functionMirror.scope(i).details();
var scopeObject = DebuggerScript._buildScopeObject(scopeDetails.type(), scopeDetails.object());
if (!scopeObject)
continue;
result.push({
type: /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeDetails.type())),
object: scopeObject,
name: scopeDetails.name() || ""
});
}
return result;
}
/**
* @param {Object} object
* @return {?RawLocation}
*/
DebuggerScript.getGeneratorObjectLocation = function(object)
{
var mirror = MakeMirror(object, true /* transient */);
if (!mirror.isGenerator())
return null;
var generatorMirror = /** @type {!GeneratorMirror} */(mirror);
var funcMirror = generatorMirror.func();
if (!funcMirror.resolved())
return null;
var location = generatorMirror.sourceLocation() || funcMirror.sourceLocation();
var script = funcMirror.script();
if (script && location) {
return {
scriptId: "" + script.id(),
lineNumber: location.line,
columnNumber: location.column
};
}
return null;
}
/**
* @param {Object} object
* @return {!Array<!{value: *}>|undefined}
*/
DebuggerScript.getCollectionEntries = function(object)
{
var mirror = MakeMirror(object, true /* transient */);
if (mirror.isMap())
return /** @type {!MapMirror} */(mirror).entries();
if (mirror.isSet() || mirror.isIterator()) {
var result = [];
var values = mirror.isSet() ? /** @type {!SetMirror} */(mirror).values() : /** @type {!IteratorMirror} */(mirror).preview();
for (var i = 0; i < values.length; ++i)
result.push({ value: values[i] });
return result;
}
}
/**
* @param {string|undefined} contextData
* @return {number}
*/
DebuggerScript._executionContextId = function(contextData)
{
if (!contextData)
return 0;
var match = contextData.match(/^[^,]*,([^,]*),.*$/);
if (!match)
return 0;
return parseInt(match[1], 10) || 0;
}
/**
* @param {string|undefined} contextData
* @return {string}
*/
DebuggerScript._executionContextAuxData = function(contextData)
{
if (!contextData)
return "";
var match = contextData.match(/^[^,]*,[^,]*,(.*)$/);
return match ? match[1] : "";
}
/**
* @param {string} contextGroupId
* @return {!Array<!FormattedScript>}
*/
DebuggerScript.getScripts = function(contextGroupId)
{
var result = [];
var scripts = Debug.scripts();
var contextDataPrefix = null;
if (contextGroupId)
contextDataPrefix = contextGroupId + ",";
for (var i = 0; i < scripts.length; ++i) {
var script = scripts[i];
if (contextDataPrefix) {
if (!script.context_data)
continue;
// Context data is a string in the following format:
// <contextGroupId>,<contextId>,<auxData>
if (script.context_data.indexOf(contextDataPrefix) !== 0)
continue;
}
if (script.is_debugger_script)
continue;
result.push(DebuggerScript._formatScript(script));
}
return result;
}
/**
* @param {!Script} script
* @return {!FormattedScript}
*/
DebuggerScript._formatScript = function(script)
{
var lineEnds = script.line_ends;
var lineCount = lineEnds.length;
var endLine = script.line_offset + lineCount - 1;
var endColumn;
// V8 will not count last line if script source ends with \n.
if (script.source[script.source.length - 1] === '\n') {
endLine += 1;
endColumn = 0;
} else {
if (lineCount === 1)
endColumn = script.source.length + script.column_offset;
else
endColumn = script.source.length - (lineEnds[lineCount - 2] + 1);
}
return {
id: script.id,
name: script.nameOrSourceURL(),
sourceURL: script.source_url,
sourceMappingURL: script.source_mapping_url,
source: script.source,
startLine: script.line_offset,
startColumn: script.column_offset,
endLine: endLine,
endColumn: endColumn,
executionContextId: DebuggerScript._executionContextId(script.context_data),
// Note that we cannot derive aux data from context id because of compilation cache.
executionContextAuxData: DebuggerScript._executionContextAuxData(script.context_data)
};
}
/**
* @param {!ExecutionState} execState
* @param {!BreakpointInfo} info
* @return {string|undefined}
*/
DebuggerScript.setBreakpoint = function(execState, info)
{
var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.Statement);
var locations = Debug.findBreakPointActualLocations(breakId);
if (!locations.length)
return undefined;
info.lineNumber = locations[0].line;
info.columnNumber = locations[0].column;
return breakId.toString();
}
/**
* @param {!ExecutionState} execState
* @param {!{breakpointId: number}} info
*/
DebuggerScript.removeBreakpoint = function(execState, info)
{
Debug.findBreakPoint(info.breakpointId, true);
}
/**
* @return {number}
*/
DebuggerScript.pauseOnExceptionsState = function()
{
return DebuggerScript._pauseOnExceptionsState;
}
/**
* @param {number} newState
*/
DebuggerScript.setPauseOnExceptionsState = function(newState)
{
DebuggerScript._pauseOnExceptionsState = newState;
if (DebuggerScript.PauseOnExceptionsState.PauseOnAllExceptions === newState)
Debug.setBreakOnException();
else
Debug.clearBreakOnException();
if (DebuggerScript.PauseOnExceptionsState.PauseOnUncaughtExceptions === newState)
Debug.setBreakOnUncaughtException();
else
Debug.clearBreakOnUncaughtException();
}
/**
* @param {!ExecutionState} execState
* @param {number} limit
* @return {!Array<!JavaScriptCallFrame>}
*/
DebuggerScript.currentCallFrames = function(execState, limit)
{
var frames = [];
for (var i = 0; i < execState.frameCount() && (!limit || i < limit); ++i)
frames.push(DebuggerScript._frameMirrorToJSCallFrame(execState.frame(i)));
return frames;
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepIntoStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepIn);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepFrameStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepFrame);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOverStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepNext);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOutOfFunction = function(execState)
{
execState.prepareStep(Debug.StepAction.StepOut);
}
DebuggerScript.clearStepping = function()
{
Debug.clearStepping();
}
// Returns array in form:
// [ 0, <v8_result_report> ] in case of success
// or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
// or throws exception with message.
/**
* @param {number} scriptId
* @param {string} newSource
* @param {boolean} preview
* @return {!Array<*>}
*/
DebuggerScript.liveEditScriptSource = function(scriptId, newSource, preview)
{
var scripts = Debug.scripts();
var scriptToEdit = null;
for (var i = 0; i < scripts.length; i++) {
if (scripts[i].id == scriptId) {
scriptToEdit = scripts[i];
break;
}
}
if (!scriptToEdit)
throw("Script not found");
var changeLog = [];
try {
var result = Debug.LiveEdit.SetScriptSource(scriptToEdit, newSource, preview, changeLog);
return [0, result.stack_modified];
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
var details = /** @type {!LiveEditErrorDetails} */(e.details);
if (details.type === "liveedit_compile_error") {
var startPosition = details.position.start;
return [1, String(e), String(details.syntaxErrorMessage), Number(startPosition.line), Number(startPosition.column)];
}
}
throw e;
}
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.clearBreakpoints = function(execState)
{
Debug.clearAllBreakPoints();
}
/**
* @param {!ExecutionState} execState
* @param {!{enabled: boolean}} info
*/
DebuggerScript.setBreakpointsActivated = function(execState, info)
{
Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
}
/**
* @param {!BreakEvent} eventData
*/
DebuggerScript.getBreakpointNumbers = function(eventData)
{
var breakpoints = eventData.breakPointsHit();
var numbers = [];
if (!breakpoints)
return numbers;
for (var i = 0; i < breakpoints.length; i++) {
var breakpoint = breakpoints[i];
var scriptBreakPoint = breakpoint.script_break_point();
numbers.push(scriptBreakPoint ? scriptBreakPoint.number() : breakpoint.number());
}
return numbers;
}
// NOTE: This function is performance critical, as it can be run on every
// statement that generates an async event (like addEventListener) to support
// asynchronous call stacks. Thus, when possible, initialize the data lazily.
/**
* @param {!FrameMirror} frameMirror
* @return {!JavaScriptCallFrame}
*/
DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
{
// Stuff that can not be initialized lazily (i.e. valid while paused with a valid break_id).
// The frameMirror and scopeMirror can be accessed only while paused on the debugger.
var frameDetails = frameMirror.details();
var funcObject = frameDetails.func();
var sourcePosition = frameDetails.sourcePosition();
var thisObject = frameDetails.receiver();
var isAtReturn = !!frameDetails.isAtReturn();
var returnValue = isAtReturn ? frameDetails.returnValue() : undefined;
var scopeMirrors = frameMirror.allScopes(false);
/** @type {!Array<number>} */
var scopeTypes = new Array(scopeMirrors.length);
/** @type {?Array<!Object>} */
var scopeObjects = new Array(scopeMirrors.length);
/** @type {!Array<string|undefined>} */
var scopeNames = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeStartPositions = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeEndPositions = new Array(scopeMirrors.length);
/** @type {?Array<function()|null>} */
var scopeFunctions = new Array(scopeMirrors.length);
for (var i = 0; i < scopeMirrors.length; ++i) {
var scopeDetails = scopeMirrors[i].details();
scopeTypes[i] = scopeDetails.type();
scopeObjects[i] = scopeDetails.object();
scopeNames[i] = scopeDetails.name();
scopeStartPositions[i] = scopeDetails.startPosition ? scopeDetails.startPosition() : 0;
scopeEndPositions[i] = scopeDetails.endPosition ? scopeDetails.endPosition() : 0;
scopeFunctions[i] = scopeDetails.func ? scopeDetails.func() : null;
}
// Calculated lazily.
var scopeChain;
var funcMirror;
var location;
/** @type {!Array<?RawLocation>} */
var scopeStartLocations;
/** @type {!Array<?RawLocation>} */
var scopeEndLocations;
var details;
/**
* @param {!ScriptMirror|undefined} script
* @param {number} pos
* @return {?RawLocation}
*/
function createLocation(script, pos)
{
if (!script)
return null;
var location = script.locationFromPosition(pos, true);
return {
"lineNumber": location.line,
"columnNumber": location.column,
"scriptId": String(script.id())
}
}
/**
* @return {!Array<!Object>}
*/
function ensureScopeChain()
{
if (!scopeChain) {
scopeChain = [];
scopeStartLocations = [];
scopeEndLocations = [];
for (var i = 0, j = 0; i < scopeObjects.length; ++i) {
var scopeObject = DebuggerScript._buildScopeObject(scopeTypes[i], scopeObjects[i]);
if (scopeObject) {
scopeTypes[j] = scopeTypes[i];
scopeNames[j] = scopeNames[i];
scopeChain[j] = scopeObject;
var funcMirror = scopeFunctions ? MakeMirror(scopeFunctions[i]) : null;
if (!funcMirror || !funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
var script = /** @type {!FunctionMirror} */(funcMirror).script();
scopeStartLocations[j] = createLocation(script, scopeStartPositions[i]);
scopeEndLocations[j] = createLocation(script, scopeEndPositions[i]);
++j;
}
}
scopeTypes.length = scopeChain.length;
scopeNames.length = scopeChain.length;
scopeObjects = null; // Free for GC.
scopeFunctions = null;
scopeStartPositions = null;
scopeEndPositions = null;
}
return scopeChain;
}
/**
* @return {!JavaScriptCallFrameDetails}
*/
function lazyDetails()
{
if (!details) {
var scopeObjects = ensureScopeChain();
var script = ensureFuncMirror().script();
/** @type {!Array<Scope>} */
var scopes = [];
for (var i = 0; i < scopeObjects.length; ++i) {
var scope = {
"type": /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeTypes[i])),
"object": scopeObjects[i],
};
if (scopeNames[i])
scope.name = scopeNames[i];
if (scopeStartLocations[i])
scope.startLocation = /** @type {!RawLocation} */(scopeStartLocations[i]);
if (scopeEndLocations[i])
scope.endLocation = /** @type {!RawLocation} */(scopeEndLocations[i]);
scopes.push(scope);
}
details = {
"functionName": ensureFuncMirror().debugName(),
"location": {
"lineNumber": line(),
"columnNumber": column(),
"scriptId": String(script.id())
},
"this": thisObject,
"scopeChain": scopes
};
var functionLocation = ensureFuncMirror().sourceLocation();
if (functionLocation) {
details.functionLocation = {
"lineNumber": functionLocation.line,
"columnNumber": functionLocation.column,
"scriptId": String(script.id())
};
}
if (isAtReturn)
details.returnValue = returnValue;
}
return details;
}
/**
* @return {!FunctionMirror}
*/
function ensureFuncMirror()
{
if (!funcMirror) {
funcMirror = MakeMirror(funcObject);
if (!funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
}
return /** @type {!FunctionMirror} */(funcMirror);
}
/**
* @return {!{line: number, column: number}}
*/
function ensureLocation()
{
if (!location) {
var script = ensureFuncMirror().script();
if (script)
location = script.locationFromPosition(sourcePosition, true);
if (!location)
location = { line: 0, column: 0 };
}
return location;
}
/**
* @return {number}
*/
function line()
{
return ensureLocation().line;
}
/**
* @return {number}
*/
function | ()
{
return ensureLocation().column;
}
/**
* @return {number}
*/
function contextId()
{
var mirror = ensureFuncMirror();
// Old V8 do not have context() function on these objects
if (!mirror.context)
return DebuggerScript._executionContextId(mirror.script().value().context_data);
var context = mirror.context();
if (context)
return DebuggerScript._executionContextId(context.data());
return 0;
}
/**
* @return {number|undefined}
*/
function sourceID()
{
var script = ensureFuncMirror().script();
return script && script.id();
}
/**
* @param {string} expression
* @return {*}
*/
function evaluate(expression)
{
return frameMirror.evaluate(expression, false).value();
}
/** @return {undefined} */
function restart()
{
return frameMirror.restart();
}
/**
* @param {number} scopeNumber
* @param {string} variableName
* @param {*} newValue
*/
function setVariableValue(scopeNumber, variableName, newValue)
{
var scopeMirror = frameMirror.scope(scopeNumber);
if (!scopeMirror)
throw new Error("Incorrect scope index");
scopeMirror.setVariableValue(variableName, newValue);
}
return {
"sourceID": sourceID,
"line": line,
"column": column,
"contextId": contextId,
"thisObject": thisObject,
"evaluate": evaluate,
"restart": restart,
"setVariableValue": setVariableValue,
"isAtReturn": isAtReturn,
"details": lazyDetails
};
}
/**
* @param {number} scopeType
* @param {!Object} scopeObject
* @return {!Object|undefined}
*/
DebuggerScript._buildScopeObject = function(scopeType, scopeObject)
{
var result;
switch (scopeType) {
case ScopeType.Local:
case ScopeType.Closure:
case ScopeType.Catch:
case ScopeType.Block:
case ScopeType.Script:
// For transient objects we create a "persistent" copy that contains
// the same properties.
// Reset scope object prototype to null so that the proto properties
// don't appear in the local scope section.
var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject, true /* transient */)).properties();
// Almost always Script scope will be empty, so just filter out that noise.
// Also drop empty Block scopes, should we get any.
if (!properties.length && (scopeType === ScopeType.Script || scopeType === ScopeType.Block))
break;
result = { __proto__: null };
for (var j = 0; j < properties.length; j++) {
var name = properties[j].name();
if (name.length === 0 || name.charAt(0) === ".")
continue; // Skip internal variables like ".arguments" and variables with empty name
result[name] = properties[j].value_;
}
break;
case ScopeType.Global:
case ScopeType.With:
result = scopeObject;
break;
}
return result;
}
// We never resolve Mirror by its handle so to avoid memory leaks caused by Mirrors in the cache we disable it.
ToggleMirrorCache(false);
return DebuggerScript;
})();
| column | identifier_name |
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use std::rc::Rc;
use std::cell::Cell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::u32;
extern crate chariot_drs as lib;
use lib::DrsFile as Archive;
extern crate number_prefix;
use number_prefix::{binary_prefix, Prefixed, Standalone};
extern crate gdk;
extern crate gtk;
use gtk::prelude::Inhibit;
use gtk::{Builder, Button, Entry as EntryBox, FileChooserDialog, ListStore, TreeView,
TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
#[derive(Debug, PartialEq, Eq)]
enum Column {
ID,
Type,
Size,
Offset,
}
impl Into<u32> for Column {
fn into(self) -> u32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
impl Into<i32> for Column {
fn into(self) -> i32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
macro_rules! add_column {
($tree:ident, $title:expr, $id:expr) => {{
let column = TreeViewColumn::new();
let renderer = gtk::CellRendererText::new();
column.set_title($title);
column.set_resizable(true);
column.pack_start(&renderer, true);
column.add_attribute(&renderer, "text", $id);
$tree.append_column(&column);
}}
}
macro_rules! add_sort_func {
($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
let store_clone = $store.clone();
$store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
.get::<String>()
.unwrap();
let a = $convert(string_at_iter(a));
let b = $convert(string_at_iter(b));
a.cmp(&b)
});
$tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
}}
}
fn setup_tree(tree: TreeView, extract_button: Button) {
let sel = tree.get_selection();
let model = match tree.get_model() {
Some(m) => m,
_ => return,
};
sel.connect_changed(move |this| {
// TODO: Do all of this when an archive is opened, too.
let selected_count = this.count_selected_rows();
let store_len = model.iter_n_children(None);
let count_str = if selected_count == 0 || selected_count == store_len {
"all".into()
} else {
format!("({})", selected_count)
};
extract_button.set_label(&format!("Extract {}", count_str))
});
}
fn | (
title: &str,
window_type: gtk::WindowType,
action: gtk::FileChooserAction,
) -> Option<PathBuf> {
let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
match action {
gtk::FileChooserAction::Open => {
dialog.add_button("_Open", gtk::ResponseType::Ok.into());
}
gtk::FileChooserAction::SelectFolder => {
dialog.add_button("_Select", gtk::ResponseType::Ok.into());
}
_ => (),
};
let path = if dialog.run() == gtk::ResponseType::Ok.into() {
dialog.get_filename()
} else {
None
};
dialog.destroy();
path
}
fn enable_archive_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
archive_button: Button,
archive_entrybox: EntryBox,
ei_store: ListStore,
) {
archive_button.connect_clicked(move |_this| {
let archive_path = match select_dir_dialog(
"Select a DRS archive",
WindowType::Popup,
gtk::FileChooserAction::Open,
) {
Some(p) => p,
_ => return,
};
let archive_path = match archive_path.to_str() {
Some(p) => p,
_ => return,
};
let arch = match Archive::read_from_file(archive_path) {
Ok(a) => a,
_ => return,
};
ei_store.clear();
extract_button.set_sensitive(true);
archive_entrybox.set_text(archive_path);
for table in arch.tables.iter() {
for entry in table.entries.iter() {
let float_len = entry.file_size as f32;
let formatted_size = match binary_prefix(float_len) {
Standalone(bytes) => format!("{} B", bytes),
Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
};
ei_store.insert_with_values(
None,
&[
Column::ID.into(),
Column::Type.into(),
Column::Size.into(),
Column::Offset.into(),
],
&[
&entry.file_id.to_string(),
&table.header.file_extension(),
&formatted_size,
&format!("{:#X}", entry.file_offset),
],
);
}
}
archive.replace(Some(arch));
});
}
fn enable_extract_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
entryinfo_tree: TreeView,
) {
extract_button.connect_clicked(move |_this| {
if let Some(dest_dir_path) = select_dir_dialog(
"Select a directory to extract to",
WindowType::Toplevel,
gtk::FileChooserAction::SelectFolder,
) {
let arch = match archive.take() {
Some(a) => a,
_ => return,
};
let sel = entryinfo_tree.get_selection();
let (mut sel_paths, model) = sel.get_selected_rows();
if sel_paths.len() == 0 {
sel.select_all();
let (s, _) = sel.get_selected_rows();
sel_paths = s;
sel.unselect_all();
}
for sel_path in sel_paths {
let iter = match model.get_iter(&sel_path) {
Some(i) => i,
_ => continue,
};
let val = model.get_value(&iter, 0);
let name = val.get::<String>().expect(&format!(
"Unable to convert gtk::Type::String {:?} to a Rust String",
val
));
for table in arch.tables.iter() {
let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
Some(d) => d,
_ => continue,
};
let mut output_filepath = dest_dir_path.clone();
output_filepath.push(name.replace("\\", "/"));
output_filepath.set_extension(table.header.file_extension());
let parent = output_filepath.parent().expect(&format!(
"Unable to determine parent path of {:?}",
&output_filepath
));
fs::create_dir_all(&parent)
.expect("Failed to create necessary parent directories");
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(&output_filepath)
.expect(&format!(
"Failed to open file {:?} for writing",
output_filepath
));
f.write(data).expect("Failed to write data");
}
}
archive.replace(Some(arch));
}
});
}
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
// Values in the table are strings. They should be converted back
// to their original type to make the sort function work properly
fn convert_name(s: String) -> u32 {
s.parse::<u32>().unwrap()
}
fn convert_type(s: String) -> String {
s
}
fn convert_size(s: String) -> u32 {
let v = s.split(' ').collect::<Vec<&str>>();
let exp = match v.get(1) {
Some(&"B") => 0,
Some(&"KiB") => 1,
Some(&"MiB") => 2,
Some(&"GiB") => 3,
_ => panic!("Unable to convert size: `{}`", s),
};
(1024u32.pow(exp) as f32 * v[0].parse::<f32>().unwrap()) as u32
}
fn convert_offset(s: String) -> u32 {
u32::from_str_radix(&s[2..], 16).unwrap()
}
add_sort_func!(entryinfo_tree, ei_store, convert_name, Column::ID);
add_sort_func!(entryinfo_tree, ei_store, convert_type, Column::Type);
add_sort_func!(entryinfo_tree, ei_store, convert_size, Column::Size);
add_sort_func!(entryinfo_tree, ei_store, convert_offset, Column::Offset);
}
fn main() {
gtk::init().unwrap();
let builder = Builder::new();
builder
.add_from_string(include_str!("../ui.glade"))
.unwrap();
let window: Window = builder.get_object("main_window").unwrap();
let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
let archive_button: Button = builder.get_object("archive_file_button").unwrap();
let extract_button: Button = builder.get_object("extract_button").unwrap();
extract_button.set_sensitive(false);
let entryinfo_tree = {
let t: TreeView = builder.get_object("entryinfo_tree").unwrap();
let sel = t.get_selection();
sel.set_mode(gtk::SelectionMode::Multiple);
t
};
window.set_title("DRS Studio");
window.set_position(gtk::WindowPosition::Center);
window.get_preferred_width();
window.set_default_size(1440, 900);
let ei_store = ListStore::new(&[Type::String, Type::String, Type::String, Type::String]);
entryinfo_tree.set_model(Some(&ei_store));
entryinfo_tree.set_headers_visible(true);
add_column!(entryinfo_tree, "ID", Column::ID.into());
add_column!(entryinfo_tree, "Type", Column::Type.into());
add_column!(entryinfo_tree, "Size", Column::Size.into());
add_column!(entryinfo_tree, "Offset", Column::Offset.into());
setup_tree(entryinfo_tree.clone(), extract_button.clone());
let archive: Rc<Cell<Option<Archive>>> = Rc::new(Cell::new(None));
enable_sortable_cols(&ei_store, &entryinfo_tree);
enable_archive_button(
archive.clone(),
extract_button.clone(),
archive_button.clone(),
archive_entrybox.clone(),
ei_store,
);
enable_extract_button(archive.clone(), extract_button.clone(), entryinfo_tree);
window.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(false)
});
window.show_all();
gtk::main();
}
| select_dir_dialog | identifier_name |
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use std::rc::Rc;
use std::cell::Cell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::u32;
extern crate chariot_drs as lib;
use lib::DrsFile as Archive;
extern crate number_prefix;
use number_prefix::{binary_prefix, Prefixed, Standalone};
extern crate gdk;
extern crate gtk;
use gtk::prelude::Inhibit;
use gtk::{Builder, Button, Entry as EntryBox, FileChooserDialog, ListStore, TreeView,
TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
#[derive(Debug, PartialEq, Eq)]
enum Column {
ID,
Type,
Size,
Offset,
}
impl Into<u32> for Column {
fn into(self) -> u32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
impl Into<i32> for Column {
fn into(self) -> i32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
macro_rules! add_column {
($tree:ident, $title:expr, $id:expr) => {{
let column = TreeViewColumn::new();
let renderer = gtk::CellRendererText::new();
column.set_title($title);
column.set_resizable(true);
column.pack_start(&renderer, true);
column.add_attribute(&renderer, "text", $id);
$tree.append_column(&column);
}}
}
macro_rules! add_sort_func {
($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
let store_clone = $store.clone();
$store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
.get::<String>()
.unwrap();
let a = $convert(string_at_iter(a));
let b = $convert(string_at_iter(b));
a.cmp(&b)
});
$tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
}}
}
fn setup_tree(tree: TreeView, extract_button: Button) {
let sel = tree.get_selection();
let model = match tree.get_model() {
Some(m) => m,
_ => return,
};
sel.connect_changed(move |this| {
// TODO: Do all of this when an archive is opened, too.
let selected_count = this.count_selected_rows();
let store_len = model.iter_n_children(None); | let count_str = if selected_count == 0 || selected_count == store_len {
"all".into()
} else {
format!("({})", selected_count)
};
extract_button.set_label(&format!("Extract {}", count_str))
});
}
fn select_dir_dialog(
title: &str,
window_type: gtk::WindowType,
action: gtk::FileChooserAction,
) -> Option<PathBuf> {
let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
match action {
gtk::FileChooserAction::Open => {
dialog.add_button("_Open", gtk::ResponseType::Ok.into());
}
gtk::FileChooserAction::SelectFolder => {
dialog.add_button("_Select", gtk::ResponseType::Ok.into());
}
_ => (),
};
let path = if dialog.run() == gtk::ResponseType::Ok.into() {
dialog.get_filename()
} else {
None
};
dialog.destroy();
path
}
fn enable_archive_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
archive_button: Button,
archive_entrybox: EntryBox,
ei_store: ListStore,
) {
archive_button.connect_clicked(move |_this| {
let archive_path = match select_dir_dialog(
"Select a DRS archive",
WindowType::Popup,
gtk::FileChooserAction::Open,
) {
Some(p) => p,
_ => return,
};
let archive_path = match archive_path.to_str() {
Some(p) => p,
_ => return,
};
let arch = match Archive::read_from_file(archive_path) {
Ok(a) => a,
_ => return,
};
ei_store.clear();
extract_button.set_sensitive(true);
archive_entrybox.set_text(archive_path);
for table in arch.tables.iter() {
for entry in table.entries.iter() {
let float_len = entry.file_size as f32;
let formatted_size = match binary_prefix(float_len) {
Standalone(bytes) => format!("{} B", bytes),
Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
};
ei_store.insert_with_values(
None,
&[
Column::ID.into(),
Column::Type.into(),
Column::Size.into(),
Column::Offset.into(),
],
&[
&entry.file_id.to_string(),
&table.header.file_extension(),
&formatted_size,
&format!("{:#X}", entry.file_offset),
],
);
}
}
archive.replace(Some(arch));
});
}
fn enable_extract_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
entryinfo_tree: TreeView,
) {
extract_button.connect_clicked(move |_this| {
if let Some(dest_dir_path) = select_dir_dialog(
"Select a directory to extract to",
WindowType::Toplevel,
gtk::FileChooserAction::SelectFolder,
) {
let arch = match archive.take() {
Some(a) => a,
_ => return,
};
let sel = entryinfo_tree.get_selection();
let (mut sel_paths, model) = sel.get_selected_rows();
if sel_paths.len() == 0 {
sel.select_all();
let (s, _) = sel.get_selected_rows();
sel_paths = s;
sel.unselect_all();
}
for sel_path in sel_paths {
let iter = match model.get_iter(&sel_path) {
Some(i) => i,
_ => continue,
};
let val = model.get_value(&iter, 0);
let name = val.get::<String>().expect(&format!(
"Unable to convert gtk::Type::String {:?} to a Rust String",
val
));
for table in arch.tables.iter() {
let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
Some(d) => d,
_ => continue,
};
let mut output_filepath = dest_dir_path.clone();
output_filepath.push(name.replace("\\", "/"));
output_filepath.set_extension(table.header.file_extension());
let parent = output_filepath.parent().expect(&format!(
"Unable to determine parent path of {:?}",
&output_filepath
));
fs::create_dir_all(&parent)
.expect("Failed to create necessary parent directories");
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(&output_filepath)
.expect(&format!(
"Failed to open file {:?} for writing",
output_filepath
));
f.write(data).expect("Failed to write data");
}
}
archive.replace(Some(arch));
}
});
}
/// Make every column of the entry-info table clickable for sorting.
///
/// All cells are stored as strings, so each column gets a converter that
/// recovers a properly comparable value from the rendered text.
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
    // IDs are plain decimal integers.
    fn parse_id(cell: String) -> u32 {
        cell.parse::<u32>().unwrap()
    }
    // Type names compare correctly as strings already.
    fn parse_type(cell: String) -> String {
        cell
    }
    // Sizes look like "12 B" or "3.45 KiB"; undo the binary prefix to get
    // back a comparable byte count.
    fn parse_size(cell: String) -> u32 {
        let pieces: Vec<&str> = cell.split(' ').collect();
        let exponent = match pieces.get(1) {
            Some(&"B") => 0,
            Some(&"KiB") => 1,
            Some(&"MiB") => 2,
            Some(&"GiB") => 3,
            _ => panic!("Unable to convert size: `{}`", cell),
        };
        let magnitude = pieces[0].parse::<f32>().unwrap();
        (1024u32.pow(exponent) as f32 * magnitude) as u32
    }
    // Offsets are rendered as "0x..."; skip the prefix and parse as hex.
    fn parse_offset(cell: String) -> u32 {
        u32::from_str_radix(&cell[2..], 16).unwrap()
    }
    add_sort_func!(entryinfo_tree, ei_store, parse_id, Column::ID);
    add_sort_func!(entryinfo_tree, ei_store, parse_type, Column::Type);
    add_sort_func!(entryinfo_tree, ei_store, parse_size, Column::Size);
    add_sort_func!(entryinfo_tree, ei_store, parse_offset, Column::Offset);
}
fn main() {
    // GTK must be initialised before any widget can be constructed.
    gtk::init().unwrap();
    // Build the widget tree from the Glade UI description compiled into the
    // binary.
    let builder = Builder::new();
    builder
        .add_from_string(include_str!("../ui.glade"))
        .unwrap();
    let window: Window = builder.get_object("main_window").unwrap();
    let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
    let archive_button: Button = builder.get_object("archive_file_button").unwrap();
    let extract_button: Button = builder.get_object("extract_button").unwrap();
    // Nothing can be extracted until an archive has been opened.
    extract_button.set_sensitive(false);
    let entryinfo_tree = {
        let t: TreeView = builder.get_object("entryinfo_tree").unwrap();
        let sel = t.get_selection();
        // Allow extracting several entries at once.
        sel.set_mode(gtk::SelectionMode::Multiple);
        t
    };
    window.set_title("DRS Studio");
    window.set_position(gtk::WindowPosition::Center);
    window.get_preferred_width();
    window.set_default_size(1440, 900);
    // One string column per displayed attribute: ID, Type, Size, Offset.
    let ei_store = ListStore::new(&[Type::String, Type::String, Type::String, Type::String]);
    entryinfo_tree.set_model(Some(&ei_store));
    entryinfo_tree.set_headers_visible(true);
    add_column!(entryinfo_tree, "ID", Column::ID.into());
    add_column!(entryinfo_tree, "Type", Column::Type.into());
    add_column!(entryinfo_tree, "Size", Column::Size.into());
    add_column!(entryinfo_tree, "Offset", Column::Offset.into());
    setup_tree(entryinfo_tree.clone(), extract_button.clone());
    // The currently open archive, shared between the two button callbacks.
    let archive: Rc<Cell<Option<Archive>>> = Rc::new(Cell::new(None));
    enable_sortable_cols(&ei_store, &entryinfo_tree);
    enable_archive_button(
        archive.clone(),
        extract_button.clone(),
        archive_button.clone(),
        archive_entrybox.clone(),
        ei_store,
    );
    enable_extract_button(archive.clone(), extract_button.clone(), entryinfo_tree);
    // Quit the main loop when the window is closed.
    window.connect_delete_event(|_, _| {
        gtk::main_quit();
        Inhibit(false)
    });
    window.show_all();
    gtk::main();
}
main.rs | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use std::rc::Rc;
use std::cell::Cell;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::u32;
extern crate chariot_drs as lib;
use lib::DrsFile as Archive;
extern crate number_prefix;
use number_prefix::{binary_prefix, Prefixed, Standalone};
extern crate gdk;
extern crate gtk;
use gtk::prelude::Inhibit;
use gtk::{Builder, Button, Entry as EntryBox, FileChooserDialog, ListStore, TreeView,
TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
/// The columns of the entry-info table, in model order.
#[derive(Debug, PartialEq, Eq)]
enum Column {
    ID,
    Type,
    Size,
    Offset,
}

// Implementing `From` (rather than `Into` directly) also provides the
// matching `Into` impls via the standard blanket implementation, so all
// existing `Column::ID.into()` call sites keep working unchanged.
impl From<Column> for u32 {
    fn from(col: Column) -> u32 {
        match col {
            Column::ID => 0,
            Column::Type => 1,
            Column::Size => 2,
            Column::Offset => 3,
        }
    }
}

impl From<Column> for i32 {
    fn from(col: Column) -> i32 {
        // Reuse the u32 mapping; all column indices fit comfortably in i32.
        u32::from(col) as i32
    }
}
/// Append a resizable text column titled `$title` to `$tree`, rendering the
/// model column at index `$id`.
macro_rules! add_column {
    ($tree:ident, $title:expr, $id:expr) => {{
        let column = TreeViewColumn::new();
        let renderer = gtk::CellRendererText::new();
        column.set_title($title);
        column.set_resizable(true);
        column.pack_start(&renderer, true);
        // Bind the renderer's "text" property to model column `$id`.
        column.add_attribute(&renderer, "text", $id);
        $tree.append_column(&column);
    }}
}
/// Install a sort function on `$store` for column `$col`, using `$convert`
/// to turn the rendered cell string back into a comparable value, and make
/// the matching tree-view column header clickable for sorting.
macro_rules! add_sort_func {
    ($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
        let store_clone = $store.clone();
        $store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
            // Cells are stored as strings; fetch the text behind each iter.
            let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
                .get::<String>()
                .unwrap();
            let a = $convert(string_at_iter(a));
            let b = $convert(string_at_iter(b));
            a.cmp(&b)
        });
        $tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
    }}
}
/// Keep the extract button's label in sync with the current tree selection,
/// e.g. "Extract all" or "Extract (3)".
fn setup_tree(tree: TreeView, extract_button: Button) {
    let sel = tree.get_selection();
    // Without a model there is nothing to count, so leave the label alone.
    let model = match tree.get_model() {
        Some(m) => m,
        _ => return,
    };
    sel.connect_changed(move |this| {
        // TODO: Do all of this when an archive is opened, too.
        let selected_count = this.count_selected_rows();
        let store_len = model.iter_n_children(None);
        // Selecting nothing is treated the same as selecting everything.
        let count_str = if selected_count == 0 || selected_count == store_len {
            "all".into()
        } else {
            format!("({})", selected_count)
        };
        extract_button.set_label(&format!("Extract {}", count_str))
    });
}
/// Show a file-chooser dialog with the given title and action, returning the
/// chosen path, or `None` if the user cancelled.
fn select_dir_dialog(
    title: &str,
    window_type: gtk::WindowType,
    action: gtk::FileChooserAction,
) -> Option<PathBuf> {
    let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
    dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
    // The affirmative button's label depends on what the dialog is for;
    // other actions get no OK button at all.
    if action == gtk::FileChooserAction::Open {
        dialog.add_button("_Open", gtk::ResponseType::Ok.into());
    } else if action == gtk::FileChooserAction::SelectFolder {
        dialog.add_button("_Select", gtk::ResponseType::Ok.into());
    }
    // Run modally; only an OK response yields a filename.
    let chosen = if dialog.run() == gtk::ResponseType::Ok.into() {
        dialog.get_filename()
    } else {
        None
    };
    dialog.destroy();
    chosen
}
/// Wire up the "open archive" button: on click, prompt for a DRS file, load
/// it, and repopulate the entry list from its tables.
fn enable_archive_button(
    archive: Rc<Cell<Option<Archive>>>,
    extract_button: Button,
    archive_button: Button,
    archive_entrybox: EntryBox,
    ei_store: ListStore,
) {
    archive_button.connect_clicked(move |_this| {
        let archive_path = match select_dir_dialog(
            "Select a DRS archive",
            WindowType::Popup,
            gtk::FileChooserAction::Open,
        ) {
            Some(p) => p,
            _ => return,
        };
        // Silently abort on non-UTF-8 paths or unreadable archives.
        let archive_path = match archive_path.to_str() {
            Some(p) => p,
            _ => return,
        };
        let arch = match Archive::read_from_file(archive_path) {
            Ok(a) => a,
            _ => return,
        };
        // A successfully opened archive replaces the previous list contents
        // and makes extraction available.
        ei_store.clear();
        extract_button.set_sensitive(true);
        archive_entrybox.set_text(archive_path);
        for table in arch.tables.iter() {
            for entry in table.entries.iter() {
                // Render the raw byte count with a binary prefix (KiB, MiB, ...).
                let float_len = entry.file_size as f32;
                let formatted_size = match binary_prefix(float_len) {
                    Standalone(bytes) => format!("{} B", bytes),
                    Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
                };
                ei_store.insert_with_values(
                    None,
                    &[
                        Column::ID.into(),
                        Column::Type.into(),
                        Column::Size.into(),
                        Column::Offset.into(),
                    ],
                    &[
                        &entry.file_id.to_string(),
                        &table.header.file_extension(),
                        &formatted_size,
                        &format!("{:#X}", entry.file_offset),
                    ],
                );
            }
        }
        // Store the archive for the extract handler to use.
        archive.replace(Some(arch));
    });
}
/// Wire up the "Extract" button: on click, ask the user for a destination
/// directory and write the selected archive entries (or every entry, when
/// nothing is selected) out as individual files.
fn enable_extract_button(
    archive: Rc<Cell<Option<Archive>>>,
    extract_button: Button,
    entryinfo_tree: TreeView,
) {
    extract_button.connect_clicked(move |_this| {
        if let Some(dest_dir_path) = select_dir_dialog(
            "Select a directory to extract to",
            WindowType::Toplevel,
            gtk::FileChooserAction::SelectFolder,
        ) {
            // Temporarily take the archive out of the shared cell; it is
            // put back once extraction finishes.
            let arch = match archive.take() {
                Some(a) => a,
                _ => return,
            };
            let sel = entryinfo_tree.get_selection();
            let (mut sel_paths, model) = sel.get_selected_rows();
            // An empty selection means "extract everything": briefly select
            // all rows just to collect their tree paths.
            if sel_paths.is_empty() {
                sel.select_all();
                let (s, _) = sel.get_selected_rows();
                sel_paths = s;
                sel.unselect_all();
            }
            for sel_path in sel_paths {
                let iter = match model.get_iter(&sel_path) {
                    Some(i) => i,
                    _ => continue,
                };
                // Column 0 holds the numeric file ID rendered as a string.
                let val = model.get_value(&iter, 0);
                let name = val.get::<String>().expect(&format!(
                    "Unable to convert gtk::Type::String {:?} to a Rust String",
                    val
                ));
                for table in arch.tables.iter() {
                    let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
                        Some(d) => d,
                        _ => continue,
                    };
                    let mut output_filepath = dest_dir_path.clone();
                    output_filepath.push(name.replace("\\", "/"));
                    output_filepath.set_extension(table.header.file_extension());
                    let parent = output_filepath.parent().expect(&format!(
                        "Unable to determine parent path of {:?}",
                        &output_filepath
                    ));
                    fs::create_dir_all(&parent)
                        .expect("Failed to create necessary parent directories");
                    let mut f = OpenOptions::new()
                        .create(true)
                        .read(true)
                        .write(true)
                        .truncate(true)
                        .open(&output_filepath)
                        .expect(&format!(
                            "Failed to open file {:?} for writing",
                            output_filepath
                        ));
                    // `write` may write fewer bytes than requested; `write_all`
                    // retries until the whole entry is on disk.
                    f.write_all(data).expect("Failed to write data");
                }
            }
            // Restore the archive so subsequent clicks keep working.
            archive.replace(Some(arch));
        }
    });
}
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
// Values in the table are strings. They should be converted back
// to their original type to make the sort function work properly
fn convert_name(s: String) -> u32 {
s.parse::<u32>().unwrap()
}
fn convert_type(s: String) -> String |
fn convert_size(s: String) -> u32 {
let v = s.split(' ').collect::<Vec<&str>>();
let exp = match v.get(1) {
Some(&"B") => 0,
Some(&"KiB") => 1,
Some(&"MiB") => 2,
Some(&"GiB") => 3,
_ => panic!("Unable to convert size: `{}`", s),
};
(1024u32.pow(exp) as f32 * v[0].parse::<f32>().unwrap()) as u32
}
fn convert_offset(s: String) -> u32 {
u32::from_str_radix(&s[2..], 16).unwrap()
}
add_sort_func!(entryinfo_tree, ei_store, convert_name, Column::ID);
add_sort_func!(entryinfo_tree, ei_store, convert_type, Column::Type);
add_sort_func!(entryinfo_tree, ei_store, convert_size, Column::Size);
add_sort_func!(entryinfo_tree, ei_store, convert_offset, Column::Offset);
}
fn main() {
    // GTK must be initialised before any widget can be constructed.
    gtk::init().unwrap();
    // Build the widget tree from the Glade UI description compiled into the
    // binary.
    let builder = Builder::new();
    builder
        .add_from_string(include_str!("../ui.glade"))
        .unwrap();
    let window: Window = builder.get_object("main_window").unwrap();
    let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
    let archive_button: Button = builder.get_object("archive_file_button").unwrap();
    let extract_button: Button = builder.get_object("extract_button").unwrap();
    // Nothing can be extracted until an archive has been opened.
    extract_button.set_sensitive(false);
    let entryinfo_tree = {
        let t: TreeView = builder.get_object("entryinfo_tree").unwrap();
        let sel = t.get_selection();
        // Allow extracting several entries at once.
        sel.set_mode(gtk::SelectionMode::Multiple);
        t
    };
    window.set_title("DRS Studio");
    window.set_position(gtk::WindowPosition::Center);
    window.get_preferred_width();
    window.set_default_size(1440, 900);
    // One string column per displayed attribute: ID, Type, Size, Offset.
    let ei_store = ListStore::new(&[Type::String, Type::String, Type::String, Type::String]);
    entryinfo_tree.set_model(Some(&ei_store));
    entryinfo_tree.set_headers_visible(true);
    add_column!(entryinfo_tree, "ID", Column::ID.into());
    add_column!(entryinfo_tree, "Type", Column::Type.into());
    add_column!(entryinfo_tree, "Size", Column::Size.into());
    add_column!(entryinfo_tree, "Offset", Column::Offset.into());
    setup_tree(entryinfo_tree.clone(), extract_button.clone());
    // The currently open archive, shared between the two button callbacks.
    let archive: Rc<Cell<Option<Archive>>> = Rc::new(Cell::new(None));
    enable_sortable_cols(&ei_store, &entryinfo_tree);
    enable_archive_button(
        archive.clone(),
        extract_button.clone(),
        archive_button.clone(),
        archive_entrybox.clone(),
        ei_store,
    );
    enable_extract_button(archive.clone(), extract_button.clone(), entryinfo_tree);
    // Quit the main loop when the window is closed.
    window.connect_delete_event(|_, _| {
        gtk::main_quit();
        Inhibit(false)
    });
    window.show_all();
    gtk::main();
}
| {
s
} | identifier_body |
block.rs | //! Implementations of cryptographic attacks against block ciphers.
use utils::data::Data;
use utils::metrics;
use victims::block::{EcbOrCbc, EcbWithSuffix, EcbWithAffixes, EcbUserProfile, CbcCookie};
/// Determine whether a block cipher is using ECB or CBC mode.
///
/// Given a black box which encrypts (padded) user data under ECB mode or CBC
/// mode at random, detect which mode it is using.
pub fn is_ecb_mode(ecb_cbc_box: &mut EcbOrCbc) -> bool {
    // Even an empty message yields at least one padded block, so its
    // ciphertext length bounds the cipher's block size from above.
    let block_len = ecb_cbc_box.encrypt(&Data::new()).len();
    // A long run of identical bytes must produce repeated ciphertext blocks
    // under ECB; CBC's chaining prevents that.
    let probe = Data::from_bytes(vec![0; 10 * block_len]);
    metrics::has_repeated_blocks(&ecb_cbc_box.encrypt(&probe), block_len)
}
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data {
    // Determine the block size by repeatedly encrypting larger chunks of data until the output
    // jumps in length.
    let block_size;
    let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
    let mut cnt = 1;
    loop {
        let bytes = vec![0; cnt];
        let input = Data::from_bytes(bytes);
        let new_len = ecb_suffix_box.encrypt(&input).len();
        if new_len > base_len {
            // The first length jump is exactly one block of fresh padding.
            block_size = new_len - base_len;
            break;
        }
        cnt += 1;
    }
    // Confirm that ECB is being used.
    let test_bytes = vec![0; block_size * 10];
    let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
    assert!(metrics::has_repeated_blocks(&output, block_size));
    // Keep track of the suffix bytes that we have decrypted so far.
    let mut suffix = Vec::new();
    // Decrypt the suffix one byte at a time (the classic byte-at-a-time ECB
    // decryption attack).
    'outer: loop {
        // Pad the known suffix with null bytes until it finishes one byte before a block boundary.
        let num_bytes = block_size - 1 - (suffix.len() % block_size);
        let padding = vec![0; num_bytes];
        let mut padded_known = padding.clone();
        padded_known.extend_from_slice(&suffix);
        // Pass the padding into the box, and grab the encrypted block which corresponds to our
        // input block whose last byte we are trying to determine.
        let block_pos = padding.len() + suffix.len() + 1 - block_size;
        let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
        if output.len() <= block_pos + block_size {
            // We've retrieved the whole suffix, so break.
            break;
        }
        let block = &output.bytes()[block_pos..block_pos + block_size];
        // Compare the encrypted block against all the possible outputs that the block could
        // encrypt to, depending on its final byte.
        let partial_block = &padded_known[block_pos..];
        for byte in 0..256 {
            let mut test_block = partial_block.to_vec();
            test_block.push(byte as u8);
            let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
            if &output.bytes()[..block_size] == block {
                // Found the next suffix byte; move on to the next position.
                suffix.push(byte as u8);
                continue 'outer;
            }
        }
    }
    Data::from_bytes(suffix)
}
/// Find the length of an unknown prefix which is prepended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
    // Find the block in which the prefix ends, by finding the first block which is different upon
    // inserting a null byte.
    let empty = ecb_affixes_box.encrypt(&Data::new());
    let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]))
;
    let mut prefix_block = 0;
    for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
        if byte1 != byte2 {
            prefix_block = ix / block_size;
            break;
        }
    }
    // Now find the length of the prefix modulo the block size, by finding the smallest number of
    // null bytes we need to provide as input in order to produce repeated blocks.
    let mut prefix_len = block_size * prefix_block;
    for ix in 0..block_size {
        let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
        let output = ecb_affixes_box.encrypt(&repeats);
        // Two identical adjacent ciphertext blocks directly after the prefix
        // block mean our `ix` extra nulls have exactly filled that block.
        if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
            output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
            prefix_len += block_size - ix;
            break;
        }
    }
    prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
    // Determine the block size by repeatedly encrypting larger chunks of data until the output
    // jumps in length.
    let block_size;
    let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
    let mut cnt = 1;
    loop {
        let bytes = vec![0; cnt];
        let input = Data::from_bytes(bytes);
        let new_len = ecb_affixes_box.encrypt(&input).len();
        if new_len > base_len {
            block_size = new_len - base_len;
            break;
        }
        cnt += 1;
    }
    // Confirm that ECB is being used.
    let test_bytes = vec![0; block_size * 10];
    let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
    assert!(metrics::has_repeated_blocks(&output, block_size));
    // First, find the length of the prefix, which is currently unknown.
    let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
    // Keep track of the suffix bytes that we have decrypted so far.
    let mut suffix = Vec::new();
    // Decrypt the suffix one byte at a time, as in `find_ecb_suffix`, but
    // with all positions shifted by the prefix length.
    'outer: loop {
        // Pad the known suffix with null bytes until it finishes one byte before a block boundary.
        let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
        let padding = vec![0; num_bytes];
        // Model of the full plaintext so far: prefix (treated as nulls for
        // position bookkeeping), our padding, then the recovered suffix bytes.
        let mut padded_known = vec![0; prefix_len];
        padded_known.extend_from_slice(&padding);
        padded_known.extend_from_slice(&suffix);
        // Pass the padding into the box, and grab the encrypted block which corresponds to our
        // input block whose last byte we are trying to determine.
        let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
        let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
        if output.len() <= block_pos + block_size {
            // We've retrieved the whole suffix, so break.
            break;
        }
        let block = &output.bytes()[block_pos..block_pos + block_size];
        // Compare the encrypted block against all the possible outputs that the block could
        // encrypt to, depending on its final byte.
        let partial_block = &padded_known[block_pos..];
        // Extra nulls align the prefix to a block boundary, so our test block
        // starts exactly at ciphertext offset `output_start`.
        let extra_padding = block_size - (prefix_len % block_size);
        let output_start = prefix_len + extra_padding;
        for byte in 0..256 {
            let mut test_block = vec![0; block_size - (prefix_len % block_size)];
            test_block.extend_from_slice(partial_block);
            test_block.push(byte as u8);
            let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
            if &output.bytes()[output_start..output_start + block_size] == block {
                suffix.push(byte as u8);
                continue 'outer;
            }
        }
    }
    Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
    // Request three ordinary tokens whose 16-byte ECB blocks line up as:
    //
    //                        0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
    //    email@foo.com   --> email=email@foo. com&uid=10&role= user
    //    noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
    //    useless@madeup  --> email=useless@ma deup.com&uid=10& role=user
    //
    // Splicing blocks 1-2 of the first token, block 2 of the second and
    // block 3 of the third yields a ciphertext that decrypts to a profile
    // whose role field reads as admin:
    //
    //    email=email@foo.com&uid=10&role=admin&uid=10&rolrole=user
    let first = ecb_profile_box.make_token("email@foo.com");
    let second = ecb_profile_box.make_token("noone@fakeadmin");
    let third = ecb_profile_box.make_token("useless@madeup");
    let spliced: Vec<u8> = first.bytes()[..32]
        .iter()
        .chain(second.bytes()[16..32].iter())
        .chain(third.bytes()[32..].iter())
        .cloned()
        .collect();
    Data::from_bytes(spliced)
}
/// Create a token which the `CbcCookie` decodes into a cookie with admin privileges.
///
/// Given - a black box which, given an arbitrary string, escapes the metacharacters ';' and '='
/// from the input, then produces a cookie in the form
/// `comment1=cooking%20MCs;userdata=<user-data>;comment2=%20like%20a%20pound%20of%20bacon` and
/// encrypts the result under CBC mode.
///
/// This utilises a CBC bitflipping attack to create an admin token.
pub fn | (cbc_cookie_box: &CbcCookie) -> Data {
// First, provide the user data "aaaaaaaaaaaaaaaa:admin<true:aa<a" and get the
// resulting token as raw bytes.
let token = cbc_cookie_box.make_token("aaaaaaaaaaaaaaaa:admin<true:aa<a");
let mut bytes = token.bytes().to_vec();
// Now, by flipping some of the bits in this token, we can obtain an admin token. Specifically,
// in CBC mode, flipping a bit in one ciphertext block scrambles the block it occurs in, and
// reproduces the exact same edit in the following block after decryption. This means that by
// choosing the bits we flip to occur in the block immediately before the one containing
// ':admin<true:' we can edit ':' into ';' and '<' into '='. This requires flipping the final
// bit of each of bytes 32, 38, 43 and 46.
for position in &[32, 38, 43, 46] {
bytes[*position] ^= 1;
}
Data::from_bytes(bytes)
} | craft_cbc_admin_token | identifier_name |
block.rs | //! Implementations of cryptographic attacks against block ciphers.
use utils::data::Data;
use utils::metrics;
use victims::block::{EcbOrCbc, EcbWithSuffix, EcbWithAffixes, EcbUserProfile, CbcCookie};
/// Determine whether a block cipher is using ECB or CBC mode.
///
/// Given a black box which encrypts (padded) user data under ECB mode or CBC
/// mode at random, detect which mode it is using.
pub fn is_ecb_mode(ecb_cbc_box: &mut EcbOrCbc) -> bool {
    // The ciphertext of an empty message is all padding, which gives an
    // upper bound on the cipher's block size.
    let upper_bound = ecb_cbc_box.encrypt(&Data::new()).len();
    // Ten blocks' worth of zero bytes guarantees duplicated plaintext blocks,
    // which ECB - and only ECB - maps to duplicated ciphertext blocks.
    let zeroes = Data::from_bytes(vec![0; 10 * upper_bound]);
    let ciphertext = ecb_cbc_box.encrypt(&zeroes);
    metrics::has_repeated_blocks(&ciphertext, upper_bound)
}
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data {
    // Determine the block size by repeatedly encrypting larger chunks of data until the output
    // jumps in length.
    let block_size;
    let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
    let mut cnt = 1;
    loop {
        let bytes = vec![0; cnt];
        let input = Data::from_bytes(bytes);
        let new_len = ecb_suffix_box.encrypt(&input).len();
        if new_len > base_len {
            // The first length jump is exactly one block of fresh padding.
            block_size = new_len - base_len;
            break;
        }
        cnt += 1;
    }
    // Confirm that ECB is being used.
    let test_bytes = vec![0; block_size * 10];
    let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
    assert!(metrics::has_repeated_blocks(&output, block_size));
    // Keep track of the suffix bytes that we have decrypted so far.
    let mut suffix = Vec::new();
    // Decrypt the suffix one byte at a time (the classic byte-at-a-time ECB
    // decryption attack).
    'outer: loop {
        // Pad the known suffix with null bytes until it finishes one byte before a block boundary.
        let num_bytes = block_size - 1 - (suffix.len() % block_size);
        let padding = vec![0; num_bytes];
        let mut padded_known = padding.clone();
        padded_known.extend_from_slice(&suffix);
        // Pass the padding into the box, and grab the encrypted block which corresponds to our
        // input block whose last byte we are trying to determine.
        let block_pos = padding.len() + suffix.len() + 1 - block_size;
        let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
        if output.len() <= block_pos + block_size {
            // We've retrieved the whole suffix, so break.
            break;
        }
        let block = &output.bytes()[block_pos..block_pos + block_size];
        // Compare the encrypted block against all the possible outputs that the block could
        // encrypt to, depending on its final byte.
        let partial_block = &padded_known[block_pos..];
        for byte in 0..256 {
            let mut test_block = partial_block.to_vec();
            test_block.push(byte as u8);
            let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
            if &output.bytes()[..block_size] == block {
                // Found the next suffix byte; move on to the next position.
                suffix.push(byte as u8);
                continue 'outer;
            }
        }
    }
    Data::from_bytes(suffix)
}
/// Find the length of an unknown prefix which is prepended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
    // Find the block in which the prefix ends, by finding the first block which is different upon
    // inserting a null byte.
    let empty = ecb_affixes_box.encrypt(&Data::new());
    let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
    let mut prefix_block = 0;
    for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
        if byte1 != byte2 {
            prefix_block = ix / block_size;
            break;
        }
    }
    // Now find the length of the prefix modulo the block size, by finding the smallest number of
    // null bytes we need to provide as input in order to produce repeated blocks.
    let mut prefix_len = block_size * prefix_block;
    for ix in 0..block_size {
        let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
        let output = ecb_affixes_box.encrypt(&repeats);
        // Two identical adjacent ciphertext blocks directly after the prefix
        // block mean our `ix` extra nulls have exactly filled that block.
        if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
            output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
            prefix_len += block_size - ix;
            break;
        }
    }
    prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size); | test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
    // Ask for three ordinary tokens whose 16-byte ECB blocks line up as:
    //
    //                        0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
    //    email@foo.com   --> email=email@foo. com&uid=10&role= user
    //    noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
    //    useless@madeup  --> email=useless@ma deup.com&uid=10& role=user
    //
    // Stitching together blocks 1-2 of the first token, block 2 of the
    // second and block 3 of the third produces a ciphertext decrypting to:
    //
    //    email=email@foo.com&uid=10&role=admin&uid=10&rolrole=user
    let token1 = ecb_profile_box.make_token("email@foo.com");
    let token2 = ecb_profile_box.make_token("noone@fakeadmin");
    let token3 = ecb_profile_box.make_token("useless@madeup");
    let stitched = [
        &token1.bytes()[..32],
        &token2.bytes()[16..32],
        &token3.bytes()[32..],
    ]
    .concat();
    Data::from_bytes(stitched)
}
/// Create a token which the `CbcCookie` decodes into a cookie with admin privileges.
///
/// Given - a black box which, given an arbitrary string, escapes the metacharacters ';' and '='
/// from the input, then produces a cookie in the form
/// `comment1=cooking%20MCs;userdata=<user-data>;comment2=%20like%20a%20pound%20of%20bacon` and
/// encrypts the result under CBC mode.
///
/// This utilises a CBC bitflipping attack to create an admin token.
pub fn craft_cbc_admin_token(cbc_cookie_box: &CbcCookie) -> Data {
// First, provide the user data "aaaaaaaaaaaaaaaa:admin<true:aa<a" and get the
// resulting token as raw bytes.
let token = cbc_cookie_box.make_token("aaaaaaaaaaaaaaaa:admin<true:aa<a");
let mut bytes = token.bytes().to_vec();
// Now, by flipping some of the bits in this token, we can obtain an admin token. Specifically,
// in CBC mode, flipping a bit in one ciphertext block scrambles the block it occurs in, and
// reproduces the exact same edit in the following block after decryption. This means that by
// choosing the bits we flip to occur in the block immediately before the one containing
// ':admin<true:' we can edit ':' into ';' and '<' into '='. This requires flipping the final
// bit of each of bytes 32, 38, 43 and 46.
for position in &[32, 38, 43, 46] {
bytes[*position] ^= 1;
}
Data::from_bytes(bytes)
} | let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block); | random_line_split |
block.rs | //! Implementations of cryptographic attacks against block ciphers.
use utils::data::Data;
use utils::metrics;
use victims::block::{EcbOrCbc, EcbWithSuffix, EcbWithAffixes, EcbUserProfile, CbcCookie};
/// Determine whether a block cipher is using ECB or CBC mode.
///
/// Given a black box which encrypts (padded) user data under ECB mode or CBC mode at random,
/// detect which mode it is using.
pub fn is_ecb_mode(ecb_cbc_box: &mut EcbOrCbc) -> bool {
    // Find an upper bound on the block size of the cipher by encrypting some empty data.
    let block_size = ecb_cbc_box.encrypt(&Data::new()).len();
    // Provide some input data which will definitely result in repeated blocks under ECB mode.
    // CBC chains each block into the next, so it never repeats here.
    let input = Data::from_bytes(vec![0; 10 * block_size]);
    let encrypted = ecb_cbc_box.encrypt(&input);
    metrics::has_repeated_blocks(&encrypted, block_size)
}
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data |
/// Find the length of an unknown prefix which is appended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
// Find the block in which the prefix ends, by finding the first block which is different upon
// inserting a null byte.
let empty = ecb_affixes_box.encrypt(&Data::new());
let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
let mut prefix_block = 0;
for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
if byte1 != byte2 {
prefix_block = ix / block_size;
break;
}
}
// Now find the length of the prefix modulo the block size, by finding the smallest number of
// null bytes we need to provide as input in order to produce repeated blocks.
let mut prefix_len = block_size * prefix_block;
for ix in 0..block_size {
let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
let output = ecb_affixes_box.encrypt(&repeats);
if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
prefix_len += block_size - ix;
break;
}
}
prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size);
let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block);
test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
// Paste together non-admin tokens in order to create an admin token. This works by first
// asking for the following three tokens:
//
// 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
// email@foo.com --> email=email@foo. com&uid=10&role= user
// noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
// useless@madeup.com --> email=useless@ma deup.com&uid=10& role=user
//
// If we then take the first two blocks of the first token, the second block of the second
// token and the final block of the third token, and paste them together, we will end up with
// the following token:
//
// email=email@foo.com&uid=10&role=admin&uid=10&rolrole=user
let token1 = ecb_profile_box.make_token("email@foo.com");
let token2 = ecb_profile_box.make_token("noone@fakeadmin");
let token3 = ecb_profile_box.make_token("useless@madeup");
let mut new_token_bytes = Vec::with_capacity(4 * 16);
new_token_bytes.extend_from_slice(&token1.bytes()[..32]);
new_token_bytes.extend_from_slice(&token2.bytes()[16..32]);
new_token_bytes.extend_from_slice(&token3.bytes()[32..]);
Data::from_bytes(new_token_bytes)
}
/// Create a token which the `CbcCookie` decodes into a cookie with admin privileges.
///
/// Given - a black box which, given an arbitrary string, escapes the metacharacters ';' and '='
/// from the input, then produces a cookie in the form
/// `comment1=cooking%20MCs;userdata=<user-data>;comment2=%20like%20a%20pound%20of%20bacon` and
/// encrypts the result under CBC mode.
///
/// This utilises a CBC bitflipping attack to create an admin token.
pub fn craft_cbc_admin_token(cbc_cookie_box: &CbcCookie) -> Data {
// First, provide the user data "aaaaaaaaaaaaaaaa:admin<true:aa<a" and get the
// resulting token as raw bytes.
let token = cbc_cookie_box.make_token("aaaaaaaaaaaaaaaa:admin<true:aa<a");
let mut bytes = token.bytes().to_vec();
// Now, by flipping some of the bits in this token, we can obtain an admin token. Specifically,
// in CBC mode, flipping a bit in one ciphertext block scrambles the block it occurs in, and
// reproduces the exact same edit in the following block after decryption. This means that by
// choosing the bits we flip to occur in the block immediately before the one containing
// ':admin<true:' we can edit ':' into ';' and '<' into '='. This requires flipping the final
// bit of each of bytes 32, 38, 43 and 46.
for position in &[32, 38, 43, 46] {
bytes[*position] ^= 1;
}
Data::from_bytes(bytes)
} | {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_suffix_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = block_size - 1 - (suffix.len() % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = padding.clone();
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = padding.len() + suffix.len() + 1 - block_size;
let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
for byte in 0..256 {
let mut test_block = partial_block.to_vec();
test_block.push(byte as u8);
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[..block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
} | identifier_body |
block.rs | //! Implementations of cryptographic attacks against block ciphers.
use utils::data::Data;
use utils::metrics;
use victims::block::{EcbOrCbc, EcbWithSuffix, EcbWithAffixes, EcbUserProfile, CbcCookie};
/// Determine whether a block cipher is using ECB or CBC mode.
///
/// Given a black box which encrypts (padded) user data under ECB mode or CBC mode at random,
/// detect which mode it is using.
pub fn is_ecb_mode(ecb_cbc_box: &mut EcbOrCbc) -> bool {
// Find an upper bound on the block size of the cipher by encrypting some empty data.
let block_size = ecb_cbc_box.encrypt(&Data::new()).len();
// Provide some input data which will definitely result in repeated blocks under ECB mode.
let input = Data::from_bytes(vec![0; 10 * block_size]);
let encrypted = ecb_cbc_box.encrypt(&input);
metrics::has_repeated_blocks(&encrypted, block_size)
}
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_suffix_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = block_size - 1 - (suffix.len() % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = padding.clone();
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = padding.len() + suffix.len() + 1 - block_size;
let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
for byte in 0..256 {
let mut test_block = partial_block.to_vec();
test_block.push(byte as u8);
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[..block_size] == block |
}
}
Data::from_bytes(suffix)
}
/// Find the length of an unknown prefix which is appended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
// Find the block in which the prefix ends, by finding the first block which is different upon
// inserting a null byte.
let empty = ecb_affixes_box.encrypt(&Data::new());
let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
let mut prefix_block = 0;
for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
if byte1 != byte2 {
prefix_block = ix / block_size;
break;
}
}
// Now find the length of the prefix modulo the block size, by finding the smallest number of
// null bytes we need to provide as input in order to produce repeated blocks.
let mut prefix_len = block_size * prefix_block;
for ix in 0..block_size {
let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
let output = ecb_affixes_box.encrypt(&repeats);
if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
prefix_len += block_size - ix;
break;
}
}
prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size);
let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block);
test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
// Paste together non-admin tokens in order to create an admin token. This works by first
// asking for the following three tokens:
//
// 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
// email@foo.com --> email=email@foo. com&uid=10&role= user
// noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
// useless@madeup.com --> email=useless@ma deup.com&uid=10& role=user
//
// If we then take the first two blocks of the first token, the second block of the second
// token and the final block of the third token, and paste them together, we will end up with
// the following token:
//
// email=email@foo.com&uid=10&role=admin&uid=10&rolrole=user
let token1 = ecb_profile_box.make_token("email@foo.com");
let token2 = ecb_profile_box.make_token("noone@fakeadmin");
let token3 = ecb_profile_box.make_token("useless@madeup");
let mut new_token_bytes = Vec::with_capacity(4 * 16);
new_token_bytes.extend_from_slice(&token1.bytes()[..32]);
new_token_bytes.extend_from_slice(&token2.bytes()[16..32]);
new_token_bytes.extend_from_slice(&token3.bytes()[32..]);
Data::from_bytes(new_token_bytes)
}
/// Create a token which the `CbcCookie` decodes into a cookie with admin privileges.
///
/// Given - a black box which, given an arbitrary string, escapes the metacharacters ';' and '='
/// from the input, then produces a cookie in the form
/// `comment1=cooking%20MCs;userdata=<user-data>;comment2=%20like%20a%20pound%20of%20bacon` and
/// encrypts the result under CBC mode.
///
/// This utilises a CBC bitflipping attack to create an admin token.
pub fn craft_cbc_admin_token(cbc_cookie_box: &CbcCookie) -> Data {
// First, provide the user data "aaaaaaaaaaaaaaaa:admin<true:aa<a" and get the
// resulting token as raw bytes.
let token = cbc_cookie_box.make_token("aaaaaaaaaaaaaaaa:admin<true:aa<a");
let mut bytes = token.bytes().to_vec();
// Now, by flipping some of the bits in this token, we can obtain an admin token. Specifically,
// in CBC mode, flipping a bit in one ciphertext block scrambles the block it occurs in, and
// reproduces the exact same edit in the following block after decryption. This means that by
// choosing the bits we flip to occur in the block immediately before the one containing
// ':admin<true:' we can edit ':' into ';' and '<' into '='. This requires flipping the final
// bit of each of bytes 32, 38, 43 and 46.
for position in &[32, 38, 43, 46] {
bytes[*position] ^= 1;
}
Data::from_bytes(bytes)
} | {
suffix.push(byte as u8);
continue 'outer;
} | conditional_block |
emoji-picker-qt.py | #!/usr/bin/python
# Copyright (c) 2020 Maryushi3
import emoji_data_python as edp
import sys
import pyautogui
from Xlib import display
from PyQt5.QtWidgets import QApplication, QGridLayout, QLabel, QLineEdit, QScrollArea, QSizePolicy, QStackedLayout, QVBoxLayout, QWidget
from PyQt5.QtCore import QEvent, QSettings, Qt, pyqtSignal
from PyQt5.QtGui import QFont
from PyQt5 import QtTest
# globals
emojiGridLayout = None
mainWindow = None
emojiGridColumnCount = 5
emojiGridRowCount = 4
emojiToShowCount = 0
fullRowsCount = 0
lastRowEmojiCount = 0
emojiFontSize = 20
selectedEmojiPosition = list((0,0))
willExitOnItsOwn = False
selectedEmojiChar=''
settingsFile = None
historyList = []
foundAnyEmoji = True
layoutStack = None
font = QFont()
font.setPointSize(emojiFontSize)
# quits without a lag
def | ():
mainWindow.hide()
quit()
# gets mouse position from Xlib
def mousePosition():
pointerData = display.Display().screen().root.query_pointer()._data
return pointerData["root_x"], pointerData["root_y"]
# copies and pastes selected emoji
def execute_emoji(char):
add_char_to_history(char)
global willExitOnItsOwn
willExitOnItsOwn = True
mainWindow.hide()
QApplication.clipboard().setText(char)
pyautogui.hotkey("ctrl","v")
QtTest.QTest.qWait(250)
quit()
# fills grid with given char list and takes care of layout and counting
def fill_grid_with_char_list(charList):
# for wraparound
global emojiToShowCount
global fullRowsCount
global lastRowEmojiCount
emojiToShowCount = min(len(charList),(emojiGridColumnCount*emojiGridRowCount))
fullRowsCount = emojiToShowCount//emojiGridColumnCount
lastRowEmojiCount = emojiToShowCount%emojiGridColumnCount
global foundAnyEmoji
if emojiToShowCount>0:
foundAnyEmoji = True
layoutStack.setCurrentIndex(0)
else:
foundAnyEmoji = False
layoutStack.setCurrentIndex(1)
# clear grid
global emojiGridLayout
for i in reversed(range(emojiGridLayout.count())):
emojiGridLayout.itemAt(i).widget().setParent(None)
# fill with new chars
rowIdx = 0
colIdx = 0
for emoji in charList:
if rowIdx>emojiGridRowCount-1:
break;
label = QClickableLabel(emoji)
label.clicked.connect(execute_emoji)
label.setFont(font)
label.setAlignment(Qt.AlignCenter)
label.setMinimumHeight(49)
emojiGridLayout.addWidget(label,rowIdx,colIdx)
emojiGridLayout.setAlignment(label,Qt.AlignTop)
if colIdx < emojiGridColumnCount-1:
colIdx+=1
else:
colIdx=0
rowIdx+=1
emojiGridLayout.setContentsMargins(0,0,0,0)
emojiGridLayout.setHorizontalSpacing(0)
emojiGridLayout.setVerticalSpacing(0)
if emojiToShowCount>0:
highlight_emoji([0,0])
# searches for emoji, and passes them to fill_grid_with_char_list
def execute_search(text):
selectedEmoji = (0,0)
if not text or text.isspace():
fill_grid_with_history()
return
foundEmoji = edp.find_by_name(text)
charList = [emoji.char for emoji in foundEmoji]
fill_grid_with_char_list(charList)
# handles what to do after hovering over a given label
def emoji_hovered(hoveredLabel):
parentGrid = hoveredLabel.parentWidget().layout()
hoveredIndex = parentGrid.indexOf(hoveredLabel)
hoveredRow, hoveredColumn, _, _ = parentGrid.getItemPosition(hoveredIndex)
highlight_emoji([hoveredRow,hoveredColumn])
# selects, sets style and handles wraparound
def highlight_emoji(newPosition):
global selectedEmojiPosition
# grid is filled to a full rectangle (last row fills the window horizontally)
if lastRowEmojiCount==0:
if newPosition[0]<0:
newPosition[0]=fullRowsCount-1
elif newPosition[1]<0:
newPosition[1]=emojiGridColumnCount-1
elif newPosition[0]>fullRowsCount-1:
newPosition[0]=0
elif newPosition[1]>emojiGridColumnCount-1:
newPosition[1]=0
# last row is not full
else:
#horizontal wraparound through RIGHT edge for full rows
if (newPosition[0]<fullRowsCount) and (newPosition[1]>emojiGridColumnCount-1):
newPosition[1]=0
#horizontal wraparound through LEFT edge for full rows
elif (newPosition[0]<fullRowsCount) and (newPosition[1]<0):
newPosition[1]=emojiGridColumnCount-1
#horizontal wraparound through right edge for NON FULL rows
elif (newPosition[0]==fullRowsCount) and (newPosition[1]>lastRowEmojiCount-1) and ((selectedEmojiPosition[0]-newPosition[0])==0):
newPosition[1]=0
#horizontal wraparound through LEFT edge for NON FULL rows
elif (newPosition[0]>=fullRowsCount) and (newPosition[1]<0):
newPosition[1]=lastRowEmojiCount-1
#vertical wraparound through BOTTOM edge for full cols
elif (newPosition[0]>fullRowsCount) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=0
#vertical wraparound through TOP edge for full cols
elif (newPosition[0]<0) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=fullRowsCount
#vertical wraparound through BOTTOM edge for NON FULL cols
elif (newPosition[0]>fullRowsCount-1) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=0
#vertical wraparound through TOP edge for NON FULL cols
elif (newPosition[0]<0) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=fullRowsCount-1
oldPosition = selectedEmojiPosition
selectedEmojiPosition = newPosition
widgetToDeselect = emojiGridLayout.itemAtPosition(oldPosition[0],oldPosition[1])
if widgetToDeselect:
widgetToDeselect = widgetToDeselect.widget()
widgetToDeselect.setStyleSheet("")
global selectedEmojiChar
widgetToSelect = emojiGridLayout.itemAtPosition(selectedEmojiPosition[0],selectedEmojiPosition[1])
if widgetToSelect:
widgetToSelect = widgetToSelect.widget()
selectedEmojiChar = widgetToSelect.text()
widgetToSelect.setStyleSheet("QLabel{background-color: palette(highlight);}")
# handles direction where to move emoji selection
def move_selection(direction):
if direction=="right":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,1])])
elif direction=="left":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,-1])])
elif direction=="up":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [-1,0])])
elif direction=="down":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [1,0])])
# handles Esc
def on_key(key):
# test for a specific key
if key == Qt.Key_Escape:
quitNicely()
# adds given emoji to history and saves it to config file
def add_char_to_history(char):
global settingsFile
global historyList
if not historyList:
historyList = [char]
else:
if char in historyList:
historyList.remove(char)
tempList = [char]
tempList.extend(historyList)
historyList = tempList[:(emojiGridColumnCount*emojiGridRowCount)]
settingsFile.setValue('history/history',historyList)
# wrapper around filling the grid
def fill_grid_with_history():
fill_grid_with_char_list(historyList)
# main app window class with inits
class EmojiPickerWindow(QWidget):
def __init__(self):
super().__init__()
# focus handling
self.installEventFilter(self)
self.title = 'Emoji picker \(^o^)/'
self.width = 281
self.height = 251
# start with text box centered at mouse pointer position
self.left, self.top = mousePosition()
self.left -= self.width//2
self.top += (24-self.height)
self.initSettings()
self.initUI()
def initUI(self):
# topmost window layout
layout = QVBoxLayout()
global layoutStack
layoutStack = QStackedLayout()
layoutStackWidget = QWidget()
layoutStackWidget.setLayout(layoutStack)
# scroll area setup shenanigans
scrollArea = QScrollArea()
gridWidget = QWidget()
global emojiGridLayout
emojiGridLayout = QGridLayout(gridWidget)
emojiGridLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
# stretch grid to widget
for col in range(emojiGridColumnCount):
emojiGridLayout.setColumnStretch(col,1)
for row in range(emojiGridRowCount):
emojiGridLayout.setRowStretch(row,1)
scrollArea.setWidget(gridWidget)
scrollArea.setWidgetResizable(True)
layoutStack.addWidget(scrollArea)
# info to show when no emoji has been found
noEmojiFoundLabel = QLabel("No emoji found 🙁")
noEmojiFoundLabel.setAlignment(Qt.AlignCenter | Qt.AlignHCenter | Qt.AlignVCenter)
layoutStack.addWidget(noEmojiFoundLabel)
layout.addWidget(layoutStackWidget)
# fill with a placeholder for now (smiling or smile)
# execute_search('smil')
fill_grid_with_history()
# bottom text entry
lineEdit = QLineEditWithArrows()
lineEdit.textChanged.connect(execute_search)
layout.addWidget(lineEdit)
# align it to the bottom, so that it won't stay centered vertically
layout.setAlignment(lineEdit, Qt.AlignBottom)
self.setLayout(layout)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.setFixedSize(self.width, self.height)
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
# needed for filling the grid out from the outside
global mainWindow
mainWindow = self
# esc handling
self.keyPressed.connect(on_key)
self.show()
lineEdit.setFocus()
def initSettings(self):
global settingsFile
global historyList
settingsFile = QSettings("emoji-picker-qtpy", "history");
historyList = settingsFile.value('history/history')
# key handling
keyPressed = pyqtSignal(int)
def keyPressEvent(self, event):
super(EmojiPickerWindow, self).keyPressEvent(event)
self.keyPressed.emit(event.key())
# focus handling
global willExitOnItsOwn
def eventFilter(self, object, event):
if event.type()== QEvent.WindowDeactivate or event.type()== QEvent.FocusOut:
if (not willExitOnItsOwn):
quitNicely()
return False
# clickable label
class QClickableLabel(QLabel):
clicked=pyqtSignal(str)
def __init__(self, parent=None):
QLabel.__init__(self, parent)
def mousePressEvent(self, ev):
self.clicked.emit(self.text())
def enterEvent(self, ev):
emoji_hovered(self)
# keyboard handling override for QlineEdit
class QLineEditWithArrows(QLineEdit):
def keyPressEvent(self, ev):
global selectedEmojiChar
global foundAnyEmoji
if(ev.key() == Qt.Key_Right):
move_selection("right")
if(ev.key() == Qt.Key_Left):
move_selection("left")
if(ev.key() == Qt.Key_Up):
move_selection("up")
if(ev.key() == Qt.Key_Down):
move_selection("down")
if(ev.key() == Qt.Key_Return or ev.key() == Qt.Key_Enter):
if foundAnyEmoji:
execute_emoji(selectedEmojiChar)
else:
quitNicely()
if(ev.key() == Qt.Key_Tab):
pass
else:
QLineEdit.keyPressEvent(self,ev)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = EmojiPickerWindow()
sys.exit(app.exec_())
| quitNicely | identifier_name |
emoji-picker-qt.py | #!/usr/bin/python
# Copyright (c) 2020 Maryushi3
import emoji_data_python as edp
import sys
import pyautogui
from Xlib import display
from PyQt5.QtWidgets import QApplication, QGridLayout, QLabel, QLineEdit, QScrollArea, QSizePolicy, QStackedLayout, QVBoxLayout, QWidget
from PyQt5.QtCore import QEvent, QSettings, Qt, pyqtSignal
from PyQt5.QtGui import QFont
from PyQt5 import QtTest
# globals
emojiGridLayout = None
mainWindow = None
emojiGridColumnCount = 5
emojiGridRowCount = 4
emojiToShowCount = 0
fullRowsCount = 0
lastRowEmojiCount = 0
emojiFontSize = 20
selectedEmojiPosition = list((0,0))
willExitOnItsOwn = False
selectedEmojiChar=''
settingsFile = None
historyList = []
foundAnyEmoji = True
layoutStack = None
font = QFont()
font.setPointSize(emojiFontSize)
# quits without a lag
def quitNicely():
mainWindow.hide()
quit()
# gets mouse position from Xlib
def mousePosition():
pointerData = display.Display().screen().root.query_pointer()._data
return pointerData["root_x"], pointerData["root_y"]
# copies and pastes selected emoji
def execute_emoji(char):
add_char_to_history(char)
global willExitOnItsOwn
willExitOnItsOwn = True
mainWindow.hide()
QApplication.clipboard().setText(char)
pyautogui.hotkey("ctrl","v")
QtTest.QTest.qWait(250)
quit()
# fills grid with given char list and takes care of layout and counting
def fill_grid_with_char_list(charList):
# for wraparound
global emojiToShowCount
global fullRowsCount
global lastRowEmojiCount
emojiToShowCount = min(len(charList),(emojiGridColumnCount*emojiGridRowCount))
fullRowsCount = emojiToShowCount//emojiGridColumnCount
lastRowEmojiCount = emojiToShowCount%emojiGridColumnCount
global foundAnyEmoji
if emojiToShowCount>0:
foundAnyEmoji = True
layoutStack.setCurrentIndex(0)
else:
|
# clear grid
global emojiGridLayout
for i in reversed(range(emojiGridLayout.count())):
emojiGridLayout.itemAt(i).widget().setParent(None)
# fill with new chars
rowIdx = 0
colIdx = 0
for emoji in charList:
if rowIdx>emojiGridRowCount-1:
break;
label = QClickableLabel(emoji)
label.clicked.connect(execute_emoji)
label.setFont(font)
label.setAlignment(Qt.AlignCenter)
label.setMinimumHeight(49)
emojiGridLayout.addWidget(label,rowIdx,colIdx)
emojiGridLayout.setAlignment(label,Qt.AlignTop)
if colIdx < emojiGridColumnCount-1:
colIdx+=1
else:
colIdx=0
rowIdx+=1
emojiGridLayout.setContentsMargins(0,0,0,0)
emojiGridLayout.setHorizontalSpacing(0)
emojiGridLayout.setVerticalSpacing(0)
if emojiToShowCount>0:
highlight_emoji([0,0])
# searches for emoji, and passes them to fill_grid_with_char_list
def execute_search(text):
selectedEmoji = (0,0)
if not text or text.isspace():
fill_grid_with_history()
return
foundEmoji = edp.find_by_name(text)
charList = [emoji.char for emoji in foundEmoji]
fill_grid_with_char_list(charList)
# handles what to do after hovering over a given label
def emoji_hovered(hoveredLabel):
parentGrid = hoveredLabel.parentWidget().layout()
hoveredIndex = parentGrid.indexOf(hoveredLabel)
hoveredRow, hoveredColumn, _, _ = parentGrid.getItemPosition(hoveredIndex)
highlight_emoji([hoveredRow,hoveredColumn])
# selects, sets style and handles wraparound
def highlight_emoji(newPosition):
global selectedEmojiPosition
# grid is filled to a full rectangle (last row fills the window horizontally)
if lastRowEmojiCount==0:
if newPosition[0]<0:
newPosition[0]=fullRowsCount-1
elif newPosition[1]<0:
newPosition[1]=emojiGridColumnCount-1
elif newPosition[0]>fullRowsCount-1:
newPosition[0]=0
elif newPosition[1]>emojiGridColumnCount-1:
newPosition[1]=0
# last row is not full
else:
#horizontal wraparound through RIGHT edge for full rows
if (newPosition[0]<fullRowsCount) and (newPosition[1]>emojiGridColumnCount-1):
newPosition[1]=0
#horizontal wraparound through LEFT edge for full rows
elif (newPosition[0]<fullRowsCount) and (newPosition[1]<0):
newPosition[1]=emojiGridColumnCount-1
#horizontal wraparound through right edge for NON FULL rows
elif (newPosition[0]==fullRowsCount) and (newPosition[1]>lastRowEmojiCount-1) and ((selectedEmojiPosition[0]-newPosition[0])==0):
newPosition[1]=0
#horizontal wraparound through LEFT edge for NON FULL rows
elif (newPosition[0]>=fullRowsCount) and (newPosition[1]<0):
newPosition[1]=lastRowEmojiCount-1
#vertical wraparound through BOTTOM edge for full cols
elif (newPosition[0]>fullRowsCount) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=0
#vertical wraparound through TOP edge for full cols
elif (newPosition[0]<0) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=fullRowsCount
#vertical wraparound through BOTTOM edge for NON FULL cols
elif (newPosition[0]>fullRowsCount-1) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=0
#vertical wraparound through TOP edge for NON FULL cols
elif (newPosition[0]<0) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=fullRowsCount-1
oldPosition = selectedEmojiPosition
selectedEmojiPosition = newPosition
widgetToDeselect = emojiGridLayout.itemAtPosition(oldPosition[0],oldPosition[1])
if widgetToDeselect:
widgetToDeselect = widgetToDeselect.widget()
widgetToDeselect.setStyleSheet("")
global selectedEmojiChar
widgetToSelect = emojiGridLayout.itemAtPosition(selectedEmojiPosition[0],selectedEmojiPosition[1])
if widgetToSelect:
widgetToSelect = widgetToSelect.widget()
selectedEmojiChar = widgetToSelect.text()
widgetToSelect.setStyleSheet("QLabel{background-color: palette(highlight);}")
# handles direction where to move emoji selection
def move_selection(direction):
if direction=="right":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,1])])
elif direction=="left":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,-1])])
elif direction=="up":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [-1,0])])
elif direction=="down":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [1,0])])
# handles Esc
def on_key(key):
# test for a specific key
if key == Qt.Key_Escape:
quitNicely()
# adds given emoji to history and saves it to config file
def add_char_to_history(char):
global settingsFile
global historyList
if not historyList:
historyList = [char]
else:
if char in historyList:
historyList.remove(char)
tempList = [char]
tempList.extend(historyList)
historyList = tempList[:(emojiGridColumnCount*emojiGridRowCount)]
settingsFile.setValue('history/history',historyList)
# wrapper around filling the grid
def fill_grid_with_history():
fill_grid_with_char_list(historyList)
# main app window class with inits
class EmojiPickerWindow(QWidget):
def __init__(self):
super().__init__()
# focus handling
self.installEventFilter(self)
self.title = 'Emoji picker \(^o^)/'
self.width = 281
self.height = 251
# start with text box centered at mouse pointer position
self.left, self.top = mousePosition()
self.left -= self.width//2
self.top += (24-self.height)
self.initSettings()
self.initUI()
def initUI(self):
# topmost window layout
layout = QVBoxLayout()
global layoutStack
layoutStack = QStackedLayout()
layoutStackWidget = QWidget()
layoutStackWidget.setLayout(layoutStack)
# scroll area setup shenanigans
scrollArea = QScrollArea()
gridWidget = QWidget()
global emojiGridLayout
emojiGridLayout = QGridLayout(gridWidget)
emojiGridLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
# stretch grid to widget
for col in range(emojiGridColumnCount):
emojiGridLayout.setColumnStretch(col,1)
for row in range(emojiGridRowCount):
emojiGridLayout.setRowStretch(row,1)
scrollArea.setWidget(gridWidget)
scrollArea.setWidgetResizable(True)
layoutStack.addWidget(scrollArea)
# info to show when no emoji has been found
noEmojiFoundLabel = QLabel("No emoji found 🙁")
noEmojiFoundLabel.setAlignment(Qt.AlignCenter | Qt.AlignHCenter | Qt.AlignVCenter)
layoutStack.addWidget(noEmojiFoundLabel)
layout.addWidget(layoutStackWidget)
# fill with a placeholder for now (smiling or smile)
# execute_search('smil')
fill_grid_with_history()
# bottom text entry
lineEdit = QLineEditWithArrows()
lineEdit.textChanged.connect(execute_search)
layout.addWidget(lineEdit)
# align it to the bottom, so that it won't stay centered vertically
layout.setAlignment(lineEdit, Qt.AlignBottom)
self.setLayout(layout)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.setFixedSize(self.width, self.height)
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
# needed for filling the grid out from the outside
global mainWindow
mainWindow = self
# esc handling
self.keyPressed.connect(on_key)
self.show()
lineEdit.setFocus()
def initSettings(self):
global settingsFile
global historyList
settingsFile = QSettings("emoji-picker-qtpy", "history");
historyList = settingsFile.value('history/history')
# key handling
keyPressed = pyqtSignal(int)
def keyPressEvent(self, event):
super(EmojiPickerWindow, self).keyPressEvent(event)
self.keyPressed.emit(event.key())
# focus handling
global willExitOnItsOwn
def eventFilter(self, object, event):
if event.type()== QEvent.WindowDeactivate or event.type()== QEvent.FocusOut:
if (not willExitOnItsOwn):
quitNicely()
return False
# clickable label
class QClickableLabel(QLabel):
clicked=pyqtSignal(str)
def __init__(self, parent=None):
QLabel.__init__(self, parent)
def mousePressEvent(self, ev):
self.clicked.emit(self.text())
def enterEvent(self, ev):
emoji_hovered(self)
# keyboard handling override for QlineEdit
class QLineEditWithArrows(QLineEdit):
def keyPressEvent(self, ev):
global selectedEmojiChar
global foundAnyEmoji
if(ev.key() == Qt.Key_Right):
move_selection("right")
if(ev.key() == Qt.Key_Left):
move_selection("left")
if(ev.key() == Qt.Key_Up):
move_selection("up")
if(ev.key() == Qt.Key_Down):
move_selection("down")
if(ev.key() == Qt.Key_Return or ev.key() == Qt.Key_Enter):
if foundAnyEmoji:
execute_emoji(selectedEmojiChar)
else:
quitNicely()
if(ev.key() == Qt.Key_Tab):
pass
else:
QLineEdit.keyPressEvent(self,ev)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = EmojiPickerWindow()
sys.exit(app.exec_())
| foundAnyEmoji = False
layoutStack.setCurrentIndex(1) | conditional_block |
emoji-picker-qt.py | #!/usr/bin/python
# Copyright (c) 2020 Maryushi3
import emoji_data_python as edp
import sys
import pyautogui
from Xlib import display
from PyQt5.QtWidgets import QApplication, QGridLayout, QLabel, QLineEdit, QScrollArea, QSizePolicy, QStackedLayout, QVBoxLayout, QWidget
from PyQt5.QtCore import QEvent, QSettings, Qt, pyqtSignal
from PyQt5.QtGui import QFont
from PyQt5 import QtTest
# globals
emojiGridLayout = None
mainWindow = None
emojiGridColumnCount = 5
emojiGridRowCount = 4
emojiToShowCount = 0
fullRowsCount = 0
lastRowEmojiCount = 0
emojiFontSize = 20
selectedEmojiPosition = list((0,0))
willExitOnItsOwn = False
selectedEmojiChar=''
settingsFile = None
historyList = []
foundAnyEmoji = True
layoutStack = None
font = QFont()
font.setPointSize(emojiFontSize)
# quits without a lag
def quitNicely():
mainWindow.hide()
quit()
# gets mouse position from Xlib
def mousePosition():
pointerData = display.Display().screen().root.query_pointer()._data
return pointerData["root_x"], pointerData["root_y"]
# copies and pastes selected emoji
def execute_emoji(char):
add_char_to_history(char)
global willExitOnItsOwn
willExitOnItsOwn = True
mainWindow.hide()
QApplication.clipboard().setText(char)
pyautogui.hotkey("ctrl","v")
QtTest.QTest.qWait(250)
quit()
# fills grid with given char list and takes care of layout and counting
def fill_grid_with_char_list(charList):
# for wraparound
global emojiToShowCount
global fullRowsCount
global lastRowEmojiCount
emojiToShowCount = min(len(charList),(emojiGridColumnCount*emojiGridRowCount))
fullRowsCount = emojiToShowCount//emojiGridColumnCount
lastRowEmojiCount = emojiToShowCount%emojiGridColumnCount
global foundAnyEmoji
if emojiToShowCount>0:
foundAnyEmoji = True
layoutStack.setCurrentIndex(0)
else:
foundAnyEmoji = False
layoutStack.setCurrentIndex(1)
# clear grid
global emojiGridLayout
for i in reversed(range(emojiGridLayout.count())):
emojiGridLayout.itemAt(i).widget().setParent(None)
# fill with new chars
rowIdx = 0
colIdx = 0
for emoji in charList:
if rowIdx>emojiGridRowCount-1:
break;
label = QClickableLabel(emoji)
label.clicked.connect(execute_emoji)
label.setFont(font)
label.setAlignment(Qt.AlignCenter)
label.setMinimumHeight(49)
emojiGridLayout.addWidget(label,rowIdx,colIdx)
emojiGridLayout.setAlignment(label,Qt.AlignTop)
if colIdx < emojiGridColumnCount-1:
colIdx+=1
else:
colIdx=0
rowIdx+=1
emojiGridLayout.setContentsMargins(0,0,0,0)
emojiGridLayout.setHorizontalSpacing(0)
emojiGridLayout.setVerticalSpacing(0)
if emojiToShowCount>0:
highlight_emoji([0,0])
# searches for emoji, and passes them to fill_grid_with_char_list
def execute_search(text):
|
# handles what to do after hovering over a given label
def emoji_hovered(hoveredLabel):
parentGrid = hoveredLabel.parentWidget().layout()
hoveredIndex = parentGrid.indexOf(hoveredLabel)
hoveredRow, hoveredColumn, _, _ = parentGrid.getItemPosition(hoveredIndex)
highlight_emoji([hoveredRow,hoveredColumn])
# selects, sets style and handles wraparound
def highlight_emoji(newPosition):
global selectedEmojiPosition
# grid is filled to a full rectangle (last row fills the window horizontally)
if lastRowEmojiCount==0:
if newPosition[0]<0:
newPosition[0]=fullRowsCount-1
elif newPosition[1]<0:
newPosition[1]=emojiGridColumnCount-1
elif newPosition[0]>fullRowsCount-1:
newPosition[0]=0
elif newPosition[1]>emojiGridColumnCount-1:
newPosition[1]=0
# last row is not full
else:
#horizontal wraparound through RIGHT edge for full rows
if (newPosition[0]<fullRowsCount) and (newPosition[1]>emojiGridColumnCount-1):
newPosition[1]=0
#horizontal wraparound through LEFT edge for full rows
elif (newPosition[0]<fullRowsCount) and (newPosition[1]<0):
newPosition[1]=emojiGridColumnCount-1
#horizontal wraparound through right edge for NON FULL rows
elif (newPosition[0]==fullRowsCount) and (newPosition[1]>lastRowEmojiCount-1) and ((selectedEmojiPosition[0]-newPosition[0])==0):
newPosition[1]=0
#horizontal wraparound through LEFT edge for NON FULL rows
elif (newPosition[0]>=fullRowsCount) and (newPosition[1]<0):
newPosition[1]=lastRowEmojiCount-1
#vertical wraparound through BOTTOM edge for full cols
elif (newPosition[0]>fullRowsCount) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=0
#vertical wraparound through TOP edge for full cols
elif (newPosition[0]<0) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=fullRowsCount
#vertical wraparound through BOTTOM edge for NON FULL cols
elif (newPosition[0]>fullRowsCount-1) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=0
#vertical wraparound through TOP edge for NON FULL cols
elif (newPosition[0]<0) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=fullRowsCount-1
oldPosition = selectedEmojiPosition
selectedEmojiPosition = newPosition
widgetToDeselect = emojiGridLayout.itemAtPosition(oldPosition[0],oldPosition[1])
if widgetToDeselect:
widgetToDeselect = widgetToDeselect.widget()
widgetToDeselect.setStyleSheet("")
global selectedEmojiChar
widgetToSelect = emojiGridLayout.itemAtPosition(selectedEmojiPosition[0],selectedEmojiPosition[1])
if widgetToSelect:
widgetToSelect = widgetToSelect.widget()
selectedEmojiChar = widgetToSelect.text()
widgetToSelect.setStyleSheet("QLabel{background-color: palette(highlight);}")
# handles direction where to move emoji selection
def move_selection(direction):
if direction=="right":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,1])])
elif direction=="left":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,-1])])
elif direction=="up":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [-1,0])])
elif direction=="down":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [1,0])])
# handles Esc
def on_key(key):
# test for a specific key
if key == Qt.Key_Escape:
quitNicely()
# adds given emoji to history and saves it to config file
def add_char_to_history(char):
global settingsFile
global historyList
if not historyList:
historyList = [char]
else:
if char in historyList:
historyList.remove(char)
tempList = [char]
tempList.extend(historyList)
historyList = tempList[:(emojiGridColumnCount*emojiGridRowCount)]
settingsFile.setValue('history/history',historyList)
# wrapper around filling the grid
def fill_grid_with_history():
fill_grid_with_char_list(historyList)
# main app window class with inits
class EmojiPickerWindow(QWidget):
def __init__(self):
super().__init__()
# focus handling
self.installEventFilter(self)
self.title = 'Emoji picker \(^o^)/'
self.width = 281
self.height = 251
# start with text box centered at mouse pointer position
self.left, self.top = mousePosition()
self.left -= self.width//2
self.top += (24-self.height)
self.initSettings()
self.initUI()
def initUI(self):
# topmost window layout
layout = QVBoxLayout()
global layoutStack
layoutStack = QStackedLayout()
layoutStackWidget = QWidget()
layoutStackWidget.setLayout(layoutStack)
# scroll area setup shenanigans
scrollArea = QScrollArea()
gridWidget = QWidget()
global emojiGridLayout
emojiGridLayout = QGridLayout(gridWidget)
emojiGridLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
# stretch grid to widget
for col in range(emojiGridColumnCount):
emojiGridLayout.setColumnStretch(col,1)
for row in range(emojiGridRowCount):
emojiGridLayout.setRowStretch(row,1)
scrollArea.setWidget(gridWidget)
scrollArea.setWidgetResizable(True)
layoutStack.addWidget(scrollArea)
# info to show when no emoji has been found
noEmojiFoundLabel = QLabel("No emoji found 🙁")
noEmojiFoundLabel.setAlignment(Qt.AlignCenter | Qt.AlignHCenter | Qt.AlignVCenter)
layoutStack.addWidget(noEmojiFoundLabel)
layout.addWidget(layoutStackWidget)
# fill with a placeholder for now (smiling or smile)
# execute_search('smil')
fill_grid_with_history()
# bottom text entry
lineEdit = QLineEditWithArrows()
lineEdit.textChanged.connect(execute_search)
layout.addWidget(lineEdit)
# align it to the bottom, so that it won't stay centered vertically
layout.setAlignment(lineEdit, Qt.AlignBottom)
self.setLayout(layout)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.setFixedSize(self.width, self.height)
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
# needed for filling the grid out from the outside
global mainWindow
mainWindow = self
# esc handling
self.keyPressed.connect(on_key)
self.show()
lineEdit.setFocus()
def initSettings(self):
global settingsFile
global historyList
settingsFile = QSettings("emoji-picker-qtpy", "history");
historyList = settingsFile.value('history/history')
# key handling
keyPressed = pyqtSignal(int)
def keyPressEvent(self, event):
super(EmojiPickerWindow, self).keyPressEvent(event)
self.keyPressed.emit(event.key())
# focus handling
global willExitOnItsOwn
def eventFilter(self, object, event):
if event.type()== QEvent.WindowDeactivate or event.type()== QEvent.FocusOut:
if (not willExitOnItsOwn):
quitNicely()
return False
# clickable label
class QClickableLabel(QLabel):
clicked=pyqtSignal(str)
def __init__(self, parent=None):
QLabel.__init__(self, parent)
def mousePressEvent(self, ev):
self.clicked.emit(self.text())
def enterEvent(self, ev):
emoji_hovered(self)
# keyboard handling override for QlineEdit
class QLineEditWithArrows(QLineEdit):
def keyPressEvent(self, ev):
global selectedEmojiChar
global foundAnyEmoji
if(ev.key() == Qt.Key_Right):
move_selection("right")
if(ev.key() == Qt.Key_Left):
move_selection("left")
if(ev.key() == Qt.Key_Up):
move_selection("up")
if(ev.key() == Qt.Key_Down):
move_selection("down")
if(ev.key() == Qt.Key_Return or ev.key() == Qt.Key_Enter):
if foundAnyEmoji:
execute_emoji(selectedEmojiChar)
else:
quitNicely()
if(ev.key() == Qt.Key_Tab):
pass
else:
QLineEdit.keyPressEvent(self,ev)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = EmojiPickerWindow()
sys.exit(app.exec_())
| selectedEmoji = (0,0)
if not text or text.isspace():
fill_grid_with_history()
return
foundEmoji = edp.find_by_name(text)
charList = [emoji.char for emoji in foundEmoji]
fill_grid_with_char_list(charList) | identifier_body |
emoji-picker-qt.py | #!/usr/bin/python
# Copyright (c) 2020 Maryushi3
import emoji_data_python as edp
import sys
import pyautogui
from Xlib import display
from PyQt5.QtWidgets import QApplication, QGridLayout, QLabel, QLineEdit, QScrollArea, QSizePolicy, QStackedLayout, QVBoxLayout, QWidget
from PyQt5.QtCore import QEvent, QSettings, Qt, pyqtSignal
from PyQt5.QtGui import QFont
from PyQt5 import QtTest
# globals
emojiGridLayout = None
mainWindow = None
emojiGridColumnCount = 5
emojiGridRowCount = 4
emojiToShowCount = 0
fullRowsCount = 0
lastRowEmojiCount = 0
emojiFontSize = 20
selectedEmojiPosition = list((0,0))
willExitOnItsOwn = False
selectedEmojiChar=''
settingsFile = None
historyList = []
foundAnyEmoji = True
layoutStack = None
font = QFont()
font.setPointSize(emojiFontSize)
# quits without a lag
def quitNicely():
mainWindow.hide()
quit() | return pointerData["root_x"], pointerData["root_y"]
# copies and pastes selected emoji
def execute_emoji(char):
add_char_to_history(char)
global willExitOnItsOwn
willExitOnItsOwn = True
mainWindow.hide()
QApplication.clipboard().setText(char)
pyautogui.hotkey("ctrl","v")
QtTest.QTest.qWait(250)
quit()
# fills grid with given char list and takes care of layout and counting
def fill_grid_with_char_list(charList):
# for wraparound
global emojiToShowCount
global fullRowsCount
global lastRowEmojiCount
emojiToShowCount = min(len(charList),(emojiGridColumnCount*emojiGridRowCount))
fullRowsCount = emojiToShowCount//emojiGridColumnCount
lastRowEmojiCount = emojiToShowCount%emojiGridColumnCount
global foundAnyEmoji
if emojiToShowCount>0:
foundAnyEmoji = True
layoutStack.setCurrentIndex(0)
else:
foundAnyEmoji = False
layoutStack.setCurrentIndex(1)
# clear grid
global emojiGridLayout
for i in reversed(range(emojiGridLayout.count())):
emojiGridLayout.itemAt(i).widget().setParent(None)
# fill with new chars
rowIdx = 0
colIdx = 0
for emoji in charList:
if rowIdx>emojiGridRowCount-1:
break;
label = QClickableLabel(emoji)
label.clicked.connect(execute_emoji)
label.setFont(font)
label.setAlignment(Qt.AlignCenter)
label.setMinimumHeight(49)
emojiGridLayout.addWidget(label,rowIdx,colIdx)
emojiGridLayout.setAlignment(label,Qt.AlignTop)
if colIdx < emojiGridColumnCount-1:
colIdx+=1
else:
colIdx=0
rowIdx+=1
emojiGridLayout.setContentsMargins(0,0,0,0)
emojiGridLayout.setHorizontalSpacing(0)
emojiGridLayout.setVerticalSpacing(0)
if emojiToShowCount>0:
highlight_emoji([0,0])
# searches for emoji, and passes them to fill_grid_with_char_list
def execute_search(text):
selectedEmoji = (0,0)
if not text or text.isspace():
fill_grid_with_history()
return
foundEmoji = edp.find_by_name(text)
charList = [emoji.char for emoji in foundEmoji]
fill_grid_with_char_list(charList)
# handles what to do after hovering over a given label
def emoji_hovered(hoveredLabel):
parentGrid = hoveredLabel.parentWidget().layout()
hoveredIndex = parentGrid.indexOf(hoveredLabel)
hoveredRow, hoveredColumn, _, _ = parentGrid.getItemPosition(hoveredIndex)
highlight_emoji([hoveredRow,hoveredColumn])
# selects, sets style and handles wraparound
def highlight_emoji(newPosition):
global selectedEmojiPosition
# grid is filled to a full rectangle (last row fills the window horizontally)
if lastRowEmojiCount==0:
if newPosition[0]<0:
newPosition[0]=fullRowsCount-1
elif newPosition[1]<0:
newPosition[1]=emojiGridColumnCount-1
elif newPosition[0]>fullRowsCount-1:
newPosition[0]=0
elif newPosition[1]>emojiGridColumnCount-1:
newPosition[1]=0
# last row is not full
else:
#horizontal wraparound through RIGHT edge for full rows
if (newPosition[0]<fullRowsCount) and (newPosition[1]>emojiGridColumnCount-1):
newPosition[1]=0
#horizontal wraparound through LEFT edge for full rows
elif (newPosition[0]<fullRowsCount) and (newPosition[1]<0):
newPosition[1]=emojiGridColumnCount-1
#horizontal wraparound through right edge for NON FULL rows
elif (newPosition[0]==fullRowsCount) and (newPosition[1]>lastRowEmojiCount-1) and ((selectedEmojiPosition[0]-newPosition[0])==0):
newPosition[1]=0
#horizontal wraparound through LEFT edge for NON FULL rows
elif (newPosition[0]>=fullRowsCount) and (newPosition[1]<0):
newPosition[1]=lastRowEmojiCount-1
#vertical wraparound through BOTTOM edge for full cols
elif (newPosition[0]>fullRowsCount) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=0
#vertical wraparound through TOP edge for full cols
elif (newPosition[0]<0) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=fullRowsCount
#vertical wraparound through BOTTOM edge for NON FULL cols
elif (newPosition[0]>fullRowsCount-1) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=0
#vertical wraparound through TOP edge for NON FULL cols
elif (newPosition[0]<0) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=fullRowsCount-1
oldPosition = selectedEmojiPosition
selectedEmojiPosition = newPosition
widgetToDeselect = emojiGridLayout.itemAtPosition(oldPosition[0],oldPosition[1])
if widgetToDeselect:
widgetToDeselect = widgetToDeselect.widget()
widgetToDeselect.setStyleSheet("")
global selectedEmojiChar
widgetToSelect = emojiGridLayout.itemAtPosition(selectedEmojiPosition[0],selectedEmojiPosition[1])
if widgetToSelect:
widgetToSelect = widgetToSelect.widget()
selectedEmojiChar = widgetToSelect.text()
widgetToSelect.setStyleSheet("QLabel{background-color: palette(highlight);}")
# handles direction where to move emoji selection
def move_selection(direction):
if direction=="right":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,1])])
elif direction=="left":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,-1])])
elif direction=="up":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [-1,0])])
elif direction=="down":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [1,0])])
# handles Esc
def on_key(key):
# test for a specific key
if key == Qt.Key_Escape:
quitNicely()
# adds given emoji to history and saves it to config file
def add_char_to_history(char):
global settingsFile
global historyList
if not historyList:
historyList = [char]
else:
if char in historyList:
historyList.remove(char)
tempList = [char]
tempList.extend(historyList)
historyList = tempList[:(emojiGridColumnCount*emojiGridRowCount)]
settingsFile.setValue('history/history',historyList)
# wrapper around filling the grid
def fill_grid_with_history():
fill_grid_with_char_list(historyList)
# main app window class with inits
class EmojiPickerWindow(QWidget):
def __init__(self):
super().__init__()
# focus handling
self.installEventFilter(self)
self.title = 'Emoji picker \(^o^)/'
self.width = 281
self.height = 251
# start with text box centered at mouse pointer position
self.left, self.top = mousePosition()
self.left -= self.width//2
self.top += (24-self.height)
self.initSettings()
self.initUI()
def initUI(self):
# topmost window layout
layout = QVBoxLayout()
global layoutStack
layoutStack = QStackedLayout()
layoutStackWidget = QWidget()
layoutStackWidget.setLayout(layoutStack)
# scroll area setup shenanigans
scrollArea = QScrollArea()
gridWidget = QWidget()
global emojiGridLayout
emojiGridLayout = QGridLayout(gridWidget)
emojiGridLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
# stretch grid to widget
for col in range(emojiGridColumnCount):
emojiGridLayout.setColumnStretch(col,1)
for row in range(emojiGridRowCount):
emojiGridLayout.setRowStretch(row,1)
scrollArea.setWidget(gridWidget)
scrollArea.setWidgetResizable(True)
layoutStack.addWidget(scrollArea)
# info to show when no emoji has been found
noEmojiFoundLabel = QLabel("No emoji found 🙁")
noEmojiFoundLabel.setAlignment(Qt.AlignCenter | Qt.AlignHCenter | Qt.AlignVCenter)
layoutStack.addWidget(noEmojiFoundLabel)
layout.addWidget(layoutStackWidget)
# fill with a placeholder for now (smiling or smile)
# execute_search('smil')
fill_grid_with_history()
# bottom text entry
lineEdit = QLineEditWithArrows()
lineEdit.textChanged.connect(execute_search)
layout.addWidget(lineEdit)
# align it to the bottom, so that it won't stay centered vertically
layout.setAlignment(lineEdit, Qt.AlignBottom)
self.setLayout(layout)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.setFixedSize(self.width, self.height)
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
# needed for filling the grid out from the outside
global mainWindow
mainWindow = self
# esc handling
self.keyPressed.connect(on_key)
self.show()
lineEdit.setFocus()
def initSettings(self):
global settingsFile
global historyList
settingsFile = QSettings("emoji-picker-qtpy", "history");
historyList = settingsFile.value('history/history')
# key handling
keyPressed = pyqtSignal(int)
def keyPressEvent(self, event):
super(EmojiPickerWindow, self).keyPressEvent(event)
self.keyPressed.emit(event.key())
# focus handling
global willExitOnItsOwn
def eventFilter(self, object, event):
if event.type()== QEvent.WindowDeactivate or event.type()== QEvent.FocusOut:
if (not willExitOnItsOwn):
quitNicely()
return False
# clickable label
class QClickableLabel(QLabel):
clicked=pyqtSignal(str)
def __init__(self, parent=None):
QLabel.__init__(self, parent)
def mousePressEvent(self, ev):
self.clicked.emit(self.text())
def enterEvent(self, ev):
emoji_hovered(self)
# keyboard handling override for QlineEdit
class QLineEditWithArrows(QLineEdit):
def keyPressEvent(self, ev):
global selectedEmojiChar
global foundAnyEmoji
if(ev.key() == Qt.Key_Right):
move_selection("right")
if(ev.key() == Qt.Key_Left):
move_selection("left")
if(ev.key() == Qt.Key_Up):
move_selection("up")
if(ev.key() == Qt.Key_Down):
move_selection("down")
if(ev.key() == Qt.Key_Return or ev.key() == Qt.Key_Enter):
if foundAnyEmoji:
execute_emoji(selectedEmojiChar)
else:
quitNicely()
if(ev.key() == Qt.Key_Tab):
pass
else:
QLineEdit.keyPressEvent(self,ev)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = EmojiPickerWindow()
sys.exit(app.exec_()) |
# gets mouse position from Xlib
def mousePosition():
pointerData = display.Display().screen().root.query_pointer()._data | random_line_split |
qasync.py | """ Tools for working with async queues and tasks.
These are mostly failed experiments, too much complexity. Futures based
techniques compose better and are only slightly more expensive in terms of
overheads. I'm keeping these for now, but probably they will be deleted.
"""
import asyncio
import queue
import logging
from types import SimpleNamespace
from concurrent.futures import ThreadPoolExecutor
from odc.ppt import EOS_MARKER
log = logging.getLogger(__name__)
async def async_q2q_map(func, q_in, q_out,
eos_marker=EOS_MARKER,
eos_passthrough=True,
**kwargs):
"""Like `map` but operating on values from/to queues.
Roughly equivalent to:
> while not end of stream:
> q_out.put(func(q_in.get(), **kwargs))
Processing stops when `eos_marker` object is observed on input, by
default `eos_marker` is passed through to output queue, but you can
disable that.
Calls `task_done()` method on input queue after result was copied to output queue.
Assumption is that mapping function doesn't raise exceptions, instead it
should return some sort of error object. If calling `func` does result
in an exception it will be caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
eos_passthrough: If True copy eos_marker to output queue before
terminating, if False then don't
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run upto `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
dt -- how much to sleep when dst is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow upto `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
# finally push through eos_marker unless asked not too
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
|
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0
loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def run_test(func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx = run(N, 20, 0.1)
assert len(xx) == N + 1
assert 1 < st.max_active <= 20
assert set(xx) == expect
st, xx = run(N, 4, 0.01)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
st, xx = run(N, 4, 0.01, eos_passthrough=False)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
def test_gen2q():
async def gen_func(idx, state):
if state.count >= state.max_count:
return EOS_MARKER
cc = state.count
state.count += 1
await asyncio.sleep(state.dt)
return cc
async def sink(q):
xx = []
while True:
x = await q.get()
if x is EOS_MARKER:
return xx
xx.append(x)
return xx
async def run_async(nconcurrent, max_count=100, dt=0.1):
state = SimpleNamespace(count=0,
max_count=max_count,
dt=dt)
gen = lambda idx: gen_func(idx, state)
q = asyncio.Queue(maxsize=10)
g2q = asyncio.ensure_future(gen2q_async(gen, q, nconcurrent))
xx = await sink(q)
return g2q.result(), xx
loop = asyncio.new_event_loop()
def run(*args, **kwargs):
return loop.run_until_complete(run_async(*args, **kwargs))
n, xx = run(10, max_count=100, dt=0.1)
assert len(xx) == n
assert len(xx) == 100
assert set(xx) == set(range(100))
| x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break | conditional_block |
qasync.py | """ Tools for working with async queues and tasks.
These are mostly failed experiments, too much complexity. Futures based
techniques compose better and are only slightly more expensive in terms of
overheads. I'm keeping these for now, but probably they will be deleted.
"""
import asyncio
import queue
import logging
from types import SimpleNamespace
from concurrent.futures import ThreadPoolExecutor
from odc.ppt import EOS_MARKER
log = logging.getLogger(__name__)
async def async_q2q_map(func, q_in, q_out,
eos_marker=EOS_MARKER,
eos_passthrough=True,
**kwargs):
"""Like `map` but operating on values from/to queues.
Roughly equivalent to:
> while not end of stream:
> q_out.put(func(q_in.get(), **kwargs))
Processing stops when `eos_marker` object is observed on input, by
default `eos_marker` is passed through to output queue, but you can
disable that.
Calls `task_done()` method on input queue after result was copied to output queue.
Assumption is that mapping function doesn't raise exceptions, instead it
should return some sort of error object. If calling `func` does result
in an exception it will be caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
eos_passthrough: If True copy eos_marker to output queue before
terminating, if False then don't
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run upto `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
dt -- how much to sleep when dst is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow upto `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
# finally push through eos_marker unless asked not too
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0
loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def | (func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx = run(N, 20, 0.1)
assert len(xx) == N + 1
assert 1 < st.max_active <= 20
assert set(xx) == expect
st, xx = run(N, 4, 0.01)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
st, xx = run(N, 4, 0.01, eos_passthrough=False)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
def test_gen2q():
async def gen_func(idx, state):
if state.count >= state.max_count:
return EOS_MARKER
cc = state.count
state.count += 1
await asyncio.sleep(state.dt)
return cc
async def sink(q):
xx = []
while True:
x = await q.get()
if x is EOS_MARKER:
return xx
xx.append(x)
return xx
async def run_async(nconcurrent, max_count=100, dt=0.1):
state = SimpleNamespace(count=0,
max_count=max_count,
dt=dt)
gen = lambda idx: gen_func(idx, state)
q = asyncio.Queue(maxsize=10)
g2q = asyncio.ensure_future(gen2q_async(gen, q, nconcurrent))
xx = await sink(q)
return g2q.result(), xx
loop = asyncio.new_event_loop()
def run(*args, **kwargs):
return loop.run_until_complete(run_async(*args, **kwargs))
n, xx = run(10, max_count=100, dt=0.1)
assert len(xx) == n
assert len(xx) == 100
assert set(xx) == set(range(100))
| run_test | identifier_name |
qasync.py | """ Tools for working with async queues and tasks.
These are mostly failed experiments, too much complexity. Futures based
techniques compose better and are only slightly more expensive in terms of
overheads. I'm keeping these for now, but probably they will be deleted.
"""
import asyncio
import queue
import logging
from types import SimpleNamespace
from concurrent.futures import ThreadPoolExecutor
from odc.ppt import EOS_MARKER
log = logging.getLogger(__name__)
async def async_q2q_map(func, q_in, q_out,
eos_marker=EOS_MARKER,
eos_passthrough=True,
**kwargs):
"""Like `map` but operating on values from/to queues.
Roughly equivalent to:
> while not end of stream:
> q_out.put(func(q_in.get(), **kwargs))
Processing stops when `eos_marker` object is observed on input, by
default `eos_marker` is passed through to output queue, but you can
disable that.
Calls `task_done()` method on input queue after result was copied to output queue.
Assumption is that mapping function doesn't raise exceptions, instead it
should return some sort of error object. If calling `func` does result
in an exception it will be caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
eos_passthrough: If True copy eos_marker to output queue before
terminating, if False then don't
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run upto `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
dt -- how much to sleep when dst is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow upto `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
# finally push through eos_marker unless asked not too
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0 | loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def run_test(func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx = run(N, 20, 0.1)
assert len(xx) == N + 1
assert 1 < st.max_active <= 20
assert set(xx) == expect
st, xx = run(N, 4, 0.01)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
st, xx = run(N, 4, 0.01, eos_passthrough=False)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
def test_gen2q():
async def gen_func(idx, state):
if state.count >= state.max_count:
return EOS_MARKER
cc = state.count
state.count += 1
await asyncio.sleep(state.dt)
return cc
async def sink(q):
xx = []
while True:
x = await q.get()
if x is EOS_MARKER:
return xx
xx.append(x)
return xx
async def run_async(nconcurrent, max_count=100, dt=0.1):
state = SimpleNamespace(count=0,
max_count=max_count,
dt=dt)
gen = lambda idx: gen_func(idx, state)
q = asyncio.Queue(maxsize=10)
g2q = asyncio.ensure_future(gen2q_async(gen, q, nconcurrent))
xx = await sink(q)
return g2q.result(), xx
loop = asyncio.new_event_loop()
def run(*args, **kwargs):
return loop.run_until_complete(run_async(*args, **kwargs))
n, xx = run(10, max_count=100, dt=0.1)
assert len(xx) == n
assert len(xx) == 100
assert set(xx) == set(range(100)) | random_line_split | |
qasync.py | """ Tools for working with async queues and tasks.
These are mostly failed experiments, too much complexity. Futures based
techniques compose better and are only slightly more expensive in terms of
overheads. I'm keeping these for now, but probably they will be deleted.
"""
import asyncio
import queue
import logging
from types import SimpleNamespace
from concurrent.futures import ThreadPoolExecutor
from odc.ppt import EOS_MARKER
log = logging.getLogger(__name__)
async def async_q2q_map(func, q_in, q_out,
eos_marker=EOS_MARKER,
eos_passthrough=True,
**kwargs):
"""Like `map` but operating on values from/to queues.
Roughly equivalent to:
> while not end of stream:
> q_out.put(func(q_in.get(), **kwargs))
Processing stops when `eos_marker` object is observed on input, by
default `eos_marker` is passed through to output queue, but you can
disable that.
Calls `task_done()` method on input queue after result was copied to output queue.
Assumption is that mapping function doesn't raise exceptions, instead it
should return some sort of error object. If calling `func` does result
in an exception it will be caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
eos_passthrough: If True copy eos_marker to output queue before
terminating, if False then don't
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run upto `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
dt -- how much to sleep when dst is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
|
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow upto `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
# finally push through eos_marker unless asked not too
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0
loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def run_test(func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx = run(N, 20, 0.1)
assert len(xx) == N + 1
assert 1 < st.max_active <= 20
assert set(xx) == expect
st, xx = run(N, 4, 0.01)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
st, xx = run(N, 4, 0.01, eos_passthrough=False)
assert len(xx) == N + 1
assert 1 < st.max_active <= 4
assert set(xx) == expect
def test_gen2q():
async def gen_func(idx, state):
if state.count >= state.max_count:
return EOS_MARKER
cc = state.count
state.count += 1
await asyncio.sleep(state.dt)
return cc
async def sink(q):
xx = []
while True:
x = await q.get()
if x is EOS_MARKER:
return xx
xx.append(x)
return xx
async def run_async(nconcurrent, max_count=100, dt=0.1):
state = SimpleNamespace(count=0,
max_count=max_count,
dt=dt)
gen = lambda idx: gen_func(idx, state)
q = asyncio.Queue(maxsize=10)
g2q = asyncio.ensure_future(gen2q_async(gen, q, nconcurrent))
xx = await sink(q)
return g2q.result(), xx
loop = asyncio.new_event_loop()
def run(*args, **kwargs):
return loop.run_until_complete(run_async(*args, **kwargs))
n, xx = run(10, max_count=100, dt=0.1)
assert len(xx) == n
assert len(xx) == 100
assert set(xx) == set(range(100))
| while not safe_put(x, dst):
await asyncio.sleep(dt) | identifier_body |
prxlistcache.go | // Package ais provides core functionality for the AIStore object storage.
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*/
package ais
import (
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cluster"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/cmn/mono"
"github.com/NVIDIA/aistore/hk"
"github.com/NVIDIA/aistore/memsys"
jsoniter "github.com/json-iterator/go"
)
// The motivation behind list-objects caching is to (drastically) reduce latency
// of listing large buckets by multiple users.
// This includes (but is not limited to) the AI use case when training workers execute the same
// logic and list the same dataset.
// When a user asks AIS proxy for the next N random objects (in a given order), the user cannot
// know where those objects are located in the cluster. In the worst-case scenario, all objects
// could reside on a single target. Hence, we query each target for the N (objects),
// merge-sort the results, and select the first N from it. Naively, we would be discarding the
// rest - cache, though, allows us /not to forget/ but use the results for the subsequent requests
// and across multiple users.
// A given cache instance is defined by the (bucket, prefix, fast) tuple. The main entry point is
// the next() method that returns the next N objects. Caches populate themselves from the storage
// targets on as-needed basis.
// The flow:
// - User asks for N objects
// For each target:
// next(N):
// - if N objects from the target are in cache return them
// - if 0 objects are in cache, fetch N objects to cache and return them
// - if 0 < x < N objects are in cache, fetch N objects to cache and return
// first N objects from cache
// objs = selectFirst(N, merge(targetResults))
// send objs to user
// Cache structure:
// listObjCache -> (bucket, prefix, fast - from smsg) -> TARGET ID -> locTarget
// Cache invalidation
// If error occurs when fetching information from targets, task's cache is invalidated.
// Otherwise cache is invalidated when the proxy is low on memory resources.
// User can explicitly invalidate cache (free the memory to the system) via API call.
const hkListObjectName = "list-objects-cache"
type (
// TODO: when starting to list, run XactBckLoadLomCache on each target async
listObjCache struct {
mtx sync.Mutex
p *proxyrunner
reqs map[string]*locReq // string(bck, prefix, fast) -> *locReq
}
locReq struct {
mtx sync.Mutex
targets map[string]*locTarget // target ID -> *locTarget
bck *cluster.Bck
parent *listObjCache
msg *cmn.SelectMsg
lastUsage int64
}
locTarget struct {
mtx sync.Mutex
parent *locReq
t *cluster.Snode
buff []*cmn.BucketEntry
done bool
}
locTargetResp struct {
status int
err error
list *cmn.BucketList
}
fetchResult struct {
err error
lists []*cmn.BucketList
allOK bool
}
)
var (
listCache *listObjCache
bucketPrefixStaleTime = 5 * cmn.GCO.Get().Client.ListObjects
)
func newListObjectsCache(p *proxyrunner) *listObjCache {
return &listObjCache{p: p, reqs: make(map[string]*locReq)}
}
func initListObjectsCache(p *proxyrunner) {
// ListObjects timeout was set to 0 in config.
// We should be housekeep from time to time anyway.
if bucketPrefixStaleTime == 0 {
bucketPrefixStaleTime = 5 * time.Minute
}
listCache = newListObjectsCache(p)
hk.Reg(hkListObjectName, func() time.Duration { return housekeepListCache(p) }, bucketPrefixStaleTime)
}
// TODO: Remove old entries, or those which take a lot of memory
// until MemPressure/PctMemUsed falls below some level.
func housekeepListCache(p *proxyrunner) time.Duration {
if p.gmm.MemPressure() <= memsys.MemPressureModerate {
return bucketPrefixStaleTime
}
now := mono.NanoTime()
listCache.mtx.Lock()
defer listCache.mtx.Unlock()
for k, v := range listCache.reqs {
if v.lastUsage+int64(bucketPrefixStaleTime) < now {
delete(listCache.reqs, k)
}
}
return bucketPrefixStaleTime
}
func newRequestCacheEntry(parent *listObjCache, bck *cluster.Bck, msg *cmn.SelectMsg) *locReq {
return &locReq{
parent: parent,
bck: bck,
targets: make(map[string]*locTarget),
msg: msg,
}
}
func newTargetCacheEntry(parent *locReq, t *cluster.Snode) *locTarget {
return &locTarget{parent: parent, t: t}
}
//////////////////////////
// listObjCache //
//////////////////////////
func (c *listObjCache) next(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, pageSize uint) (result fetchResult) {
cmn.Assert(smsg.UUID != "")
if smap.CountTargets() == 0 {
return fetchResult{err: fmt.Errorf("no targets registered")}
}
entries := c.allTargetsEntries(smsg, smap, bck)
cmn.Assert(len(entries) > 0)
entries[0].parent.mtx.Lock()
result = c.initResultsFromEntries(entries, smsg, pageSize, smsg.UUID)
if result.allOK && result.err == nil {
result = c.fetchAll(entries, smsg, pageSize)
}
entries[0].parent.mtx.Unlock()
c.mtx.Lock()
delete(c.reqs, smsg.ListObjectsCacheID(bck.Bck))
c.mtx.Unlock()
return result
}
func (c *listObjCache) targetEntry(t *cluster.Snode, smsg cmn.SelectMsg, bck *cluster.Bck) *locTarget {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
requestEntry, ok := c.reqs[id]
if !ok {
requestEntry = newRequestCacheEntry(c, bck, &smsg)
c.reqs[id] = requestEntry
}
c.mtx.Unlock()
defer func() {
requestEntry.lastUsage = mono.NanoTime()
}()
requestEntry.mtx.Lock()
targetEntry, ok := requestEntry.targets[t.ID()]
if !ok {
targetEntry = newTargetCacheEntry(requestEntry, t)
requestEntry.targets[t.ID()] = targetEntry
}
requestEntry.mtx.Unlock()
return targetEntry
}
func (c *listObjCache) leftovers(smsg cmn.SelectMsg, bck *cluster.Bck) map[string]*locTarget {
if smsg.Passthrough {
return nil
}
id := smsg.ListObjectsCacheID(bck.Bck)
requestEntry, ok := c.getRequestEntry(id)
if !ok {
return nil
}
// find pages that are unused or partially used
requestEntry.mtx.Lock()
defer requestEntry.mtx.Unlock()
tce := make(map[string]*locTarget)
for _, targetEntry := range requestEntry.targets {
targetEntry.mtx.Lock()
cnt := len(targetEntry.buff)
if cnt == 0 || cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[cnt-1].Name) {
targetEntry.mtx.Unlock()
continue
}
entry, ok := tce[targetEntry.t.ID()]
if !ok {
entry = &locTarget{parent: targetEntry.parent, t: targetEntry.t, buff: make([]*cmn.BucketEntry, 0)}
tce[targetEntry.t.ID()] = entry
}
// First case: the entire page was unused
if !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[0].Name) {
entry.buff = append(entry.buff, targetEntry.buff...)
targetEntry.mtx.Unlock()
continue
}
// Seconds case: partially used page
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[i].Name) }
idx := sort.Search(len(targetEntry.buff), cond)
entry.buff = append(entry.buff, targetEntry.buff[idx:]...)
targetEntry.mtx.Unlock()
}
return tce
}
func (c *listObjCache) allTargetsEntries(smsg cmn.SelectMsg, smap *cluster.Smap, bck *cluster.Bck) []*locTarget {
result := make([]*locTarget, 0, len(smap.Tmap))
// First, get the data from the cache that was not sent yet
partial := c.leftovers(smsg, bck)
for _, t := range smap.Tmap {
var (
targetLeftovers *locTarget
ok bool
)
if smsg.Passthrough {
// In passthrough mode we have to create "normal" but fake cache page.
reqEntry := newRequestCacheEntry(c, bck, &smsg)
entry := newTargetCacheEntry(reqEntry, t)
result = append(result, entry)
continue
}
if len(partial) != 0 {
targetLeftovers, ok = partial[t.ID()]
}
// If nothing is found for a target in the cache, initialize a new
// cache page. Without it, the new page leftovers can be lost.
if !ok || len(targetLeftovers.buff) == 0 {
targetEntry := c.targetEntry(t, smsg, bck)
result = append(result, targetEntry)
continue
}
// Order of pages in cache may be random. Sort them right away
less := func(i, j int) bool { return targetLeftovers.buff[i].Name < targetLeftovers.buff[j].Name }
sort.Slice(targetLeftovers.buff, less)
result = append(result, targetLeftovers)
}
return result
}
func (c *listObjCache) initResults(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, size uint, newUUID string) fetchResult {
entries := c.allTargetsEntries(smsg, smap, bck)
return c.initResultsFromEntries(entries, smsg, size, newUUID)
}
// initResultsFromEntries notifies targets to prepare next objects page.
// It returns information if all calls succeed, and if there were any errors.
func (c *listObjCache) initResultsFromEntries(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) fetchResult {
ch := c.initAllTargets(entries, smsg, size, newUUID)
return gatherTargetListObjsResults(smsg.UUID, ch, 0, &smsg)
}
// fetchAll returns next `size` object names from each target. It include additional information
// if all calls to targets succeeded and if there were any errors. It cache has buffered object names
// it might return results without making any API calls.
func (c *listObjCache) fetchAll(entries []*locTarget, smsg cmn.SelectMsg, size uint) fetchResult {
wg := &sync.WaitGroup{}
wg.Add(len(entries))
resCh := make(chan *locTargetResp, len(entries))
for _, entry := range entries {
entry.fetch(smsg, size, wg, resCh)
}
wg.Wait()
close(resCh)
return gatherTargetListObjsResults(smsg.UUID, resCh, len(entries), &smsg)
}
// Discard all entries of given task which were included in marker `until`.
func (c *listObjCache) discard(smsg *cmn.SelectMsg, bck *cluster.Bck) {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
delete(c.reqs, id)
c.mtx.Unlock()
}
func (c *listObjCache) | (cacheID string) (*locReq, bool) {
c.mtx.Lock()
req, ok := c.reqs[cacheID]
c.mtx.Unlock()
return req, ok
}
// Gathers init results for each target on `resultCh`
func (c *listObjCache) initAllTargets(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) (resultCh chan *locTargetResp) {
resultCh = make(chan *locTargetResp, len(entries))
wg := &sync.WaitGroup{}
wg.Add(len(entries))
for _, targetEntry := range entries {
targetEntry.init(smsg, size, wg, resultCh, newUUID)
}
wg.Wait()
close(resultCh)
return
}
//////////////////////////
// locTarget //
/////////////////////////
func (c *locTarget) init(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp, newUUID string) {
cacheSufficient := (uint(len(c.buff)) >= size && size != 0) || c.done
if !smsg.Passthrough && cacheSufficient {
// Everything that is requested is already in the cache, we don't have to do any API calls.
// Returning StatusOK as if we did a request.
resCh <- &locTargetResp{status: http.StatusOK, err: nil}
wg.Done()
return
}
// Make an actual call to the target.
go func() {
resCh <- c.initOnRemote(smsg, newUUID)
wg.Done()
}()
}
func (c *locTarget) initOnRemote(smsg cmn.SelectMsg, newUUID string) (result *locTargetResp) {
p := c.parent.parent.p
bck := c.parent.bck
_, q := p.initAsyncQuery(bck, &smsg, newUUID)
args := c.newListObjectsTaskMsg(smsg, bck, q) // Changes PageMarker to point to last element in buff.
status, err := c.renewTaskOnRemote(args)
return &locTargetResp{status: status, err: err}
}
// Returns next `size` objects or less if no more exists.
// If everything that is requested already is present in the cache, don't make any API calls.
func (c *locTarget) fetch(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp) {
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
// discard entries which somehow don't fit the request. They're name is smaller than pageMarker,
// which means that user already has them from previous requests.
bf := c.buff[j:]
// We have everything in cache or target has nothing more.
// We didn't do init request to the target.
if (uint(len(bf)) >= size && size != 0) || c.done {
if size == 0 {
size = uint(len(bf))
} else {
size = uint(cmn.Min(len(bf), int(size)))
}
resCh <- &locTargetResp{list: &cmn.BucketList{Entries: bf[:size]}, status: http.StatusOK}
wg.Done()
return
}
go func() {
resCh <- c.fetchFromRemote(smsg, size)
wg.Done()
}()
}
// TODO: gaps, overlaps
func (c *locTarget) mergePage(page []*cmn.BucketEntry) {
if len(page) == 0 {
return
}
l := len(c.buff)
if l == 0 {
c.buff = page
return
}
// the page preceds items in the cache
if !cmn.PageMarkerIncludesObject(c.buff[0].Name, page[len(page)-1].Name) {
c.buff = append(page, c.buff...)
return
}
// the page follows the cache
if !cmn.PageMarkerIncludesObject(c.buff[l-1].Name, page[0].Name) {
c.buff = append(c.buff, page...)
return
}
if glog.FastV(4, glog.SmoduleAIS) {
glog.Infof("Page %q : %q discarded", page[0].Name, page[len(page)-1].Name)
}
}
// Has to be called with Lock!
// Fetches objects from target, appends them to buffer and returns required number of objects.
func (c *locTarget) fetchFromRemote(smsg cmn.SelectMsg, size uint) *locTargetResp {
p := c.parent.parent.p
bck := c.parent.bck
args := c.newListObjectsTaskMsg(smsg, bck, newTaskResultQuery(bck.Bck))
args.req.Method = http.MethodPost
// Target prepare the final result.
res := p.call(*args)
preallocSize := cmn.DefaultListPageSize
if smsg.PageSize != 0 {
preallocSize = smsg.PageSize
}
if res.err != nil {
return &locTargetResp{list: nil, status: res.status, err: res.err}
}
if len(res.outjson) == 0 {
s := cmn.Min(int(size), len(c.buff))
if s == 0 {
s = len(c.buff)
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[:s]}, status: res.status, err: res.err}
}
bucketList := &cmn.BucketList{Entries: make([]*cmn.BucketEntry, 0, preallocSize)}
if err := jsoniter.Unmarshal(res.outjson, &bucketList); err != nil {
return &locTargetResp{list: nil, status: http.StatusInternalServerError, err: err}
}
res.outjson = nil
if len(bucketList.Entries) < int(size) || size == 0 {
c.done = true
}
if smsg.Passthrough {
return &locTargetResp{list: bucketList, status: http.StatusOK}
}
c.mtx.Lock()
c.mergePage(bucketList.Entries)
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
c.mtx.Unlock()
j = cmn.Max(j, 0)
if size != 0 {
last := cmn.Min(len(c.buff), int(size)+j)
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:last]}, status: http.StatusOK}
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:]}, status: http.StatusOK}
}
// Prepares callArgs for list object init or list objects result call.
// Should be called with Lock or RLock acquired.
func (c *locTarget) newListObjectsTaskMsg(smsg cmn.SelectMsg, bck *cluster.Bck, q url.Values) *callArgs {
p := c.parent.parent.p
if len(c.buff) > 0 {
// Request only new objects.
smsg.PageMarker = c.buff[len(c.buff)-1].Name
}
// Cache all props, filter only requested props later.
smsg.Props = strings.Join(cmn.GetPropsAll, ",")
var (
config = cmn.GCO.Get()
smap = p.owner.smap.get()
aisMsg = p.newAisMsg(&cmn.ActionMsg{Action: cmn.ActListObjects, Value: smsg}, smap, nil)
body = cmn.MustMarshal(aisMsg)
)
return &callArgs{
si: c.t,
req: cmn.ReqArgs{
Method: http.MethodPost,
Path: cmn.URLPath(cmn.Version, cmn.Buckets, bck.Name),
Query: q,
Body: body,
},
timeout: config.Timeout.MaxHostBusy + config.Timeout.CplaneOperation,
}
}
func (c *locTarget) renewTaskOnRemote(args *callArgs) (int, error) {
res := c.parent.parent.p.call(*args)
return res.status, res.err
}
func gatherTargetListObjsResults(uuid string, ch chan *locTargetResp, expectedListsSize int, smsg *cmn.SelectMsg) (result fetchResult) {
result.allOK = true
allNotFound := true
result.lists = make([]*cmn.BucketList, 0, expectedListsSize)
requestedProps := smsg.PropsSet()
for singleResult := range ch {
result.err = singleResult.err
if singleResult.status == http.StatusNotFound {
continue
}
allNotFound = false
if result.err != nil || singleResult.status != http.StatusOK {
result.allOK = false
break
}
result.lists = append(result.lists, filteredPropsList(singleResult.list, requestedProps))
}
if allNotFound {
result.allOK = false
result.err = fmt.Errorf("task %s %s", uuid, cmn.DoesNotExist)
}
return result
}
// Filters only requested props. New bucket list is allocated!
func filteredPropsList(list *cmn.BucketList, propsSet cmn.StringSet) (resultList *cmn.BucketList) {
if list == nil {
return nil
}
resultList = &cmn.BucketList{}
resultList.PageMarker = list.PageMarker
resultList.Entries = make([]*cmn.BucketEntry, len(list.Entries))
for i, entry := range list.Entries {
newEntry := &cmn.BucketEntry{}
resultList.Entries[i] = newEntry
newEntry.Flags = entry.Flags
newEntry.Name = entry.Name
if propsSet.Contains(cmn.GetPropsChecksum) {
newEntry.Checksum = entry.Checksum
}
if propsSet.Contains(cmn.GetPropsSize) {
newEntry.Size = entry.Size
}
if propsSet.Contains(cmn.GetPropsAtime) {
newEntry.Atime = entry.Atime
}
if propsSet.Contains(cmn.GetPropsVersion) {
newEntry.Version = entry.Version
}
if propsSet.Contains(cmn.GetTargetURL) {
newEntry.TargetURL = entry.TargetURL
}
if propsSet.Contains(cmn.GetPropsCopies) {
newEntry.Copies = entry.Copies
}
}
return resultList
}
func newTaskResultQuery(bck cmn.Bck) (q url.Values) {
q = cmn.AddBckToQuery(q, bck)
q.Set(cmn.URLParamTaskAction, cmn.TaskResult)
q.Set(cmn.URLParamSilent, "true")
return q
}
| getRequestEntry | identifier_name |
prxlistcache.go | // Package ais provides core functionality for the AIStore object storage.
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*/
package ais
import (
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cluster"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/cmn/mono"
"github.com/NVIDIA/aistore/hk"
"github.com/NVIDIA/aistore/memsys"
jsoniter "github.com/json-iterator/go"
)
// The motivation behind list-objects caching is to (drastically) reduce latency
// of listing large buckets by multiple users.
// This includes (but is not limited to) the AI use case when training workers execute the same
// logic and list the same dataset.
// When a user asks AIS proxy for the next N random objects (in a given order), the user cannot
// know where those objects are located in the cluster. In the worst-case scenario, all objects
// could reside on a single target. Hence, we query each target for the N (objects),
// merge-sort the results, and select the first N from it. Naively, we would be discarding the
// rest - cache, though, allows us /not to forget/ but use the results for the subsequent requests
// and across multiple users.
// A given cache instance is defined by the (bucket, prefix, fast) tuple. The main entry point is
// the next() method that returns the next N objects. Caches populate themselves from the storage
// targets on as-needed basis.
// The flow:
// - User asks for N objects
// For each target:
// next(N):
// - if N objects from the target are in cache return them
// - if 0 objects are in cache, fetch N objects to cache and return them
// - if 0 < x < N objects are in cache, fetch N objects to cache and return
// first N objects from cache
// objs = selectFirst(N, merge(targetResults))
// send objs to user
// Cache structure:
// listObjCache -> (bucket, prefix, fast - from smsg) -> TARGET ID -> locTarget
// Cache invalidation
// If error occurs when fetching information from targets, task's cache is invalidated.
// Otherwise cache is invalidated when the proxy is low on memory resources.
// User can explicitly invalidate cache (free the memory to the system) via API call.
const hkListObjectName = "list-objects-cache"
type (
// TODO: when starting to list, run XactBckLoadLomCache on each target async
listObjCache struct {
mtx sync.Mutex
p *proxyrunner
reqs map[string]*locReq // string(bck, prefix, fast) -> *locReq
}
locReq struct {
mtx sync.Mutex
targets map[string]*locTarget // target ID -> *locTarget
bck *cluster.Bck
parent *listObjCache
msg *cmn.SelectMsg
lastUsage int64
}
locTarget struct {
mtx sync.Mutex
parent *locReq
t *cluster.Snode
buff []*cmn.BucketEntry
done bool
}
locTargetResp struct {
status int
err error
list *cmn.BucketList
}
fetchResult struct {
err error
lists []*cmn.BucketList
allOK bool
}
)
var (
listCache *listObjCache
bucketPrefixStaleTime = 5 * cmn.GCO.Get().Client.ListObjects
)
func newListObjectsCache(p *proxyrunner) *listObjCache {
return &listObjCache{p: p, reqs: make(map[string]*locReq)}
}
func initListObjectsCache(p *proxyrunner) {
// ListObjects timeout was set to 0 in config.
// We should be housekeep from time to time anyway.
if bucketPrefixStaleTime == 0 {
bucketPrefixStaleTime = 5 * time.Minute
}
listCache = newListObjectsCache(p)
hk.Reg(hkListObjectName, func() time.Duration { return housekeepListCache(p) }, bucketPrefixStaleTime)
}
// TODO: Remove old entries, or those which take a lot of memory
// until MemPressure/PctMemUsed falls below some level.
func housekeepListCache(p *proxyrunner) time.Duration {
if p.gmm.MemPressure() <= memsys.MemPressureModerate {
return bucketPrefixStaleTime
}
now := mono.NanoTime()
listCache.mtx.Lock()
defer listCache.mtx.Unlock()
for k, v := range listCache.reqs {
if v.lastUsage+int64(bucketPrefixStaleTime) < now {
delete(listCache.reqs, k)
}
}
return bucketPrefixStaleTime
}
func newRequestCacheEntry(parent *listObjCache, bck *cluster.Bck, msg *cmn.SelectMsg) *locReq {
return &locReq{
parent: parent,
bck: bck,
targets: make(map[string]*locTarget),
msg: msg,
}
}
func newTargetCacheEntry(parent *locReq, t *cluster.Snode) *locTarget {
return &locTarget{parent: parent, t: t}
}
//////////////////////////
// listObjCache //
//////////////////////////
func (c *listObjCache) next(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, pageSize uint) (result fetchResult) {
cmn.Assert(smsg.UUID != "")
if smap.CountTargets() == 0 {
return fetchResult{err: fmt.Errorf("no targets registered")}
}
entries := c.allTargetsEntries(smsg, smap, bck)
cmn.Assert(len(entries) > 0)
entries[0].parent.mtx.Lock()
result = c.initResultsFromEntries(entries, smsg, pageSize, smsg.UUID)
if result.allOK && result.err == nil {
result = c.fetchAll(entries, smsg, pageSize)
}
entries[0].parent.mtx.Unlock()
c.mtx.Lock()
delete(c.reqs, smsg.ListObjectsCacheID(bck.Bck))
c.mtx.Unlock()
return result
}
func (c *listObjCache) targetEntry(t *cluster.Snode, smsg cmn.SelectMsg, bck *cluster.Bck) *locTarget {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
requestEntry, ok := c.reqs[id]
if !ok {
requestEntry = newRequestCacheEntry(c, bck, &smsg)
c.reqs[id] = requestEntry
}
c.mtx.Unlock()
defer func() {
requestEntry.lastUsage = mono.NanoTime()
}()
requestEntry.mtx.Lock()
targetEntry, ok := requestEntry.targets[t.ID()]
if !ok {
targetEntry = newTargetCacheEntry(requestEntry, t)
requestEntry.targets[t.ID()] = targetEntry
}
requestEntry.mtx.Unlock()
return targetEntry
}
func (c *listObjCache) leftovers(smsg cmn.SelectMsg, bck *cluster.Bck) map[string]*locTarget {
if smsg.Passthrough {
return nil
}
id := smsg.ListObjectsCacheID(bck.Bck)
requestEntry, ok := c.getRequestEntry(id)
if !ok {
return nil
}
// find pages that are unused or partially used
requestEntry.mtx.Lock()
defer requestEntry.mtx.Unlock()
tce := make(map[string]*locTarget)
for _, targetEntry := range requestEntry.targets {
targetEntry.mtx.Lock()
cnt := len(targetEntry.buff)
if cnt == 0 || cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[cnt-1].Name) {
targetEntry.mtx.Unlock()
continue
}
entry, ok := tce[targetEntry.t.ID()]
if !ok {
entry = &locTarget{parent: targetEntry.parent, t: targetEntry.t, buff: make([]*cmn.BucketEntry, 0)}
tce[targetEntry.t.ID()] = entry
}
// First case: the entire page was unused
if !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[0].Name) {
entry.buff = append(entry.buff, targetEntry.buff...)
targetEntry.mtx.Unlock()
continue
}
// Seconds case: partially used page
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[i].Name) }
idx := sort.Search(len(targetEntry.buff), cond)
entry.buff = append(entry.buff, targetEntry.buff[idx:]...)
targetEntry.mtx.Unlock()
}
return tce
}
func (c *listObjCache) allTargetsEntries(smsg cmn.SelectMsg, smap *cluster.Smap, bck *cluster.Bck) []*locTarget {
result := make([]*locTarget, 0, len(smap.Tmap))
// First, get the data from the cache that was not sent yet
partial := c.leftovers(smsg, bck)
for _, t := range smap.Tmap {
var (
targetLeftovers *locTarget
ok bool
)
if smsg.Passthrough {
// In passthrough mode we have to create "normal" but fake cache page.
reqEntry := newRequestCacheEntry(c, bck, &smsg)
entry := newTargetCacheEntry(reqEntry, t)
result = append(result, entry)
continue
}
if len(partial) != 0 {
targetLeftovers, ok = partial[t.ID()]
}
// If nothing is found for a target in the cache, initialize a new
// cache page. Without it, the new page leftovers can be lost.
if !ok || len(targetLeftovers.buff) == 0 {
targetEntry := c.targetEntry(t, smsg, bck)
result = append(result, targetEntry)
continue
}
// Order of pages in cache may be random. Sort them right away
less := func(i, j int) bool { return targetLeftovers.buff[i].Name < targetLeftovers.buff[j].Name }
sort.Slice(targetLeftovers.buff, less)
result = append(result, targetLeftovers)
}
return result
}
func (c *listObjCache) initResults(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, size uint, newUUID string) fetchResult {
entries := c.allTargetsEntries(smsg, smap, bck)
return c.initResultsFromEntries(entries, smsg, size, newUUID)
}
// initResultsFromEntries notifies targets to prepare next objects page.
// It returns information if all calls succeed, and if there were any errors.
func (c *listObjCache) initResultsFromEntries(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) fetchResult {
ch := c.initAllTargets(entries, smsg, size, newUUID)
return gatherTargetListObjsResults(smsg.UUID, ch, 0, &smsg)
}
// fetchAll returns next `size` object names from each target. It include additional information
// if all calls to targets succeeded and if there were any errors. It cache has buffered object names
// it might return results without making any API calls.
func (c *listObjCache) fetchAll(entries []*locTarget, smsg cmn.SelectMsg, size uint) fetchResult {
wg := &sync.WaitGroup{}
wg.Add(len(entries))
resCh := make(chan *locTargetResp, len(entries))
for _, entry := range entries {
entry.fetch(smsg, size, wg, resCh)
}
wg.Wait()
close(resCh)
return gatherTargetListObjsResults(smsg.UUID, resCh, len(entries), &smsg)
}
// Discard all entries of given task which were included in marker `until`.
func (c *listObjCache) discard(smsg *cmn.SelectMsg, bck *cluster.Bck) {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
delete(c.reqs, id)
c.mtx.Unlock()
}
func (c *listObjCache) getRequestEntry(cacheID string) (*locReq, bool) {
c.mtx.Lock()
req, ok := c.reqs[cacheID]
c.mtx.Unlock()
return req, ok
}
// Gathers init results for each target on `resultCh`
func (c *listObjCache) initAllTargets(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) (resultCh chan *locTargetResp) {
resultCh = make(chan *locTargetResp, len(entries))
wg := &sync.WaitGroup{}
wg.Add(len(entries))
for _, targetEntry := range entries {
targetEntry.init(smsg, size, wg, resultCh, newUUID)
}
wg.Wait()
close(resultCh)
return
}
//////////////////////////
// locTarget //
/////////////////////////
func (c *locTarget) init(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp, newUUID string) {
cacheSufficient := (uint(len(c.buff)) >= size && size != 0) || c.done
if !smsg.Passthrough && cacheSufficient {
// Everything that is requested is already in the cache, we don't have to do any API calls.
// Returning StatusOK as if we did a request.
resCh <- &locTargetResp{status: http.StatusOK, err: nil}
wg.Done()
return
}
// Make an actual call to the target.
go func() {
resCh <- c.initOnRemote(smsg, newUUID)
wg.Done()
}()
}
func (c *locTarget) initOnRemote(smsg cmn.SelectMsg, newUUID string) (result *locTargetResp) {
p := c.parent.parent.p
bck := c.parent.bck
_, q := p.initAsyncQuery(bck, &smsg, newUUID)
args := c.newListObjectsTaskMsg(smsg, bck, q) // Changes PageMarker to point to last element in buff.
status, err := c.renewTaskOnRemote(args)
return &locTargetResp{status: status, err: err}
}
// Returns next `size` objects or less if no more exists.
// If everything that is requested already is present in the cache, don't make any API calls.
func (c *locTarget) fetch(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp) {
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
// discard entries which somehow don't fit the request. They're name is smaller than pageMarker,
// which means that user already has them from previous requests.
bf := c.buff[j:]
// We have everything in cache or target has nothing more.
// We didn't do init request to the target.
if (uint(len(bf)) >= size && size != 0) || c.done {
if size == 0 {
size = uint(len(bf))
} else {
size = uint(cmn.Min(len(bf), int(size)))
}
resCh <- &locTargetResp{list: &cmn.BucketList{Entries: bf[:size]}, status: http.StatusOK}
wg.Done()
return
}
go func() {
resCh <- c.fetchFromRemote(smsg, size)
wg.Done()
}()
}
// TODO: gaps, overlaps
func (c *locTarget) mergePage(page []*cmn.BucketEntry) {
if len(page) == 0 {
return
}
l := len(c.buff)
if l == 0 {
c.buff = page
return
}
// the page preceds items in the cache
if !cmn.PageMarkerIncludesObject(c.buff[0].Name, page[len(page)-1].Name) {
c.buff = append(page, c.buff...)
return
}
// the page follows the cache
if !cmn.PageMarkerIncludesObject(c.buff[l-1].Name, page[0].Name) {
c.buff = append(c.buff, page...)
return
}
if glog.FastV(4, glog.SmoduleAIS) {
glog.Infof("Page %q : %q discarded", page[0].Name, page[len(page)-1].Name)
}
}
// Has to be called with Lock!
// Fetches objects from target, appends them to buffer and returns required number of objects.
func (c *locTarget) fetchFromRemote(smsg cmn.SelectMsg, size uint) *locTargetResp {
p := c.parent.parent.p
bck := c.parent.bck
args := c.newListObjectsTaskMsg(smsg, bck, newTaskResultQuery(bck.Bck))
args.req.Method = http.MethodPost
// Target prepare the final result.
res := p.call(*args)
preallocSize := cmn.DefaultListPageSize
if smsg.PageSize != 0 {
preallocSize = smsg.PageSize
}
if res.err != nil {
return &locTargetResp{list: nil, status: res.status, err: res.err}
}
if len(res.outjson) == 0 {
s := cmn.Min(int(size), len(c.buff))
if s == 0 {
s = len(c.buff)
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[:s]}, status: res.status, err: res.err}
}
bucketList := &cmn.BucketList{Entries: make([]*cmn.BucketEntry, 0, preallocSize)}
if err := jsoniter.Unmarshal(res.outjson, &bucketList); err != nil {
return &locTargetResp{list: nil, status: http.StatusInternalServerError, err: err}
}
res.outjson = nil
if len(bucketList.Entries) < int(size) || size == 0 {
c.done = true
}
if smsg.Passthrough {
return &locTargetResp{list: bucketList, status: http.StatusOK}
}
c.mtx.Lock()
c.mergePage(bucketList.Entries)
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
c.mtx.Unlock()
j = cmn.Max(j, 0)
if size != 0 {
last := cmn.Min(len(c.buff), int(size)+j)
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:last]}, status: http.StatusOK}
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:]}, status: http.StatusOK}
}
// Prepares callArgs for list object init or list objects result call.
// Should be called with Lock or RLock acquired.
func (c *locTarget) newListObjectsTaskMsg(smsg cmn.SelectMsg, bck *cluster.Bck, q url.Values) *callArgs {
p := c.parent.parent.p
if len(c.buff) > 0 {
// Request only new objects.
smsg.PageMarker = c.buff[len(c.buff)-1].Name
}
// Cache all props, filter only requested props later.
smsg.Props = strings.Join(cmn.GetPropsAll, ",")
var (
config = cmn.GCO.Get()
smap = p.owner.smap.get()
aisMsg = p.newAisMsg(&cmn.ActionMsg{Action: cmn.ActListObjects, Value: smsg}, smap, nil)
body = cmn.MustMarshal(aisMsg)
)
return &callArgs{
si: c.t,
req: cmn.ReqArgs{
Method: http.MethodPost,
Path: cmn.URLPath(cmn.Version, cmn.Buckets, bck.Name),
Query: q,
Body: body,
},
timeout: config.Timeout.MaxHostBusy + config.Timeout.CplaneOperation,
}
}
func (c *locTarget) renewTaskOnRemote(args *callArgs) (int, error) |
func gatherTargetListObjsResults(uuid string, ch chan *locTargetResp, expectedListsSize int, smsg *cmn.SelectMsg) (result fetchResult) {
result.allOK = true
allNotFound := true
result.lists = make([]*cmn.BucketList, 0, expectedListsSize)
requestedProps := smsg.PropsSet()
for singleResult := range ch {
result.err = singleResult.err
if singleResult.status == http.StatusNotFound {
continue
}
allNotFound = false
if result.err != nil || singleResult.status != http.StatusOK {
result.allOK = false
break
}
result.lists = append(result.lists, filteredPropsList(singleResult.list, requestedProps))
}
if allNotFound {
result.allOK = false
result.err = fmt.Errorf("task %s %s", uuid, cmn.DoesNotExist)
}
return result
}
// Filters only requested props. New bucket list is allocated!
func filteredPropsList(list *cmn.BucketList, propsSet cmn.StringSet) (resultList *cmn.BucketList) {
if list == nil {
return nil
}
resultList = &cmn.BucketList{}
resultList.PageMarker = list.PageMarker
resultList.Entries = make([]*cmn.BucketEntry, len(list.Entries))
for i, entry := range list.Entries {
newEntry := &cmn.BucketEntry{}
resultList.Entries[i] = newEntry
newEntry.Flags = entry.Flags
newEntry.Name = entry.Name
if propsSet.Contains(cmn.GetPropsChecksum) {
newEntry.Checksum = entry.Checksum
}
if propsSet.Contains(cmn.GetPropsSize) {
newEntry.Size = entry.Size
}
if propsSet.Contains(cmn.GetPropsAtime) {
newEntry.Atime = entry.Atime
}
if propsSet.Contains(cmn.GetPropsVersion) {
newEntry.Version = entry.Version
}
if propsSet.Contains(cmn.GetTargetURL) {
newEntry.TargetURL = entry.TargetURL
}
if propsSet.Contains(cmn.GetPropsCopies) {
newEntry.Copies = entry.Copies
}
}
return resultList
}
func newTaskResultQuery(bck cmn.Bck) (q url.Values) {
q = cmn.AddBckToQuery(q, bck)
q.Set(cmn.URLParamTaskAction, cmn.TaskResult)
q.Set(cmn.URLParamSilent, "true")
return q
}
| {
res := c.parent.parent.p.call(*args)
return res.status, res.err
} | identifier_body |
prxlistcache.go | // Package ais provides core functionality for the AIStore object storage.
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*/
package ais
import (
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cluster"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/cmn/mono"
"github.com/NVIDIA/aistore/hk"
"github.com/NVIDIA/aistore/memsys"
jsoniter "github.com/json-iterator/go"
)
// The motivation behind list-objects caching is to (drastically) reduce latency
// of listing large buckets by multiple users.
// This includes (but is not limited to) the AI use case when training workers execute the same
// logic and list the same dataset.
// When a user asks AIS proxy for the next N random objects (in a given order), the user cannot
// know where those objects are located in the cluster. In the worst-case scenario, all objects
// could reside on a single target. Hence, we query each target for the N (objects),
// merge-sort the results, and select the first N from it. Naively, we would be discarding the
// rest - cache, though, allows us /not to forget/ but use the results for the subsequent requests
// and across multiple users.
// A given cache instance is defined by the (bucket, prefix, fast) tuple. The main entry point is
// the next() method that returns the next N objects. Caches populate themselves from the storage
// targets on as-needed basis.
// The flow:
// - User asks for N objects
// For each target:
// next(N):
// - if N objects from the target are in cache return them
// - if 0 objects are in cache, fetch N objects to cache and return them
// - if 0 < x < N objects are in cache, fetch N objects to cache and return
// first N objects from cache
// objs = selectFirst(N, merge(targetResults))
// send objs to user
// Cache structure:
// listObjCache -> (bucket, prefix, fast - from smsg) -> TARGET ID -> locTarget
// Cache invalidation
// If error occurs when fetching information from targets, task's cache is invalidated.
// Otherwise cache is invalidated when the proxy is low on memory resources.
// User can explicitly invalidate cache (free the memory to the system) via API call.
const hkListObjectName = "list-objects-cache"
type (
// TODO: when starting to list, run XactBckLoadLomCache on each target async
listObjCache struct {
mtx sync.Mutex
p *proxyrunner
reqs map[string]*locReq // string(bck, prefix, fast) -> *locReq
}
locReq struct {
mtx sync.Mutex
targets map[string]*locTarget // target ID -> *locTarget
bck *cluster.Bck
parent *listObjCache
msg *cmn.SelectMsg
lastUsage int64
}
locTarget struct {
mtx sync.Mutex
parent *locReq
t *cluster.Snode
buff []*cmn.BucketEntry
done bool
}
locTargetResp struct {
status int
err error
list *cmn.BucketList
}
fetchResult struct {
err error
lists []*cmn.BucketList
allOK bool
}
)
var (
listCache *listObjCache
bucketPrefixStaleTime = 5 * cmn.GCO.Get().Client.ListObjects
)
func newListObjectsCache(p *proxyrunner) *listObjCache {
return &listObjCache{p: p, reqs: make(map[string]*locReq)}
}
func initListObjectsCache(p *proxyrunner) {
// ListObjects timeout was set to 0 in config.
// We should be housekeep from time to time anyway.
if bucketPrefixStaleTime == 0 {
bucketPrefixStaleTime = 5 * time.Minute
}
listCache = newListObjectsCache(p)
hk.Reg(hkListObjectName, func() time.Duration { return housekeepListCache(p) }, bucketPrefixStaleTime)
}
// TODO: Remove old entries, or those which take a lot of memory
// until MemPressure/PctMemUsed falls below some level.
func housekeepListCache(p *proxyrunner) time.Duration {
if p.gmm.MemPressure() <= memsys.MemPressureModerate {
return bucketPrefixStaleTime
}
now := mono.NanoTime()
listCache.mtx.Lock()
defer listCache.mtx.Unlock()
for k, v := range listCache.reqs {
if v.lastUsage+int64(bucketPrefixStaleTime) < now {
delete(listCache.reqs, k)
}
}
return bucketPrefixStaleTime
}
func newRequestCacheEntry(parent *listObjCache, bck *cluster.Bck, msg *cmn.SelectMsg) *locReq {
return &locReq{
parent: parent,
bck: bck,
targets: make(map[string]*locTarget),
msg: msg,
}
}
func newTargetCacheEntry(parent *locReq, t *cluster.Snode) *locTarget {
return &locTarget{parent: parent, t: t}
}
//////////////////////////
// listObjCache //
//////////////////////////
func (c *listObjCache) next(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, pageSize uint) (result fetchResult) {
cmn.Assert(smsg.UUID != "")
if smap.CountTargets() == 0 {
return fetchResult{err: fmt.Errorf("no targets registered")}
}
entries := c.allTargetsEntries(smsg, smap, bck)
cmn.Assert(len(entries) > 0)
entries[0].parent.mtx.Lock()
result = c.initResultsFromEntries(entries, smsg, pageSize, smsg.UUID)
if result.allOK && result.err == nil {
result = c.fetchAll(entries, smsg, pageSize)
}
entries[0].parent.mtx.Unlock()
c.mtx.Lock()
delete(c.reqs, smsg.ListObjectsCacheID(bck.Bck))
c.mtx.Unlock()
return result
}
func (c *listObjCache) targetEntry(t *cluster.Snode, smsg cmn.SelectMsg, bck *cluster.Bck) *locTarget {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
requestEntry, ok := c.reqs[id]
if !ok {
requestEntry = newRequestCacheEntry(c, bck, &smsg)
c.reqs[id] = requestEntry
}
c.mtx.Unlock()
defer func() {
requestEntry.lastUsage = mono.NanoTime()
}()
requestEntry.mtx.Lock()
targetEntry, ok := requestEntry.targets[t.ID()]
if !ok {
targetEntry = newTargetCacheEntry(requestEntry, t)
requestEntry.targets[t.ID()] = targetEntry
}
requestEntry.mtx.Unlock()
return targetEntry
}
func (c *listObjCache) leftovers(smsg cmn.SelectMsg, bck *cluster.Bck) map[string]*locTarget {
if smsg.Passthrough {
return nil
}
id := smsg.ListObjectsCacheID(bck.Bck)
requestEntry, ok := c.getRequestEntry(id)
if !ok {
return nil
}
// find pages that are unused or partially used
requestEntry.mtx.Lock()
defer requestEntry.mtx.Unlock()
tce := make(map[string]*locTarget)
for _, targetEntry := range requestEntry.targets {
targetEntry.mtx.Lock()
cnt := len(targetEntry.buff)
if cnt == 0 || cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[cnt-1].Name) {
targetEntry.mtx.Unlock()
continue
}
entry, ok := tce[targetEntry.t.ID()]
if !ok {
entry = &locTarget{parent: targetEntry.parent, t: targetEntry.t, buff: make([]*cmn.BucketEntry, 0)}
tce[targetEntry.t.ID()] = entry
}
// First case: the entire page was unused
if !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[0].Name) {
entry.buff = append(entry.buff, targetEntry.buff...)
targetEntry.mtx.Unlock()
continue
}
// Seconds case: partially used page
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[i].Name) }
idx := sort.Search(len(targetEntry.buff), cond)
entry.buff = append(entry.buff, targetEntry.buff[idx:]...)
targetEntry.mtx.Unlock()
}
return tce
}
func (c *listObjCache) allTargetsEntries(smsg cmn.SelectMsg, smap *cluster.Smap, bck *cluster.Bck) []*locTarget {
result := make([]*locTarget, 0, len(smap.Tmap))
// First, get the data from the cache that was not sent yet
partial := c.leftovers(smsg, bck)
for _, t := range smap.Tmap {
var (
targetLeftovers *locTarget
ok bool
)
if smsg.Passthrough {
// In passthrough mode we have to create "normal" but fake cache page.
reqEntry := newRequestCacheEntry(c, bck, &smsg)
entry := newTargetCacheEntry(reqEntry, t)
result = append(result, entry)
continue
}
if len(partial) != 0 {
targetLeftovers, ok = partial[t.ID()]
}
// If nothing is found for a target in the cache, initialize a new
// cache page. Without it, the new page leftovers can be lost.
if !ok || len(targetLeftovers.buff) == 0 {
targetEntry := c.targetEntry(t, smsg, bck)
result = append(result, targetEntry)
continue
}
// Order of pages in cache may be random. Sort them right away
less := func(i, j int) bool { return targetLeftovers.buff[i].Name < targetLeftovers.buff[j].Name }
sort.Slice(targetLeftovers.buff, less)
result = append(result, targetLeftovers)
}
return result
}
func (c *listObjCache) initResults(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, size uint, newUUID string) fetchResult {
entries := c.allTargetsEntries(smsg, smap, bck)
return c.initResultsFromEntries(entries, smsg, size, newUUID)
}
// initResultsFromEntries notifies targets to prepare next objects page.
// It returns information if all calls succeed, and if there were any errors.
func (c *listObjCache) initResultsFromEntries(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) fetchResult {
ch := c.initAllTargets(entries, smsg, size, newUUID)
return gatherTargetListObjsResults(smsg.UUID, ch, 0, &smsg)
}
// fetchAll returns next `size` object names from each target. It include additional information
// if all calls to targets succeeded and if there were any errors. It cache has buffered object names
// it might return results without making any API calls.
func (c *listObjCache) fetchAll(entries []*locTarget, smsg cmn.SelectMsg, size uint) fetchResult {
wg := &sync.WaitGroup{}
wg.Add(len(entries))
resCh := make(chan *locTargetResp, len(entries))
for _, entry := range entries {
entry.fetch(smsg, size, wg, resCh)
}
wg.Wait()
close(resCh)
return gatherTargetListObjsResults(smsg.UUID, resCh, len(entries), &smsg)
}
// Discard all entries of given task which were included in marker `until`.
func (c *listObjCache) discard(smsg *cmn.SelectMsg, bck *cluster.Bck) {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
delete(c.reqs, id)
c.mtx.Unlock()
}
func (c *listObjCache) getRequestEntry(cacheID string) (*locReq, bool) {
c.mtx.Lock()
req, ok := c.reqs[cacheID]
c.mtx.Unlock()
return req, ok
}
// Gathers init results for each target on `resultCh`
func (c *listObjCache) initAllTargets(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) (resultCh chan *locTargetResp) {
resultCh = make(chan *locTargetResp, len(entries))
wg := &sync.WaitGroup{}
wg.Add(len(entries))
for _, targetEntry := range entries {
targetEntry.init(smsg, size, wg, resultCh, newUUID)
}
wg.Wait()
close(resultCh)
return
}
//////////////////////////
// locTarget //
/////////////////////////
func (c *locTarget) init(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp, newUUID string) {
cacheSufficient := (uint(len(c.buff)) >= size && size != 0) || c.done
if !smsg.Passthrough && cacheSufficient {
// Everything that is requested is already in the cache, we don't have to do any API calls.
// Returning StatusOK as if we did a request.
resCh <- &locTargetResp{status: http.StatusOK, err: nil}
wg.Done()
return
}
// Make an actual call to the target.
go func() {
resCh <- c.initOnRemote(smsg, newUUID)
wg.Done()
}()
}
func (c *locTarget) initOnRemote(smsg cmn.SelectMsg, newUUID string) (result *locTargetResp) {
p := c.parent.parent.p
bck := c.parent.bck
_, q := p.initAsyncQuery(bck, &smsg, newUUID)
args := c.newListObjectsTaskMsg(smsg, bck, q) // Changes PageMarker to point to last element in buff.
status, err := c.renewTaskOnRemote(args)
return &locTargetResp{status: status, err: err}
}
// Returns next `size` objects or less if no more exists.
// If everything that is requested already is present in the cache, don't make any API calls.
func (c *locTarget) fetch(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp) {
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
// discard entries which somehow don't fit the request. They're name is smaller than pageMarker,
// which means that user already has them from previous requests.
bf := c.buff[j:]
// We have everything in cache or target has nothing more.
// We didn't do init request to the target.
if (uint(len(bf)) >= size && size != 0) || c.done {
if size == 0 {
size = uint(len(bf))
} else {
size = uint(cmn.Min(len(bf), int(size)))
}
resCh <- &locTargetResp{list: &cmn.BucketList{Entries: bf[:size]}, status: http.StatusOK}
wg.Done()
return
}
go func() {
resCh <- c.fetchFromRemote(smsg, size)
wg.Done()
}()
}
// TODO: gaps, overlaps
func (c *locTarget) mergePage(page []*cmn.BucketEntry) {
if len(page) == 0 {
return
}
l := len(c.buff)
if l == 0 {
c.buff = page
return
}
// the page preceds items in the cache
if !cmn.PageMarkerIncludesObject(c.buff[0].Name, page[len(page)-1].Name) {
c.buff = append(page, c.buff...)
return
}
// the page follows the cache
if !cmn.PageMarkerIncludesObject(c.buff[l-1].Name, page[0].Name) {
c.buff = append(c.buff, page...)
return
}
if glog.FastV(4, glog.SmoduleAIS) {
glog.Infof("Page %q : %q discarded", page[0].Name, page[len(page)-1].Name)
}
}
// Has to be called with Lock!
// Fetches objects from target, appends them to buffer and returns required number of objects.
func (c *locTarget) fetchFromRemote(smsg cmn.SelectMsg, size uint) *locTargetResp {
p := c.parent.parent.p
bck := c.parent.bck
args := c.newListObjectsTaskMsg(smsg, bck, newTaskResultQuery(bck.Bck))
args.req.Method = http.MethodPost | res := p.call(*args)
preallocSize := cmn.DefaultListPageSize
if smsg.PageSize != 0 {
preallocSize = smsg.PageSize
}
if res.err != nil {
return &locTargetResp{list: nil, status: res.status, err: res.err}
}
if len(res.outjson) == 0 {
s := cmn.Min(int(size), len(c.buff))
if s == 0 {
s = len(c.buff)
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[:s]}, status: res.status, err: res.err}
}
bucketList := &cmn.BucketList{Entries: make([]*cmn.BucketEntry, 0, preallocSize)}
if err := jsoniter.Unmarshal(res.outjson, &bucketList); err != nil {
return &locTargetResp{list: nil, status: http.StatusInternalServerError, err: err}
}
res.outjson = nil
if len(bucketList.Entries) < int(size) || size == 0 {
c.done = true
}
if smsg.Passthrough {
return &locTargetResp{list: bucketList, status: http.StatusOK}
}
c.mtx.Lock()
c.mergePage(bucketList.Entries)
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
c.mtx.Unlock()
j = cmn.Max(j, 0)
if size != 0 {
last := cmn.Min(len(c.buff), int(size)+j)
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:last]}, status: http.StatusOK}
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:]}, status: http.StatusOK}
}
// Prepares callArgs for list object init or list objects result call.
// Should be called with Lock or RLock acquired.
func (c *locTarget) newListObjectsTaskMsg(smsg cmn.SelectMsg, bck *cluster.Bck, q url.Values) *callArgs {
p := c.parent.parent.p
if len(c.buff) > 0 {
// Request only new objects.
smsg.PageMarker = c.buff[len(c.buff)-1].Name
}
// Cache all props, filter only requested props later.
smsg.Props = strings.Join(cmn.GetPropsAll, ",")
var (
config = cmn.GCO.Get()
smap = p.owner.smap.get()
aisMsg = p.newAisMsg(&cmn.ActionMsg{Action: cmn.ActListObjects, Value: smsg}, smap, nil)
body = cmn.MustMarshal(aisMsg)
)
return &callArgs{
si: c.t,
req: cmn.ReqArgs{
Method: http.MethodPost,
Path: cmn.URLPath(cmn.Version, cmn.Buckets, bck.Name),
Query: q,
Body: body,
},
timeout: config.Timeout.MaxHostBusy + config.Timeout.CplaneOperation,
}
}
func (c *locTarget) renewTaskOnRemote(args *callArgs) (int, error) {
res := c.parent.parent.p.call(*args)
return res.status, res.err
}
func gatherTargetListObjsResults(uuid string, ch chan *locTargetResp, expectedListsSize int, smsg *cmn.SelectMsg) (result fetchResult) {
result.allOK = true
allNotFound := true
result.lists = make([]*cmn.BucketList, 0, expectedListsSize)
requestedProps := smsg.PropsSet()
for singleResult := range ch {
result.err = singleResult.err
if singleResult.status == http.StatusNotFound {
continue
}
allNotFound = false
if result.err != nil || singleResult.status != http.StatusOK {
result.allOK = false
break
}
result.lists = append(result.lists, filteredPropsList(singleResult.list, requestedProps))
}
if allNotFound {
result.allOK = false
result.err = fmt.Errorf("task %s %s", uuid, cmn.DoesNotExist)
}
return result
}
// Filters only requested props. New bucket list is allocated!
func filteredPropsList(list *cmn.BucketList, propsSet cmn.StringSet) (resultList *cmn.BucketList) {
if list == nil {
return nil
}
resultList = &cmn.BucketList{}
resultList.PageMarker = list.PageMarker
resultList.Entries = make([]*cmn.BucketEntry, len(list.Entries))
for i, entry := range list.Entries {
newEntry := &cmn.BucketEntry{}
resultList.Entries[i] = newEntry
newEntry.Flags = entry.Flags
newEntry.Name = entry.Name
if propsSet.Contains(cmn.GetPropsChecksum) {
newEntry.Checksum = entry.Checksum
}
if propsSet.Contains(cmn.GetPropsSize) {
newEntry.Size = entry.Size
}
if propsSet.Contains(cmn.GetPropsAtime) {
newEntry.Atime = entry.Atime
}
if propsSet.Contains(cmn.GetPropsVersion) {
newEntry.Version = entry.Version
}
if propsSet.Contains(cmn.GetTargetURL) {
newEntry.TargetURL = entry.TargetURL
}
if propsSet.Contains(cmn.GetPropsCopies) {
newEntry.Copies = entry.Copies
}
}
return resultList
}
func newTaskResultQuery(bck cmn.Bck) (q url.Values) {
q = cmn.AddBckToQuery(q, bck)
q.Set(cmn.URLParamTaskAction, cmn.TaskResult)
q.Set(cmn.URLParamSilent, "true")
return q
} |
// Target prepare the final result. | random_line_split |
prxlistcache.go | // Package ais provides core functionality for the AIStore object storage.
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*/
package ais
import (
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"time"
"github.com/NVIDIA/aistore/3rdparty/glog"
"github.com/NVIDIA/aistore/cluster"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/cmn/mono"
"github.com/NVIDIA/aistore/hk"
"github.com/NVIDIA/aistore/memsys"
jsoniter "github.com/json-iterator/go"
)
// The motivation behind list-objects caching is to (drastically) reduce latency
// of listing large buckets by multiple users.
// This includes (but is not limited to) the AI use case when training workers execute the same
// logic and list the same dataset.
// When a user asks AIS proxy for the next N random objects (in a given order), the user cannot
// know where those objects are located in the cluster. In the worst-case scenario, all objects
// could reside on a single target. Hence, we query each target for the N (objects),
// merge-sort the results, and select the first N from it. Naively, we would be discarding the
// rest - cache, though, allows us /not to forget/ but use the results for the subsequent requests
// and across multiple users.
// A given cache instance is defined by the (bucket, prefix, fast) tuple. The main entry point is
// the next() method that returns the next N objects. Caches populate themselves from the storage
// targets on as-needed basis.
// The flow:
// - User asks for N objects
// For each target:
// next(N):
// - if N objects from the target are in cache return them
// - if 0 objects are in cache, fetch N objects to cache and return them
// - if 0 < x < N objects are in cache, fetch N objects to cache and return
// first N objects from cache
// objs = selectFirst(N, merge(targetResults))
// send objs to user
// Cache structure:
// listObjCache -> (bucket, prefix, fast - from smsg) -> TARGET ID -> locTarget
// Cache invalidation
// If error occurs when fetching information from targets, task's cache is invalidated.
// Otherwise cache is invalidated when the proxy is low on memory resources.
// User can explicitly invalidate cache (free the memory to the system) via API call.
const hkListObjectName = "list-objects-cache"
type (
// TODO: when starting to list, run XactBckLoadLomCache on each target async
listObjCache struct {
mtx sync.Mutex
p *proxyrunner
reqs map[string]*locReq // string(bck, prefix, fast) -> *locReq
}
locReq struct {
mtx sync.Mutex
targets map[string]*locTarget // target ID -> *locTarget
bck *cluster.Bck
parent *listObjCache
msg *cmn.SelectMsg
lastUsage int64
}
locTarget struct {
mtx sync.Mutex
parent *locReq
t *cluster.Snode
buff []*cmn.BucketEntry
done bool
}
locTargetResp struct {
status int
err error
list *cmn.BucketList
}
fetchResult struct {
err error
lists []*cmn.BucketList
allOK bool
}
)
var (
listCache *listObjCache
bucketPrefixStaleTime = 5 * cmn.GCO.Get().Client.ListObjects
)
func newListObjectsCache(p *proxyrunner) *listObjCache {
return &listObjCache{p: p, reqs: make(map[string]*locReq)}
}
func initListObjectsCache(p *proxyrunner) {
// ListObjects timeout was set to 0 in config.
// We should be housekeep from time to time anyway.
if bucketPrefixStaleTime == 0 {
bucketPrefixStaleTime = 5 * time.Minute
}
listCache = newListObjectsCache(p)
hk.Reg(hkListObjectName, func() time.Duration { return housekeepListCache(p) }, bucketPrefixStaleTime)
}
// TODO: Remove old entries, or those which take a lot of memory
// until MemPressure/PctMemUsed falls below some level.
func housekeepListCache(p *proxyrunner) time.Duration {
if p.gmm.MemPressure() <= memsys.MemPressureModerate {
return bucketPrefixStaleTime
}
now := mono.NanoTime()
listCache.mtx.Lock()
defer listCache.mtx.Unlock()
for k, v := range listCache.reqs {
if v.lastUsage+int64(bucketPrefixStaleTime) < now {
delete(listCache.reqs, k)
}
}
return bucketPrefixStaleTime
}
func newRequestCacheEntry(parent *listObjCache, bck *cluster.Bck, msg *cmn.SelectMsg) *locReq {
return &locReq{
parent: parent,
bck: bck,
targets: make(map[string]*locTarget),
msg: msg,
}
}
func newTargetCacheEntry(parent *locReq, t *cluster.Snode) *locTarget {
return &locTarget{parent: parent, t: t}
}
//////////////////////////
// listObjCache //
//////////////////////////
func (c *listObjCache) next(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, pageSize uint) (result fetchResult) {
cmn.Assert(smsg.UUID != "")
if smap.CountTargets() == 0 {
return fetchResult{err: fmt.Errorf("no targets registered")}
}
entries := c.allTargetsEntries(smsg, smap, bck)
cmn.Assert(len(entries) > 0)
entries[0].parent.mtx.Lock()
result = c.initResultsFromEntries(entries, smsg, pageSize, smsg.UUID)
if result.allOK && result.err == nil {
result = c.fetchAll(entries, smsg, pageSize)
}
entries[0].parent.mtx.Unlock()
c.mtx.Lock()
delete(c.reqs, smsg.ListObjectsCacheID(bck.Bck))
c.mtx.Unlock()
return result
}
func (c *listObjCache) targetEntry(t *cluster.Snode, smsg cmn.SelectMsg, bck *cluster.Bck) *locTarget {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
requestEntry, ok := c.reqs[id]
if !ok {
requestEntry = newRequestCacheEntry(c, bck, &smsg)
c.reqs[id] = requestEntry
}
c.mtx.Unlock()
defer func() {
requestEntry.lastUsage = mono.NanoTime()
}()
requestEntry.mtx.Lock()
targetEntry, ok := requestEntry.targets[t.ID()]
if !ok {
targetEntry = newTargetCacheEntry(requestEntry, t)
requestEntry.targets[t.ID()] = targetEntry
}
requestEntry.mtx.Unlock()
return targetEntry
}
func (c *listObjCache) leftovers(smsg cmn.SelectMsg, bck *cluster.Bck) map[string]*locTarget {
if smsg.Passthrough {
return nil
}
id := smsg.ListObjectsCacheID(bck.Bck)
requestEntry, ok := c.getRequestEntry(id)
if !ok {
return nil
}
// find pages that are unused or partially used
requestEntry.mtx.Lock()
defer requestEntry.mtx.Unlock()
tce := make(map[string]*locTarget)
for _, targetEntry := range requestEntry.targets {
targetEntry.mtx.Lock()
cnt := len(targetEntry.buff)
if cnt == 0 || cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[cnt-1].Name) {
targetEntry.mtx.Unlock()
continue
}
entry, ok := tce[targetEntry.t.ID()]
if !ok {
entry = &locTarget{parent: targetEntry.parent, t: targetEntry.t, buff: make([]*cmn.BucketEntry, 0)}
tce[targetEntry.t.ID()] = entry
}
// First case: the entire page was unused
if !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[0].Name) {
entry.buff = append(entry.buff, targetEntry.buff...)
targetEntry.mtx.Unlock()
continue
}
// Seconds case: partially used page
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[i].Name) }
idx := sort.Search(len(targetEntry.buff), cond)
entry.buff = append(entry.buff, targetEntry.buff[idx:]...)
targetEntry.mtx.Unlock()
}
return tce
}
func (c *listObjCache) allTargetsEntries(smsg cmn.SelectMsg, smap *cluster.Smap, bck *cluster.Bck) []*locTarget {
result := make([]*locTarget, 0, len(smap.Tmap))
// First, get the data from the cache that was not sent yet
partial := c.leftovers(smsg, bck)
for _, t := range smap.Tmap {
var (
targetLeftovers *locTarget
ok bool
)
if smsg.Passthrough {
// In passthrough mode we have to create "normal" but fake cache page.
reqEntry := newRequestCacheEntry(c, bck, &smsg)
entry := newTargetCacheEntry(reqEntry, t)
result = append(result, entry)
continue
}
if len(partial) != 0 {
targetLeftovers, ok = partial[t.ID()]
}
// If nothing is found for a target in the cache, initialize a new
// cache page. Without it, the new page leftovers can be lost.
if !ok || len(targetLeftovers.buff) == 0 {
targetEntry := c.targetEntry(t, smsg, bck)
result = append(result, targetEntry)
continue
}
// Order of pages in cache may be random. Sort them right away
less := func(i, j int) bool { return targetLeftovers.buff[i].Name < targetLeftovers.buff[j].Name }
sort.Slice(targetLeftovers.buff, less)
result = append(result, targetLeftovers)
}
return result
}
func (c *listObjCache) initResults(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, size uint, newUUID string) fetchResult {
entries := c.allTargetsEntries(smsg, smap, bck)
return c.initResultsFromEntries(entries, smsg, size, newUUID)
}
// initResultsFromEntries notifies targets to prepare next objects page.
// It returns information if all calls succeed, and if there were any errors.
func (c *listObjCache) initResultsFromEntries(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) fetchResult {
ch := c.initAllTargets(entries, smsg, size, newUUID)
return gatherTargetListObjsResults(smsg.UUID, ch, 0, &smsg)
}
// fetchAll returns next `size` object names from each target. It include additional information
// if all calls to targets succeeded and if there were any errors. It cache has buffered object names
// it might return results without making any API calls.
func (c *listObjCache) fetchAll(entries []*locTarget, smsg cmn.SelectMsg, size uint) fetchResult {
wg := &sync.WaitGroup{}
wg.Add(len(entries))
resCh := make(chan *locTargetResp, len(entries))
for _, entry := range entries {
entry.fetch(smsg, size, wg, resCh)
}
wg.Wait()
close(resCh)
return gatherTargetListObjsResults(smsg.UUID, resCh, len(entries), &smsg)
}
// Discard all entries of given task which were included in marker `until`.
func (c *listObjCache) discard(smsg *cmn.SelectMsg, bck *cluster.Bck) {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
delete(c.reqs, id)
c.mtx.Unlock()
}
func (c *listObjCache) getRequestEntry(cacheID string) (*locReq, bool) {
c.mtx.Lock()
req, ok := c.reqs[cacheID]
c.mtx.Unlock()
return req, ok
}
// Gathers init results for each target on `resultCh`
func (c *listObjCache) initAllTargets(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) (resultCh chan *locTargetResp) {
resultCh = make(chan *locTargetResp, len(entries))
wg := &sync.WaitGroup{}
wg.Add(len(entries))
for _, targetEntry := range entries {
targetEntry.init(smsg, size, wg, resultCh, newUUID)
}
wg.Wait()
close(resultCh)
return
}
//////////////////////////
// locTarget //
/////////////////////////
func (c *locTarget) init(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp, newUUID string) {
cacheSufficient := (uint(len(c.buff)) >= size && size != 0) || c.done
if !smsg.Passthrough && cacheSufficient {
// Everything that is requested is already in the cache, we don't have to do any API calls.
// Returning StatusOK as if we did a request.
resCh <- &locTargetResp{status: http.StatusOK, err: nil}
wg.Done()
return
}
// Make an actual call to the target.
go func() {
resCh <- c.initOnRemote(smsg, newUUID)
wg.Done()
}()
}
func (c *locTarget) initOnRemote(smsg cmn.SelectMsg, newUUID string) (result *locTargetResp) {
p := c.parent.parent.p
bck := c.parent.bck
_, q := p.initAsyncQuery(bck, &smsg, newUUID)
args := c.newListObjectsTaskMsg(smsg, bck, q) // Changes PageMarker to point to last element in buff.
status, err := c.renewTaskOnRemote(args)
return &locTargetResp{status: status, err: err}
}
// Returns next `size` objects or less if no more exists.
// If everything that is requested already is present in the cache, don't make any API calls.
func (c *locTarget) fetch(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp) {
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
// discard entries which somehow don't fit the request. They're name is smaller than pageMarker,
// which means that user already has them from previous requests.
bf := c.buff[j:]
// We have everything in cache or target has nothing more.
// We didn't do init request to the target.
if (uint(len(bf)) >= size && size != 0) || c.done {
if size == 0 {
size = uint(len(bf))
} else {
size = uint(cmn.Min(len(bf), int(size)))
}
resCh <- &locTargetResp{list: &cmn.BucketList{Entries: bf[:size]}, status: http.StatusOK}
wg.Done()
return
}
go func() {
resCh <- c.fetchFromRemote(smsg, size)
wg.Done()
}()
}
// TODO: gaps, overlaps
func (c *locTarget) mergePage(page []*cmn.BucketEntry) {
if len(page) == 0 {
return
}
l := len(c.buff)
if l == 0 {
c.buff = page
return
}
// the page preceds items in the cache
if !cmn.PageMarkerIncludesObject(c.buff[0].Name, page[len(page)-1].Name) {
c.buff = append(page, c.buff...)
return
}
// the page follows the cache
if !cmn.PageMarkerIncludesObject(c.buff[l-1].Name, page[0].Name) {
c.buff = append(c.buff, page...)
return
}
if glog.FastV(4, glog.SmoduleAIS) {
glog.Infof("Page %q : %q discarded", page[0].Name, page[len(page)-1].Name)
}
}
// Has to be called with Lock!
// Fetches objects from target, appends them to buffer and returns required number of objects.
func (c *locTarget) fetchFromRemote(smsg cmn.SelectMsg, size uint) *locTargetResp {
p := c.parent.parent.p
bck := c.parent.bck
args := c.newListObjectsTaskMsg(smsg, bck, newTaskResultQuery(bck.Bck))
args.req.Method = http.MethodPost
// Target prepare the final result.
res := p.call(*args)
preallocSize := cmn.DefaultListPageSize
if smsg.PageSize != 0 {
preallocSize = smsg.PageSize
}
if res.err != nil {
return &locTargetResp{list: nil, status: res.status, err: res.err}
}
if len(res.outjson) == 0 {
s := cmn.Min(int(size), len(c.buff))
if s == 0 |
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[:s]}, status: res.status, err: res.err}
}
bucketList := &cmn.BucketList{Entries: make([]*cmn.BucketEntry, 0, preallocSize)}
if err := jsoniter.Unmarshal(res.outjson, &bucketList); err != nil {
return &locTargetResp{list: nil, status: http.StatusInternalServerError, err: err}
}
res.outjson = nil
if len(bucketList.Entries) < int(size) || size == 0 {
c.done = true
}
if smsg.Passthrough {
return &locTargetResp{list: bucketList, status: http.StatusOK}
}
c.mtx.Lock()
c.mergePage(bucketList.Entries)
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
c.mtx.Unlock()
j = cmn.Max(j, 0)
if size != 0 {
last := cmn.Min(len(c.buff), int(size)+j)
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:last]}, status: http.StatusOK}
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:]}, status: http.StatusOK}
}
// Prepares callArgs for list object init or list objects result call.
// Should be called with Lock or RLock acquired.
func (c *locTarget) newListObjectsTaskMsg(smsg cmn.SelectMsg, bck *cluster.Bck, q url.Values) *callArgs {
p := c.parent.parent.p
if len(c.buff) > 0 {
// Request only new objects.
smsg.PageMarker = c.buff[len(c.buff)-1].Name
}
// Cache all props, filter only requested props later.
smsg.Props = strings.Join(cmn.GetPropsAll, ",")
var (
config = cmn.GCO.Get()
smap = p.owner.smap.get()
aisMsg = p.newAisMsg(&cmn.ActionMsg{Action: cmn.ActListObjects, Value: smsg}, smap, nil)
body = cmn.MustMarshal(aisMsg)
)
return &callArgs{
si: c.t,
req: cmn.ReqArgs{
Method: http.MethodPost,
Path: cmn.URLPath(cmn.Version, cmn.Buckets, bck.Name),
Query: q,
Body: body,
},
timeout: config.Timeout.MaxHostBusy + config.Timeout.CplaneOperation,
}
}
func (c *locTarget) renewTaskOnRemote(args *callArgs) (int, error) {
res := c.parent.parent.p.call(*args)
return res.status, res.err
}
func gatherTargetListObjsResults(uuid string, ch chan *locTargetResp, expectedListsSize int, smsg *cmn.SelectMsg) (result fetchResult) {
result.allOK = true
allNotFound := true
result.lists = make([]*cmn.BucketList, 0, expectedListsSize)
requestedProps := smsg.PropsSet()
for singleResult := range ch {
result.err = singleResult.err
if singleResult.status == http.StatusNotFound {
continue
}
allNotFound = false
if result.err != nil || singleResult.status != http.StatusOK {
result.allOK = false
break
}
result.lists = append(result.lists, filteredPropsList(singleResult.list, requestedProps))
}
if allNotFound {
result.allOK = false
result.err = fmt.Errorf("task %s %s", uuid, cmn.DoesNotExist)
}
return result
}
// Filters only requested props. New bucket list is allocated!
func filteredPropsList(list *cmn.BucketList, propsSet cmn.StringSet) *cmn.BucketList {
	if list == nil {
		return nil
	}
	// Name and Flags are always carried over; the remaining fields are
	// copied only when the corresponding property was requested.
	entries := make([]*cmn.BucketEntry, len(list.Entries))
	for i, src := range list.Entries {
		dst := &cmn.BucketEntry{Name: src.Name, Flags: src.Flags}
		if propsSet.Contains(cmn.GetPropsChecksum) {
			dst.Checksum = src.Checksum
		}
		if propsSet.Contains(cmn.GetPropsSize) {
			dst.Size = src.Size
		}
		if propsSet.Contains(cmn.GetPropsAtime) {
			dst.Atime = src.Atime
		}
		if propsSet.Contains(cmn.GetPropsVersion) {
			dst.Version = src.Version
		}
		if propsSet.Contains(cmn.GetTargetURL) {
			dst.TargetURL = src.TargetURL
		}
		if propsSet.Contains(cmn.GetPropsCopies) {
			dst.Copies = src.Copies
		}
		entries[i] = dst
	}
	return &cmn.BucketList{PageMarker: list.PageMarker, Entries: entries}
}
// newTaskResultQuery builds the query string used to fetch the result of a
// previously started list-objects task.
func newTaskResultQuery(bck cmn.Bck) url.Values {
	q := cmn.AddBckToQuery(nil, bck)
	q.Set(cmn.URLParamSilent, "true")
	q.Set(cmn.URLParamTaskAction, cmn.TaskResult)
	return q
}
| {
s = len(c.buff)
} | conditional_block |
lib.rs | //! # sunvox-sys
//!
//! FFI bindings to the Sunvox library (http://warmplace.ru/soft/sunvox).
// --- Crate attributes --- //
#![allow(non_camel_case_types)]
// --- ==== --- //
// --- External crates --- //
extern crate libc;
// --- ==== --- //
// --- Use --- //
use libc::{c_void, c_int, c_uint, c_char, c_uchar, c_short, c_ushort};
// --- ==== --- //
/// Single note off.
pub const NOTECMD_NOTE_OFF: c_int = 128;
/// Notes of all synths off.
pub const NOTECMD_ALL_NOTES_OFF: c_int = 129;
/// Stop and clean all synths.
pub const NOTECMD_CLEAN_SYNTHS: c_int = 130;
pub const NOTECMD_STOP: c_int = 131;
pub const NOTECMD_PLAY: c_int = 132;
// I can't find these in the official header file, but they're defined in
// https://github.com/metrasynth/sunvox-dll-python/blob/master/sunvox/types.py
/// Change the pitch of a currently playing note.
pub const NOTECMD_SET_PITCH: c_int = 133;
/// Apply effect in this note cell to the corresponding one in the previous track.
pub const NOTECMD_PREV_TRACK: c_int = 134;
/// A single note cell in a pattern.
#[repr(C)]
#[derive(Clone, Debug)]
pub struct | {
/// The note column.
///
/// - 0: Nothing.
/// - 1 to 127 inclusive: A normal note.
/// - 128+: See the `NOTECMD` constants.
pub note: c_uchar,
/// The velocity column (note velocity).
///
/// - 0: Empty (default).
/// - 1 to 129 inclusive: The specified velocity + 1.
pub vel: c_uchar,
/// The module column (module to affect).
///
/// - 0: Empty (none).
/// - 1 to 255 inclusive: The specified module + 1.
pub module: c_uchar,
/// Padding.
pub nothing: c_uchar,
/// The value of the controller/effect column.
///
/// Interpreted as a hexadecimal number, the first two digits are the
/// controller of the selected module to affect, and the last two digits
/// are the number of an effect. Set a pair of digits to zero to
/// ignore that part.
pub ctl: c_ushort,
/// The value of the controller/effect parameter column.
pub ctl_val: c_ushort,
}
/// Supresses debug output from the SunVox library.
pub const SV_INIT_FLAG_NO_DEBUG_OUTPUT: c_uint = 1 << 0;
/// Interaction with sound card is on the user side.
///
/// See `sv_audio_callback()`.
pub const SV_INIT_FLAG_USER_AUDIO_CALLBACK: c_uint = 1 << 1;
/// Audio is signed 16-bit (`c_short`).
pub const SV_INIT_FLAG_AUDIO_INT16: c_uint = 1 << 2;
/// Audio is float (`c_float`).
pub const SV_INIT_FLAG_AUDIO_FLOAT32: c_uint = 1 << 3;
/// Audio callback and song modification functions are in a single thread.
pub const SV_INIT_FLAG_ONE_THREAD: c_uint = 1 << 4;
pub const SV_MODULE_FLAG_EXISTS: c_int = 1;
pub const SV_MODULE_FLAG_EFFECT: c_int = 2;
pub const SV_MODULE_INPUTS_OFF: c_int = 16;
pub const SV_MODULE_INPUTS_MASK: c_int = 255 << SV_MODULE_INPUTS_OFF;
pub const SV_MODULE_OUTPUTS_OFF: c_int = 16 + 8;
pub const SV_MODULE_OUTPUTS_MASK: c_int = 255 << SV_MODULE_OUTPUTS_OFF;
pub const SV_STYPE_INT16: c_int = 0;
pub const SV_STYPE_INT32: c_int = 1;
pub const SV_STYPE_FLOAT32: c_int = 2;
pub const SV_STYPE_FLOAT64: c_int = 3;
#[link(name = "sunvox")]
extern "C" {
/// Gets the next piece of SunVox audio.
///
/// With `sv_audio_callback()` you can ignore the built-in SunVox sound
/// output mechanism and use some other sound system. Set the
/// `SV_INIT_FLAG_USER_AUDIO_CALLBACK` flag when calling `sv_init()` if
/// you want to use this function.
///
/// # Parameters
///
/// - buf: Destination buffer. If `SV_INIT_FLAG_AUDIO_INT16` was passed to
/// `sv_init()`, this is a buffer of `c_short`s. If `SV_INIT_FLAG_AUDIO_FLOAT32`
/// was passed, this is a buffer of `c_float`s. Stereo data will be interleaved
/// in this buffer: LRLR... ; where the LR is one frame (Left+Right channels).
/// - frames: Number of frames in destination buffer.
/// - latency: Audio latency (in frames).
/// - out_time: Output time (in ticks).
///
/// The `out_time` parameter is elaborated on a little bit in this thread:
/// http://www.warmplace.ru/forum/viewtopic.php?f=12&t=4152
///
/// For normal use, pass the value of `sv_get_ticks()`, as detailed in that
/// thread.
pub fn sv_audio_callback(buf: *mut c_void,
frames: c_int,
latency: c_int,
out_time: c_uint)
-> c_int;
/// Opens a slot.
///
/// A slot is like an instance of the SunVox engine. Each slot can have a
/// single project loaded at a time. The library supports up to four slots,
/// 0 to 3 inclusive. This call appears to hang if called with a number
/// outside this range.
///
/// Returns 0 on success, -1 on failure. Failure conditions include the
/// slot already being open.
///
/// I say "like" an instance of the engine because I think all slots share
/// the same tick counter, which you can get by calling `sv_get_ticks()`.
pub fn sv_open_slot(slot: c_int) -> c_int;
/// Closes a slot. See `sv_open_slot()` for more details.
pub fn sv_close_slot(slot: c_int) -> c_int;
/// Locks a slot.
///
/// There are a few functions that need to be called between a
/// `sv_lock_slot()`/`sv_unlock_slot()` pair. These are marked with
/// "USE LOCK/UNLOCK!".
pub fn sv_lock_slot(slot: c_int) -> c_int;
/// Unlocks a slot. See `sv_lock_slot()` for more details.
pub fn sv_unlock_slot(slot: c_int) -> c_int;
/// Initializes the library.
///
/// The `flags` parameter takes either zero (for default options), or a
/// number of `SV_INIT_FLAG_xxx` constants ORed together.
pub fn sv_init(dev: *const c_char, freq: c_int, channels: c_int, flags: c_uint) -> c_int;
/// Deinitializes the library.
pub fn sv_deinit() -> c_int;
/// Gets the internal sample type of the SunVox engine.
///
/// Returns one of the `SV_STYPE_xxx` constants.
///
/// Use it to get the scope buffer type from `get_module_scope()` function.
pub fn sv_get_sample_type() -> c_int;
/// Loads a SunVox project file into the specified slot.
pub fn sv_load(slot: c_int, name: *const c_char) -> c_int;
/// Loads a SunVox project from file data in memory.
pub fn sv_load_from_memory(slot: c_int, data: *mut c_void, data_size: c_uint) -> c_int;
/// Starts playing the project from the current play cursor position.
pub fn sv_play(slot: c_int) -> c_int;
/// Starts playing the project from the beginning.
pub fn sv_play_from_beginning(slot: c_int) -> c_int;
/// Stops playing the project. The play cursor stays where it is.
pub fn sv_stop(slot: c_int) -> c_int;
/// Enables or disables autostop.
///
/// - 0: Disable autostop.
/// - 1: Enable autostop.
///
/// When disabled, the project plays in a loop.
pub fn sv_set_autostop(slot: c_int, autostop: c_int) -> c_int;
/// Gets whether the project is stopped (ie. not playing).
///
/// Returns 0 if it is playing, 1 if it is stopped.
pub fn sv_end_of_song(slot: c_int) -> c_int;
/// Rewinds the project to the beginning.
pub fn sv_rewind(slot: c_int, line_num: c_int) -> c_int;
/// Sets the volume of the project.
pub fn sv_volume(slot: c_int, vol: c_int) -> c_int;
/// Causes an event to occur as though it had been played in a pattern.
///
/// `track_num` is in the range 0 to 15 inclusive, and refers to the track
/// number in a special hidden pattern.
pub fn sv_send_event(slot: c_int,
track_num: c_int,
note: c_int,
vel: c_int,
module: c_int,
ctl: c_int,
ctl_val: c_int)
-> c_int;
/// Gets the line number of the play cursor.
pub fn sv_get_current_line(slot: c_int) -> c_int;
/// Gets the line number of the play in fixed point format: 27.5
///
/// TODO: Figure out exactly what this means.
/// I'm guessing it means 27 bits for the integer part and 5 bits for the
/// fractional part.
pub fn sv_get_current_line2(slot: c_int) -> c_int;
/// Gets the current signal level/amplitude for a given audio channel
/// in the range 0 to 255 inclusive.
pub fn sv_get_current_signal_level(slot: c_int, channel: c_int) -> c_int;
/// Gets the name of the currently loaded project.
///
/// Returns NULL if no project is loaded.
pub fn sv_get_song_name(slot: c_int) -> *const c_char;
/// Gets the Beats Per Minute of the currently loaded project.
///
/// Returns zero if no project is loaded.
pub fn sv_get_song_bpm(slot: c_int) -> c_int;
/// Gets the Ticks Per Line of the currently loaded project.
///
/// Returns zero if no project is loaded.
pub fn sv_get_song_tpl(slot: c_int) -> c_int;
/// Gets the currently loaded song's length in audio samples/frames.
pub fn sv_get_song_length_frames(slot: c_int) -> c_uint;
/// Gets the currently loaded song's length in pattern lines.
pub fn sv_get_song_length_lines(slot: c_int) -> c_uint;
/// Creates a new module. USE LOCK/UNLOCK!
pub fn sv_new_module(slot: c_int,
_type: *const c_char,
name: *const c_char,
x: c_int,
y: c_int,
z: c_int)
-> c_int;
/// Removes the specified module. USE LOCK/UNLOCK!
pub fn sv_remove_module(slot: c_int, mod_num: c_int) -> c_int;
/// Connects the source to the destination. USE LOCK/UNLOCK!
pub fn sv_connect_module(slot: c_int, source: c_int, destination: c_int) -> c_int;
/// Disconnects the source from the destination. USE LOCK/UNLOCK!
pub fn sv_disconnect_module(slot: c_int, source: c_int, destination: c_int) -> c_int;
/// Loads a module.
///
/// Supported file formats: `sunsynth`, `xi`, `wav`, `aiff`
pub fn sv_load_module(slot: c_int,
file_name: *const c_char,
x: c_int,
y: c_int,
z: c_int)
-> c_int;
/// Loads a sample to an existing Sampler.
///
/// To replace the whole sampler, set `sample_slot` to -1.
pub fn sv_sampler_load(slot: c_int,
sampler_module: c_int,
file_name: *const c_char,
sample_slot: c_int)
-> c_int;
/// Gets the number of modules in the currently loaded project?
///
/// Does not seem to directly correspond to that.
/// TODO: Investigate this.
///
/// Returns zero if no project is loaded.
pub fn sv_get_number_of_modules(slot: c_int) -> c_int;
pub fn sv_get_module_flags(slot: c_int, mod_num: c_int) -> c_uint;
pub fn sv_get_module_inputs(slot: c_int, mod_num: c_int) -> *mut c_int;
pub fn sv_get_module_outputs(slot: c_int, mod_num: c_int) -> *mut c_int;
pub fn sv_get_module_name(slot: c_int, mod_num: c_int) -> *const c_char;
pub fn sv_get_module_xy(slot: c_int, mod_num: c_int) -> c_uint;
pub fn sv_get_module_color(slot: c_int, mod_num: c_int) -> c_int;
pub fn sv_get_module_scope(slot: c_int,
mod_num: c_int,
channel: c_int,
buffer_offset: *mut c_int,
buffer_size: *mut c_int)
-> *mut c_void;
/// TODO
///
/// Return value: received number of samples (may be less or equal to `samples_to_read`).
pub fn sv_get_module_scope2(slot: c_int,
mod_num: c_int,
channel: c_int,
read_buf: *mut c_short,
samples_to_read: c_uint)
-> c_uint;
pub fn sv_get_number_of_module_ctls(slot: c_int, mod_num: c_int) -> c_int;
pub fn sv_get_module_ctl_name(slot: c_int, mod_num: c_int, ctl_num: c_int) -> *const c_char;
pub fn sv_get_module_ctl_value(slot: c_int,
mod_num: c_int,
ctl_num: c_int,
scaled: c_int)
-> c_int;
pub fn sv_get_number_of_patterns(slot: c_int) -> c_int;
pub fn sv_get_pattern_x(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_y(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_tracks(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_lines(slot: c_int, pat_num: c_int) -> c_int;
/// TODO
///
/// How to use sv_get_pattern_data():
///
/// - `int pat_tracks = sv_get_pattern_tracks(slot, pat_num);`
/// - `sunvox_note* data = sv_get_pattern_data(slot, pat_num);`
/// - `sunvox_note* n = &data[ line_number * pat_tracks + track_number ];`
/// - ... and then do someting with note n
pub fn sv_get_pattern_data(slot: c_int, pat_num: c_int) -> *mut sunvox_note;
/// TODO
///
/// USE LOCK/UNLOCK!
pub fn sv_pattern_mute(slot: c_int, pat_num: c_int, mute: c_int) -> c_int;
/// Gets the current tick counter
///
/// Returns a value between 0 and 0xFFFFFFFF inclusive.
///
/// SunVox engine uses its own time space, measured in ticks.
pub fn sv_get_ticks() -> c_uint;
/// Gets the number of SunVox ticks per second.
pub fn sv_get_ticks_per_second() -> c_uint;
}
| sunvox_note | identifier_name |
lib.rs | //! # sunvox-sys
//!
//! FFI bindings to the Sunvox library (http://warmplace.ru/soft/sunvox).
// --- Crate attributes --- //
#![allow(non_camel_case_types)]
// --- ==== --- //
// --- External crates --- //
extern crate libc;
// --- ==== --- //
// --- Use --- //
use libc::{c_void, c_int, c_uint, c_char, c_uchar, c_short, c_ushort};
// --- ==== --- //
/// Single note off.
pub const NOTECMD_NOTE_OFF: c_int = 128;
/// Notes of all synths off.
pub const NOTECMD_ALL_NOTES_OFF: c_int = 129;
/// Stop and clean all synths.
pub const NOTECMD_CLEAN_SYNTHS: c_int = 130;
pub const NOTECMD_STOP: c_int = 131;
pub const NOTECMD_PLAY: c_int = 132;
// I can't find these in the official header file, but they're defined in
// https://github.com/metrasynth/sunvox-dll-python/blob/master/sunvox/types.py
/// Change the pitch of a currently playing note.
pub const NOTECMD_SET_PITCH: c_int = 133;
/// Apply effect in this note cell to the corresponding one in the previous track.
pub const NOTECMD_PREV_TRACK: c_int = 134;
|
/// A single note cell in a pattern.
///
/// `#[repr(C)]` keeps the field order and padding identical to the C struct,
/// since pointers to this type cross the FFI boundary (see
/// `sv_get_pattern_data`, which returns a `line * tracks + track`-indexed
/// array of these cells).
#[repr(C)]
#[derive(Clone, Debug)]
pub struct sunvox_note {
    /// The note column.
    ///
    /// - 0: Nothing.
    /// - 1 to 127 inclusive: A normal note.
    /// - 128+: See the `NOTECMD` constants.
    pub note: c_uchar,
    /// The velocity column (note velocity).
    ///
    /// - 0: Empty (default).
    /// - 1 to 129 inclusive: The specified velocity + 1.
    pub vel: c_uchar,
    /// The module column (module to affect).
    ///
    /// - 0: Empty (none).
    /// - 1 to 255 inclusive: The specified module + 1.
    pub module: c_uchar,
    /// Padding (unused by the engine).
    pub nothing: c_uchar,
    /// The value of the controller/effect column.
    ///
    /// Interpreted as a hexadecimal number, the first two digits are the
    /// controller of the selected module to affect, and the last two digits
    /// are the number of an effect. Set a pair of digits to zero to
    /// ignore that part.
    pub ctl: c_ushort,
    /// The value of the controller/effect parameter column.
    pub ctl_val: c_ushort,
}
/// Supresses debug output from the SunVox library.
pub const SV_INIT_FLAG_NO_DEBUG_OUTPUT: c_uint = 1 << 0;
/// Interaction with sound card is on the user side.
///
/// See `sv_audio_callback()`.
pub const SV_INIT_FLAG_USER_AUDIO_CALLBACK: c_uint = 1 << 1;
/// Audio is signed 16-bit (`c_short`).
pub const SV_INIT_FLAG_AUDIO_INT16: c_uint = 1 << 2;
/// Audio is float (`c_float`).
pub const SV_INIT_FLAG_AUDIO_FLOAT32: c_uint = 1 << 3;
/// Audio callback and song modification functions are in a single thread.
pub const SV_INIT_FLAG_ONE_THREAD: c_uint = 1 << 4;
pub const SV_MODULE_FLAG_EXISTS: c_int = 1;
pub const SV_MODULE_FLAG_EFFECT: c_int = 2;
pub const SV_MODULE_INPUTS_OFF: c_int = 16;
pub const SV_MODULE_INPUTS_MASK: c_int = 255 << SV_MODULE_INPUTS_OFF;
pub const SV_MODULE_OUTPUTS_OFF: c_int = 16 + 8;
pub const SV_MODULE_OUTPUTS_MASK: c_int = 255 << SV_MODULE_OUTPUTS_OFF;
pub const SV_STYPE_INT16: c_int = 0;
pub const SV_STYPE_INT32: c_int = 1;
pub const SV_STYPE_FLOAT32: c_int = 2;
pub const SV_STYPE_FLOAT64: c_int = 3;
#[link(name = "sunvox")]
extern "C" {
/// Gets the next piece of SunVox audio.
///
/// With `sv_audio_callback()` you can ignore the built-in SunVox sound
/// output mechanism and use some other sound system. Set the
/// `SV_INIT_FLAG_USER_AUDIO_CALLBACK` flag when calling `sv_init()` if
/// you want to use this function.
///
/// # Parameters
///
/// - buf: Destination buffer. If `SV_INIT_FLAG_AUDIO_INT16` was passed to
/// `sv_init()`, this is a buffer of `c_short`s. If `SV_INIT_FLAG_AUDIO_FLOAT32`
/// was passed, this is a buffer of `c_float`s. Stereo data will be interleaved
/// in this buffer: LRLR... ; where the LR is one frame (Left+Right channels).
/// - frames: Number of frames in destination buffer.
/// - latency: Audio latency (in frames).
/// - out_time: Output time (in ticks).
///
/// The `out_time` parameter is elaborated on a little bit in this thread:
/// http://www.warmplace.ru/forum/viewtopic.php?f=12&t=4152
///
/// For normal use, pass the value of `sv_get_ticks()`, as detailed in that
/// thread.
pub fn sv_audio_callback(buf: *mut c_void,
frames: c_int,
latency: c_int,
out_time: c_uint)
-> c_int;
/// Opens a slot.
///
/// A slot is like an instance of the SunVox engine. Each slot can have a
/// single project loaded at a time. The library supports up to four slots,
/// 0 to 3 inclusive. This call appears to hang if called with a number
/// outside this range.
///
/// Returns 0 on success, -1 on failure. Failure conditions include the
/// slot already being open.
///
/// I say "like" an instance of the engine because I think all slots share
/// the same tick counter, which you can get by calling `sv_get_ticks()`.
pub fn sv_open_slot(slot: c_int) -> c_int;
/// Closes a slot. See `sv_open_slot()` for more details.
pub fn sv_close_slot(slot: c_int) -> c_int;
/// Locks a slot.
///
/// There are a few functions that need to be called between a
/// `sv_lock_slot()`/`sv_unlock_slot()` pair. These are marked with
/// "USE LOCK/UNLOCK!".
pub fn sv_lock_slot(slot: c_int) -> c_int;
/// Unlocks a slot. See `sv_lock_slot()` for more details.
pub fn sv_unlock_slot(slot: c_int) -> c_int;
/// Initializes the library.
///
/// The `flags` parameter takes either zero (for default options), or a
/// number of `SV_INIT_FLAG_xxx` constants ORed together.
pub fn sv_init(dev: *const c_char, freq: c_int, channels: c_int, flags: c_uint) -> c_int;
/// Deinitializes the library.
pub fn sv_deinit() -> c_int;
/// Gets the internal sample type of the SunVox engine.
///
/// Returns one of the `SV_STYPE_xxx` constants.
///
/// Use it to get the scope buffer type from `get_module_scope()` function.
pub fn sv_get_sample_type() -> c_int;
/// Loads a SunVox project file into the specified slot.
pub fn sv_load(slot: c_int, name: *const c_char) -> c_int;
/// Loads a SunVox project from file data in memory.
pub fn sv_load_from_memory(slot: c_int, data: *mut c_void, data_size: c_uint) -> c_int;
/// Starts playing the project from the current play cursor position.
pub fn sv_play(slot: c_int) -> c_int;
/// Starts playing the project from the beginning.
pub fn sv_play_from_beginning(slot: c_int) -> c_int;
/// Stops playing the project. The play cursor stays where it is.
pub fn sv_stop(slot: c_int) -> c_int;
/// Enables or disables autostop.
///
/// - 0: Disable autostop.
/// - 1: Enable autostop.
///
/// When disabled, the project plays in a loop.
pub fn sv_set_autostop(slot: c_int, autostop: c_int) -> c_int;
/// Gets whether the project is stopped (ie. not playing).
///
/// Returns 0 if it is playing, 1 if it is stopped.
pub fn sv_end_of_song(slot: c_int) -> c_int;
/// Rewinds the project to the beginning.
pub fn sv_rewind(slot: c_int, line_num: c_int) -> c_int;
/// Sets the volume of the project.
pub fn sv_volume(slot: c_int, vol: c_int) -> c_int;
/// Causes an event to occur as though it had been played in a pattern.
///
/// `track_num` is in the range 0 to 15 inclusive, and refers to the track
/// number in a special hidden pattern.
pub fn sv_send_event(slot: c_int,
track_num: c_int,
note: c_int,
vel: c_int,
module: c_int,
ctl: c_int,
ctl_val: c_int)
-> c_int;
/// Gets the line number of the play cursor.
pub fn sv_get_current_line(slot: c_int) -> c_int;
/// Gets the line number of the play in fixed point format: 27.5
///
/// TODO: Figure out exactly what this means.
/// I'm guessing it means 27 bits for the integer part and 5 bits for the
/// fractional part.
pub fn sv_get_current_line2(slot: c_int) -> c_int;
/// Gets the current signal level/amplitude for a given audio channel
/// in the range 0 to 255 inclusive.
pub fn sv_get_current_signal_level(slot: c_int, channel: c_int) -> c_int;
/// Gets the name of the currently loaded project.
///
/// Returns NULL if no project is loaded.
pub fn sv_get_song_name(slot: c_int) -> *const c_char;
/// Gets the Beats Per Minute of the currently loaded project.
///
/// Returns zero if no project is loaded.
pub fn sv_get_song_bpm(slot: c_int) -> c_int;
/// Gets the Ticks Per Line of the currently loaded project.
///
/// Returns zero if no project is loaded.
pub fn sv_get_song_tpl(slot: c_int) -> c_int;
/// Gets the currently loaded song's length in audio samples/frames.
pub fn sv_get_song_length_frames(slot: c_int) -> c_uint;
/// Gets the currently loaded song's length in pattern lines.
pub fn sv_get_song_length_lines(slot: c_int) -> c_uint;
/// Creates a new module. USE LOCK/UNLOCK!
pub fn sv_new_module(slot: c_int,
_type: *const c_char,
name: *const c_char,
x: c_int,
y: c_int,
z: c_int)
-> c_int;
/// Removes the specified module. USE LOCK/UNLOCK!
pub fn sv_remove_module(slot: c_int, mod_num: c_int) -> c_int;
/// Connects the source to the destination. USE LOCK/UNLOCK!
pub fn sv_connect_module(slot: c_int, source: c_int, destination: c_int) -> c_int;
/// Disconnects the source from the destination. USE LOCK/UNLOCK!
pub fn sv_disconnect_module(slot: c_int, source: c_int, destination: c_int) -> c_int;
/// Loads a module.
///
/// Supported file formats: `sunsynth`, `xi`, `wav`, `aiff`
pub fn sv_load_module(slot: c_int,
file_name: *const c_char,
x: c_int,
y: c_int,
z: c_int)
-> c_int;
/// Loads a sample to an existing Sampler.
///
/// To replace the whole sampler, set `sample_slot` to -1.
pub fn sv_sampler_load(slot: c_int,
sampler_module: c_int,
file_name: *const c_char,
sample_slot: c_int)
-> c_int;
/// Gets the number of modules in the currently loaded project?
///
/// Does not seem to directly correspond to that.
/// TODO: Investigate this.
///
/// Returns zero if no project is loaded.
pub fn sv_get_number_of_modules(slot: c_int) -> c_int;
pub fn sv_get_module_flags(slot: c_int, mod_num: c_int) -> c_uint;
pub fn sv_get_module_inputs(slot: c_int, mod_num: c_int) -> *mut c_int;
pub fn sv_get_module_outputs(slot: c_int, mod_num: c_int) -> *mut c_int;
pub fn sv_get_module_name(slot: c_int, mod_num: c_int) -> *const c_char;
pub fn sv_get_module_xy(slot: c_int, mod_num: c_int) -> c_uint;
pub fn sv_get_module_color(slot: c_int, mod_num: c_int) -> c_int;
pub fn sv_get_module_scope(slot: c_int,
mod_num: c_int,
channel: c_int,
buffer_offset: *mut c_int,
buffer_size: *mut c_int)
-> *mut c_void;
/// TODO
///
/// Return value: received number of samples (may be less or equal to `samples_to_read`).
pub fn sv_get_module_scope2(slot: c_int,
mod_num: c_int,
channel: c_int,
read_buf: *mut c_short,
samples_to_read: c_uint)
-> c_uint;
pub fn sv_get_number_of_module_ctls(slot: c_int, mod_num: c_int) -> c_int;
pub fn sv_get_module_ctl_name(slot: c_int, mod_num: c_int, ctl_num: c_int) -> *const c_char;
pub fn sv_get_module_ctl_value(slot: c_int,
mod_num: c_int,
ctl_num: c_int,
scaled: c_int)
-> c_int;
pub fn sv_get_number_of_patterns(slot: c_int) -> c_int;
pub fn sv_get_pattern_x(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_y(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_tracks(slot: c_int, pat_num: c_int) -> c_int;
pub fn sv_get_pattern_lines(slot: c_int, pat_num: c_int) -> c_int;
/// TODO
///
/// How to use sv_get_pattern_data():
///
/// - `int pat_tracks = sv_get_pattern_tracks(slot, pat_num);`
/// - `sunvox_note* data = sv_get_pattern_data(slot, pat_num);`
/// - `sunvox_note* n = &data[ line_number * pat_tracks + track_number ];`
/// - ... and then do someting with note n
pub fn sv_get_pattern_data(slot: c_int, pat_num: c_int) -> *mut sunvox_note;
/// TODO
///
/// USE LOCK/UNLOCK!
pub fn sv_pattern_mute(slot: c_int, pat_num: c_int, mute: c_int) -> c_int;
/// Gets the current tick counter
///
/// Returns a value between 0 and 0xFFFFFFFF inclusive.
///
/// SunVox engine uses its own time space, measured in ticks.
pub fn sv_get_ticks() -> c_uint;
/// Gets the number of SunVox ticks per second.
pub fn sv_get_ticks_per_second() -> c_uint;
} | random_line_split | |
beacon.py | import logging
import re
import socket
import struct
import time
import uuid
from threading import Timer
from urllib.parse import quote
import zeroconf
import config
from plugin import GetPlugin
SHARE_TEMPLATE = '/TiVoConnect?Command=QueryContainer&Container=%s'
PLATFORM_MAIN = 'pyTivo'
PLATFORM_VIDEO = 'pc/pyTivo' # For the nice icon
# It's possible this function should live somewhere else, but for now this
# is the only module that needs it. -mjl
def bytes2str(data):
    """
    Recursively convert bytes to str as utf-8.

    dict keys and values, and tuple elements, are converted as well.
    Any other type is returned unchanged.
    """
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, dict):
        return dict(map(bytes2str, data.items()))
    if isinstance(data, tuple):
        # Bug fix: the original returned the lazy ``map`` object itself,
        # which is not a tuple and can only be iterated once. Materialize
        # it so callers get a real, reusable tuple. (dict() above consumed
        # the map immediately, masking the bug for the dict path.)
        return tuple(map(bytes2str, data))
    return data
def log_serviceinfo(logger, info):
    """
    Write interesting attributes from a ServiceInfo to the log.

    Information written depends on the log level: basic info (name,
    address, port) is written w/ log level INFO; if the log level is
    DEBUG, the basic info plus more (server name and all properties)
    is written w/ log level DEBUG.
    """
    try:
        debugging = logger.isEnabledFor(logging.DEBUG)
        log_level = logging.INFO
        # Basic fields, logged at every level.
        # NOTE(review): assumes info.addresses holds at least one IPv4
        # address -- confirm against the zeroconf version in use.
        log_info = {'name': info.name,
                    'address': socket.inet_ntoa(info.addresses[0]),
                    'port': info.port}
        log_hdr = "\n {address}:{port} {name}\n"
        log_fmt = log_hdr
        if debugging:
            # At DEBUG, also log the server name (when it differs from the
            # service name) and every advertised property.
            log_level = logging.DEBUG
            if info.server != info.name:
                log_info['server'] = info.server
                log_fmt += " server: {server}\n"
            for (k, v) in info.properties.items():
                # Property keys are bytes; build a str key for the format
                # dict and a matching "{prop_<k>}" placeholder in the format
                # string (the triple braces yield one literal brace pair).
                li_k = "prop_" + bytes2str(k)
                log_info[li_k] = v
                log_fmt += " {k}: {{{li_k}}}\n".format(k=k, li_k=li_k)
        logger.log(log_level, log_fmt.format(**log_info))
    except:
        # Deliberately best-effort: a logging failure must never break
        # discovery/registration; record it and carry on.
        logger.exception("exception in log_tivo_serviceinfo")
class ZCListener:
    """Zeroconf service listener that records short service names in a shared list."""

    # pylint: disable=redefined-builtin
    def __init__(self, names, logger=None):
        self.names = names
        self.logger = logger

    @staticmethod
    def _short_name(name, type_):
        # Strip the trailing ".<service type>" suffix, leaving the bare
        # instance name.
        return name.replace('.' + type_, '')

    def add_service(self, server, type_, name):
        self.names.append(self._short_name(name, type_))

    def remove_service(self, server, type_, name):
        self.names.remove(self._short_name(name, type_))

    def update_service(self, server, type_, name):
        # Required by the zeroconf listener API; updates are only logged,
        # never acted upon.
        if self.logger is not None:
            # ex. WARNING:pyTivo.beacon:ZCListener.update_service name='Movies._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
            #     WARNING:pyTivo.beacon:ZCListener.update_service name='LivingRoomVox._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
            self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
    """Registers this server's shares as Zeroconf services and scans for TiVos."""

    def __init__(self, logger):
        """ Announce our shares via Zeroconf. """
        self.share_names = []     # sections successfully registered
        self.share_info = []      # ServiceInfo objects, kept for unregistering
        self.logger = logger
        self.rz = zeroconf.Zeroconf()
        self.renamed = {}         # section -> retitled name, on collisions
        # Scan first so our own announcements can avoid clashing with
        # titles already on the network.
        old_titles = self.scan()
        address = socket.inet_aton(config.get_ip())
        port = int(config.getPort())
        logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
        for section, settings in config.getShares():
            try:
                plugin = GetPlugin(settings['type'])
                ct = plugin.CONTENT_TYPE
                # if the plugin provides a test for validity use it otherwise assume valid
                if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
                    logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
                    continue
            except Exception as e:
                logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
                continue
            # Only container-type shares are announced over Zeroconf.
            if ct.startswith('x-container/'):
                if 'video' in ct:
                    platform = PLATFORM_VIDEO
                else:
                    platform = PLATFORM_MAIN
                logger.info('Registering: %s' % section)
                self.share_names.append(section)
                # Service properties (zeroconf wants bytes keys/values).
                desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
                        b'platform': bytes(platform, 'utf-8'),
                        b'protocol': b'http',
                        b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
                # e.g. 'x-container/tivo-videos' -> service type 'tivo-videos'
                tt = ct.split('/')[1]
                title = section
                count = 1
                # Retitle with a " [n]" suffix until it no longer collides
                # with a name seen during the scan.
                while title in old_titles:
                    # debugging info while I try to figure out what this loop is for
                    logger.info(" title b4: {}".format(title))
                    count += 1
                    title = '%s [%d]' % (section, count)
                    self.renamed[section] = title
                    # more debugging info
                    logger.info(" title after: {}\n section: {}".format(title, section))
                info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
                                            '%s._%s._tcp.local.' % (title, tt),
                                            port=port, addresses=[address], properties=desc)
                log_serviceinfo(self.logger, info)
                self.rz.register_service(info)
                self.share_info.append(info)

    def scan(self):
        """ Look for TiVos using Zeroconf.

        Populates config.tivos with anything found and returns the list of
        discovered service names (used by __init__ for collision avoidance).
        """
        VIDS = '_tivo-videos._tcp.local.'
        names = []
        self.logger.info('Scanning for TiVos...\n')
        # Get the names of servers offering TiVo videos
        browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
        # Give them a second (or more if no one has responded in the 1st second) to respond
        time.sleep(1)
        max_sec_to_wait = 10
        sec_waited = 0
        while not names and sec_waited < max_sec_to_wait:
            sec_waited += 1
            time.sleep(1)
        # Any results?
        if names:
            config.tivos_found = True
            # Now get the addresses -- this is the slow part
            for name in names:
                info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
                log_serviceinfo(self.logger, info)
                if info:
                    # zeroconf v2.7 removed ServiceInfo address member says use addresses instead.
                    # Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
                    self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
                    tsn = info.properties.get(b'TSN')
                    if config.get_togo('all'):
                        tsn = info.properties.get(b'tsn', tsn)
                    if tsn:
                        if isinstance(tsn, bytes):
                            tsn = tsn.decode('utf-8')
                        address = socket.inet_ntoa(info.addresses[0])
                        port = info.port
                        config.tivos[tsn] = {'name': name, 'address': address,
                                             'port': port}
                        # info.properties has bytes keys and values, but we'd rather
                        # deal with str keys and values, so convert them before adding
                        # them to our tivos dict.
                        config.tivos[tsn].update(bytes2str(info.properties))
        # Debugging information on what services have been found:
        # try:
        #     all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
        #     self.logger.info("All services found")
        #     for s in all_services:
        #         self.logger.info("  {}".format(s))
        # except Exception as e:
        #     self.logger.error(e)
        return names

    def shutdown(self):
        """Unregister every announced share and close the Zeroconf engine."""
        self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
        for info in self.share_info:
            self.rz.unregister_service(info)
        self.rz.close()
class Beacon:
    """UDP-broadcast and TCP "TiVoConnect" beacon announcing this server on the LAN."""

    def __init__(self):
        self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.services = []
        self.timer = None
        # Use the plain platform string if any music/photo share exists,
        # otherwise the video one (nicer icon on the TiVo).
        self.platform = PLATFORM_VIDEO
        for section, settings in config.getShares():
            try:
                ct = GetPlugin(settings['type']).CONTENT_TYPE
            except:
                continue
            if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
                self.platform = PLATFORM_MAIN
                break
        if config.get_zc():
            logger = logging.getLogger('pyTivo.beacon')
            try:
                self.bd = ZCBroadcast(logger)
            except Exception as e:
                logger.debug('Beacon.__init__: raised %s: %s', e.__class__.__name__, e)
                logger.error('Zeroconf failure')
                self.bd = None
        else:
            self.bd = None

    def add_service(self, service):
        """Register a service string and immediately re-broadcast."""
        self.services.append(service)
        self.send_beacon()

    def format_services(self):
        """Join the registered service strings with ';' for the beacon payload."""
        return ';'.join(self.services)

    def format_beacon(self, conntype, services=True):
        """Build the plain-text beacon payload, one key=value per line."""
        beacon = ['tivoconnect=1',
                  'method=%s' % conntype,
                  'identity={%s}' % config.getGUID(),
                  'machine=%s' % socket.gethostname(),
                  'platform=%s' % self.platform]
        if services:
            beacon.append('services=' + self.format_services())
        else:
            beacon.append('services=TiVoMediaServer:0/http')
        return '\n'.join(beacon) + '\n'

    def send_beacon(self):
        """UDP-broadcast the beacon to every configured address (port 2190)."""
        beacon_ips = config.getBeaconAddresses()
        beacon = self.format_beacon('broadcast')
        for beacon_ip in beacon_ips.split():
            if beacon_ip != 'listen':
                try:
                    packet = bytes(beacon, "utf-8")
                    while packet:
                        result = self.UDPSock.sendto(packet, (beacon_ip, 2190))
                        if result < 0:
                            break
                        packet = packet[result:]
                except Exception as e:
                    print(e)

    def start(self):
        """Broadcast now, then re-broadcast once a minute."""
        self.send_beacon()
        self.timer = Timer(60, self.start)
        self.timer.start()

    def stop(self):
        """Cancel the periodic broadcast and shut down Zeroconf if active."""
        self.timer.cancel()
        if self.bd:
            self.bd.shutdown()

    @staticmethod
    def recv_bytes(sock, length):
        """Read exactly `length` bytes from sock (fewer only on EOF).

        Bug fix: accumulate into a bytes buffer -- the original initialized
        ``block = ''`` (str), which raises TypeError when concatenated with
        socket data under Python 3.
        """
        block = b''
        while len(block) < length:
            add = sock.recv(length - len(block))
            if not add:
                break
            block += add
        return block

    @staticmethod
    def recv_packet(sock):
        """Read one 4-byte-big-endian-length-prefixed packet; return payload as str."""
        length = struct.unpack('!I', Beacon.recv_bytes(sock, 4))[0]
        # Bug fix: decode so callers (e.g. the machine= regex in get_name)
        # operate on str, as they did under Python 2.
        return Beacon.recv_bytes(sock, length).decode('utf-8')

    @staticmethod
    def send_packet(sock, packet):
        """Send a packet (str or bytes) with a 4-byte big-endian length prefix."""
        if isinstance(packet, str):
            # Bug fix: beacons are built as str; encode before framing,
            # otherwise bytes + str raises TypeError under Python 3.
            packet = packet.encode('utf-8')
        sock.sendall(struct.pack('!I', len(packet)) + packet)

    def listen(self):
        """ For the direct-connect, TCP-style beacon """
        import _thread

        def server():
            TCPSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            TCPSock.bind(('', 2190))
            TCPSock.listen(5)
            while True:
                # Wait for a connection
                client, address = TCPSock.accept()
                # Accept (and discard) the client's beacon
                self.recv_packet(client)
                # Send ours
                self.send_packet(client, self.format_beacon('connected'))
                client.close()

        _thread.start_new_thread(server, ())

    def get_name(self, address):
        """ Exchange beacons, and extract the machine name.

        (Reassembled: this method's opening lines were scrambled in the
        source by a line-split artifact; the docstring and the our_beacon
        assignment had been displaced to the end of the class.)
        """
        our_beacon = self.format_beacon('connected', False)
        machine_name = re.compile('machine=(.*)\n').search
        try:
            tsock = socket.socket()
            tsock.connect((address, 2190))
            self.send_packet(tsock, our_beacon)
            tivo_beacon = self.recv_packet(tsock)
            tsock.close()
            name = machine_name(tivo_beacon).groups()[0]
        except:
            # Best effort: fall back to the raw address on any failure.
            name = address
        return name
beacon.py | import logging
import re
import socket
import struct
import time
import uuid
from threading import Timer
from urllib.parse import quote
import zeroconf
import config
from plugin import GetPlugin
SHARE_TEMPLATE = '/TiVoConnect?Command=QueryContainer&Container=%s'
PLATFORM_MAIN = 'pyTivo'
PLATFORM_VIDEO = 'pc/pyTivo' # For the nice icon
# It's possible this function should live somewhere else, but for now this
# is the only module that needs it. -mjl
def bytes2str(data):
"""
Convert bytes to str as utf-8. sequence values (and keys) will also be converted.
"""
# pylint: disable=multiple-statements
if isinstance(data, bytes): return data.decode('utf-8')
if isinstance(data, dict): return dict(map(bytes2str, data.items()))
if isinstance(data, tuple): return map(bytes2str, data)
return data
def log_serviceinfo(logger, info):
"""
Write interesting attributes from a ServiceInfo to the log.
Information written depends on the log level, basic info
is written w/ log level INFO, if the log level is DEBUG
more the basic info plus more (all properties) is written
w/ log level DEBUG.
"""
try:
debugging = logger.isEnabledFor(logging.DEBUG)
log_level = logging.INFO
log_info = {'name': info.name,
'address': socket.inet_ntoa(info.addresses[0]),
'port': info.port}
log_hdr = "\n {address}:{port} {name}\n"
log_fmt = log_hdr
if debugging:
log_level = logging.DEBUG
if info.server != info.name:
log_info['server'] = info.server
log_fmt += " server: {server}\n"
for (k, v) in info.properties.items():
li_k = "prop_" + bytes2str(k)
log_info[li_k] = v
log_fmt += " {k}: {{{li_k}}}\n".format(k=k, li_k=li_k)
logger.log(log_level, log_fmt.format(**log_info))
except:
logger.exception("exception in log_tivo_serviceinfo")
class ZCListener:
# pylint: disable=redefined-builtin
def __init__(self, names, logger=None):
self.names = names
self.logger = logger
def remove_service(self, server, type_, name):
self.names.remove(name.replace('.' + type_, ''))
def add_service(self, server, type_, name):
self.names.append(name.replace('.' + type_, ''))
def update_service(self, server, type_, name):
# method is required, but can be ignored if you don't care about updates. We don't.
if self.logger is not None:
# ex. WARNING:pyTivo.beacon:ZCListener.update_service name='Movies._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
# WARNING:pyTivo.beacon:ZCListener.update_service name='LivingRoomVox._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
def __init__(self, logger):
""" Announce our shares via Zeroconf. """
self.share_names = []
self.share_info = []
self.logger = logger
self.rz = zeroconf.Zeroconf()
self.renamed = {}
old_titles = self.scan()
address = socket.inet_aton(config.get_ip())
port = int(config.getPort())
logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
for section, settings in config.getShares():
try:
plugin = GetPlugin(settings['type'])
ct = plugin.CONTENT_TYPE
# if the plugin provides a test for validity use it otherwise assume valid
if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
continue
except Exception as e:
logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
continue
if ct.startswith('x-container/'):
if 'video' in ct:
platform = PLATFORM_VIDEO
else:
platform = PLATFORM_MAIN
logger.info('Registering: %s' % section)
self.share_names.append(section)
desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
b'platform': bytes(platform, 'utf-8'),
b'protocol': b'http',
b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
tt = ct.split('/')[1]
title = section
count = 1
while title in old_titles:
# debugging info while I try to figure out what this loop is for
logger.info(" title b4: {}".format(title))
count += 1
title = '%s [%d]' % (section, count)
self.renamed[section] = title
# more debugging info
logger.info(" title after: {}\n section: {}".format(title, section))
info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
'%s._%s._tcp.local.' % (title, tt),
port=port, addresses=[address], properties=desc)
log_serviceinfo(self.logger, info)
self.rz.register_service(info)
self.share_info.append(info)
def scan(self):
""" Look for TiVos using Zeroconf. """
VIDS = '_tivo-videos._tcp.local.'
names = []
self.logger.info('Scanning for TiVos...\n')
# Get the names of servers offering TiVo videos
browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
# Give them a second (or more if no one has responded in the 1st second) to respond
time.sleep(1)
max_sec_to_wait = 10
sec_waited = 0
while not names and sec_waited < max_sec_to_wait:
sec_waited += 1
time.sleep(1)
# Any results?
if names:
config.tivos_found = True
# Now get the addresses -- this is the slow part
for name in names:
info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
log_serviceinfo(self.logger, info)
if info:
# zeroconf v2.7 removed ServiceInfo address member says use addresses instead.
# Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
tsn = info.properties.get(b'TSN')
if config.get_togo('all'):
tsn = info.properties.get(b'tsn', tsn)
if tsn:
if isinstance(tsn, bytes):
tsn = tsn.decode('utf-8')
address = socket.inet_ntoa(info.addresses[0])
port = info.port
config.tivos[tsn] = {'name': name, 'address': address,
'port': port}
# info.properties has bytes keys and values, but we'd rather
# deal with str keys and values, so convert them before adding
# them to our tivos dict.
config.tivos[tsn].update(bytes2str(info.properties))
# Debugging information on what services have been found:
# try:
# all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
# self.logger.info("All services found")
# for s in all_services:
# self.logger.info(" {}".format(s))
# except Exception as e:
# self.logger.error(e)
return names
def shutdown(self):
self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
for info in self.share_info:
self.rz.unregister_service(info)
self.rz.close()
class Beacon:
def __init__(self):
self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.services = []
self.timer = None
self.platform = PLATFORM_VIDEO
for section, settings in config.getShares():
try:
ct = GetPlugin(settings['type']).CONTENT_TYPE
except:
continue
if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
self.platform = PLATFORM_MAIN
break
if config.get_zc():
logger = logging.getLogger('pyTivo.beacon')
try:
self.bd = ZCBroadcast(logger)
except Exception as e:
logger.debug('Beacon.__init__: raised %s: %s', e.__class__.__name__, e)
logger.error('Zeroconf failure')
self.bd = None
else:
self.bd = None
def add_service(self, service):
self.services.append(service)
self.send_beacon()
def format_services(self):
return ';'.join(self.services)
def format_beacon(self, conntype, services=True):
beacon = ['tivoconnect=1',
'method=%s' % conntype,
'identity={%s}' % config.getGUID(),
'machine=%s' % socket.gethostname(),
'platform=%s' % self.platform]
if services:
beacon.append('services=' + self.format_services())
else:
beacon.append('services=TiVoMediaServer:0/http')
return '\n'.join(beacon) + '\n'
def send_beacon(self):
beacon_ips = config.getBeaconAddresses()
beacon = self.format_beacon('broadcast')
for beacon_ip in beacon_ips.split():
if beacon_ip != 'listen':
try:
packet = bytes(beacon, "utf-8")
while packet:
result = self.UDPSock.sendto(packet, (beacon_ip, 2190))
if result < 0:
break
packet = packet[result:]
except Exception as e:
print(e)
def start(self):
self.send_beacon()
self.timer = Timer(60, self.start)
self.timer.start()
def stop(self):
self.timer.cancel()
if self.bd:
self.bd.shutdown()
@staticmethod
def recv_bytes(sock, length):
block = ''
while len(block) < length:
|
return block
@staticmethod
def recv_packet(sock):
length = struct.unpack('!I', Beacon.recv_bytes(sock, 4))[0]
return Beacon.recv_bytes(sock, length)
@staticmethod
def send_packet(sock, packet):
sock.sendall(struct.pack('!I', len(packet)) + packet)
def listen(self):
""" For the direct-connect, TCP-style beacon """
import _thread
def server():
TCPSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCPSock.bind(('', 2190))
TCPSock.listen(5)
while True:
# Wait for a connection
client, address = TCPSock.accept()
# Accept (and discard) the client's beacon
self.recv_packet(client)
# Send ours
self.send_packet(client, self.format_beacon('connected'))
client.close()
_thread.start_new_thread(server, ())
def get_name(self, address):
""" Exchange beacons, and extract the machine name. """
our_beacon = self.format_beacon('connected', False)
machine_name = re.compile('machine=(.*)\n').search
try:
tsock = socket.socket()
tsock.connect((address, 2190))
self.send_packet(tsock, our_beacon)
tivo_beacon = self.recv_packet(tsock)
tsock.close()
name = machine_name(tivo_beacon).groups()[0]
except:
name = address
return name
| add = sock.recv(length - len(block))
if not add:
break
block += add | conditional_block |
beacon.py | import logging
import re
import socket
import struct
import time
import uuid
from threading import Timer
from urllib.parse import quote
import zeroconf
import config
from plugin import GetPlugin
SHARE_TEMPLATE = '/TiVoConnect?Command=QueryContainer&Container=%s'
PLATFORM_MAIN = 'pyTivo'
PLATFORM_VIDEO = 'pc/pyTivo' # For the nice icon
# It's possible this function should live somewhere else, but for now this
# is the only module that needs it. -mjl
def bytes2str(data):
"""
Convert bytes to str as utf-8. sequence values (and keys) will also be converted.
"""
# pylint: disable=multiple-statements
if isinstance(data, bytes): return data.decode('utf-8')
if isinstance(data, dict): return dict(map(bytes2str, data.items()))
if isinstance(data, tuple): return map(bytes2str, data)
return data
def log_serviceinfo(logger, info):
"""
Write interesting attributes from a ServiceInfo to the log.
Information written depends on the log level, basic info
is written w/ log level INFO, if the log level is DEBUG
more the basic info plus more (all properties) is written
w/ log level DEBUG.
"""
try:
debugging = logger.isEnabledFor(logging.DEBUG)
log_level = logging.INFO
log_info = {'name': info.name,
'address': socket.inet_ntoa(info.addresses[0]),
'port': info.port}
log_hdr = "\n {address}:{port} {name}\n"
log_fmt = log_hdr
if debugging:
log_level = logging.DEBUG
if info.server != info.name:
log_info['server'] = info.server
log_fmt += " server: {server}\n"
for (k, v) in info.properties.items():
li_k = "prop_" + bytes2str(k)
log_info[li_k] = v
log_fmt += " {k}: {{{li_k}}}\n".format(k=k, li_k=li_k)
logger.log(log_level, log_fmt.format(**log_info))
except:
logger.exception("exception in log_tivo_serviceinfo")
class ZCListener:
# pylint: disable=redefined-builtin
def __init__(self, names, logger=None):
self.names = names
self.logger = logger
def remove_service(self, server, type_, name):
self.names.remove(name.replace('.' + type_, ''))
def add_service(self, server, type_, name):
self.names.append(name.replace('.' + type_, ''))
def update_service(self, server, type_, name):
# method is required, but can be ignored if you don't care about updates. We don't.
if self.logger is not None:
# ex. WARNING:pyTivo.beacon:ZCListener.update_service name='Movies._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
# WARNING:pyTivo.beacon:ZCListener.update_service name='LivingRoomVox._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
def __init__(self, logger):
""" Announce our shares via Zeroconf. """
self.share_names = []
self.share_info = []
self.logger = logger
self.rz = zeroconf.Zeroconf()
self.renamed = {}
old_titles = self.scan()
address = socket.inet_aton(config.get_ip())
port = int(config.getPort())
logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
for section, settings in config.getShares():
try:
plugin = GetPlugin(settings['type'])
ct = plugin.CONTENT_TYPE
# if the plugin provides a test for validity use it otherwise assume valid
if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
continue
except Exception as e:
logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
continue
if ct.startswith('x-container/'):
if 'video' in ct:
platform = PLATFORM_VIDEO
else:
platform = PLATFORM_MAIN
logger.info('Registering: %s' % section)
self.share_names.append(section)
desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
b'platform': bytes(platform, 'utf-8'),
b'protocol': b'http',
b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
tt = ct.split('/')[1]
title = section
count = 1
while title in old_titles:
# debugging info while I try to figure out what this loop is for
logger.info(" title b4: {}".format(title))
count += 1
title = '%s [%d]' % (section, count)
self.renamed[section] = title
# more debugging info
logger.info(" title after: {}\n section: {}".format(title, section))
info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
'%s._%s._tcp.local.' % (title, tt),
port=port, addresses=[address], properties=desc)
log_serviceinfo(self.logger, info)
self.rz.register_service(info)
self.share_info.append(info)
def scan(self):
""" Look for TiVos using Zeroconf. """
VIDS = '_tivo-videos._tcp.local.'
names = []
self.logger.info('Scanning for TiVos...\n')
# Get the names of servers offering TiVo videos
browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
# Give them a second (or more if no one has responded in the 1st second) to respond
time.sleep(1)
max_sec_to_wait = 10
sec_waited = 0
while not names and sec_waited < max_sec_to_wait:
sec_waited += 1
time.sleep(1)
# Any results?
if names:
config.tivos_found = True
# Now get the addresses -- this is the slow part
for name in names:
info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
log_serviceinfo(self.logger, info)
if info:
# zeroconf v2.7 removed ServiceInfo address member says use addresses instead.
# Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
tsn = info.properties.get(b'TSN')
if config.get_togo('all'):
tsn = info.properties.get(b'tsn', tsn)
if tsn:
if isinstance(tsn, bytes):
tsn = tsn.decode('utf-8')
address = socket.inet_ntoa(info.addresses[0])
port = info.port
config.tivos[tsn] = {'name': name, 'address': address,
'port': port}
# info.properties has bytes keys and values, but we'd rather
# deal with str keys and values, so convert them before adding
# them to our tivos dict.
config.tivos[tsn].update(bytes2str(info.properties))
# Debugging information on what services have been found:
# try:
# all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
# self.logger.info("All services found")
# for s in all_services:
# self.logger.info(" {}".format(s))
# except Exception as e:
# self.logger.error(e)
return names
def shutdown(self):
self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
for info in self.share_info:
self.rz.unregister_service(info)
self.rz.close()
class Beacon:
def __init__(self):
self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.services = []
self.timer = None
self.platform = PLATFORM_VIDEO
for section, settings in config.getShares():
try:
ct = GetPlugin(settings['type']).CONTENT_TYPE
except:
continue
if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
self.platform = PLATFORM_MAIN
break
if config.get_zc():
logger = logging.getLogger('pyTivo.beacon')
try:
self.bd = ZCBroadcast(logger)
except Exception as e:
logger.debug('Beacon.__init__: raised %s: %s', e.__class__.__name__, e)
logger.error('Zeroconf failure')
self.bd = None
else:
self.bd = None
def add_service(self, service):
self.services.append(service)
self.send_beacon()
def format_services(self):
return ';'.join(self.services)
def format_beacon(self, conntype, services=True):
beacon = ['tivoconnect=1',
'method=%s' % conntype,
'identity={%s}' % config.getGUID(),
'machine=%s' % socket.gethostname(),
'platform=%s' % self.platform]
if services:
beacon.append('services=' + self.format_services())
else:
beacon.append('services=TiVoMediaServer:0/http')
return '\n'.join(beacon) + '\n'
def send_beacon(self):
beacon_ips = config.getBeaconAddresses()
beacon = self.format_beacon('broadcast')
for beacon_ip in beacon_ips.split():
if beacon_ip != 'listen':
try:
packet = bytes(beacon, "utf-8")
while packet:
result = self.UDPSock.sendto(packet, (beacon_ip, 2190))
if result < 0:
break
packet = packet[result:]
except Exception as e:
print(e)
def | (self):
self.send_beacon()
self.timer = Timer(60, self.start)
self.timer.start()
def stop(self):
self.timer.cancel()
if self.bd:
self.bd.shutdown()
@staticmethod
def recv_bytes(sock, length):
block = ''
while len(block) < length:
add = sock.recv(length - len(block))
if not add:
break
block += add
return block
@staticmethod
def recv_packet(sock):
length = struct.unpack('!I', Beacon.recv_bytes(sock, 4))[0]
return Beacon.recv_bytes(sock, length)
@staticmethod
def send_packet(sock, packet):
sock.sendall(struct.pack('!I', len(packet)) + packet)
def listen(self):
""" For the direct-connect, TCP-style beacon """
import _thread
def server():
TCPSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCPSock.bind(('', 2190))
TCPSock.listen(5)
while True:
# Wait for a connection
client, address = TCPSock.accept()
# Accept (and discard) the client's beacon
self.recv_packet(client)
# Send ours
self.send_packet(client, self.format_beacon('connected'))
client.close()
_thread.start_new_thread(server, ())
def get_name(self, address):
""" Exchange beacons, and extract the machine name. """
our_beacon = self.format_beacon('connected', False)
machine_name = re.compile('machine=(.*)\n').search
try:
tsock = socket.socket()
tsock.connect((address, 2190))
self.send_packet(tsock, our_beacon)
tivo_beacon = self.recv_packet(tsock)
tsock.close()
name = machine_name(tivo_beacon).groups()[0]
except:
name = address
return name
| start | identifier_name |
beacon.py | import logging
import re
import socket
import struct
import time
import uuid
from threading import Timer
from urllib.parse import quote
import zeroconf
import config
from plugin import GetPlugin
SHARE_TEMPLATE = '/TiVoConnect?Command=QueryContainer&Container=%s'
PLATFORM_MAIN = 'pyTivo'
PLATFORM_VIDEO = 'pc/pyTivo' # For the nice icon
# It's possible this function should live somewhere else, but for now this
# is the only module that needs it. -mjl
def bytes2str(data):
"""
Convert bytes to str as utf-8. sequence values (and keys) will also be converted.
"""
# pylint: disable=multiple-statements
if isinstance(data, bytes): return data.decode('utf-8')
if isinstance(data, dict): return dict(map(bytes2str, data.items()))
if isinstance(data, tuple): return map(bytes2str, data)
return data
def log_serviceinfo(logger, info):
"""
Write interesting attributes from a ServiceInfo to the log.
Information written depends on the log level, basic info
is written w/ log level INFO, if the log level is DEBUG
more the basic info plus more (all properties) is written
w/ log level DEBUG.
"""
try:
debugging = logger.isEnabledFor(logging.DEBUG)
log_level = logging.INFO
log_info = {'name': info.name,
'address': socket.inet_ntoa(info.addresses[0]),
'port': info.port}
log_hdr = "\n {address}:{port} {name}\n"
log_fmt = log_hdr
if debugging:
log_level = logging.DEBUG
if info.server != info.name:
log_info['server'] = info.server
log_fmt += " server: {server}\n"
for (k, v) in info.properties.items():
li_k = "prop_" + bytes2str(k)
log_info[li_k] = v
log_fmt += " {k}: {{{li_k}}}\n".format(k=k, li_k=li_k)
logger.log(log_level, log_fmt.format(**log_info))
except:
logger.exception("exception in log_tivo_serviceinfo")
class ZCListener:
# pylint: disable=redefined-builtin
def __init__(self, names, logger=None):
self.names = names
self.logger = logger
def remove_service(self, server, type_, name):
self.names.remove(name.replace('.' + type_, ''))
def add_service(self, server, type_, name):
self.names.append(name.replace('.' + type_, ''))
def update_service(self, server, type_, name):
# method is required, but can be ignored if you don't care about updates. We don't.
if self.logger is not None:
# ex. WARNING:pyTivo.beacon:ZCListener.update_service name='Movies._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
# WARNING:pyTivo.beacon:ZCListener.update_service name='LivingRoomVox._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
def __init__(self, logger):
""" Announce our shares via Zeroconf. """
self.share_names = []
self.share_info = []
self.logger = logger
self.rz = zeroconf.Zeroconf()
self.renamed = {}
old_titles = self.scan()
address = socket.inet_aton(config.get_ip())
port = int(config.getPort())
logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
for section, settings in config.getShares():
try:
plugin = GetPlugin(settings['type'])
ct = plugin.CONTENT_TYPE
# if the plugin provides a test for validity use it otherwise assume valid
if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
continue
except Exception as e:
logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
continue
if ct.startswith('x-container/'):
if 'video' in ct:
platform = PLATFORM_VIDEO
else:
platform = PLATFORM_MAIN
logger.info('Registering: %s' % section)
self.share_names.append(section)
desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
b'platform': bytes(platform, 'utf-8'),
b'protocol': b'http',
b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
tt = ct.split('/')[1]
title = section
count = 1
while title in old_titles:
# debugging info while I try to figure out what this loop is for
logger.info(" title b4: {}".format(title))
count += 1
title = '%s [%d]' % (section, count)
self.renamed[section] = title
# more debugging info
logger.info(" title after: {}\n section: {}".format(title, section))
info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
'%s._%s._tcp.local.' % (title, tt),
port=port, addresses=[address], properties=desc)
log_serviceinfo(self.logger, info)
self.rz.register_service(info)
self.share_info.append(info)
def scan(self):
""" Look for TiVos using Zeroconf. """
VIDS = '_tivo-videos._tcp.local.'
names = []
self.logger.info('Scanning for TiVos...\n')
# Get the names of servers offering TiVo videos
browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
# Give them a second (or more if no one has responded in the 1st second) to respond
time.sleep(1)
max_sec_to_wait = 10
sec_waited = 0
while not names and sec_waited < max_sec_to_wait:
sec_waited += 1
time.sleep(1)
# Any results?
if names:
config.tivos_found = True
# Now get the addresses -- this is the slow part
for name in names:
info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
log_serviceinfo(self.logger, info)
if info:
# zeroconf v2.7 removed ServiceInfo address member says use addresses instead.
# Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
tsn = info.properties.get(b'TSN')
if config.get_togo('all'):
tsn = info.properties.get(b'tsn', tsn)
if tsn:
if isinstance(tsn, bytes):
tsn = tsn.decode('utf-8')
address = socket.inet_ntoa(info.addresses[0])
port = info.port
config.tivos[tsn] = {'name': name, 'address': address,
'port': port}
# info.properties has bytes keys and values, but we'd rather
# deal with str keys and values, so convert them before adding
# them to our tivos dict.
config.tivos[tsn].update(bytes2str(info.properties))
# Debugging information on what services have been found:
# try:
# all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
# self.logger.info("All services found")
# for s in all_services:
# self.logger.info(" {}".format(s))
# except Exception as e:
# self.logger.error(e)
return names
def shutdown(self):
self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
for info in self.share_info:
self.rz.unregister_service(info)
self.rz.close()
class Beacon:
def __init__(self):
self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.services = []
self.timer = None
self.platform = PLATFORM_VIDEO
for section, settings in config.getShares():
try:
ct = GetPlugin(settings['type']).CONTENT_TYPE
except:
continue
if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
self.platform = PLATFORM_MAIN
break
if config.get_zc():
logger = logging.getLogger('pyTivo.beacon')
try:
self.bd = ZCBroadcast(logger)
except Exception as e:
logger.debug('Beacon.__init__: raised %s: %s', e.__class__.__name__, e)
logger.error('Zeroconf failure')
self.bd = None
else:
self.bd = None
def add_service(self, service):
self.services.append(service)
self.send_beacon()
def format_services(self):
|
def format_beacon(self, conntype, services=True):
beacon = ['tivoconnect=1',
'method=%s' % conntype,
'identity={%s}' % config.getGUID(),
'machine=%s' % socket.gethostname(),
'platform=%s' % self.platform]
if services:
beacon.append('services=' + self.format_services())
else:
beacon.append('services=TiVoMediaServer:0/http')
return '\n'.join(beacon) + '\n'
def send_beacon(self):
beacon_ips = config.getBeaconAddresses()
beacon = self.format_beacon('broadcast')
for beacon_ip in beacon_ips.split():
if beacon_ip != 'listen':
try:
packet = bytes(beacon, "utf-8")
while packet:
result = self.UDPSock.sendto(packet, (beacon_ip, 2190))
if result < 0:
break
packet = packet[result:]
except Exception as e:
print(e)
def start(self):
self.send_beacon()
self.timer = Timer(60, self.start)
self.timer.start()
def stop(self):
self.timer.cancel()
if self.bd:
self.bd.shutdown()
@staticmethod
def recv_bytes(sock, length):
block = ''
while len(block) < length:
add = sock.recv(length - len(block))
if not add:
break
block += add
return block
@staticmethod
def recv_packet(sock):
length = struct.unpack('!I', Beacon.recv_bytes(sock, 4))[0]
return Beacon.recv_bytes(sock, length)
@staticmethod
def send_packet(sock, packet):
sock.sendall(struct.pack('!I', len(packet)) + packet)
def listen(self):
""" For the direct-connect, TCP-style beacon """
import _thread
def server():
TCPSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCPSock.bind(('', 2190))
TCPSock.listen(5)
while True:
# Wait for a connection
client, address = TCPSock.accept()
# Accept (and discard) the client's beacon
self.recv_packet(client)
# Send ours
self.send_packet(client, self.format_beacon('connected'))
client.close()
_thread.start_new_thread(server, ())
def get_name(self, address):
""" Exchange beacons, and extract the machine name. """
our_beacon = self.format_beacon('connected', False)
machine_name = re.compile('machine=(.*)\n').search
try:
tsock = socket.socket()
tsock.connect((address, 2190))
self.send_packet(tsock, our_beacon)
tivo_beacon = self.recv_packet(tsock)
tsock.close()
name = machine_name(tivo_beacon).groups()[0]
except:
name = address
return name
| return ';'.join(self.services) | identifier_body |
ctap.rs | // Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.
//! Client to Authenticator Protocol CTAPv2 over USB HID
//!
//! Based on the spec avaliable at: <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html>
use core::cell::Cell;
use core::cmp;
use super::descriptors;
use super::descriptors::Buffer64;
use super::descriptors::DescriptorType;
use super::descriptors::EndpointAddress;
use super::descriptors::EndpointDescriptor;
use super::descriptors::HIDCountryCode;
use super::descriptors::HIDDescriptor;
use super::descriptors::HIDSubordinateDescriptor;
use super::descriptors::InterfaceDescriptor;
use super::descriptors::ReportDescriptor;
use super::descriptors::TransferDirection;
use super::usbc_client_ctrl::ClientCtrl;
use kernel::hil;
use kernel::hil::usb::TransferType;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::cells::TakeCell;
use kernel::ErrorCode;
/// Use 1 Interrupt transfer IN/OUT endpoint
const ENDPOINT_NUM: usize = 1;
const OUT_BUFFER: usize = 0;
const IN_BUFFER: usize = 1;
static LANGUAGES: &'static [u16; 1] = &[
0x0409, // English (United States)
];
/// Max packet size specified by spec
pub const MAX_CTRL_PACKET_SIZE: u8 = 64;
const N_ENDPOINTS: usize = 2;
/// The HID report descriptor for CTAP
/// This is a combination of:
/// - the CTAP spec, example 8
/// - USB HID spec examples
/// Plus it matches: https://chromium.googlesource.com/chromiumos/platform2/+/master/u2fd/u2fhid.cc
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well, if we have a buffer then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subcall
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before, let's pass it.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
} | fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Setup buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn ctrl_status(&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. That means we can use
/// this as a callback to mean that the transmission finished by waiting
/// until this function is called when we don't have anything left to send.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else {
// We can't receive data. Record that we have data to send later
// and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
}
} else {
// Make sure to put the RX buffer back.
self.recv_buffer.replace(buf);
hil::usb::OutResult::Ok
}
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
fn packet_transmitted(&'a self, endpoint: usize) {
self.send_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_transmitted(Ok(()), buf, endpoint);
});
});
}
} | }
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> { | random_line_split |
ctap.rs | // Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.
//! Client to Authenticator Protocol CTAPv2 over USB HID
//!
//! Based on the spec avaliable at: <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html>
use core::cell::Cell;
use core::cmp;
use super::descriptors;
use super::descriptors::Buffer64;
use super::descriptors::DescriptorType;
use super::descriptors::EndpointAddress;
use super::descriptors::EndpointDescriptor;
use super::descriptors::HIDCountryCode;
use super::descriptors::HIDDescriptor;
use super::descriptors::HIDSubordinateDescriptor;
use super::descriptors::InterfaceDescriptor;
use super::descriptors::ReportDescriptor;
use super::descriptors::TransferDirection;
use super::usbc_client_ctrl::ClientCtrl;
use kernel::hil;
use kernel::hil::usb::TransferType;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::cells::TakeCell;
use kernel::ErrorCode;
/// Use 1 Interrupt transfer IN/OUT endpoint
const ENDPOINT_NUM: usize = 1;
const OUT_BUFFER: usize = 0;
const IN_BUFFER: usize = 1;
static LANGUAGES: &'static [u16; 1] = &[
0x0409, // English (United States)
];
/// Max packet size specified by spec
pub const MAX_CTRL_PACKET_SIZE: u8 = 64;
const N_ENDPOINTS: usize = 2;
/// The HID report descriptor for CTAP
/// This is a combination of:
/// - the CTAP spec, example 8
/// - USB HID spec examples
/// Plus it matches: https://chromium.googlesource.com/chromiumos/platform2/+/master/u2fd/u2fhid.cc
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well, if we have a buffer then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subcall
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before, let's pass it.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Setup buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn ctrl_status(&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. That means we can use
/// this as a callback to mean that the transmission finished by waiting
/// until this function is called when we don't have anything left to send.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else |
} else {
// Make sure to put the RX buffer back.
self.recv_buffer.replace(buf);
hil::usb::OutResult::Ok
}
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
fn packet_transmitted(&'a self, endpoint: usize) {
self.send_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_transmitted(Ok(()), buf, endpoint);
});
});
}
}
| {
// We can't receive data. Record that we have data to send later
// and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
} | conditional_block |
ctap.rs | // Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.
//! Client to Authenticator Protocol CTAPv2 over USB HID
//!
//! Based on the spec avaliable at: <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html>
use core::cell::Cell;
use core::cmp;
use super::descriptors;
use super::descriptors::Buffer64;
use super::descriptors::DescriptorType;
use super::descriptors::EndpointAddress;
use super::descriptors::EndpointDescriptor;
use super::descriptors::HIDCountryCode;
use super::descriptors::HIDDescriptor;
use super::descriptors::HIDSubordinateDescriptor;
use super::descriptors::InterfaceDescriptor;
use super::descriptors::ReportDescriptor;
use super::descriptors::TransferDirection;
use super::usbc_client_ctrl::ClientCtrl;
use kernel::hil;
use kernel::hil::usb::TransferType;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::cells::TakeCell;
use kernel::ErrorCode;
/// Use 1 Interrupt transfer IN/OUT endpoint
const ENDPOINT_NUM: usize = 1;
const OUT_BUFFER: usize = 0;
const IN_BUFFER: usize = 1;
static LANGUAGES: &'static [u16; 1] = &[
0x0409, // English (United States)
];
/// Max packet size specified by spec
pub const MAX_CTRL_PACKET_SIZE: u8 = 64;
const N_ENDPOINTS: usize = 2;
/// The HID report descriptor for CTAP
/// This is a combination of:
/// - the CTAP spec, example 8
/// - USB HID spec examples
/// Plus it matches: https://chromium.googlesource.com/chromiumos/platform2/+/master/u2fd/u2fhid.cc
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well, if we have a buffer then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subcall
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> |
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Setup buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn ctrl_status(&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. That means we can use
/// this as a callback to mean that the transmission finished by waiting
/// until this function is called when we don't have anything left to send.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else {
// We can't receive data. Record that we have data to send later
// and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
}
} else {
// Make sure to put the RX buffer back.
self.recv_buffer.replace(buf);
hil::usb::OutResult::Ok
}
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
fn packet_transmitted(&'a self, endpoint: usize) {
self.send_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_transmitted(Ok(()), buf, endpoint);
});
});
}
}
| {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before, let's pass it.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
} | identifier_body |
ctap.rs | // Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.
//! Client to Authenticator Protocol CTAPv2 over USB HID
//!
//! Based on the spec avaliable at: <https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-client-to-authenticator-protocol-v2.0-id-20180227.html>
use core::cell::Cell;
use core::cmp;
use super::descriptors;
use super::descriptors::Buffer64;
use super::descriptors::DescriptorType;
use super::descriptors::EndpointAddress;
use super::descriptors::EndpointDescriptor;
use super::descriptors::HIDCountryCode;
use super::descriptors::HIDDescriptor;
use super::descriptors::HIDSubordinateDescriptor;
use super::descriptors::InterfaceDescriptor;
use super::descriptors::ReportDescriptor;
use super::descriptors::TransferDirection;
use super::usbc_client_ctrl::ClientCtrl;
use kernel::hil;
use kernel::hil::usb::TransferType;
use kernel::utilities::cells::OptionalCell;
use kernel::utilities::cells::TakeCell;
use kernel::ErrorCode;
/// Use 1 Interrupt transfer IN/OUT endpoint
const ENDPOINT_NUM: usize = 1;
const OUT_BUFFER: usize = 0;
const IN_BUFFER: usize = 1;
static LANGUAGES: &'static [u16; 1] = &[
0x0409, // English (United States)
];
/// Max packet size specified by spec
pub const MAX_CTRL_PACKET_SIZE: u8 = 64;
const N_ENDPOINTS: usize = 2;
/// The HID report descriptor for CTAP
/// This is a combination of:
/// - the CTAP spec, example 8
/// - USB HID spec examples
/// Plus it matches: https://chromium.googlesource.com/chromiumos/platform2/+/master/u2fd/u2fhid.cc
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well, if we have a buffer then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subcall
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before, let's pass it.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Setup buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn | (&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. That means we can use
/// this as a callback to mean that the transmission finished by waiting
/// until this function is called when we don't have anything left to send.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else {
// We can't receive data. Record that we have data to send later
// and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
}
} else {
// Make sure to put the RX buffer back.
self.recv_buffer.replace(buf);
hil::usb::OutResult::Ok
}
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
fn packet_transmitted(&'a self, endpoint: usize) {
self.send_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_transmitted(Ok(()), buf, endpoint);
});
});
}
}
| ctrl_status | identifier_name |
oracle.py | # Trains the network then uses it to make predictions
# Also transforms the data before and after the predictions are made
# A fairly generic interface, in that it can easily applied to other models
import logging
from timeit import default_timer as timer
from copy import deepcopy
from datetime import timedelta
import numpy as np
import pandas as pd
from alphai_feature_generation.cleaning import resample_ohlcv, fill_gaps
from alphai_feature_generation.transformation import GymDataTransformation
from alphai_time_series.transform import gaussianise
from alphai_delphi.oracle import AbstractOracle
from alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions
from alphai_cromulon_oracle.data.providers import TrainDataProvider
import alphai_cromulon_oracle.cromulon.train as cromulon
import alphai_cromulon_oracle.cromulon.evaluate as cromulon_eval
from alphai_cromulon_oracle.flags import build_tensorflow_flags
import alphai_cromulon_oracle.topology as tp
from alphai_cromulon_oracle import DATETIME_FORMAT_COMPACT
from alphai_cromulon_oracle.helpers import TrainFileManager, logtime
NETWORK_NAME = 'cromulon'
CLIP_VALUE = 5.0 # Largest number allowed to enter the network
DEFAULT_N_CORRELATED_SERIES = 1
DEFAULT_N_CONV_FILTERS = 32
DEFAULT_CONV_KERNEL_SIZE = [3, 3]
FEATURE_TO_RANK_CORRELATIONS = 0 # Use the first feature to form correlation coefficients
TRAIN_FILE_NAME_TEMPLATE = "{}_train_" + NETWORK_NAME
logger = logging.getLogger(__name__)
class CromulonOracle(AbstractOracle):
def _sanity_check(self):
pass
def global_transform(self, data):
transformed_data = self._data_transformation.apply_global_transformations(data)
return transformed_data
def resample(self, data):
resampled_raw_data = resample_ohlcv(data, "{}T".format(self._data_transformation.features_resample_minutes))
return resampled_raw_data
def fill_nan(self, data):
filled_data = fill_gaps(data, self._data_transformation.fill_limit, dropna=True)
return filled_data
def save(self):
pass
@property
def target_feature(self):
return self._target_feature
def load(self):
pass
def get_universe(self):
pass
def __init__(self, config):
"""
:param configuration: Dictionary containing all the parameters. Full specifications can be found at:
oracle-cromulon-python/docs/cromulon_options.md
"""
super().__init__(config)
logger.info('Initialising Cromulon Oracle.')
self.config = self.update_configuration(self.config)
self._init_data_transformation()
self._train_path = self.config['train_path']
n_correlated_series = self.config.get('n_correlated_series', DEFAULT_N_CORRELATED_SERIES)
self._configuration = self.config
self._init_train_file_manager()
self._tensorflow_flags = build_tensorflow_flags(self.config) # Perhaps use separate config dict here?
if self._tensorflow_flags.predict_single_shares:
self._n_input_series = int(np.minimum(n_correlated_series, self.config['n_series']))
self._n_forecasts = 1
else:
self._n_input_series = self.config['n_series']
self._n_forecasts = self.config['n_forecasts']
self._topology = None
def _init_train_file_manager(self):
self._train_file_manager = TrainFileManager(
self._train_path,
TRAIN_FILE_NAME_TEMPLATE,
DATETIME_FORMAT_COMPACT
)
self._train_file_manager.ensure_path_exists()
def _init_data_transformation(self):
data_transformation_config = self.config['data_transformation']
self._feature_list = data_transformation_config['feature_config_list']
self._n_features = len(self._feature_list)
data_transformation_config["prediction_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
data_transformation_config["features_start_market_minute"] = self.scheduling.training_frequency.minutes_offset
data_transformation_config["target_delta_ndays"] = int(self.scheduling.prediction_horizon.days)
data_transformation_config["target_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
self._target_feature = self._extract_target_feature(self._feature_list)
self._data_transformation = GymDataTransformation(data_transformation_config)
def train(self, data, execution_time):
"""
Trains the model
:param dict data: OHLCV data as dictionary of pandas DataFrame.
:param datetime.datetime execution_time: time of execution of training
:return:
"""
logger.info('Training model on {}.'.format(
execution_time,
))
train_x_dict, train_y_dict = self._data_transformation.create_train_data(data)
logger.info("Preprocessing training data")
train_x = self._preprocess_inputs(train_x_dict)
train_y = self._preprocess_outputs(train_y_dict)
logger.info("Processed train_x shape {}".format(train_x.shape))
train_x, train_y = self.filter_nan_samples(train_x, train_y)
logger.info("Filtered train_x shape {}".format(train_x.shape))
n_valid_samples = train_x.shape[0]
if n_valid_samples == 0:
raise ValueError("Aborting training: No valid samples")
elif n_valid_samples < 2e4:
logger.warning("Low number of training samples: {}".format(n_valid_samples))
# Topology can either be directly constructed from layers, or build from sequence of parameters
if self._topology is None:
n_timesteps = train_x.shape[2]
self.initialise_topology(n_timesteps)
logger.info('Initialised network topology: {}.'.format(self._topology.layers))
logger.info('Training features of shape: {}.'.format(
train_x.shape,
))
logger.info('Training labels of shape: {}.'.format(
train_y.shape,
))
resume_train_path = None
if self._tensorflow_flags.resume_training:
try:
resume_train_path = self._train_file_manager.latest_train_filename(execution_time)
except ValueError:
pass
train_path = self._train_file_manager.new_filename(execution_time)
tensorflow_path = TensorflowPath(train_path, resume_train_path)
tensorboard_options = TensorboardOptions(self._tensorflow_flags.tensorboard_log_path,
self._tensorflow_flags.learning_rate,
self._tensorflow_flags.batch_size,
execution_time
)
first_sample = train_x[0, :].flatten()
logger.info("Sample from first example in train_x: {}".format(first_sample[0:8]))
data_provider = TrainDataProvider(train_x, train_y, self._tensorflow_flags.batch_size)
self._do_train(tensorflow_path, tensorboard_options, data_provider)
@logtime(message="Training the model.")
def _do_train(self, tensorflow_path, tensorboard_options, data_provider):
cromulon.train(self._topology, data_provider, tensorflow_path, tensorboard_options, self._tensorflow_flags)
def _get_train_template(self):
|
def predict_classification(self, data, current_timestamp):
""" Returns the raw pdf from the network. """
latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)
predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(
data)
predict_x = self._preprocess_inputs(predict_x)
if self._topology is None:
n_timesteps = predict_x.shape[2]
self.initialise_topology(n_timesteps)
# Verify data is the correct shape
network_input_shape = self._topology.get_network_input_shape()
data_input_shape = predict_x.shape[-3:]
if data_input_shape != network_input_shape:
err_msg = 'Data shape' + str(data_input_shape) + " doesnt match network input " + str(
network_input_shape)
raise ValueError(err_msg)
predict_y = cromulon_eval.eval_neural_net(
predict_x, self._topology,
self._tensorflow_flags,
latest_train_file
)
if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position
predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)
predict_y = np.squeeze(predict_y, axis=1)
target_timestamps = []
for i in range(self._topology.n_forecasts):
temp_timestamp = deepcopy(target_timestamp)
target_timestamps.append(temp_timestamp)
target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)
return predict_y, symbols, target_timestamps
def predict(self, data, current_timestamp, number_of_iterations=1):
"""
Main method that gives us a prediction after the training phase is done
:param data: The dict of dataframes to be used for prediction
:type data: dict
:param current_timestamp: The timestamp of the time when the prediction is executed
:type current_timestamp: datetime.datetime
:param number_of_iterations: The number of iterations which we use to sample the uncertain features.
:type number_of_iterations: Integer
:return: Mean forecast, lower and upper confidence limits, and the timestamp of the prediction
:rtype: OraclePrediction
"""
if self._topology is None:
logger.warning('Not ready for prediction - safer to run train first')
logger.info('Cromulon Oracle prediction on {}.'.format(current_timestamp))
predict_y_list = []
for i in range(number_of_iterations):
predict_y, symbols, target_timestamps = self.predict_classification(data, current_timestamp)
predict_y_list.append(predict_y)
predict_y_stack = np.stack(predict_y_list)
average_predict_y = np.mean(predict_y_stack, axis=0)
means, conf_low, conf_high = self._data_transformation.inverse_transform_multi_predict_y(average_predict_y, symbols)
self.log_validity_of_predictions(means, conf_low, conf_high)
means_pd = pd.DataFrame(data=means, columns=symbols, index=target_timestamps)
conf_low_pd = pd.DataFrame(data=conf_low, columns=symbols, index=target_timestamps)
conf_high_pd = pd.DataFrame(data=conf_high, columns=symbols, index=target_timestamps)
means_pd, conf_low_pd, conf_high_pd = self.filter_predictions(means_pd, conf_low_pd, conf_high_pd)
return OraclePrediction(means_pd, conf_low_pd, conf_high_pd, current_timestamp)
def log_validity_of_predictions(self, means, conf_low, conf_high):
""" Checks that the network outputs are sensible. """
if not (np.isfinite(conf_low).all() and np.isfinite(conf_high).all()):
logger.warning('Confidence interval contains non-finite values.')
if not np.isfinite(means).all():
logger.warning('Means found to contain non-finite values.')
logger.info('Samples from predicted means: {}'.format(means[0:10]))
def filter_predictions(self, means, conf_low, conf_high):
""" Drops any predictions that are NaN, and remove those symbols from the corresponding confidence dataframe.
:param pdDF means: The predictions from which we'll extract the valid ones
:param pdDF conf_low: Lower bound of the confidence range of the prediction
:param pdDF conf_high: Upper bound of the confidence range of the prediction
:return: pdDF, pdDF, pdDF
"""
means = means.dropna()
valid_symbols = means.index.tolist()
conf_low = conf_low.loc[valid_symbols]
conf_high = conf_high.loc[valid_symbols]
return means, conf_low, conf_high
def filter_nan_samples(self, train_x, train_y):
""" Remove any sample in zeroth dimension which holds a nan """
n_samples = train_x.shape[0]
if n_samples != train_y.shape[0]:
raise ValueError("x and y sample lengths don't match")
validity_array = np.zeros(n_samples)
for i in range(n_samples):
x_sample = train_x[i, :]
y_sample = train_y[i, :]
validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()
mask = np.where(validity_array)[0]
return train_x[mask, :], train_y[mask, :]
def print_verification_report(self, data, data_name):
data = data.flatten()
nans = np.isnan(data).sum()
infs = np.isinf(data).sum()
finite_data = data[np.isfinite(data)]
max_data = np.max(finite_data)
min_data = np.min(finite_data)
mean = np.mean(finite_data)
sigma = np.std(finite_data)
logger.info("{} Infs, Nans: {}, {}".format(data_name, infs, nans))
logger.info("{} Min, Max: {}, {}".format(data_name, min_data, max_data))
logger.info("{} Mean, Sigma: {}, {}".format(data_name, mean, sigma))
if data_name == 'X_data' and np.abs(mean) > 1e-2:
logger.warning('Mean of input data is too large')
if data_name == 'Y_data' and max_data < 1e-2:
raise ValueError("Y Data not classified")
return min_data, max_data
def verify_y_data(self, y_data):
testy = deepcopy(y_data)
self.print_verification_report(testy, 'Y_data')
def verify_x_data(self, x_data):
"""Check for nans or crazy numbers.
"""
testx = deepcopy(x_data).flatten()
xmin, xmax = self.print_verification_report(testx, 'X_data')
if xmax > CLIP_VALUE or xmin < -CLIP_VALUE:
n_clipped_elements = np.sum(CLIP_VALUE < np.abs(testx))
n_elements = len(testx)
x_data = np.clip(x_data, a_min=-CLIP_VALUE, a_max=CLIP_VALUE)
logger.warning("Large inputs detected: clip values exceeding {}".format(CLIP_VALUE))
logger.info("{} of {} elements were clipped.".format(n_clipped_elements, n_elements))
return x_data
def update_configuration(self, config):
""" Pass on some config entries to data_transformation"""
config["data_transformation"]["n_classification_bins"] = config["n_classification_bins"]
config["data_transformation"]["nassets"] = config["nassets"]
config["data_transformation"]["classify_per_series"] = config["classify_per_series"]
config["data_transformation"]["normalise_per_series"] = config["normalise_per_series"]
return config
def _preprocess_inputs(self, train_x_dict):
""" Prepare training data to be fed into Cromulon. """
numpy_arrays = []
for key, value in train_x_dict.items():
numpy_arrays.append(value)
logger.info("Appending feature of shape {}".format(value.shape))
# Currently train_x will have dimensions [features; samples; timesteps; symbols]
train_x = np.stack(numpy_arrays, axis=0)
train_x = self.reorder_input_dimensions(train_x)
# Expand dataset if requested
if self._tensorflow_flags.predict_single_shares:
train_x = self.expand_input_data(train_x)
train_x = self.verify_x_data(train_x)
return train_x.astype(np.float32) # FIXME: set float32 in data transform, conditional on config file
def _preprocess_outputs(self, train_y_dict):
train_y = list(train_y_dict.values())[0]
train_y = np.swapaxes(train_y, axis1=1, axis2=2)
if self._tensorflow_flags.predict_single_shares:
n_feat_y = train_y.shape[2]
train_y = np.reshape(train_y, [-1, 1, 1, n_feat_y])
self.verify_y_data(train_y)
return train_y.astype(np.float32) # FIXME:set float32 in data transform, conditional on config file
def gaussianise_series(self, train_x):
""" Gaussianise each series within each batch - but don't normalise means
:param nparray train_x: Series in format [batches, features, series]. NB ensure all features
are of the same kind
:return: nparray The same data but now each series is gaussianised
"""
n_batches = train_x.shape[0]
for batch in range(n_batches):
train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)
return train_x
def reorder_input_dimensions(self, train_x):
""" Reassign ordering of dimensions.
:param train_x: Enters with dimensions [features; samples; timesteps; series]
:return: train_x Now with dimensions [samples; series ; time; features]
"""
source = [0, 1, 2, 3]
destination = [3, 0, 2, 1]
return np.moveaxis(train_x, source, destination)
def expand_input_data(self, train_x):
"""Converts to the form where each time series is predicted separately, though companion time series are
included as auxilliary features
:param nparray train_x: [samples; series ; time; features]
:return: nparray The expanded training dataset, still in the format [samples; series ; time; features]
"""
n_samples = train_x.shape[0]
n_series = train_x.shape[1]
n_timesteps = train_x.shape[2]
n_features = train_x.shape[3]
n_expanded_samples = n_samples * n_series
logger.info("Data found to hold {} samples, {} series, {} timesteps, {} features.".format(
n_samples, n_series, n_timesteps, n_features))
target_shape = [n_expanded_samples, self._n_input_series, n_timesteps, n_features]
found_duplicates = False
if self._n_input_series == 1:
corr_train_x = train_x.reshape(target_shape)
else:
corr_train_x = np.zeros(shape=target_shape)
for sample in range(n_samples):
# Series ordering may differ between batches - so we need the correlations for each batch
data_sample = train_x[sample, :, :, FEATURE_TO_RANK_CORRELATIONS]
neg_correlation_matrix = - np.corrcoef(data_sample, rowvar=False) # False since col represents a var
correlation_indices = neg_correlation_matrix.argsort(axis=1) # Sort negatives to get descending order
for series_index in range(n_series):
if correlation_indices[series_index, [0]] != series_index:
found_duplicates = True
sample_number = sample * n_series + series_index
for i in range(self._n_input_series):
corr_series_index = correlation_indices[series_index, i]
corr_train_x[sample_number, :, i] = train_x[sample, :, corr_series_index]
if found_duplicates:
logger.warning('Some NaNs or duplicate series were found in the data')
return corr_train_x
def initialise_topology(self, n_timesteps):
""" Set up the network topology based upon the configuration file, and shape of input data. """
layer_heights = self._configuration['layer_heights']
layer_widths = self._configuration['layer_widths']
layer_depths = np.ones(len(layer_heights), dtype=np.int)
default_layer_types = ['full'] * len(layer_heights)
layer_types = self._configuration.get('layer_types', default_layer_types)
# Override input layer to match data
layer_depths[0] = 1 # n input series currently fixed to 1
layer_heights[0] = n_timesteps
layer_widths[0] = self._n_features
# Setup convolutional layer configuration
conv_config = {}
conv_config["kernel_size"] = self._configuration.get('kernel_size', DEFAULT_CONV_KERNEL_SIZE)
conv_config["n_kernels"] = self._configuration.get('n_kernels', DEFAULT_N_CONV_FILTERS)
conv_config["dilation_rates"] = self._configuration.get('dilation_rates', 1)
conv_config["strides"] = self._configuration.get('strides', 1)
self._topology = tp.Topology(
n_timesteps=n_timesteps,
n_forecasts=self._n_forecasts,
n_classification_bins=self._configuration['n_classification_bins'],
layer_heights=layer_heights,
layer_widths=layer_widths,
layer_depths=layer_depths,
layer_types=layer_types,
activation_functions=self._configuration['activation_functions'],
n_features=self._n_features,
conv_config=conv_config
)
def _extract_target_feature(self, feature_list):
for feature in feature_list:
if feature['is_target']:
return feature['name']
raise ValueError("You must specify at least one target feature")
def _filter_universe_from_data_for_prediction(self, data, current_timestamp, universe):
"""
Filters the dataframes inside the dict, returning a new dict with only the columns
available in the universe for that particular date
:param data: dict of dataframes
:type data: dict
:param current_timestamp: the current timestamp
:type datetime.datetime
:param universe: dataframe containing mapping of data -> list of assets
:type universe: pd.DataFrame
:return: dict of pd.DataFrame
:rtype dict
"""
current_date = current_timestamp.date()
assets = []
for idx, row in universe.iterrows():
if row.start_date <= current_date <= row.end_date:
assets = row.assets
break
filtered = {}
for feature, df in data.items():
filtered[feature] = df.drop(df.columns.difference(assets), axis=1)
return filtered
class OraclePrediction:
def __init__(self, mean_forecast, lower_bound, upper_bound, current_timestamp):
""" Container for the oracle predictions.
:param mean_forecast: Prediction values for various series at various times
:type mean_forecast: pd.DataFrame
:param lower_bound: Lower edge of the requested confidence interval
:type lower_bound: pd.DataFrame
:param upper_bound: Upper edge of the requested confidence interval
:type upper_bound: pd.DataFrame
:param current_timestamp: Timestamp when the prediction was made
:type target_timestamp: datetime
"""
self.mean_forecast = mean_forecast
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.current_timestamp = current_timestamp
def __repr__(self):
return "<Oracle prediction: {}>".format(self.__dict__) | return TRAIN_FILE_NAME_TEMPLATE | identifier_body |
oracle.py | # Trains the network then uses it to make predictions
# Also transforms the data before and after the predictions are made
# A fairly generic interface, in that it can easily applied to other models
import logging
from timeit import default_timer as timer
from copy import deepcopy
from datetime import timedelta
import numpy as np
import pandas as pd
from alphai_feature_generation.cleaning import resample_ohlcv, fill_gaps
from alphai_feature_generation.transformation import GymDataTransformation
from alphai_time_series.transform import gaussianise
from alphai_delphi.oracle import AbstractOracle
from alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions
from alphai_cromulon_oracle.data.providers import TrainDataProvider
import alphai_cromulon_oracle.cromulon.train as cromulon
import alphai_cromulon_oracle.cromulon.evaluate as cromulon_eval
from alphai_cromulon_oracle.flags import build_tensorflow_flags
import alphai_cromulon_oracle.topology as tp
from alphai_cromulon_oracle import DATETIME_FORMAT_COMPACT
from alphai_cromulon_oracle.helpers import TrainFileManager, logtime
NETWORK_NAME = 'cromulon'
CLIP_VALUE = 5.0 # Largest number allowed to enter the network
DEFAULT_N_CORRELATED_SERIES = 1
DEFAULT_N_CONV_FILTERS = 32
DEFAULT_CONV_KERNEL_SIZE = [3, 3]
FEATURE_TO_RANK_CORRELATIONS = 0 # Use the first feature to form correlation coefficients
TRAIN_FILE_NAME_TEMPLATE = "{}_train_" + NETWORK_NAME
logger = logging.getLogger(__name__)
class CromulonOracle(AbstractOracle):
def _sanity_check(self):
pass
def global_transform(self, data):
transformed_data = self._data_transformation.apply_global_transformations(data)
return transformed_data
def resample(self, data):
resampled_raw_data = resample_ohlcv(data, "{}T".format(self._data_transformation.features_resample_minutes))
return resampled_raw_data
def fill_nan(self, data):
filled_data = fill_gaps(data, self._data_transformation.fill_limit, dropna=True)
return filled_data
def save(self):
pass
@property
def target_feature(self):
return self._target_feature
def load(self):
pass
def get_universe(self):
pass
def __init__(self, config):
"""
:param configuration: Dictionary containing all the parameters. Full specifications can be found at:
oracle-cromulon-python/docs/cromulon_options.md
"""
super().__init__(config)
logger.info('Initialising Cromulon Oracle.')
self.config = self.update_configuration(self.config)
self._init_data_transformation()
self._train_path = self.config['train_path']
n_correlated_series = self.config.get('n_correlated_series', DEFAULT_N_CORRELATED_SERIES)
self._configuration = self.config
self._init_train_file_manager()
self._tensorflow_flags = build_tensorflow_flags(self.config) # Perhaps use separate config dict here?
if self._tensorflow_flags.predict_single_shares:
self._n_input_series = int(np.minimum(n_correlated_series, self.config['n_series']))
self._n_forecasts = 1
else:
self._n_input_series = self.config['n_series']
self._n_forecasts = self.config['n_forecasts']
self._topology = None
def _init_train_file_manager(self):
self._train_file_manager = TrainFileManager(
self._train_path,
TRAIN_FILE_NAME_TEMPLATE,
DATETIME_FORMAT_COMPACT
)
self._train_file_manager.ensure_path_exists()
def _init_data_transformation(self):
data_transformation_config = self.config['data_transformation']
self._feature_list = data_transformation_config['feature_config_list']
self._n_features = len(self._feature_list)
data_transformation_config["prediction_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
data_transformation_config["features_start_market_minute"] = self.scheduling.training_frequency.minutes_offset
data_transformation_config["target_delta_ndays"] = int(self.scheduling.prediction_horizon.days)
data_transformation_config["target_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
self._target_feature = self._extract_target_feature(self._feature_list)
self._data_transformation = GymDataTransformation(data_transformation_config)
def train(self, data, execution_time):
"""
Trains the model
:param dict data: OHLCV data as dictionary of pandas DataFrame.
:param datetime.datetime execution_time: time of execution of training
:return:
"""
logger.info('Training model on {}.'.format(
execution_time,
))
train_x_dict, train_y_dict = self._data_transformation.create_train_data(data)
logger.info("Preprocessing training data")
train_x = self._preprocess_inputs(train_x_dict)
train_y = self._preprocess_outputs(train_y_dict)
logger.info("Processed train_x shape {}".format(train_x.shape))
train_x, train_y = self.filter_nan_samples(train_x, train_y)
logger.info("Filtered train_x shape {}".format(train_x.shape))
n_valid_samples = train_x.shape[0]
if n_valid_samples == 0:
raise ValueError("Aborting training: No valid samples")
elif n_valid_samples < 2e4:
logger.warning("Low number of training samples: {}".format(n_valid_samples))
# Topology can either be directly constructed from layers, or build from sequence of parameters
if self._topology is None:
|
logger.info('Initialised network topology: {}.'.format(self._topology.layers))
logger.info('Training features of shape: {}.'.format(
train_x.shape,
))
logger.info('Training labels of shape: {}.'.format(
train_y.shape,
))
resume_train_path = None
if self._tensorflow_flags.resume_training:
try:
resume_train_path = self._train_file_manager.latest_train_filename(execution_time)
except ValueError:
pass
train_path = self._train_file_manager.new_filename(execution_time)
tensorflow_path = TensorflowPath(train_path, resume_train_path)
tensorboard_options = TensorboardOptions(self._tensorflow_flags.tensorboard_log_path,
self._tensorflow_flags.learning_rate,
self._tensorflow_flags.batch_size,
execution_time
)
first_sample = train_x[0, :].flatten()
logger.info("Sample from first example in train_x: {}".format(first_sample[0:8]))
data_provider = TrainDataProvider(train_x, train_y, self._tensorflow_flags.batch_size)
self._do_train(tensorflow_path, tensorboard_options, data_provider)
@logtime(message="Training the model.")
def _do_train(self, tensorflow_path, tensorboard_options, data_provider):
cromulon.train(self._topology, data_provider, tensorflow_path, tensorboard_options, self._tensorflow_flags)
def _get_train_template(self):
return TRAIN_FILE_NAME_TEMPLATE
def predict_classification(self, data, current_timestamp):
""" Returns the raw pdf from the network. """
latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)
predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(
data)
predict_x = self._preprocess_inputs(predict_x)
if self._topology is None:
n_timesteps = predict_x.shape[2]
self.initialise_topology(n_timesteps)
# Verify data is the correct shape
network_input_shape = self._topology.get_network_input_shape()
data_input_shape = predict_x.shape[-3:]
if data_input_shape != network_input_shape:
err_msg = 'Data shape' + str(data_input_shape) + " doesnt match network input " + str(
network_input_shape)
raise ValueError(err_msg)
predict_y = cromulon_eval.eval_neural_net(
predict_x, self._topology,
self._tensorflow_flags,
latest_train_file
)
if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position
predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)
predict_y = np.squeeze(predict_y, axis=1)
target_timestamps = []
for i in range(self._topology.n_forecasts):
temp_timestamp = deepcopy(target_timestamp)
target_timestamps.append(temp_timestamp)
target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)
return predict_y, symbols, target_timestamps
def predict(self, data, current_timestamp, number_of_iterations=1):
"""
Main method that gives us a prediction after the training phase is done
:param data: The dict of dataframes to be used for prediction
:type data: dict
:param current_timestamp: The timestamp of the time when the prediction is executed
:type current_timestamp: datetime.datetime
:param number_of_iterations: The number of iterations which we use to sample the uncertain features.
:type number_of_iterations: Integer
:return: Mean forecast, lower and upper confidence limits, and the timestamp of the prediction
:rtype: OraclePrediction
"""
if self._topology is None:
logger.warning('Not ready for prediction - safer to run train first')
logger.info('Cromulon Oracle prediction on {}.'.format(current_timestamp))
predict_y_list = []
for i in range(number_of_iterations):
predict_y, symbols, target_timestamps = self.predict_classification(data, current_timestamp)
predict_y_list.append(predict_y)
predict_y_stack = np.stack(predict_y_list)
average_predict_y = np.mean(predict_y_stack, axis=0)
means, conf_low, conf_high = self._data_transformation.inverse_transform_multi_predict_y(average_predict_y, symbols)
self.log_validity_of_predictions(means, conf_low, conf_high)
means_pd = pd.DataFrame(data=means, columns=symbols, index=target_timestamps)
conf_low_pd = pd.DataFrame(data=conf_low, columns=symbols, index=target_timestamps)
conf_high_pd = pd.DataFrame(data=conf_high, columns=symbols, index=target_timestamps)
means_pd, conf_low_pd, conf_high_pd = self.filter_predictions(means_pd, conf_low_pd, conf_high_pd)
return OraclePrediction(means_pd, conf_low_pd, conf_high_pd, current_timestamp)
def log_validity_of_predictions(self, means, conf_low, conf_high):
""" Checks that the network outputs are sensible. """
if not (np.isfinite(conf_low).all() and np.isfinite(conf_high).all()):
logger.warning('Confidence interval contains non-finite values.')
if not np.isfinite(means).all():
logger.warning('Means found to contain non-finite values.')
logger.info('Samples from predicted means: {}'.format(means[0:10]))
def filter_predictions(self, means, conf_low, conf_high):
""" Drops any predictions that are NaN, and remove those symbols from the corresponding confidence dataframe.
:param pdDF means: The predictions from which we'll extract the valid ones
:param pdDF conf_low: Lower bound of the confidence range of the prediction
:param pdDF conf_high: Upper bound of the confidence range of the prediction
:return: pdDF, pdDF, pdDF
"""
means = means.dropna()
valid_symbols = means.index.tolist()
conf_low = conf_low.loc[valid_symbols]
conf_high = conf_high.loc[valid_symbols]
return means, conf_low, conf_high
def filter_nan_samples(self, train_x, train_y):
""" Remove any sample in zeroth dimension which holds a nan """
n_samples = train_x.shape[0]
if n_samples != train_y.shape[0]:
raise ValueError("x and y sample lengths don't match")
validity_array = np.zeros(n_samples)
for i in range(n_samples):
x_sample = train_x[i, :]
y_sample = train_y[i, :]
validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()
mask = np.where(validity_array)[0]
return train_x[mask, :], train_y[mask, :]
def print_verification_report(self, data, data_name):
data = data.flatten()
nans = np.isnan(data).sum()
infs = np.isinf(data).sum()
finite_data = data[np.isfinite(data)]
max_data = np.max(finite_data)
min_data = np.min(finite_data)
mean = np.mean(finite_data)
sigma = np.std(finite_data)
logger.info("{} Infs, Nans: {}, {}".format(data_name, infs, nans))
logger.info("{} Min, Max: {}, {}".format(data_name, min_data, max_data))
logger.info("{} Mean, Sigma: {}, {}".format(data_name, mean, sigma))
if data_name == 'X_data' and np.abs(mean) > 1e-2:
logger.warning('Mean of input data is too large')
if data_name == 'Y_data' and max_data < 1e-2:
raise ValueError("Y Data not classified")
return min_data, max_data
def verify_y_data(self, y_data):
testy = deepcopy(y_data)
self.print_verification_report(testy, 'Y_data')
def verify_x_data(self, x_data):
"""Check for nans or crazy numbers.
"""
testx = deepcopy(x_data).flatten()
xmin, xmax = self.print_verification_report(testx, 'X_data')
if xmax > CLIP_VALUE or xmin < -CLIP_VALUE:
n_clipped_elements = np.sum(CLIP_VALUE < np.abs(testx))
n_elements = len(testx)
x_data = np.clip(x_data, a_min=-CLIP_VALUE, a_max=CLIP_VALUE)
logger.warning("Large inputs detected: clip values exceeding {}".format(CLIP_VALUE))
logger.info("{} of {} elements were clipped.".format(n_clipped_elements, n_elements))
return x_data
def update_configuration(self, config):
""" Pass on some config entries to data_transformation"""
config["data_transformation"]["n_classification_bins"] = config["n_classification_bins"]
config["data_transformation"]["nassets"] = config["nassets"]
config["data_transformation"]["classify_per_series"] = config["classify_per_series"]
config["data_transformation"]["normalise_per_series"] = config["normalise_per_series"]
return config
def _preprocess_inputs(self, train_x_dict):
""" Prepare training data to be fed into Cromulon. """
numpy_arrays = []
for key, value in train_x_dict.items():
numpy_arrays.append(value)
logger.info("Appending feature of shape {}".format(value.shape))
# Currently train_x will have dimensions [features; samples; timesteps; symbols]
train_x = np.stack(numpy_arrays, axis=0)
train_x = self.reorder_input_dimensions(train_x)
# Expand dataset if requested
if self._tensorflow_flags.predict_single_shares:
train_x = self.expand_input_data(train_x)
train_x = self.verify_x_data(train_x)
return train_x.astype(np.float32) # FIXME: set float32 in data transform, conditional on config file
def _preprocess_outputs(self, train_y_dict):
train_y = list(train_y_dict.values())[0]
train_y = np.swapaxes(train_y, axis1=1, axis2=2)
if self._tensorflow_flags.predict_single_shares:
n_feat_y = train_y.shape[2]
train_y = np.reshape(train_y, [-1, 1, 1, n_feat_y])
self.verify_y_data(train_y)
return train_y.astype(np.float32) # FIXME:set float32 in data transform, conditional on config file
def gaussianise_series(self, train_x):
""" Gaussianise each series within each batch - but don't normalise means
:param nparray train_x: Series in format [batches, features, series]. NB ensure all features
are of the same kind
:return: nparray The same data but now each series is gaussianised
"""
n_batches = train_x.shape[0]
for batch in range(n_batches):
train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)
return train_x
def reorder_input_dimensions(self, train_x):
""" Reassign ordering of dimensions.
:param train_x: Enters with dimensions [features; samples; timesteps; series]
:return: train_x Now with dimensions [samples; series ; time; features]
"""
source = [0, 1, 2, 3]
destination = [3, 0, 2, 1]
return np.moveaxis(train_x, source, destination)
def expand_input_data(self, train_x):
"""Converts to the form where each time series is predicted separately, though companion time series are
included as auxilliary features
:param nparray train_x: [samples; series ; time; features]
:return: nparray The expanded training dataset, still in the format [samples; series ; time; features]
"""
n_samples = train_x.shape[0]
n_series = train_x.shape[1]
n_timesteps = train_x.shape[2]
n_features = train_x.shape[3]
n_expanded_samples = n_samples * n_series
logger.info("Data found to hold {} samples, {} series, {} timesteps, {} features.".format(
n_samples, n_series, n_timesteps, n_features))
target_shape = [n_expanded_samples, self._n_input_series, n_timesteps, n_features]
found_duplicates = False
if self._n_input_series == 1:
corr_train_x = train_x.reshape(target_shape)
else:
corr_train_x = np.zeros(shape=target_shape)
for sample in range(n_samples):
# Series ordering may differ between batches - so we need the correlations for each batch
data_sample = train_x[sample, :, :, FEATURE_TO_RANK_CORRELATIONS]
neg_correlation_matrix = - np.corrcoef(data_sample, rowvar=False) # False since col represents a var
correlation_indices = neg_correlation_matrix.argsort(axis=1) # Sort negatives to get descending order
for series_index in range(n_series):
if correlation_indices[series_index, [0]] != series_index:
found_duplicates = True
sample_number = sample * n_series + series_index
for i in range(self._n_input_series):
corr_series_index = correlation_indices[series_index, i]
corr_train_x[sample_number, :, i] = train_x[sample, :, corr_series_index]
if found_duplicates:
logger.warning('Some NaNs or duplicate series were found in the data')
return corr_train_x
def initialise_topology(self, n_timesteps):
""" Set up the network topology based upon the configuration file, and shape of input data. """
layer_heights = self._configuration['layer_heights']
layer_widths = self._configuration['layer_widths']
layer_depths = np.ones(len(layer_heights), dtype=np.int)
default_layer_types = ['full'] * len(layer_heights)
layer_types = self._configuration.get('layer_types', default_layer_types)
# Override input layer to match data
layer_depths[0] = 1 # n input series currently fixed to 1
layer_heights[0] = n_timesteps
layer_widths[0] = self._n_features
# Setup convolutional layer configuration
conv_config = {}
conv_config["kernel_size"] = self._configuration.get('kernel_size', DEFAULT_CONV_KERNEL_SIZE)
conv_config["n_kernels"] = self._configuration.get('n_kernels', DEFAULT_N_CONV_FILTERS)
conv_config["dilation_rates"] = self._configuration.get('dilation_rates', 1)
conv_config["strides"] = self._configuration.get('strides', 1)
self._topology = tp.Topology(
n_timesteps=n_timesteps,
n_forecasts=self._n_forecasts,
n_classification_bins=self._configuration['n_classification_bins'],
layer_heights=layer_heights,
layer_widths=layer_widths,
layer_depths=layer_depths,
layer_types=layer_types,
activation_functions=self._configuration['activation_functions'],
n_features=self._n_features,
conv_config=conv_config
)
def _extract_target_feature(self, feature_list):
for feature in feature_list:
if feature['is_target']:
return feature['name']
raise ValueError("You must specify at least one target feature")
def _filter_universe_from_data_for_prediction(self, data, current_timestamp, universe):
"""
Filters the dataframes inside the dict, returning a new dict with only the columns
available in the universe for that particular date
:param data: dict of dataframes
:type data: dict
:param current_timestamp: the current timestamp
:type datetime.datetime
:param universe: dataframe containing mapping of data -> list of assets
:type universe: pd.DataFrame
:return: dict of pd.DataFrame
:rtype dict
"""
current_date = current_timestamp.date()
assets = []
for idx, row in universe.iterrows():
if row.start_date <= current_date <= row.end_date:
assets = row.assets
break
filtered = {}
for feature, df in data.items():
filtered[feature] = df.drop(df.columns.difference(assets), axis=1)
return filtered
class OraclePrediction:
def __init__(self, mean_forecast, lower_bound, upper_bound, current_timestamp):
""" Container for the oracle predictions.
:param mean_forecast: Prediction values for various series at various times
:type mean_forecast: pd.DataFrame
:param lower_bound: Lower edge of the requested confidence interval
:type lower_bound: pd.DataFrame
:param upper_bound: Upper edge of the requested confidence interval
:type upper_bound: pd.DataFrame
:param current_timestamp: Timestamp when the prediction was made
:type target_timestamp: datetime
"""
self.mean_forecast = mean_forecast
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.current_timestamp = current_timestamp
def __repr__(self):
return "<Oracle prediction: {}>".format(self.__dict__) | n_timesteps = train_x.shape[2]
self.initialise_topology(n_timesteps) | conditional_block |
oracle.py | # Trains the network then uses it to make predictions
# Also transforms the data before and after the predictions are made
# A fairly generic interface, in that it can easily applied to other models
import logging
from timeit import default_timer as timer
from copy import deepcopy
from datetime import timedelta
import numpy as np
import pandas as pd
from alphai_feature_generation.cleaning import resample_ohlcv, fill_gaps
from alphai_feature_generation.transformation import GymDataTransformation
from alphai_time_series.transform import gaussianise
from alphai_delphi.oracle import AbstractOracle
from alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions
from alphai_cromulon_oracle.data.providers import TrainDataProvider
import alphai_cromulon_oracle.cromulon.train as cromulon
import alphai_cromulon_oracle.cromulon.evaluate as cromulon_eval
from alphai_cromulon_oracle.flags import build_tensorflow_flags
import alphai_cromulon_oracle.topology as tp
from alphai_cromulon_oracle import DATETIME_FORMAT_COMPACT
from alphai_cromulon_oracle.helpers import TrainFileManager, logtime
NETWORK_NAME = 'cromulon'
CLIP_VALUE = 5.0 # Largest number allowed to enter the network
DEFAULT_N_CORRELATED_SERIES = 1
DEFAULT_N_CONV_FILTERS = 32
DEFAULT_CONV_KERNEL_SIZE = [3, 3]
FEATURE_TO_RANK_CORRELATIONS = 0 # Use the first feature to form correlation coefficients
TRAIN_FILE_NAME_TEMPLATE = "{}_train_" + NETWORK_NAME
logger = logging.getLogger(__name__)
class CromulonOracle(AbstractOracle):
def _sanity_check(self):
pass
def global_transform(self, data):
transformed_data = self._data_transformation.apply_global_transformations(data)
return transformed_data
def resample(self, data):
resampled_raw_data = resample_ohlcv(data, "{}T".format(self._data_transformation.features_resample_minutes))
return resampled_raw_data
def fill_nan(self, data):
filled_data = fill_gaps(data, self._data_transformation.fill_limit, dropna=True)
return filled_data
def save(self):
pass
@property
def target_feature(self):
return self._target_feature
def load(self):
pass
def get_universe(self):
pass
def __init__(self, config):
"""
:param configuration: Dictionary containing all the parameters. Full specifications can be found at:
oracle-cromulon-python/docs/cromulon_options.md
"""
super().__init__(config)
logger.info('Initialising Cromulon Oracle.')
self.config = self.update_configuration(self.config)
self._init_data_transformation()
self._train_path = self.config['train_path']
n_correlated_series = self.config.get('n_correlated_series', DEFAULT_N_CORRELATED_SERIES)
self._configuration = self.config
self._init_train_file_manager()
self._tensorflow_flags = build_tensorflow_flags(self.config) # Perhaps use separate config dict here?
if self._tensorflow_flags.predict_single_shares:
self._n_input_series = int(np.minimum(n_correlated_series, self.config['n_series']))
self._n_forecasts = 1
else:
self._n_input_series = self.config['n_series']
self._n_forecasts = self.config['n_forecasts']
self._topology = None
def _init_train_file_manager(self):
self._train_file_manager = TrainFileManager(
self._train_path,
TRAIN_FILE_NAME_TEMPLATE,
DATETIME_FORMAT_COMPACT
)
self._train_file_manager.ensure_path_exists()
def _init_data_transformation(self):
data_transformation_config = self.config['data_transformation']
self._feature_list = data_transformation_config['feature_config_list']
self._n_features = len(self._feature_list)
data_transformation_config["prediction_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
data_transformation_config["features_start_market_minute"] = self.scheduling.training_frequency.minutes_offset
data_transformation_config["target_delta_ndays"] = int(self.scheduling.prediction_horizon.days)
data_transformation_config["target_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
self._target_feature = self._extract_target_feature(self._feature_list)
self._data_transformation = GymDataTransformation(data_transformation_config)
def train(self, data, execution_time):
"""
Trains the model
:param dict data: OHLCV data as dictionary of pandas DataFrame.
:param datetime.datetime execution_time: time of execution of training
:return:
"""
logger.info('Training model on {}.'.format(
execution_time,
))
train_x_dict, train_y_dict = self._data_transformation.create_train_data(data)
logger.info("Preprocessing training data")
train_x = self._preprocess_inputs(train_x_dict)
train_y = self._preprocess_outputs(train_y_dict)
logger.info("Processed train_x shape {}".format(train_x.shape))
train_x, train_y = self.filter_nan_samples(train_x, train_y)
logger.info("Filtered train_x shape {}".format(train_x.shape))
n_valid_samples = train_x.shape[0]
if n_valid_samples == 0:
raise ValueError("Aborting training: No valid samples")
elif n_valid_samples < 2e4:
logger.warning("Low number of training samples: {}".format(n_valid_samples))
# Topology can either be directly constructed from layers, or build from sequence of parameters
if self._topology is None:
n_timesteps = train_x.shape[2]
self.initialise_topology(n_timesteps)
logger.info('Initialised network topology: {}.'.format(self._topology.layers))
logger.info('Training features of shape: {}.'.format(
train_x.shape,
))
logger.info('Training labels of shape: {}.'.format(
train_y.shape,
))
resume_train_path = None
if self._tensorflow_flags.resume_training:
try:
resume_train_path = self._train_file_manager.latest_train_filename(execution_time)
except ValueError:
pass
train_path = self._train_file_manager.new_filename(execution_time)
tensorflow_path = TensorflowPath(train_path, resume_train_path)
tensorboard_options = TensorboardOptions(self._tensorflow_flags.tensorboard_log_path,
self._tensorflow_flags.learning_rate,
self._tensorflow_flags.batch_size,
execution_time
)
first_sample = train_x[0, :].flatten()
logger.info("Sample from first example in train_x: {}".format(first_sample[0:8]))
data_provider = TrainDataProvider(train_x, train_y, self._tensorflow_flags.batch_size)
self._do_train(tensorflow_path, tensorboard_options, data_provider)
@logtime(message="Training the model.")
def _do_train(self, tensorflow_path, tensorboard_options, data_provider):
cromulon.train(self._topology, data_provider, tensorflow_path, tensorboard_options, self._tensorflow_flags)
def _get_train_template(self):
return TRAIN_FILE_NAME_TEMPLATE
def predict_classification(self, data, current_timestamp):
""" Returns the raw pdf from the network. """
latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)
predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(
data)
predict_x = self._preprocess_inputs(predict_x)
if self._topology is None:
n_timesteps = predict_x.shape[2]
self.initialise_topology(n_timesteps)
# Verify data is the correct shape
network_input_shape = self._topology.get_network_input_shape()
data_input_shape = predict_x.shape[-3:]
if data_input_shape != network_input_shape:
err_msg = 'Data shape' + str(data_input_shape) + " doesnt match network input " + str(
network_input_shape)
raise ValueError(err_msg)
predict_y = cromulon_eval.eval_neural_net(
predict_x, self._topology,
self._tensorflow_flags,
latest_train_file
)
if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position
predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)
predict_y = np.squeeze(predict_y, axis=1)
target_timestamps = []
for i in range(self._topology.n_forecasts):
temp_timestamp = deepcopy(target_timestamp)
target_timestamps.append(temp_timestamp)
target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)
return predict_y, symbols, target_timestamps
def predict(self, data, current_timestamp, number_of_iterations=1):
"""
Main method that gives us a prediction after the training phase is done
:param data: The dict of dataframes to be used for prediction
:type data: dict
:param current_timestamp: The timestamp of the time when the prediction is executed
:type current_timestamp: datetime.datetime
:param number_of_iterations: The number of iterations which we use to sample the uncertain features.
:type number_of_iterations: Integer
:return: Mean forecast, lower and upper confidence limits, and the timestamp of the prediction
:rtype: OraclePrediction
"""
if self._topology is None:
logger.warning('Not ready for prediction - safer to run train first')
logger.info('Cromulon Oracle prediction on {}.'.format(current_timestamp))
predict_y_list = []
for i in range(number_of_iterations):
predict_y, symbols, target_timestamps = self.predict_classification(data, current_timestamp)
predict_y_list.append(predict_y)
predict_y_stack = np.stack(predict_y_list)
average_predict_y = np.mean(predict_y_stack, axis=0)
means, conf_low, conf_high = self._data_transformation.inverse_transform_multi_predict_y(average_predict_y, symbols)
self.log_validity_of_predictions(means, conf_low, conf_high)
means_pd = pd.DataFrame(data=means, columns=symbols, index=target_timestamps)
conf_low_pd = pd.DataFrame(data=conf_low, columns=symbols, index=target_timestamps)
conf_high_pd = pd.DataFrame(data=conf_high, columns=symbols, index=target_timestamps)
means_pd, conf_low_pd, conf_high_pd = self.filter_predictions(means_pd, conf_low_pd, conf_high_pd)
return OraclePrediction(means_pd, conf_low_pd, conf_high_pd, current_timestamp)
def log_validity_of_predictions(self, means, conf_low, conf_high):
""" Checks that the network outputs are sensible. """
if not (np.isfinite(conf_low).all() and np.isfinite(conf_high).all()):
logger.warning('Confidence interval contains non-finite values.')
if not np.isfinite(means).all():
logger.warning('Means found to contain non-finite values.')
logger.info('Samples from predicted means: {}'.format(means[0:10]))
def filter_predictions(self, means, conf_low, conf_high):
""" Drops any predictions that are NaN, and remove those symbols from the corresponding confidence dataframe.
:param pdDF means: The predictions from which we'll extract the valid ones
:param pdDF conf_low: Lower bound of the confidence range of the prediction
:param pdDF conf_high: Upper bound of the confidence range of the prediction
:return: pdDF, pdDF, pdDF
"""
means = means.dropna()
valid_symbols = means.index.tolist()
conf_low = conf_low.loc[valid_symbols]
conf_high = conf_high.loc[valid_symbols]
return means, conf_low, conf_high
def filter_nan_samples(self, train_x, train_y):
""" Remove any sample in zeroth dimension which holds a nan """
n_samples = train_x.shape[0]
if n_samples != train_y.shape[0]:
raise ValueError("x and y sample lengths don't match")
validity_array = np.zeros(n_samples)
for i in range(n_samples):
x_sample = train_x[i, :]
y_sample = train_y[i, :]
validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()
mask = np.where(validity_array)[0]
return train_x[mask, :], train_y[mask, :]
def print_verification_report(self, data, data_name):
data = data.flatten()
nans = np.isnan(data).sum()
infs = np.isinf(data).sum()
finite_data = data[np.isfinite(data)]
max_data = np.max(finite_data)
min_data = np.min(finite_data)
mean = np.mean(finite_data)
sigma = np.std(finite_data)
logger.info("{} Infs, Nans: {}, {}".format(data_name, infs, nans))
logger.info("{} Min, Max: {}, {}".format(data_name, min_data, max_data))
logger.info("{} Mean, Sigma: {}, {}".format(data_name, mean, sigma))
if data_name == 'X_data' and np.abs(mean) > 1e-2:
logger.warning('Mean of input data is too large')
if data_name == 'Y_data' and max_data < 1e-2:
raise ValueError("Y Data not classified")
return min_data, max_data
def verify_y_data(self, y_data):
testy = deepcopy(y_data)
self.print_verification_report(testy, 'Y_data')
def verify_x_data(self, x_data):
"""Check for nans or crazy numbers.
"""
testx = deepcopy(x_data).flatten()
xmin, xmax = self.print_verification_report(testx, 'X_data')
if xmax > CLIP_VALUE or xmin < -CLIP_VALUE:
n_clipped_elements = np.sum(CLIP_VALUE < np.abs(testx))
n_elements = len(testx)
x_data = np.clip(x_data, a_min=-CLIP_VALUE, a_max=CLIP_VALUE)
logger.warning("Large inputs detected: clip values exceeding {}".format(CLIP_VALUE))
logger.info("{} of {} elements were clipped.".format(n_clipped_elements, n_elements))
return x_data
def update_configuration(self, config):
""" Pass on some config entries to data_transformation"""
config["data_transformation"]["n_classification_bins"] = config["n_classification_bins"]
config["data_transformation"]["nassets"] = config["nassets"]
config["data_transformation"]["classify_per_series"] = config["classify_per_series"]
config["data_transformation"]["normalise_per_series"] = config["normalise_per_series"]
return config
def _preprocess_inputs(self, train_x_dict):
""" Prepare training data to be fed into Cromulon. """
numpy_arrays = []
for key, value in train_x_dict.items():
numpy_arrays.append(value)
logger.info("Appending feature of shape {}".format(value.shape))
# Currently train_x will have dimensions [features; samples; timesteps; symbols]
train_x = np.stack(numpy_arrays, axis=0)
train_x = self.reorder_input_dimensions(train_x)
# Expand dataset if requested
if self._tensorflow_flags.predict_single_shares:
train_x = self.expand_input_data(train_x)
train_x = self.verify_x_data(train_x)
return train_x.astype(np.float32) # FIXME: set float32 in data transform, conditional on config file
def _preprocess_outputs(self, train_y_dict):
train_y = list(train_y_dict.values())[0]
train_y = np.swapaxes(train_y, axis1=1, axis2=2)
if self._tensorflow_flags.predict_single_shares:
n_feat_y = train_y.shape[2]
train_y = np.reshape(train_y, [-1, 1, 1, n_feat_y])
self.verify_y_data(train_y)
return train_y.astype(np.float32) # FIXME:set float32 in data transform, conditional on config file
def gaussianise_series(self, train_x):
""" Gaussianise each series within each batch - but don't normalise means
:param nparray train_x: Series in format [batches, features, series]. NB ensure all features
are of the same kind
:return: nparray The same data but now each series is gaussianised
"""
n_batches = train_x.shape[0]
for batch in range(n_batches):
train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)
return train_x
def | (self, train_x):
""" Reassign ordering of dimensions.
:param train_x: Enters with dimensions [features; samples; timesteps; series]
:return: train_x Now with dimensions [samples; series ; time; features]
"""
source = [0, 1, 2, 3]
destination = [3, 0, 2, 1]
return np.moveaxis(train_x, source, destination)
def expand_input_data(self, train_x):
"""Converts to the form where each time series is predicted separately, though companion time series are
included as auxilliary features
:param nparray train_x: [samples; series ; time; features]
:return: nparray The expanded training dataset, still in the format [samples; series ; time; features]
"""
n_samples = train_x.shape[0]
n_series = train_x.shape[1]
n_timesteps = train_x.shape[2]
n_features = train_x.shape[3]
n_expanded_samples = n_samples * n_series
logger.info("Data found to hold {} samples, {} series, {} timesteps, {} features.".format(
n_samples, n_series, n_timesteps, n_features))
target_shape = [n_expanded_samples, self._n_input_series, n_timesteps, n_features]
found_duplicates = False
if self._n_input_series == 1:
corr_train_x = train_x.reshape(target_shape)
else:
corr_train_x = np.zeros(shape=target_shape)
for sample in range(n_samples):
# Series ordering may differ between batches - so we need the correlations for each batch
data_sample = train_x[sample, :, :, FEATURE_TO_RANK_CORRELATIONS]
neg_correlation_matrix = - np.corrcoef(data_sample, rowvar=False) # False since col represents a var
correlation_indices = neg_correlation_matrix.argsort(axis=1) # Sort negatives to get descending order
for series_index in range(n_series):
if correlation_indices[series_index, [0]] != series_index:
found_duplicates = True
sample_number = sample * n_series + series_index
for i in range(self._n_input_series):
corr_series_index = correlation_indices[series_index, i]
corr_train_x[sample_number, :, i] = train_x[sample, :, corr_series_index]
if found_duplicates:
logger.warning('Some NaNs or duplicate series were found in the data')
return corr_train_x
def initialise_topology(self, n_timesteps):
""" Set up the network topology based upon the configuration file, and shape of input data. """
layer_heights = self._configuration['layer_heights']
layer_widths = self._configuration['layer_widths']
layer_depths = np.ones(len(layer_heights), dtype=np.int)
default_layer_types = ['full'] * len(layer_heights)
layer_types = self._configuration.get('layer_types', default_layer_types)
# Override input layer to match data
layer_depths[0] = 1 # n input series currently fixed to 1
layer_heights[0] = n_timesteps
layer_widths[0] = self._n_features
# Setup convolutional layer configuration
conv_config = {}
conv_config["kernel_size"] = self._configuration.get('kernel_size', DEFAULT_CONV_KERNEL_SIZE)
conv_config["n_kernels"] = self._configuration.get('n_kernels', DEFAULT_N_CONV_FILTERS)
conv_config["dilation_rates"] = self._configuration.get('dilation_rates', 1)
conv_config["strides"] = self._configuration.get('strides', 1)
self._topology = tp.Topology(
n_timesteps=n_timesteps,
n_forecasts=self._n_forecasts,
n_classification_bins=self._configuration['n_classification_bins'],
layer_heights=layer_heights,
layer_widths=layer_widths,
layer_depths=layer_depths,
layer_types=layer_types,
activation_functions=self._configuration['activation_functions'],
n_features=self._n_features,
conv_config=conv_config
)
def _extract_target_feature(self, feature_list):
for feature in feature_list:
if feature['is_target']:
return feature['name']
raise ValueError("You must specify at least one target feature")
def _filter_universe_from_data_for_prediction(self, data, current_timestamp, universe):
"""
Filters the dataframes inside the dict, returning a new dict with only the columns
available in the universe for that particular date
:param data: dict of dataframes
:type data: dict
:param current_timestamp: the current timestamp
:type datetime.datetime
:param universe: dataframe containing mapping of data -> list of assets
:type universe: pd.DataFrame
:return: dict of pd.DataFrame
:rtype dict
"""
current_date = current_timestamp.date()
assets = []
for idx, row in universe.iterrows():
if row.start_date <= current_date <= row.end_date:
assets = row.assets
break
filtered = {}
for feature, df in data.items():
filtered[feature] = df.drop(df.columns.difference(assets), axis=1)
return filtered
class OraclePrediction:
def __init__(self, mean_forecast, lower_bound, upper_bound, current_timestamp):
""" Container for the oracle predictions.
:param mean_forecast: Prediction values for various series at various times
:type mean_forecast: pd.DataFrame
:param lower_bound: Lower edge of the requested confidence interval
:type lower_bound: pd.DataFrame
:param upper_bound: Upper edge of the requested confidence interval
:type upper_bound: pd.DataFrame
:param current_timestamp: Timestamp when the prediction was made
:type target_timestamp: datetime
"""
self.mean_forecast = mean_forecast
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.current_timestamp = current_timestamp
def __repr__(self):
return "<Oracle prediction: {}>".format(self.__dict__) | reorder_input_dimensions | identifier_name |
oracle.py | # Trains the network then uses it to make predictions
# Also transforms the data before and after the predictions are made
# A fairly generic interface, in that it can easily applied to other models
import logging
from timeit import default_timer as timer
from copy import deepcopy
from datetime import timedelta
import numpy as np
import pandas as pd
from alphai_feature_generation.cleaning import resample_ohlcv, fill_gaps
from alphai_feature_generation.transformation import GymDataTransformation
from alphai_time_series.transform import gaussianise
from alphai_delphi.oracle import AbstractOracle
from alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions
from alphai_cromulon_oracle.data.providers import TrainDataProvider
import alphai_cromulon_oracle.cromulon.train as cromulon
import alphai_cromulon_oracle.cromulon.evaluate as cromulon_eval
from alphai_cromulon_oracle.flags import build_tensorflow_flags
import alphai_cromulon_oracle.topology as tp
from alphai_cromulon_oracle import DATETIME_FORMAT_COMPACT
from alphai_cromulon_oracle.helpers import TrainFileManager, logtime
NETWORK_NAME = 'cromulon'
CLIP_VALUE = 5.0 # Largest number allowed to enter the network
DEFAULT_N_CORRELATED_SERIES = 1
DEFAULT_N_CONV_FILTERS = 32
DEFAULT_CONV_KERNEL_SIZE = [3, 3]
FEATURE_TO_RANK_CORRELATIONS = 0 # Use the first feature to form correlation coefficients
TRAIN_FILE_NAME_TEMPLATE = "{}_train_" + NETWORK_NAME
logger = logging.getLogger(__name__)
class CromulonOracle(AbstractOracle):
def _sanity_check(self):
pass
def global_transform(self, data):
transformed_data = self._data_transformation.apply_global_transformations(data)
return transformed_data
def resample(self, data):
resampled_raw_data = resample_ohlcv(data, "{}T".format(self._data_transformation.features_resample_minutes))
return resampled_raw_data
def fill_nan(self, data):
filled_data = fill_gaps(data, self._data_transformation.fill_limit, dropna=True)
return filled_data
def save(self):
pass
@property
def target_feature(self):
return self._target_feature
def load(self):
pass
def get_universe(self):
pass
def __init__(self, config):
"""
:param configuration: Dictionary containing all the parameters. Full specifications can be found at:
oracle-cromulon-python/docs/cromulon_options.md
"""
super().__init__(config)
logger.info('Initialising Cromulon Oracle.')
self.config = self.update_configuration(self.config)
self._init_data_transformation()
self._train_path = self.config['train_path']
n_correlated_series = self.config.get('n_correlated_series', DEFAULT_N_CORRELATED_SERIES)
self._configuration = self.config
self._init_train_file_manager()
self._tensorflow_flags = build_tensorflow_flags(self.config) # Perhaps use separate config dict here?
if self._tensorflow_flags.predict_single_shares:
self._n_input_series = int(np.minimum(n_correlated_series, self.config['n_series'])) |
self._topology = None
def _init_train_file_manager(self):
self._train_file_manager = TrainFileManager(
self._train_path,
TRAIN_FILE_NAME_TEMPLATE,
DATETIME_FORMAT_COMPACT
)
self._train_file_manager.ensure_path_exists()
def _init_data_transformation(self):
data_transformation_config = self.config['data_transformation']
self._feature_list = data_transformation_config['feature_config_list']
self._n_features = len(self._feature_list)
data_transformation_config["prediction_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
data_transformation_config["features_start_market_minute"] = self.scheduling.training_frequency.minutes_offset
data_transformation_config["target_delta_ndays"] = int(self.scheduling.prediction_horizon.days)
data_transformation_config["target_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
self._target_feature = self._extract_target_feature(self._feature_list)
self._data_transformation = GymDataTransformation(data_transformation_config)
def train(self, data, execution_time):
"""
Trains the model
:param dict data: OHLCV data as dictionary of pandas DataFrame.
:param datetime.datetime execution_time: time of execution of training
:return:
"""
logger.info('Training model on {}.'.format(
execution_time,
))
train_x_dict, train_y_dict = self._data_transformation.create_train_data(data)
logger.info("Preprocessing training data")
train_x = self._preprocess_inputs(train_x_dict)
train_y = self._preprocess_outputs(train_y_dict)
logger.info("Processed train_x shape {}".format(train_x.shape))
train_x, train_y = self.filter_nan_samples(train_x, train_y)
logger.info("Filtered train_x shape {}".format(train_x.shape))
n_valid_samples = train_x.shape[0]
if n_valid_samples == 0:
raise ValueError("Aborting training: No valid samples")
elif n_valid_samples < 2e4:
logger.warning("Low number of training samples: {}".format(n_valid_samples))
# Topology can either be directly constructed from layers, or build from sequence of parameters
if self._topology is None:
n_timesteps = train_x.shape[2]
self.initialise_topology(n_timesteps)
logger.info('Initialised network topology: {}.'.format(self._topology.layers))
logger.info('Training features of shape: {}.'.format(
train_x.shape,
))
logger.info('Training labels of shape: {}.'.format(
train_y.shape,
))
resume_train_path = None
if self._tensorflow_flags.resume_training:
try:
resume_train_path = self._train_file_manager.latest_train_filename(execution_time)
except ValueError:
pass
train_path = self._train_file_manager.new_filename(execution_time)
tensorflow_path = TensorflowPath(train_path, resume_train_path)
tensorboard_options = TensorboardOptions(self._tensorflow_flags.tensorboard_log_path,
self._tensorflow_flags.learning_rate,
self._tensorflow_flags.batch_size,
execution_time
)
first_sample = train_x[0, :].flatten()
logger.info("Sample from first example in train_x: {}".format(first_sample[0:8]))
data_provider = TrainDataProvider(train_x, train_y, self._tensorflow_flags.batch_size)
self._do_train(tensorflow_path, tensorboard_options, data_provider)
@logtime(message="Training the model.")
def _do_train(self, tensorflow_path, tensorboard_options, data_provider):
cromulon.train(self._topology, data_provider, tensorflow_path, tensorboard_options, self._tensorflow_flags)
def _get_train_template(self):
return TRAIN_FILE_NAME_TEMPLATE
def predict_classification(self, data, current_timestamp):
""" Returns the raw pdf from the network. """
latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)
predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(
data)
predict_x = self._preprocess_inputs(predict_x)
if self._topology is None:
n_timesteps = predict_x.shape[2]
self.initialise_topology(n_timesteps)
# Verify data is the correct shape
network_input_shape = self._topology.get_network_input_shape()
data_input_shape = predict_x.shape[-3:]
if data_input_shape != network_input_shape:
err_msg = 'Data shape' + str(data_input_shape) + " doesnt match network input " + str(
network_input_shape)
raise ValueError(err_msg)
predict_y = cromulon_eval.eval_neural_net(
predict_x, self._topology,
self._tensorflow_flags,
latest_train_file
)
if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position
predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)
predict_y = np.squeeze(predict_y, axis=1)
target_timestamps = []
for i in range(self._topology.n_forecasts):
temp_timestamp = deepcopy(target_timestamp)
target_timestamps.append(temp_timestamp)
target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)
return predict_y, symbols, target_timestamps
def predict(self, data, current_timestamp, number_of_iterations=1):
"""
Main method that gives us a prediction after the training phase is done
:param data: The dict of dataframes to be used for prediction
:type data: dict
:param current_timestamp: The timestamp of the time when the prediction is executed
:type current_timestamp: datetime.datetime
:param number_of_iterations: The number of iterations which we use to sample the uncertain features.
:type number_of_iterations: Integer
:return: Mean forecast, lower and upper confidence limits, and the timestamp of the prediction
:rtype: OraclePrediction
"""
if self._topology is None:
logger.warning('Not ready for prediction - safer to run train first')
logger.info('Cromulon Oracle prediction on {}.'.format(current_timestamp))
predict_y_list = []
for i in range(number_of_iterations):
predict_y, symbols, target_timestamps = self.predict_classification(data, current_timestamp)
predict_y_list.append(predict_y)
predict_y_stack = np.stack(predict_y_list)
average_predict_y = np.mean(predict_y_stack, axis=0)
means, conf_low, conf_high = self._data_transformation.inverse_transform_multi_predict_y(average_predict_y, symbols)
self.log_validity_of_predictions(means, conf_low, conf_high)
means_pd = pd.DataFrame(data=means, columns=symbols, index=target_timestamps)
conf_low_pd = pd.DataFrame(data=conf_low, columns=symbols, index=target_timestamps)
conf_high_pd = pd.DataFrame(data=conf_high, columns=symbols, index=target_timestamps)
means_pd, conf_low_pd, conf_high_pd = self.filter_predictions(means_pd, conf_low_pd, conf_high_pd)
return OraclePrediction(means_pd, conf_low_pd, conf_high_pd, current_timestamp)
def log_validity_of_predictions(self, means, conf_low, conf_high):
""" Checks that the network outputs are sensible. """
if not (np.isfinite(conf_low).all() and np.isfinite(conf_high).all()):
logger.warning('Confidence interval contains non-finite values.')
if not np.isfinite(means).all():
logger.warning('Means found to contain non-finite values.')
logger.info('Samples from predicted means: {}'.format(means[0:10]))
def filter_predictions(self, means, conf_low, conf_high):
""" Drops any predictions that are NaN, and remove those symbols from the corresponding confidence dataframe.
:param pdDF means: The predictions from which we'll extract the valid ones
:param pdDF conf_low: Lower bound of the confidence range of the prediction
:param pdDF conf_high: Upper bound of the confidence range of the prediction
:return: pdDF, pdDF, pdDF
"""
means = means.dropna()
valid_symbols = means.index.tolist()
conf_low = conf_low.loc[valid_symbols]
conf_high = conf_high.loc[valid_symbols]
return means, conf_low, conf_high
def filter_nan_samples(self, train_x, train_y):
""" Remove any sample in zeroth dimension which holds a nan """
n_samples = train_x.shape[0]
if n_samples != train_y.shape[0]:
raise ValueError("x and y sample lengths don't match")
validity_array = np.zeros(n_samples)
for i in range(n_samples):
x_sample = train_x[i, :]
y_sample = train_y[i, :]
validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()
mask = np.where(validity_array)[0]
return train_x[mask, :], train_y[mask, :]
def print_verification_report(self, data, data_name):
data = data.flatten()
nans = np.isnan(data).sum()
infs = np.isinf(data).sum()
finite_data = data[np.isfinite(data)]
max_data = np.max(finite_data)
min_data = np.min(finite_data)
mean = np.mean(finite_data)
sigma = np.std(finite_data)
logger.info("{} Infs, Nans: {}, {}".format(data_name, infs, nans))
logger.info("{} Min, Max: {}, {}".format(data_name, min_data, max_data))
logger.info("{} Mean, Sigma: {}, {}".format(data_name, mean, sigma))
if data_name == 'X_data' and np.abs(mean) > 1e-2:
logger.warning('Mean of input data is too large')
if data_name == 'Y_data' and max_data < 1e-2:
raise ValueError("Y Data not classified")
return min_data, max_data
def verify_y_data(self, y_data):
testy = deepcopy(y_data)
self.print_verification_report(testy, 'Y_data')
def verify_x_data(self, x_data):
"""Check for nans or crazy numbers.
"""
testx = deepcopy(x_data).flatten()
xmin, xmax = self.print_verification_report(testx, 'X_data')
if xmax > CLIP_VALUE or xmin < -CLIP_VALUE:
n_clipped_elements = np.sum(CLIP_VALUE < np.abs(testx))
n_elements = len(testx)
x_data = np.clip(x_data, a_min=-CLIP_VALUE, a_max=CLIP_VALUE)
logger.warning("Large inputs detected: clip values exceeding {}".format(CLIP_VALUE))
logger.info("{} of {} elements were clipped.".format(n_clipped_elements, n_elements))
return x_data
def update_configuration(self, config):
""" Pass on some config entries to data_transformation"""
config["data_transformation"]["n_classification_bins"] = config["n_classification_bins"]
config["data_transformation"]["nassets"] = config["nassets"]
config["data_transformation"]["classify_per_series"] = config["classify_per_series"]
config["data_transformation"]["normalise_per_series"] = config["normalise_per_series"]
return config
def _preprocess_inputs(self, train_x_dict):
""" Prepare training data to be fed into Cromulon. """
numpy_arrays = []
for key, value in train_x_dict.items():
numpy_arrays.append(value)
logger.info("Appending feature of shape {}".format(value.shape))
# Currently train_x will have dimensions [features; samples; timesteps; symbols]
train_x = np.stack(numpy_arrays, axis=0)
train_x = self.reorder_input_dimensions(train_x)
# Expand dataset if requested
if self._tensorflow_flags.predict_single_shares:
train_x = self.expand_input_data(train_x)
train_x = self.verify_x_data(train_x)
return train_x.astype(np.float32) # FIXME: set float32 in data transform, conditional on config file
def _preprocess_outputs(self, train_y_dict):
train_y = list(train_y_dict.values())[0]
train_y = np.swapaxes(train_y, axis1=1, axis2=2)
if self._tensorflow_flags.predict_single_shares:
n_feat_y = train_y.shape[2]
train_y = np.reshape(train_y, [-1, 1, 1, n_feat_y])
self.verify_y_data(train_y)
return train_y.astype(np.float32) # FIXME:set float32 in data transform, conditional on config file
def gaussianise_series(self, train_x):
""" Gaussianise each series within each batch - but don't normalise means
:param nparray train_x: Series in format [batches, features, series]. NB ensure all features
are of the same kind
:return: nparray The same data but now each series is gaussianised
"""
n_batches = train_x.shape[0]
for batch in range(n_batches):
train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)
return train_x
def reorder_input_dimensions(self, train_x):
""" Reassign ordering of dimensions.
:param train_x: Enters with dimensions [features; samples; timesteps; series]
:return: train_x Now with dimensions [samples; series ; time; features]
"""
source = [0, 1, 2, 3]
destination = [3, 0, 2, 1]
return np.moveaxis(train_x, source, destination)
def expand_input_data(self, train_x):
"""Converts to the form where each time series is predicted separately, though companion time series are
included as auxilliary features
:param nparray train_x: [samples; series ; time; features]
:return: nparray The expanded training dataset, still in the format [samples; series ; time; features]
"""
n_samples = train_x.shape[0]
n_series = train_x.shape[1]
n_timesteps = train_x.shape[2]
n_features = train_x.shape[3]
n_expanded_samples = n_samples * n_series
logger.info("Data found to hold {} samples, {} series, {} timesteps, {} features.".format(
n_samples, n_series, n_timesteps, n_features))
target_shape = [n_expanded_samples, self._n_input_series, n_timesteps, n_features]
found_duplicates = False
if self._n_input_series == 1:
corr_train_x = train_x.reshape(target_shape)
else:
corr_train_x = np.zeros(shape=target_shape)
for sample in range(n_samples):
# Series ordering may differ between batches - so we need the correlations for each batch
data_sample = train_x[sample, :, :, FEATURE_TO_RANK_CORRELATIONS]
neg_correlation_matrix = - np.corrcoef(data_sample, rowvar=False) # False since col represents a var
correlation_indices = neg_correlation_matrix.argsort(axis=1) # Sort negatives to get descending order
for series_index in range(n_series):
if correlation_indices[series_index, [0]] != series_index:
found_duplicates = True
sample_number = sample * n_series + series_index
for i in range(self._n_input_series):
corr_series_index = correlation_indices[series_index, i]
corr_train_x[sample_number, :, i] = train_x[sample, :, corr_series_index]
if found_duplicates:
logger.warning('Some NaNs or duplicate series were found in the data')
return corr_train_x
def initialise_topology(self, n_timesteps):
""" Set up the network topology based upon the configuration file, and shape of input data. """
layer_heights = self._configuration['layer_heights']
layer_widths = self._configuration['layer_widths']
layer_depths = np.ones(len(layer_heights), dtype=np.int)
default_layer_types = ['full'] * len(layer_heights)
layer_types = self._configuration.get('layer_types', default_layer_types)
# Override input layer to match data
layer_depths[0] = 1 # n input series currently fixed to 1
layer_heights[0] = n_timesteps
layer_widths[0] = self._n_features
# Setup convolutional layer configuration
conv_config = {}
conv_config["kernel_size"] = self._configuration.get('kernel_size', DEFAULT_CONV_KERNEL_SIZE)
conv_config["n_kernels"] = self._configuration.get('n_kernels', DEFAULT_N_CONV_FILTERS)
conv_config["dilation_rates"] = self._configuration.get('dilation_rates', 1)
conv_config["strides"] = self._configuration.get('strides', 1)
self._topology = tp.Topology(
n_timesteps=n_timesteps,
n_forecasts=self._n_forecasts,
n_classification_bins=self._configuration['n_classification_bins'],
layer_heights=layer_heights,
layer_widths=layer_widths,
layer_depths=layer_depths,
layer_types=layer_types,
activation_functions=self._configuration['activation_functions'],
n_features=self._n_features,
conv_config=conv_config
)
def _extract_target_feature(self, feature_list):
for feature in feature_list:
if feature['is_target']:
return feature['name']
raise ValueError("You must specify at least one target feature")
def _filter_universe_from_data_for_prediction(self, data, current_timestamp, universe):
"""
Filters the dataframes inside the dict, returning a new dict with only the columns
available in the universe for that particular date
:param data: dict of dataframes
:type data: dict
:param current_timestamp: the current timestamp
:type datetime.datetime
:param universe: dataframe containing mapping of data -> list of assets
:type universe: pd.DataFrame
:return: dict of pd.DataFrame
:rtype dict
"""
current_date = current_timestamp.date()
assets = []
for idx, row in universe.iterrows():
if row.start_date <= current_date <= row.end_date:
assets = row.assets
break
filtered = {}
for feature, df in data.items():
filtered[feature] = df.drop(df.columns.difference(assets), axis=1)
return filtered
class OraclePrediction:
def __init__(self, mean_forecast, lower_bound, upper_bound, current_timestamp):
""" Container for the oracle predictions.
:param mean_forecast: Prediction values for various series at various times
:type mean_forecast: pd.DataFrame
:param lower_bound: Lower edge of the requested confidence interval
:type lower_bound: pd.DataFrame
:param upper_bound: Upper edge of the requested confidence interval
:type upper_bound: pd.DataFrame
:param current_timestamp: Timestamp when the prediction was made
:type target_timestamp: datetime
"""
self.mean_forecast = mean_forecast
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.current_timestamp = current_timestamp
def __repr__(self):
return "<Oracle prediction: {}>".format(self.__dict__) | self._n_forecasts = 1
else:
self._n_input_series = self.config['n_series']
self._n_forecasts = self.config['n_forecasts'] | random_line_split |
context.ts | /*
* Copyright 2018 Nazmul Idris All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/// <reference path="../../../typings/globals/node/index.d.ts" />
import {action_set_state_data, action_set_state_user} from "./actions";
const GLOBAL_CONSTANTS = require('../../global/constants').GLOBAL_CONSTANTS;
const LOGGING_ENABLED = require('../../global/constants').LOGGING_ENABLED;
import {customMiddleware, add_todo_item, toggle_todo_item} from "./mymiddlewares";
import {UserIF, DataIF, ReduxStateIF} from "./interfaces";
import {createStore, applyMiddleware, compose} from 'redux';
import * as reducers from './reducers';
import * as actions from './actions';
import * as persistence from './firebase';
import * as presence from './presence';
const lodash = require('lodash');
const events = require('events');
const uuid = require('node-uuid');
/**
* this holds the app's state which is comprised of:
* 1) user object: UserIF
* 2) data for the user: DataIF
*
* any time this user or data is modified, it emits events to notify any listeners
* that are interested in listening to these changes via:
* 1) LE_SET_USER
* 2) LE_SET_DATA
*/
class ApplicationContext {
public sessionId;
public socket;
public firebase;
public eventEmitter;
public reduxStore;
constructor() {
// init redux reduxStore
this.initReduxStore();
// init firebase
this.initFirebase();
// setup websocket (used for group chat)
this.initSocket();
// unique session id
this.sessionId = uuid.v4();
// create event emitter
this.initEventEmitter();
// setup firebase auth
persistence.initAuth(this);
// setup firebase presence
presence.initPresence(this);
}
isProduction() {
const hostname = window.location.hostname;
if (!lodash.isEqual(hostname, "localhost")) {
// prod app
return true;
} else {
// dev app
return false;
}
}
isDevelopment() {
return !this.isProduction();
}
/**
* this generates a different URL depending on whether the code is running on
* localhost or not.
* DEV - If it's running in localhost, then it understands this to be
* the dev environment and it tries to connect to "localhost:8080".
* PROD - If it's NOT running in localhost, then it understands this to be the
* production environment and tries to connect to "/".
* @returns {string}
*/
getSocketURL() {
let socketURL = "http://localhost:8080";
if (this.isProduction()) |
return socketURL;
}
/**
* this sets up the socket object for use by this context
*/
initSocket() {
let io = require("socket.io-client");
this.socket = new io.connect(this.getSocketURL());
}
/**
* to access the socket for this context use this method ... you can emit()
* using it, and you can attach on() listeners to this as well ... if you attach
* listeners, it's up to you to remove them from the socket when they're no longer
* needed. This class will NOT do the cleanup for you.
* @returns {io.connect|*}
*/
getSocket() {
return this.socket;
}
/**
* this returns an ephermeral session id for this session ... will change every
* time this session is restarted (ApplicationContext is created).
* @returns {string|*}
*/
getSessionId() {
return this.sessionId;
}
/**
* is true if the user object is set, and it contains a uid field.
* you can get the user object from getUser()
* you can get the uid from getUserId()
* @returns {boolean}
*/
isUserSet() {
try {
if (!lodash.isNil(this.getUser())) {
if (!lodash.isNil(this.getUserId())) {
return true;
}
}
return false;
} catch (err) {
return false;
}
}
/**
* get a reference to the saved user object
* @returns {UserIF}
*/
getUser() {
try {
return this.getReduxState().user;
} catch (err) {
return null;
}
}
/** gets the uid field of the userObject */
getUserId() {
try {
return this.getUser().uid;
} catch (err) {
return null;
}
}
/**
* get a reference to the saved data object
* @returns {DataIF}
*/
getData(): DataIF {
return this.getReduxState().data;
}
/** this tells firebase to start sign-in using Google (vs anon auth) */
forceSignIn() {
persistence.forceSignIn(this);
}
/** this tells firebase to initiate sign-out (of users who came in thru any
* auth providers - Google and anon) */
forceSignOut() {
persistence.forceSignOut(this);
}
/** setup the internal firebase object */
initFirebase() {
this.firebase = require("firebase");
const config = require('../../global/constants').FIREBASE_CONFIG;
this.firebase.initializeApp(config);
}
/**
* get a ref to the firebase instance
* @returns {firebase|*}
*/
getFirebase() {
return this.firebase;
}
/** this is a convenience method that allows you to get the firebase server
* timestamp object
*/
getFirebaseServerTimestampObject() {
return this.firebase.database.ServerValue.TIMESTAMP
}
/**
* get a ref to the firebase.database() instance
* @returns {*|firebase.database.Database|!firebase.database.Database}
*/
getDatabase() {
return this.firebase.database();
}
/** creates the event emitter */
initEventEmitter() {
this.eventEmitter = new events.EventEmitter();
}
/** disconnect the socket connection */
disconnectSocket() {
this.socket.disconnect();
}
/** convenience method to emit an event to the server */
emitToServer(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emitToServer: eventName ${eventName} fired`);
console.dir(payload);
}
this.socket.emit(eventName, payload);
}
/** convenience method to emit an event */
emit(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emit: eventName ${eventName} fired`);
console.dir(payload);
}
this.eventEmitter.emit(eventName, payload);
}
/** convenience method to listen to event
* @returns the listener that is passed as param
*/
addListener(eventName, listener) {
function logging_listener() {
if (LOGGING_ENABLED) {
console.log(`listener: for eventName ${eventName} responding`);
}
listener.apply(this, arguments);
}
this.eventEmitter.addListener(
eventName, logging_listener
);
return logging_listener;
}
/** convenience method to remove listener for event */
removeListener(eventName, listener) {
this.eventEmitter.removeListener(eventName, listener);
}
/**
* initialize the redux store and get the actions and reducers wired up to it
* this also tests to see if the browser is inDevelopment and if so, it will try and
* use the Redux Chrome Dev Tools Extension.
*/
initReduxStore() {
try {
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
const middlewares = [
window.__REDUX_DEVTOOLS_EXTENSION__ && window.__REDUX_DEVTOOLS_EXTENSION__(),
add_todo_item,
toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
composeEnhancers(middlewareEnhancer)
);
} catch (e) {
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
middlewareEnhancer
);
}
// explicitly INIT Redux!
this.reduxStore.dispatch(actions.action_init());
/**
* this enables the use of redux dev tools in Chrome if you have the
* Chrome extension installed - https://goo.gl/xU4D6P
*/
// let USE_REDUX_DEVTOOLS = this.isDevelopment();
// create redux reduxStore
// if (USE_REDUX_DEVTOOLS) {
// // the following line uses chrome devtools redux plugin
// this.reduxStore = createStore(
// reducers.reducer_main,
// null,
// window.devToolsExtension && window.devToolsExtension()
// );
// }
// else {
// this.reduxStore = createStore(
// reducers.reducer_main,
// null
// );
// }
}
/**
* get a reference to the redux store
* @returns {any}
*/
getReduxStore() {
return this.reduxStore;
}
/**
* get a reference to the redux state
* @returns {S}
*/
getReduxState(): ReduxStateIF {
return this.reduxStore.getState();
}
}
function _dispatchAction(action, ctx) {
persistence.actuallyDispathAction(action, ctx);
}
function _bindActionCreator(actionCreator, dispatch, ctx) {
return function () {
return _dispatchAction(actionCreator.apply(undefined, arguments), ctx);
};
}
function bindActionCreators(actionCreators, dispatch, ctx) {
if (typeof actionCreators === 'function') {
return _bindActionCreator(actionCreators, dispatch, ctx);
}
if (typeof actionCreators !== 'object' || actionCreators === null) {
throw new Error(
'bindActionCreators expected an object or a function, instead received ' +
(actionCreators === null ? 'null' : typeof actionCreators) + '. ' +
'Did you write "import actions from" instead of "import * as' +
' actions from"?'
);
}
var keys = Object.keys(actionCreators);
var boundActionCreators = {};
for (var i = 0; i < keys.length; i++) {
var key = keys[i];
var actionCreator = actionCreators[key];
if (typeof actionCreator === 'function') {
boundActionCreators[key] = _bindActionCreator(actionCreator, dispatch, ctx);
}
}
return boundActionCreators;
}
/** create a singleton that will be used everywhere in the project */
const applicationContext = new ApplicationContext();
/** export the singleton */
export {applicationContext, bindActionCreators} | {
socketURL = "/";
} | conditional_block |
context.ts | /*
* Copyright 2018 Nazmul Idris All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/// <reference path="../../../typings/globals/node/index.d.ts" />
import {action_set_state_data, action_set_state_user} from "./actions";
const GLOBAL_CONSTANTS = require('../../global/constants').GLOBAL_CONSTANTS;
const LOGGING_ENABLED = require('../../global/constants').LOGGING_ENABLED;
import {customMiddleware, add_todo_item, toggle_todo_item} from "./mymiddlewares";
import {UserIF, DataIF, ReduxStateIF} from "./interfaces";
import {createStore, applyMiddleware, compose} from 'redux';
import * as reducers from './reducers';
import * as actions from './actions';
import * as persistence from './firebase';
import * as presence from './presence';
const lodash = require('lodash');
const events = require('events');
const uuid = require('node-uuid');
/**
* this holds the app's state which is comprised of:
* 1) user object: UserIF
* 2) data for the user: DataIF
*
* any time this user or data is modified, it emits events to notify any listeners
* that are interested in listening to these changes via:
* 1) LE_SET_USER
* 2) LE_SET_DATA
*/
class ApplicationContext {
public sessionId;
public socket;
public firebase;
public eventEmitter;
public reduxStore;
constructor() {
// init redux reduxStore
this.initReduxStore();
// init firebase
this.initFirebase();
// setup websocket (used for group chat)
this.initSocket();
// unique session id
this.sessionId = uuid.v4();
// create event emitter
this.initEventEmitter();
// setup firebase auth
persistence.initAuth(this);
// setup firebase presence
presence.initPresence(this);
}
isProduction() {
const hostname = window.location.hostname;
if (!lodash.isEqual(hostname, "localhost")) {
// prod app
return true;
} else {
// dev app
return false;
}
}
isDevelopment() {
return !this.isProduction();
}
/**
* this generates a different URL depending on whether the code is running on
* localhost or not.
* DEV - If it's running in localhost, then it understands this to be
* the dev environment and it tries to connect to "localhost:8080".
* PROD - If it's NOT running in localhost, then it understands this to be the
* production environment and tries to connect to "/".
* @returns {string}
*/
getSocketURL() {
let socketURL = "http://localhost:8080";
if (this.isProduction()) {
socketURL = "/";
}
return socketURL;
}
/**
* this sets up the socket object for use by this context
*/
initSocket() {
let io = require("socket.io-client");
this.socket = new io.connect(this.getSocketURL());
}
/**
* to access the socket for this context use this method ... you can emit()
* using it, and you can attach on() listeners to this as well ... if you attach
* listeners, it's up to you to remove them from the socket when they're no longer
* needed. This class will NOT do the cleanup for you.
* @returns {io.connect|*}
*/
getSocket() {
return this.socket;
}
/**
* this returns an ephermeral session id for this session ... will change every
* time this session is restarted (ApplicationContext is created).
* @returns {string|*}
*/
getSessionId() {
return this.sessionId;
}
/**
* is true if the user object is set, and it contains a uid field.
* you can get the user object from getUser()
* you can get the uid from getUserId()
* @returns {boolean}
*/
isUserSet() {
try {
if (!lodash.isNil(this.getUser())) {
if (!lodash.isNil(this.getUserId())) {
return true;
}
}
return false;
} catch (err) {
return false;
}
}
/**
* get a reference to the saved user object
* @returns {UserIF}
*/
| () {
try {
return this.getReduxState().user;
} catch (err) {
return null;
}
}
/** gets the uid field of the userObject */
getUserId() {
try {
return this.getUser().uid;
} catch (err) {
return null;
}
}
/**
* get a reference to the saved data object
* @returns {DataIF}
*/
getData(): DataIF {
return this.getReduxState().data;
}
/** this tells firebase to start sign-in using Google (vs anon auth) */
forceSignIn() {
persistence.forceSignIn(this);
}
/** this tells firebase to initiate sign-out (of users who came in thru any
* auth providers - Google and anon) */
forceSignOut() {
persistence.forceSignOut(this);
}
/** setup the internal firebase object */
initFirebase() {
this.firebase = require("firebase");
const config = require('../../global/constants').FIREBASE_CONFIG;
this.firebase.initializeApp(config);
}
/**
* get a ref to the firebase instance
* @returns {firebase|*}
*/
getFirebase() {
return this.firebase;
}
/** this is a convenience method that allows you to get the firebase server
* timestamp object
*/
getFirebaseServerTimestampObject() {
return this.firebase.database.ServerValue.TIMESTAMP
}
/**
* get a ref to the firebase.database() instance
* @returns {*|firebase.database.Database|!firebase.database.Database}
*/
getDatabase() {
return this.firebase.database();
}
/** creates the event emitter */
initEventEmitter() {
this.eventEmitter = new events.EventEmitter();
}
/** disconnect the socket connection */
disconnectSocket() {
this.socket.disconnect();
}
/** convenience method to emit an event to the server */
emitToServer(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emitToServer: eventName ${eventName} fired`);
console.dir(payload);
}
this.socket.emit(eventName, payload);
}
/** convenience method to emit an event */
emit(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emit: eventName ${eventName} fired`);
console.dir(payload);
}
this.eventEmitter.emit(eventName, payload);
}
/** convenience method to listen to event
* @returns the listener that is passed as param
*/
addListener(eventName, listener) {
function logging_listener() {
if (LOGGING_ENABLED) {
console.log(`listener: for eventName ${eventName} responding`);
}
listener.apply(this, arguments);
}
this.eventEmitter.addListener(
eventName, logging_listener
);
return logging_listener;
}
/** convenience method to remove listener for event */
removeListener(eventName, listener) {
this.eventEmitter.removeListener(eventName, listener);
}
/**
* initialize the redux store and get the actions and reducers wired up to it
* this also tests to see if the browser is inDevelopment and if so, it will try and
* use the Redux Chrome Dev Tools Extension.
*/
initReduxStore() {
try {
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
const middlewares = [
window.__REDUX_DEVTOOLS_EXTENSION__ && window.__REDUX_DEVTOOLS_EXTENSION__(),
add_todo_item,
toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
composeEnhancers(middlewareEnhancer)
);
} catch (e) {
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
middlewareEnhancer
);
}
// explicitly INIT Redux!
this.reduxStore.dispatch(actions.action_init());
/**
* this enables the use of redux dev tools in Chrome if you have the
* Chrome extension installed - https://goo.gl/xU4D6P
*/
// let USE_REDUX_DEVTOOLS = this.isDevelopment();
// create redux reduxStore
// if (USE_REDUX_DEVTOOLS) {
// // the following line uses chrome devtools redux plugin
// this.reduxStore = createStore(
// reducers.reducer_main,
// null,
// window.devToolsExtension && window.devToolsExtension()
// );
// }
// else {
// this.reduxStore = createStore(
// reducers.reducer_main,
// null
// );
// }
}
/**
* get a reference to the redux store
* @returns {any}
*/
getReduxStore() {
return this.reduxStore;
}
/**
* get a reference to the redux state
* @returns {S}
*/
getReduxState(): ReduxStateIF {
return this.reduxStore.getState();
}
}
function _dispatchAction(action, ctx) {
persistence.actuallyDispathAction(action, ctx);
}
function _bindActionCreator(actionCreator, dispatch, ctx) {
return function () {
return _dispatchAction(actionCreator.apply(undefined, arguments), ctx);
};
}
function bindActionCreators(actionCreators, dispatch, ctx) {
if (typeof actionCreators === 'function') {
return _bindActionCreator(actionCreators, dispatch, ctx);
}
if (typeof actionCreators !== 'object' || actionCreators === null) {
throw new Error(
'bindActionCreators expected an object or a function, instead received ' +
(actionCreators === null ? 'null' : typeof actionCreators) + '. ' +
'Did you write "import actions from" instead of "import * as' +
' actions from"?'
);
}
var keys = Object.keys(actionCreators);
var boundActionCreators = {};
for (var i = 0; i < keys.length; i++) {
var key = keys[i];
var actionCreator = actionCreators[key];
if (typeof actionCreator === 'function') {
boundActionCreators[key] = _bindActionCreator(actionCreator, dispatch, ctx);
}
}
return boundActionCreators;
}
/** create a singleton that will be used everywhere in the project */
const applicationContext = new ApplicationContext();
/** export the singleton */
export {applicationContext, bindActionCreators} | getUser | identifier_name |
context.ts | /*
* Copyright 2018 Nazmul Idris All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/// <reference path="../../../typings/globals/node/index.d.ts" />
import {action_set_state_data, action_set_state_user} from "./actions";
const GLOBAL_CONSTANTS = require('../../global/constants').GLOBAL_CONSTANTS;
const LOGGING_ENABLED = require('../../global/constants').LOGGING_ENABLED;
import {customMiddleware, add_todo_item, toggle_todo_item} from "./mymiddlewares";
import {UserIF, DataIF, ReduxStateIF} from "./interfaces";
import {createStore, applyMiddleware, compose} from 'redux';
import * as reducers from './reducers';
import * as actions from './actions';
import * as persistence from './firebase';
import * as presence from './presence';
const lodash = require('lodash');
const events = require('events');
const uuid = require('node-uuid');
/**
* this holds the app's state which is comprised of:
* 1) user object: UserIF
* 2) data for the user: DataIF
*
* any time this user or data is modified, it emits events to notify any listeners
* that are interested in listening to these changes via:
* 1) LE_SET_USER
* 2) LE_SET_DATA
*/
class ApplicationContext {
public sessionId;
public socket;
public firebase;
public eventEmitter;
public reduxStore;
constructor() {
// init redux reduxStore
this.initReduxStore();
// init firebase
this.initFirebase();
// setup websocket (used for group chat)
this.initSocket();
// unique session id
this.sessionId = uuid.v4();
// create event emitter
this.initEventEmitter();
// setup firebase auth
persistence.initAuth(this);
// setup firebase presence
presence.initPresence(this);
}
isProduction() {
const hostname = window.location.hostname;
if (!lodash.isEqual(hostname, "localhost")) {
// prod app
return true;
} else {
// dev app
return false;
}
}
isDevelopment() {
return !this.isProduction();
}
/**
* this generates a different URL depending on whether the code is running on
* localhost or not. | * @returns {string}
*/
getSocketURL() {
let socketURL = "http://localhost:8080";
if (this.isProduction()) {
socketURL = "/";
}
return socketURL;
}
/**
* this sets up the socket object for use by this context
*/
initSocket() {
let io = require("socket.io-client");
this.socket = new io.connect(this.getSocketURL());
}
/**
* to access the socket for this context use this method ... you can emit()
* using it, and you can attach on() listeners to this as well ... if you attach
* listeners, it's up to you to remove them from the socket when they're no longer
* needed. This class will NOT do the cleanup for you.
* @returns {io.connect|*}
*/
getSocket() {
return this.socket;
}
/**
* this returns an ephermeral session id for this session ... will change every
* time this session is restarted (ApplicationContext is created).
* @returns {string|*}
*/
getSessionId() {
return this.sessionId;
}
/**
* is true if the user object is set, and it contains a uid field.
* you can get the user object from getUser()
* you can get the uid from getUserId()
* @returns {boolean}
*/
isUserSet() {
try {
if (!lodash.isNil(this.getUser())) {
if (!lodash.isNil(this.getUserId())) {
return true;
}
}
return false;
} catch (err) {
return false;
}
}
/**
* get a reference to the saved user object
* @returns {UserIF}
*/
getUser() {
try {
return this.getReduxState().user;
} catch (err) {
return null;
}
}
/** gets the uid field of the userObject */
getUserId() {
try {
return this.getUser().uid;
} catch (err) {
return null;
}
}
/**
* get a reference to the saved data object
* @returns {DataIF}
*/
getData(): DataIF {
return this.getReduxState().data;
}
/** this tells firebase to start sign-in using Google (vs anon auth) */
forceSignIn() {
persistence.forceSignIn(this);
}
/** this tells firebase to initiate sign-out (of users who came in thru any
* auth providers - Google and anon) */
forceSignOut() {
persistence.forceSignOut(this);
}
/** setup the internal firebase object */
initFirebase() {
this.firebase = require("firebase");
const config = require('../../global/constants').FIREBASE_CONFIG;
this.firebase.initializeApp(config);
}
/**
* get a ref to the firebase instance
* @returns {firebase|*}
*/
getFirebase() {
return this.firebase;
}
/** this is a convenience method that allows you to get the firebase server
* timestamp object
*/
getFirebaseServerTimestampObject() {
return this.firebase.database.ServerValue.TIMESTAMP
}
/**
* get a ref to the firebase.database() instance
* @returns {*|firebase.database.Database|!firebase.database.Database}
*/
getDatabase() {
return this.firebase.database();
}
/** creates the event emitter */
initEventEmitter() {
this.eventEmitter = new events.EventEmitter();
}
/** disconnect the socket connection */
disconnectSocket() {
this.socket.disconnect();
}
/** convenience method to emit an event to the server */
emitToServer(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emitToServer: eventName ${eventName} fired`);
console.dir(payload);
}
this.socket.emit(eventName, payload);
}
/** convenience method to emit an event */
emit(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emit: eventName ${eventName} fired`);
console.dir(payload);
}
this.eventEmitter.emit(eventName, payload);
}
/** convenience method to listen to event
* @returns the listener that is passed as param
*/
addListener(eventName, listener) {
function logging_listener() {
if (LOGGING_ENABLED) {
console.log(`listener: for eventName ${eventName} responding`);
}
listener.apply(this, arguments);
}
this.eventEmitter.addListener(
eventName, logging_listener
);
return logging_listener;
}
/** convenience method to remove listener for event */
removeListener(eventName, listener) {
this.eventEmitter.removeListener(eventName, listener);
}
/**
* initialize the redux store and get the actions and reducers wired up to it
* this also tests to see if the browser is inDevelopment and if so, it will try and
* use the Redux Chrome Dev Tools Extension.
*/
initReduxStore() {
try {
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
const middlewares = [
window.__REDUX_DEVTOOLS_EXTENSION__ && window.__REDUX_DEVTOOLS_EXTENSION__(),
add_todo_item,
toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
composeEnhancers(middlewareEnhancer)
);
} catch (e) {
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
middlewareEnhancer
);
}
// explicitly INIT Redux!
this.reduxStore.dispatch(actions.action_init());
/**
* this enables the use of redux dev tools in Chrome if you have the
* Chrome extension installed - https://goo.gl/xU4D6P
*/
// let USE_REDUX_DEVTOOLS = this.isDevelopment();
// create redux reduxStore
// if (USE_REDUX_DEVTOOLS) {
// // the following line uses chrome devtools redux plugin
// this.reduxStore = createStore(
// reducers.reducer_main,
// null,
// window.devToolsExtension && window.devToolsExtension()
// );
// }
// else {
// this.reduxStore = createStore(
// reducers.reducer_main,
// null
// );
// }
}
/**
* get a reference to the redux store
* @returns {any}
*/
getReduxStore() {
return this.reduxStore;
}
/**
* get a reference to the redux state
* @returns {S}
*/
getReduxState(): ReduxStateIF {
return this.reduxStore.getState();
}
}
function _dispatchAction(action, ctx) {
persistence.actuallyDispathAction(action, ctx);
}
function _bindActionCreator(actionCreator, dispatch, ctx) {
return function () {
return _dispatchAction(actionCreator.apply(undefined, arguments), ctx);
};
}
function bindActionCreators(actionCreators, dispatch, ctx) {
if (typeof actionCreators === 'function') {
return _bindActionCreator(actionCreators, dispatch, ctx);
}
if (typeof actionCreators !== 'object' || actionCreators === null) {
throw new Error(
'bindActionCreators expected an object or a function, instead received ' +
(actionCreators === null ? 'null' : typeof actionCreators) + '. ' +
'Did you write "import actions from" instead of "import * as' +
' actions from"?'
);
}
var keys = Object.keys(actionCreators);
var boundActionCreators = {};
for (var i = 0; i < keys.length; i++) {
var key = keys[i];
var actionCreator = actionCreators[key];
if (typeof actionCreator === 'function') {
boundActionCreators[key] = _bindActionCreator(actionCreator, dispatch, ctx);
}
}
return boundActionCreators;
}
/** create a singleton that will be used everywhere in the project */
const applicationContext = new ApplicationContext();
/** export the singleton */
export {applicationContext, bindActionCreators} | * DEV - If it's running in localhost, then it understands this to be
* the dev environment and it tries to connect to "localhost:8080".
* PROD - If it's NOT running in localhost, then it understands this to be the
* production environment and tries to connect to "/". | random_line_split |
block.rs | use ::slice::Slice;
use ::errors::RubbleResult;
use ::util::coding;
use ::status::Status;
use ::comparator::SliceComparator;
use std::mem;
use std::str;
pub struct OwnedBlock {
data: Vec<u8>,
restart_offset: usize,
}
pub struct SliceBlock<'a> {
data: Slice<'a>,
restart_offset: usize,
}
pub trait Block {
fn get_size(&self) -> usize;
fn data(&self) -> Slice;
fn restart_offset(&self) -> usize;
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>;
fn num_restarts(data: Slice) -> usize
{
assert!(data.len() >= mem::size_of::<u32>());
let offset = data.len() - mem::size_of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
} | impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at "p",
/// storing the number of shared key bytes, non_shared key bytes,
/// and the length of the value in "*shared", "*non_shared", and
/// "*value_length", respectively. Will not dereference past "limit".
///
/// If any errors are detected, returns NULL. Otherwise, returns a
/// pointer to the key delta (just past the three decoded values).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
assert!(num_restarts > 0);
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn compare(&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in data_ just past the end of the current entry.
fn next_entry_offset(&self) -> usize
{
self.value_offset + self.value_len
}
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current_ will be fixed by ParseNextKey();
// ParseNextKey() starts at the end of value_, so set value_ accordingly
self.value_offset = self.get_restart_point(index);
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
// let shared, non_shared, value_length;
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
let mid_key = entry.new_slice;
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self.parse_next_key() && self.next_entry_offset() < self.restarts {
// Keep skipping
}
}
fn corruption_error(&mut self) {
self.current = self.restarts;
self.restart_index = self.num_restarts;
self.status = Status::Corruption("bad entry in block".into());
self.key = String::new();
}
fn parse_next_key(&mut self) -> bool {
self.current = self.next_entry_offset();
let p = &self.data[self.current..];
if p.len() == 0 {
// No more entries to return. Mark as invalid.
self.current = self.restarts;
self.restart_index = self.num_restarts;
return false;
}
let entry = match decode_entry(p) {
Ok(p) => p,
_ => {
self.corruption_error();
return false;
}
};
if self.key.len() < entry.shared as usize {
self.corruption_error();
return false;
}
self.key = str::from_utf8(&entry.new_slice[..entry.non_shared as usize])
.expect("Invalid UTF-8 key")
.to_owned();
self.value_offset = entry.non_shared as usize;
self.value_len = entry.value_length as usize;
while self.restart_index + 1 < self.num_restarts
&& self.get_restart_point(self.restart_index + 1) < self.current
{
self.restart_index += 1;
}
true
}
}
pub struct KVEntry {
key: String,
value: Vec<u8>,
}
impl<'a, T: SliceComparator> Iterator for BlockIterator<'a, T> {
// we will be counting with usize
type Item = KVEntry;
fn next(&mut self) -> Option<KVEntry> {
self.step();
match self.num_restarts {
0 => None,
_ => Some(KVEntry {
key: self.key(),
value: self.value().to_vec(),
})
}
}
} | random_line_split | |
block.rs | use ::slice::Slice;
use ::errors::RubbleResult;
use ::util::coding;
use ::status::Status;
use ::comparator::SliceComparator;
use std::mem;
use std::str;
pub struct OwnedBlock {
data: Vec<u8>,
restart_offset: usize,
}
pub struct SliceBlock<'a> {
data: Slice<'a>,
restart_offset: usize,
}
pub trait Block {
fn get_size(&self) -> usize;
fn data(&self) -> Slice;
fn restart_offset(&self) -> usize;
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>;
fn num_restarts(data: Slice) -> usize
{
assert!(data.len() >= mem::size_of::<u32>());
let offset = data.len() - mem::size_of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
}
impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at "p",
/// storing the number of shared key bytes, non_shared key bytes,
/// and the length of the value in "*shared", "*non_shared", and
/// "*value_length", respectively. Will not dereference past "limit".
///
/// If any errors are detected, returns NULL. Otherwise, returns a
/// pointer to the key delta (just past the three decoded values).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
assert!(num_restarts > 0);
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn compare(&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in data_ just past the end of the current entry.
fn next_entry_offset(&self) -> usize
|
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current_ will be fixed by ParseNextKey();
// ParseNextKey() starts at the end of value_, so set value_ accordingly
self.value_offset = self.get_restart_point(index);
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
// let shared, non_shared, value_length;
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
let mid_key = entry.new_slice;
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self.parse_next_key() && self.next_entry_offset() < self.restarts {
// Keep skipping
}
}
fn corruption_error(&mut self) {
self.current = self.restarts;
self.restart_index = self.num_restarts;
self.status = Status::Corruption("bad entry in block".into());
self.key = String::new();
}
fn parse_next_key(&mut self) -> bool {
self.current = self.next_entry_offset();
let p = &self.data[self.current..];
if p.len() == 0 {
// No more entries to return. Mark as invalid.
self.current = self.restarts;
self.restart_index = self.num_restarts;
return false;
}
let entry = match decode_entry(p) {
Ok(p) => p,
_ => {
self.corruption_error();
return false;
}
};
if self.key.len() < entry.shared as usize {
self.corruption_error();
return false;
}
self.key = str::from_utf8(&entry.new_slice[..entry.non_shared as usize])
.expect("Invalid UTF-8 key")
.to_owned();
self.value_offset = entry.non_shared as usize;
self.value_len = entry.value_length as usize;
while self.restart_index + 1 < self.num_restarts
&& self.get_restart_point(self.restart_index + 1) < self.current
{
self.restart_index += 1;
}
true
}
}
pub struct KVEntry {
key: String,
value: Vec<u8>,
}
impl<'a, T: SliceComparator> Iterator for BlockIterator<'a, T> {
// we will be counting with usize
type Item = KVEntry;
fn next(&mut self) -> Option<KVEntry> {
self.step();
match self.num_restarts {
0 => None,
_ => Some(KVEntry {
key: self.key(),
value: self.value().to_vec(),
})
}
}
}
| {
self.value_offset + self.value_len
} | identifier_body |
block.rs | use ::slice::Slice;
use ::errors::RubbleResult;
use ::util::coding;
use ::status::Status;
use ::comparator::SliceComparator;
use std::mem;
use std::str;
pub struct OwnedBlock {
data: Vec<u8>,
restart_offset: usize,
}
pub struct SliceBlock<'a> {
data: Slice<'a>,
restart_offset: usize,
}
pub trait Block {
fn get_size(&self) -> usize;
fn data(&self) -> Slice;
fn restart_offset(&self) -> usize;
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>;
fn num_restarts(data: Slice) -> usize
{
assert!(data.len() >= mem::size_of::<u32>());
let offset = data.len() - mem::size_of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
}
impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at "p",
/// storing the number of shared key bytes, non_shared key bytes,
/// and the length of the value in "*shared", "*non_shared", and
/// "*value_length", respectively. Will not dereference past "limit".
///
/// If any errors are detected, returns NULL. Otherwise, returns a
/// pointer to the key delta (just past the three decoded values).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
assert!(num_restarts > 0);
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn | (&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in data_ just past the end of the current entry.
fn next_entry_offset(&self) -> usize
{
self.value_offset + self.value_len
}
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current_ will be fixed by ParseNextKey();
// ParseNextKey() starts at the end of value_, so set value_ accordingly
self.value_offset = self.get_restart_point(index);
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
// let shared, non_shared, value_length;
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
let mid_key = entry.new_slice;
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self.parse_next_key() && self.next_entry_offset() < self.restarts {
// Keep skipping
}
}
fn corruption_error(&mut self) {
self.current = self.restarts;
self.restart_index = self.num_restarts;
self.status = Status::Corruption("bad entry in block".into());
self.key = String::new();
}
fn parse_next_key(&mut self) -> bool {
self.current = self.next_entry_offset();
let p = &self.data[self.current..];
if p.len() == 0 {
// No more entries to return. Mark as invalid.
self.current = self.restarts;
self.restart_index = self.num_restarts;
return false;
}
let entry = match decode_entry(p) {
Ok(p) => p,
_ => {
self.corruption_error();
return false;
}
};
if self.key.len() < entry.shared as usize {
self.corruption_error();
return false;
}
self.key = str::from_utf8(&entry.new_slice[..entry.non_shared as usize])
.expect("Invalid UTF-8 key")
.to_owned();
self.value_offset = entry.non_shared as usize;
self.value_len = entry.value_length as usize;
while self.restart_index + 1 < self.num_restarts
&& self.get_restart_point(self.restart_index + 1) < self.current
{
self.restart_index += 1;
}
true
}
}
pub struct KVEntry {
key: String,
value: Vec<u8>,
}
impl<'a, T: SliceComparator> Iterator for BlockIterator<'a, T> {
// we will be counting with usize
type Item = KVEntry;
fn next(&mut self) -> Option<KVEntry> {
self.step();
match self.num_restarts {
0 => None,
_ => Some(KVEntry {
key: self.key(),
value: self.value().to_vec(),
})
}
}
}
| compare | identifier_name |
generator.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accept a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
failure::Error,
log::debug,
serde_derive::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for issuer to block on commands from
/// generator or from verifiers. When command_count drops to zero, issuer blocks
/// on someone to wake them up.
/// When generator or verifier insert a command in issuer's channel they signal
/// the issuer to wake up.
#[derive(Clone)]
pub struct ActiveCommands {
/// command_count indicates how many commands are in issuers queue.
/// Mutex and condition variable protect and help to wait on the count.
command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut count = ActiveCommands::new();
///
/// Thread 1
/// command_count.remove();
/// cmd = receiver.try_recv();
/// assert_eq!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.insert();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns value of command_count. This returns a snap-shot in time value.
/// By the time another action is performed based on previous value returned
/// by count, the count may have changed. Currently, sender increments the
/// count and reciever decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and return it's value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
assert!(false);
}
return operations;
}
/// Based on the input args this returns a generator that can generate requested
/// IO load.For now we only allow sequential io.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. Order of IOs issued need not be same as order they arrive at
// verifier and get logged. While replaying, this number helps us determine order
// to issue IOs irrespective of the order they are read from replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates verifier and issuer thread. It build channels for them to communicate.
/// This thread assumes the role of generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
// Channel used to send commands from generator to issuer
// This is the only bounded channel. The throttle control happens over this channel.
// TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
// this channel be part of the ActiveCommand implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
// A hashmap of all outstanding IOs. Shared between generator and verifier.
// Generator inserts entries and verifier removes it.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
#[test]
fn | () {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
let mut command_count_copy = command_count.clone();
command_count.increment();
let thd = thread::spawn(move || {
sleep(time::Duration::from_secs(1));
// First repay will wake the other threads sleeping borrower.
command_count_copy.increment();
});
// On first call we dont block as the we find it immediately
assert_eq!(command_count.decrement(), false);
// On second call we block as the thread that is supposed to increment in
// sleeping for a second.
assert_eq!(command_count.decrement(), true);
let _ = thd.join();
// command count should be zero now
assert_eq!(command_count.count(), 0);
}
}
| active_command_block_test | identifier_name |
generator.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accept a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
failure::Error,
log::debug,
serde_derive::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for issuer to block on commands from
/// generator or from verifiers. When command_count drops to zero, issuer blocks
/// on someone to wake them up.
/// When generator or verifier insert a command in issuer's channel they signal
/// the issuer to wake up.
#[derive(Clone)]
pub struct ActiveCommands {
/// command_count indicates how many commands are in issuers queue.
/// Mutex and condition variable protect and help to wait on the count. | command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut count = ActiveCommands::new();
///
/// Thread 1
/// command_count.remove();
/// cmd = receiver.try_recv();
/// assert_eq!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.insert();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns value of command_count. This returns a snap-shot in time value.
/// By the time another action is performed based on previous value returned
/// by count, the count may have changed. Currently, sender increments the
/// count and reciever decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and return it's value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
assert!(false);
}
return operations;
}
/// Based on the input args this returns a generator that can generate requested
/// IO load.For now we only allow sequential io.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. Order of IOs issued need not be same as order they arrive at
// verifier and get logged. While replaying, this number helps us determine order
// to issue IOs irrespective of the order they are read from replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates verifier and issuer thread. It build channels for them to communicate.
/// This thread assumes the role of generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
// Channel used to send commands from generator to issuer
// This is the only bounded channel. The throttle control happens over this channel.
// TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
// this channel be part of the ActiveCommand implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
// A hashmap of all outstanding IOs. Shared between generator and verifier.
// Generator inserts entries and verifier removes it.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
#[test]
fn active_command_block_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
let mut command_count_copy = command_count.clone();
command_count.increment();
let thd = thread::spawn(move || {
sleep(time::Duration::from_secs(1));
// First repay will wake the other threads sleeping borrower.
command_count_copy.increment();
});
// On first call we dont block as the we find it immediately
assert_eq!(command_count.decrement(), false);
// On second call we block as the thread that is supposed to increment in
// sleeping for a second.
assert_eq!(command_count.decrement(), true);
let _ = thd.join();
// command count should be zero now
assert_eq!(command_count.count(), 0);
}
} | random_line_split | |
generator.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accept a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
failure::Error,
log::debug,
serde_derive::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for issuer to block on commands from
/// generator or from verifiers. When command_count drops to zero, issuer blocks
/// on someone to wake them up.
/// When generator or verifier insert a command in issuer's channel they signal
/// the issuer to wake up.
#[derive(Clone)]
pub struct ActiveCommands {
/// command_count indicates how many commands are in issuers queue.
/// Mutex and condition variable protect and help to wait on the count.
command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut count = ActiveCommands::new();
///
/// Thread 1
/// command_count.remove();
/// cmd = receiver.try_recv();
/// assert_eq!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.insert();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns value of command_count. This returns a snap-shot in time value.
/// By the time another action is performed based on previous value returned
/// by count, the count may have changed. Currently, sender increments the
/// count and reciever decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and return it's value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> |
/// Based on the input args this returns a generator that can generate requested
/// IO load.For now we only allow sequential io.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. Order of IOs issued need not be same as order they arrive at
// verifier and get logged. While replaying, this number helps us determine order
// to issue IOs irrespective of the order they are read from replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates verifier and issuer thread. It build channels for them to communicate.
/// This thread assumes the role of generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
// Channel used to send commands from generator to issuer
// This is the only bounded channel. The throttle control happens over this channel.
// TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
// this channel be part of the ActiveCommand implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
// A hashmap of all outstanding IOs. Shared between generator and verifier.
// Generator inserts entries and verifier removes it.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
#[test]
fn active_command_block_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
let mut command_count_copy = command_count.clone();
command_count.increment();
let thd = thread::spawn(move || {
sleep(time::Duration::from_secs(1));
// First repay will wake the other threads sleeping borrower.
command_count_copy.increment();
});
// On first call we dont block as the we find it immediately
assert_eq!(command_count.decrement(), false);
// On second call we block as the thread that is supposed to increment in
// sleeping for a second.
assert_eq!(command_count.decrement(), true);
let _ = thd.join();
// command count should be zero now
assert_eq!(command_count.count(), 0);
}
}
| {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
assert!(false);
}
return operations;
} | identifier_body |
generator.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of Generator thread and Generator trait.
//!
//! Generator thread accept a set of serializable arguments.
use {
crate::common_operations::create_target,
crate::io_packet::IoPacketType,
crate::issuer::{run_issuer, IssuerArgs},
crate::log::Stats,
crate::operations::{OperationType, PipelineStages},
crate::sequential_io_generator::SequentialIoGenerator,
crate::target::{AvailableTargets, TargetOps},
crate::verifier::{run_verifier, VerifierArgs},
failure::Error,
log::debug,
serde_derive::{Deserialize, Serialize},
std::{
clone::Clone,
collections::HashMap,
ops::Range,
sync::{
mpsc::{channel, sync_channel, SyncSender},
Arc, Condvar, Mutex,
},
thread::spawn,
time::Instant,
},
};
/// This structure provides a mechanism for issuer to block on commands from
/// generator or from verifiers. When command_count drops to zero, issuer blocks
/// on someone to wake them up.
/// When generator or verifier insert a command in issuer's channel they signal
/// the issuer to wake up.
#[derive(Clone)]
pub struct ActiveCommands {
/// command_count indicates how many commands are in issuers queue.
/// Mutex and condition variable protect and help to wait on the count.
command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut count = ActiveCommands::new();
///
/// Thread 1
/// command_count.remove();
/// cmd = receiver.try_recv();
/// assert_eq!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.insert();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns value of command_count. This returns a snap-shot in time value.
/// By the time another action is performed based on previous value returned
/// by count, the count may have changed. Currently, sender increments the
/// count and reciever decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and return it's value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write | else {
assert!(false);
}
return operations;
}
/// Based on the input args this returns a generator that can generate requested
/// IO load.For now we only allow sequential io.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. Order of IOs issued need not be same as order they arrive at
// verifier and get logged. While replaying, this number helps us determine order
// to issue IOs irrespective of the order they are read from replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates verifier and issuer thread. It build channels for them to communicate.
/// This thread assumes the role of generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
// Channel used to send commands from generator to issuer
// This is the only bounded channel. The throttle control happens over this channel.
// TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
// this channel be part of the ActiveCommand implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
// A hashmap of all outstanding IOs. Shared between generator and verifier.
// Generator inserts entries and verifier removes it.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
#[test]
fn active_command_block_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
let mut command_count_copy = command_count.clone();
command_count.increment();
let thd = thread::spawn(move || {
sleep(time::Duration::from_secs(1));
// First repay will wake the other threads sleeping borrower.
command_count_copy.increment();
});
// On first call we dont block as the we find it immediately
assert_eq!(command_count.decrement(), false);
// On second call we block as the thread that is supposed to increment in
// sleeping for a second.
assert_eq!(command_count.decrement(), true);
let _ = thd.join();
// command count should be zero now
assert_eq!(command_count.count(), 0);
}
}
| {
operations.push(OperationType::Write);
} | conditional_block |
main.rs | #![feature(try_from)]
extern crate itertools;
extern crate ketos;
extern crate minutiae;
extern crate pcg;
extern crate rand;
extern crate uuid;
use std::fmt::{self, Debug, Formatter};
use std::rc::Rc;
use ketos::{Context, GlobalScope, Scope, Value};
use ketos::compile::compile;
use ketos::bytecode::Code;
use ketos::lexer::Lexer;
use ketos::parser::Parser;
use ketos::rc_vec::RcVec;
use ketos::restrict::RestrictConfig;
use itertools::Itertools;
use minutiae::prelude::*;
use minutiae::engine::serial::SerialEngine;
use minutiae::engine::iterator::SerialEntityIterator;
use minutiae::driver::middleware::MinDelay;
use minutiae::driver::BasicDriver;
use minutiae::universe::Universe2D;
use minutiae::util::{debug, translate_entity};
use pcg::PcgRng;
use rand::{Rng, SeedableRng};
use uuid::Uuid;
#[cfg(feature = "wasm")]
extern {
pub fn canvas_render(pixbuf_ptr: *const u8);
}
const UNIVERSE_SIZE: usize = 800;
const ANT_COUNT: usize = 2000;
const PRNG_SEED: [u64; 2] = [198918237842, 9];
const UNIVERSE_LENGTH: usize = UNIVERSE_SIZE * UNIVERSE_SIZE;
fn get_codes_from_source(context: &Context, src: &str) -> Result<Vec<Rc<Code>>, String> {
let lexer = Lexer::new(src, 0);
Parser::new(&context, lexer)
.parse_exprs()
.map_err(debug)?
.iter()
.map(|v| compile(&context, v))
.fold_results(Vec::new(), |mut acc, code| {
acc.push(Rc::new(code));
acc
})
.map_err(debug)
}
fn get_ant_restrictions() -> RestrictConfig {
RestrictConfig::strict()
}
fn get_ant_global_scope() -> Scope {
let global_scope = ketos::scope::GlobalScope::default("ant");
global_scope.add_named_value("UNIVERSE_SIZE", UNIVERSE_SIZE.into());
return Rc::new(global_scope)
}
fn get_ant_default_context() -> ketos::Context {
let scope = get_ant_global_scope();
let restrictions = get_ant_restrictions();
let context = ketos::Context::new(scope, restrictions);
// Fill the context with default items from our "standard library"
let std_src = include_str!("./ant_std.lisp");
let codes: Vec<Rc<Code>> = get_codes_from_source(&context, std_src)
.expect("You've got syntax errors in your standard library!");
for code in &codes {
ketos::exec::execute(&context, Rc::clone(code))
.expect("Error while executing standard library code!");
}
context
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum CellContents {
Empty,
Filled(u8),
Food(u16),
Anthill,
}
#[derive(Clone, Debug)]
struct CS {
contents: CellContents,
}
impl CellState for CS {}
impl Default for CS {
fn default() -> Self {
CS { contents: CellContents::Empty }
}
}
#[derive(Clone)]
struct Ant {
code: Vec<Rc<Code>>,
context: Context,
holding: CellContents,
}
impl Ant {
pub fn from_source(src: &str) -> Result<Self, String> {
let context = get_ant_default_context();
let codes = get_codes_from_source(&context, src)?;
Ok(Ant {
code: codes,
context: context,
holding: CellContents::Empty,
})
}
}
impl Debug for Ant {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Ant {{ code: {:?}, context: {{..}}, holding: {:?} }}", self.code, self.holding)
}
}
impl<'a> From<&'a ES> for Option<&'a Ant> {
fn from(entity_state: &'a ES) -> Self {
match entity_state {
&ES::Ant(ref ant) => Some(ant),
}
}
}
impl<'a> From<&'a mut ES> for Option<&'a mut Ant> {
fn from(entity_state: &'a mut ES) -> Self {
match entity_state {
&mut ES::Ant(ref mut ant) => Some(ant),
}
}
}
#[derive(Clone, Debug)]
enum ES {
Ant(Ant),
}
impl EntityState<CS> for ES {}
impl From<Ant> for ES {
fn from(ant: Ant) -> Self {
ES::Ant(ant)
}
}
#[derive(Clone)]
struct MES(ketos::Value);
impl Default for MES {
fn default() -> Self {
MES(ketos::Value::Unit)
}
}
impl MutEntityState for MES {}
enum CA {
}
impl CellAction<CS> for CA {}
#[derive(Debug)]
enum EA {
}
type U = Universe2D<CS, ES, MES>;
fn map_value_to_self_action(val: &Value) -> Result<SelfAction<CS, ES, EA>, String> {
match val {
&Value::List(ref list) => {
if list.is_empty() {
return Err("The provided action list was empty!".into());
}
match &list[0] {
&Value::String(ref action_type) => match action_type.as_ref() {
"translate" => {
if list.len() != 3 {
return Err(format!("Invalid amount of arguments provided to translate action: {}", list.len() - 1));
}
let arg1: isize = match &list[1] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 1 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn reset_action_buffers(context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
None => { return; }, // The entity been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action, ..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) => translate_entity(
x_offset,
y_offset,
&mut universe.entities,
action.source_entity_index,
action.source_uuid,
UNIVERSE_SIZE
),
Action::EntityAction{ .. } | Action::CellAction{ .. } => unreachable!(),
_ => unimplemented!(),
}
}
fn exec_entity_action(_action: &OwnedAction<CS, ES, CA, EA>) {
unimplemented!(); // TODO
}
impl SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U> for AntEngine {
fn iter_entities(&self, _universe: &U) -> SerialEntityIterator<CS, ES> {
SerialEntityIterator::new(UNIVERSE_SIZE)
}
fn exec_actions(
&self,
universe: &mut U,
cell_actions: &[OwnedAction<CS, ES, CA, EA>],
self_actions: &[OwnedAction<CS, ES, CA, EA>],
entity_actions: &[OwnedAction<CS, ES, CA, EA>]
) {
for cell_action in cell_actions { exec_cell_action(cell_action, &mut universe.cells, &mut universe.entities); }
for self_action in self_actions { exec_self_action(universe, self_action); }
for entity_action in entity_actions { exec_entity_action(entity_action); }
}
fn drive_entity(
&mut self,
universe_index: usize,
entity: &Entity<CS, ES, MES>,
_: &U,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) {
match entity.state {
ES::Ant(Ant { ref code, ref context, .. }) => {
reset_action_buffers(context, universe_index);
for c in code {
match ketos::exec::execute(context, Rc::clone(&c)) {
Ok(_) => (),
Err(err) => {
println!("Entity script errored: {:?}", err);
return;
},
}; | context,
cell_action_executor,
self_action_executor,
entity_action_executor
) {
Ok(()) => (),
Err(err) => println!("Error while retrieving action buffers from context: {}", err),
}
}
}
}
}
type OurSerialEngine = Box<SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U>>;
/// Given a coordinate of the universe, uses state of its cell and the entities that reside in it to determine a color
/// to display on the canvas. This is called each tick. The returned value is the color in RGBA.
fn calc_color(
cell: &Cell<CS>,
entity_indexes: &[usize],
entity_container: &EntityContainer<CS, ES, MES>
) -> [u8; 4] {
if !entity_indexes.is_empty() {
for i in entity_indexes {
match unsafe { &entity_container.get(*i).state } {
&ES::Ant { .. } => { return [91, 75, 11, 255] },
}
}
[12, 24, 222, 255]
} else {
match cell.state.contents {
CellContents::Anthill => [222, 233, 244, 255],
CellContents::Empty => [12, 12, 12, 255],
CellContents::Food(_) => [200, 30, 40, 255], // TODO: Different colors for different food amounts
CellContents::Filled(_) => [230, 230, 230, 255],
}
}
}
#[cfg(feature = "wasm")]
fn init(
universe: U,
engine: OurSerialEngine
) {
use minutiae::emscripten::{EmscriptenDriver, CanvasRenderer};
let driver = EmscriptenDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(CanvasRenderer::new(UNIVERSE_SIZE, calc_color, canvas_render)),
]);
}
#[cfg(not(feature = "wasm"))]
fn init(
universe: U,
engine: OurSerialEngine
) {
let driver = BasicDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(minutiae::driver::middleware::gif_renderer::GifRenderer::new(
"./out.gif", UNIVERSE_SIZE, calc_color
)),
]);
}
fn main() {
let conf = UniverseConf {
size: 800,
view_distance: 1,
};
let universe = Universe2D::new(conf, &mut WorldGenerator);
let engine: OurSerialEngine = Box::new(AntEngine);
init(universe, engine);
} | }
match process_action_buffers( | random_line_split |
main.rs | #![feature(try_from)]
extern crate itertools;
extern crate ketos;
extern crate minutiae;
extern crate pcg;
extern crate rand;
extern crate uuid;
use std::fmt::{self, Debug, Formatter};
use std::rc::Rc;
use ketos::{Context, GlobalScope, Scope, Value};
use ketos::compile::compile;
use ketos::bytecode::Code;
use ketos::lexer::Lexer;
use ketos::parser::Parser;
use ketos::rc_vec::RcVec;
use ketos::restrict::RestrictConfig;
use itertools::Itertools;
use minutiae::prelude::*;
use minutiae::engine::serial::SerialEngine;
use minutiae::engine::iterator::SerialEntityIterator;
use minutiae::driver::middleware::MinDelay;
use minutiae::driver::BasicDriver;
use minutiae::universe::Universe2D;
use minutiae::util::{debug, translate_entity};
use pcg::PcgRng;
use rand::{Rng, SeedableRng};
use uuid::Uuid;
#[cfg(feature = "wasm")]
extern {
pub fn canvas_render(pixbuf_ptr: *const u8);
}
const UNIVERSE_SIZE: usize = 800;
const ANT_COUNT: usize = 2000;
const PRNG_SEED: [u64; 2] = [198918237842, 9];
const UNIVERSE_LENGTH: usize = UNIVERSE_SIZE * UNIVERSE_SIZE;
fn get_codes_from_source(context: &Context, src: &str) -> Result<Vec<Rc<Code>>, String> {
let lexer = Lexer::new(src, 0);
Parser::new(&context, lexer)
.parse_exprs()
.map_err(debug)?
.iter()
.map(|v| compile(&context, v))
.fold_results(Vec::new(), |mut acc, code| {
acc.push(Rc::new(code));
acc
})
.map_err(debug)
}
fn get_ant_restrictions() -> RestrictConfig {
RestrictConfig::strict()
}
fn get_ant_global_scope() -> Scope {
let global_scope = ketos::scope::GlobalScope::default("ant");
global_scope.add_named_value("UNIVERSE_SIZE", UNIVERSE_SIZE.into());
return Rc::new(global_scope)
}
fn get_ant_default_context() -> ketos::Context {
let scope = get_ant_global_scope();
let restrictions = get_ant_restrictions();
let context = ketos::Context::new(scope, restrictions);
// Fill the context with default items from our "standard library"
let std_src = include_str!("./ant_std.lisp");
let codes: Vec<Rc<Code>> = get_codes_from_source(&context, std_src)
.expect("You've got syntax errors in your standard library!");
for code in &codes {
ketos::exec::execute(&context, Rc::clone(code))
.expect("Error while executing standard library code!");
}
context
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum CellContents {
Empty,
Filled(u8),
Food(u16),
Anthill,
}
#[derive(Clone, Debug)]
struct CS {
contents: CellContents,
}
impl CellState for CS {}
impl Default for CS {
fn default() -> Self {
CS { contents: CellContents::Empty }
}
}
#[derive(Clone)]
struct Ant {
code: Vec<Rc<Code>>,
context: Context,
holding: CellContents,
}
impl Ant {
pub fn from_source(src: &str) -> Result<Self, String> {
let context = get_ant_default_context();
let codes = get_codes_from_source(&context, src)?;
Ok(Ant {
code: codes,
context: context,
holding: CellContents::Empty,
})
}
}
impl Debug for Ant {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Ant {{ code: {:?}, context: {{..}}, holding: {:?} }}", self.code, self.holding)
}
}
impl<'a> From<&'a ES> for Option<&'a Ant> {
fn from(entity_state: &'a ES) -> Self {
match entity_state {
&ES::Ant(ref ant) => Some(ant),
}
}
}
impl<'a> From<&'a mut ES> for Option<&'a mut Ant> {
fn from(entity_state: &'a mut ES) -> Self {
match entity_state {
&mut ES::Ant(ref mut ant) => Some(ant),
}
}
}
#[derive(Clone, Debug)]
enum ES {
Ant(Ant),
}
impl EntityState<CS> for ES {}
impl From<Ant> for ES {
fn from(ant: Ant) -> Self {
ES::Ant(ant)
}
}
#[derive(Clone)]
struct MES(ketos::Value);
impl Default for MES {
fn default() -> Self {
MES(ketos::Value::Unit)
}
}
impl MutEntityState for MES {}
enum CA {
}
impl CellAction<CS> for CA {}
#[derive(Debug)]
enum EA {
}
type U = Universe2D<CS, ES, MES>;
fn map_value_to_self_action(val: &Value) -> Result<SelfAction<CS, ES, EA>, String> {
match val {
&Value::List(ref list) => {
if list.is_empty() {
return Err("The provided action list was empty!".into());
}
match &list[0] {
&Value::String(ref action_type) => match action_type.as_ref() {
"translate" => {
if list.len() != 3 {
return Err(format!("Invalid amount of arguments provided to translate action: {}", list.len() - 1));
}
let arg1: isize = match &list[1] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 1 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn | (context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
None => { return; }, // The entity been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action, ..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) => translate_entity(
x_offset,
y_offset,
&mut universe.entities,
action.source_entity_index,
action.source_uuid,
UNIVERSE_SIZE
),
Action::EntityAction{ .. } | Action::CellAction{ .. } => unreachable!(),
_ => unimplemented!(),
}
}
fn exec_entity_action(_action: &OwnedAction<CS, ES, CA, EA>) {
unimplemented!(); // TODO
}
impl SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U> for AntEngine {
fn iter_entities(&self, _universe: &U) -> SerialEntityIterator<CS, ES> {
SerialEntityIterator::new(UNIVERSE_SIZE)
}
fn exec_actions(
&self,
universe: &mut U,
cell_actions: &[OwnedAction<CS, ES, CA, EA>],
self_actions: &[OwnedAction<CS, ES, CA, EA>],
entity_actions: &[OwnedAction<CS, ES, CA, EA>]
) {
for cell_action in cell_actions { exec_cell_action(cell_action, &mut universe.cells, &mut universe.entities); }
for self_action in self_actions { exec_self_action(universe, self_action); }
for entity_action in entity_actions { exec_entity_action(entity_action); }
}
fn drive_entity(
&mut self,
universe_index: usize,
entity: &Entity<CS, ES, MES>,
_: &U,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) {
match entity.state {
ES::Ant(Ant { ref code, ref context, .. }) => {
reset_action_buffers(context, universe_index);
for c in code {
match ketos::exec::execute(context, Rc::clone(&c)) {
Ok(_) => (),
Err(err) => {
println!("Entity script errored: {:?}", err);
return;
},
};
}
match process_action_buffers(
context,
cell_action_executor,
self_action_executor,
entity_action_executor
) {
Ok(()) => (),
Err(err) => println!("Error while retrieving action buffers from context: {}", err),
}
}
}
}
}
type OurSerialEngine = Box<SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U>>;
/// Given a coordinate of the universe, uses state of its cell and the entities that reside in it to determine a color
/// to display on the canvas. This is called each tick. The returned value is the color in RGBA.
fn calc_color(
cell: &Cell<CS>,
entity_indexes: &[usize],
entity_container: &EntityContainer<CS, ES, MES>
) -> [u8; 4] {
if !entity_indexes.is_empty() {
for i in entity_indexes {
match unsafe { &entity_container.get(*i).state } {
&ES::Ant { .. } => { return [91, 75, 11, 255] },
}
}
[12, 24, 222, 255]
} else {
match cell.state.contents {
CellContents::Anthill => [222, 233, 244, 255],
CellContents::Empty => [12, 12, 12, 255],
CellContents::Food(_) => [200, 30, 40, 255], // TODO: Different colors for different food amounts
CellContents::Filled(_) => [230, 230, 230, 255],
}
}
}
#[cfg(feature = "wasm")]
fn init(
universe: U,
engine: OurSerialEngine
) {
use minutiae::emscripten::{EmscriptenDriver, CanvasRenderer};
let driver = EmscriptenDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(CanvasRenderer::new(UNIVERSE_SIZE, calc_color, canvas_render)),
]);
}
#[cfg(not(feature = "wasm"))]
fn init(
universe: U,
engine: OurSerialEngine
) {
let driver = BasicDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(minutiae::driver::middleware::gif_renderer::GifRenderer::new(
"./out.gif", UNIVERSE_SIZE, calc_color
)),
]);
}
fn main() {
let conf = UniverseConf {
size: 800,
view_distance: 1,
};
let universe = Universe2D::new(conf, &mut WorldGenerator);
let engine: OurSerialEngine = Box::new(AntEngine);
init(universe, engine);
}
| reset_action_buffers | identifier_name |
main.rs | #![feature(try_from)]
extern crate itertools;
extern crate ketos;
extern crate minutiae;
extern crate pcg;
extern crate rand;
extern crate uuid;
use std::fmt::{self, Debug, Formatter};
use std::rc::Rc;
use ketos::{Context, GlobalScope, Scope, Value};
use ketos::compile::compile;
use ketos::bytecode::Code;
use ketos::lexer::Lexer;
use ketos::parser::Parser;
use ketos::rc_vec::RcVec;
use ketos::restrict::RestrictConfig;
use itertools::Itertools;
use minutiae::prelude::*;
use minutiae::engine::serial::SerialEngine;
use minutiae::engine::iterator::SerialEntityIterator;
use minutiae::driver::middleware::MinDelay;
use minutiae::driver::BasicDriver;
use minutiae::universe::Universe2D;
use minutiae::util::{debug, translate_entity};
use pcg::PcgRng;
use rand::{Rng, SeedableRng};
use uuid::Uuid;
#[cfg(feature = "wasm")]
extern {
pub fn canvas_render(pixbuf_ptr: *const u8);
}
const UNIVERSE_SIZE: usize = 800;
const ANT_COUNT: usize = 2000;
const PRNG_SEED: [u64; 2] = [198918237842, 9];
const UNIVERSE_LENGTH: usize = UNIVERSE_SIZE * UNIVERSE_SIZE;
fn get_codes_from_source(context: &Context, src: &str) -> Result<Vec<Rc<Code>>, String> {
let lexer = Lexer::new(src, 0);
Parser::new(&context, lexer)
.parse_exprs()
.map_err(debug)?
.iter()
.map(|v| compile(&context, v))
.fold_results(Vec::new(), |mut acc, code| {
acc.push(Rc::new(code));
acc
})
.map_err(debug)
}
fn get_ant_restrictions() -> RestrictConfig {
RestrictConfig::strict()
}
fn get_ant_global_scope() -> Scope {
let global_scope = ketos::scope::GlobalScope::default("ant");
global_scope.add_named_value("UNIVERSE_SIZE", UNIVERSE_SIZE.into());
return Rc::new(global_scope)
}
fn get_ant_default_context() -> ketos::Context {
let scope = get_ant_global_scope();
let restrictions = get_ant_restrictions();
let context = ketos::Context::new(scope, restrictions);
// Fill the context with default items from our "standard library"
let std_src = include_str!("./ant_std.lisp");
let codes: Vec<Rc<Code>> = get_codes_from_source(&context, std_src)
.expect("You've got syntax errors in your standard library!");
for code in &codes {
ketos::exec::execute(&context, Rc::clone(code))
.expect("Error while executing standard library code!");
}
context
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum CellContents {
Empty,
Filled(u8),
Food(u16),
Anthill,
}
#[derive(Clone, Debug)]
struct CS {
contents: CellContents,
}
impl CellState for CS {}
impl Default for CS {
fn default() -> Self {
CS { contents: CellContents::Empty }
}
}
#[derive(Clone)]
struct Ant {
code: Vec<Rc<Code>>,
context: Context,
holding: CellContents,
}
impl Ant {
pub fn from_source(src: &str) -> Result<Self, String> {
let context = get_ant_default_context();
let codes = get_codes_from_source(&context, src)?;
Ok(Ant {
code: codes,
context: context,
holding: CellContents::Empty,
})
}
}
impl Debug for Ant {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Ant {{ code: {:?}, context: {{..}}, holding: {:?} }}", self.code, self.holding)
}
}
impl<'a> From<&'a ES> for Option<&'a Ant> {
fn from(entity_state: &'a ES) -> Self {
match entity_state {
&ES::Ant(ref ant) => Some(ant),
}
}
}
impl<'a> From<&'a mut ES> for Option<&'a mut Ant> {
fn from(entity_state: &'a mut ES) -> Self {
match entity_state {
&mut ES::Ant(ref mut ant) => Some(ant),
}
}
}
#[derive(Clone, Debug)]
enum ES {
Ant(Ant),
}
impl EntityState<CS> for ES {}
impl From<Ant> for ES {
fn from(ant: Ant) -> Self {
ES::Ant(ant)
}
}
#[derive(Clone)]
struct MES(ketos::Value);
impl Default for MES {
fn default() -> Self {
MES(ketos::Value::Unit)
}
}
impl MutEntityState for MES {}
enum CA {
}
impl CellAction<CS> for CA {}
#[derive(Debug)]
enum EA {
}
type U = Universe2D<CS, ES, MES>;
fn map_value_to_self_action(val: &Value) -> Result<SelfAction<CS, ES, EA>, String> {
match val {
&Value::List(ref list) => {
if list.is_empty() {
return Err("The provided action list was empty!".into());
}
match &list[0] {
&Value::String(ref action_type) => match action_type.as_ref() {
"translate" => {
if list.len() != 3 {
return Err(format!("Invalid amount of arguments provided to translate action: {}", list.len() - 1));
}
let arg1: isize = match &list[1] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 1 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn reset_action_buffers(context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
None => { return; }, // The entity been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action, ..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) => translate_entity(
x_offset,
y_offset,
&mut universe.entities,
action.source_entity_index,
action.source_uuid,
UNIVERSE_SIZE
),
Action::EntityAction{ .. } | Action::CellAction{ .. } => unreachable!(),
_ => unimplemented!(),
}
}
fn exec_entity_action(_action: &OwnedAction<CS, ES, CA, EA>) {
unimplemented!(); // TODO
}
impl SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U> for AntEngine {
fn iter_entities(&self, _universe: &U) -> SerialEntityIterator<CS, ES> {
SerialEntityIterator::new(UNIVERSE_SIZE)
}
fn exec_actions(
&self,
universe: &mut U,
cell_actions: &[OwnedAction<CS, ES, CA, EA>],
self_actions: &[OwnedAction<CS, ES, CA, EA>],
entity_actions: &[OwnedAction<CS, ES, CA, EA>]
) {
for cell_action in cell_actions { exec_cell_action(cell_action, &mut universe.cells, &mut universe.entities); }
for self_action in self_actions { exec_self_action(universe, self_action); }
for entity_action in entity_actions { exec_entity_action(entity_action); }
}
fn drive_entity(
&mut self,
universe_index: usize,
entity: &Entity<CS, ES, MES>,
_: &U,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) {
match entity.state {
ES::Ant(Ant { ref code, ref context, .. }) => {
reset_action_buffers(context, universe_index);
for c in code {
match ketos::exec::execute(context, Rc::clone(&c)) {
Ok(_) => (),
Err(err) => {
println!("Entity script errored: {:?}", err);
return;
},
};
}
match process_action_buffers(
context,
cell_action_executor,
self_action_executor,
entity_action_executor
) {
Ok(()) => (),
Err(err) => println!("Error while retrieving action buffers from context: {}", err),
}
}
}
}
}
type OurSerialEngine = Box<SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U>>;
/// Given a coordinate of the universe, uses state of its cell and the entities that reside in it to determine a color
/// to display on the canvas. This is called each tick. The returned value is the color in RGBA.
fn calc_color(
cell: &Cell<CS>,
entity_indexes: &[usize],
entity_container: &EntityContainer<CS, ES, MES>
) -> [u8; 4] {
if !entity_indexes.is_empty() {
for i in entity_indexes {
match unsafe { &entity_container.get(*i).state } {
&ES::Ant { .. } => { return [91, 75, 11, 255] },
}
}
[12, 24, 222, 255]
} else |
}
#[cfg(feature = "wasm")]
fn init(
universe: U,
engine: OurSerialEngine
) {
use minutiae::emscripten::{EmscriptenDriver, CanvasRenderer};
let driver = EmscriptenDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(CanvasRenderer::new(UNIVERSE_SIZE, calc_color, canvas_render)),
]);
}
#[cfg(not(feature = "wasm"))]
fn init(
universe: U,
engine: OurSerialEngine
) {
let driver = BasicDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(minutiae::driver::middleware::gif_renderer::GifRenderer::new(
"./out.gif", UNIVERSE_SIZE, calc_color
)),
]);
}
fn main() {
let conf = UniverseConf {
size: 800,
view_distance: 1,
};
let universe = Universe2D::new(conf, &mut WorldGenerator);
let engine: OurSerialEngine = Box::new(AntEngine);
init(universe, engine);
}
| {
match cell.state.contents {
CellContents::Anthill => [222, 233, 244, 255],
CellContents::Empty => [12, 12, 12, 255],
CellContents::Food(_) => [200, 30, 40, 255], // TODO: Different colors for different food amounts
CellContents::Filled(_) => [230, 230, 230, 255],
}
} | conditional_block |
main.rs | #![feature(try_from)]
extern crate itertools;
extern crate ketos;
extern crate minutiae;
extern crate pcg;
extern crate rand;
extern crate uuid;
use std::fmt::{self, Debug, Formatter};
use std::rc::Rc;
use ketos::{Context, GlobalScope, Scope, Value};
use ketos::compile::compile;
use ketos::bytecode::Code;
use ketos::lexer::Lexer;
use ketos::parser::Parser;
use ketos::rc_vec::RcVec;
use ketos::restrict::RestrictConfig;
use itertools::Itertools;
use minutiae::prelude::*;
use minutiae::engine::serial::SerialEngine;
use minutiae::engine::iterator::SerialEntityIterator;
use minutiae::driver::middleware::MinDelay;
use minutiae::driver::BasicDriver;
use minutiae::universe::Universe2D;
use minutiae::util::{debug, translate_entity};
use pcg::PcgRng;
use rand::{Rng, SeedableRng};
use uuid::Uuid;
#[cfg(feature = "wasm")]
extern {
pub fn canvas_render(pixbuf_ptr: *const u8);
}
const UNIVERSE_SIZE: usize = 800;
const ANT_COUNT: usize = 2000;
const PRNG_SEED: [u64; 2] = [198918237842, 9];
const UNIVERSE_LENGTH: usize = UNIVERSE_SIZE * UNIVERSE_SIZE;
fn get_codes_from_source(context: &Context, src: &str) -> Result<Vec<Rc<Code>>, String> {
let lexer = Lexer::new(src, 0);
Parser::new(&context, lexer)
.parse_exprs()
.map_err(debug)?
.iter()
.map(|v| compile(&context, v))
.fold_results(Vec::new(), |mut acc, code| {
acc.push(Rc::new(code));
acc
})
.map_err(debug)
}
fn get_ant_restrictions() -> RestrictConfig {
RestrictConfig::strict()
}
fn get_ant_global_scope() -> Scope {
let global_scope = ketos::scope::GlobalScope::default("ant");
global_scope.add_named_value("UNIVERSE_SIZE", UNIVERSE_SIZE.into());
return Rc::new(global_scope)
}
fn get_ant_default_context() -> ketos::Context {
let scope = get_ant_global_scope();
let restrictions = get_ant_restrictions();
let context = ketos::Context::new(scope, restrictions);
// Fill the context with default items from our "standard library"
let std_src = include_str!("./ant_std.lisp");
let codes: Vec<Rc<Code>> = get_codes_from_source(&context, std_src)
.expect("You've got syntax errors in your standard library!");
for code in &codes {
ketos::exec::execute(&context, Rc::clone(code))
.expect("Error while executing standard library code!");
}
context
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum CellContents {
Empty,
Filled(u8),
Food(u16),
Anthill,
}
#[derive(Clone, Debug)]
struct CS {
contents: CellContents,
}
impl CellState for CS {}
impl Default for CS {
fn default() -> Self {
CS { contents: CellContents::Empty }
}
}
#[derive(Clone)]
struct Ant {
code: Vec<Rc<Code>>,
context: Context,
holding: CellContents,
}
impl Ant {
pub fn from_source(src: &str) -> Result<Self, String> {
let context = get_ant_default_context();
let codes = get_codes_from_source(&context, src)?;
Ok(Ant {
code: codes,
context: context,
holding: CellContents::Empty,
})
}
}
impl Debug for Ant {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Ant {{ code: {:?}, context: {{..}}, holding: {:?} }}", self.code, self.holding)
}
}
impl<'a> From<&'a ES> for Option<&'a Ant> {
fn from(entity_state: &'a ES) -> Self {
match entity_state {
&ES::Ant(ref ant) => Some(ant),
}
}
}
impl<'a> From<&'a mut ES> for Option<&'a mut Ant> {
fn from(entity_state: &'a mut ES) -> Self {
match entity_state {
&mut ES::Ant(ref mut ant) => Some(ant),
}
}
}
#[derive(Clone, Debug)]
enum ES {
Ant(Ant),
}
impl EntityState<CS> for ES {}
impl From<Ant> for ES {
fn from(ant: Ant) -> Self |
}
#[derive(Clone)]
struct MES(ketos::Value);
impl Default for MES {
fn default() -> Self {
MES(ketos::Value::Unit)
}
}
impl MutEntityState for MES {}
enum CA {
}
impl CellAction<CS> for CA {}
#[derive(Debug)]
enum EA {
}
type U = Universe2D<CS, ES, MES>;
fn map_value_to_self_action(val: &Value) -> Result<SelfAction<CS, ES, EA>, String> {
match val {
&Value::List(ref list) => {
if list.is_empty() {
return Err("The provided action list was empty!".into());
}
match &list[0] {
&Value::String(ref action_type) => match action_type.as_ref() {
"translate" => {
if list.len() != 3 {
return Err(format!("Invalid amount of arguments provided to translate action: {}", list.len() - 1));
}
let arg1: isize = match &list[1] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 1 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn reset_action_buffers(context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
None => { return; }, // The entity been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action, ..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) => translate_entity(
x_offset,
y_offset,
&mut universe.entities,
action.source_entity_index,
action.source_uuid,
UNIVERSE_SIZE
),
Action::EntityAction{ .. } | Action::CellAction{ .. } => unreachable!(),
_ => unimplemented!(),
}
}
fn exec_entity_action(_action: &OwnedAction<CS, ES, CA, EA>) {
unimplemented!(); // TODO
}
impl SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U> for AntEngine {
fn iter_entities(&self, _universe: &U) -> SerialEntityIterator<CS, ES> {
SerialEntityIterator::new(UNIVERSE_SIZE)
}
fn exec_actions(
&self,
universe: &mut U,
cell_actions: &[OwnedAction<CS, ES, CA, EA>],
self_actions: &[OwnedAction<CS, ES, CA, EA>],
entity_actions: &[OwnedAction<CS, ES, CA, EA>]
) {
for cell_action in cell_actions { exec_cell_action(cell_action, &mut universe.cells, &mut universe.entities); }
for self_action in self_actions { exec_self_action(universe, self_action); }
for entity_action in entity_actions { exec_entity_action(entity_action); }
}
fn drive_entity(
&mut self,
universe_index: usize,
entity: &Entity<CS, ES, MES>,
_: &U,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) {
match entity.state {
ES::Ant(Ant { ref code, ref context, .. }) => {
reset_action_buffers(context, universe_index);
for c in code {
match ketos::exec::execute(context, Rc::clone(&c)) {
Ok(_) => (),
Err(err) => {
println!("Entity script errored: {:?}", err);
return;
},
};
}
match process_action_buffers(
context,
cell_action_executor,
self_action_executor,
entity_action_executor
) {
Ok(()) => (),
Err(err) => println!("Error while retrieving action buffers from context: {}", err),
}
}
}
}
}
type OurSerialEngine = Box<SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U>>;
/// Given a coordinate of the universe, uses state of its cell and the entities that reside in it to determine a color
/// to display on the canvas. This is called each tick. The returned value is the color in RGBA.
fn calc_color(
cell: &Cell<CS>,
entity_indexes: &[usize],
entity_container: &EntityContainer<CS, ES, MES>
) -> [u8; 4] {
if !entity_indexes.is_empty() {
for i in entity_indexes {
match unsafe { &entity_container.get(*i).state } {
&ES::Ant { .. } => { return [91, 75, 11, 255] },
}
}
[12, 24, 222, 255]
} else {
match cell.state.contents {
CellContents::Anthill => [222, 233, 244, 255],
CellContents::Empty => [12, 12, 12, 255],
CellContents::Food(_) => [200, 30, 40, 255], // TODO: Different colors for different food amounts
CellContents::Filled(_) => [230, 230, 230, 255],
}
}
}
#[cfg(feature = "wasm")]
fn init(
universe: U,
engine: OurSerialEngine
) {
use minutiae::emscripten::{EmscriptenDriver, CanvasRenderer};
let driver = EmscriptenDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(CanvasRenderer::new(UNIVERSE_SIZE, calc_color, canvas_render)),
]);
}
#[cfg(not(feature = "wasm"))]
fn init(
universe: U,
engine: OurSerialEngine
) {
let driver = BasicDriver;
driver.init(universe, engine, &mut [
Box::new(MinDelay::from_tps(59.99)),
Box::new(minutiae::driver::middleware::gif_renderer::GifRenderer::new(
"./out.gif", UNIVERSE_SIZE, calc_color
)),
]);
}
fn main() {
let conf = UniverseConf {
size: 800,
view_distance: 1,
};
let universe = Universe2D::new(conf, &mut WorldGenerator);
let engine: OurSerialEngine = Box::new(AntEngine);
init(universe, engine);
}
| {
ES::Ant(ant)
} | identifier_body |
test_utils.go | /*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"os"
"time"
b64 "encoding/base64"
. "github.com/onsi/gomega"
amkovmwarecomv1alpha1 "github.com/vmware/global-load-balancing-services-for-kubernetes/federator/api/v1alpha1"
gslbalphav1 "github.com/vmware/global-load-balancing-services-for-kubernetes/internal/apis/amko/v1alpha1"
gdpalphav2 "github.com/vmware/global-load-balancing-services-for-kubernetes/internal/apis/amko/v1alpha2"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var testEnv1 *envtest.Environment
var testEnv2 *envtest.Environment
const (
Cluster1 = "cluster1"
Cluster2 = "cluster2"
TestAMKOVersion = "1.4.2"
TestAMKODifferentVersion = "1.5.1"
TestAMKOClusterName = "test-amko-cluster"
TestGSLBSecret = "gslb-config-secret"
AMKOCRDs = "../../helm/amko/crds"
TestGCName = "test-gc"
TestGDPName = "test-gdp"
TestLeaderIP = "10.10.10.10"
)
const KubeConfigData = `
apiVersion: v1
clusters: []
contexts: []
kind: Config
preferences: {}
users: []
`
type ClustersKubeConfig struct {
APIVersion string `yaml:"apiVersion"`
Clusters []ClusterData `yaml:"clusters"`
Contexts []KubeContextData `yaml:"contexts"`
Kind string `yaml:"kind"`
Users []UserData `yaml:"users"`
}
type ClusterData struct {
Cluster ClusterServerData `yaml:"cluster"`
Name string `yaml:"name"`
}
type ClusterServerData struct {
CAData string `yaml:"certificate-authority-data"`
Server string `yaml:"server"`
}
type KubeContextData struct {
Context ContextData `yaml:"context"`
Name string `yaml:"name"`
}
type ContextData struct {
Cluster string `yaml:"cluster"`
User string `yaml:"user"`
}
type UserData struct {
Name string `yaml:"name"`
User UserID `yaml:"user"`
}
type UserID struct {
ClientCert string `yaml:"client-certificate-data"`
ClientKey string `yaml:"client-key-data"`
}
func BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2 client.Client) {
user1 := Cluster1 + "-user"
user2 := Cluster2 + "-user"
// kData := make(map[string]interface{})
kData := ClustersKubeConfig{}
Expect(yaml.Unmarshal([]byte(KubeConfigData), &kData)).Should(Succeed())
kData.Clusters = []ClusterData{
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CAData)),
Server: testEnv1.Config.Host,
},
Name: Cluster1,
},
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CAData)),
Server: testEnv2.Config.Host,
},
Name: Cluster2,
},
}
kData.Contexts = []KubeContextData{
{
Context: ContextData{
Cluster: Cluster1,
User: user1,
},
Name: Cluster1,
},
{
Context: ContextData{
Cluster: Cluster2,
User: user2,
},
Name: Cluster2,
},
}
kData.Users = []UserData{
{
Name: user1,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.KeyData)),
},
},
{
Name: user2,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.KeyData)),
},
},
}
// generate a string out of kubeCfg
kubeCfgData, err := yaml.Marshal(kData)
Expect(err).NotTo(HaveOccurred())
// create the "avi-system" namespace
nsObj := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: AviSystemNS,
},
}
Expect(k8sClient1.Create(context.TODO(), &nsObj)).Should(Succeed())
Expect(os.Setenv("GSLB_CONFIG", string(kubeCfgData))).Should(Succeed())
// create "avi-system" namespace on the other cluster as well
nsObj.ObjectMeta.ResourceVersion = ""
Expect(k8sClient2.Create(context.TODO(), &nsObj)).Should(Succeed())
}
func getTestAMKOClusterObj(currentContext string, isLeader bool) amkovmwarecomv1alpha1.AMKOCluster {
return amkovmwarecomv1alpha1.AMKOCluster{
ObjectMeta: metav1.ObjectMeta{
Name: TestAMKOClusterName,
Namespace: AviSystemNS,
},
Spec: amkovmwarecomv1alpha1.AMKOClusterSpec{
ClusterContext: currentContext,
IsLeader: isLeader,
Clusters: []string{Cluster1, Cluster2},
Version: TestAMKOVersion,
},
}
}
func getTestAMKOClusterStatusReason(status amkovmwarecomv1alpha1.AMKOClusterStatus,
statusType string) map[string]string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return map[string]string{
"reason": condition.Reason,
"status": condition.Status,
}
}
}
return map[string]string{}
}
func getTestAMKOClusterStatusMsg(status amkovmwarecomv1alpha1.AMKOClusterStatus, statusType string) string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return condition.Status
}
}
return ""
}
func getTestGCObj() gslbalphav1.GSLBConfig {
return gslbalphav1.GSLBConfig{
ObjectMeta: metav1.ObjectMeta{
Name: TestGCName,
Namespace: AviSystemNS,
},
Spec: gslbalphav1.GSLBConfigSpec{
GSLBLeader: gslbalphav1.GSLBLeader{
Credentials: "test-creds",
ControllerVersion: "20.1.4",
ControllerIP: TestLeaderIP,
},
MemberClusters: []gslbalphav1.MemberCluster{
{
ClusterContext: Cluster1,
},
{
ClusterContext: Cluster2,
},
},
RefreshInterval: 3600,
LogLevel: "INFO",
},
}
}
func getTestGDPObject() gdpalphav2.GlobalDeploymentPolicy {
label := make(map[string]string)
label["key"] = "value"
return gdpalphav2.GlobalDeploymentPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: TestGDPName,
Namespace: AviSystemNS,
},
Spec: gdpalphav2.GDPSpec{
MatchRules: gdpalphav2.MatchRules{
AppSelector: gdpalphav2.AppSelector{
Label: label,
},
},
MatchClusters: []gdpalphav2.ClusterProperty{
{
Cluster: Cluster1,
},
{
Cluster: Cluster2,
},
},
TTL: getGDPTTLPtr(300),
},
}
}
func getGDPTTLPtr(val int) *int {
ttl := val
return &ttl
}
func createTestGCAndGDPObjs(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
Expect(k8sClient.Create(ctx, gc)).Should(Succeed())
Expect(k8sClient.Create(ctx, gdp)).Should(Succeed())
}
func deleteTestGCAndGDPObj(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
err := k8sClient.Delete(ctx, gc)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, gdp)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
}
func TestGCGDPNotFederated(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(BeZero())
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(BeZero())
}
func TestGCGDPExist(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(Equal(1))
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(Equal(1))
}
// func VerifyTestAMKOClusterObjectSuccess(k8sClient client.Client, statusType string) {
// Eventually(func() string {
// var obj amkovmwarecomv1alpha1.AMKOCluster
// Expect(k8sClient.Get(context.TODO(),
// types.NamespacedName{
// Name: TestAMKOClusterName,
// Namespace: AviSystemNS},
// &obj)).Should(Succeed())
// return getTestAMKOClusterStatusReason(obj.Status, statusType)
// }, 5*time.Second, 1*time.Second).Should(Equal("Federation successful"))
// }
func | (k8sClient client.Client, statusType, statusMsg, failureMsg string) {
Eventually(func() map[string]string {
var obj amkovmwarecomv1alpha1.AMKOCluster
Expect(k8sClient.Get(context.TODO(),
types.NamespacedName{
Name: TestAMKOClusterName,
Namespace: AviSystemNS},
&obj)).Should(Succeed())
fmt.Printf("status of AMKOCluster: %v\n", obj.Status)
return getTestAMKOClusterStatusReason(obj.Status, statusType)
}, 5*time.Second, 1*time.Second).Should(Equal(map[string]string{"reason": failureMsg,
"status": statusMsg,
}))
}
func CleanupTestObjects(k8sClient1, k8sClient2 client.Client,
amkoCluster1, amkoCluster2 *amkovmwarecomv1alpha1.AMKOCluster,
gcObj *gslbalphav1.GSLBConfig, gdpObj *gdpalphav2.GlobalDeploymentPolicy) {
ctx := context.Background()
Expect(k8sClient1.Delete(ctx, amkoCluster1)).Should(Succeed())
deleteTestGCAndGDPObj(ctx, k8sClient1, gcObj, gdpObj)
Expect(k8sClient2.Delete(ctx, amkoCluster2)).Should(Succeed())
deleteTestGCAndGDPObj(ctx, k8sClient2, gcObj, gdpObj)
}
func VerifySuccessForAllStatusFields(k8sClient client.Client) {
VerifyTestAMKOClusterStatus(k8sClient, CurrentAMKOClusterValidationStatusField,
StatusMsgValidAMKOCluster, "")
VerifyTestAMKOClusterStatus(k8sClient, ClusterContextsStatusField,
StatusMsgClusterClientsSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, MemberValidationStatusField,
StatusMembersValidationSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, GSLBConfigFederationStatusField,
StatusGSLBConfigFederationSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, GDPFederationStatusField,
StatusGDPFederationSuccess, "")
}
| VerifyTestAMKOClusterStatus | identifier_name |
test_utils.go | /*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"os"
"time"
b64 "encoding/base64"
. "github.com/onsi/gomega"
amkovmwarecomv1alpha1 "github.com/vmware/global-load-balancing-services-for-kubernetes/federator/api/v1alpha1"
gslbalphav1 "github.com/vmware/global-load-balancing-services-for-kubernetes/internal/apis/amko/v1alpha1"
gdpalphav2 "github.com/vmware/global-load-balancing-services-for-kubernetes/internal/apis/amko/v1alpha2"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var testEnv1 *envtest.Environment
var testEnv2 *envtest.Environment
const (
Cluster1 = "cluster1"
Cluster2 = "cluster2"
TestAMKOVersion = "1.4.2"
TestAMKODifferentVersion = "1.5.1"
TestAMKOClusterName = "test-amko-cluster"
TestGSLBSecret = "gslb-config-secret"
AMKOCRDs = "../../helm/amko/crds"
TestGCName = "test-gc"
TestGDPName = "test-gdp"
TestLeaderIP = "10.10.10.10"
)
const KubeConfigData = `
apiVersion: v1
clusters: []
contexts: []
kind: Config
preferences: {}
users: []
`
type ClustersKubeConfig struct {
APIVersion string `yaml:"apiVersion"`
Clusters []ClusterData `yaml:"clusters"`
Contexts []KubeContextData `yaml:"contexts"`
Kind string `yaml:"kind"`
Users []UserData `yaml:"users"`
}
type ClusterData struct {
Cluster ClusterServerData `yaml:"cluster"`
Name string `yaml:"name"`
}
type ClusterServerData struct {
CAData string `yaml:"certificate-authority-data"`
Server string `yaml:"server"`
}
type KubeContextData struct {
Context ContextData `yaml:"context"`
Name string `yaml:"name"`
}
type ContextData struct {
Cluster string `yaml:"cluster"`
User string `yaml:"user"`
}
type UserData struct {
Name string `yaml:"name"`
User UserID `yaml:"user"`
}
type UserID struct {
ClientCert string `yaml:"client-certificate-data"`
ClientKey string `yaml:"client-key-data"`
}
func BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2 client.Client) {
user1 := Cluster1 + "-user"
user2 := Cluster2 + "-user"
// kData := make(map[string]interface{})
kData := ClustersKubeConfig{}
Expect(yaml.Unmarshal([]byte(KubeConfigData), &kData)).Should(Succeed())
kData.Clusters = []ClusterData{
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CAData)),
Server: testEnv1.Config.Host,
},
Name: Cluster1,
},
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CAData)),
Server: testEnv2.Config.Host,
},
Name: Cluster2,
},
}
kData.Contexts = []KubeContextData{
{
Context: ContextData{
Cluster: Cluster1,
User: user1,
},
Name: Cluster1,
},
{
Context: ContextData{
Cluster: Cluster2,
User: user2,
},
Name: Cluster2,
},
}
kData.Users = []UserData{
{
Name: user1,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.KeyData)),
},
},
{
Name: user2,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.KeyData)),
},
},
}
// generate a string out of kubeCfg
kubeCfgData, err := yaml.Marshal(kData)
Expect(err).NotTo(HaveOccurred())
// create the "avi-system" namespace
nsObj := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: AviSystemNS,
},
}
Expect(k8sClient1.Create(context.TODO(), &nsObj)).Should(Succeed())
Expect(os.Setenv("GSLB_CONFIG", string(kubeCfgData))).Should(Succeed())
// create "avi-system" namespace on the other cluster as well
nsObj.ObjectMeta.ResourceVersion = ""
Expect(k8sClient2.Create(context.TODO(), &nsObj)).Should(Succeed())
}
func getTestAMKOClusterObj(currentContext string, isLeader bool) amkovmwarecomv1alpha1.AMKOCluster |
func getTestAMKOClusterStatusReason(status amkovmwarecomv1alpha1.AMKOClusterStatus,
statusType string) map[string]string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return map[string]string{
"reason": condition.Reason,
"status": condition.Status,
}
}
}
return map[string]string{}
}
func getTestAMKOClusterStatusMsg(status amkovmwarecomv1alpha1.AMKOClusterStatus, statusType string) string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return condition.Status
}
}
return ""
}
func getTestGCObj() gslbalphav1.GSLBConfig {
return gslbalphav1.GSLBConfig{
ObjectMeta: metav1.ObjectMeta{
Name: TestGCName,
Namespace: AviSystemNS,
},
Spec: gslbalphav1.GSLBConfigSpec{
GSLBLeader: gslbalphav1.GSLBLeader{
Credentials: "test-creds",
ControllerVersion: "20.1.4",
ControllerIP: TestLeaderIP,
},
MemberClusters: []gslbalphav1.MemberCluster{
{
ClusterContext: Cluster1,
},
{
ClusterContext: Cluster2,
},
},
RefreshInterval: 3600,
LogLevel: "INFO",
},
}
}
func getTestGDPObject() gdpalphav2.GlobalDeploymentPolicy {
label := make(map[string]string)
label["key"] = "value"
return gdpalphav2.GlobalDeploymentPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: TestGDPName,
Namespace: AviSystemNS,
},
Spec: gdpalphav2.GDPSpec{
MatchRules: gdpalphav2.MatchRules{
AppSelector: gdpalphav2.AppSelector{
Label: label,
},
},
MatchClusters: []gdpalphav2.ClusterProperty{
{
Cluster: Cluster1,
},
{
Cluster: Cluster2,
},
},
TTL: getGDPTTLPtr(300),
},
}
}
func getGDPTTLPtr(val int) *int {
ttl := val
return &ttl
}
func createTestGCAndGDPObjs(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
Expect(k8sClient.Create(ctx, gc)).Should(Succeed())
Expect(k8sClient.Create(ctx, gdp)).Should(Succeed())
}
func deleteTestGCAndGDPObj(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
err := k8sClient.Delete(ctx, gc)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, gdp)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
}
func TestGCGDPNotFederated(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(BeZero())
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(BeZero())
}
func TestGCGDPExist(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(Equal(1))
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(Equal(1))
}
// func VerifyTestAMKOClusterObjectSuccess(k8sClient client.Client, statusType string) {
// Eventually(func() string {
// var obj amkovmwarecomv1alpha1.AMKOCluster
// Expect(k8sClient.Get(context.TODO(),
// types.NamespacedName{
// Name: TestAMKOClusterName,
// Namespace: AviSystemNS},
// &obj)).Should(Succeed())
// return getTestAMKOClusterStatusReason(obj.Status, statusType)
// }, 5*time.Second, 1*time.Second).Should(Equal("Federation successful"))
// }
func VerifyTestAMKOClusterStatus(k8sClient client.Client, statusType, statusMsg, failureMsg string) {
Eventually(func() map[string]string {
var obj amkovmwarecomv1alpha1.AMKOCluster
Expect(k8sClient.Get(context.TODO(),
types.NamespacedName{
Name: TestAMKOClusterName,
Namespace: AviSystemNS},
&obj)).Should(Succeed())
fmt.Printf("status of AMKOCluster: %v\n", obj.Status)
return getTestAMKOClusterStatusReason(obj.Status, statusType)
}, 5*time.Second, 1*time.Second).Should(Equal(map[string]string{"reason": failureMsg,
"status": statusMsg,
}))
}
func CleanupTestObjects(k8sClient1, k8sClient2 client.Client,
amkoCluster1, amkoCluster2 *amkovmwarecomv1alpha1.AMKOCluster,
gcObj *gslbalphav1.GSLBConfig, gdpObj *gdpalphav2.GlobalDeploymentPolicy) {
ctx := context.Background()
Expect(k8sClient1.Delete(ctx, amkoCluster1)).Should(Succeed())
deleteTestGCAndGDPObj(ctx, k8sClient1, gcObj, gdpObj)
Expect(k8sClient2.Delete(ctx, amkoCluster2)).Should(Succeed())
deleteTestGCAndGDPObj(ctx, k8sClient2, gcObj, gdpObj)
}
func VerifySuccessForAllStatusFields(k8sClient client.Client) {
VerifyTestAMKOClusterStatus(k8sClient, CurrentAMKOClusterValidationStatusField,
StatusMsgValidAMKOCluster, "")
VerifyTestAMKOClusterStatus(k8sClient, ClusterContextsStatusField,
StatusMsgClusterClientsSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, MemberValidationStatusField,
StatusMembersValidationSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, GSLBConfigFederationStatusField,
StatusGSLBConfigFederationSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, GDPFederationStatusField,
StatusGDPFederationSuccess, "")
}
| {
return amkovmwarecomv1alpha1.AMKOCluster{
ObjectMeta: metav1.ObjectMeta{
Name: TestAMKOClusterName,
Namespace: AviSystemNS,
},
Spec: amkovmwarecomv1alpha1.AMKOClusterSpec{
ClusterContext: currentContext,
IsLeader: isLeader,
Clusters: []string{Cluster1, Cluster2},
Version: TestAMKOVersion,
},
}
} | identifier_body |
test_utils.go | /*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"os"
"time"
b64 "encoding/base64"
. "github.com/onsi/gomega"
amkovmwarecomv1alpha1 "github.com/vmware/global-load-balancing-services-for-kubernetes/federator/api/v1alpha1"
gslbalphav1 "github.com/vmware/global-load-balancing-services-for-kubernetes/internal/apis/amko/v1alpha1"
gdpalphav2 "github.com/vmware/global-load-balancing-services-for-kubernetes/internal/apis/amko/v1alpha2"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var testEnv1 *envtest.Environment
var testEnv2 *envtest.Environment
const (
Cluster1 = "cluster1"
Cluster2 = "cluster2"
TestAMKOVersion = "1.4.2"
TestAMKODifferentVersion = "1.5.1"
TestAMKOClusterName = "test-amko-cluster"
TestGSLBSecret = "gslb-config-secret"
AMKOCRDs = "../../helm/amko/crds"
TestGCName = "test-gc"
TestGDPName = "test-gdp"
TestLeaderIP = "10.10.10.10"
)
const KubeConfigData = `
apiVersion: v1
clusters: []
contexts: []
kind: Config
preferences: {}
users: []
`
type ClustersKubeConfig struct {
APIVersion string `yaml:"apiVersion"`
Clusters []ClusterData `yaml:"clusters"`
Contexts []KubeContextData `yaml:"contexts"`
Kind string `yaml:"kind"`
Users []UserData `yaml:"users"`
}
type ClusterData struct {
Cluster ClusterServerData `yaml:"cluster"`
Name string `yaml:"name"`
}
type ClusterServerData struct {
CAData string `yaml:"certificate-authority-data"`
Server string `yaml:"server"`
}
type KubeContextData struct {
Context ContextData `yaml:"context"`
Name string `yaml:"name"`
}
type ContextData struct {
Cluster string `yaml:"cluster"`
User string `yaml:"user"`
}
type UserData struct {
Name string `yaml:"name"`
User UserID `yaml:"user"`
}
type UserID struct {
ClientCert string `yaml:"client-certificate-data"`
ClientKey string `yaml:"client-key-data"`
}
func BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2 client.Client) {
user1 := Cluster1 + "-user"
user2 := Cluster2 + "-user"
// kData := make(map[string]interface{})
kData := ClustersKubeConfig{}
Expect(yaml.Unmarshal([]byte(KubeConfigData), &kData)).Should(Succeed())
kData.Clusters = []ClusterData{
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CAData)),
Server: testEnv1.Config.Host,
},
Name: Cluster1,
},
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CAData)),
Server: testEnv2.Config.Host,
},
Name: Cluster2,
},
}
kData.Contexts = []KubeContextData{
{
Context: ContextData{
Cluster: Cluster1,
User: user1,
},
Name: Cluster1,
},
{
Context: ContextData{
Cluster: Cluster2,
User: user2,
},
Name: Cluster2,
},
}
kData.Users = []UserData{
{
Name: user1,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.KeyData)),
},
},
{
Name: user2,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.KeyData)),
},
},
}
// generate a string out of kubeCfg
kubeCfgData, err := yaml.Marshal(kData)
Expect(err).NotTo(HaveOccurred())
// create the "avi-system" namespace
nsObj := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: AviSystemNS,
},
}
Expect(k8sClient1.Create(context.TODO(), &nsObj)).Should(Succeed())
Expect(os.Setenv("GSLB_CONFIG", string(kubeCfgData))).Should(Succeed())
// create "avi-system" namespace on the other cluster as well
nsObj.ObjectMeta.ResourceVersion = ""
Expect(k8sClient2.Create(context.TODO(), &nsObj)).Should(Succeed())
}
func getTestAMKOClusterObj(currentContext string, isLeader bool) amkovmwarecomv1alpha1.AMKOCluster {
return amkovmwarecomv1alpha1.AMKOCluster{
ObjectMeta: metav1.ObjectMeta{
Name: TestAMKOClusterName,
Namespace: AviSystemNS,
},
Spec: amkovmwarecomv1alpha1.AMKOClusterSpec{
ClusterContext: currentContext,
IsLeader: isLeader,
Clusters: []string{Cluster1, Cluster2},
Version: TestAMKOVersion,
},
}
}
func getTestAMKOClusterStatusReason(status amkovmwarecomv1alpha1.AMKOClusterStatus,
statusType string) map[string]string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return map[string]string{
"reason": condition.Reason,
"status": condition.Status,
}
}
}
return map[string]string{}
}
func getTestAMKOClusterStatusMsg(status amkovmwarecomv1alpha1.AMKOClusterStatus, statusType string) string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return condition.Status
}
}
return ""
}
func getTestGCObj() gslbalphav1.GSLBConfig {
return gslbalphav1.GSLBConfig{
ObjectMeta: metav1.ObjectMeta{
Name: TestGCName,
Namespace: AviSystemNS,
},
Spec: gslbalphav1.GSLBConfigSpec{
GSLBLeader: gslbalphav1.GSLBLeader{
Credentials: "test-creds",
ControllerVersion: "20.1.4",
ControllerIP: TestLeaderIP,
},
MemberClusters: []gslbalphav1.MemberCluster{
{
ClusterContext: Cluster1,
},
{
ClusterContext: Cluster2,
},
},
RefreshInterval: 3600,
LogLevel: "INFO",
},
}
}
func getTestGDPObject() gdpalphav2.GlobalDeploymentPolicy {
label := make(map[string]string)
label["key"] = "value"
return gdpalphav2.GlobalDeploymentPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: TestGDPName,
Namespace: AviSystemNS,
},
Spec: gdpalphav2.GDPSpec{
MatchRules: gdpalphav2.MatchRules{
AppSelector: gdpalphav2.AppSelector{
Label: label,
},
},
MatchClusters: []gdpalphav2.ClusterProperty{
{
Cluster: Cluster1,
},
{
Cluster: Cluster2,
},
},
TTL: getGDPTTLPtr(300),
},
}
}
func getGDPTTLPtr(val int) *int {
ttl := val
return &ttl
}
func createTestGCAndGDPObjs(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
Expect(k8sClient.Create(ctx, gc)).Should(Succeed())
Expect(k8sClient.Create(ctx, gdp)).Should(Succeed())
}
func deleteTestGCAndGDPObj(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
err := k8sClient.Delete(ctx, gc)
if err != nil && k8serrors.IsNotFound(err) |
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, gdp)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
}
func TestGCGDPNotFederated(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(BeZero())
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(BeZero())
}
func TestGCGDPExist(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(Equal(1))
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(Equal(1))
}
// func VerifyTestAMKOClusterObjectSuccess(k8sClient client.Client, statusType string) {
// Eventually(func() string {
// var obj amkovmwarecomv1alpha1.AMKOCluster
// Expect(k8sClient.Get(context.TODO(),
// types.NamespacedName{
// Name: TestAMKOClusterName,
// Namespace: AviSystemNS},
// &obj)).Should(Succeed())
// return getTestAMKOClusterStatusReason(obj.Status, statusType)
// }, 5*time.Second, 1*time.Second).Should(Equal("Federation successful"))
// }
func VerifyTestAMKOClusterStatus(k8sClient client.Client, statusType, statusMsg, failureMsg string) {
Eventually(func() map[string]string {
var obj amkovmwarecomv1alpha1.AMKOCluster
Expect(k8sClient.Get(context.TODO(),
types.NamespacedName{
Name: TestAMKOClusterName,
Namespace: AviSystemNS},
&obj)).Should(Succeed())
fmt.Printf("status of AMKOCluster: %v\n", obj.Status)
return getTestAMKOClusterStatusReason(obj.Status, statusType)
}, 5*time.Second, 1*time.Second).Should(Equal(map[string]string{"reason": failureMsg,
"status": statusMsg,
}))
}
func CleanupTestObjects(k8sClient1, k8sClient2 client.Client,
amkoCluster1, amkoCluster2 *amkovmwarecomv1alpha1.AMKOCluster,
gcObj *gslbalphav1.GSLBConfig, gdpObj *gdpalphav2.GlobalDeploymentPolicy) {
ctx := context.Background()
Expect(k8sClient1.Delete(ctx, amkoCluster1)).Should(Succeed())
deleteTestGCAndGDPObj(ctx, k8sClient1, gcObj, gdpObj)
Expect(k8sClient2.Delete(ctx, amkoCluster2)).Should(Succeed())
deleteTestGCAndGDPObj(ctx, k8sClient2, gcObj, gdpObj)
}
func VerifySuccessForAllStatusFields(k8sClient client.Client) {
VerifyTestAMKOClusterStatus(k8sClient, CurrentAMKOClusterValidationStatusField,
StatusMsgValidAMKOCluster, "")
VerifyTestAMKOClusterStatus(k8sClient, ClusterContextsStatusField,
StatusMsgClusterClientsSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, MemberValidationStatusField,
StatusMembersValidationSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, GSLBConfigFederationStatusField,
StatusGSLBConfigFederationSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, GDPFederationStatusField,
StatusGDPFederationSuccess, "")
}
| {
return
} | conditional_block |
test_utils.go | /*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"os"
"time"
b64 "encoding/base64"
. "github.com/onsi/gomega"
amkovmwarecomv1alpha1 "github.com/vmware/global-load-balancing-services-for-kubernetes/federator/api/v1alpha1"
gslbalphav1 "github.com/vmware/global-load-balancing-services-for-kubernetes/internal/apis/amko/v1alpha1"
gdpalphav2 "github.com/vmware/global-load-balancing-services-for-kubernetes/internal/apis/amko/v1alpha2"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var testEnv1 *envtest.Environment
var testEnv2 *envtest.Environment
const (
Cluster1 = "cluster1"
Cluster2 = "cluster2"
TestAMKOVersion = "1.4.2"
TestAMKODifferentVersion = "1.5.1"
TestAMKOClusterName = "test-amko-cluster"
TestGSLBSecret = "gslb-config-secret"
AMKOCRDs = "../../helm/amko/crds"
TestGCName = "test-gc"
TestGDPName = "test-gdp"
TestLeaderIP = "10.10.10.10"
)
const KubeConfigData = `
apiVersion: v1
clusters: []
contexts: []
kind: Config
preferences: {}
users: []
`
type ClustersKubeConfig struct {
APIVersion string `yaml:"apiVersion"`
Clusters []ClusterData `yaml:"clusters"`
Contexts []KubeContextData `yaml:"contexts"`
Kind string `yaml:"kind"`
Users []UserData `yaml:"users"`
}
type ClusterData struct {
Cluster ClusterServerData `yaml:"cluster"`
Name string `yaml:"name"`
}
type ClusterServerData struct {
CAData string `yaml:"certificate-authority-data"`
Server string `yaml:"server"`
}
type KubeContextData struct {
Context ContextData `yaml:"context"`
Name string `yaml:"name"`
}
type ContextData struct {
Cluster string `yaml:"cluster"`
User string `yaml:"user"`
}
type UserData struct {
Name string `yaml:"name"`
User UserID `yaml:"user"`
}
type UserID struct {
ClientCert string `yaml:"client-certificate-data"`
ClientKey string `yaml:"client-key-data"`
}
func BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2 client.Client) {
user1 := Cluster1 + "-user"
user2 := Cluster2 + "-user"
// kData := make(map[string]interface{})
kData := ClustersKubeConfig{}
Expect(yaml.Unmarshal([]byte(KubeConfigData), &kData)).Should(Succeed())
kData.Clusters = []ClusterData{
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CAData)),
Server: testEnv1.Config.Host,
},
Name: Cluster1,
},
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CAData)),
Server: testEnv2.Config.Host,
},
Name: Cluster2,
},
}
kData.Contexts = []KubeContextData{
{
Context: ContextData{
Cluster: Cluster1,
User: user1,
},
Name: Cluster1,
},
{
Context: ContextData{
Cluster: Cluster2,
User: user2,
},
Name: Cluster2,
},
}
kData.Users = []UserData{
{
Name: user1,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.KeyData)),
},
},
{
Name: user2,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.KeyData)),
},
},
}
// generate a string out of kubeCfg
kubeCfgData, err := yaml.Marshal(kData)
Expect(err).NotTo(HaveOccurred())
// create the "avi-system" namespace
nsObj := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: AviSystemNS,
}, | Expect(k8sClient1.Create(context.TODO(), &nsObj)).Should(Succeed())
Expect(os.Setenv("GSLB_CONFIG", string(kubeCfgData))).Should(Succeed())
// create "avi-system" namespace on the other cluster as well
nsObj.ObjectMeta.ResourceVersion = ""
Expect(k8sClient2.Create(context.TODO(), &nsObj)).Should(Succeed())
}
func getTestAMKOClusterObj(currentContext string, isLeader bool) amkovmwarecomv1alpha1.AMKOCluster {
return amkovmwarecomv1alpha1.AMKOCluster{
ObjectMeta: metav1.ObjectMeta{
Name: TestAMKOClusterName,
Namespace: AviSystemNS,
},
Spec: amkovmwarecomv1alpha1.AMKOClusterSpec{
ClusterContext: currentContext,
IsLeader: isLeader,
Clusters: []string{Cluster1, Cluster2},
Version: TestAMKOVersion,
},
}
}
func getTestAMKOClusterStatusReason(status amkovmwarecomv1alpha1.AMKOClusterStatus,
statusType string) map[string]string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return map[string]string{
"reason": condition.Reason,
"status": condition.Status,
}
}
}
return map[string]string{}
}
func getTestAMKOClusterStatusMsg(status amkovmwarecomv1alpha1.AMKOClusterStatus, statusType string) string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return condition.Status
}
}
return ""
}
func getTestGCObj() gslbalphav1.GSLBConfig {
return gslbalphav1.GSLBConfig{
ObjectMeta: metav1.ObjectMeta{
Name: TestGCName,
Namespace: AviSystemNS,
},
Spec: gslbalphav1.GSLBConfigSpec{
GSLBLeader: gslbalphav1.GSLBLeader{
Credentials: "test-creds",
ControllerVersion: "20.1.4",
ControllerIP: TestLeaderIP,
},
MemberClusters: []gslbalphav1.MemberCluster{
{
ClusterContext: Cluster1,
},
{
ClusterContext: Cluster2,
},
},
RefreshInterval: 3600,
LogLevel: "INFO",
},
}
}
func getTestGDPObject() gdpalphav2.GlobalDeploymentPolicy {
label := make(map[string]string)
label["key"] = "value"
return gdpalphav2.GlobalDeploymentPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: TestGDPName,
Namespace: AviSystemNS,
},
Spec: gdpalphav2.GDPSpec{
MatchRules: gdpalphav2.MatchRules{
AppSelector: gdpalphav2.AppSelector{
Label: label,
},
},
MatchClusters: []gdpalphav2.ClusterProperty{
{
Cluster: Cluster1,
},
{
Cluster: Cluster2,
},
},
TTL: getGDPTTLPtr(300),
},
}
}
func getGDPTTLPtr(val int) *int {
ttl := val
return &ttl
}
func createTestGCAndGDPObjs(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
Expect(k8sClient.Create(ctx, gc)).Should(Succeed())
Expect(k8sClient.Create(ctx, gdp)).Should(Succeed())
}
func deleteTestGCAndGDPObj(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
err := k8sClient.Delete(ctx, gc)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, gdp)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
}
func TestGCGDPNotFederated(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(BeZero())
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(BeZero())
}
func TestGCGDPExist(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(Equal(1))
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(Equal(1))
}
// func VerifyTestAMKOClusterObjectSuccess(k8sClient client.Client, statusType string) {
// Eventually(func() string {
// var obj amkovmwarecomv1alpha1.AMKOCluster
// Expect(k8sClient.Get(context.TODO(),
// types.NamespacedName{
// Name: TestAMKOClusterName,
// Namespace: AviSystemNS},
// &obj)).Should(Succeed())
// return getTestAMKOClusterStatusReason(obj.Status, statusType)
// }, 5*time.Second, 1*time.Second).Should(Equal("Federation successful"))
// }
func VerifyTestAMKOClusterStatus(k8sClient client.Client, statusType, statusMsg, failureMsg string) {
Eventually(func() map[string]string {
var obj amkovmwarecomv1alpha1.AMKOCluster
Expect(k8sClient.Get(context.TODO(),
types.NamespacedName{
Name: TestAMKOClusterName,
Namespace: AviSystemNS},
&obj)).Should(Succeed())
fmt.Printf("status of AMKOCluster: %v\n", obj.Status)
return getTestAMKOClusterStatusReason(obj.Status, statusType)
}, 5*time.Second, 1*time.Second).Should(Equal(map[string]string{"reason": failureMsg,
"status": statusMsg,
}))
}
func CleanupTestObjects(k8sClient1, k8sClient2 client.Client,
amkoCluster1, amkoCluster2 *amkovmwarecomv1alpha1.AMKOCluster,
gcObj *gslbalphav1.GSLBConfig, gdpObj *gdpalphav2.GlobalDeploymentPolicy) {
ctx := context.Background()
Expect(k8sClient1.Delete(ctx, amkoCluster1)).Should(Succeed())
deleteTestGCAndGDPObj(ctx, k8sClient1, gcObj, gdpObj)
Expect(k8sClient2.Delete(ctx, amkoCluster2)).Should(Succeed())
deleteTestGCAndGDPObj(ctx, k8sClient2, gcObj, gdpObj)
}
func VerifySuccessForAllStatusFields(k8sClient client.Client) {
VerifyTestAMKOClusterStatus(k8sClient, CurrentAMKOClusterValidationStatusField,
StatusMsgValidAMKOCluster, "")
VerifyTestAMKOClusterStatus(k8sClient, ClusterContextsStatusField,
StatusMsgClusterClientsSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, MemberValidationStatusField,
StatusMembersValidationSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, GSLBConfigFederationStatusField,
StatusGSLBConfigFederationSuccess, "")
VerifyTestAMKOClusterStatus(k8sClient, GDPFederationStatusField,
StatusGDPFederationSuccess, "")
} | } | random_line_split |
main-v2.js | // Array with all the button values
let calcBtns = ['C', '', '', '/', '7', '8', '9', 'X', '4', '5', '6', '-', '1', '2', '3', '+', '0', '', '.', '='];
// Default Values
let num1 = '';
let num2 = '';
let operand = '';
// Values for multiple equal sign press
let equalTemp = undefined;
let eqPress = false;
// Function to render elements
function renderElement(element, classes) {
let output = document.createElement(element);
output.className = classes;
return output;
}
// Renders the page elements on load
function loadCalc() {
// Create Elements
let container = renderElement('div', 'container-fluid');
let row = renderElement('div', 'row');
let leftCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let centerCol = renderElement('div', 'col-12 col-sm-12 col-md-10 col-lg-8 text-center');
let rightCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let title = renderElement('h1', 'my-5 display-4 text-white');
// title.innerHTML = 'Calculator';
// A colorful title
title.innerHTML = '<span class="text-danger">C</span><span class="text-primary">a</span><span class="text-warning">l</span><span class="text-dark">c</span><span class="text-danger">u</span><span class="text-primary">l</span><span class="text-warning">a</span><span class="text-dark">t</span><span class="text-danger">o</span><span class="text-primary">r</span>';
let displayRow = renderElement('div', 'row');
let display = renderElement('div', 'col bg-light text-right display-4');
display.id = 'displayWindow';
display.setAttribute('style', 'height: 80px;');
display.innerHTML = 0;
let bottom = renderElement('div', 'p-5');
// Append Elements
centerCol.appendChild(title);
centerCol.appendChild(displayRow);
displayRow.appendChild(display)
// Create the btns and append them to calcRow
let calcRow = document.createElement('div');
calcRow.className = 'row';
for (let i = 0; i < 20; i++) {
let btn = document.createElement('button');
btn.className = 'col-3 border bg-light display-4 button';
btn.setAttribute('type', 'button');
btn.id = `${calcBtns[i]}`;
btn.setAttribute('style', 'height: 80px;');
let text = document.createTextNode(`${calcBtns[i]}`);
btn.appendChild(text);
if (calcBtns[i] !== '') {
btn.addEventListener('click', clickedOn);
// Disables the blank buttons
} else {
btn.disabled = true;
}
calcRow.appendChild(btn);
centerCol.appendChild(calcRow);
}
centerCol.appendChild(bottom);
row.appendChild(rightCol);
row.appendChild(centerCol);
row.appendChild(leftCol);
container.appendChild(row);
let app = document.getElementById('app');
app.appendChild(container);
}
// Keyboard btns
document.addEventListener('keydown', function(e) { | }
// Key "=/+" without Shift --> "="
if (e.keyCode === 187 && !e.shiftKey) {
symPress('=');
}
// Can use * for multiply
if (e.keyCode === 56 && e.shiftKey) {
symPress('X');
}
if (e.keyCode === 56 && !e.shiftKey) {
numPress('8');
}
switch (e.keyCode) {
case 67:
symPress('C');
break;
// Delete key also --> Clear
case 8:
symPress('C');
break;
case 191:
symPress('/');
break;
case 88:
symPress('X');
break;
case 189:
symPress('-');
break;
// Allows "enter" to be used as "=", since that seems pretty intuitive
case 13:
symPress('=');
break;
case 190:
symPress('.');
break;
case 48:
numPress('0');
break;
case 49:
numPress('1');
break;
case 50:
numPress('2');
break;
case 51:
numPress('3');
break;
case 52:
numPress('4');
break;
case 53:
numPress('5');
break;
case 54:
numPress('6');
break;
case 55:
numPress('7');
break;
case 57:
numPress('9');
break;
}
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
});
// CALC LOGIC
// Differentiates between numbers and symbols
function clickedOn() {
if (this.id === 'C' || this.id === '/' || this.id === 'X' || this.id === '-' || this.id === '+' || this.id === '=' || this.id === '.') {
symPress(this.id);
} else {
numPress(this.id);
}
// If NaN (for example, from 0/0) clears the calc and displays a message)
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
// Debugging Logs:
console.log(`Equation: ${num1} ${operand} ${num2}`);
console.log(`Equal temp num: ${equalTemp}; eqPress: ${eqPress}`)
console.log('---------------');
}
// If a number is pressed
function numPress(inputNum) {
// Resets the equal temp number on any number press
equalTemp = undefined;
// If equal was just pressed, followed by a number, clears the calc
if (eqPress) {
clear();
}
// Sets num1
if (operand === '') {
// Makes it so you can't enter 00000
if (inputNum === '0' && num1 === '0') {
num1 = '';
// Caps the input length at 10 digits
} else if (num1.length < 10) {
if (num1 === '0') {
num1 = '';
}
num1 += inputNum;
displayWindow.innerHTML = num1;
}
// Sets num2
} else {
if (inputNum === '0' && num2 === '0') {
num2 = '';
} else if (num2.length < 10) {
if (num2 === '0') {
num2 = '';
}
num2 += inputNum;
displayWindow.innerHTML = num2;
}
}
}
// If a symbol is pressed
function symPress(inputSym) {
// If the sym is not =, then reset the equal values
if (inputSym !== '=') {
equalTemp = undefined;
eqPress = false;
}
// Switch cases for various symbols
switch (inputSym) {
case '+':
// Only allows you to input operands if num1 has already been defined
// Otherwise, you can press an operand, and then a num, which can cause weird results
if (num1 !== '') {
// If num2 isn't defined yet, set the operand and do nothing else
if (num2 === '') {
displayWindow.innerHTML = '+';
operand = '+';
break;
// If it has been defined, calculate the last 2 numbers, display that result,
// place the result in num1, and clear num2
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '+';
break;
}
}
break;
case '-':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '-';
operand = '-';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '-';
break;
}
}
break;
case '/':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '/';
operand = '/';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '/';
break;
}
}
break;
case 'X':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = 'X';
operand = '*';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '*';
break;
}
}
break;
case '=':
// If either input is '.' --> display "Illegal use of decimal"
if (num1 === '.' || num2 === '.') {
clear();
displayWindow.innerHTML = '-Invalid Use of Decimal-';
}
// Records a boolean for if = was the last sym pressed
eqPress = true;
// If neither num1 nor num2 have been defined yet, do nothing
if (num1 === '' && num2 === '') {
break;
// If num2 is undefined, calculate using num1 [operand] num1
} else if (num2 === '') {
displayWindow.innerHTML = equalCalc(operand);
break;
// If num2 has been defined, record num2 in the equal sign's temp num holder, then calculate
} else {
equalTemp = num2;
displayWindow.innerHTML = mathCalc(operand);
break;
}
case '.':
// If operand is undefined, then apply decimal to num1
if (operand === '') {
// Check to make sure num1 doesn't already have a decimal
if (!num1.includes('.')) {
num1 += '.';
displayWindow.innerHTML = num1;
}
} else {
if (!num2.includes('.')) {
num2 += '.';
displayWindow.innerHTML = num2;
}
}
break;
// Clears the calc and all its variables if btn C is pressed
case 'C':
clear();
}
}
// Normal calculations --> [] + [] =
function mathCalc(sym) {
switch (sym) {
case '+':
// Calculates num1 [operand] num2, stores that value
// in num1 and displays it, clears num2 for use in future calculations
num1 = Number(num1) + Number(num2);
num2 = '';
return num1;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
return num1;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
return num1;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
return num1;
}
}
// [] + [] + []... =
function multiCalc(sym) {
switch (sym) {
case '+':
num1 = Number(num1) + Number(num2);
num2 = '';
break;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
break;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
break;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
}
}
// For when equal sign is pressed multiple times --> [] + = = = OR [] + [] = = =
function equalCalc(sym) {
switch (sym) {
case '+':
// If equal's temp num has not been defined yet, define it
// Otherwise, keep performing calculations using the old value
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) + Number(equalTemp);
num2 = '';
return num1;
case '-':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) - Number(equalTemp);
num2 = '';
return num1;
case '/':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) / Number(equalTemp);
num2 = '';
return num1;
case '*':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) * Number(equalTemp);
num2 = '';
return num1;
case '':
return num1;
}
}
// Resets all of the calculator's values to their default state
function clear() {
num1 = '';
num2 = '';
operand = '';
displayWindow.innerHTML = 0;
equalTemp = undefined;
eqPress = false;
}
// Cases I tested for:
// [] + [] =
// [] + [] + []... =
// [] + [] =, [] + [] = --> Should reset after first equal sign
// [] + [] =, + [] = --> Shouldn't reset, should add the new value to the first answer
// [] + = = =... --> Should keep adding the first number to the running sum
// [] + [] = = =... --> Should continue to add the second value to the sum
//
//
// Things that shouldn't be allowed:
// 00003 (leading zeros); 4.56.87 (multiple decimal points); pressing a symbol before first inputting a number;
// pressing multiple operands in a row --> I made it so that this changes your selected operand (ex. 1 - * + 2 = 3);
// Inputting an unlimited amount of numbers --> I capped input at 10 digits
//
// Certain cases result in NaN: 0 / 0; . + .
// In these instances, I have the calculator display "-Undefined-" and then internally reset all of its values
// I believe that "undefined" is actually the correct answer for 0 / 0.
// For . + . --> I've set the display to read "Invalid Use of Decimal"
//
// A case that I did not solve for:
// .1 + .2 (wasn't really sure how to approach solving this)
//
//
// First build was really messy and hard to debug (main.js). For this current build, I first created only core
// functions, and then began adding "edge cases" incrementally. I used a 4-quadrant chart to approach edge cases:
// Urgent vs Less Urgent; High vs Low Importance
// This was a much better way to write code. For the final few edge cases, however, it was still pretty
// difficult to add fixes while also trying to avoid breaking anything else (which I did several times).
// Added keyboard input. Made it so that * and X can both be used for multiplication. C and Delete can both
// be used for clear. = and Enter can both be used for equals. | // Keys: Shift and "=/+" --> "+"
if (e.keyCode === 187 && e.shiftKey) {
symPress('+'); | random_line_split |
main-v2.js | // Array with all the button values
let calcBtns = ['C', '', '', '/', '7', '8', '9', 'X', '4', '5', '6', '-', '1', '2', '3', '+', '0', '', '.', '='];
// Default Values
let num1 = '';
let num2 = '';
let operand = '';
// Values for multiple equal sign press
let equalTemp = undefined;
let eqPress = false;
// Function to render elements
function renderElement(element, classes) {
let output = document.createElement(element);
output.className = classes;
return output;
}
// Renders the page elements on load
function loadCalc() {
// Create Elements
let container = renderElement('div', 'container-fluid');
let row = renderElement('div', 'row');
let leftCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let centerCol = renderElement('div', 'col-12 col-sm-12 col-md-10 col-lg-8 text-center');
let rightCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let title = renderElement('h1', 'my-5 display-4 text-white');
// title.innerHTML = 'Calculator';
// A colorful title
title.innerHTML = '<span class="text-danger">C</span><span class="text-primary">a</span><span class="text-warning">l</span><span class="text-dark">c</span><span class="text-danger">u</span><span class="text-primary">l</span><span class="text-warning">a</span><span class="text-dark">t</span><span class="text-danger">o</span><span class="text-primary">r</span>';
let displayRow = renderElement('div', 'row');
let display = renderElement('div', 'col bg-light text-right display-4');
display.id = 'displayWindow';
display.setAttribute('style', 'height: 80px;');
display.innerHTML = 0;
let bottom = renderElement('div', 'p-5');
// Append Elements
centerCol.appendChild(title);
centerCol.appendChild(displayRow);
displayRow.appendChild(display)
// Create the btns and append them to calcRow
let calcRow = document.createElement('div');
calcRow.className = 'row';
for (let i = 0; i < 20; i++) {
let btn = document.createElement('button');
btn.className = 'col-3 border bg-light display-4 button';
btn.setAttribute('type', 'button');
btn.id = `${calcBtns[i]}`;
btn.setAttribute('style', 'height: 80px;');
let text = document.createTextNode(`${calcBtns[i]}`);
btn.appendChild(text);
if (calcBtns[i] !== '') {
btn.addEventListener('click', clickedOn);
// Disables the blank buttons
} else {
btn.disabled = true;
}
calcRow.appendChild(btn);
centerCol.appendChild(calcRow);
}
centerCol.appendChild(bottom);
row.appendChild(rightCol);
row.appendChild(centerCol);
row.appendChild(leftCol);
container.appendChild(row);
let app = document.getElementById('app');
app.appendChild(container);
}
// Keyboard btns
document.addEventListener('keydown', function(e) {
// Keys: Shift and "=/+" --> "+"
if (e.keyCode === 187 && e.shiftKey) {
symPress('+');
}
// Key "=/+" without Shift --> "="
if (e.keyCode === 187 && !e.shiftKey) {
symPress('=');
}
// Can use * for multiply
if (e.keyCode === 56 && e.shiftKey) {
symPress('X');
}
if (e.keyCode === 56 && !e.shiftKey) {
numPress('8');
}
switch (e.keyCode) {
case 67:
symPress('C');
break;
// Delete key also --> Clear
case 8:
symPress('C');
break;
case 191:
symPress('/');
break;
case 88:
symPress('X');
break;
case 189:
symPress('-');
break;
// Allows "enter" to be used as "=", since that seems pretty intuitive
case 13:
symPress('=');
break;
case 190:
symPress('.');
break;
case 48:
numPress('0');
break;
case 49:
numPress('1');
break;
case 50:
numPress('2');
break;
case 51:
numPress('3');
break;
case 52:
numPress('4');
break;
case 53:
numPress('5');
break;
case 54:
numPress('6');
break;
case 55:
numPress('7');
break;
case 57:
numPress('9');
break;
}
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
});
// CALC LOGIC
// Differentiates between numbers and symbols
function clickedOn() {
if (this.id === 'C' || this.id === '/' || this.id === 'X' || this.id === '-' || this.id === '+' || this.id === '=' || this.id === '.') {
symPress(this.id);
} else {
numPress(this.id);
}
// If NaN (for example, from 0/0) clears the calc and displays a message)
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
// Debugging Logs:
console.log(`Equation: ${num1} ${operand} ${num2}`);
console.log(`Equal temp num: ${equalTemp}; eqPress: ${eqPress}`)
console.log('---------------');
}
// If a number is pressed
function numPress(inputNum) {
// Resets the equal temp number on any number press
equalTemp = undefined;
// If equal was just pressed, followed by a number, clears the calc
if (eqPress) {
clear();
}
// Sets num1
if (operand === '') {
// Makes it so you can't enter 00000
if (inputNum === '0' && num1 === '0') {
num1 = '';
// Caps the input length at 10 digits
} else if (num1.length < 10) {
if (num1 === '0') {
num1 = '';
}
num1 += inputNum;
displayWindow.innerHTML = num1;
}
// Sets num2
} else {
if (inputNum === '0' && num2 === '0') {
num2 = '';
} else if (num2.length < 10) {
if (num2 === '0') {
num2 = '';
}
num2 += inputNum;
displayWindow.innerHTML = num2;
}
}
}
// If a symbol is pressed
function symPress(inputSym) {
// If the sym is not =, then reset the equal values
if (inputSym !== '=') {
equalTemp = undefined;
eqPress = false;
}
// Switch cases for various symbols
switch (inputSym) {
case '+':
// Only allows you to input operands if num1 has already been defined
// Otherwise, you can press an operand, and then a num, which can cause weird results
if (num1 !== '') {
// If num2 isn't defined yet, set the operand and do nothing else
if (num2 === '') {
displayWindow.innerHTML = '+';
operand = '+';
break;
// If it has been defined, calculate the last 2 numbers, display that result,
// place the result in num1, and clear num2
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '+';
break;
}
}
break;
case '-':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '-';
operand = '-';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '-';
break;
}
}
break;
case '/':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '/';
operand = '/';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '/';
break;
}
}
break;
case 'X':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = 'X';
operand = '*';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '*';
break;
}
}
break;
case '=':
// If either input is '.' --> display "Illegal use of decimal"
if (num1 === '.' || num2 === '.') {
clear();
displayWindow.innerHTML = '-Invalid Use of Decimal-';
}
// Records a boolean for if = was the last sym pressed
eqPress = true;
// If neither num1 nor num2 have been defined yet, do nothing
if (num1 === '' && num2 === '') {
break;
// If num2 is undefined, calculate using num1 [operand] num1
} else if (num2 === '') {
displayWindow.innerHTML = equalCalc(operand);
break;
// If num2 has been defined, record num2 in the equal sign's temp num holder, then calculate
} else {
equalTemp = num2;
displayWindow.innerHTML = mathCalc(operand);
break;
}
case '.':
// If operand is undefined, then apply decimal to num1
if (operand === '') {
// Check to make sure num1 doesn't already have a decimal
if (!num1.includes('.')) {
num1 += '.';
displayWindow.innerHTML = num1;
}
} else {
if (!num2.includes('.')) {
num2 += '.';
displayWindow.innerHTML = num2;
}
}
break;
// Clears the calc and all its variables if btn C is pressed
case 'C':
clear();
}
}
// Normal calculations --> [] + [] =
function mathCalc(sym) {
switch (sym) {
case '+':
// Calculates num1 [operand] num2, stores that value
// in num1 and displays it, clears num2 for use in future calculations
num1 = Number(num1) + Number(num2);
num2 = '';
return num1;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
return num1;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
return num1;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
return num1;
}
}
// [] + [] + []... =
function multiCalc(sym) {
switch (sym) {
case '+':
num1 = Number(num1) + Number(num2);
num2 = '';
break;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
break;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
break;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
}
}
// For when equal sign is pressed multiple times --> [] + = = = OR [] + [] = = =
function equalCalc(sym) {
switch (sym) {
case '+':
// If equal's temp num has not been defined yet, define it
// Otherwise, keep performing calculations using the old value
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) + Number(equalTemp);
num2 = '';
return num1;
case '-':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) - Number(equalTemp);
num2 = '';
return num1;
case '/':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) / Number(equalTemp);
num2 = '';
return num1;
case '*':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) * Number(equalTemp);
num2 = '';
return num1;
case '':
return num1;
}
}
// Resets all of the calculator's values to their default state
function clear() |
// Cases I tested for:
// [] + [] =
// [] + [] + []... =
// [] + [] =, [] + [] = --> Should reset after first equal sign
// [] + [] =, + [] = --> Shouldn't reset, should add the new value to the first answer
// [] + = = =... --> Should keep adding the first number to the running sum
// [] + [] = = =... --> Should continue to add the second value to the sum
//
//
// Things that shouldn't be allowed:
// 00003 (leading zeros); 4.56.87 (multiple decimal points); pressing a symbol before first inputting a number;
// pressing multiple operands in a row --> I made it so that this changes your selected operand (ex. 1 - * + 2 = 3);
// Inputting an unlimited amount of numbers --> I capped input at 10 digits
//
// Certain cases result in NaN: 0 / 0; . + .
// In these instances, I have the calculator display "-Undefined-" and then internally reset all of its values
// I believe that "undefined" is actually the correct answer for 0 / 0.
// For . + . --> I've set the display to read "Invalid Use of Decimal"
//
// A case that I did not solve for:
// .1 + .2 (wasn't really sure how to approach solving this)
//
//
// First build was really messy and hard to debug (main.js). For this current build, I first created only core
// functions, and then began adding "edge cases" incrementally. I used a 4-quadrant chart to approach edge cases:
// Urgent vs Less Urgent; High vs Low Importance
// This was a much better way to write code. For the final few edge cases, however, it was still pretty
// difficult to add fixes while also trying to avoid breaking anything else (which I did several times).
// Added keyboard input. Made it so that * and X can both be used for multiplication. C and Delete can both
// be used for clear. = and Enter can both be used for equals. | {
num1 = '';
num2 = '';
operand = '';
displayWindow.innerHTML = 0;
equalTemp = undefined;
eqPress = false;
} | identifier_body |
main-v2.js | // Array with all the button values
let calcBtns = ['C', '', '', '/', '7', '8', '9', 'X', '4', '5', '6', '-', '1', '2', '3', '+', '0', '', '.', '='];
// Default Values
let num1 = '';
let num2 = '';
let operand = '';
// Values for multiple equal sign press
let equalTemp = undefined;
let eqPress = false;
// Function to render elements
function renderElement(element, classes) {
let output = document.createElement(element);
output.className = classes;
return output;
}
// Renders the page elements on load
function loadCalc() {
// Create Elements
let container = renderElement('div', 'container-fluid');
let row = renderElement('div', 'row');
let leftCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let centerCol = renderElement('div', 'col-12 col-sm-12 col-md-10 col-lg-8 text-center');
let rightCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let title = renderElement('h1', 'my-5 display-4 text-white');
// title.innerHTML = 'Calculator';
// A colorful title
title.innerHTML = '<span class="text-danger">C</span><span class="text-primary">a</span><span class="text-warning">l</span><span class="text-dark">c</span><span class="text-danger">u</span><span class="text-primary">l</span><span class="text-warning">a</span><span class="text-dark">t</span><span class="text-danger">o</span><span class="text-primary">r</span>';
let displayRow = renderElement('div', 'row');
let display = renderElement('div', 'col bg-light text-right display-4');
display.id = 'displayWindow';
display.setAttribute('style', 'height: 80px;');
display.innerHTML = 0;
let bottom = renderElement('div', 'p-5');
// Append Elements
centerCol.appendChild(title);
centerCol.appendChild(displayRow);
displayRow.appendChild(display)
// Create the btns and append them to calcRow
let calcRow = document.createElement('div');
calcRow.className = 'row';
for (let i = 0; i < 20; i++) {
let btn = document.createElement('button');
btn.className = 'col-3 border bg-light display-4 button';
btn.setAttribute('type', 'button');
btn.id = `${calcBtns[i]}`;
btn.setAttribute('style', 'height: 80px;');
let text = document.createTextNode(`${calcBtns[i]}`);
btn.appendChild(text);
if (calcBtns[i] !== '') {
btn.addEventListener('click', clickedOn);
// Disables the blank buttons
} else {
btn.disabled = true;
}
calcRow.appendChild(btn);
centerCol.appendChild(calcRow);
}
centerCol.appendChild(bottom);
row.appendChild(rightCol);
row.appendChild(centerCol);
row.appendChild(leftCol);
container.appendChild(row);
let app = document.getElementById('app');
app.appendChild(container);
}
// Keyboard btns
document.addEventListener('keydown', function(e) {
// Keys: Shift and "=/+" --> "+"
if (e.keyCode === 187 && e.shiftKey) {
symPress('+');
}
// Key "=/+" without Shift --> "="
if (e.keyCode === 187 && !e.shiftKey) {
symPress('=');
}
// Can use * for multiply
if (e.keyCode === 56 && e.shiftKey) {
symPress('X');
}
if (e.keyCode === 56 && !e.shiftKey) {
numPress('8');
}
switch (e.keyCode) {
case 67:
symPress('C');
break;
// Delete key also --> Clear
case 8:
symPress('C');
break;
case 191:
symPress('/');
break;
case 88:
symPress('X');
break;
case 189:
symPress('-');
break;
// Allows "enter" to be used as "=", since that seems pretty intuitive
case 13:
symPress('=');
break;
case 190:
symPress('.');
break;
case 48:
numPress('0');
break;
case 49:
numPress('1');
break;
case 50:
numPress('2');
break;
case 51:
numPress('3');
break;
case 52:
numPress('4');
break;
case 53:
numPress('5');
break;
case 54:
numPress('6');
break;
case 55:
numPress('7');
break;
case 57:
numPress('9');
break;
}
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
});
// CALC LOGIC
// Differentiates between numbers and symbols
function clickedOn() {
if (this.id === 'C' || this.id === '/' || this.id === 'X' || this.id === '-' || this.id === '+' || this.id === '=' || this.id === '.') | else {
numPress(this.id);
}
// If NaN (for example, from 0/0) clears the calc and displays a message)
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
// Debugging Logs:
console.log(`Equation: ${num1} ${operand} ${num2}`);
console.log(`Equal temp num: ${equalTemp}; eqPress: ${eqPress}`)
console.log('---------------');
}
// If a number is pressed
function numPress(inputNum) {
// Resets the equal temp number on any number press
equalTemp = undefined;
// If equal was just pressed, followed by a number, clears the calc
if (eqPress) {
clear();
}
// Sets num1
if (operand === '') {
// Makes it so you can't enter 00000
if (inputNum === '0' && num1 === '0') {
num1 = '';
// Caps the input length at 10 digits
} else if (num1.length < 10) {
if (num1 === '0') {
num1 = '';
}
num1 += inputNum;
displayWindow.innerHTML = num1;
}
// Sets num2
} else {
if (inputNum === '0' && num2 === '0') {
num2 = '';
} else if (num2.length < 10) {
if (num2 === '0') {
num2 = '';
}
num2 += inputNum;
displayWindow.innerHTML = num2;
}
}
}
// If a symbol is pressed
function symPress(inputSym) {
// If the sym is not =, then reset the equal values
if (inputSym !== '=') {
equalTemp = undefined;
eqPress = false;
}
// Switch cases for various symbols
switch (inputSym) {
case '+':
// Only allows you to input operands if num1 has already been defined
// Otherwise, you can press an operand, and then a num, which can cause weird results
if (num1 !== '') {
// If num2 isn't defined yet, set the operand and do nothing else
if (num2 === '') {
displayWindow.innerHTML = '+';
operand = '+';
break;
// If it has been defined, calculate the last 2 numbers, display that result,
// place the result in num1, and clear num2
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '+';
break;
}
}
break;
case '-':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '-';
operand = '-';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '-';
break;
}
}
break;
case '/':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '/';
operand = '/';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '/';
break;
}
}
break;
case 'X':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = 'X';
operand = '*';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '*';
break;
}
}
break;
case '=':
// If either input is '.' --> display "Illegal use of decimal"
if (num1 === '.' || num2 === '.') {
clear();
displayWindow.innerHTML = '-Invalid Use of Decimal-';
}
// Records a boolean for if = was the last sym pressed
eqPress = true;
// If neither num1 nor num2 have been defined yet, do nothing
if (num1 === '' && num2 === '') {
break;
// If num2 is undefined, calculate using num1 [operand] num1
} else if (num2 === '') {
displayWindow.innerHTML = equalCalc(operand);
break;
// If num2 has been defined, record num2 in the equal sign's temp num holder, then calculate
} else {
equalTemp = num2;
displayWindow.innerHTML = mathCalc(operand);
break;
}
case '.':
// If operand is undefined, then apply decimal to num1
if (operand === '') {
// Check to make sure num1 doesn't already have a decimal
if (!num1.includes('.')) {
num1 += '.';
displayWindow.innerHTML = num1;
}
} else {
if (!num2.includes('.')) {
num2 += '.';
displayWindow.innerHTML = num2;
}
}
break;
// Clears the calc and all its variables if btn C is pressed
case 'C':
clear();
}
}
// Normal calculations --> [] + [] =
function mathCalc(sym) {
switch (sym) {
case '+':
// Calculates num1 [operand] num2, stores that value
// in num1 and displays it, clears num2 for use in future calculations
num1 = Number(num1) + Number(num2);
num2 = '';
return num1;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
return num1;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
return num1;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
return num1;
}
}
// [] + [] + []... =
function multiCalc(sym) {
switch (sym) {
case '+':
num1 = Number(num1) + Number(num2);
num2 = '';
break;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
break;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
break;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
}
}
// For when equal sign is pressed multiple times --> [] + = = = OR [] + [] = = =
function equalCalc(sym) {
switch (sym) {
case '+':
// If equal's temp num has not been defined yet, define it
// Otherwise, keep performing calculations using the old value
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) + Number(equalTemp);
num2 = '';
return num1;
case '-':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) - Number(equalTemp);
num2 = '';
return num1;
case '/':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) / Number(equalTemp);
num2 = '';
return num1;
case '*':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) * Number(equalTemp);
num2 = '';
return num1;
case '':
return num1;
}
}
// Resets all of the calculator's values to their default state
function clear() {
num1 = '';
num2 = '';
operand = '';
displayWindow.innerHTML = 0;
equalTemp = undefined;
eqPress = false;
}
// Cases I tested for:
// [] + [] =
// [] + [] + []... =
// [] + [] =, [] + [] = --> Should reset after first equal sign
// [] + [] =, + [] = --> Shouldn't reset, should add the new value to the first answer
// [] + = = =... --> Should keep adding the first number to the running sum
// [] + [] = = =... --> Should continue to add the second value to the sum
//
//
// Things that shouldn't be allowed:
// 00003 (leading zeros); 4.56.87 (multiple decimal points); pressing a symbol before first inputting a number;
// pressing multiple operands in a row --> I made it so that this changes your selected operand (ex. 1 - * + 2 = 3);
// Inputting an unlimited amount of numbers --> I capped input at 10 digits
//
// Certain cases result in NaN: 0 / 0; . + .
// In these instances, I have the calculator display "-Undefined-" and then internally reset all of its values
// I believe that "undefined" is actually the correct answer for 0 / 0.
// For . + . --> I've set the display to read "Invalid Use of Decimal"
//
// A case that I did not solve for:
// .1 + .2 (wasn't really sure how to approach solving this)
//
//
// First build was really messy and hard to debug (main.js). For this current build, I first created only core
// functions, and then began adding "edge cases" incrementally. I used a 4-quadrant chart to approach edge cases:
// Urgent vs Less Urgent; High vs Low Importance
// This was a much better way to write code. For the final few edge cases, however, it was still pretty
// difficult to add fixes while also trying to avoid breaking anything else (which I did several times).
// Added keyboard input. Made it so that * and X can both be used for multiplication. C and Delete can both
// be used for clear. = and Enter can both be used for equals. | {
symPress(this.id);
} | conditional_block |
main-v2.js | // Array with all the button values
let calcBtns = ['C', '', '', '/', '7', '8', '9', 'X', '4', '5', '6', '-', '1', '2', '3', '+', '0', '', '.', '='];
// Default Values
let num1 = '';
let num2 = '';
let operand = '';
// Values for multiple equal sign press
let equalTemp = undefined;
let eqPress = false;
// Function to render elements
function renderElement(element, classes) {
let output = document.createElement(element);
output.className = classes;
return output;
}
// Renders the page elements on load
function | () {
// Create Elements
let container = renderElement('div', 'container-fluid');
let row = renderElement('div', 'row');
let leftCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let centerCol = renderElement('div', 'col-12 col-sm-12 col-md-10 col-lg-8 text-center');
let rightCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let title = renderElement('h1', 'my-5 display-4 text-white');
// title.innerHTML = 'Calculator';
// A colorful title
title.innerHTML = '<span class="text-danger">C</span><span class="text-primary">a</span><span class="text-warning">l</span><span class="text-dark">c</span><span class="text-danger">u</span><span class="text-primary">l</span><span class="text-warning">a</span><span class="text-dark">t</span><span class="text-danger">o</span><span class="text-primary">r</span>';
let displayRow = renderElement('div', 'row');
let display = renderElement('div', 'col bg-light text-right display-4');
display.id = 'displayWindow';
display.setAttribute('style', 'height: 80px;');
display.innerHTML = 0;
let bottom = renderElement('div', 'p-5');
// Append Elements
centerCol.appendChild(title);
centerCol.appendChild(displayRow);
displayRow.appendChild(display)
// Create the btns and append them to calcRow
let calcRow = document.createElement('div');
calcRow.className = 'row';
for (let i = 0; i < 20; i++) {
let btn = document.createElement('button');
btn.className = 'col-3 border bg-light display-4 button';
btn.setAttribute('type', 'button');
btn.id = `${calcBtns[i]}`;
btn.setAttribute('style', 'height: 80px;');
let text = document.createTextNode(`${calcBtns[i]}`);
btn.appendChild(text);
if (calcBtns[i] !== '') {
btn.addEventListener('click', clickedOn);
// Disables the blank buttons
} else {
btn.disabled = true;
}
calcRow.appendChild(btn);
centerCol.appendChild(calcRow);
}
centerCol.appendChild(bottom);
row.appendChild(rightCol);
row.appendChild(centerCol);
row.appendChild(leftCol);
container.appendChild(row);
let app = document.getElementById('app');
app.appendChild(container);
}
// Keyboard btns
document.addEventListener('keydown', function(e) {
// Keys: Shift and "=/+" --> "+"
if (e.keyCode === 187 && e.shiftKey) {
symPress('+');
}
// Key "=/+" without Shift --> "="
if (e.keyCode === 187 && !e.shiftKey) {
symPress('=');
}
// Can use * for multiply
if (e.keyCode === 56 && e.shiftKey) {
symPress('X');
}
if (e.keyCode === 56 && !e.shiftKey) {
numPress('8');
}
switch (e.keyCode) {
case 67:
symPress('C');
break;
// Delete key also --> Clear
case 8:
symPress('C');
break;
case 191:
symPress('/');
break;
case 88:
symPress('X');
break;
case 189:
symPress('-');
break;
// Allows "enter" to be used as "=", since that seems pretty intuitive
case 13:
symPress('=');
break;
case 190:
symPress('.');
break;
case 48:
numPress('0');
break;
case 49:
numPress('1');
break;
case 50:
numPress('2');
break;
case 51:
numPress('3');
break;
case 52:
numPress('4');
break;
case 53:
numPress('5');
break;
case 54:
numPress('6');
break;
case 55:
numPress('7');
break;
case 57:
numPress('9');
break;
}
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
});
// CALC LOGIC
// Differentiates between numbers and symbols
function clickedOn() {
if (this.id === 'C' || this.id === '/' || this.id === 'X' || this.id === '-' || this.id === '+' || this.id === '=' || this.id === '.') {
symPress(this.id);
} else {
numPress(this.id);
}
// If NaN (for example, from 0/0) clears the calc and displays a message)
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
// Debugging Logs:
console.log(`Equation: ${num1} ${operand} ${num2}`);
console.log(`Equal temp num: ${equalTemp}; eqPress: ${eqPress}`)
console.log('---------------');
}
// If a number is pressed
function numPress(inputNum) {
// Resets the equal temp number on any number press
equalTemp = undefined;
// If equal was just pressed, followed by a number, clears the calc
if (eqPress) {
clear();
}
// Sets num1
if (operand === '') {
// Makes it so you can't enter 00000
if (inputNum === '0' && num1 === '0') {
num1 = '';
// Caps the input length at 10 digits
} else if (num1.length < 10) {
if (num1 === '0') {
num1 = '';
}
num1 += inputNum;
displayWindow.innerHTML = num1;
}
// Sets num2
} else {
if (inputNum === '0' && num2 === '0') {
num2 = '';
} else if (num2.length < 10) {
if (num2 === '0') {
num2 = '';
}
num2 += inputNum;
displayWindow.innerHTML = num2;
}
}
}
// If a symbol is pressed
function symPress(inputSym) {
// If the sym is not =, then reset the equal values
if (inputSym !== '=') {
equalTemp = undefined;
eqPress = false;
}
// Switch cases for various symbols
switch (inputSym) {
case '+':
// Only allows you to input operands if num1 has already been defined
// Otherwise, you can press an operand, and then a num, which can cause weird results
if (num1 !== '') {
// If num2 isn't defined yet, set the operand and do nothing else
if (num2 === '') {
displayWindow.innerHTML = '+';
operand = '+';
break;
// If it has been defined, calculate the last 2 numbers, display that result,
// place the result in num1, and clear num2
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '+';
break;
}
}
break;
case '-':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '-';
operand = '-';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '-';
break;
}
}
break;
case '/':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '/';
operand = '/';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '/';
break;
}
}
break;
case 'X':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = 'X';
operand = '*';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '*';
break;
}
}
break;
case '=':
// If either input is '.' --> display "Illegal use of decimal"
if (num1 === '.' || num2 === '.') {
clear();
displayWindow.innerHTML = '-Invalid Use of Decimal-';
}
// Records a boolean for if = was the last sym pressed
eqPress = true;
// If neither num1 nor num2 have been defined yet, do nothing
if (num1 === '' && num2 === '') {
break;
// If num2 is undefined, calculate using num1 [operand] num1
} else if (num2 === '') {
displayWindow.innerHTML = equalCalc(operand);
break;
// If num2 has been defined, record num2 in the equal sign's temp num holder, then calculate
} else {
equalTemp = num2;
displayWindow.innerHTML = mathCalc(operand);
break;
}
case '.':
// If operand is undefined, then apply decimal to num1
if (operand === '') {
// Check to make sure num1 doesn't already have a decimal
if (!num1.includes('.')) {
num1 += '.';
displayWindow.innerHTML = num1;
}
} else {
if (!num2.includes('.')) {
num2 += '.';
displayWindow.innerHTML = num2;
}
}
break;
// Clears the calc and all its variables if btn C is pressed
case 'C':
clear();
}
}
// Normal calculations --> [] + [] =
function mathCalc(sym) {
switch (sym) {
case '+':
// Calculates num1 [operand] num2, stores that value
// in num1 and displays it, clears num2 for use in future calculations
num1 = Number(num1) + Number(num2);
num2 = '';
return num1;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
return num1;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
return num1;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
return num1;
}
}
// [] + [] + []... =
function multiCalc(sym) {
switch (sym) {
case '+':
num1 = Number(num1) + Number(num2);
num2 = '';
break;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
break;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
break;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
}
}
// For when equal sign is pressed multiple times --> [] + = = = OR [] + [] = = =
function equalCalc(sym) {
switch (sym) {
case '+':
// If equal's temp num has not been defined yet, define it
// Otherwise, keep performing calculations using the old value
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) + Number(equalTemp);
num2 = '';
return num1;
case '-':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) - Number(equalTemp);
num2 = '';
return num1;
case '/':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) / Number(equalTemp);
num2 = '';
return num1;
case '*':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) * Number(equalTemp);
num2 = '';
return num1;
case '':
return num1;
}
}
// Resets all of the calculator's values to their default state
function clear() {
num1 = '';
num2 = '';
operand = '';
displayWindow.innerHTML = 0;
equalTemp = undefined;
eqPress = false;
}
// Cases I tested for:
// [] + [] =
// [] + [] + []... =
// [] + [] =, [] + [] = --> Should reset after first equal sign
// [] + [] =, + [] = --> Shouldn't reset, should add the new value to the first answer
// [] + = = =... --> Should keep adding the first number to the running sum
// [] + [] = = =... --> Should continue to add the second value to the sum
//
//
// Things that shouldn't be allowed:
// 00003 (leading zeros); 4.56.87 (multiple decimal points); pressing a symbol before first inputting a number;
// pressing multiple operands in a row --> I made it so that this changes your selected operand (ex. 1 - * + 2 = 3);
// Inputting an unlimited amount of numbers --> I capped input at 10 digits
//
// Certain cases result in NaN: 0 / 0; . + .
// In these instances, I have the calculator display "-Undefined-" and then internally reset all of its values
// I believe that "undefined" is actually the correct answer for 0 / 0.
// For . + . --> I've set the display to read "Invalid Use of Decimal"
//
// A case that I did not solve for:
// .1 + .2 (wasn't really sure how to approach solving this)
//
//
// First build was really messy and hard to debug (main.js). For this current build, I first created only core
// functions, and then began adding "edge cases" incrementally. I used a 4-quadrant chart to approach edge cases:
// Urgent vs Less Urgent; High vs Low Importance
// This was a much better way to write code. For the final few edge cases, however, it was still pretty
// difficult to add fixes while also trying to avoid breaking anything else (which I did several times).
// Added keyboard input. Made it so that * and X can both be used for multiplication. C and Delete can both
// be used for clear. = and Enter can both be used for equals. | loadCalc | identifier_name |
proc.py | """
proc module for pyppl
"""
import copy as pycopy
import os
import pickle
import sys
import threading
from Queue import Queue
from random import randint
from subprocess import PIPE, Popen
from time import sleep, time
from . import utils
from .aggr import aggr
from .channel import channel
from .job import job as pjob
from ..runners import runner_local, runner_sge, runner_ssh
class proc (object):
"""
The proc class defining a process
@static variables:
`RUNNERS`: The regiested runners
`PROCS`: The "<id>.<tag>" initialized processes, used to detected whether there are two processes with the same id and tag.
`ALIAS`: The alias for the properties
`LOG_NLINE`: The limit of lines of logging information of same type of messages
@magic methods:
`__getattr__(self, name)`: get the value of a property in `self.props`
`__setattr__(self, name, value)`: set the value of a property in `self.config`
"""
RUNNERS = {}
PROCS = {}
ALIAS = {
'exdir': 'exportdir',
'exhow': 'exporthow',
'exow': 'exportow',
'errhow': 'errorhow',
'errntry': 'errorntry',
'lang': 'defaultSh',
'rc': 'retcodes',
'ppldir': 'tmpdir'
}
LOG_NLINE = {
'': 999,
'EXPORT_CACHE_OUTFILE_EXISTS': -3,
'EXPORT_CACHE_USING_SYMLINK': 3,
'BRINGFILE_OVERWRITING': 3,
'OUTNAME_USING_OUTTYPES': 1,
'OUTDIR_CREATED': 0,
'OUTDIR_CREATED_AFTER_RESET': 0,
'SCRIPT_USING_TEMPLATE': 1,
'SCRIPT_EXISTS': -2,
'NOSCRIPT': 1,
'JOB_RESETTING': 0,
'INFILE_OVERWRITING': -3
}
OUT_VARTYPE = ['var']
OUT_FILETYPE = ['file', 'path']
OUT_DIRTYPE = ['dir', 'folder']
IN_VARTYPE = ['var']
IN_FILETYPE = ['file', 'path', 'dir', 'folder']
IN_FILESTYPE = ['files', 'paths', 'dirs', 'folders']
EX_GZIP = ['gzip', 'gz']
EX_COPY = ['copy', 'cp']
EX_MOVE = ['move', 'mv']
EX_SYMLINK = ['link', 'symlink', 'symbol']
def __init__ (self, tag = 'notag'):
"""
Constructor
@params:
`tag`: The tag of the process
"""
# computed props
self.__dict__['props'] = {}
# configs
self.__dict__['config'] = {}
pid = utils.varname(self.__class__.__name__, 2)
self.config['input'] = ''
self.config['output'] = {}
# where cache file and wdir located
self.config['tmpdir'] = os.path.abspath("./workdir")
self.config['forks'] = 1
self.config['cache'] = True # False or 'export' or 'export+' (do True if failed do export)
self.config['retcodes'] = [0]
self.config['echo'] = False
self.config['runner'] = 'local'
self.config['script'] = ''
self.config['depends'] = []
self.config['tag'] = tag
self.config['exportdir'] = ''
self.config['exporthow'] = 'move' # symlink, copy, gzip
self.config['exportow'] = True # overwrite
self.config['errorhow'] = "terminate" # retry, ignore
self.config['errorntry'] = 3
self.config['defaultSh'] = 'bash'
self.config['beforeCmd'] = ""
self.config['afterCmd'] = ""
self.config['workdir'] = ''
self.config['args'] = {}
self.config['channel'] = channel.create()
self.config['aggr'] = None
self.config['callback'] = None
self.config['brings'] = {}
# init props
# id of the process, actually it's the variable name of the process
self.props['id'] = pid
# the tag
self.props['tag'] = tag
# the cachefile, cache file will be in <tmpdir>/<cachefile>
#self.props['cachefile'] = 'cached.jobs'
# which processes this one depents on
self.props['depends'] = []
# the script
self.props['script'] = ""
self.props['input'] = ''
self.props['indata'] = {}
self.props['output'] = ''
self.props['depends'] = self.config['depends']
self.props['nexts'] = []
self.props['tmpdir'] = self.config['tmpdir']
self.props['forks'] = self.config['forks']
self.props['cache'] = self.config['cache']
self.props['cached'] = True
self.props['retcodes'] = self.config['retcodes']
self.props['beforeCmd'] = self.config['beforeCmd']
self.props['afterCmd'] = self.config['afterCmd']
self.props['echo'] = self.config['echo']
self.props['runner'] = self.config['runner']
self.props['exportdir'] = self.config['exportdir']
self.props['exporthow'] = self.config['exporthow']
self.props['exportow'] = self.config['exportow']
self.props['errorhow'] = self.config['errorhow']
self.props['errorntry'] = self.config['errorntry']
self.props['jobs'] = []
self.props['ncjobids'] = [] # non-cached job ids
self.props['defaultSh'] = self.config['defaultSh']
self.props['channel'] = channel.create()
self.props['length'] = 0
# remember which property is set, then it won't be overwritten by configurations
self.props['sets'] = []
self.props['procvars'] = {}
self.props['workdir'] = ''
# for unittest, in real case, the logger will be got from pyppl
self.props['logger'] = None
self.props['args'] = self.config['args']
self.props['aggr'] = self.config['aggr']
self.props['callback'] = self.config['callback']
self.props['brings'] = self.config['brings']
self.props['suffix'] = ''
self.props['lognline'] = {key:0 for key in proc.LOG_NLINE.keys()}
self.props['lognline']['prevlog'] = ''
def __getattr__ (self, name):
if not self.props.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Property "%s" of proc is not found' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
return self.props[name]
def __setattr__ (self, name, value):
if not self.config.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Cannot set property "%s" for proc instance' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
self.sets.append(name)
self.config[name] = value
if name == 'depends':
# remove me from nexts of my previous depends
for depend in self.depends:
if not self in depend.nexts:
continue
del depend.props['nexts'][depend.nexts.index(self)]
self.props['depends'] = []
depends = value
if not isinstance (value, list):
depends = [value]
for depend in depends:
if isinstance (depend, proc):
self.props['depends'].append (depend)
if self not in depend.nexts:
depend.nexts.append (self)
elif isinstance (depend, aggr):
for p in depend.ends:
self.props['depends'].append (p)
if self not in p.nexts:
p.nexts.append (self)
else:
self.props[name] = value
def log (self, msg, level="info", flag=None, key = ''):
"""
The log function with aggregation name, process id and tag integrated.
@params:
`msg`: The message to log
`level`: The log level
`flag`: The flag
`key`: The type of messages
"""
if flag is None:
flag = level
flag = flag.upper().rjust(7)
flag = "[%s]" % flag
title = self._name()
func = getattr(self.logger, level)
maxline = proc.LOG_NLINE[key]
prevlog = self.lognline['prevlog']
if key == prevlog:
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
else:
n_omit = self.lognline[prevlog] - abs(proc.LOG_NLINE[prevlog])
if n_omit > 0 and proc.LOG_NLINE[prevlog] < 0:
logname = 'logs' if n_omit > 1 else 'log'
maxinfo = ' (%s, max=%s)' % (prevlog, abs(proc.LOG_NLINE[prevlog])) if prevlog else ''
self.logger.debug ("[ DEBUG] %s: ... and %s %s omitted%s." % (title, n_omit, logname, maxinfo))
self.lognline[prevlog] = 0
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
self.lognline['prevlog'] = key
self.lognline[key] += 1
def copy (self, tag=None, newid=None):
"""
Copy a process
@params:
`newid`: The new id of the process, default: `None` (use the varname)
`tag`: The tag of the new process, default: `None` (used the old one)
@returns:
The new process
"""
newproc = proc (tag if tag is not None else self.tag)
config = {key:val for key, val in self.config.iteritems() if key not in ['tag', 'workdir', 'aggr']}
config['tag'] = newproc.tag
config['aggr'] = ''
config['workdir'] = ''
props = {key:val for key, val in self.props.iteritems() if key not in ['cached', 'procvars', 'ncjobids', 'sets', 'channel', 'jobs', 'depends', 'nexts', 'tag', 'workdir', 'id', 'args']}
props['cached'] = True
props['procvars'] = {}
props['channel'] = channel.create()
props['depends'] = []
props['nexts'] = []
props['jobs'] = []
props['ncjobids'] = []
props['sets'] = []
props['workdir'] = ''
props['args'] = pycopy.copy(self.props['args'])
props['id'] = utils.varname(r'\w+\.' + self.copy.__name__, 3) if newid is None else newid
newproc.__dict__['config'].update(config)
newproc.__dict__['props'].update(props)
return newproc
def _suffix (self):
"""
Calcuate a uid for the process according to the configuration
@returns:
The uid
"""
if self.suffix:
return self.suffix
config = { key:val for key, val in self.config.iteritems() if key not in ['workdir', 'forks', 'cache', 'retcodes', 'echo', 'runner', 'exportdir', 'exporthow', 'exportow', 'errorhow', 'errorntry'] or key.endswith ('Runner') }
config['id'] = self.id
config['tag'] = self.tag
if config.has_key ('callback'):
config['callback'] = utils.funcsig(config['callback'])
# proc is not picklable
if config.has_key('depends'):
depends = config['depends']
pickable_depends = []
if isinstance(depends, proc):
depends = [depends]
elif isinstance(depends, aggr):
depends = depends.procs
for depend in depends:
pickable_depends.append(depend.id + '.' + depend.tag)
config['depends'] = pickable_depends
# lambda not pickable
if config.has_key ('input') and isinstance(config['input'], dict):
config['input'] = pycopy.copy(config['input'])
for key, val in config['input'].iteritems():
config['input'][key] = utils.funcsig(val) if callable(val) else val
signature = pickle.dumps(str(config))
self.props['suffix'] = utils.uid(signature)
return self.suffix
def _tidyBeforeRun (self):
"""
Do some preparation before running jobs
"""
self._buildProps ()
self._buildInput ()
self._buildProcVars ()
self._buildJobs ()
def _tidyAfterRun (self):
"""
Do some cleaning after running jobs
"""
failedjobs = []
for i in self.ncjobids:
job = self.jobs[i]
if not job.succeed():
failedjobs.append (job)
if not failedjobs:
self.log ('Successful jobs: ALL', 'debug')
if callable (self.callback):
self.log('Calling callback ...', 'debug')
self.callback (self)
else:
failedjobs[0].showError (len(failedjobs))
if self.errorhow != 'ignore':
sys.exit (1) # don't go further
def _name (self, incAggr = True):
"""
Get my name include `aggr`, `id`, `tag`
@returns:
the name
"""
aggrName = "@%s" % self.aggr if self.aggr and incAggr else ""
tag = ".%s" % self.tag if self.tag != "notag" else ""
return "%s%s%s" % (self.id, tag, aggrName)
def run (self, config = None):
"""
Run the jobs with a configuration
@params:
`config`: The configuration
"""
timer = time()
if config is None:
config = {}
self.logger.info ('[ START] ' + utils.padBoth(' ' + self._name() + ' ', 80, '-'))
# log the dependencies
self.log ("%s => %s => %s" % ([p._name() for p in self.depends] if self.depends else "START", self._name(), [p._name() for p in self.nexts] if self.nexts else "END"), "info", "depends")
self._readConfig (config)
self._tidyBeforeRun ()
if self._runCmd('beforeCmd') != 0:
raise Exception ('Failed to run beforeCmd: %s' % self.beforeCmd)
if not self._isCached():
# I am not cached, touch the input of my nexts?
# but my nexts are not initized, how?
# set cached to False, then my nexts will access it
self.props['cached'] = False
self.log (self.workdir, 'info', 'RUNNING')
self._runJobs()
if self._runCmd('afterCmd') != 0:
raise Exception ('Failed to run afterCmd: %s' % self.afterCmd)
self._tidyAfterRun ()
self.log ('Done (time: %s).' % utils.formatTime(time() - timer), 'info')
def _buildProps (self):
"""
Compute some properties
"""
if isinstance (self.retcodes, int):
self.props['retcodes'] = [self.retcodes]
if isinstance (self.retcodes, str):
self.props['retcodes'] = [int(i) for i in self.retcodes.split(',')]
key = self._name(False)
if key in proc.PROCS and proc.PROCS[key] != self:
raise Exception ('A proc with id "%s" and tag "%s" already exists.' % (self.id, self.tag))
proc.PROCS[key] = self
if not 'workdir' in self.sets and not self.workdir:
self.props['workdir'] = os.path.join(self.ppldir, "PyPPL.%s.%s.%s" % (self.id, self.tag, self._suffix()))
if not os.path.exists (self.workdir):
os.makedirs (self.workdir)
def _buildInput (self):
"""
Build the input data
Input could be:
1. list: ['input', 'infile:file'] <=> ['input:var', 'infile:path']
2. str : "input, infile:file" <=> input:var, infile:path
3. dict: {"input": channel1, "infile:file": channel2}
or {"input:var, input:file" : channel3}
for 1,2 channels will be the combined channel from dependents, if there is not dependents, it will be sys.argv[1:]
"""
indata = self.config['input']
if not isinstance (indata, dict):
indata = ','.join(utils.alwaysList (indata))
depdchan = channel.fromChannels (*[d.channel for d in self.depends])
indata = {indata: depdchan if self.depends else channel.fromArgv()}
# expand to one key-channel pairs
for inkeys, invals in indata.iteritems():
keys = utils.split(inkeys, ',')
if callable (invals):
vals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())
vals = vals.split()
elif isinstance (invals, basestring): # only for files: "/a/b/*.txt, /a/c/*.txt"
vals = utils.split(invals, ',')
elif isinstance (invals, channel):
vals = invals.split()
elif isinstance (invals, list):
vals = channel.create(invals).split()
else:
raise ValueError ("%s: Unexpected values for input. Expect dict, list, str, channel, callable." % self._name())
width = len (vals)
if len (keys) > width:
raise ValueError ('%s: Not enough data for input variables.\nVarialbes: %s\nData: %s' % (self._name(), keys, vals))
for i, key in enumerate(keys):
intype = key.split(':')[-1]
thekey = key.split(':')[0]
val = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]
if intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:
intype = proc.IN_VARTYPE[0]
if intype in proc.IN_FILESTYPE:
for x, v in enumerate(val):
if isinstance (v, basestring):
val[x] = channel.fromPath (v).toList()
if self.length == 0:
self.props['length'] = len (val)
if self.length != len (val):
raise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' % (self._name(), self.length, len (val), key))
self.props['indata'][thekey] = {
'type': intype,
'data': val
}
self.props['jobs'] = [None] * self.length
def _buildProcVars (self):
"""
also add proc.props, mostly scalar values
"""
alias = {val:key for key, val in proc.ALIAS.iteritems()}
for prop in sorted(self.props.keys()):
val = self.props[prop]
if not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',
'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',
'indir', 'outdir', 'length', 'args']:
continue
if prop == 'args':
self.props['procvars']['proc.args'] = val
for k, v in val.iteritems():
self.props['procvars']['proc.args.' + k] = v
self.log('%s => %s' % (k, v), 'info', 'p.args')
else:
self.props['procvars']['proc.' + prop] = val
if alias.has_key (prop):
self.props['procvars']['proc.' + alias[prop]] = val
self.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')
else:
self.log ('%s => %s' % (prop, val), 'info', 'p.props')
def _buildJobs (self):
rptjob = randint(0, self.length-1)
for i in range(self.length):
job = pjob (i, self)
self.jobs[i] = job
job.init ()
row = [x['data'] for x in job.output.values()]
self.channel.rbind (row)
self.jobs[rptjob].report()
def _readConfig (self, config):
|
def _isCached (self):
"""
Tell whether the jobs are cached
@returns:
True if all jobs are cached, otherwise False
"""
self.props['ncjobids'] = range(self.length)
if self.cache == False:
self.log ('Not cached, because proc.cache is False', 'debug')
return False
if self.cache == True:
for depend in self.depends:
if depend.cached: continue
self.log ('Not cached, my dependent "%s" is not cached.' % depend._name(), 'debug')
return False
trulyCachedJids = []
exptCachedJids = []
self.props['ncjobids'] = []
for i, job in enumerate(self.jobs):
job = self.jobs[i]
if job.isTrulyCached ():
trulyCachedJids.append(i)
elif job.isExptCached ():
exptCachedJids.append (i)
else:
self.props['ncjobids'].append (i)
self.log ('Truely cached jobs: %s' % (trulyCachedJids if len(trulyCachedJids) < self.length else 'ALL'), 'debug')
self.log ('Export cached jobs: %s' % (exptCachedJids if len(exptCachedJids) < self.length else 'ALL'), 'debug')
if self.ncjobids:
if len(self.ncjobids) < self.length:
self.log ('Partly cached, only run non-cached %s job(s).' % len(self.ncjobids), 'info')
self.log ('Jobs to be running: %s' % self.ncjobids, 'debug')
else:
self.log ('Not cached, none of the jobs are cached.', 'info')
return False
else:
self.log (self.workdir, 'info', 'CACHED')
return True
def _runCmd (self, key):
"""
Run the `beforeCmd` or `afterCmd`
@params:
`key`: "beforeCmd" or "afterCmd"
@returns:
The return code of the command
"""
if not self.props[key]:
return 0
cmd = utils.format(self.props[key], self.procvars)
self.log ('Running <%s>: %s' % (key, cmd), 'info')
p = Popen (cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)
if self.echo:
for line in iter(p.stdout.readline, ''):
self.logger.info ('[ STDOUT] ' + line.rstrip("\n"))
for line in iter(p.stderr.readline, ''):
self.logger.error ('[ STDERR] ' + line.rstrip("\n"))
return p.wait()
def _runJobs (self):
"""
Submit and run the jobs
"""
# submit jobs
def sworker (q):
"""
The worker to run jobs
"""
while True:
(run, i) = q.get()
sleep (i)
if run.isRunning():
self.log ("Job #%s is already running, skip submitting." % run.job.index, 'info')
else:
run.submit()
run.wait()
run.finish()
q.task_done()
runner = proc.RUNNERS[self.runner]
maxsubmit = self.forks
if hasattr(runner, 'maxsubmit'):
maxsubmit = runner.maxsubmit
interval = .1
if hasattr(runner, 'interval'):
interval = runner.interval
sq = Queue()
for i in self.ncjobids:
rjob = runner (self.jobs[i])
tm = int(i/maxsubmit) * interval
sq.put ((rjob, tm))
# submit jobs
nojobs2submit = min (self.forks, len(self.ncjobids))
for i in range (nojobs2submit):
t = threading.Thread(target = sworker, args = (sq, ))
t.daemon = True
t.start ()
sq.join()
@staticmethod
def registerRunner (runner):
"""
Register a runner
@params:
`runner`: The runner to be registered.
"""
runner_name = runner.__name__
if runner_name.startswith ('runner_'):
runner_name = runner_name[7:]
if not proc.RUNNERS.has_key(runner_name):
proc.RUNNERS[runner_name] = runner
proc.registerRunner (runner_local)
proc.registerRunner (runner_sge)
proc.registerRunner (runner_ssh)
| """
Read the configuration
@params:
`config`: The configuration
"""
conf = { key:val for key, val in config.iteritems() if key not in self.sets }
self.config.update (conf)
for key, val in conf.iteritems():
self.props[key] = val | identifier_body |
proc.py | """
proc module for pyppl
"""
import copy as pycopy
import os
import pickle
import sys
import threading
from Queue import Queue
from random import randint
from subprocess import PIPE, Popen
from time import sleep, time
from . import utils
from .aggr import aggr
from .channel import channel
from .job import job as pjob
from ..runners import runner_local, runner_sge, runner_ssh
class proc (object):
"""
The proc class defining a process
@static variables:
`RUNNERS`: The regiested runners
`PROCS`: The "<id>.<tag>" initialized processes, used to detected whether there are two processes with the same id and tag.
`ALIAS`: The alias for the properties
`LOG_NLINE`: The limit of lines of logging information of same type of messages
@magic methods:
`__getattr__(self, name)`: get the value of a property in `self.props`
`__setattr__(self, name, value)`: set the value of a property in `self.config`
"""
RUNNERS = {}
PROCS = {}
ALIAS = {
'exdir': 'exportdir',
'exhow': 'exporthow',
'exow': 'exportow',
'errhow': 'errorhow',
'errntry': 'errorntry',
'lang': 'defaultSh',
'rc': 'retcodes',
'ppldir': 'tmpdir'
}
LOG_NLINE = {
'': 999,
'EXPORT_CACHE_OUTFILE_EXISTS': -3,
'EXPORT_CACHE_USING_SYMLINK': 3,
'BRINGFILE_OVERWRITING': 3,
'OUTNAME_USING_OUTTYPES': 1,
'OUTDIR_CREATED': 0,
'OUTDIR_CREATED_AFTER_RESET': 0,
'SCRIPT_USING_TEMPLATE': 1,
'SCRIPT_EXISTS': -2,
'NOSCRIPT': 1,
'JOB_RESETTING': 0,
'INFILE_OVERWRITING': -3
}
OUT_VARTYPE = ['var']
OUT_FILETYPE = ['file', 'path']
OUT_DIRTYPE = ['dir', 'folder']
IN_VARTYPE = ['var']
IN_FILETYPE = ['file', 'path', 'dir', 'folder']
IN_FILESTYPE = ['files', 'paths', 'dirs', 'folders']
EX_GZIP = ['gzip', 'gz']
EX_COPY = ['copy', 'cp']
EX_MOVE = ['move', 'mv']
EX_SYMLINK = ['link', 'symlink', 'symbol']
def __init__ (self, tag = 'notag'):
"""
Constructor
@params:
`tag`: The tag of the process
"""
# computed props
self.__dict__['props'] = {}
# configs
self.__dict__['config'] = {}
pid = utils.varname(self.__class__.__name__, 2)
self.config['input'] = ''
self.config['output'] = {}
# where cache file and wdir located
self.config['tmpdir'] = os.path.abspath("./workdir")
self.config['forks'] = 1
self.config['cache'] = True # False or 'export' or 'export+' (do True if failed do export)
self.config['retcodes'] = [0]
self.config['echo'] = False
self.config['runner'] = 'local'
self.config['script'] = ''
self.config['depends'] = []
self.config['tag'] = tag
self.config['exportdir'] = ''
self.config['exporthow'] = 'move' # symlink, copy, gzip
self.config['exportow'] = True # overwrite
self.config['errorhow'] = "terminate" # retry, ignore
self.config['errorntry'] = 3
self.config['defaultSh'] = 'bash'
self.config['beforeCmd'] = ""
self.config['afterCmd'] = ""
self.config['workdir'] = ''
self.config['args'] = {}
self.config['channel'] = channel.create()
self.config['aggr'] = None
self.config['callback'] = None
self.config['brings'] = {}
# init props
# id of the process, actually it's the variable name of the process
self.props['id'] = pid
# the tag
self.props['tag'] = tag
# the cachefile, cache file will be in <tmpdir>/<cachefile>
#self.props['cachefile'] = 'cached.jobs'
# which processes this one depents on
self.props['depends'] = []
# the script
self.props['script'] = ""
self.props['input'] = ''
self.props['indata'] = {}
self.props['output'] = ''
self.props['depends'] = self.config['depends']
self.props['nexts'] = []
self.props['tmpdir'] = self.config['tmpdir']
self.props['forks'] = self.config['forks']
self.props['cache'] = self.config['cache']
self.props['cached'] = True
self.props['retcodes'] = self.config['retcodes']
self.props['beforeCmd'] = self.config['beforeCmd']
self.props['afterCmd'] = self.config['afterCmd']
self.props['echo'] = self.config['echo']
self.props['runner'] = self.config['runner']
self.props['exportdir'] = self.config['exportdir']
self.props['exporthow'] = self.config['exporthow']
self.props['exportow'] = self.config['exportow']
self.props['errorhow'] = self.config['errorhow']
self.props['errorntry'] = self.config['errorntry']
self.props['jobs'] = []
self.props['ncjobids'] = [] # non-cached job ids
self.props['defaultSh'] = self.config['defaultSh']
self.props['channel'] = channel.create()
self.props['length'] = 0
# remember which property is set, then it won't be overwritten by configurations
self.props['sets'] = []
self.props['procvars'] = {}
self.props['workdir'] = ''
# for unittest, in real case, the logger will be got from pyppl
self.props['logger'] = None
self.props['args'] = self.config['args']
self.props['aggr'] = self.config['aggr']
self.props['callback'] = self.config['callback']
self.props['brings'] = self.config['brings']
self.props['suffix'] = ''
self.props['lognline'] = {key:0 for key in proc.LOG_NLINE.keys()}
self.props['lognline']['prevlog'] = ''
def __getattr__ (self, name):
if not self.props.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Property "%s" of proc is not found' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
return self.props[name]
def __setattr__ (self, name, value):
if not self.config.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Cannot set property "%s" for proc instance' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
self.sets.append(name)
self.config[name] = value
if name == 'depends':
# remove me from nexts of my previous depends
for depend in self.depends:
if not self in depend.nexts:
continue
del depend.props['nexts'][depend.nexts.index(self)]
self.props['depends'] = []
depends = value
if not isinstance (value, list):
depends = [value]
for depend in depends:
if isinstance (depend, proc):
self.props['depends'].append (depend)
if self not in depend.nexts:
depend.nexts.append (self)
elif isinstance (depend, aggr):
for p in depend.ends:
self.props['depends'].append (p)
if self not in p.nexts:
p.nexts.append (self)
else:
self.props[name] = value
def log (self, msg, level="info", flag=None, key = ''):
"""
The log function with aggregation name, process id and tag integrated.
@params:
`msg`: The message to log
`level`: The log level
`flag`: The flag
`key`: The type of messages
"""
if flag is None:
flag = level
flag = flag.upper().rjust(7)
flag = "[%s]" % flag
title = self._name()
func = getattr(self.logger, level)
maxline = proc.LOG_NLINE[key]
prevlog = self.lognline['prevlog']
if key == prevlog:
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
else:
n_omit = self.lognline[prevlog] - abs(proc.LOG_NLINE[prevlog])
if n_omit > 0 and proc.LOG_NLINE[prevlog] < 0:
logname = 'logs' if n_omit > 1 else 'log'
maxinfo = ' (%s, max=%s)' % (prevlog, abs(proc.LOG_NLINE[prevlog])) if prevlog else ''
self.logger.debug ("[ DEBUG] %s: ... and %s %s omitted%s." % (title, n_omit, logname, maxinfo))
self.lognline[prevlog] = 0
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
self.lognline['prevlog'] = key
self.lognline[key] += 1
def copy (self, tag=None, newid=None):
"""
Copy a process
@params:
`newid`: The new id of the process, default: `None` (use the varname)
`tag`: The tag of the new process, default: `None` (used the old one)
@returns:
The new process
"""
newproc = proc (tag if tag is not None else self.tag)
config = {key:val for key, val in self.config.iteritems() if key not in ['tag', 'workdir', 'aggr']}
config['tag'] = newproc.tag
config['aggr'] = ''
config['workdir'] = ''
props = {key:val for key, val in self.props.iteritems() if key not in ['cached', 'procvars', 'ncjobids', 'sets', 'channel', 'jobs', 'depends', 'nexts', 'tag', 'workdir', 'id', 'args']}
props['cached'] = True
props['procvars'] = {}
props['channel'] = channel.create()
props['depends'] = []
props['nexts'] = []
props['jobs'] = []
props['ncjobids'] = []
props['sets'] = []
props['workdir'] = ''
props['args'] = pycopy.copy(self.props['args'])
props['id'] = utils.varname(r'\w+\.' + self.copy.__name__, 3) if newid is None else newid
newproc.__dict__['config'].update(config)
newproc.__dict__['props'].update(props)
return newproc
def _suffix (self):
"""
Calcuate a uid for the process according to the configuration
@returns:
The uid
"""
if self.suffix:
return self.suffix
config = { key:val for key, val in self.config.iteritems() if key not in ['workdir', 'forks', 'cache', 'retcodes', 'echo', 'runner', 'exportdir', 'exporthow', 'exportow', 'errorhow', 'errorntry'] or key.endswith ('Runner') }
config['id'] = self.id
config['tag'] = self.tag
if config.has_key ('callback'):
config['callback'] = utils.funcsig(config['callback'])
# proc is not picklable
if config.has_key('depends'):
depends = config['depends']
pickable_depends = []
if isinstance(depends, proc):
depends = [depends]
elif isinstance(depends, aggr):
depends = depends.procs
for depend in depends:
pickable_depends.append(depend.id + '.' + depend.tag)
config['depends'] = pickable_depends
# lambda not pickable
if config.has_key ('input') and isinstance(config['input'], dict):
config['input'] = pycopy.copy(config['input'])
for key, val in config['input'].iteritems():
config['input'][key] = utils.funcsig(val) if callable(val) else val
signature = pickle.dumps(str(config))
self.props['suffix'] = utils.uid(signature)
return self.suffix
def _tidyBeforeRun (self):
"""
Do some preparation before running jobs
"""
self._buildProps ()
self._buildInput ()
self._buildProcVars ()
self._buildJobs ()
def _tidyAfterRun (self):
"""
Do some cleaning after running jobs
"""
failedjobs = []
for i in self.ncjobids:
job = self.jobs[i]
if not job.succeed():
failedjobs.append (job)
if not failedjobs:
self.log ('Successful jobs: ALL', 'debug')
if callable (self.callback):
self.log('Calling callback ...', 'debug')
self.callback (self)
else:
failedjobs[0].showError (len(failedjobs))
if self.errorhow != 'ignore':
sys.exit (1) # don't go further
def _name (self, incAggr = True):
"""
Get my name include `aggr`, `id`, `tag`
@returns:
the name
"""
aggrName = "@%s" % self.aggr if self.aggr and incAggr else ""
tag = ".%s" % self.tag if self.tag != "notag" else ""
return "%s%s%s" % (self.id, tag, aggrName)
def run (self, config = None):
"""
Run the jobs with a configuration
@params:
`config`: The configuration
"""
timer = time()
if config is None:
config = {}
self.logger.info ('[ START] ' + utils.padBoth(' ' + self._name() + ' ', 80, '-'))
# log the dependencies
self.log ("%s => %s => %s" % ([p._name() for p in self.depends] if self.depends else "START", self._name(), [p._name() for p in self.nexts] if self.nexts else "END"), "info", "depends")
self._readConfig (config)
self._tidyBeforeRun ()
if self._runCmd('beforeCmd') != 0:
raise Exception ('Failed to run beforeCmd: %s' % self.beforeCmd)
if not self._isCached():
# I am not cached, touch the input of my nexts?
# but my nexts are not initized, how?
# set cached to False, then my nexts will access it
self.props['cached'] = False
self.log (self.workdir, 'info', 'RUNNING')
self._runJobs()
if self._runCmd('afterCmd') != 0:
raise Exception ('Failed to run afterCmd: %s' % self.afterCmd)
self._tidyAfterRun ()
self.log ('Done (time: %s).' % utils.formatTime(time() - timer), 'info')
def _buildProps (self):
"""
Compute some properties
"""
if isinstance (self.retcodes, int):
self.props['retcodes'] = [self.retcodes]
if isinstance (self.retcodes, str):
self.props['retcodes'] = [int(i) for i in self.retcodes.split(',')]
key = self._name(False)
if key in proc.PROCS and proc.PROCS[key] != self:
raise Exception ('A proc with id "%s" and tag "%s" already exists.' % (self.id, self.tag))
proc.PROCS[key] = self
if not 'workdir' in self.sets and not self.workdir:
self.props['workdir'] = os.path.join(self.ppldir, "PyPPL.%s.%s.%s" % (self.id, self.tag, self._suffix()))
if not os.path.exists (self.workdir):
os.makedirs (self.workdir)
def _buildInput (self):
"""
Build the input data
Input could be:
1. list: ['input', 'infile:file'] <=> ['input:var', 'infile:path']
2. str : "input, infile:file" <=> input:var, infile:path
3. dict: {"input": channel1, "infile:file": channel2}
or {"input:var, input:file" : channel3}
for 1,2 channels will be the combined channel from dependents, if there is not dependents, it will be sys.argv[1:]
"""
indata = self.config['input']
if not isinstance (indata, dict):
indata = ','.join(utils.alwaysList (indata))
depdchan = channel.fromChannels (*[d.channel for d in self.depends])
indata = {indata: depdchan if self.depends else channel.fromArgv()}
# expand to one key-channel pairs
for inkeys, invals in indata.iteritems():
keys = utils.split(inkeys, ',')
if callable (invals):
vals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())
vals = vals.split()
elif isinstance (invals, basestring): # only for files: "/a/b/*.txt, /a/c/*.txt"
vals = utils.split(invals, ',')
elif isinstance (invals, channel):
vals = invals.split()
elif isinstance (invals, list):
vals = channel.create(invals).split()
else:
raise ValueError ("%s: Unexpected values for input. Expect dict, list, str, channel, callable." % self._name())
width = len (vals)
if len (keys) > width:
raise ValueError ('%s: Not enough data for input variables.\nVarialbes: %s\nData: %s' % (self._name(), keys, vals))
for i, key in enumerate(keys):
intype = key.split(':')[-1]
thekey = key.split(':')[0]
val = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]
if intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:
intype = proc.IN_VARTYPE[0]
if intype in proc.IN_FILESTYPE:
for x, v in enumerate(val):
if isinstance (v, basestring):
val[x] = channel.fromPath (v).toList()
if self.length == 0:
self.props['length'] = len (val)
if self.length != len (val):
raise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' % (self._name(), self.length, len (val), key))
self.props['indata'][thekey] = {
'type': intype,
'data': val
}
self.props['jobs'] = [None] * self.length
def _buildProcVars (self):
"""
also add proc.props, mostly scalar values
"""
alias = {val:key for key, val in proc.ALIAS.iteritems()}
for prop in sorted(self.props.keys()):
val = self.props[prop]
if not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',
'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',
'indir', 'outdir', 'length', 'args']:
continue
if prop == 'args':
self.props['procvars']['proc.args'] = val
for k, v in val.iteritems():
self.props['procvars']['proc.args.' + k] = v
self.log('%s => %s' % (k, v), 'info', 'p.args')
else:
self.props['procvars']['proc.' + prop] = val
if alias.has_key (prop):
self.props['procvars']['proc.' + alias[prop]] = val
self.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')
else:
self.log ('%s => %s' % (prop, val), 'info', 'p.props')
def _buildJobs (self):
rptjob = randint(0, self.length-1)
for i in range(self.length):
job = pjob (i, self)
self.jobs[i] = job
job.init ()
row = [x['data'] for x in job.output.values()]
self.channel.rbind (row)
self.jobs[rptjob].report()
def _readConfig (self, config):
"""
Read the configuration
@params:
`config`: The configuration
"""
conf = { key:val for key, val in config.iteritems() if key not in self.sets }
self.config.update (conf)
for key, val in conf.iteritems():
self.props[key] = val
def _isCached (self):
"""
Tell whether the jobs are cached
@returns:
True if all jobs are cached, otherwise False
"""
self.props['ncjobids'] = range(self.length)
if self.cache == False:
self.log ('Not cached, because proc.cache is False', 'debug')
return False
if self.cache == True:
for depend in self.depends:
if depend.cached: continue
self.log ('Not cached, my dependent "%s" is not cached.' % depend._name(), 'debug')
return False
trulyCachedJids = []
exptCachedJids = []
self.props['ncjobids'] = []
for i, job in enumerate(self.jobs):
job = self.jobs[i]
if job.isTrulyCached ():
trulyCachedJids.append(i)
elif job.isExptCached ():
exptCachedJids.append (i)
else:
self.props['ncjobids'].append (i)
self.log ('Truely cached jobs: %s' % (trulyCachedJids if len(trulyCachedJids) < self.length else 'ALL'), 'debug')
self.log ('Export cached jobs: %s' % (exptCachedJids if len(exptCachedJids) < self.length else 'ALL'), 'debug')
if self.ncjobids:
if len(self.ncjobids) < self.length:
self.log ('Partly cached, only run non-cached %s job(s).' % len(self.ncjobids), 'info')
self.log ('Jobs to be running: %s' % self.ncjobids, 'debug')
else:
self.log ('Not cached, none of the jobs are cached.', 'info')
return False
else:
self.log (self.workdir, 'info', 'CACHED')
return True
def _runCmd (self, key):
"""
Run the `beforeCmd` or `afterCmd`
@params:
`key`: "beforeCmd" or "afterCmd"
@returns:
The return code of the command
"""
if not self.props[key]:
return 0
cmd = utils.format(self.props[key], self.procvars)
self.log ('Running <%s>: %s' % (key, cmd), 'info')
p = Popen (cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)
if self.echo:
for line in iter(p.stdout.readline, ''):
self.logger.info ('[ STDOUT] ' + line.rstrip("\n"))
for line in iter(p.stderr.readline, ''):
self.logger.error ('[ STDERR] ' + line.rstrip("\n"))
return p.wait()
def | (self):
"""
Submit and run the jobs
"""
# submit jobs
def sworker (q):
"""
The worker to run jobs
"""
while True:
(run, i) = q.get()
sleep (i)
if run.isRunning():
self.log ("Job #%s is already running, skip submitting." % run.job.index, 'info')
else:
run.submit()
run.wait()
run.finish()
q.task_done()
runner = proc.RUNNERS[self.runner]
maxsubmit = self.forks
if hasattr(runner, 'maxsubmit'):
maxsubmit = runner.maxsubmit
interval = .1
if hasattr(runner, 'interval'):
interval = runner.interval
sq = Queue()
for i in self.ncjobids:
rjob = runner (self.jobs[i])
tm = int(i/maxsubmit) * interval
sq.put ((rjob, tm))
# submit jobs
nojobs2submit = min (self.forks, len(self.ncjobids))
for i in range (nojobs2submit):
t = threading.Thread(target = sworker, args = (sq, ))
t.daemon = True
t.start ()
sq.join()
@staticmethod
def registerRunner (runner):
"""
Register a runner
@params:
`runner`: The runner to be registered.
"""
runner_name = runner.__name__
if runner_name.startswith ('runner_'):
runner_name = runner_name[7:]
if not proc.RUNNERS.has_key(runner_name):
proc.RUNNERS[runner_name] = runner
proc.registerRunner (runner_local)
proc.registerRunner (runner_sge)
proc.registerRunner (runner_ssh)
| _runJobs | identifier_name |
proc.py | """
proc module for pyppl
"""
import copy as pycopy
import os
import pickle
import sys
import threading
from Queue import Queue
from random import randint
from subprocess import PIPE, Popen
from time import sleep, time
from . import utils
from .aggr import aggr
from .channel import channel
from .job import job as pjob
from ..runners import runner_local, runner_sge, runner_ssh
class proc (object):
"""
The proc class defining a process
@static variables:
`RUNNERS`: The regiested runners
`PROCS`: The "<id>.<tag>" initialized processes, used to detected whether there are two processes with the same id and tag.
`ALIAS`: The alias for the properties
`LOG_NLINE`: The limit of lines of logging information of same type of messages
@magic methods:
`__getattr__(self, name)`: get the value of a property in `self.props`
`__setattr__(self, name, value)`: set the value of a property in `self.config`
"""
RUNNERS = {}
PROCS = {}
ALIAS = {
'exdir': 'exportdir',
'exhow': 'exporthow',
'exow': 'exportow',
'errhow': 'errorhow',
'errntry': 'errorntry',
'lang': 'defaultSh',
'rc': 'retcodes',
'ppldir': 'tmpdir'
}
LOG_NLINE = {
'': 999,
'EXPORT_CACHE_OUTFILE_EXISTS': -3,
'EXPORT_CACHE_USING_SYMLINK': 3,
'BRINGFILE_OVERWRITING': 3,
'OUTNAME_USING_OUTTYPES': 1,
'OUTDIR_CREATED': 0,
'OUTDIR_CREATED_AFTER_RESET': 0,
'SCRIPT_USING_TEMPLATE': 1,
'SCRIPT_EXISTS': -2,
'NOSCRIPT': 1,
'JOB_RESETTING': 0,
'INFILE_OVERWRITING': -3
}
OUT_VARTYPE = ['var']
OUT_FILETYPE = ['file', 'path']
OUT_DIRTYPE = ['dir', 'folder']
IN_VARTYPE = ['var']
IN_FILETYPE = ['file', 'path', 'dir', 'folder']
IN_FILESTYPE = ['files', 'paths', 'dirs', 'folders']
EX_GZIP = ['gzip', 'gz']
EX_COPY = ['copy', 'cp']
EX_MOVE = ['move', 'mv']
EX_SYMLINK = ['link', 'symlink', 'symbol']
def __init__ (self, tag = 'notag'):
"""
Constructor
@params:
`tag`: The tag of the process
"""
# computed props
self.__dict__['props'] = {}
# configs
self.__dict__['config'] = {}
pid = utils.varname(self.__class__.__name__, 2)
self.config['input'] = ''
self.config['output'] = {}
# where cache file and wdir located
self.config['tmpdir'] = os.path.abspath("./workdir")
self.config['forks'] = 1
self.config['cache'] = True # False or 'export' or 'export+' (do True if failed do export)
self.config['retcodes'] = [0]
self.config['echo'] = False
self.config['runner'] = 'local'
self.config['script'] = ''
self.config['depends'] = []
self.config['tag'] = tag
self.config['exportdir'] = ''
self.config['exporthow'] = 'move' # symlink, copy, gzip
self.config['exportow'] = True # overwrite
self.config['errorhow'] = "terminate" # retry, ignore
self.config['errorntry'] = 3
self.config['defaultSh'] = 'bash'
self.config['beforeCmd'] = ""
self.config['afterCmd'] = ""
self.config['workdir'] = ''
self.config['args'] = {}
self.config['channel'] = channel.create()
self.config['aggr'] = None
self.config['callback'] = None
self.config['brings'] = {}
# init props
# id of the process, actually it's the variable name of the process
self.props['id'] = pid
# the tag
self.props['tag'] = tag
# the cachefile, cache file will be in <tmpdir>/<cachefile>
#self.props['cachefile'] = 'cached.jobs'
# which processes this one depents on
self.props['depends'] = []
# the script
self.props['script'] = ""
self.props['input'] = ''
self.props['indata'] = {}
self.props['output'] = ''
self.props['depends'] = self.config['depends']
self.props['nexts'] = []
self.props['tmpdir'] = self.config['tmpdir']
self.props['forks'] = self.config['forks']
self.props['cache'] = self.config['cache']
self.props['cached'] = True
self.props['retcodes'] = self.config['retcodes']
self.props['beforeCmd'] = self.config['beforeCmd']
self.props['afterCmd'] = self.config['afterCmd']
self.props['echo'] = self.config['echo']
self.props['runner'] = self.config['runner']
self.props['exportdir'] = self.config['exportdir']
self.props['exporthow'] = self.config['exporthow']
self.props['exportow'] = self.config['exportow']
self.props['errorhow'] = self.config['errorhow']
self.props['errorntry'] = self.config['errorntry']
self.props['jobs'] = []
self.props['ncjobids'] = [] # non-cached job ids
self.props['defaultSh'] = self.config['defaultSh']
self.props['channel'] = channel.create()
self.props['length'] = 0
# remember which property is set, then it won't be overwritten by configurations
self.props['sets'] = []
self.props['procvars'] = {}
self.props['workdir'] = ''
# for unittest, in real case, the logger will be got from pyppl
self.props['logger'] = None
self.props['args'] = self.config['args']
self.props['aggr'] = self.config['aggr']
self.props['callback'] = self.config['callback']
self.props['brings'] = self.config['brings']
self.props['suffix'] = ''
self.props['lognline'] = {key:0 for key in proc.LOG_NLINE.keys()}
self.props['lognline']['prevlog'] = ''
def __getattr__ (self, name):
if not self.props.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Property "%s" of proc is not found' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
return self.props[name]
def __setattr__ (self, name, value):
if not self.config.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Cannot set property "%s" for proc instance' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
self.sets.append(name)
self.config[name] = value
if name == 'depends':
# remove me from nexts of my previous depends
for depend in self.depends:
if not self in depend.nexts:
continue
del depend.props['nexts'][depend.nexts.index(self)]
self.props['depends'] = []
depends = value
if not isinstance (value, list):
depends = [value]
for depend in depends:
if isinstance (depend, proc):
self.props['depends'].append (depend)
if self not in depend.nexts:
depend.nexts.append (self)
elif isinstance (depend, aggr):
for p in depend.ends:
self.props['depends'].append (p)
if self not in p.nexts:
p.nexts.append (self)
else:
self.props[name] = value
def log (self, msg, level="info", flag=None, key = ''):
"""
The log function with aggregation name, process id and tag integrated.
@params:
`msg`: The message to log
`level`: The log level
`flag`: The flag
`key`: The type of messages
"""
if flag is None:
flag = level
flag = flag.upper().rjust(7)
flag = "[%s]" % flag
title = self._name()
func = getattr(self.logger, level)
maxline = proc.LOG_NLINE[key]
prevlog = self.lognline['prevlog']
if key == prevlog:
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
else:
n_omit = self.lognline[prevlog] - abs(proc.LOG_NLINE[prevlog])
if n_omit > 0 and proc.LOG_NLINE[prevlog] < 0:
logname = 'logs' if n_omit > 1 else 'log'
maxinfo = ' (%s, max=%s)' % (prevlog, abs(proc.LOG_NLINE[prevlog])) if prevlog else ''
self.logger.debug ("[ DEBUG] %s: ... and %s %s omitted%s." % (title, n_omit, logname, maxinfo))
self.lognline[prevlog] = 0
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
self.lognline['prevlog'] = key
self.lognline[key] += 1
def copy (self, tag=None, newid=None):
"""
Copy a process
@params:
`newid`: The new id of the process, default: `None` (use the varname)
`tag`: The tag of the new process, default: `None` (used the old one)
@returns:
The new process
"""
newproc = proc (tag if tag is not None else self.tag)
config = {key:val for key, val in self.config.iteritems() if key not in ['tag', 'workdir', 'aggr']}
config['tag'] = newproc.tag
config['aggr'] = ''
config['workdir'] = ''
props = {key:val for key, val in self.props.iteritems() if key not in ['cached', 'procvars', 'ncjobids', 'sets', 'channel', 'jobs', 'depends', 'nexts', 'tag', 'workdir', 'id', 'args']}
props['cached'] = True
props['procvars'] = {}
props['channel'] = channel.create()
props['depends'] = []
props['nexts'] = []
props['jobs'] = []
props['ncjobids'] = []
props['sets'] = []
props['workdir'] = ''
props['args'] = pycopy.copy(self.props['args'])
props['id'] = utils.varname(r'\w+\.' + self.copy.__name__, 3) if newid is None else newid
newproc.__dict__['config'].update(config)
newproc.__dict__['props'].update(props)
return newproc
def _suffix (self):
"""
Calcuate a uid for the process according to the configuration
@returns:
The uid
"""
if self.suffix:
return self.suffix
config = { key:val for key, val in self.config.iteritems() if key not in ['workdir', 'forks', 'cache', 'retcodes', 'echo', 'runner', 'exportdir', 'exporthow', 'exportow', 'errorhow', 'errorntry'] or key.endswith ('Runner') }
config['id'] = self.id
config['tag'] = self.tag
if config.has_key ('callback'):
config['callback'] = utils.funcsig(config['callback'])
# proc is not picklable
if config.has_key('depends'):
depends = config['depends']
pickable_depends = []
if isinstance(depends, proc):
depends = [depends]
elif isinstance(depends, aggr):
depends = depends.procs
for depend in depends:
pickable_depends.append(depend.id + '.' + depend.tag)
config['depends'] = pickable_depends
# lambda not pickable
if config.has_key ('input') and isinstance(config['input'], dict):
config['input'] = pycopy.copy(config['input'])
for key, val in config['input'].iteritems():
config['input'][key] = utils.funcsig(val) if callable(val) else val
signature = pickle.dumps(str(config))
self.props['suffix'] = utils.uid(signature)
return self.suffix
def _tidyBeforeRun (self):
"""
Do some preparation before running jobs
"""
self._buildProps ()
self._buildInput ()
self._buildProcVars ()
self._buildJobs ()
def _tidyAfterRun (self):
"""
Do some cleaning after running jobs
"""
failedjobs = []
for i in self.ncjobids:
job = self.jobs[i]
if not job.succeed():
failedjobs.append (job)
if not failedjobs:
self.log ('Successful jobs: ALL', 'debug')
if callable (self.callback):
self.log('Calling callback ...', 'debug')
self.callback (self)
else:
failedjobs[0].showError (len(failedjobs))
if self.errorhow != 'ignore':
sys.exit (1) # don't go further
def _name (self, incAggr = True):
"""
Get my name include `aggr`, `id`, `tag`
@returns:
the name
"""
aggrName = "@%s" % self.aggr if self.aggr and incAggr else ""
tag = ".%s" % self.tag if self.tag != "notag" else ""
return "%s%s%s" % (self.id, tag, aggrName)
def run (self, config = None):
"""
Run the jobs with a configuration
@params:
`config`: The configuration
"""
timer = time()
if config is None:
config = {}
self.logger.info ('[ START] ' + utils.padBoth(' ' + self._name() + ' ', 80, '-'))
# log the dependencies
self.log ("%s => %s => %s" % ([p._name() for p in self.depends] if self.depends else "START", self._name(), [p._name() for p in self.nexts] if self.nexts else "END"), "info", "depends")
self._readConfig (config)
self._tidyBeforeRun ()
if self._runCmd('beforeCmd') != 0:
raise Exception ('Failed to run beforeCmd: %s' % self.beforeCmd)
if not self._isCached():
# I am not cached, touch the input of my nexts?
# but my nexts are not initized, how? | raise Exception ('Failed to run afterCmd: %s' % self.afterCmd)
self._tidyAfterRun ()
self.log ('Done (time: %s).' % utils.formatTime(time() - timer), 'info')
def _buildProps (self):
"""
Compute some properties
"""
if isinstance (self.retcodes, int):
self.props['retcodes'] = [self.retcodes]
if isinstance (self.retcodes, str):
self.props['retcodes'] = [int(i) for i in self.retcodes.split(',')]
key = self._name(False)
if key in proc.PROCS and proc.PROCS[key] != self:
raise Exception ('A proc with id "%s" and tag "%s" already exists.' % (self.id, self.tag))
proc.PROCS[key] = self
if not 'workdir' in self.sets and not self.workdir:
self.props['workdir'] = os.path.join(self.ppldir, "PyPPL.%s.%s.%s" % (self.id, self.tag, self._suffix()))
if not os.path.exists (self.workdir):
os.makedirs (self.workdir)
def _buildInput (self):
"""
Build the input data
Input could be:
1. list: ['input', 'infile:file'] <=> ['input:var', 'infile:path']
2. str : "input, infile:file" <=> input:var, infile:path
3. dict: {"input": channel1, "infile:file": channel2}
or {"input:var, input:file" : channel3}
for 1,2 channels will be the combined channel from dependents, if there is not dependents, it will be sys.argv[1:]
"""
indata = self.config['input']
if not isinstance (indata, dict):
indata = ','.join(utils.alwaysList (indata))
depdchan = channel.fromChannels (*[d.channel for d in self.depends])
indata = {indata: depdchan if self.depends else channel.fromArgv()}
# expand to one key-channel pairs
for inkeys, invals in indata.iteritems():
keys = utils.split(inkeys, ',')
if callable (invals):
vals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())
vals = vals.split()
elif isinstance (invals, basestring): # only for files: "/a/b/*.txt, /a/c/*.txt"
vals = utils.split(invals, ',')
elif isinstance (invals, channel):
vals = invals.split()
elif isinstance (invals, list):
vals = channel.create(invals).split()
else:
raise ValueError ("%s: Unexpected values for input. Expect dict, list, str, channel, callable." % self._name())
width = len (vals)
if len (keys) > width:
raise ValueError ('%s: Not enough data for input variables.\nVarialbes: %s\nData: %s' % (self._name(), keys, vals))
for i, key in enumerate(keys):
intype = key.split(':')[-1]
thekey = key.split(':')[0]
val = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]
if intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:
intype = proc.IN_VARTYPE[0]
if intype in proc.IN_FILESTYPE:
for x, v in enumerate(val):
if isinstance (v, basestring):
val[x] = channel.fromPath (v).toList()
if self.length == 0:
self.props['length'] = len (val)
if self.length != len (val):
raise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' % (self._name(), self.length, len (val), key))
self.props['indata'][thekey] = {
'type': intype,
'data': val
}
self.props['jobs'] = [None] * self.length
def _buildProcVars (self):
"""
also add proc.props, mostly scalar values
"""
alias = {val:key for key, val in proc.ALIAS.iteritems()}
for prop in sorted(self.props.keys()):
val = self.props[prop]
if not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',
'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',
'indir', 'outdir', 'length', 'args']:
continue
if prop == 'args':
self.props['procvars']['proc.args'] = val
for k, v in val.iteritems():
self.props['procvars']['proc.args.' + k] = v
self.log('%s => %s' % (k, v), 'info', 'p.args')
else:
self.props['procvars']['proc.' + prop] = val
if alias.has_key (prop):
self.props['procvars']['proc.' + alias[prop]] = val
self.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')
else:
self.log ('%s => %s' % (prop, val), 'info', 'p.props')
def _buildJobs (self):
rptjob = randint(0, self.length-1)
for i in range(self.length):
job = pjob (i, self)
self.jobs[i] = job
job.init ()
row = [x['data'] for x in job.output.values()]
self.channel.rbind (row)
self.jobs[rptjob].report()
def _readConfig (self, config):
"""
Read the configuration
@params:
`config`: The configuration
"""
conf = { key:val for key, val in config.iteritems() if key not in self.sets }
self.config.update (conf)
for key, val in conf.iteritems():
self.props[key] = val
def _isCached (self):
"""
Tell whether the jobs are cached
@returns:
True if all jobs are cached, otherwise False
"""
self.props['ncjobids'] = range(self.length)
if self.cache == False:
self.log ('Not cached, because proc.cache is False', 'debug')
return False
if self.cache == True:
for depend in self.depends:
if depend.cached: continue
self.log ('Not cached, my dependent "%s" is not cached.' % depend._name(), 'debug')
return False
trulyCachedJids = []
exptCachedJids = []
self.props['ncjobids'] = []
for i, job in enumerate(self.jobs):
job = self.jobs[i]
if job.isTrulyCached ():
trulyCachedJids.append(i)
elif job.isExptCached ():
exptCachedJids.append (i)
else:
self.props['ncjobids'].append (i)
self.log ('Truely cached jobs: %s' % (trulyCachedJids if len(trulyCachedJids) < self.length else 'ALL'), 'debug')
self.log ('Export cached jobs: %s' % (exptCachedJids if len(exptCachedJids) < self.length else 'ALL'), 'debug')
if self.ncjobids:
if len(self.ncjobids) < self.length:
self.log ('Partly cached, only run non-cached %s job(s).' % len(self.ncjobids), 'info')
self.log ('Jobs to be running: %s' % self.ncjobids, 'debug')
else:
self.log ('Not cached, none of the jobs are cached.', 'info')
return False
else:
self.log (self.workdir, 'info', 'CACHED')
return True
def _runCmd (self, key):
"""
Run the `beforeCmd` or `afterCmd`
@params:
`key`: "beforeCmd" or "afterCmd"
@returns:
The return code of the command
"""
if not self.props[key]:
return 0
cmd = utils.format(self.props[key], self.procvars)
self.log ('Running <%s>: %s' % (key, cmd), 'info')
p = Popen (cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)
if self.echo:
for line in iter(p.stdout.readline, ''):
self.logger.info ('[ STDOUT] ' + line.rstrip("\n"))
for line in iter(p.stderr.readline, ''):
self.logger.error ('[ STDERR] ' + line.rstrip("\n"))
return p.wait()
def _runJobs (self):
"""
Submit and run the jobs
"""
# submit jobs
def sworker (q):
"""
The worker to run jobs
"""
while True:
(run, i) = q.get()
sleep (i)
if run.isRunning():
self.log ("Job #%s is already running, skip submitting." % run.job.index, 'info')
else:
run.submit()
run.wait()
run.finish()
q.task_done()
runner = proc.RUNNERS[self.runner]
maxsubmit = self.forks
if hasattr(runner, 'maxsubmit'):
maxsubmit = runner.maxsubmit
interval = .1
if hasattr(runner, 'interval'):
interval = runner.interval
sq = Queue()
for i in self.ncjobids:
rjob = runner (self.jobs[i])
tm = int(i/maxsubmit) * interval
sq.put ((rjob, tm))
# submit jobs
nojobs2submit = min (self.forks, len(self.ncjobids))
for i in range (nojobs2submit):
t = threading.Thread(target = sworker, args = (sq, ))
t.daemon = True
t.start ()
sq.join()
@staticmethod
def registerRunner (runner):
"""
Register a runner
@params:
`runner`: The runner to be registered.
"""
runner_name = runner.__name__
if runner_name.startswith ('runner_'):
runner_name = runner_name[7:]
if not proc.RUNNERS.has_key(runner_name):
proc.RUNNERS[runner_name] = runner
proc.registerRunner (runner_local)
proc.registerRunner (runner_sge)
proc.registerRunner (runner_ssh) | # set cached to False, then my nexts will access it
self.props['cached'] = False
self.log (self.workdir, 'info', 'RUNNING')
self._runJobs()
if self._runCmd('afterCmd') != 0: | random_line_split |
proc.py | """
proc module for pyppl
"""
import copy as pycopy
import os
import pickle
import sys
import threading
from Queue import Queue
from random import randint
from subprocess import PIPE, Popen
from time import sleep, time
from . import utils
from .aggr import aggr
from .channel import channel
from .job import job as pjob
from ..runners import runner_local, runner_sge, runner_ssh
class proc (object):
"""
The proc class defining a process
@static variables:
`RUNNERS`: The regiested runners
`PROCS`: The "<id>.<tag>" initialized processes, used to detected whether there are two processes with the same id and tag.
`ALIAS`: The alias for the properties
`LOG_NLINE`: The limit of lines of logging information of same type of messages
@magic methods:
`__getattr__(self, name)`: get the value of a property in `self.props`
`__setattr__(self, name, value)`: set the value of a property in `self.config`
"""
RUNNERS = {}
PROCS = {}
ALIAS = {
'exdir': 'exportdir',
'exhow': 'exporthow',
'exow': 'exportow',
'errhow': 'errorhow',
'errntry': 'errorntry',
'lang': 'defaultSh',
'rc': 'retcodes',
'ppldir': 'tmpdir'
}
LOG_NLINE = {
'': 999,
'EXPORT_CACHE_OUTFILE_EXISTS': -3,
'EXPORT_CACHE_USING_SYMLINK': 3,
'BRINGFILE_OVERWRITING': 3,
'OUTNAME_USING_OUTTYPES': 1,
'OUTDIR_CREATED': 0,
'OUTDIR_CREATED_AFTER_RESET': 0,
'SCRIPT_USING_TEMPLATE': 1,
'SCRIPT_EXISTS': -2,
'NOSCRIPT': 1,
'JOB_RESETTING': 0,
'INFILE_OVERWRITING': -3
}
OUT_VARTYPE = ['var']
OUT_FILETYPE = ['file', 'path']
OUT_DIRTYPE = ['dir', 'folder']
IN_VARTYPE = ['var']
IN_FILETYPE = ['file', 'path', 'dir', 'folder']
IN_FILESTYPE = ['files', 'paths', 'dirs', 'folders']
EX_GZIP = ['gzip', 'gz']
EX_COPY = ['copy', 'cp']
EX_MOVE = ['move', 'mv']
EX_SYMLINK = ['link', 'symlink', 'symbol']
def __init__ (self, tag = 'notag'):
"""
Constructor
@params:
`tag`: The tag of the process
"""
# computed props
self.__dict__['props'] = {}
# configs
self.__dict__['config'] = {}
pid = utils.varname(self.__class__.__name__, 2)
self.config['input'] = ''
self.config['output'] = {}
# where cache file and wdir located
self.config['tmpdir'] = os.path.abspath("./workdir")
self.config['forks'] = 1
self.config['cache'] = True # False or 'export' or 'export+' (do True if failed do export)
self.config['retcodes'] = [0]
self.config['echo'] = False
self.config['runner'] = 'local'
self.config['script'] = ''
self.config['depends'] = []
self.config['tag'] = tag
self.config['exportdir'] = ''
self.config['exporthow'] = 'move' # symlink, copy, gzip
self.config['exportow'] = True # overwrite
self.config['errorhow'] = "terminate" # retry, ignore
self.config['errorntry'] = 3
self.config['defaultSh'] = 'bash'
self.config['beforeCmd'] = ""
self.config['afterCmd'] = ""
self.config['workdir'] = ''
self.config['args'] = {}
self.config['channel'] = channel.create()
self.config['aggr'] = None
self.config['callback'] = None
self.config['brings'] = {}
# init props
# id of the process, actually it's the variable name of the process
self.props['id'] = pid
# the tag
self.props['tag'] = tag
# the cachefile, cache file will be in <tmpdir>/<cachefile>
#self.props['cachefile'] = 'cached.jobs'
# which processes this one depents on
self.props['depends'] = []
# the script
self.props['script'] = ""
self.props['input'] = ''
self.props['indata'] = {}
self.props['output'] = ''
self.props['depends'] = self.config['depends']
self.props['nexts'] = []
self.props['tmpdir'] = self.config['tmpdir']
self.props['forks'] = self.config['forks']
self.props['cache'] = self.config['cache']
self.props['cached'] = True
self.props['retcodes'] = self.config['retcodes']
self.props['beforeCmd'] = self.config['beforeCmd']
self.props['afterCmd'] = self.config['afterCmd']
self.props['echo'] = self.config['echo']
self.props['runner'] = self.config['runner']
self.props['exportdir'] = self.config['exportdir']
self.props['exporthow'] = self.config['exporthow']
self.props['exportow'] = self.config['exportow']
self.props['errorhow'] = self.config['errorhow']
self.props['errorntry'] = self.config['errorntry']
self.props['jobs'] = []
self.props['ncjobids'] = [] # non-cached job ids
self.props['defaultSh'] = self.config['defaultSh']
self.props['channel'] = channel.create()
self.props['length'] = 0
# remember which property is set, then it won't be overwritten by configurations
self.props['sets'] = []
self.props['procvars'] = {}
self.props['workdir'] = ''
# for unittest, in real case, the logger will be got from pyppl
self.props['logger'] = None
self.props['args'] = self.config['args']
self.props['aggr'] = self.config['aggr']
self.props['callback'] = self.config['callback']
self.props['brings'] = self.config['brings']
self.props['suffix'] = ''
self.props['lognline'] = {key:0 for key in proc.LOG_NLINE.keys()}
self.props['lognline']['prevlog'] = ''
def __getattr__ (self, name):
if not self.props.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Property "%s" of proc is not found' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
return self.props[name]
def __setattr__ (self, name, value):
if not self.config.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Cannot set property "%s" for proc instance' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
self.sets.append(name)
self.config[name] = value
if name == 'depends':
# remove me from nexts of my previous depends
for depend in self.depends:
if not self in depend.nexts:
continue
del depend.props['nexts'][depend.nexts.index(self)]
self.props['depends'] = []
depends = value
if not isinstance (value, list):
depends = [value]
for depend in depends:
if isinstance (depend, proc):
self.props['depends'].append (depend)
if self not in depend.nexts:
depend.nexts.append (self)
elif isinstance (depend, aggr):
for p in depend.ends:
self.props['depends'].append (p)
if self not in p.nexts:
p.nexts.append (self)
else:
self.props[name] = value
def log (self, msg, level="info", flag=None, key = ''):
"""
The log function with aggregation name, process id and tag integrated.
@params:
`msg`: The message to log
`level`: The log level
`flag`: The flag
`key`: The type of messages
"""
if flag is None:
flag = level
flag = flag.upper().rjust(7)
flag = "[%s]" % flag
title = self._name()
func = getattr(self.logger, level)
maxline = proc.LOG_NLINE[key]
prevlog = self.lognline['prevlog']
if key == prevlog:
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
else:
n_omit = self.lognline[prevlog] - abs(proc.LOG_NLINE[prevlog])
if n_omit > 0 and proc.LOG_NLINE[prevlog] < 0:
logname = 'logs' if n_omit > 1 else 'log'
maxinfo = ' (%s, max=%s)' % (prevlog, abs(proc.LOG_NLINE[prevlog])) if prevlog else ''
self.logger.debug ("[ DEBUG] %s: ... and %s %s omitted%s." % (title, n_omit, logname, maxinfo))
self.lognline[prevlog] = 0
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
self.lognline['prevlog'] = key
self.lognline[key] += 1
def copy (self, tag=None, newid=None):
"""
Copy a process
@params:
`newid`: The new id of the process, default: `None` (use the varname)
`tag`: The tag of the new process, default: `None` (used the old one)
@returns:
The new process
"""
newproc = proc (tag if tag is not None else self.tag)
config = {key:val for key, val in self.config.iteritems() if key not in ['tag', 'workdir', 'aggr']}
config['tag'] = newproc.tag
config['aggr'] = ''
config['workdir'] = ''
props = {key:val for key, val in self.props.iteritems() if key not in ['cached', 'procvars', 'ncjobids', 'sets', 'channel', 'jobs', 'depends', 'nexts', 'tag', 'workdir', 'id', 'args']}
props['cached'] = True
props['procvars'] = {}
props['channel'] = channel.create()
props['depends'] = []
props['nexts'] = []
props['jobs'] = []
props['ncjobids'] = []
props['sets'] = []
props['workdir'] = ''
props['args'] = pycopy.copy(self.props['args'])
props['id'] = utils.varname(r'\w+\.' + self.copy.__name__, 3) if newid is None else newid
newproc.__dict__['config'].update(config)
newproc.__dict__['props'].update(props)
return newproc
def _suffix (self):
"""
Calcuate a uid for the process according to the configuration
@returns:
The uid
"""
if self.suffix:
return self.suffix
config = { key:val for key, val in self.config.iteritems() if key not in ['workdir', 'forks', 'cache', 'retcodes', 'echo', 'runner', 'exportdir', 'exporthow', 'exportow', 'errorhow', 'errorntry'] or key.endswith ('Runner') }
config['id'] = self.id
config['tag'] = self.tag
if config.has_key ('callback'):
config['callback'] = utils.funcsig(config['callback'])
# proc is not picklable
if config.has_key('depends'):
depends = config['depends']
pickable_depends = []
if isinstance(depends, proc):
depends = [depends]
elif isinstance(depends, aggr):
depends = depends.procs
for depend in depends:
pickable_depends.append(depend.id + '.' + depend.tag)
config['depends'] = pickable_depends
# lambda not pickable
if config.has_key ('input') and isinstance(config['input'], dict):
config['input'] = pycopy.copy(config['input'])
for key, val in config['input'].iteritems():
config['input'][key] = utils.funcsig(val) if callable(val) else val
signature = pickle.dumps(str(config))
self.props['suffix'] = utils.uid(signature)
return self.suffix
def _tidyBeforeRun (self):
"""
Do some preparation before running jobs
"""
self._buildProps ()
self._buildInput ()
self._buildProcVars ()
self._buildJobs ()
def _tidyAfterRun (self):
"""
Do some cleaning after running jobs
"""
failedjobs = []
for i in self.ncjobids:
job = self.jobs[i]
if not job.succeed():
failedjobs.append (job)
if not failedjobs:
self.log ('Successful jobs: ALL', 'debug')
if callable (self.callback):
self.log('Calling callback ...', 'debug')
self.callback (self)
else:
failedjobs[0].showError (len(failedjobs))
if self.errorhow != 'ignore':
sys.exit (1) # don't go further
def _name (self, incAggr = True):
"""
Get my name include `aggr`, `id`, `tag`
@returns:
the name
"""
aggrName = "@%s" % self.aggr if self.aggr and incAggr else ""
tag = ".%s" % self.tag if self.tag != "notag" else ""
return "%s%s%s" % (self.id, tag, aggrName)
def run (self, config = None):
"""
Run the jobs with a configuration
@params:
`config`: The configuration
"""
timer = time()
if config is None:
config = {}
self.logger.info ('[ START] ' + utils.padBoth(' ' + self._name() + ' ', 80, '-'))
# log the dependencies
self.log ("%s => %s => %s" % ([p._name() for p in self.depends] if self.depends else "START", self._name(), [p._name() for p in self.nexts] if self.nexts else "END"), "info", "depends")
self._readConfig (config)
self._tidyBeforeRun ()
if self._runCmd('beforeCmd') != 0:
raise Exception ('Failed to run beforeCmd: %s' % self.beforeCmd)
if not self._isCached():
# I am not cached, touch the input of my nexts?
# but my nexts are not initized, how?
# set cached to False, then my nexts will access it
self.props['cached'] = False
self.log (self.workdir, 'info', 'RUNNING')
self._runJobs()
if self._runCmd('afterCmd') != 0:
raise Exception ('Failed to run afterCmd: %s' % self.afterCmd)
self._tidyAfterRun ()
self.log ('Done (time: %s).' % utils.formatTime(time() - timer), 'info')
def _buildProps (self):
"""
Compute some properties
"""
if isinstance (self.retcodes, int):
self.props['retcodes'] = [self.retcodes]
if isinstance (self.retcodes, str):
self.props['retcodes'] = [int(i) for i in self.retcodes.split(',')]
key = self._name(False)
if key in proc.PROCS and proc.PROCS[key] != self:
raise Exception ('A proc with id "%s" and tag "%s" already exists.' % (self.id, self.tag))
proc.PROCS[key] = self
if not 'workdir' in self.sets and not self.workdir:
self.props['workdir'] = os.path.join(self.ppldir, "PyPPL.%s.%s.%s" % (self.id, self.tag, self._suffix()))
if not os.path.exists (self.workdir):
os.makedirs (self.workdir)
def _buildInput (self):
"""
Build the input data
Input could be:
1. list: ['input', 'infile:file'] <=> ['input:var', 'infile:path']
2. str : "input, infile:file" <=> input:var, infile:path
3. dict: {"input": channel1, "infile:file": channel2}
or {"input:var, input:file" : channel3}
for 1,2 channels will be the combined channel from dependents, if there is not dependents, it will be sys.argv[1:]
"""
indata = self.config['input']
if not isinstance (indata, dict):
indata = ','.join(utils.alwaysList (indata))
depdchan = channel.fromChannels (*[d.channel for d in self.depends])
indata = {indata: depdchan if self.depends else channel.fromArgv()}
# expand to one key-channel pairs
for inkeys, invals in indata.iteritems():
keys = utils.split(inkeys, ',')
if callable (invals):
vals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())
vals = vals.split()
elif isinstance (invals, basestring): # only for files: "/a/b/*.txt, /a/c/*.txt"
vals = utils.split(invals, ',')
elif isinstance (invals, channel):
vals = invals.split()
elif isinstance (invals, list):
vals = channel.create(invals).split()
else:
raise ValueError ("%s: Unexpected values for input. Expect dict, list, str, channel, callable." % self._name())
width = len (vals)
if len (keys) > width:
raise ValueError ('%s: Not enough data for input variables.\nVarialbes: %s\nData: %s' % (self._name(), keys, vals))
for i, key in enumerate(keys):
intype = key.split(':')[-1]
thekey = key.split(':')[0]
val = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]
if intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:
intype = proc.IN_VARTYPE[0]
if intype in proc.IN_FILESTYPE:
for x, v in enumerate(val):
if isinstance (v, basestring):
val[x] = channel.fromPath (v).toList()
if self.length == 0:
self.props['length'] = len (val)
if self.length != len (val):
raise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' % (self._name(), self.length, len (val), key))
self.props['indata'][thekey] = {
'type': intype,
'data': val
}
self.props['jobs'] = [None] * self.length
def _buildProcVars (self):
"""
also add proc.props, mostly scalar values
"""
alias = {val:key for key, val in proc.ALIAS.iteritems()}
for prop in sorted(self.props.keys()):
|
def _buildJobs (self):
rptjob = randint(0, self.length-1)
for i in range(self.length):
job = pjob (i, self)
self.jobs[i] = job
job.init ()
row = [x['data'] for x in job.output.values()]
self.channel.rbind (row)
self.jobs[rptjob].report()
def _readConfig (self, config):
"""
Read the configuration
@params:
`config`: The configuration
"""
conf = { key:val for key, val in config.iteritems() if key not in self.sets }
self.config.update (conf)
for key, val in conf.iteritems():
self.props[key] = val
def _isCached (self):
"""
Tell whether the jobs are cached
@returns:
True if all jobs are cached, otherwise False
"""
self.props['ncjobids'] = range(self.length)
if self.cache == False:
self.log ('Not cached, because proc.cache is False', 'debug')
return False
if self.cache == True:
for depend in self.depends:
if depend.cached: continue
self.log ('Not cached, my dependent "%s" is not cached.' % depend._name(), 'debug')
return False
trulyCachedJids = []
exptCachedJids = []
self.props['ncjobids'] = []
for i, job in enumerate(self.jobs):
job = self.jobs[i]
if job.isTrulyCached ():
trulyCachedJids.append(i)
elif job.isExptCached ():
exptCachedJids.append (i)
else:
self.props['ncjobids'].append (i)
self.log ('Truely cached jobs: %s' % (trulyCachedJids if len(trulyCachedJids) < self.length else 'ALL'), 'debug')
self.log ('Export cached jobs: %s' % (exptCachedJids if len(exptCachedJids) < self.length else 'ALL'), 'debug')
if self.ncjobids:
if len(self.ncjobids) < self.length:
self.log ('Partly cached, only run non-cached %s job(s).' % len(self.ncjobids), 'info')
self.log ('Jobs to be running: %s' % self.ncjobids, 'debug')
else:
self.log ('Not cached, none of the jobs are cached.', 'info')
return False
else:
self.log (self.workdir, 'info', 'CACHED')
return True
def _runCmd (self, key):
"""
Run the `beforeCmd` or `afterCmd`
@params:
`key`: "beforeCmd" or "afterCmd"
@returns:
The return code of the command
"""
if not self.props[key]:
return 0
cmd = utils.format(self.props[key], self.procvars)
self.log ('Running <%s>: %s' % (key, cmd), 'info')
p = Popen (cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)
if self.echo:
for line in iter(p.stdout.readline, ''):
self.logger.info ('[ STDOUT] ' + line.rstrip("\n"))
for line in iter(p.stderr.readline, ''):
self.logger.error ('[ STDERR] ' + line.rstrip("\n"))
return p.wait()
def _runJobs (self):
"""
Submit and run the jobs
"""
# submit jobs
def sworker (q):
"""
The worker to run jobs
"""
while True:
(run, i) = q.get()
sleep (i)
if run.isRunning():
self.log ("Job #%s is already running, skip submitting." % run.job.index, 'info')
else:
run.submit()
run.wait()
run.finish()
q.task_done()
runner = proc.RUNNERS[self.runner]
maxsubmit = self.forks
if hasattr(runner, 'maxsubmit'):
maxsubmit = runner.maxsubmit
interval = .1
if hasattr(runner, 'interval'):
interval = runner.interval
sq = Queue()
for i in self.ncjobids:
rjob = runner (self.jobs[i])
tm = int(i/maxsubmit) * interval
sq.put ((rjob, tm))
# submit jobs
nojobs2submit = min (self.forks, len(self.ncjobids))
for i in range (nojobs2submit):
t = threading.Thread(target = sworker, args = (sq, ))
t.daemon = True
t.start ()
sq.join()
@staticmethod
def registerRunner (runner):
"""
Register a runner
@params:
`runner`: The runner to be registered.
"""
runner_name = runner.__name__
if runner_name.startswith ('runner_'):
runner_name = runner_name[7:]
if not proc.RUNNERS.has_key(runner_name):
proc.RUNNERS[runner_name] = runner
proc.registerRunner (runner_local)
proc.registerRunner (runner_sge)
proc.registerRunner (runner_ssh)
| val = self.props[prop]
if not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',
'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',
'indir', 'outdir', 'length', 'args']:
continue
if prop == 'args':
self.props['procvars']['proc.args'] = val
for k, v in val.iteritems():
self.props['procvars']['proc.args.' + k] = v
self.log('%s => %s' % (k, v), 'info', 'p.args')
else:
self.props['procvars']['proc.' + prop] = val
if alias.has_key (prop):
self.props['procvars']['proc.' + alias[prop]] = val
self.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')
else:
self.log ('%s => %s' % (prop, val), 'info', 'p.props') | conditional_block |
madlibs.py | """\
The Mad Libs gaming bot.
"""
from __future__ import unicode_literals
import os
import random
import sys
import threading
from collections import defaultdict
from gizzylib import nlp
from itertools import repeat
from math import ceil, floor
from numpy import dot
from numpy.linalg import norm
bold = irc.style("bold")
underline = irc.style("underline")
def constant_factory(value):
"""Helper to construct constant value defaultdicts"""
return repeat(value).next
def gamethread(func):
"""Decorator for functions that are Timer game threads.
Thread removes self from registry of threads in module state."""
def new_func(*args, **kwargs):
state = args[1]
state['threads'].pop(threading.current_thread().ident, None)
func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
def generate_madlib(state):
"""Generates a Mad Lib from a line out of the chosen corpus."""
line = None
while not line:
if not state['corpus']:
if state['options']['corpus'] == "None":
name = None
else:
name = state['options']['corpus']
if state['options']['corporaset'] == "None":
set = None
else:
set = state['options']['corporaset']
# will raise IOError if corpus invalid
if name:
state['corpus'] = nlp.corpus(set=set, name=name)
else:
state['corpus'] = nlp.random_corpus(set=set)
try:
line = nlp.random_line(state['corpus'])
except UnicodeDecodeError:
state['corpus'] == None
doc = nlp.nlp(line)
# truncate line if too long
maxlen = state['options']['linemaxlen']
if len(line) > maxlen:
line = ""
for span in doc.sents:
sent = ''.join(doc[i].string for i in range(
span.start, span.end
)).strip()
if len(line) + len(sent) > maxlen:
break
line += sent + " "
doc = nlp.nlp(line)
ddict = defaultdict(list)
for (index, token) in enumerate(doc):
if token.pos_ in ['ADJ', 'ADV', 'NOUN', 'VERB']:
ddict[token].append(index)
slist = sorted(ddict, key=lambda t: t.prob)
# build list of tokens+whitespace from parsed output
words = map(lambda x: x.string, list(doc))
# 2 subs + 1 more per word wrap line
limit = min(len(line) / 80 + 2, 6)
slots = []
for t in slist[:limit]:
for ctr in ddict[t]:
words[ctr] = underline + u" " + t.pos_ + " " +\
underline + t.whitespace_
slots.append(ctr)
slots.sort()
state['doc'] = doc
state['text'] = "".join(words)
state['textshape'] = slots
@gamethread
def warntime(msg, state):
msg.reply(bold + "*** {} second warning! ***".format(
state['options']['warntime']) + bold
)
@gamethread
def startround(msg, state):
"Start a round of Mad Libs. "
state['round'] += 0.25
state['votes'] = { k: -1 for k, v in state['votes'].items() }
state['entries'] = []
state['skippers'] = set()
try:
generate_madlib(state)
except IOError as e:
msg.reply("Unable to locate corpus. Aborting game.")
log.error("Corpus open failed: " + str(e))
killgame(state)
# give 10s more time for each add'l 80-char line
entrytime = int(state['options']['entrytime'] + \
(floor(len(state['text']) / 80) - 1) * 10)
msg.reply("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
log.info("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
if state['options']['hidesentence']:
poslist = []
for idx in state['textshape']:
poslist.append(state['doc'][idx].pos_)
text = "Hidden sentence! Give me: "
text += ", ".join(poslist)
else:
text = state['text']
msg.reply(text)
log.info(text)
msg.reply("Entries should be of the form " + underline +
"word word ..." + underline)
msg.reply("--> Send your entries to me VIA MESSAGE, you have " +\
"{} seconds".format(entrytime)
)
t = threading.Timer(
entrytime,
voteround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
entrytime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
if not state['options']['botplays']:
return
t3 = threading.Thread(
target=botentry,
args=(msg, state)
)
t3.start()
state['threads'][t3.ident] = t3
def processentry(msg, state):
"Process a submitted Mad Lib word list entry."
try:
if msg.text.strip().lower() == "!skip":
state['skippers'].add(msg.nick)
if len(state['skippers']) == 3:
msg.reply("OK, you don't like that one! " +\
bold + "Restarting round.")
killgame(state, reset=False)
round -= 0.5
startround(msg, state)
if msg.sender[0] == '#':
# ignore public statements other than !skip
return
entry = msg.text.strip()
words = [x.strip() for x in entry.split()]
# search for stopwords
stopwords = [x for x in words \
if x.lower() in state['options']['stopwords']]
if stopwords:
msg.reply("Entry " + bold + "rejected" + bold +\
", stopword(s) found: " + ", ".join(stopwords)
)
return
if len(words) == len(state['textshape']):
resp = "Entry accepted."
# remove any previous entry
for ent in state['entries']:
if ent[0] == msg.nick:
state['entries'].remove(ent)
resp = "Entry changed."
break
state['entries'].append((msg.nick, words, 0))
log.info("{0} entry: {1}".format(msg.nick, ", ".join(words)))
state['votes'][msg.nick] = -1
msg.reply(resp)
else:
msg.reply("Entry " + bold + "rejected" + bold +\
", expected {1} words and got {0}".format(
len(words), len(state['textshape'])
))
except Exception as e:
msg.reply("Entry " + bold + "rejected" + bold + \
", unexpected error")
log.error(str(e))
@gamethread
def botentry(msg, state):
"""Generate a response based on the original text.
Warning, may take 30-60s to complete. Do not set entrytime
very low!"""
if 'words' not in state:
# expensive initialization, do ALAP
log.info("Loading word corpus...")
state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]
#cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
entry = []
for t in state['textshape']:
log.debug("Searching for replacement for {0} ({1})".format(
state['doc'][t], state['doc'][t].pos_
))
try:
state['words'].sort(key=lambda w:
w.similarity(state['doc'][t]),
reverse=True
)
#cosine(w.vector, state['doc'][t].vector)
state['words'].reverse
except TypeError:
# perhaps our word lacks a vector?
pass
if state['options']['matchpos']:
sent = [x.string for x in list(state['doc'])]
pos = state['doc'][t].pos_
for ctr in range(10):
# TODO: Parametrize the bounds on random here
newword = state['words'][random.randint(50, 500)]
log.debug("Trying " + newword.orth_.lower())
sent[t] = newword.orth_.lower() + " "
newsent = nlp.nlp("".join(sent))
if newsent[t].pos_ == pos:
break
entry.append(newword.orth_.lower())
log.debug("Word found: {0} ({1})".format(
entry[-1], newsent[t].pos_
))
else:
entry.append(
state['words'][random.randint(50, 500)].orth_.lower()
)
log.debug("Word found: " + entry[-1])
log.info("Bot enters: " + ", ".join(entry))
state['entries'].append((config.nick, entry, 0))
# no entry in state['votes']
@gamethread
def voteround(msg, state):
"Start the voting portion of a Mad Libs round."
state['round'] += 0.5
if len(state['entries']) == 0 \
or (state['options']['botplays'] and \
len(state['entries']) == 1):
msg.reply(bold + "ACHTUNG! No entries received! Ending game.")
killgame(state)
return
# give 10s more vote time for >3 entries
votetime = int(state['options']['votetime'] + \
(len(state['entries']) - 3) * 10)
random.shuffle(state['entries'])
msg.reply("======= Entries Received =======")
for num, ent in enumerate(state['entries'], start=1):
doc = [x.string for x in list(state['doc'])]
# substitute words keeping original trailing whitespace
for idx, word in enumerate(ent[1]):
wordidx = state['textshape'][idx]
doc[wordidx] = bold + word + bold + \
state['doc'][wordidx].whitespace_
text = "".join(doc)
msg.reply("Entry {0}: {1}".format(num, text))
msg.reply("======= Voting Time!!!!! =======")
msg.reply("Send your vote (number) to me VIA MESSAGE, you have " +
"{} seconds".format(votetime)
)
t = threading.Timer(
votetime,
endround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
votetime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
def processvote(msg, state):
"Process a vote for a Mad Libs entry."
try:
if msg.sender[0] == '#':
# ignore public statements
return
# Entries are numbered from 1, list is numbered from 0
voted = int(msg.text) - 1
if voted >= len(state['entries']) or voted < 0:
raise ValueError
if msg.sender == state['entries'][voted][0]:
msg.reply("You cannot vote for yourself!")
return
if state['votes'][msg.sender] == -1:
msg.reply("Vote accepted.")
else:
msg.reply("Vote changed.")
state['votes'][msg.sender] = voted
log.info("{0} voting for {1}".format(msg.sender,
state['entries'][voted][0]))
except Exception as e:
msg.reply("Vote " + bold + "rejected" + bold + \
", unexpected error"
)
log.error(str(e))
@gamethread
def endround(msg, state):
|
def endgame(msg, state):
"End a game of Mad Libs."
slist = sorted(iter(state['scores'].items()),
key=lambda k: k[1],
reverse=True
)
winners = [slist[0]]
for player in slist[1:]:
if player[1] == slist[0][1]:
winners.append(player[0])
else:
break
msg.reply(bold + "======= GAME OVER! =======" + bold)
log.info(bold + "======= GAME OVER! =======" + bold)
msg.reply("Winner" + ("s" if len(winners) > 1 else "") + \
" with a score of " + slist[0][1] + ": " +\
bold + ", ".join(winners[:-1]) + \
(" and " if len(winners) > 1 else "") + \
winners[-1] + "!"
)
while slist:
if len(slist) >= 3:
msg.reply(
"{:>15}: {:>2} {:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1],
slist[2][0], slist[2][1]
))
log.info(
"{:>15}: {:>2} {:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1],
slist[2][0], slist[2][1]
))
del slist[0:3]
elif len(slist) == 2:
msg.reply(
"{:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1]
))
log.info(
"{:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1]
))
del slist[0:2]
else:
msg.reply("{:>15}: {:>2}".format(slist[0][0], slist[0][1]))
log.info("{:>15}: {:>2}".format(slist[0][0], slist[0][1]))
del slist[0]
# be safe, kill any lingering threads
killgame(state)
def killgame(state, reset=True):
if state['round'] == 0:
return
for t in state['threads'].itervalues():
try:
t.cancel()
except AttributeError:
continue
if reset:
resetstate(state)
log.info("Game killed.")
def resetstate(state):
state.update({
# Round number, 0=no game running
'round': 0,
# Round's game text and shape of removed words
'doc': None,
'text': '',
'textshape': [],
# Pending entries: [(nick, [words], votes), ...]
'entries': [],
# Pending votes: { nick: voteentry, ... } # 0-indexed
'votes': defaultdict(constant_factory(-1)),
# Scores: { nick: score, ... }
'scores': defaultdict(int),
# Threads on timers, keyed by thread ident
'threads': {},
# Absolute path to corpus file
'corpus': None,
# set of skippers
'skippers': set()
})
@command(["madlibs", "startgame"], require_owner=True)
def startgame(msg, state):
"Start a game of Mad Libs."
msg.reply("Welcome to super duper amazing Mad Libs game!")
msg.reply("Round 1/{0} starts in {1} seconds.".format(
state['options']['numrounds'],
state['options']['intertime']
))
state['round'] = 0.75
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
@command(["madlibs", "state"], require_owner=True)
def dumpstate(msg, state):
"Dump current module state"
log.debug(str(state))
msg.reply("State dumped to logfile.")
@command(["madlibs", "option"], require_owner=True)
def showoptions(msg, state):
"Show all configurable options"
msg.reply("Mad Libs options:")
for k, v in state['options'].items():
msg.reply(" {0}: {1}".format(k, v))
@command(["madlibs", "option", "<key>", "<value>"], require_owner=True)
def setoption(msg, state):
"Set option <key> to <value>"
key = msg.group("key")
value = msg.group("value")
if key in state['options']:
if isinstance(state['options'][key], bool):
if value.lower() in ['true', '1', 'yes', 't']:
state['options'][key] = True
value = True
else:
state['options'][key] = False
value = False
elif isinstance(state['options'][key], int):
state['options'][key] = int(value)
elif isinstance(state['options'][key], str):
state['options'][key] = value
# only Python 2 defines the unicode type
elif sys.version_info[0] == 2 and \
isinstance(state['options'][key], unicode):
state['options'][key] = unicode(value)
else:
# ???
return
msg.reply("Mad Libs option {0} set to {1}.".format(key, value))
@command(["<blah:madlibs (stop|kill)game>"], require_owner=True)
def stopgame(msg, state):
"Stop a game in progress."
if state['round'] != 0:
killgame(state)
msg.reply(bold + "Game halted by request." + bold)
@rule(".*")
def process(msg, state):
"Handle entry and vote submissions."
if msg.sender[0] == "#" or state['round'] == 0:
# ignore if no game running or if public utterance
return
if state['round'] % 1 == 0:
# Entry submission phase
processentry(msg, state)
elif state['round'] % 1 == 0.5:
# Voting phase
processvote(msg, state)
# intertime 0.75 state falls through with no action
def load():
statedict = {
# Default game options
'options': {
# game length and timing options
'numrounds': 8,
'entrytime': 90,
'votetime': 80,
'warntime': 15,
'intertime': 15,
# gameplay options
'hidesentence': False,
'botplays': True,
'corporaset': 'McGuffey',
'corpus': 'None',
'linemaxlen': 400,
'shame': True,
'matchpos': True,
'stopwords': ["cosby", "urkel", "huxtable", "arvid",
"imhotep", "shumway", "dodonga"]
}
}
resetstate(statedict)
return statedict
def unload(state):
killgame(state)
| "End a round of Mad Libs."
state['round'] += 0.25
state['doc'] = None
state['text'] = ""
state['textshape'] = []
shame = []
for nick, vote in state['votes'].items():
if vote == -1:
shame.append(nick)
else:
ent = state['entries'][vote]
state['entries'][vote] = ( ent[0], ent[1], ent[2]+1 )
msg.reply("======= Voting Results =======")
log.info("======= Voting Results =======")
for num, ent in enumerate(state['entries']):
msg.reply("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
log.info("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
state['scores'][ent[0]] += ent[2]
if state['options']['shame'] and shame:
msg.reply("These users did not vote: " +
", ".join(shame)
)
log.debug("Scores so far: " + str(state['scores']))
if state['round'] > state['options']['numrounds']:
endgame(msg, state)
else:
msg.reply("Round {0}/{1} starts in {2} seconds.".format(
int(ceil(state['round'])),
state['options']['numrounds'],
state['options']['intertime']
))
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t | identifier_body |
madlibs.py | """\
The Mad Libs gaming bot.
"""
from __future__ import unicode_literals
import os
import random
import sys
import threading
from collections import defaultdict
from gizzylib import nlp
from itertools import repeat
from math import ceil, floor
from numpy import dot
from numpy.linalg import norm
bold = irc.style("bold")
underline = irc.style("underline")
def constant_factory(value):
"""Helper to construct constant value defaultdicts"""
return repeat(value).next
def gamethread(func):
"""Decorator for functions that are Timer game threads.
Thread removes self from registry of threads in module state."""
def new_func(*args, **kwargs):
state = args[1]
state['threads'].pop(threading.current_thread().ident, None)
func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
def generate_madlib(state):
"""Generates a Mad Lib from a line out of the chosen corpus."""
line = None
while not line:
if not state['corpus']:
if state['options']['corpus'] == "None":
name = None
else:
name = state['options']['corpus']
if state['options']['corporaset'] == "None":
set = None
else:
set = state['options']['corporaset']
# will raise IOError if corpus invalid
if name:
state['corpus'] = nlp.corpus(set=set, name=name)
else:
state['corpus'] = nlp.random_corpus(set=set)
try:
line = nlp.random_line(state['corpus'])
except UnicodeDecodeError:
state['corpus'] == None
doc = nlp.nlp(line)
# truncate line if too long
maxlen = state['options']['linemaxlen']
if len(line) > maxlen:
line = ""
for span in doc.sents:
sent = ''.join(doc[i].string for i in range(
span.start, span.end
)).strip()
if len(line) + len(sent) > maxlen:
break
line += sent + " "
doc = nlp.nlp(line)
ddict = defaultdict(list)
for (index, token) in enumerate(doc):
if token.pos_ in ['ADJ', 'ADV', 'NOUN', 'VERB']:
ddict[token].append(index)
slist = sorted(ddict, key=lambda t: t.prob)
# build list of tokens+whitespace from parsed output
words = map(lambda x: x.string, list(doc))
# 2 subs + 1 more per word wrap line
limit = min(len(line) / 80 + 2, 6)
slots = []
for t in slist[:limit]:
for ctr in ddict[t]:
words[ctr] = underline + u" " + t.pos_ + " " +\
underline + t.whitespace_
slots.append(ctr)
slots.sort()
state['doc'] = doc
state['text'] = "".join(words)
state['textshape'] = slots
@gamethread
def warntime(msg, state):
msg.reply(bold + "*** {} second warning! ***".format(
state['options']['warntime']) + bold
)
@gamethread
def startround(msg, state):
"Start a round of Mad Libs. "
state['round'] += 0.25
state['votes'] = { k: -1 for k, v in state['votes'].items() }
state['entries'] = []
state['skippers'] = set()
try:
generate_madlib(state)
except IOError as e:
msg.reply("Unable to locate corpus. Aborting game.")
log.error("Corpus open failed: " + str(e))
killgame(state)
# give 10s more time for each add'l 80-char line
entrytime = int(state['options']['entrytime'] + \
(floor(len(state['text']) / 80) - 1) * 10)
msg.reply("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
log.info("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
if state['options']['hidesentence']:
poslist = []
for idx in state['textshape']:
poslist.append(state['doc'][idx].pos_)
text = "Hidden sentence! Give me: "
text += ", ".join(poslist)
else:
text = state['text']
msg.reply(text)
log.info(text)
msg.reply("Entries should be of the form " + underline +
"word word ..." + underline)
msg.reply("--> Send your entries to me VIA MESSAGE, you have " +\
"{} seconds".format(entrytime)
)
t = threading.Timer(
entrytime,
voteround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
entrytime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
if not state['options']['botplays']:
return
t3 = threading.Thread(
target=botentry,
args=(msg, state)
)
t3.start()
state['threads'][t3.ident] = t3
def processentry(msg, state):
"Process a submitted Mad Lib word list entry."
try:
if msg.text.strip().lower() == "!skip":
state['skippers'].add(msg.nick)
if len(state['skippers']) == 3:
msg.reply("OK, you don't like that one! " +\
bold + "Restarting round.")
killgame(state, reset=False)
round -= 0.5
startround(msg, state)
if msg.sender[0] == '#':
# ignore public statements other than !skip
return
entry = msg.text.strip()
words = [x.strip() for x in entry.split()]
# search for stopwords
stopwords = [x for x in words \
if x.lower() in state['options']['stopwords']]
if stopwords:
msg.reply("Entry " + bold + "rejected" + bold +\
", stopword(s) found: " + ", ".join(stopwords)
)
return
if len(words) == len(state['textshape']):
resp = "Entry accepted."
# remove any previous entry
for ent in state['entries']:
if ent[0] == msg.nick:
state['entries'].remove(ent)
resp = "Entry changed."
break
state['entries'].append((msg.nick, words, 0))
log.info("{0} entry: {1}".format(msg.nick, ", ".join(words)))
state['votes'][msg.nick] = -1
msg.reply(resp)
else:
msg.reply("Entry " + bold + "rejected" + bold +\
", expected {1} words and got {0}".format(
len(words), len(state['textshape'])
))
except Exception as e:
msg.reply("Entry " + bold + "rejected" + bold + \
", unexpected error")
log.error(str(e))
@gamethread
def botentry(msg, state):
"""Generate a response based on the original text.
Warning, may take 30-60s to complete. Do not set entrytime
very low!"""
if 'words' not in state:
# expensive initialization, do ALAP
log.info("Loading word corpus...")
state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]
#cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
entry = []
for t in state['textshape']:
log.debug("Searching for replacement for {0} ({1})".format(
state['doc'][t], state['doc'][t].pos_
))
try:
state['words'].sort(key=lambda w:
w.similarity(state['doc'][t]),
reverse=True
)
#cosine(w.vector, state['doc'][t].vector)
state['words'].reverse
except TypeError:
# perhaps our word lacks a vector?
pass
if state['options']['matchpos']:
sent = [x.string for x in list(state['doc'])]
pos = state['doc'][t].pos_
for ctr in range(10):
# TODO: Parametrize the bounds on random here
newword = state['words'][random.randint(50, 500)]
log.debug("Trying " + newword.orth_.lower())
sent[t] = newword.orth_.lower() + " "
newsent = nlp.nlp("".join(sent))
if newsent[t].pos_ == pos:
break
entry.append(newword.orth_.lower())
log.debug("Word found: {0} ({1})".format(
entry[-1], newsent[t].pos_
))
else:
entry.append(
state['words'][random.randint(50, 500)].orth_.lower()
)
log.debug("Word found: " + entry[-1])
log.info("Bot enters: " + ", ".join(entry))
state['entries'].append((config.nick, entry, 0))
# no entry in state['votes']
@gamethread
def voteround(msg, state):
"Start the voting portion of a Mad Libs round."
state['round'] += 0.5
if len(state['entries']) == 0 \
or (state['options']['botplays'] and \
len(state['entries']) == 1):
msg.reply(bold + "ACHTUNG! No entries received! Ending game.")
killgame(state)
return
# give 10s more vote time for >3 entries
votetime = int(state['options']['votetime'] + \
(len(state['entries']) - 3) * 10)
random.shuffle(state['entries'])
msg.reply("======= Entries Received =======")
for num, ent in enumerate(state['entries'], start=1):
doc = [x.string for x in list(state['doc'])]
# substitute words keeping original trailing whitespace
for idx, word in enumerate(ent[1]):
wordidx = state['textshape'][idx]
doc[wordidx] = bold + word + bold + \
state['doc'][wordidx].whitespace_
text = "".join(doc)
msg.reply("Entry {0}: {1}".format(num, text))
msg.reply("======= Voting Time!!!!! =======")
msg.reply("Send your vote (number) to me VIA MESSAGE, you have " +
"{} seconds".format(votetime)
)
t = threading.Timer(
votetime,
endround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
votetime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
def processvote(msg, state):
"Process a vote for a Mad Libs entry."
try:
if msg.sender[0] == '#':
# ignore public statements
return
# Entries are numbered from 1, list is numbered from 0
voted = int(msg.text) - 1
if voted >= len(state['entries']) or voted < 0:
raise ValueError
if msg.sender == state['entries'][voted][0]:
msg.reply("You cannot vote for yourself!")
return
if state['votes'][msg.sender] == -1:
msg.reply("Vote accepted.")
else:
msg.reply("Vote changed.")
state['votes'][msg.sender] = voted
log.info("{0} voting for {1}".format(msg.sender,
state['entries'][voted][0]))
except Exception as e:
msg.reply("Vote " + bold + "rejected" + bold + \
", unexpected error"
)
log.error(str(e))
@gamethread
def endround(msg, state):
"End a round of Mad Libs."
state['round'] += 0.25
state['doc'] = None
state['text'] = ""
state['textshape'] = []
shame = []
for nick, vote in state['votes'].items():
if vote == -1:
shame.append(nick)
else:
ent = state['entries'][vote]
state['entries'][vote] = ( ent[0], ent[1], ent[2]+1 )
msg.reply("======= Voting Results =======")
log.info("======= Voting Results =======")
for num, ent in enumerate(state['entries']):
msg.reply("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
log.info("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
state['scores'][ent[0]] += ent[2]
if state['options']['shame'] and shame:
msg.reply("These users did not vote: " +
", ".join(shame)
)
log.debug("Scores so far: " + str(state['scores']))
if state['round'] > state['options']['numrounds']:
endgame(msg, state)
else:
msg.reply("Round {0}/{1} starts in {2} seconds.".format(
int(ceil(state['round'])),
state['options']['numrounds'],
state['options']['intertime']
))
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
def endgame(msg, state):
"End a game of Mad Libs."
slist = sorted(iter(state['scores'].items()),
key=lambda k: k[1],
reverse=True
)
| if player[1] == slist[0][1]:
winners.append(player[0])
else:
break
msg.reply(bold + "======= GAME OVER! =======" + bold)
log.info(bold + "======= GAME OVER! =======" + bold)
msg.reply("Winner" + ("s" if len(winners) > 1 else "") + \
" with a score of " + slist[0][1] + ": " +\
bold + ", ".join(winners[:-1]) + \
(" and " if len(winners) > 1 else "") + \
winners[-1] + "!"
)
while slist:
if len(slist) >= 3:
msg.reply(
"{:>15}: {:>2} {:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1],
slist[2][0], slist[2][1]
))
log.info(
"{:>15}: {:>2} {:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1],
slist[2][0], slist[2][1]
))
del slist[0:3]
elif len(slist) == 2:
msg.reply(
"{:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1]
))
log.info(
"{:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1]
))
del slist[0:2]
else:
msg.reply("{:>15}: {:>2}".format(slist[0][0], slist[0][1]))
log.info("{:>15}: {:>2}".format(slist[0][0], slist[0][1]))
del slist[0]
# be safe, kill any lingering threads
killgame(state)
def killgame(state, reset=True):
if state['round'] == 0:
return
for t in state['threads'].itervalues():
try:
t.cancel()
except AttributeError:
continue
if reset:
resetstate(state)
log.info("Game killed.")
def resetstate(state):
state.update({
# Round number, 0=no game running
'round': 0,
# Round's game text and shape of removed words
'doc': None,
'text': '',
'textshape': [],
# Pending entries: [(nick, [words], votes), ...]
'entries': [],
# Pending votes: { nick: voteentry, ... } # 0-indexed
'votes': defaultdict(constant_factory(-1)),
# Scores: { nick: score, ... }
'scores': defaultdict(int),
# Threads on timers, keyed by thread ident
'threads': {},
# Absolute path to corpus file
'corpus': None,
# set of skippers
'skippers': set()
})
@command(["madlibs", "startgame"], require_owner=True)
def startgame(msg, state):
"Start a game of Mad Libs."
msg.reply("Welcome to super duper amazing Mad Libs game!")
msg.reply("Round 1/{0} starts in {1} seconds.".format(
state['options']['numrounds'],
state['options']['intertime']
))
state['round'] = 0.75
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
@command(["madlibs", "state"], require_owner=True)
def dumpstate(msg, state):
"Dump current module state"
log.debug(str(state))
msg.reply("State dumped to logfile.")
@command(["madlibs", "option"], require_owner=True)
def showoptions(msg, state):
"Show all configurable options"
msg.reply("Mad Libs options:")
for k, v in state['options'].items():
msg.reply(" {0}: {1}".format(k, v))
@command(["madlibs", "option", "<key>", "<value>"], require_owner=True)
def setoption(msg, state):
"Set option <key> to <value>"
key = msg.group("key")
value = msg.group("value")
if key in state['options']:
if isinstance(state['options'][key], bool):
if value.lower() in ['true', '1', 'yes', 't']:
state['options'][key] = True
value = True
else:
state['options'][key] = False
value = False
elif isinstance(state['options'][key], int):
state['options'][key] = int(value)
elif isinstance(state['options'][key], str):
state['options'][key] = value
# only Python 2 defines the unicode type
elif sys.version_info[0] == 2 and \
isinstance(state['options'][key], unicode):
state['options'][key] = unicode(value)
else:
# ???
return
msg.reply("Mad Libs option {0} set to {1}.".format(key, value))
@command(["<blah:madlibs (stop|kill)game>"], require_owner=True)
def stopgame(msg, state):
"Stop a game in progress."
if state['round'] != 0:
killgame(state)
msg.reply(bold + "Game halted by request." + bold)
@rule(".*")
def process(msg, state):
"Handle entry and vote submissions."
if msg.sender[0] == "#" or state['round'] == 0:
# ignore if no game running or if public utterance
return
if state['round'] % 1 == 0:
# Entry submission phase
processentry(msg, state)
elif state['round'] % 1 == 0.5:
# Voting phase
processvote(msg, state)
# intertime 0.75 state falls through with no action
def load():
statedict = {
# Default game options
'options': {
# game length and timing options
'numrounds': 8,
'entrytime': 90,
'votetime': 80,
'warntime': 15,
'intertime': 15,
# gameplay options
'hidesentence': False,
'botplays': True,
'corporaset': 'McGuffey',
'corpus': 'None',
'linemaxlen': 400,
'shame': True,
'matchpos': True,
'stopwords': ["cosby", "urkel", "huxtable", "arvid",
"imhotep", "shumway", "dodonga"]
}
}
resetstate(statedict)
return statedict
def unload(state):
killgame(state) | winners = [slist[0]]
for player in slist[1:]: | random_line_split |
madlibs.py | """\
The Mad Libs gaming bot.
"""
from __future__ import unicode_literals
import os
import random
import sys
import threading
from collections import defaultdict
from gizzylib import nlp
from itertools import repeat
from math import ceil, floor
from numpy import dot
from numpy.linalg import norm
bold = irc.style("bold")
underline = irc.style("underline")
def constant_factory(value):
"""Helper to construct constant value defaultdicts"""
return repeat(value).next
def gamethread(func):
"""Decorator for functions that are Timer game threads.
Thread removes self from registry of threads in module state."""
def new_func(*args, **kwargs):
state = args[1]
state['threads'].pop(threading.current_thread().ident, None)
func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
def generate_madlib(state):
"""Generates a Mad Lib from a line out of the chosen corpus."""
line = None
while not line:
if not state['corpus']:
if state['options']['corpus'] == "None":
name = None
else:
name = state['options']['corpus']
if state['options']['corporaset'] == "None":
set = None
else:
set = state['options']['corporaset']
# will raise IOError if corpus invalid
if name:
state['corpus'] = nlp.corpus(set=set, name=name)
else:
state['corpus'] = nlp.random_corpus(set=set)
try:
line = nlp.random_line(state['corpus'])
except UnicodeDecodeError:
state['corpus'] == None
doc = nlp.nlp(line)
# truncate line if too long
maxlen = state['options']['linemaxlen']
if len(line) > maxlen:
line = ""
for span in doc.sents:
sent = ''.join(doc[i].string for i in range(
span.start, span.end
)).strip()
if len(line) + len(sent) > maxlen:
break
line += sent + " "
doc = nlp.nlp(line)
ddict = defaultdict(list)
for (index, token) in enumerate(doc):
if token.pos_ in ['ADJ', 'ADV', 'NOUN', 'VERB']:
ddict[token].append(index)
slist = sorted(ddict, key=lambda t: t.prob)
# build list of tokens+whitespace from parsed output
words = map(lambda x: x.string, list(doc))
# 2 subs + 1 more per word wrap line
limit = min(len(line) / 80 + 2, 6)
slots = []
for t in slist[:limit]:
for ctr in ddict[t]:
words[ctr] = underline + u" " + t.pos_ + " " +\
underline + t.whitespace_
slots.append(ctr)
slots.sort()
state['doc'] = doc
state['text'] = "".join(words)
state['textshape'] = slots
@gamethread
def warntime(msg, state):
msg.reply(bold + "*** {} second warning! ***".format(
state['options']['warntime']) + bold
)
@gamethread
def startround(msg, state):
"Start a round of Mad Libs. "
state['round'] += 0.25
state['votes'] = { k: -1 for k, v in state['votes'].items() }
state['entries'] = []
state['skippers'] = set()
try:
generate_madlib(state)
except IOError as e:
msg.reply("Unable to locate corpus. Aborting game.")
log.error("Corpus open failed: " + str(e))
killgame(state)
# give 10s more time for each add'l 80-char line
entrytime = int(state['options']['entrytime'] + \
(floor(len(state['text']) / 80) - 1) * 10)
msg.reply("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
log.info("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
if state['options']['hidesentence']:
poslist = []
for idx in state['textshape']:
poslist.append(state['doc'][idx].pos_)
text = "Hidden sentence! Give me: "
text += ", ".join(poslist)
else:
text = state['text']
msg.reply(text)
log.info(text)
msg.reply("Entries should be of the form " + underline +
"word word ..." + underline)
msg.reply("--> Send your entries to me VIA MESSAGE, you have " +\
"{} seconds".format(entrytime)
)
t = threading.Timer(
entrytime,
voteround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
entrytime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
if not state['options']['botplays']:
return
t3 = threading.Thread(
target=botentry,
args=(msg, state)
)
t3.start()
state['threads'][t3.ident] = t3
def processentry(msg, state):
"Process a submitted Mad Lib word list entry."
try:
if msg.text.strip().lower() == "!skip":
state['skippers'].add(msg.nick)
if len(state['skippers']) == 3:
msg.reply("OK, you don't like that one! " +\
bold + "Restarting round.")
killgame(state, reset=False)
round -= 0.5
startround(msg, state)
if msg.sender[0] == '#':
# ignore public statements other than !skip
return
entry = msg.text.strip()
words = [x.strip() for x in entry.split()]
# search for stopwords
stopwords = [x for x in words \
if x.lower() in state['options']['stopwords']]
if stopwords:
msg.reply("Entry " + bold + "rejected" + bold +\
", stopword(s) found: " + ", ".join(stopwords)
)
return
if len(words) == len(state['textshape']):
resp = "Entry accepted."
# remove any previous entry
for ent in state['entries']:
if ent[0] == msg.nick:
state['entries'].remove(ent)
resp = "Entry changed."
break
state['entries'].append((msg.nick, words, 0))
log.info("{0} entry: {1}".format(msg.nick, ", ".join(words)))
state['votes'][msg.nick] = -1
msg.reply(resp)
else:
msg.reply("Entry " + bold + "rejected" + bold +\
", expected {1} words and got {0}".format(
len(words), len(state['textshape'])
))
except Exception as e:
msg.reply("Entry " + bold + "rejected" + bold + \
", unexpected error")
log.error(str(e))
@gamethread
def | (msg, state):
"""Generate a response based on the original text.
Warning, may take 30-60s to complete. Do not set entrytime
very low!"""
if 'words' not in state:
# expensive initialization, do ALAP
log.info("Loading word corpus...")
state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]
#cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
entry = []
for t in state['textshape']:
log.debug("Searching for replacement for {0} ({1})".format(
state['doc'][t], state['doc'][t].pos_
))
try:
state['words'].sort(key=lambda w:
w.similarity(state['doc'][t]),
reverse=True
)
#cosine(w.vector, state['doc'][t].vector)
state['words'].reverse
except TypeError:
# perhaps our word lacks a vector?
pass
if state['options']['matchpos']:
sent = [x.string for x in list(state['doc'])]
pos = state['doc'][t].pos_
for ctr in range(10):
# TODO: Parametrize the bounds on random here
newword = state['words'][random.randint(50, 500)]
log.debug("Trying " + newword.orth_.lower())
sent[t] = newword.orth_.lower() + " "
newsent = nlp.nlp("".join(sent))
if newsent[t].pos_ == pos:
break
entry.append(newword.orth_.lower())
log.debug("Word found: {0} ({1})".format(
entry[-1], newsent[t].pos_
))
else:
entry.append(
state['words'][random.randint(50, 500)].orth_.lower()
)
log.debug("Word found: " + entry[-1])
log.info("Bot enters: " + ", ".join(entry))
state['entries'].append((config.nick, entry, 0))
# no entry in state['votes']
@gamethread
def voteround(msg, state):
"Start the voting portion of a Mad Libs round."
state['round'] += 0.5
if len(state['entries']) == 0 \
or (state['options']['botplays'] and \
len(state['entries']) == 1):
msg.reply(bold + "ACHTUNG! No entries received! Ending game.")
killgame(state)
return
# give 10s more vote time for >3 entries
votetime = int(state['options']['votetime'] + \
(len(state['entries']) - 3) * 10)
random.shuffle(state['entries'])
msg.reply("======= Entries Received =======")
for num, ent in enumerate(state['entries'], start=1):
doc = [x.string for x in list(state['doc'])]
# substitute words keeping original trailing whitespace
for idx, word in enumerate(ent[1]):
wordidx = state['textshape'][idx]
doc[wordidx] = bold + word + bold + \
state['doc'][wordidx].whitespace_
text = "".join(doc)
msg.reply("Entry {0}: {1}".format(num, text))
msg.reply("======= Voting Time!!!!! =======")
msg.reply("Send your vote (number) to me VIA MESSAGE, you have " +
"{} seconds".format(votetime)
)
t = threading.Timer(
votetime,
endround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
votetime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
def processvote(msg, state):
"Process a vote for a Mad Libs entry."
try:
if msg.sender[0] == '#':
# ignore public statements
return
# Entries are numbered from 1, list is numbered from 0
voted = int(msg.text) - 1
if voted >= len(state['entries']) or voted < 0:
raise ValueError
if msg.sender == state['entries'][voted][0]:
msg.reply("You cannot vote for yourself!")
return
if state['votes'][msg.sender] == -1:
msg.reply("Vote accepted.")
else:
msg.reply("Vote changed.")
state['votes'][msg.sender] = voted
log.info("{0} voting for {1}".format(msg.sender,
state['entries'][voted][0]))
except Exception as e:
msg.reply("Vote " + bold + "rejected" + bold + \
", unexpected error"
)
log.error(str(e))
@gamethread
def endround(msg, state):
"End a round of Mad Libs."
state['round'] += 0.25
state['doc'] = None
state['text'] = ""
state['textshape'] = []
shame = []
for nick, vote in state['votes'].items():
if vote == -1:
shame.append(nick)
else:
ent = state['entries'][vote]
state['entries'][vote] = ( ent[0], ent[1], ent[2]+1 )
msg.reply("======= Voting Results =======")
log.info("======= Voting Results =======")
for num, ent in enumerate(state['entries']):
msg.reply("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
log.info("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
state['scores'][ent[0]] += ent[2]
if state['options']['shame'] and shame:
msg.reply("These users did not vote: " +
", ".join(shame)
)
log.debug("Scores so far: " + str(state['scores']))
if state['round'] > state['options']['numrounds']:
endgame(msg, state)
else:
msg.reply("Round {0}/{1} starts in {2} seconds.".format(
int(ceil(state['round'])),
state['options']['numrounds'],
state['options']['intertime']
))
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
def endgame(msg, state):
"End a game of Mad Libs."
slist = sorted(iter(state['scores'].items()),
key=lambda k: k[1],
reverse=True
)
winners = [slist[0]]
for player in slist[1:]:
if player[1] == slist[0][1]:
winners.append(player[0])
else:
break
msg.reply(bold + "======= GAME OVER! =======" + bold)
log.info(bold + "======= GAME OVER! =======" + bold)
msg.reply("Winner" + ("s" if len(winners) > 1 else "") + \
" with a score of " + slist[0][1] + ": " +\
bold + ", ".join(winners[:-1]) + \
(" and " if len(winners) > 1 else "") + \
winners[-1] + "!"
)
while slist:
if len(slist) >= 3:
msg.reply(
"{:>15}: {:>2} {:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1],
slist[2][0], slist[2][1]
))
log.info(
"{:>15}: {:>2} {:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1],
slist[2][0], slist[2][1]
))
del slist[0:3]
elif len(slist) == 2:
msg.reply(
"{:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1]
))
log.info(
"{:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1]
))
del slist[0:2]
else:
msg.reply("{:>15}: {:>2}".format(slist[0][0], slist[0][1]))
log.info("{:>15}: {:>2}".format(slist[0][0], slist[0][1]))
del slist[0]
# be safe, kill any lingering threads
killgame(state)
def killgame(state, reset=True):
if state['round'] == 0:
return
for t in state['threads'].itervalues():
try:
t.cancel()
except AttributeError:
continue
if reset:
resetstate(state)
log.info("Game killed.")
def resetstate(state):
state.update({
# Round number, 0=no game running
'round': 0,
# Round's game text and shape of removed words
'doc': None,
'text': '',
'textshape': [],
# Pending entries: [(nick, [words], votes), ...]
'entries': [],
# Pending votes: { nick: voteentry, ... } # 0-indexed
'votes': defaultdict(constant_factory(-1)),
# Scores: { nick: score, ... }
'scores': defaultdict(int),
# Threads on timers, keyed by thread ident
'threads': {},
# Absolute path to corpus file
'corpus': None,
# set of skippers
'skippers': set()
})
@command(["madlibs", "startgame"], require_owner=True)
def startgame(msg, state):
"Start a game of Mad Libs."
msg.reply("Welcome to super duper amazing Mad Libs game!")
msg.reply("Round 1/{0} starts in {1} seconds.".format(
state['options']['numrounds'],
state['options']['intertime']
))
state['round'] = 0.75
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
@command(["madlibs", "state"], require_owner=True)
def dumpstate(msg, state):
"Dump current module state"
log.debug(str(state))
msg.reply("State dumped to logfile.")
@command(["madlibs", "option"], require_owner=True)
def showoptions(msg, state):
"Show all configurable options"
msg.reply("Mad Libs options:")
for k, v in state['options'].items():
msg.reply(" {0}: {1}".format(k, v))
@command(["madlibs", "option", "<key>", "<value>"], require_owner=True)
def setoption(msg, state):
"Set option <key> to <value>"
key = msg.group("key")
value = msg.group("value")
if key in state['options']:
if isinstance(state['options'][key], bool):
if value.lower() in ['true', '1', 'yes', 't']:
state['options'][key] = True
value = True
else:
state['options'][key] = False
value = False
elif isinstance(state['options'][key], int):
state['options'][key] = int(value)
elif isinstance(state['options'][key], str):
state['options'][key] = value
# only Python 2 defines the unicode type
elif sys.version_info[0] == 2 and \
isinstance(state['options'][key], unicode):
state['options'][key] = unicode(value)
else:
# ???
return
msg.reply("Mad Libs option {0} set to {1}.".format(key, value))
@command(["<blah:madlibs (stop|kill)game>"], require_owner=True)
def stopgame(msg, state):
"Stop a game in progress."
if state['round'] != 0:
killgame(state)
msg.reply(bold + "Game halted by request." + bold)
@rule(".*")
def process(msg, state):
"Handle entry and vote submissions."
if msg.sender[0] == "#" or state['round'] == 0:
# ignore if no game running or if public utterance
return
if state['round'] % 1 == 0:
# Entry submission phase
processentry(msg, state)
elif state['round'] % 1 == 0.5:
# Voting phase
processvote(msg, state)
# intertime 0.75 state falls through with no action
def load():
statedict = {
# Default game options
'options': {
# game length and timing options
'numrounds': 8,
'entrytime': 90,
'votetime': 80,
'warntime': 15,
'intertime': 15,
# gameplay options
'hidesentence': False,
'botplays': True,
'corporaset': 'McGuffey',
'corpus': 'None',
'linemaxlen': 400,
'shame': True,
'matchpos': True,
'stopwords': ["cosby", "urkel", "huxtable", "arvid",
"imhotep", "shumway", "dodonga"]
}
}
resetstate(statedict)
return statedict
def unload(state):
killgame(state)
| botentry | identifier_name |
madlibs.py | """\
The Mad Libs gaming bot.
"""
from __future__ import unicode_literals
import os
import random
import sys
import threading
from collections import defaultdict
from gizzylib import nlp
from itertools import repeat
from math import ceil, floor
from numpy import dot
from numpy.linalg import norm
bold = irc.style("bold")
underline = irc.style("underline")
def constant_factory(value):
"""Helper to construct constant value defaultdicts"""
return repeat(value).next
def gamethread(func):
"""Decorator for functions that are Timer game threads.
Thread removes self from registry of threads in module state."""
def new_func(*args, **kwargs):
state = args[1]
state['threads'].pop(threading.current_thread().ident, None)
func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
def generate_madlib(state):
"""Generates a Mad Lib from a line out of the chosen corpus."""
line = None
while not line:
if not state['corpus']:
if state['options']['corpus'] == "None":
|
else:
name = state['options']['corpus']
if state['options']['corporaset'] == "None":
set = None
else:
set = state['options']['corporaset']
# will raise IOError if corpus invalid
if name:
state['corpus'] = nlp.corpus(set=set, name=name)
else:
state['corpus'] = nlp.random_corpus(set=set)
try:
line = nlp.random_line(state['corpus'])
except UnicodeDecodeError:
state['corpus'] == None
doc = nlp.nlp(line)
# truncate line if too long
maxlen = state['options']['linemaxlen']
if len(line) > maxlen:
line = ""
for span in doc.sents:
sent = ''.join(doc[i].string for i in range(
span.start, span.end
)).strip()
if len(line) + len(sent) > maxlen:
break
line += sent + " "
doc = nlp.nlp(line)
ddict = defaultdict(list)
for (index, token) in enumerate(doc):
if token.pos_ in ['ADJ', 'ADV', 'NOUN', 'VERB']:
ddict[token].append(index)
slist = sorted(ddict, key=lambda t: t.prob)
# build list of tokens+whitespace from parsed output
words = map(lambda x: x.string, list(doc))
# 2 subs + 1 more per word wrap line
limit = min(len(line) / 80 + 2, 6)
slots = []
for t in slist[:limit]:
for ctr in ddict[t]:
words[ctr] = underline + u" " + t.pos_ + " " +\
underline + t.whitespace_
slots.append(ctr)
slots.sort()
state['doc'] = doc
state['text'] = "".join(words)
state['textshape'] = slots
@gamethread
def warntime(msg, state):
msg.reply(bold + "*** {} second warning! ***".format(
state['options']['warntime']) + bold
)
@gamethread
def startround(msg, state):
"Start a round of Mad Libs. "
state['round'] += 0.25
state['votes'] = { k: -1 for k, v in state['votes'].items() }
state['entries'] = []
state['skippers'] = set()
try:
generate_madlib(state)
except IOError as e:
msg.reply("Unable to locate corpus. Aborting game.")
log.error("Corpus open failed: " + str(e))
killgame(state)
# give 10s more time for each add'l 80-char line
entrytime = int(state['options']['entrytime'] + \
(floor(len(state['text']) / 80) - 1) * 10)
msg.reply("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
log.info("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
if state['options']['hidesentence']:
poslist = []
for idx in state['textshape']:
poslist.append(state['doc'][idx].pos_)
text = "Hidden sentence! Give me: "
text += ", ".join(poslist)
else:
text = state['text']
msg.reply(text)
log.info(text)
msg.reply("Entries should be of the form " + underline +
"word word ..." + underline)
msg.reply("--> Send your entries to me VIA MESSAGE, you have " +\
"{} seconds".format(entrytime)
)
t = threading.Timer(
entrytime,
voteround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
entrytime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
if not state['options']['botplays']:
return
t3 = threading.Thread(
target=botentry,
args=(msg, state)
)
t3.start()
state['threads'][t3.ident] = t3
def processentry(msg, state):
"Process a submitted Mad Lib word list entry."
try:
if msg.text.strip().lower() == "!skip":
state['skippers'].add(msg.nick)
if len(state['skippers']) == 3:
msg.reply("OK, you don't like that one! " +\
bold + "Restarting round.")
killgame(state, reset=False)
round -= 0.5
startround(msg, state)
if msg.sender[0] == '#':
# ignore public statements other than !skip
return
entry = msg.text.strip()
words = [x.strip() for x in entry.split()]
# search for stopwords
stopwords = [x for x in words \
if x.lower() in state['options']['stopwords']]
if stopwords:
msg.reply("Entry " + bold + "rejected" + bold +\
", stopword(s) found: " + ", ".join(stopwords)
)
return
if len(words) == len(state['textshape']):
resp = "Entry accepted."
# remove any previous entry
for ent in state['entries']:
if ent[0] == msg.nick:
state['entries'].remove(ent)
resp = "Entry changed."
break
state['entries'].append((msg.nick, words, 0))
log.info("{0} entry: {1}".format(msg.nick, ", ".join(words)))
state['votes'][msg.nick] = -1
msg.reply(resp)
else:
msg.reply("Entry " + bold + "rejected" + bold +\
", expected {1} words and got {0}".format(
len(words), len(state['textshape'])
))
except Exception as e:
msg.reply("Entry " + bold + "rejected" + bold + \
", unexpected error")
log.error(str(e))
@gamethread
def botentry(msg, state):
"""Generate a response based on the original text.
Warning, may take 30-60s to complete. Do not set entrytime
very low!"""
if 'words' not in state:
# expensive initialization, do ALAP
log.info("Loading word corpus...")
state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]
#cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
entry = []
for t in state['textshape']:
log.debug("Searching for replacement for {0} ({1})".format(
state['doc'][t], state['doc'][t].pos_
))
try:
state['words'].sort(key=lambda w:
w.similarity(state['doc'][t]),
reverse=True
)
#cosine(w.vector, state['doc'][t].vector)
state['words'].reverse
except TypeError:
# perhaps our word lacks a vector?
pass
if state['options']['matchpos']:
sent = [x.string for x in list(state['doc'])]
pos = state['doc'][t].pos_
for ctr in range(10):
# TODO: Parametrize the bounds on random here
newword = state['words'][random.randint(50, 500)]
log.debug("Trying " + newword.orth_.lower())
sent[t] = newword.orth_.lower() + " "
newsent = nlp.nlp("".join(sent))
if newsent[t].pos_ == pos:
break
entry.append(newword.orth_.lower())
log.debug("Word found: {0} ({1})".format(
entry[-1], newsent[t].pos_
))
else:
entry.append(
state['words'][random.randint(50, 500)].orth_.lower()
)
log.debug("Word found: " + entry[-1])
log.info("Bot enters: " + ", ".join(entry))
state['entries'].append((config.nick, entry, 0))
# no entry in state['votes']
@gamethread
def voteround(msg, state):
"Start the voting portion of a Mad Libs round."
state['round'] += 0.5
if len(state['entries']) == 0 \
or (state['options']['botplays'] and \
len(state['entries']) == 1):
msg.reply(bold + "ACHTUNG! No entries received! Ending game.")
killgame(state)
return
# give 10s more vote time for >3 entries
votetime = int(state['options']['votetime'] + \
(len(state['entries']) - 3) * 10)
random.shuffle(state['entries'])
msg.reply("======= Entries Received =======")
for num, ent in enumerate(state['entries'], start=1):
doc = [x.string for x in list(state['doc'])]
# substitute words keeping original trailing whitespace
for idx, word in enumerate(ent[1]):
wordidx = state['textshape'][idx]
doc[wordidx] = bold + word + bold + \
state['doc'][wordidx].whitespace_
text = "".join(doc)
msg.reply("Entry {0}: {1}".format(num, text))
msg.reply("======= Voting Time!!!!! =======")
msg.reply("Send your vote (number) to me VIA MESSAGE, you have " +
"{} seconds".format(votetime)
)
t = threading.Timer(
votetime,
endround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
votetime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
def processvote(msg, state):
"Process a vote for a Mad Libs entry."
try:
if msg.sender[0] == '#':
# ignore public statements
return
# Entries are numbered from 1, list is numbered from 0
voted = int(msg.text) - 1
if voted >= len(state['entries']) or voted < 0:
raise ValueError
if msg.sender == state['entries'][voted][0]:
msg.reply("You cannot vote for yourself!")
return
if state['votes'][msg.sender] == -1:
msg.reply("Vote accepted.")
else:
msg.reply("Vote changed.")
state['votes'][msg.sender] = voted
log.info("{0} voting for {1}".format(msg.sender,
state['entries'][voted][0]))
except Exception as e:
msg.reply("Vote " + bold + "rejected" + bold + \
", unexpected error"
)
log.error(str(e))
@gamethread
def endround(msg, state):
"End a round of Mad Libs."
state['round'] += 0.25
state['doc'] = None
state['text'] = ""
state['textshape'] = []
shame = []
for nick, vote in state['votes'].items():
if vote == -1:
shame.append(nick)
else:
ent = state['entries'][vote]
state['entries'][vote] = ( ent[0], ent[1], ent[2]+1 )
msg.reply("======= Voting Results =======")
log.info("======= Voting Results =======")
for num, ent in enumerate(state['entries']):
msg.reply("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
log.info("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
state['scores'][ent[0]] += ent[2]
if state['options']['shame'] and shame:
msg.reply("These users did not vote: " +
", ".join(shame)
)
log.debug("Scores so far: " + str(state['scores']))
if state['round'] > state['options']['numrounds']:
endgame(msg, state)
else:
msg.reply("Round {0}/{1} starts in {2} seconds.".format(
int(ceil(state['round'])),
state['options']['numrounds'],
state['options']['intertime']
))
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
def endgame(msg, state):
"End a game of Mad Libs."
slist = sorted(iter(state['scores'].items()),
key=lambda k: k[1],
reverse=True
)
winners = [slist[0]]
for player in slist[1:]:
if player[1] == slist[0][1]:
winners.append(player[0])
else:
break
msg.reply(bold + "======= GAME OVER! =======" + bold)
log.info(bold + "======= GAME OVER! =======" + bold)
msg.reply("Winner" + ("s" if len(winners) > 1 else "") + \
" with a score of " + slist[0][1] + ": " +\
bold + ", ".join(winners[:-1]) + \
(" and " if len(winners) > 1 else "") + \
winners[-1] + "!"
)
while slist:
if len(slist) >= 3:
msg.reply(
"{:>15}: {:>2} {:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1],
slist[2][0], slist[2][1]
))
log.info(
"{:>15}: {:>2} {:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1],
slist[2][0], slist[2][1]
))
del slist[0:3]
elif len(slist) == 2:
msg.reply(
"{:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1]
))
log.info(
"{:>15}: {:>2} {:>15}: {:>2}".format(
slist[0][0], slist[0][1],
slist[1][0], slist[1][1]
))
del slist[0:2]
else:
msg.reply("{:>15}: {:>2}".format(slist[0][0], slist[0][1]))
log.info("{:>15}: {:>2}".format(slist[0][0], slist[0][1]))
del slist[0]
# be safe, kill any lingering threads
killgame(state)
def killgame(state, reset=True):
if state['round'] == 0:
return
for t in state['threads'].itervalues():
try:
t.cancel()
except AttributeError:
continue
if reset:
resetstate(state)
log.info("Game killed.")
def resetstate(state):
state.update({
# Round number, 0=no game running
'round': 0,
# Round's game text and shape of removed words
'doc': None,
'text': '',
'textshape': [],
# Pending entries: [(nick, [words], votes), ...]
'entries': [],
# Pending votes: { nick: voteentry, ... } # 0-indexed
'votes': defaultdict(constant_factory(-1)),
# Scores: { nick: score, ... }
'scores': defaultdict(int),
# Threads on timers, keyed by thread ident
'threads': {},
# Absolute path to corpus file
'corpus': None,
# set of skippers
'skippers': set()
})
@command(["madlibs", "startgame"], require_owner=True)
def startgame(msg, state):
"Start a game of Mad Libs."
msg.reply("Welcome to super duper amazing Mad Libs game!")
msg.reply("Round 1/{0} starts in {1} seconds.".format(
state['options']['numrounds'],
state['options']['intertime']
))
state['round'] = 0.75
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
@command(["madlibs", "state"], require_owner=True)
def dumpstate(msg, state):
"Dump current module state"
log.debug(str(state))
msg.reply("State dumped to logfile.")
@command(["madlibs", "option"], require_owner=True)
def showoptions(msg, state):
"Show all configurable options"
msg.reply("Mad Libs options:")
for k, v in state['options'].items():
msg.reply(" {0}: {1}".format(k, v))
@command(["madlibs", "option", "<key>", "<value>"], require_owner=True)
def setoption(msg, state):
"Set option <key> to <value>"
key = msg.group("key")
value = msg.group("value")
if key in state['options']:
if isinstance(state['options'][key], bool):
if value.lower() in ['true', '1', 'yes', 't']:
state['options'][key] = True
value = True
else:
state['options'][key] = False
value = False
elif isinstance(state['options'][key], int):
state['options'][key] = int(value)
elif isinstance(state['options'][key], str):
state['options'][key] = value
# only Python 2 defines the unicode type
elif sys.version_info[0] == 2 and \
isinstance(state['options'][key], unicode):
state['options'][key] = unicode(value)
else:
# ???
return
msg.reply("Mad Libs option {0} set to {1}.".format(key, value))
@command(["<blah:madlibs (stop|kill)game>"], require_owner=True)
def stopgame(msg, state):
"Stop a game in progress."
if state['round'] != 0:
killgame(state)
msg.reply(bold + "Game halted by request." + bold)
@rule(".*")
def process(msg, state):
"Handle entry and vote submissions."
if msg.sender[0] == "#" or state['round'] == 0:
# ignore if no game running or if public utterance
return
if state['round'] % 1 == 0:
# Entry submission phase
processentry(msg, state)
elif state['round'] % 1 == 0.5:
# Voting phase
processvote(msg, state)
# intertime 0.75 state falls through with no action
def load():
statedict = {
# Default game options
'options': {
# game length and timing options
'numrounds': 8,
'entrytime': 90,
'votetime': 80,
'warntime': 15,
'intertime': 15,
# gameplay options
'hidesentence': False,
'botplays': True,
'corporaset': 'McGuffey',
'corpus': 'None',
'linemaxlen': 400,
'shame': True,
'matchpos': True,
'stopwords': ["cosby", "urkel", "huxtable", "arvid",
"imhotep", "shumway", "dodonga"]
}
}
resetstate(statedict)
return statedict
def unload(state):
killgame(state)
| name = None | conditional_block |
gru_model.py | # -*- coding: utf-8 -*-
"""Intent GRU Model 90+.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zH4GNqFS_Z4PxGEueU5Y6g_qOevCl-6d
<a href="https://colab.research.google.com/github/Dark-Sied/Intent_Classification/blob/master/Intent_classification_final.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 1.Import Libraries
"""
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
import nltk
import re
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Bidirectional, Embedding, Dropout
from keras.callbacks import ModelCheckpoint
from google.colab import files
"""# 2. Upload dataset"""
files.upload()
def load_dataset(filename):
df = pd.read_csv(filename, encoding = "latin1", names = ["Sentence", "Intent"])
print(df.head())
intent = df["Intent"]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
intent, unique_intent, sentences = load_dataset("Dataset.csv")
intent
sentences
print(sentences[:10])
nltk.download("stopwords")
nltk.download("punkt")
#define stemmer
stemmer = LancasterStemmer()
"""# 3. Data Cleaning"""
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = word_tokenize(clean)
#stemming
words.append([i.lower() for i in w])
return words
cleaned_words = cleaning(sentences)
print(len(cleaned_words))
print(cleaned_words[:2])
"""### 3.1 Keras Tokenizer"""
def create_tokenizer(words, filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def | (words):
return(len(max(words, key = len)))
word_tokenizer = create_tokenizer(cleaned_words)
vocab_size = len(word_tokenizer.word_index) + 1
max_length = max_length(cleaned_words)
print("Vocab Size = %d and Maximum length = %d" % (vocab_size, max_length))
"""### 3.2 One Hot Encoding for Model Fed"""
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
encoded_doc = encoding_doc(word_tokenizer, cleaned_words)
def padding_doc(encoded_doc, max_length):
return(pad_sequences(encoded_doc, maxlen = max_length, padding = "post"))
padded_doc = padding_doc(encoded_doc, max_length)
padded_doc[:5]
print("Shape of padded docs = ",padded_doc.shape)
#tokenizer with filter changed
output_tokenizer = create_tokenizer(unique_intent, filters = '!"#$%&()*+,-/:;<=>?@[\]^`{|}~')
output_tokenizer.word_index
encoded_output = encoding_doc(output_tokenizer, intent)
encoded_output = np.array(encoded_output).reshape(len(encoded_output), 1)
encoded_output.shape
def one_hot(encode):
o = OneHotEncoder(sparse = False)
return(o.fit_transform(encode))
output_one_hot = one_hot(encoded_output)
output_one_hot.shape
"""# 4. Train and Validation Split"""
from sklearn.model_selection import train_test_split
train_X, val_X, train_Y, val_Y = train_test_split(padded_doc, output_one_hot, shuffle = True, test_size = 0.2)
print("Shape of train_X = %s and train_Y = %s" % (train_X.shape, train_Y.shape))
print("Shape of val_X = %s and val_Y = %s" % (val_X.shape, val_Y.shape))
"""# 5. GRU Modeling"""
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128, return_sequences=True)))
model.add(Bidirectional(LSTM(64)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
model = create_model(vocab_size, max_length)
model.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
model.summary()
"""# 6. Training"""
filename = 'model.h5'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
hist = model.fit(train_X, train_Y, epochs = 100, batch_size = 32, validation_data = (val_X, val_Y), callbacks = [checkpoint])
loss = pd.DataFrame({'loss': model.history.history['accuracy'], 'auc': model.history.history['val_accuracy'] })
loss.plot()
model = load_model("model.h5")
def predictions(text):
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", text)
test_word = word_tokenize(clean)
test_word = [w.lower() for w in test_word]
test_ls = word_tokenizer.texts_to_sequences(test_word)
print(test_word)
#Check for unknown words
if [] in test_ls:
test_ls = list(filter(None, test_ls))
test_ls = np.array(test_ls).reshape(1, len(test_ls))
x = padding_doc(test_ls, max_length)
pred = model.predict_proba(x)
return pred
def get_final_output(pred, classes):
predictions = pred[0]
classes = np.array(classes)
ids = np.argsort(-predictions)
classes = classes[ids]
predictions = -np.sort(-predictions)
for i in range(pred.shape[1]):
print("%s has confidence = %s" % (classes[i], (predictions[i])))
"""# 7. Testing"""
text = "Can you help me?"
pred = predictions(text)
get_final_output(pred, unique_intent)
"""# 8. Save/Load Pickle"""
# from sklearn.externals import joblib
# joblib.dump(model, 'modelnlp.pkl')
# nlp_model = open('modelnlp.pkl','rb')
# nlp = joblib.load(nlp_model)
# !pip install git+https://github.com/TinkerMob/keras_albert_model.git
# from keras_albert_model import build_albert
"""# 9. Experiment with Monkeyzlearn API"""
from monkeylearn import MonkeyLearn
ml = MonkeyLearn('e7e230d51a8668a72eea86c29559bef04bd6c8fb')
data = ["Hi Feco, looks promising, I would like to schedule a call tomorrow and see the demo. What times do you have available? Thanks, Ryan."]
model_id = 'cl_v9GTn7zi'
result = ml.classifiers.classify(model_id, data)
print(result.body)
# !pip install monkeylearn
"""# 10. BERT Model"""
!pip install bert-for-tf2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import os
from sklearn.metrics import roc_curve
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Embedding, Activation, LSTM, SimpleRNN, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import bert
from tqdm import tqdm
from tensorflow.keras import backend as K
import tensorflow as tf
import tensorflow_hub as hub
print("TensorFlow Version:",tf.__version__)
print("Hub version: ",hub.__version__)
# Params for bert model
class BertModel(object):
def __init__(self):
self.max_len = 128
bert_path = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1"
FullTokenizer=bert.bert_tokenization.FullTokenizer
self.bert_module = hub.KerasLayer(bert_path,trainable=True)
self.vocab_file = self.bert_module.resolved_object.vocab_file.asset_path.numpy()
self.do_lower_case = self.bert_module.resolved_object.do_lower_case.numpy()
self.tokenizer = FullTokenizer(self.vocab_file,self.do_lower_case)
def get_masks(self,tokens, max_seq_length):
return [1]*len(tokens) + [0] * (max_seq_length - len(tokens))
def get_segments(self,tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
if token == "[SEP]":
current_segment_id = 1
return segments + [0] * (max_seq_length - len(tokens))
def get_ids(self,tokens, tokenizer, max_seq_length):
"""Token ids from Tokenizer vocab"""
token_ids = tokenizer.convert_tokens_to_ids(tokens,)
input_ids = token_ids + [0] * (max_seq_length-len(token_ids))
return input_ids
def create_single_input(self,sentence,maxlen):
stokens = self.tokenizer.tokenize(sentence)
stokens = stokens[:maxlen]
stokens = ["[CLS]"] + stokens + ["[SEP]"]
ids = self.get_ids(stokens, self.tokenizer, self.max_len)
masks = self.get_masks(stokens, self.max_len)
segments = self.get_segments(stokens, self.max_len)
return ids,masks,segments
def create_input_array(self,sentences):
input_ids, input_masks, input_segments = [], [], []
for sentence in tqdm(sentences,position=0, leave=True):
ids,masks,segments=self.create_single_input(sentence,self.max_len-2)
input_ids.append(ids)
input_masks.append(masks)
input_segments.append(segments)
tensor = [np.asarray(input_ids, dtype=np.int32),
np.asarray(input_masks, dtype=np.int32),
np.asarray(input_segments, dtype=np.int32)]
return tensor
class PreprocessingBertData():
def prepare_data_x(self,train_sentences):
x = bert_model_obj.create_input_array(train_sentences)
return x
def prepare_data_y(self,train_labels):
y = list()
for item in train_labels:
label = item
y.append(label)
y = np.array(y)
return y
bert_model_obj = BertModel()
train_sentences = sentences
output_one_hot.shape
train_labels = output_one_hot.tolist()
output_one_hot
preprocess_bert_data_obj = PreprocessingBertData()
x = preprocess_bert_data_obj.prepare_data_x(train_sentences)
y = preprocess_bert_data_obj.prepare_data_y(train_labels)
train_input_ids, train_input_masks, train_segment_ids = x
train_labels = y
class DesignModel():
def __init__(self):
self.model = None
self.train_data = [train_input_ids, train_input_masks, train_segment_ids]
self.train_labels = train_labels
def bert_model(self,max_seq_length):
in_id = Input(shape=(max_seq_length,), dtype=tf.int32, name="input_ids")
in_mask = Input(shape=(max_seq_length,), dtype=tf.int32, name="input_masks")
in_segment = Input(shape=(max_seq_length,), dtype=tf.int32, name="segment_ids")
bert_inputs = [in_id, in_mask, in_segment]
pooled_output, sequence_output = bert_model_obj.bert_module(bert_inputs)
x = tf.keras.layers.GlobalAveragePooling1D()(sequence_output)
x = tf.keras.layers.Dropout(0.2)(x)
out = tf.keras.layers.Dense(21, activation="softmax", name="dense_output")(x)
self.model = tf.keras.models.Model(inputs=bert_inputs, outputs=out)
self.model.compile(optimizer=tf.keras.optimizers.Adam(1e-5),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
self.model.summary()
def model_train(self,batch_size,num_epoch):
print("Fitting to model")
self.model.fit(self.train_data,self.train_labels,epochs=num_epoch,batch_size=batch_size,validation_split=0.2,shuffle=True)
print("Model Training complete.")
def save_model(self,model,model_name):
self.model.save(model_name+".h5")
print("Model saved to Model folder.")
model_obj = DesignModel()
model_obj.bert_model(bert_model_obj.max_len)
model_obj.bert_model(21)
# model_obj.model_train(1113, 1)
| max_length | identifier_name |
gru_model.py | # -*- coding: utf-8 -*-
"""Intent GRU Model 90+.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zH4GNqFS_Z4PxGEueU5Y6g_qOevCl-6d
<a href="https://colab.research.google.com/github/Dark-Sied/Intent_Classification/blob/master/Intent_classification_final.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 1.Import Libraries
"""
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.lancaster import LancasterStemmer
import nltk
import re
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Bidirectional, Embedding, Dropout
from keras.callbacks import ModelCheckpoint
from google.colab import files
"""# 2. Upload dataset"""
files.upload()
def load_dataset(filename):
df = pd.read_csv(filename, encoding = "latin1", names = ["Sentence", "Intent"])
print(df.head())
intent = df["Intent"]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
intent, unique_intent, sentences = load_dataset("Dataset.csv")
intent
sentences
print(sentences[:10])
nltk.download("stopwords")
nltk.download("punkt")
#define stemmer
stemmer = LancasterStemmer()
"""# 3. Data Cleaning"""
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = word_tokenize(clean)
#stemming
words.append([i.lower() for i in w])
return words
cleaned_words = cleaning(sentences)
print(len(cleaned_words))
print(cleaned_words[:2])
"""### 3.1 Keras Tokenizer"""
def create_tokenizer(words, filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def max_length(words):
return(len(max(words, key = len)))
word_tokenizer = create_tokenizer(cleaned_words)
vocab_size = len(word_tokenizer.word_index) + 1
max_length = max_length(cleaned_words)
print("Vocab Size = %d and Maximum length = %d" % (vocab_size, max_length))
"""### 3.2 One Hot Encoding for Model Fed"""
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
encoded_doc = encoding_doc(word_tokenizer, cleaned_words)
def padding_doc(encoded_doc, max_length):
return(pad_sequences(encoded_doc, maxlen = max_length, padding = "post"))
padded_doc = padding_doc(encoded_doc, max_length)
padded_doc[:5]
print("Shape of padded docs = ",padded_doc.shape)
#tokenizer with filter changed
output_tokenizer = create_tokenizer(unique_intent, filters = '!"#$%&()*+,-/:;<=>?@[\]^`{|}~')
output_tokenizer.word_index
encoded_output = encoding_doc(output_tokenizer, intent)
encoded_output = np.array(encoded_output).reshape(len(encoded_output), 1)
encoded_output.shape
def one_hot(encode):
o = OneHotEncoder(sparse = False)
return(o.fit_transform(encode))
output_one_hot = one_hot(encoded_output)
output_one_hot.shape
"""# 4. Train and Validation Split"""
from sklearn.model_selection import train_test_split
train_X, val_X, train_Y, val_Y = train_test_split(padded_doc, output_one_hot, shuffle = True, test_size = 0.2)
print("Shape of train_X = %s and train_Y = %s" % (train_X.shape, train_Y.shape))
print("Shape of val_X = %s and val_Y = %s" % (val_X.shape, val_Y.shape))
"""# 5. GRU Modeling"""
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128, return_sequences=True)))
model.add(Bidirectional(LSTM(64)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
model = create_model(vocab_size, max_length)
model.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
model.summary()
"""# 6. Training"""
filename = 'model.h5'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
hist = model.fit(train_X, train_Y, epochs = 100, batch_size = 32, validation_data = (val_X, val_Y), callbacks = [checkpoint])
loss = pd.DataFrame({'loss': model.history.history['accuracy'], 'auc': model.history.history['val_accuracy'] })
loss.plot()
model = load_model("model.h5")
def predictions(text):
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", text)
test_word = word_tokenize(clean)
test_word = [w.lower() for w in test_word]
test_ls = word_tokenizer.texts_to_sequences(test_word)
print(test_word)
#Check for unknown words
if [] in test_ls:
test_ls = list(filter(None, test_ls))
test_ls = np.array(test_ls).reshape(1, len(test_ls))
x = padding_doc(test_ls, max_length)
pred = model.predict_proba(x)
return pred
def get_final_output(pred, classes):
predictions = pred[0]
classes = np.array(classes)
ids = np.argsort(-predictions)
classes = classes[ids]
predictions = -np.sort(-predictions)
for i in range(pred.shape[1]):
|
"""# 7. Testing"""
text = "Can you help me?"
pred = predictions(text)
get_final_output(pred, unique_intent)
"""# 8. Save/Load Pickle"""
# from sklearn.externals import joblib
# joblib.dump(model, 'modelnlp.pkl')
# nlp_model = open('modelnlp.pkl','rb')
# nlp = joblib.load(nlp_model)
# !pip install git+https://github.com/TinkerMob/keras_albert_model.git
# from keras_albert_model import build_albert
"""# 9. Experiment with Monkeyzlearn API"""
from monkeylearn import MonkeyLearn
ml = MonkeyLearn('e7e230d51a8668a72eea86c29559bef04bd6c8fb')
data = ["Hi Feco, looks promising, I would like to schedule a call tomorrow and see the demo. What times do you have available? Thanks, Ryan."]
model_id = 'cl_v9GTn7zi'
result = ml.classifiers.classify(model_id, data)
print(result.body)
# !pip install monkeylearn
"""# 10. BERT Model"""
!pip install bert-for-tf2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import os
from sklearn.metrics import roc_curve
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Embedding, Activation, LSTM, SimpleRNN, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import bert
from tqdm import tqdm
from tensorflow.keras import backend as K
import tensorflow as tf
import tensorflow_hub as hub
print("TensorFlow Version:",tf.__version__)
print("Hub version: ",hub.__version__)
# Params for bert model
class BertModel(object):
def __init__(self):
self.max_len = 128
bert_path = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1"
FullTokenizer=bert.bert_tokenization.FullTokenizer
self.bert_module = hub.KerasLayer(bert_path,trainable=True)
self.vocab_file = self.bert_module.resolved_object.vocab_file.asset_path.numpy()
self.do_lower_case = self.bert_module.resolved_object.do_lower_case.numpy()
self.tokenizer = FullTokenizer(self.vocab_file,self.do_lower_case)
def get_masks(self,tokens, max_seq_length):
return [1]*len(tokens) + [0] * (max_seq_length - len(tokens))
def get_segments(self,tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
if token == "[SEP]":
current_segment_id = 1
return segments + [0] * (max_seq_length - len(tokens))
def get_ids(self,tokens, tokenizer, max_seq_length):
"""Token ids from Tokenizer vocab"""
token_ids = tokenizer.convert_tokens_to_ids(tokens,)
input_ids = token_ids + [0] * (max_seq_length-len(token_ids))
return input_ids
def create_single_input(self,sentence,maxlen):
stokens = self.tokenizer.tokenize(sentence)
stokens = stokens[:maxlen]
stokens = ["[CLS]"] + stokens + ["[SEP]"]
ids = self.get_ids(stokens, self.tokenizer, self.max_len)
masks = self.get_masks(stokens, self.max_len)
segments = self.get_segments(stokens, self.max_len)
return ids,masks,segments
def create_input_array(self,sentences):
input_ids, input_masks, input_segments = [], [], []
for sentence in tqdm(sentences,position=0, leave=True):
ids,masks,segments=self.create_single_input(sentence,self.max_len-2)
input_ids.append(ids)
input_masks.append(masks)
input_segments.append(segments)
tensor = [np.asarray(input_ids, dtype=np.int32),
np.asarray(input_masks, dtype=np.int32),
np.asarray(input_segments, dtype=np.int32)]
return tensor
class PreprocessingBertData():
def prepare_data_x(self,train_sentences):
x = bert_model_obj.create_input_array(train_sentences)
return x
def prepare_data_y(self,train_labels):
y = list()
for item in train_labels:
label = item
y.append(label)
y = np.array(y)
return y
bert_model_obj = BertModel()
train_sentences = sentences
output_one_hot.shape
train_labels = output_one_hot.tolist()
output_one_hot
preprocess_bert_data_obj = PreprocessingBertData()
x = preprocess_bert_data_obj.prepare_data_x(train_sentences)
y = preprocess_bert_data_obj.prepare_data_y(train_labels)
train_input_ids, train_input_masks, train_segment_ids = x
train_labels = y
class DesignModel():
def __init__(self):
self.model = None
self.train_data = [train_input_ids, train_input_masks, train_segment_ids]
self.train_labels = train_labels
def bert_model(self,max_seq_length):
in_id = Input(shape=(max_seq_length,), dtype=tf.int32, name="input_ids")
in_mask = Input(shape=(max_seq_length,), dtype=tf.int32, name="input_masks")
in_segment = Input(shape=(max_seq_length,), dtype=tf.int32, name="segment_ids")
bert_inputs = [in_id, in_mask, in_segment]
pooled_output, sequence_output = bert_model_obj.bert_module(bert_inputs)
x = tf.keras.layers.GlobalAveragePooling1D()(sequence_output)
x = tf.keras.layers.Dropout(0.2)(x)
out = tf.keras.layers.Dense(21, activation="softmax", name="dense_output")(x)
self.model = tf.keras.models.Model(inputs=bert_inputs, outputs=out)
self.model.compile(optimizer=tf.keras.optimizers.Adam(1e-5),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")])
self.model.summary()
def model_train(self,batch_size,num_epoch):
print("Fitting to model")
self.model.fit(self.train_data,self.train_labels,epochs=num_epoch,batch_size=batch_size,validation_split=0.2,shuffle=True)
print("Model Training complete.")
def save_model(self,model,model_name):
self.model.save(model_name+".h5")
print("Model saved to Model folder.")
model_obj = DesignModel()
model_obj.bert_model(bert_model_obj.max_len)
model_obj.bert_model(21)
# model_obj.model_train(1113, 1)
| print("%s has confidence = %s" % (classes[i], (predictions[i]))) | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.