file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
lib.rs | !( $($tail)* ).put($k.into(), $v.into())
};
}
#[macro_export]
macro_rules! try_opt {
($expr:expr) => {
match $expr {
Ok(val) => val,
Err(err) => return Err(err),
}
};
}
#[doc(hidden)]
pub struct Client {
_private: (),
}
/// Clickhouse client handle.
pub struct ClientHandle {
inner: Option<ClickhouseTransport>,
context: Context,
pool: PoolBinding,
}
impl fmt::Debug for ClientHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientHandle")
.field("server_info", &self.context.server_info)
.finish()
}
}
impl Client {
#[deprecated(since = "0.1.4", note = "please use Pool to connect")]
pub async fn connect(options: Options) -> Result<ClientHandle> {
let source = options.into_options_src();
Self::open(source, None).await
}
pub(crate) async fn open(source: OptionsSource, pool: Option<Pool>) -> Result<ClientHandle> {
let options = try_opt!(source.get());
let compress = options.compression;
let timeout = options.connection_timeout;
let context = Context {
options: source.clone(),
..Context::default()
};
with_timeout(
async move {
let addr = match &pool {
None => &options.addr,
Some(p) => p.get_addr(),
};
info!("try to connect to {}", addr);
if addr.port() == Some(8123) {
warn!("You should use port 9000 instead of 8123 because clickhouse-rs work through the binary interface.");
}
let mut stream = ConnectingStream::new(addr, &options).await?;
stream.set_nodelay(options.nodelay)?;
stream.set_keepalive(options.keepalive)?;
let transport = ClickhouseTransport::new(stream, compress, pool.clone());
let mut handle = ClientHandle {
inner: Some(transport),
context,
pool: match pool {
None => PoolBinding::None,
Some(p) => PoolBinding::Detached(p),
},
};
handle.hello().await?;
Ok(handle)
},
timeout,
)
.await
}
}
impl ClientHandle {
pub(crate) async fn hello(&mut self) -> Result<()> {
let context = self.context.clone();
info!("[hello] -> {:?}", &context);
let mut h = None;
let mut info = None;
let mut stream = self.inner.take().unwrap().call(Cmd::Hello(context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Hello(inner, server_info)) => {
info!("[hello] <- {:?}", &server_info);
h = Some(inner);
info = Some(server_info);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
self.context.server_info = info.unwrap();
Ok(())
}
pub async fn ping(&mut self) -> Result<()> {
let timeout = try_opt!(self.context.options.get()).ping_timeout;
with_timeout(
async move {
info!("[ping]");
let mut h = None;
let transport = self.inner.take().unwrap().clear().await?;
let mut stream = transport.call(Cmd::Ping);
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Pong(inner)) => {
info!("[pong]");
h = Some(inner);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
Ok(())
},
timeout,
)
.await
}
/// Executes Clickhouse `query` on Conn.
pub fn query<Q>(&mut self, sql: Q) -> QueryResult
where
Query: From<Q>,
{
let query = Query::from(sql);
QueryResult {
client: self,
query,
}
}
/// Convenience method to prepare and execute a single SQL statement.
pub async fn execute<Q>(&mut self, sql: Q) -> Result<()>
where
Query: From<Q>,
{
let transport = self.execute_(sql).await?;
self.inner = Some(transport);
Ok(())
}
async fn execute_<Q>(&mut self, sql: Q) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.execute_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let context = self.context.clone();
let query = Query::from(sql);
with_timeout(
async {
self.wrap_future(move |c| {
info!("[execute query] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let mut h = None;
let transport = transport.clear().await?;
let mut stream = transport.call(Cmd::SendQuery(query, context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Eof(inner)) => h = Some(inner),
Ok(Packet::Block(_))
| Ok(Packet::ProfileInfo(_))
| Ok(Packet::Progress(_)) => (),
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
Ok(h.unwrap())
}
})
.await
},
timeout,
)
.await
}
/// Convenience method to insert block of data.
pub async fn insert<Q, B>(&mut self, table: Q, block: B) -> Result<()>
where
Query: From<Q>,
B: AsRef<Block>,
{
let transport = self.insert_(table, block.as_ref()).await?;
self.inner = Some(transport);
Ok(())
}
async fn insert_<Q>(&mut self, table: Q, block: &Block) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.insert_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let mut names: Vec<_> = Vec::with_capacity(block.column_count());
for column in block.columns() {
names.push(try_opt!(column_name_to_string(column.name())));
}
let fields = names.join(", ");
let query = Query::from(table)
.map_sql(|table| format!("INSERT INTO {} ({}) VALUES", table, fields));
let context = self.context.clone();
with_timeout(
async {
self.wrap_future(move |c| {
info!("[insert] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let transport = transport.clear().await?;
let stream = transport.call(Cmd::SendQuery(query, context.clone()));
let (transport, b) = stream.read_block().await?;
let dst_block = b.unwrap();
let casted_block = match block.cast_to(&dst_block) {
Ok(value) => value,
Err(err) => return Err(err),
};
let send_cmd = Cmd::Union(
Box::new(Cmd::SendData(casted_block, context.clone())),
Box::new(Cmd::SendData(Block::default(), context.clone())),
);
let (transport, _) = transport.call(send_cmd).read_block().await?;
Ok(transport)
}
})
.await
},
timeout,
)
.await
}
pub(crate) async fn wrap_future<T, R, F>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&mut Self) -> R + Send,
R: Future<Output = Result<T>>,
T: 'static,
{
let ping_before_query = try_opt!(self.context.options.get()).ping_before_query;
if ping_before_query {
self.check_connection().await?;
}
f(self).await
}
pub(crate) fn wrap_stream<'a, F>(&'a mut self, f: F) -> BoxStream<'a, Result<Block>>
where
F: (FnOnce(&'a mut Self) -> BlockStream<'a>) + Send + 'static,
{
let ping_before_query = match self.context.options.get() {
Ok(val) => val.ping_before_query,
Err(err) => return Box::pin(stream::once(future::err(err))),
};
if ping_before_query {
let fut: BoxFuture<'a, BoxStream<'a, Result<Block>>> = Box::pin(async move {
let inner: BoxStream<'a, Result<Block>> = match self.check_connection().await {
Ok(_) => Box::pin(f(self)),
Err(err) => Box::pin(stream::once(future::err(err))),
};
inner
});
Box::pin(fut.flatten_stream())
} else {
| Box::pin(f(self))
}
} | conditional_block | |
git_ftp.js | 1); //remove values from array
}
} while(index > 0);
return this;
};
grunt.util = grunt.util || grunt.utils;
var session = {},
async = grunt.util.async,
log = grunt.log,
_ = grunt.util._,
file = grunt.file,
fs = require('fs'),
path = require('path'),
FtpClient = require('ftp'),
cmd = require("child_process").exec,
gruntRootPath = process.cwd(),
ftp = new FtpClient(),
remoteDirectories = [],
done = null,
GruntGitFtp = function(){
this.hostConfig = null;
return this;
};
/*
* Get host file and parse json
*/
GruntGitFtp.prototype.getHostFile = function(hostName,filename){
//check if file exist
if(fs.existsSync(filename)){
//check key value
if(hostName != null){
this.hostConfig = grunt.file.readJSON(filename);
this.hostConfig._key = hostName;
this.revisionNumber = null;
this.done = null,
this.localDirectories = {},
this.localFiles = null;
}else{
log.error('Error, please check {' + hostName.red + '} or json file format');
}
}else{
log.error('Error, ftp configuration file not found in : ' + gruntRootPath + '/' + filename.red);
}
return this.hostConfig;
};
/*
* Get/Set FTP Key Values
*/
GruntGitFtp.prototype.ftp = function(key,val){
//if key isn't
if(this.hostConfig._key === undefined || typeof(this.hostConfig[this.hostConfig._key]) !== 'object'){
log.error('Error, please check that { \n' +
' "'+ this.hostConfig._key.red +'": { \n' +
' "host": "ftp.host-address.com", \n' +
' "port": 21, \n' +
' "user": "ftp-username", \n' +
' "password": "ftp-account-password", \n' +
' "remotePath": "ftp-basepath" \n' +
' } \n' +
'} exist your ftp configuration file');
throw 'key not found in .gitftppass';
}
//get host config key
if(arguments.length === 2){
this.hostConfig[this.hostConfig._key][key] = val;
}
return (this.hostConfig[this.hostConfig._key][key] ? this.hostConfig[this.hostConfig._key][key] : null);
};
/*
* Command function return string
*/
GruntGitFtp.prototype.cmd = function(command,cb,err){
this.commands(false,command,cb,err);
};
/*
* Command function return array
*/
GruntGitFtp.prototype.cmdSplit = function(split,command,cb,err){
this.commands(split,command,cb,err);
};
/*
* Command wrapper function
*/
GruntGitFtp.prototype.commands = function(shouldSplit,command,cb,err){
cmd(command,function(error, stdout, stderr){
var temp = null;
if(!error){
if(shouldSplit === false){
cb(stdout);
}else{
temp = stdout.split(shouldSplit).remove('');
cb(temp);
}
}else{
err(error);
}
});
};
/*
* report errors
*/
GruntGitFtp.prototype.gitCmdErr = function(err){
if(err.toString().indexOf('Needed a single revision')){
log.error('Git Needed a single revision, please run'.red);
log.ok('git add .'.red);
log.ok('git commit -m "your message goes here"'.red);
}else{
log.error(err);
}
done();
};
/*
* Create Remote Directory
*/
GruntGitFtp.prototype.createRemoteDirectory = function(list,cb){
var remoteRootPath = this.ftp('remotePath');
if(Object.keys(list).length){
async.forEach(Object.keys(list),function(dir, nextArray){
ftp.mkdir(dir,true,function(err){
log.ok('created remote directory: ' + dir);
nextArray();
});
},function(err){
cb(null);
});
}else{
cb(true,'error while creating directory');
}
};
/*
* Upload local file to server
*/
GruntGitFtp.prototype.uploadFiles = function(list,cb){
var remoteRootPath = this.ftp('remotePath'),
host = this.ftp('host');
if(list.length){
async.forEach(list,function(filepath, nextArray){
ftp.put(gruntRootPath + '/' + filepath,path.normalize(remoteRootPath + '/' + filepath),function(err){
if(err) |
log.ok('Uploaded from: ' + filepath + ' >> ' + host.blue + '@' + path.normalize(remoteRootPath + '/' + filepath).green);
ftp.end();
nextArray();
});
},function(err){
cb(null);
});
}else{
cb(true,'error while uploading file');
}
};
/*
* Grunt Multi Task
*/
grunt.registerMultiTask('git_ftp','queries last git commit and FTPs modified files to server',function(){
var gruntGitFtpApp = new GruntGitFtp(),
//options with these defaults
options = this.options({
'hostFile':'.gitftppass',
'host':'default'
});
done = this.async();
//get all ignored files
if (options.ignore) {
gruntGitFtpApp.ignoredFiles = grunt.file.expand(options.ignore);
}
//get host file
if(gruntGitFtpApp.getHostFile(options.host,options.hostFile) === null){
//return if json file is not parse
return this;
}
//login to FTP
ftp.connect({
host: gruntGitFtpApp.ftp('host'),
port: gruntGitFtpApp.ftp('port'),
user: gruntGitFtpApp.ftp('user'),
password: gruntGitFtpApp.ftp('password')
});
//FTP is ready
ftp.on('ready',function(){
log.ok('Connected to ftp host: ' + gruntGitFtpApp.ftp('host').blue + ' | root: ' + gruntGitFtpApp.ftp('remotePath').blue );
//callbacks
grunt.util.async.waterfall([
function(callback){ //handles git commands
//get last commited revision number
gruntGitFtpApp.cmd('git rev-parse --verify HEAD',function(output){
//revisionNumber trim and toString
if (grunt.option('commit')) {
gruntGitFtpApp.revisionNumber = grunt.option('commit');
} else {
gruntGitFtpApp.revisionNumber = output.toString();
}
//check string length
if(gruntGitFtpApp.revisionNumber.length !== 0){
//notify user
log.ok('git last commit HASH: ' + gruntGitFtpApp.revisionNumber.blue);
gruntGitFtpApp.cmd("git log --pretty=format:'%h' | wc -l",function(output){
var totalRevisions = parseInt(output,16);
//if first commit upload all files
if(totalRevisions === 0){
//get list of commited files/directories
gruntGitFtpApp.cmdSplit(/\n/,'git show --pretty="format:" --name-only ' + gruntGitFtpApp.revisionNumber,function(output){
//check output length
if(output.length !== 0){
//Get List of commited items
gruntGitFtpApp.lastCommitedItems = output;
//next callback
callback(null,gruntGitFtpApp.lastCommitedItems);
}else{
log.error('Error while getting Git Commited items');
callback(true);
}
},gruntGitFtpApp.gitCmdErr);
}else{ //else only upload changes files
//get list of commited files/directories
gruntGitFtpApp.cmdSplit(/\n/,'git diff-tree --no-commit-id --name-only -r ' + gruntGitFtpApp.revisionNumber,function(output){
//check output length
if(output.length !== 0){
//Get List of commited items
gruntGitFtpApp.lastCommitedItems = output;
//next callback
callback(null,gruntGitFtpApp.lastCommitedItems);
}else{
log.error('Error while getting Git Commited items');
callback(true);
}
},gruntGitFtpApp.gitCmdErr);
}
});
}else{
log.error('Error while getting Git Commit Hash');
callback(true);
}
},gruntGitFtpApp.gitCmdErr);
},function(argLastCommited,callback){ //handles filtering files/directories
var relative_path = null,
remoteRootPath = gruntGitFtpApp.ftp('remotePath'); //get remote path from .gitftppass | {
cb(true,err);
throw err;
} | conditional_block |
git_ftp.js | untGitFtp = function(){
this.hostConfig = null;
return this;
};
/*
* Get host file and parse json
*/
GruntGitFtp.prototype.getHostFile = function(hostName,filename){
//check if file exist
if(fs.existsSync(filename)){
//check key value
if(hostName != null){
this.hostConfig = grunt.file.readJSON(filename);
this.hostConfig._key = hostName;
this.revisionNumber = null;
this.done = null,
this.localDirectories = {},
this.localFiles = null;
}else{
log.error('Error, please check {' + hostName.red + '} or json file format');
}
}else{
log.error('Error, ftp configuration file not found in : ' + gruntRootPath + '/' + filename.red);
}
return this.hostConfig;
};
/*
* Get/Set FTP Key Values
*/
GruntGitFtp.prototype.ftp = function(key,val){
//if key isn't
if(this.hostConfig._key === undefined || typeof(this.hostConfig[this.hostConfig._key]) !== 'object'){
log.error('Error, please check that { \n' +
' "'+ this.hostConfig._key.red +'": { \n' +
' "host": "ftp.host-address.com", \n' +
' "port": 21, \n' +
' "user": "ftp-username", \n' +
' "password": "ftp-account-password", \n' +
' "remotePath": "ftp-basepath" \n' +
' } \n' +
'} exist your ftp configuration file');
throw 'key not found in .gitftppass';
}
//get host config key
if(arguments.length === 2){
this.hostConfig[this.hostConfig._key][key] = val;
}
return (this.hostConfig[this.hostConfig._key][key] ? this.hostConfig[this.hostConfig._key][key] : null);
};
/*
* Command function return string
*/
GruntGitFtp.prototype.cmd = function(command,cb,err){
this.commands(false,command,cb,err);
};
/*
* Command function return array
*/
GruntGitFtp.prototype.cmdSplit = function(split,command,cb,err){
this.commands(split,command,cb,err);
};
/*
* Command wrapper function
*/
GruntGitFtp.prototype.commands = function(shouldSplit,command,cb,err){
cmd(command,function(error, stdout, stderr){
var temp = null;
if(!error){
if(shouldSplit === false){
cb(stdout);
}else{
temp = stdout.split(shouldSplit).remove('');
cb(temp);
}
}else{
err(error);
}
});
};
/*
* report errors
*/
GruntGitFtp.prototype.gitCmdErr = function(err){
if(err.toString().indexOf('Needed a single revision')){
log.error('Git Needed a single revision, please run'.red);
log.ok('git add .'.red);
log.ok('git commit -m "your message goes here"'.red);
}else{
log.error(err);
}
done();
};
/*
* Create Remote Directory
*/
GruntGitFtp.prototype.createRemoteDirectory = function(list,cb){
var remoteRootPath = this.ftp('remotePath');
if(Object.keys(list).length){
async.forEach(Object.keys(list),function(dir, nextArray){
ftp.mkdir(dir,true,function(err){
log.ok('created remote directory: ' + dir);
nextArray();
});
},function(err){
cb(null);
});
}else{
cb(true,'error while creating directory');
}
};
/*
* Upload local file to server
*/
GruntGitFtp.prototype.uploadFiles = function(list,cb){
var remoteRootPath = this.ftp('remotePath'),
host = this.ftp('host');
if(list.length){
async.forEach(list,function(filepath, nextArray){
ftp.put(gruntRootPath + '/' + filepath,path.normalize(remoteRootPath + '/' + filepath),function(err){
if(err){
cb(true,err);
throw err;
}
log.ok('Uploaded from: ' + filepath + ' >> ' + host.blue + '@' + path.normalize(remoteRootPath + '/' + filepath).green);
ftp.end();
nextArray();
});
},function(err){
cb(null);
});
}else{
cb(true,'error while uploading file');
}
};
/*
* Grunt Multi Task
*/
grunt.registerMultiTask('git_ftp','queries last git commit and FTPs modified files to server',function(){
var gruntGitFtpApp = new GruntGitFtp(),
//options with these defaults
options = this.options({
'hostFile':'.gitftppass',
'host':'default'
});
done = this.async();
//get all ignored files
if (options.ignore) {
gruntGitFtpApp.ignoredFiles = grunt.file.expand(options.ignore);
}
//get host file
if(gruntGitFtpApp.getHostFile(options.host,options.hostFile) === null){
//return if json file is not parse
return this;
}
//login to FTP
ftp.connect({
host: gruntGitFtpApp.ftp('host'),
port: gruntGitFtpApp.ftp('port'),
user: gruntGitFtpApp.ftp('user'),
password: gruntGitFtpApp.ftp('password')
});
//FTP is ready
ftp.on('ready',function(){
log.ok('Connected to ftp host: ' + gruntGitFtpApp.ftp('host').blue + ' | root: ' + gruntGitFtpApp.ftp('remotePath').blue );
//callbacks
grunt.util.async.waterfall([
function(callback){ //handles git commands
//get last commited revision number
gruntGitFtpApp.cmd('git rev-parse --verify HEAD',function(output){
//revisionNumber trim and toString
if (grunt.option('commit')) {
gruntGitFtpApp.revisionNumber = grunt.option('commit');
} else {
gruntGitFtpApp.revisionNumber = output.toString();
}
//check string length
if(gruntGitFtpApp.revisionNumber.length !== 0){
//notify user
log.ok('git last commit HASH: ' + gruntGitFtpApp.revisionNumber.blue);
gruntGitFtpApp.cmd("git log --pretty=format:'%h' | wc -l",function(output){
var totalRevisions = parseInt(output,16);
//if first commit upload all files
if(totalRevisions === 0){
//get list of commited files/directories
gruntGitFtpApp.cmdSplit(/\n/,'git show --pretty="format:" --name-only ' + gruntGitFtpApp.revisionNumber,function(output){
//check output length
if(output.length !== 0){
//Get List of commited items
gruntGitFtpApp.lastCommitedItems = output;
//next callback
callback(null,gruntGitFtpApp.lastCommitedItems);
}else{
log.error('Error while getting Git Commited items');
callback(true);
}
},gruntGitFtpApp.gitCmdErr);
}else{ //else only upload changes files
//get list of commited files/directories
gruntGitFtpApp.cmdSplit(/\n/,'git diff-tree --no-commit-id --name-only -r ' + gruntGitFtpApp.revisionNumber,function(output){
//check output length
if(output.length !== 0){
//Get List of commited items
gruntGitFtpApp.lastCommitedItems = output;
//next callback
callback(null,gruntGitFtpApp.lastCommitedItems);
}else{
log.error('Error while getting Git Commited items');
callback(true);
}
},gruntGitFtpApp.gitCmdErr);
}
});
}else{
log.error('Error while getting Git Commit Hash');
callback(true);
}
},gruntGitFtpApp.gitCmdErr);
},function(argLastCommited,callback){ //handles filtering files/directories
var relative_path = null,
remoteRootPath = gruntGitFtpApp.ftp('remotePath'); //get remote path from .gitftppass
if(remoteRootPath.length === 0){
log.error('Error, please check {remote path} in .gitftppass');
return callback(true);
}
//filter array and return remote filepath
gruntGitFtpApp.localFiles = argLastCommited.filter(function(filepath){
if(fs.existsSync(gruntRootPath + '/' + filepath)){
//skip ignored
if (gruntGitFtpApp.ignoredFiles && gruntGitFtpApp.ignoredFiles.indexOf(filepath) > -1) {
return false;
}
//store directory as a key(path) value(successfully uploaded) | random_line_split | ||
main.rs |
//! below.
//!
//! ### Linux / OS X
//!
//! You have two options, place `cargo-count` into a directory that is already
//! located in your `$PATH` variable (To see which directories those are, open
//! a terminal and type `echo "${PATH//:/\n}"`, the quotation marks are
//! important), or you can add a custom directory to your `$PATH`
//!
//! **Option 1**
//! If you have write permission to a directory listed in your `$PATH` or you
//! have root permission (or via `sudo`), simply copy the `cargo-count` to that
//! directory `# sudo cp cargo-count /usr/local/bin`
//!
//! **Option 2**
//! If you do not have root, `sudo`, or write permission to any directory
//! already in `$PATH` you can create a directory inside your home directory,
//! and add that. Many people use `$HOME/.bin` to keep it hidden (and not
//! clutter your home directory), or `$HOME/bin` if you want it to be always
//! visible. Here is an example to make the directory, add it to `$PATH`, and
//! copy `cargo-count` there.
//!
//! Simply change `bin` to whatever you'd like to name the directory, and
//! `.bashrc` to whatever your shell startup file is (usually `.bashrc`,
//! `.bash_profile`, or `.zshrc`)
//!
//! ```sh
//! $ mkdir ~/bin
//! $ echo "export PATH=$PATH:$HOME/bin" >> ~/.bashrc
//! $ cp cargo-count ~/bin
//! $ source ~/.bashrc
//! ```
//!
//! ### Windows
//!
//! On Windows 7/8 you can add directory to the `PATH` variable by opening a
//! command line as an administrator and running
//!
//! ```sh
//! C:\> setx path "%path%;C:\path\to\cargo-count\binary"
//! ```
//!
//! Otherwise, ensure you have the `cargo-count` binary in the directory which
//! you operating in the command line from, because Windows automatically adds
//! your current directory to PATH (i.e. if you open a command line to
//! `C:\my_project\` to use `cargo-count` ensure `cargo-count.exe` is inside
//! that directory as well).
//!
//!
//! ### Options
//!
//! There are a few options for using `cargo-count` which should be somewhat
//! self explanitory.
//!
//! ```ignore
//! USAGE:
//! cargo count [FLAGS] [OPTIONS] [--] [ARGS]
//!
//! FLAGS:
//! -S, --follow-symlinks Follows symlinks and counts source files it
//! finds
//! -a, --all Do not ignore .gitignored paths
//! (Defaults to false when omitted)
//! -h, --help Prints help information
//! --unsafe-statistics Displays lines and percentages of "unsafe"
//! code
//! -V, --version Prints version information
//! -v, --verbose Print verbose output
//!
//! OPTIONS:
//! -l, --language <exts>... Only count these languges (by source code
//! extension)
//! (i.e. '-l js py cpp')
//! -e, --exclude <paths>... Files or directories to exclude
//! (automatically includes '.git')
//! --utf8-rule <rule> Sets the UTF-8 parsing rule (Defaults to
//! 'strict')
//! [values: ignore lossy strict]
//! -s, --separator <sep> Set the thousands separator for pretty
//! printing
//!
//! ARGS:
//! to_count... The files or directories (including children) to count
//! (defaults to current working directory when omitted)
//!
//! When using '--exclude <path>' the path given can either be relative to the
//! current
//! directory, or absolute. When '<path>' is a file, it must be relative to the
//! current
//! directory or it will not be found. Example, if the current directory has a
//! child
//! directory named 'target' with a child fild 'test.rs' and you use `--exclude
//! target/test.rs'
//!
//! Globs are also supported. For example, to exclude 'test.rs' files from all
//! child directories
//! of the current directory you could do '--exclude */test.rs'.
//! ```
//!
//! ## License
//!
//! `cargo-count` is released under the terms of the MIT. See the LICENSE-MIT
//! file for the details.
#![cfg_attr(feature = "nightly", feature(plugin))]
#![cfg_attr(feature = "lints", plugin(clippy))]
#![cfg_attr(feature = "lints", allow(explicit_iter_loop))]
#![cfg_attr(feature = "lints", allow(should_implement_trait))]
#![cfg_attr(feature = "lints", allow(unstable_features))]
#![cfg_attr(feature = "lints", deny(warnings))]
#![cfg_attr(
not(any(feature = "nightly", feature = "unstable")),
deny(unstable_features)
)]
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications
)]
#[macro_use]
extern crate clap;
#[cfg(feature = "color")]
extern crate ansi_term;
extern crate gitignore;
extern crate glob;
extern crate regex;
extern crate tabwriter;
#[cfg(feature = "debug")]
use std::env;
use clap::{App, AppSettings, Arg, SubCommand};
use config::Config;
use count::Counts;
use error::{CliError, CliResult};
use fmt::Format;
#[macro_use]
mod macros;
mod comment;
mod config;
mod count;
mod error;
mod fmt;
mod fsutil;
mod language;
static UTF8_RULES: [&'static str; 3] = ["strict", "lossy", "ignore"];
fn main() {
debugln!(
"executing; cmd=cargo-count; args={:?}",
env::args().collect::<Vec<_>>()
);
let m = App::new("cargo-count")
.version(concat!("v", crate_version!()))
// We have to lie about our binary name since this will be a third party
// subcommand for cargo but we want usage strings to generated properly
.bin_name("cargo")
// Global version uses the version we supplied (Cargo.toml) for all subcommands
// as well
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequired])
// We use a subcommand because everything parsed after `cargo` is sent to the
// third party
// plugin which will then be interpreted as a subcommand/positional arg by clap
.subcommand(SubCommand::with_name("count")
.author("Kevin K. <kbknapp@gmail.com>")
.about("Displays line counts of code for cargo projects")
.args_from_usage("
-e, --exclude [PATH]... 'Files or directories to exclude (automatically includes \'.git\')'
-a, --all 'Do not ignore .gitignore'd paths'
--unsafe-statistics 'Displays lines and percentages of \"unsafe\" code'
-l, --language [EXT]... 'Only count these languges (i.e. \'-l js py cpp\')'
-v, --verbose 'Print verbose output'
-S, --follow-symlinks 'Follows symlinks and counts source files it finds [default: false]'
[PATH]... 'The files or directories (including children) to count (defaults to \
current working directory when omitted)'")
.arg(Arg::from_usage(
"-s, --separator [CHAR] 'Set the thousands separator for pretty printing'")
.use_delimiter(false)
.validator(single_char))
.arg(Arg::from_usage("--utf8-rule [RULE] 'Sets the UTF-8 parsing rule'")
.default_value("strict")
.possible_values(&UTF8_RULES))
.after_help("\
When using '--exclude <PATH>' the path given can either be relative to the current directory, or \
absolute. When '--exclude <PATH>' is a file or path, it must be relative to the current directory \
or it will not be found. Example, if the current directory has a child directory named 'target' \
with a child fild 'test.rs' and you use `--exclude target/test.rs'
\n\
Globs are also supported. For example, to exclude 'test.rs' files from all child directories of \
the current directory you could do '--exclude */test.rs'."))
.get_matches();
if let Some(m) = m.subcommand_matches("count") {
let cfg = Config::from_matches(m).unwrap_or_else(|e| e.exit());
println!("Gathering information...");
if let Err(e) = execute(cfg) {
e.exit();
}
}
}
fn execute(cfg: Config) -> CliResult<()> | {
debugln!("executing; cmd=execute;");
verboseln!(cfg, "{}: {:?}", Format::Warning("Excluding"), cfg.exclude);
verbose!(
cfg,
"{}",
if cfg.exts.is_some() {
format!(
"{} including files with extension: {}\n",
Format::Warning("Only"),
cfg.exts.as_ref().unwrap().join(", ")
)
} else {
"".to_owned()
}
);
debugln!("Checking for files or dirs to count from cli");
let mut counts = Counts::new(&cfg); | identifier_body | |
main.rs | 327,792 3,163
//! (0.96%)
//! C 54 9,962 1,445 1,492 7,025 7,025
//! (100.00%)
//! CSS 4 1,266 149 52 1,065
//! JavaScript 4 1,118 131 166 821
//! Python 31 4,797 843 585 3,369
//! C Header 13 1,865 284 585 996 996
//! (100.00%)
//! C++ 4 1,611 185 81 1,345 1,345
//! (100.00%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Totals: 6,128 549,129 70,021 136,659 342,413 12,529
//! (3.66%)
//!
//! ```
//!
//! The `--separator ,` sets a `,` character as the thousands separator, and
//! `--unsafe-statistics` looks for, and counts lines of `unsafe`.
//!
//! ## Compiling
//!
//! Follow these instructions to compile `cargo-count`, then skip down to
//! Installation.
//!
//! 1. Ensure you have current version of `cargo` and
//! [Rust](https://www.rust-lang.org) installed
//! 2. Clone the project
//! `$ git clone https://github.com/kbknapp/cargo-count && cd cargo-count`
//! 3. Build the project `$ cargo build --release` (**NOTE:** There is a large
//! performance differnce when compiling without optimizations, so I
//! recommend alwasy using `--release` to enable to them)
//! 4. Once complete, the binary will be located at
//! `target/release/cargo-count`
//!
//! ## Installation and Usage
//!
//! All you need to do is place `cargo-count` somewhere in your `$PATH`. Then
//! run `cargo count` anywhere in your project directory. For full details see
//! below.
//!
//! ### Linux / OS X
//!
//! You have two options, place `cargo-count` into a directory that is already
//! located in your `$PATH` variable (To see which directories those are, open
//! a terminal and type `echo "${PATH//:/\n}"`, the quotation marks are
//! important), or you can add a custom directory to your `$PATH`
//!
//! **Option 1**
//! If you have write permission to a directory listed in your `$PATH` or you
//! have root permission (or via `sudo`), simply copy the `cargo-count` to that
//! directory `# sudo cp cargo-count /usr/local/bin`
//!
//! **Option 2**
//! If you do not have root, `sudo`, or write permission to any directory
//! already in `$PATH` you can create a directory inside your home directory,
//! and add that. Many people use `$HOME/.bin` to keep it hidden (and not
//! clutter your home directory), or `$HOME/bin` if you want it to be always
//! visible. Here is an example to make the directory, add it to `$PATH`, and
//! copy `cargo-count` there.
//!
//! Simply change `bin` to whatever you'd like to name the directory, and
//! `.bashrc` to whatever your shell startup file is (usually `.bashrc`,
//! `.bash_profile`, or `.zshrc`)
//!
//! ```sh
//! $ mkdir ~/bin
//! $ echo "export PATH=$PATH:$HOME/bin" >> ~/.bashrc
//! $ cp cargo-count ~/bin
//! $ source ~/.bashrc
//! ```
//!
//! ### Windows
//!
//! On Windows 7/8 you can add directory to the `PATH` variable by opening a
//! command line as an administrator and running
//!
//! ```sh
//! C:\> setx path "%path%;C:\path\to\cargo-count\binary"
//! ```
//!
//! Otherwise, ensure you have the `cargo-count` binary in the directory which
//! you operating in the command line from, because Windows automatically adds
//! your current directory to PATH (i.e. if you open a command line to
//! `C:\my_project\` to use `cargo-count` ensure `cargo-count.exe` is inside
//! that directory as well).
//!
//!
//! ### Options
//!
//! There are a few options for using `cargo-count` which should be somewhat
//! self explanitory.
//!
//! ```ignore
//! USAGE:
//! cargo count [FLAGS] [OPTIONS] [--] [ARGS]
//!
//! FLAGS:
//! -S, --follow-symlinks Follows symlinks and counts source files it
//! finds
//! -a, --all Do not ignore .gitignored paths
//! (Defaults to false when omitted)
//! -h, --help Prints help information
//! --unsafe-statistics Displays lines and percentages of "unsafe"
//! code
//! -V, --version Prints version information
//! -v, --verbose Print verbose output
//!
//! OPTIONS:
//! -l, --language <exts>... Only count these languges (by source code
//! extension)
//! (i.e. '-l js py cpp')
//! -e, --exclude <paths>... Files or directories to exclude
//! (automatically includes '.git')
//! --utf8-rule <rule> Sets the UTF-8 parsing rule (Defaults to
//! 'strict')
//! [values: ignore lossy strict]
//! -s, --separator <sep> Set the thousands separator for pretty
//! printing
//!
//! ARGS:
//! to_count... The files or directories (including children) to count
//! (defaults to current working directory when omitted)
//!
//! When using '--exclude <path>' the path given can either be relative to the
//! current
//! directory, or absolute. When '<path>' is a file, it must be relative to the
//! current
//! directory or it will not be found. Example, if the current directory has a
//! child
//! directory named 'target' with a child fild 'test.rs' and you use `--exclude
//! target/test.rs'
//!
//! Globs are also supported. For example, to exclude 'test.rs' files from all
//! child directories
//! of the current directory you could do '--exclude */test.rs'.
//! ```
//!
//! ## License
//!
//! `cargo-count` is released under the terms of the MIT. See the LICENSE-MIT
//! file for the details.
#![cfg_attr(feature = "nightly", feature(plugin))]
#![cfg_attr(feature = "lints", plugin(clippy))]
#![cfg_attr(feature = "lints", allow(explicit_iter_loop))]
#![cfg_attr(feature = "lints", allow(should_implement_trait))]
#![cfg_attr(feature = "lints", allow(unstable_features))]
#![cfg_attr(feature = "lints", deny(warnings))]
#![cfg_attr(
not(any(feature = "nightly", feature = "unstable")),
deny(unstable_features)
)]
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications | extern crate clap;
#[cfg(feature = "color")]
extern crate ansi_term;
extern crate gitignore;
extern crate glob;
extern crate regex;
extern crate tabwriter;
#[cfg(feature = "debug")]
use std::env;
use clap::{App, AppSettings, Arg, SubCommand};
use config::Config;
use count::Counts;
use error::{CliError, CliResult};
use fmt::Format;
#[macro_use]
mod macros;
mod comment;
mod config;
mod count;
mod error;
mod fmt;
mod fsutil;
mod language;
static UTF8_RULES: [&'static str; 3] = ["strict", "lossy", "ignore"];
fn main() {
debugln!(
"executing; cmd=cargo-count; args={:?}",
env::args().collect::<Vec<_>>()
);
let m = App::new("cargo-count")
.version(concat!("v", crate_version!()))
// We have to lie about our binary name since this will be a third party
// subcommand for cargo but we want usage strings to generated properly
.bin_name("cargo")
// Global version uses the version we supplied (Cargo.toml) for all subcommands
// as well
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequired])
// We use a subcommand because everything parsed after `cargo` is sent to the
// third party
// plugin which will then be interpreted as a subcommand/positional arg by clap
.subcommand(SubCommand::with_name("count")
.author("Kevin K | )]
#[macro_use] | random_line_split |
main.rs | terminal and type `echo "${PATH//:/\n}"`, the quotation marks are
//! important), or you can add a custom directory to your `$PATH`
//!
//! **Option 1**
//! If you have write permission to a directory listed in your `$PATH` or you
//! have root permission (or via `sudo`), simply copy the `cargo-count` to that
//! directory `# sudo cp cargo-count /usr/local/bin`
//!
//! **Option 2**
//! If you do not have root, `sudo`, or write permission to any directory
//! already in `$PATH` you can create a directory inside your home directory,
//! and add that. Many people use `$HOME/.bin` to keep it hidden (and not
//! clutter your home directory), or `$HOME/bin` if you want it to be always
//! visible. Here is an example to make the directory, add it to `$PATH`, and
//! copy `cargo-count` there.
//!
//! Simply change `bin` to whatever you'd like to name the directory, and
//! `.bashrc` to whatever your shell startup file is (usually `.bashrc`,
//! `.bash_profile`, or `.zshrc`)
//!
//! ```sh
//! $ mkdir ~/bin
//! $ echo "export PATH=$PATH:$HOME/bin" >> ~/.bashrc
//! $ cp cargo-count ~/bin
//! $ source ~/.bashrc
//! ```
//!
//! ### Windows
//!
//! On Windows 7/8 you can add directory to the `PATH` variable by opening a
//! command line as an administrator and running
//!
//! ```sh
//! C:\> setx path "%path%;C:\path\to\cargo-count\binary"
//! ```
//!
//! Otherwise, ensure you have the `cargo-count` binary in the directory which
//! you operating in the command line from, because Windows automatically adds
//! your current directory to PATH (i.e. if you open a command line to
//! `C:\my_project\` to use `cargo-count` ensure `cargo-count.exe` is inside
//! that directory as well).
//!
//!
//! ### Options
//!
//! There are a few options for using `cargo-count` which should be somewhat
//! self explanitory.
//!
//! ```ignore
//! USAGE:
//! cargo count [FLAGS] [OPTIONS] [--] [ARGS]
//!
//! FLAGS:
//! -S, --follow-symlinks Follows symlinks and counts source files it
//! finds
//! -a, --all Do not ignore .gitignored paths
//! (Defaults to false when omitted)
//! -h, --help Prints help information
//! --unsafe-statistics Displays lines and percentages of "unsafe"
//! code
//! -V, --version Prints version information
//! -v, --verbose Print verbose output
//!
//! OPTIONS:
//! -l, --language <exts>... Only count these languges (by source code
//! extension)
//! (i.e. '-l js py cpp')
//! -e, --exclude <paths>... Files or directories to exclude
//! (automatically includes '.git')
//! --utf8-rule <rule> Sets the UTF-8 parsing rule (Defaults to
//! 'strict')
//! [values: ignore lossy strict]
//! -s, --separator <sep> Set the thousands separator for pretty
//! printing
//!
//! ARGS:
//! to_count... The files or directories (including children) to count
//! (defaults to current working directory when omitted)
//!
//! When using '--exclude <path>' the path given can either be relative to the
//! current
//! directory, or absolute. When '<path>' is a file, it must be relative to the
//! current
//! directory or it will not be found. Example, if the current directory has a
//! child
//! directory named 'target' with a child fild 'test.rs' and you use `--exclude
//! target/test.rs'
//!
//! Globs are also supported. For example, to exclude 'test.rs' files from all
//! child directories
//! of the current directory you could do '--exclude */test.rs'.
//! ```
//!
//! ## License
//!
//! `cargo-count` is released under the terms of the MIT. See the LICENSE-MIT
//! file for the details.
#![cfg_attr(feature = "nightly", feature(plugin))]
#![cfg_attr(feature = "lints", plugin(clippy))]
#![cfg_attr(feature = "lints", allow(explicit_iter_loop))]
#![cfg_attr(feature = "lints", allow(should_implement_trait))]
#![cfg_attr(feature = "lints", allow(unstable_features))]
#![cfg_attr(feature = "lints", deny(warnings))]
#![cfg_attr(
not(any(feature = "nightly", feature = "unstable")),
deny(unstable_features)
)]
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications
)]
#[macro_use]
extern crate clap;
#[cfg(feature = "color")]
extern crate ansi_term;
extern crate gitignore;
extern crate glob;
extern crate regex;
extern crate tabwriter;
#[cfg(feature = "debug")]
use std::env;
use clap::{App, AppSettings, Arg, SubCommand};
use config::Config;
use count::Counts;
use error::{CliError, CliResult};
use fmt::Format;
#[macro_use]
mod macros;
mod comment;
mod config;
mod count;
mod error;
mod fmt;
mod fsutil;
mod language;
static UTF8_RULES: [&'static str; 3] = ["strict", "lossy", "ignore"];
fn main() {
debugln!(
"executing; cmd=cargo-count; args={:?}",
env::args().collect::<Vec<_>>()
);
let m = App::new("cargo-count")
.version(concat!("v", crate_version!()))
// We have to lie about our binary name since this will be a third party
// subcommand for cargo but we want usage strings to generated properly
.bin_name("cargo")
// Global version uses the version we supplied (Cargo.toml) for all subcommands
// as well
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequired])
// We use a subcommand because everything parsed after `cargo` is sent to the
// third party
// plugin which will then be interpreted as a subcommand/positional arg by clap
.subcommand(SubCommand::with_name("count")
.author("Kevin K. <kbknapp@gmail.com>")
.about("Displays line counts of code for cargo projects")
.args_from_usage("
-e, --exclude [PATH]... 'Files or directories to exclude (automatically includes \'.git\')'
-a, --all 'Do not ignore .gitignore'd paths'
--unsafe-statistics 'Displays lines and percentages of \"unsafe\" code'
-l, --language [EXT]... 'Only count these languges (i.e. \'-l js py cpp\')'
-v, --verbose 'Print verbose output'
-S, --follow-symlinks 'Follows symlinks and counts source files it finds [default: false]'
[PATH]... 'The files or directories (including children) to count (defaults to \
current working directory when omitted)'")
.arg(Arg::from_usage(
"-s, --separator [CHAR] 'Set the thousands separator for pretty printing'")
.use_delimiter(false)
.validator(single_char))
.arg(Arg::from_usage("--utf8-rule [RULE] 'Sets the UTF-8 parsing rule'")
.default_value("strict")
.possible_values(&UTF8_RULES))
.after_help("\
When using '--exclude <PATH>' the path given can either be relative to the current directory, or \
absolute. When '--exclude <PATH>' is a file or path, it must be relative to the current directory \
or it will not be found. Example, if the current directory has a child directory named 'target' \
with a child fild 'test.rs' and you use `--exclude target/test.rs'
\n\
Globs are also supported. For example, to exclude 'test.rs' files from all child directories of \
the current directory you could do '--exclude */test.rs'."))
.get_matches();
if let Some(m) = m.subcommand_matches("count") {
let cfg = Config::from_matches(m).unwrap_or_else(|e| e.exit());
println!("Gathering information...");
if let Err(e) = execute(cfg) {
e.exit();
}
}
}
fn execute(cfg: Config) -> CliResult<()> {
debugln!("executing; cmd=execute;");
verboseln!(cfg, "{}: {:?}", Format::Warning("Excluding"), cfg.exclude);
verbose!(
cfg,
"{}",
if cfg.exts.is_some() {
format!(
"{} including files with extension: {}\n",
Format::Warning("Only"),
cfg.exts.as_ref().unwrap().join(", ")
)
} else {
"".to_owned()
}
);
debugln!("Checking for files or dirs to count from cli");
let mut counts = Counts::new(&cfg);
counts.fill_from();
cli_try!(counts.count());
cli_try!(counts.write_results());
Ok(())
}
fn single_char(s: String) -> Result<(), String> {
if s.len() == 1 | {
Ok(())
} | conditional_block | |
main.rs | that is already
//! located in your `$PATH` variable (To see which directories those are, open
//! a terminal and type `echo "${PATH//:/\n}"`, the quotation marks are
//! important), or you can add a custom directory to your `$PATH`
//!
//! **Option 1**
//! If you have write permission to a directory listed in your `$PATH` or you
//! have root permission (or via `sudo`), simply copy the `cargo-count` to that
//! directory `# sudo cp cargo-count /usr/local/bin`
//!
//! **Option 2**
//! If you do not have root, `sudo`, or write permission to any directory
//! already in `$PATH` you can create a directory inside your home directory,
//! and add that. Many people use `$HOME/.bin` to keep it hidden (and not
//! clutter your home directory), or `$HOME/bin` if you want it to be always
//! visible. Here is an example to make the directory, add it to `$PATH`, and
//! copy `cargo-count` there.
//!
//! Simply change `bin` to whatever you'd like to name the directory, and
//! `.bashrc` to whatever your shell startup file is (usually `.bashrc`,
//! `.bash_profile`, or `.zshrc`)
//!
//! ```sh
//! $ mkdir ~/bin
//! $ echo "export PATH=$PATH:$HOME/bin" >> ~/.bashrc
//! $ cp cargo-count ~/bin
//! $ source ~/.bashrc
//! ```
//!
//! ### Windows
//!
//! On Windows 7/8 you can add directory to the `PATH` variable by opening a
//! command line as an administrator and running
//!
//! ```sh
//! C:\> setx path "%path%;C:\path\to\cargo-count\binary"
//! ```
//!
//! Otherwise, ensure you have the `cargo-count` binary in the directory which
//! you operating in the command line from, because Windows automatically adds
//! your current directory to PATH (i.e. if you open a command line to
//! `C:\my_project\` to use `cargo-count` ensure `cargo-count.exe` is inside
//! that directory as well).
//!
//!
//! ### Options
//!
//! There are a few options for using `cargo-count` which should be somewhat
//! self explanitory.
//!
//! ```ignore
//! USAGE:
//! cargo count [FLAGS] [OPTIONS] [--] [ARGS]
//!
//! FLAGS:
//! -S, --follow-symlinks Follows symlinks and counts source files it
//! finds
//! -a, --all Do not ignore .gitignored paths
//! (Defaults to false when omitted)
//! -h, --help Prints help information
//! --unsafe-statistics Displays lines and percentages of "unsafe"
//! code
//! -V, --version Prints version information
//! -v, --verbose Print verbose output
//!
//! OPTIONS:
//! -l, --language <exts>... Only count these languges (by source code
//! extension)
//! (i.e. '-l js py cpp')
//! -e, --exclude <paths>... Files or directories to exclude
//! (automatically includes '.git')
//! --utf8-rule <rule> Sets the UTF-8 parsing rule (Defaults to
//! 'strict')
//! [values: ignore lossy strict]
//! -s, --separator <sep> Set the thousands separator for pretty
//! printing
//!
//! ARGS:
//! to_count... The files or directories (including children) to count
//! (defaults to current working directory when omitted)
//!
//! When using '--exclude <path>' the path given can either be relative to the
//! current
//! directory, or absolute. When '<path>' is a file, it must be relative to the
//! current
//! directory or it will not be found. Example, if the current directory has a
//! child
//! directory named 'target' with a child fild 'test.rs' and you use `--exclude
//! target/test.rs'
//!
//! Globs are also supported. For example, to exclude 'test.rs' files from all
//! child directories
//! of the current directory you could do '--exclude */test.rs'.
//! ```
//!
//! ## License
//!
//! `cargo-count` is released under the terms of the MIT. See the LICENSE-MIT
//! file for the details.
#![cfg_attr(feature = "nightly", feature(plugin))]
#![cfg_attr(feature = "lints", plugin(clippy))]
#![cfg_attr(feature = "lints", allow(explicit_iter_loop))]
#![cfg_attr(feature = "lints", allow(should_implement_trait))]
#![cfg_attr(feature = "lints", allow(unstable_features))]
#![cfg_attr(feature = "lints", deny(warnings))]
#![cfg_attr(
not(any(feature = "nightly", feature = "unstable")),
deny(unstable_features)
)]
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications
)]
#[macro_use]
extern crate clap;
#[cfg(feature = "color")]
extern crate ansi_term;
extern crate gitignore;
extern crate glob;
extern crate regex;
extern crate tabwriter;
#[cfg(feature = "debug")]
use std::env;
use clap::{App, AppSettings, Arg, SubCommand};
use config::Config;
use count::Counts;
use error::{CliError, CliResult};
use fmt::Format;
#[macro_use]
mod macros;
mod comment;
mod config;
mod count;
mod error;
mod fmt;
mod fsutil;
mod language;
static UTF8_RULES: [&'static str; 3] = ["strict", "lossy", "ignore"];
fn main() {
debugln!(
"executing; cmd=cargo-count; args={:?}",
env::args().collect::<Vec<_>>()
);
let m = App::new("cargo-count")
.version(concat!("v", crate_version!()))
// We have to lie about our binary name since this will be a third party
// subcommand for cargo but we want usage strings to generated properly
.bin_name("cargo")
// Global version uses the version we supplied (Cargo.toml) for all subcommands
// as well
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequired])
// We use a subcommand because everything parsed after `cargo` is sent to the
// third party
// plugin which will then be interpreted as a subcommand/positional arg by clap
.subcommand(SubCommand::with_name("count")
.author("Kevin K. <kbknapp@gmail.com>")
.about("Displays line counts of code for cargo projects")
.args_from_usage("
-e, --exclude [PATH]... 'Files or directories to exclude (automatically includes \'.git\')'
-a, --all 'Do not ignore .gitignore'd paths'
--unsafe-statistics 'Displays lines and percentages of \"unsafe\" code'
-l, --language [EXT]... 'Only count these languges (i.e. \'-l js py cpp\')'
-v, --verbose 'Print verbose output'
-S, --follow-symlinks 'Follows symlinks and counts source files it finds [default: false]'
[PATH]... 'The files or directories (including children) to count (defaults to \
current working directory when omitted)'")
.arg(Arg::from_usage(
"-s, --separator [CHAR] 'Set the thousands separator for pretty printing'")
.use_delimiter(false)
.validator(single_char))
.arg(Arg::from_usage("--utf8-rule [RULE] 'Sets the UTF-8 parsing rule'")
.default_value("strict")
.possible_values(&UTF8_RULES))
.after_help("\
When using '--exclude <PATH>' the path given can either be relative to the current directory, or \
absolute. When '--exclude <PATH>' is a file or path, it must be relative to the current directory \
or it will not be found. Example, if the current directory has a child directory named 'target' \
with a child fild 'test.rs' and you use `--exclude target/test.rs'
\n\
Globs are also supported. For example, to exclude 'test.rs' files from all child directories of \
the current directory you could do '--exclude */test.rs'."))
.get_matches();
if let Some(m) = m.subcommand_matches("count") {
let cfg = Config::from_matches(m).unwrap_or_else(|e| e.exit());
println!("Gathering information...");
if let Err(e) = execute(cfg) {
e.exit();
}
}
}
fn execute(cfg: Config) -> CliResult<()> {
debugln!("executing; cmd=execute;");
verboseln!(cfg, "{}: {:?}", Format::Warning("Excluding"), cfg.exclude);
verbose!(
cfg,
"{}",
if cfg.exts.is_some() {
format!(
"{} including files with extension: {}\n",
Format::Warning("Only"),
cfg.exts.as_ref().unwrap().join(", ")
)
} else {
"".to_owned()
}
);
debugln!("Checking for files or dirs to count from cli");
let mut counts = Counts::new(&cfg);
counts.fill_from();
cli_try!(counts.count());
cli_try!(counts.write_results());
Ok(())
}
fn | single_char | identifier_name | |
route.rs | diags);
attr.path.query.as_ref().map(|q| dup_check(&mut segments, q.iter().cloned(), &mut diags));
dup_check(&mut segments, attr.data.clone().map(|s| s.value.0).into_iter(), &mut diags);
// Check the validity of function arguments.
let mut inputs = vec![];
let mut fn_segments: IndexSet<Segment> = IndexSet::new();
for input in &function.sig.inputs {
let help = "all handler arguments must be of the form: `ident: Type`";
let span = input.span();
let (ident, ty) = match input {
syn::FnArg::Typed(arg) => match *arg.pat {
syn::Pat::Ident(ref pat) => (&pat.ident, &arg.ty),
syn::Pat::Wild(_) => {
diags.push(span.error("handler arguments cannot be ignored").help(help));
continue;
}
_ => {
diags.push(span.error("invalid use of pattern").help(help));
continue;
}
}
// Other cases shouldn't happen since we parsed an `ItemFn`.
_ => {
diags.push(span.error("invalid handler argument").help(help));
continue;
}
};
let rocket_ident = ident.prepend(ROCKET_PARAM_PREFIX);
inputs.push((ident.clone(), rocket_ident, ty.with_stripped_lifetimes()));
fn_segments.insert(ident.into());
}
// Check that all of the declared parameters are function inputs.
let span = match function.sig.inputs.is_empty() {
false => function.sig.inputs.span(),
true => function.span()
};
for missing in segments.difference(&fn_segments) {
diags.push(missing.span.error("unused dynamic parameter")
.span_note(span, format!("expected argument named `{}` here", missing.name)))
}
diags.head_err_or(Route { attribute: attr, function, inputs, segments })
}
fn param_expr(seg: &Segment, ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, error, log, request, _None, _Some, _Ok, _Err, Outcome);
let i = seg.index.expect("dynamic parameters must be indexed");
let span = ident.span().unstable().join(ty.span()).unwrap().into();
let name = ident.to_string();
// All dynamic parameter should be found if this function is being called;
// that's the point of statically checking the URI parameters.
let internal_error = quote!({
#log::error("Internal invariant error: expected dynamic parameter not found.");
#log::error("Please report this error to the Rocket issue tracker.");
#Outcome::Forward(#data)
});
// Returned when a dynamic parameter fails to parse.
let parse_error = quote!({
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, #error));
#Outcome::Forward(#data)
});
let expr = match seg.kind {
Kind::Single => quote_spanned! { span =>
match #req.raw_segment_str(#i) {
#_Some(__s) => match <#ty as #request::FromParam>::from_param(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Multi => quote_spanned! { span =>
match #req.raw_segments(#i) {
#_Some(__s) => match <#ty as #request::FromSegments>::from_segments(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Static => return quote!() | #[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = #expr;
}
}
fn data_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, FromData, Outcome, Transform);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
let __transform = <#ty as #FromData>::transform(#req, #data);
#[allow(unreachable_patterns, unreachable_code)]
let __outcome = match __transform {
#Transform::Owned(#Outcome::Success(__v)) => {
#Transform::Owned(#Outcome::Success(__v))
},
#Transform::Borrowed(#Outcome::Success(ref __v)) => {
#Transform::Borrowed(#Outcome::Success(::std::borrow::Borrow::borrow(__v)))
},
#Transform::Borrowed(__o) => #Transform::Borrowed(__o.map(|_| {
unreachable!("Borrowed(Success(..)) case handled in previous block")
})),
#Transform::Owned(__o) => #Transform::Owned(__o),
};
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #FromData>::from_data(#req, __outcome) {
#Outcome::Success(__d) => __d,
#Outcome::Forward(__d) => return #Outcome::Forward(__d),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
fn query_exprs(route: &Route) -> Option<TokenStream2> {
define_vars_and_mods!(_None, _Some, _Ok, _Err, _Option);
define_vars_and_mods!(data, trail, log, request, req, Outcome, SmallVec, Query);
let query_segments = route.attribute.path.query.as_ref()?;
let (mut decls, mut matchers, mut builders) = (vec![], vec![], vec![]);
for segment in query_segments {
let name = &segment.name;
let (ident, ty, span) = if segment.kind != Kind::Static {
let (ident, ty) = route.inputs.iter()
.find(|(ident, _, _)| ident == &segment.name)
.map(|(_, rocket_ident, ty)| (rocket_ident, ty))
.unwrap();
let span = ident.span().unstable().join(ty.span()).unwrap();
(Some(ident), Some(ty), span.into())
} else {
(None, None, segment.span.into())
};
let decl = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #ident: #_Option<#ty> = #_None;
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #trail = #SmallVec::<[#request::FormItem; 8]>::new();
},
Kind::Static => quote!()
};
let matcher = match segment.kind {
Kind::Single => quote_spanned! { span =>
(_, #name, __v) => {
#[allow(unreachable_patterns, unreachable_code)]
let __v = match <#ty as #request::FromFormValue>::from_form_value(__v) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
#ident = #_Some(__v);
}
},
Kind::Static => quote! {
(#name, _, _) => continue,
},
Kind::Multi => quote! {
_ => #trail.push(__i),
}
};
let builder = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match #ident.or_else(<#ty as #request::FromFormValue>::default) {
#_Some(__v) => __v,
#_None => {
#log::warn_(&format!("Missing required query parameter '{}'.", #name));
return #Outcome::Forward(#data);
}
};
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match <#ty as #request::FromQuery>::from_query(#Query(&#trail)) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
},
Kind::Static => quote!()
};
decls.push(decl);
matchers.push(matcher);
builders.push(builder);
}
matchers.push(quote!(_ => continue));
Some(quote! {
#(#decls)*
if let #_Some(__items) = #req.raw_query_items() {
for __i in __items {
match (__i.raw.as_str(), __i.key.as_str(), __i.value) {
#(
#[allow(unreachable_patterns, unreachable_code)]
#matchers
)*
}
}
}
#(
#[allow(unreachable_patterns, unreachable_code)]
#builders
)*
})
}
| };
quote! { | random_line_split |
route.rs | {
#[meta(naked)]
method: SpanWrapped<Method>,
path: RoutePath,
data: Option<SpanWrapped<DataSegment>>,
format: Option<MediaType>,
rank: Option<isize>,
}
/// The raw, parsed `#[method]` (e.g, `get`, `put`, `post`, etc.) attribute.
#[derive(Debug, FromMeta)]
struct MethodRouteAttribute {
#[meta(naked)]
path: RoutePath,
data: Option<SpanWrapped<DataSegment>>,
format: Option<MediaType>,
rank: Option<isize>,
}
/// This structure represents the parsed `route` attribute and associated items.
#[derive(Debug)]
struct Route {
/// The status associated with the code in the `#[route(code)]` attribute.
attribute: RouteAttribute,
/// The function that was decorated with the `route` attribute.
function: syn::ItemFn,
/// The non-static parameters declared in the route segments.
segments: IndexSet<Segment>,
/// The parsed inputs to the user's function. The first ident is the ident
/// as the user wrote it, while the second ident is the identifier that
/// should be used during code generation, the `rocket_ident`.
inputs: Vec<(syn::Ident, syn::Ident, syn::Type)>,
}
fn parse_route(attr: RouteAttribute, function: syn::ItemFn) -> Result<Route> {
// Gather diagnostics as we proceed.
let mut diags = Diagnostics::new();
// Emit a warning if a `data` param was supplied for non-payload methods.
if let Some(ref data) = attr.data {
if !attr.method.0.supports_payload() {
let msg = format!("'{}' does not typically support payloads", attr.method.0);
data.full_span.warning("`data` used with non-payload-supporting method")
.span_note(attr.method.span, msg)
.emit()
}
}
// Collect all of the dynamic segments in an `IndexSet`, checking for dups.
let mut segments: IndexSet<Segment> = IndexSet::new();
fn dup_check<I>(set: &mut IndexSet<Segment>, iter: I, diags: &mut Diagnostics)
where I: Iterator<Item = Segment>
{
for segment in iter.filter(|s| s.kind != Kind::Static) {
let span = segment.span;
if let Some(previous) = set.replace(segment) {
diags.push(span.error(format!("duplicate parameter: `{}`", previous.name))
.span_note(previous.span, "previous parameter with the same name here"))
}
}
}
dup_check(&mut segments, attr.path.path.iter().cloned(), &mut diags);
attr.path.query.as_ref().map(|q| dup_check(&mut segments, q.iter().cloned(), &mut diags));
dup_check(&mut segments, attr.data.clone().map(|s| s.value.0).into_iter(), &mut diags);
// Check the validity of function arguments.
let mut inputs = vec![];
let mut fn_segments: IndexSet<Segment> = IndexSet::new();
for input in &function.sig.inputs {
let help = "all handler arguments must be of the form: `ident: Type`";
let span = input.span();
let (ident, ty) = match input {
syn::FnArg::Typed(arg) => match *arg.pat {
syn::Pat::Ident(ref pat) => (&pat.ident, &arg.ty),
syn::Pat::Wild(_) => {
diags.push(span.error("handler arguments cannot be ignored").help(help));
continue;
}
_ => {
diags.push(span.error("invalid use of pattern").help(help));
continue;
}
}
// Other cases shouldn't happen since we parsed an `ItemFn`.
_ => {
diags.push(span.error("invalid handler argument").help(help));
continue;
}
};
let rocket_ident = ident.prepend(ROCKET_PARAM_PREFIX);
inputs.push((ident.clone(), rocket_ident, ty.with_stripped_lifetimes()));
fn_segments.insert(ident.into());
}
// Check that all of the declared parameters are function inputs.
let span = match function.sig.inputs.is_empty() {
false => function.sig.inputs.span(),
true => function.span()
};
for missing in segments.difference(&fn_segments) {
diags.push(missing.span.error("unused dynamic parameter")
.span_note(span, format!("expected argument named `{}` here", missing.name)))
}
diags.head_err_or(Route { attribute: attr, function, inputs, segments })
}
fn param_expr(seg: &Segment, ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, error, log, request, _None, _Some, _Ok, _Err, Outcome);
let i = seg.index.expect("dynamic parameters must be indexed");
let span = ident.span().unstable().join(ty.span()).unwrap().into();
let name = ident.to_string();
// All dynamic parameter should be found if this function is being called;
// that's the point of statically checking the URI parameters.
let internal_error = quote!({
#log::error("Internal invariant error: expected dynamic parameter not found.");
#log::error("Please report this error to the Rocket issue tracker.");
#Outcome::Forward(#data)
});
// Returned when a dynamic parameter fails to parse.
let parse_error = quote!({
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, #error));
#Outcome::Forward(#data)
});
let expr = match seg.kind {
Kind::Single => quote_spanned! { span =>
match #req.raw_segment_str(#i) {
#_Some(__s) => match <#ty as #request::FromParam>::from_param(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Multi => quote_spanned! { span =>
match #req.raw_segments(#i) {
#_Some(__s) => match <#ty as #request::FromSegments>::from_segments(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Static => return quote!()
};
quote! {
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = #expr;
}
}
fn data_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, FromData, Outcome, Transform);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
let __transform = <#ty as #FromData>::transform(#req, #data);
#[allow(unreachable_patterns, unreachable_code)]
let __outcome = match __transform {
#Transform::Owned(#Outcome::Success(__v)) => {
#Transform::Owned(#Outcome::Success(__v))
},
#Transform::Borrowed(#Outcome::Success(ref __v)) => {
#Transform::Borrowed(#Outcome::Success(::std::borrow::Borrow::borrow(__v)))
},
#Transform::Borrowed(__o) => #Transform::Borrowed(__o.map(|_| {
unreachable!("Borrowed(Success(..)) case handled in previous block")
})),
#Transform::Owned(__o) => #Transform::Owned(__o),
};
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #FromData>::from_data(#req, __outcome) {
#Outcome::Success(__d) => __d,
#Outcome::Forward(__d) => return #Outcome::Forward(__d),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
fn query_exprs(route: &Route) -> Option<TokenStream2> {
define_vars_and_mods!(_None, _Some, _Ok, _Err, _Option);
define_vars_and_mods!(data, trail, log, request, req, Outcome, SmallVec, Query);
let query_segments = route.attribute.path.query.as_ref()?;
let (mut decls, mut matchers, mut builders) = (vec![], vec![], vec![]);
for segment in query_segments {
let name = &segment.name;
let (ident, ty, span) = if segment.kind != Kind::Static {
let (ident, ty) = route.inputs.iter()
.find(|(ident, _, _)| ident == &segment.name)
.map(|(_, rocket_ident, ty)| (rocket_ident, ty))
.unwrap();
let span = ident.span().unstable().join(ty.span()).unwrap();
(Some(ident), Some(ty), span.into())
} else {
(None, None, segment.span.into())
};
let decl = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #ident: #_Option<#ty> = #_ | RouteAttribute | identifier_name | |
route.rs |
dup_check(&mut segments, attr.path.path.iter().cloned(), &mut diags);
attr.path.query.as_ref().map(|q| dup_check(&mut segments, q.iter().cloned(), &mut diags));
dup_check(&mut segments, attr.data.clone().map(|s| s.value.0).into_iter(), &mut diags);
// Check the validity of function arguments.
let mut inputs = vec![];
let mut fn_segments: IndexSet<Segment> = IndexSet::new();
for input in &function.sig.inputs {
let help = "all handler arguments must be of the form: `ident: Type`";
let span = input.span();
let (ident, ty) = match input {
syn::FnArg::Typed(arg) => match *arg.pat {
syn::Pat::Ident(ref pat) => (&pat.ident, &arg.ty),
syn::Pat::Wild(_) => {
diags.push(span.error("handler arguments cannot be ignored").help(help));
continue;
}
_ => {
diags.push(span.error("invalid use of pattern").help(help));
continue;
}
}
// Other cases shouldn't happen since we parsed an `ItemFn`.
_ => {
diags.push(span.error("invalid handler argument").help(help));
continue;
}
};
let rocket_ident = ident.prepend(ROCKET_PARAM_PREFIX);
inputs.push((ident.clone(), rocket_ident, ty.with_stripped_lifetimes()));
fn_segments.insert(ident.into());
}
// Check that all of the declared parameters are function inputs.
let span = match function.sig.inputs.is_empty() {
false => function.sig.inputs.span(),
true => function.span()
};
for missing in segments.difference(&fn_segments) {
diags.push(missing.span.error("unused dynamic parameter")
.span_note(span, format!("expected argument named `{}` here", missing.name)))
}
diags.head_err_or(Route { attribute: attr, function, inputs, segments })
}
fn param_expr(seg: &Segment, ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, error, log, request, _None, _Some, _Ok, _Err, Outcome);
let i = seg.index.expect("dynamic parameters must be indexed");
let span = ident.span().unstable().join(ty.span()).unwrap().into();
let name = ident.to_string();
// All dynamic parameter should be found if this function is being called;
// that's the point of statically checking the URI parameters.
let internal_error = quote!({
#log::error("Internal invariant error: expected dynamic parameter not found.");
#log::error("Please report this error to the Rocket issue tracker.");
#Outcome::Forward(#data)
});
// Returned when a dynamic parameter fails to parse.
let parse_error = quote!({
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, #error));
#Outcome::Forward(#data)
});
let expr = match seg.kind {
Kind::Single => quote_spanned! { span =>
match #req.raw_segment_str(#i) {
#_Some(__s) => match <#ty as #request::FromParam>::from_param(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Multi => quote_spanned! { span =>
match #req.raw_segments(#i) {
#_Some(__s) => match <#ty as #request::FromSegments>::from_segments(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Static => return quote!()
};
quote! {
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = #expr;
}
}
fn data_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, FromData, Outcome, Transform);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
let __transform = <#ty as #FromData>::transform(#req, #data);
#[allow(unreachable_patterns, unreachable_code)]
let __outcome = match __transform {
#Transform::Owned(#Outcome::Success(__v)) => {
#Transform::Owned(#Outcome::Success(__v))
},
#Transform::Borrowed(#Outcome::Success(ref __v)) => {
#Transform::Borrowed(#Outcome::Success(::std::borrow::Borrow::borrow(__v)))
},
#Transform::Borrowed(__o) => #Transform::Borrowed(__o.map(|_| {
unreachable!("Borrowed(Success(..)) case handled in previous block")
})),
#Transform::Owned(__o) => #Transform::Owned(__o),
};
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #FromData>::from_data(#req, __outcome) {
#Outcome::Success(__d) => __d,
#Outcome::Forward(__d) => return #Outcome::Forward(__d),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
fn query_exprs(route: &Route) -> Option<TokenStream2> {
define_vars_and_mods!(_None, _Some, _Ok, _Err, _Option);
define_vars_and_mods!(data, trail, log, request, req, Outcome, SmallVec, Query);
let query_segments = route.attribute.path.query.as_ref()?;
let (mut decls, mut matchers, mut builders) = (vec![], vec![], vec![]);
for segment in query_segments {
let name = &segment.name;
let (ident, ty, span) = if segment.kind != Kind::Static {
let (ident, ty) = route.inputs.iter()
.find(|(ident, _, _)| ident == &segment.name)
.map(|(_, rocket_ident, ty)| (rocket_ident, ty))
.unwrap();
let span = ident.span().unstable().join(ty.span()).unwrap();
(Some(ident), Some(ty), span.into())
} else {
(None, None, segment.span.into())
};
let decl = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #ident: #_Option<#ty> = #_None;
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #trail = #SmallVec::<[#request::FormItem; 8]>::new();
},
Kind::Static => quote!()
};
let matcher = match segment.kind {
Kind::Single => quote_spanned! { span =>
(_, #name, __v) => {
#[allow(unreachable_patterns, unreachable_code)]
let __v = match <#ty as #request::FromFormValue>::from_form_value(__v) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
#ident = #_Some(__v);
}
},
Kind::Static => quote! {
(#name, _, _) => continue,
},
Kind::Multi => quote! {
_ => #trail.push(__i),
}
};
let builder = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match #ident.or_else(<#ty as #request::FromFormValue>::default) {
#_Some(__v) => __v,
#_None => {
#log::warn_(&format!("Missing required query parameter '{}'.", #name));
return #Outcome::Forward(#data);
}
};
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match <#ty as #request::FromQuery>::from_query(#Query(&#trail)) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
},
Kind::Static => quote!()
};
decls.push(decl);
matchers.push(matcher);
builders.push(builder);
}
matchers.push(quote!(_ => continue));
Some(quote! {
#(#decls)*
| {
for segment in iter.filter(|s| s.kind != Kind::Static) {
let span = segment.span;
if let Some(previous) = set.replace(segment) {
diags.push(span.error(format!("duplicate parameter: `{}`", previous.name))
.span_note(previous.span, "previous parameter with the same name here"))
}
}
} | identifier_body | |
sendmail.rs | );
}
if strlen(answer) <= 3i32 as libc::c_ulong || '-' as i32 != *answer.offset(3) as libc::c_int {
break;
}
free(answer as *mut libc::c_void);
}
if !answer.is_null() {
let mut n: libc::c_int = atoi(answer);
if (*ptr_to_globals).timeout != 0 {
alarm(0i32 as libc::c_uint);
}
free(answer as *mut libc::c_void);
if -1i32 == code || n == code {
free(msg as *mut libc::c_void);
return n;
}
}
bb_error_msg_and_die(b"%s failed\x00" as *const u8 as *const libc::c_char, msg);
}
unsafe extern "C" fn smtp_check(
mut fmt: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
return smtp_checkp(fmt, 0 as *const libc::c_char, code);
}
// strip argument of bad chars
unsafe extern "C" fn sane_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
trim(str);
s = str;
while *s != 0 {
/* Standard allows these chars in username without quoting:
* /!#$%&'*+-=?^_`{|}~
* and allows dot (.) with some restrictions.
* I chose to only allow a saner subset.
* I propose to expand it only on user's request.
*/
if bb_ascii_isalnum(*s as libc::c_uchar) == 0
&& strchr(
b"=+_-.@\x00" as *const u8 as *const libc::c_char,
*s as libc::c_int,
)
.is_null()
{
bb_error_msg(
b"bad address \'%s\'\x00" as *const u8 as *const libc::c_char,
str,
);
/* returning "": */
*str.offset(0) = '\u{0}' as i32 as libc::c_char;
return str;
}
s = s.offset(1)
}
return str;
}
// check for an address inside angle brackets, if not found fall back to normal
unsafe extern "C" fn angle_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut e: *mut libc::c_char = 0 as *mut libc::c_char;
e = trim(str);
if e != str && {
e = e.offset(-1);
(*e as libc::c_int) == '>' as i32
} {
s = strrchr(str, '<' as i32);
if !s.is_null() {
*e = '\u{0}' as i32 as libc::c_char;
str = s.offset(1)
}
}
return sane_address(str);
}
unsafe extern "C" fn rcptto(mut s: *const libc::c_char) {
if *s == 0 {
return;
}
// N.B. we don't die if recipient is rejected, for the other recipients may be accepted
if 250i32
!= smtp_checkp(
b"RCPT TO:<%s>\x00" as *const u8 as *const libc::c_char,
s,
-1i32,
)
{
bb_error_msg(
b"Bad recipient: <%s>\x00" as *const u8 as *const libc::c_char,
s,
);
};
}
// send to a list of comma separated addresses
unsafe extern "C" fn rcptto_list(mut list: *const libc::c_char) {
let mut free_me: *mut libc::c_char = xstrdup(list);
let mut str: *mut libc::c_char = free_me;
let mut s: *mut libc::c_char = free_me;
let mut prev: libc::c_char = 0i32 as libc::c_char;
let mut in_quote: libc::c_int = 0i32;
while *s != 0 {
let fresh0 = s;
s = s.offset(1);
let mut ch: libc::c_char = *fresh0;
if ch as libc::c_int == '\"' as i32 && prev as libc::c_int != '\\' as i32 {
in_quote = (in_quote == 0) as libc::c_int
} else if in_quote == 0 && ch as libc::c_int == ',' as i32 {
*s.offset(-1i32 as isize) = '\u{0}' as i32 as libc::c_char;
rcptto(angle_address(str));
str = s
}
prev = ch
}
if prev as libc::c_int != ',' as i32 {
rcptto(angle_address(str));
}
free(free_me as *mut libc::c_void);
}
#[no_mangle]
pub unsafe extern "C" fn sendmail_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut current_block: u64;
let mut opt_connect: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt_from: *mut libc::c_char = 0 as *mut libc::c_char;
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut list: *mut llist_t = 0 as *mut llist_t;
let mut host: *mut libc::c_char = sane_address(safe_gethostname());
let mut nheaders: libc::c_uint = 0i32 as libc::c_uint;
let mut code: libc::c_int = 0;
let mut last_hdr: C2RustUnnamed = HDR_OTHER;
let mut check_hdr: libc::c_int = 0;
let mut has_to: libc::c_int = 0i32;
// init global variables
let ref mut fresh1 = *(not_const_pp(&ptr_to_globals as *const *mut globals as *const libc::c_void)
as *mut *mut globals);
*fresh1 = xzalloc(::std::mem::size_of::<globals>() as libc::c_ulong) as *mut globals;
asm!("" : : : "memory" : "volatile");
(*ptr_to_globals).opt_charset =
b"us-ascii\x00" as *const u8 as *const libc::c_char as *mut libc::c_char;
// default HOST[:PORT] is $SMTPHOST, or localhost
opt_connect = getenv(b"SMTPHOST\x00" as *const u8 as *const libc::c_char);
if opt_connect.is_null() {
opt_connect = b"127.0.0.1\x00" as *const u8 as *const libc::c_char as *mut libc::c_char
}
// save initial stdin since body is piped!
xdup2(0i32, 3i32);
(*ptr_to_globals).fp0 = xfdopen_for_read(3i32);
// parse options
// N.B. since -H and -S are mutually exclusive they do not interfere in opt_connect
// -a is for ssmtp (http://downloads.openwrt.org/people/nico/man/man8/ssmtp.8.html) compatibility,
// it is still under development.
(*ptr_to_globals).opts = getopt32(
argv,
b"^tf:o:iw:+H:S:a:*:v\x00vv:H--S:S--H\x00" as *const u8 as *const libc::c_char,
&mut opt_from as *mut *mut libc::c_char,
0 as *mut libc::c | {
let mut answer: *mut libc::c_char = 0 as *mut libc::c_char;
let mut msg: *mut libc::c_char = send_mail_command(fmt, param);
loop
// read stdin
// if the string has a form NNN- -- read next string. E.g. EHLO response
// parse first bytes to a number
// if code = -1 then just return this number
// if code != -1 then checks whether the number equals the code
// if not equal -> die saying msg
{
answer = xmalloc_fgetline(stdin);
if answer.is_null() {
break;
}
if (*ptr_to_globals).verbose != 0 {
bb_error_msg(
b"recv:\'%.*s\'\x00" as *const u8 as *const libc::c_char,
strchrnul(answer, '\r' as i32).wrapping_offset_from(answer) as libc::c_long as libc::c_int,
answer, | identifier_body | |
sendmail.rs | ,
a.offset(1),
);
}
}
}
// N.B. list == NULL here
//bb_error_msg("OPT[%x] AU[%s], AP[%s], AM[%s], ARGV[%s]", opts, au, ap, am, *argv);
// connect to server
// connection helper ordered? ->
if (*ptr_to_globals).opts & OPT_H as libc::c_int as libc::c_uint != 0 {
let mut delay: *const libc::c_char = 0 as *const libc::c_char;
let mut args: [*const libc::c_char; 4] = [
b"sh\x00" as *const u8 as *const libc::c_char,
b"-c\x00" as *const u8 as *const libc::c_char,
opt_connect as *const libc::c_char,
0 as *const libc::c_char,
];
// plug it in
launch_helper(args.as_mut_ptr());
// Now:
// our stdout will go to helper's stdin,
// helper's stdout will be available on our stdin.
// Wait for initial server message.
// If helper (such as openssl) invokes STARTTLS, the initial 220
// is swallowed by helper (and not repeated after TLS is initiated).
// We will send NOOP cmd to server and check the response.
// We should get 220+250 on plain connection, 250 on STARTTLSed session.
//
// The problem here is some servers delay initial 220 message,
// and consider client to be a spammer if it starts sending cmds
// before 220 reached it. The code below is unsafe in this regard:
// in non-STARTTLSed case, we potentially send NOOP before 220
// is sent by server.
//
// If $SMTP_ANTISPAM_DELAY is set, we pause before sending NOOP.
//
delay = getenv(b"SMTP_ANTISPAM_DELAY\x00" as *const u8 as *const libc::c_char);
if !delay.is_null() {
sleep(atoi(delay) as libc::c_uint);
}
code = smtp_check(b"NOOP\x00" as *const u8 as *const libc::c_char, -1i32);
if code == 220i32 {
// we got 220 - this is not STARTTLSed connection,
// eat 250 response to our NOOP
smtp_check(0 as *const libc::c_char, 250i32);
} else if code != 250i32 {
bb_simple_error_msg_and_die(b"SMTP init failed\x00" as *const u8 as *const libc::c_char);
}
} else {
// vanilla connection
let mut fd: libc::c_int = 0;
fd = create_and_connect_stream_or_die(opt_connect, 25i32);
// and make ourselves a simple IO filter
xmove_fd(fd, 0i32);
xdup2(0i32, 1i32);
// Wait for initial server 220 message
smtp_check(0 as *const libc::c_char, 220i32);
}
// we should start with modern EHLO
if 250i32
!= smtp_checkp(
b"EHLO %s\x00" as *const u8 as *const libc::c_char,
host,
-1i32,
)
{
smtp_checkp(
b"HELO %s\x00" as *const u8 as *const libc::c_char,
host,
250i32,
);
}
// perform authentication
if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint != 0 {
// read credentials unless they are given via -a[up] options
if (*ptr_to_globals).user.is_null() || (*ptr_to_globals).pass.is_null() {
get_cred_or_die(4i32);
}
if (*ptr_to_globals).opts & OPT_am_plain as libc::c_int as libc::c_uint != 0 {
// C: AUTH PLAIN
// S: 334
// C: base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
//Note: a shorter format is allowed:
// C: AUTH PLAIN base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH PLAIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
let mut user_len: libc::c_uint = strlen((*ptr_to_globals).user) as libc::c_uint;
let mut pass_len: libc::c_uint = strlen((*ptr_to_globals).pass) as libc::c_uint;
let mut sz: libc::c_uint = (1i32 as libc::c_uint)
.wrapping_add(user_len)
.wrapping_add(1i32 as libc::c_uint)
.wrapping_add(pass_len);
let vla = sz.wrapping_add(1i32 as libc::c_uint) as usize;
let mut plain_auth: Vec<libc::c_char> = ::std::vec::from_elem(0, vla);
// the format is:
// "authorization identity<NUL>username<NUL>password"
// authorization identity is empty.
*plain_auth.as_mut_ptr().offset(0) = '\u{0}' as i32 as libc::c_char;
strcpy(
stpcpy(plain_auth.as_mut_ptr().offset(1), (*ptr_to_globals).user).offset(1),
(*ptr_to_globals).pass,
);
printbuf_base64(plain_auth.as_mut_ptr(), sz);
} else {
// C: AUTH LOGIN
// S: 334 VXNlcm5hbWU6
// ^^^^^^^^^^^^ server says "Username:"
// C: base64encoded(user)
// S: 334 UGFzc3dvcmQ6
// ^^^^^^^^^^^^ server says "Password:"
// C: base64encoded(pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH LOGIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
printstr_base64((*ptr_to_globals).user);
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 334i32);
printstr_base64((*ptr_to_globals).pass);
}
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 235i32);
}
// set sender
// N.B. we have here a very loosely defined algorythm
// since sendmail historically offers no means to specify secrets on cmdline.
// 1) server can require no authentication ->
// we must just provide a (possibly fake) reply address.
// 2) server can require AUTH ->
// we must provide valid username and password along with a (possibly fake) reply address.
// For the sake of security username and password are to be read either from console or from a secured file.
// Since reading from console may defeat usability, the solution is either to read from a predefined
// file descriptor (e.g. 4), or again from a secured file.
// got no sender address? use auth name, then UID username as a last resort
if opt_from.is_null() {
opt_from = xasprintf(
b"%s@%s\x00" as *const u8 as *const libc::c_char,
if !(*ptr_to_globals).user.is_null() {
(*ptr_to_globals).user
} else {
xuid2uname(getuid())
},
(*xgethostbyname(host)).h_name,
)
}
free(host as *mut libc::c_void);
smtp_checkp(
b"MAIL FROM:<%s>\x00" as *const u8 as *const libc::c_char,
opt_from,
250i32,
);
// process message
// read recipients from message and add them to those given on cmdline.
// this means we scan stdin for To:, Cc:, Bcc: lines until an empty line
// and then use the rest of stdin as message body
code = 0i32; // set "analyze headers" mode
's_369: loop {
s = xmalloc_fgetline((*ptr_to_globals).fp0);
if !s.is_null() | {
current_block = 16252544171633782868;
} | conditional_block | |
sendmail.rs | libc::c_char = send_mail_command(fmt, param);
loop
// read stdin
// if the string has a form NNN- -- read next string. E.g. EHLO response
// parse first bytes to a number
// if code = -1 then just return this number
// if code != -1 then checks whether the number equals the code
// if not equal -> die saying msg
{
answer = xmalloc_fgetline(stdin);
if answer.is_null() {
break;
}
if (*ptr_to_globals).verbose != 0 {
bb_error_msg(
b"recv:\'%.*s\'\x00" as *const u8 as *const libc::c_char,
strchrnul(answer, '\r' as i32).wrapping_offset_from(answer) as libc::c_long as libc::c_int,
answer,
);
}
if strlen(answer) <= 3i32 as libc::c_ulong || '-' as i32 != *answer.offset(3) as libc::c_int {
break;
}
free(answer as *mut libc::c_void);
}
if !answer.is_null() {
let mut n: libc::c_int = atoi(answer);
if (*ptr_to_globals).timeout != 0 {
alarm(0i32 as libc::c_uint);
}
free(answer as *mut libc::c_void);
if -1i32 == code || n == code {
free(msg as *mut libc::c_void);
return n;
}
}
bb_error_msg_and_die(b"%s failed\x00" as *const u8 as *const libc::c_char, msg);
}
unsafe extern "C" fn smtp_check(
mut fmt: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
return smtp_checkp(fmt, 0 as *const libc::c_char, code);
}
// strip argument of bad chars
unsafe extern "C" fn sane_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
trim(str);
s = str;
while *s != 0 {
/* Standard allows these chars in username without quoting:
* /!#$%&'*+-=?^_`{|}~
* and allows dot (.) with some restrictions.
* I chose to only allow a saner subset.
* I propose to expand it only on user's request.
*/
if bb_ascii_isalnum(*s as libc::c_uchar) == 0
&& strchr(
b"=+_-.@\x00" as *const u8 as *const libc::c_char,
*s as libc::c_int,
)
.is_null()
{
bb_error_msg(
b"bad address \'%s\'\x00" as *const u8 as *const libc::c_char,
str,
);
/* returning "": */
*str.offset(0) = '\u{0}' as i32 as libc::c_char;
return str;
}
s = s.offset(1)
}
return str;
}
// check for an address inside angle brackets, if not found fall back to normal
unsafe extern "C" fn angle_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut e: *mut libc::c_char = 0 as *mut libc::c_char;
e = trim(str);
if e != str && {
e = e.offset(-1);
(*e as libc::c_int) == '>' as i32
} {
s = strrchr(str, '<' as i32);
if !s.is_null() {
*e = '\u{0}' as i32 as libc::c_char;
str = s.offset(1)
}
}
return sane_address(str);
}
unsafe extern "C" fn rcptto(mut s: *const libc::c_char) {
if *s == 0 {
return;
}
// N.B. we don't die if recipient is rejected, for the other recipients may be accepted
if 250i32
!= smtp_checkp(
b"RCPT TO:<%s>\x00" as *const u8 as *const libc::c_char,
s,
-1i32,
)
{
bb_error_msg(
b"Bad recipient: <%s>\x00" as *const u8 as *const libc::c_char,
s,
);
};
}
// send to a list of comma separated addresses
unsafe extern "C" fn | (mut list: *const libc::c_char) {
let mut free_me: *mut libc::c_char = xstrdup(list);
let mut str: *mut libc::c_char = free_me;
let mut s: *mut libc::c_char = free_me;
let mut prev: libc::c_char = 0i32 as libc::c_char;
let mut in_quote: libc::c_int = 0i32;
while *s != 0 {
let fresh0 = s;
s = s.offset(1);
let mut ch: libc::c_char = *fresh0;
if ch as libc::c_int == '\"' as i32 && prev as libc::c_int != '\\' as i32 {
in_quote = (in_quote == 0) as libc::c_int
} else if in_quote == 0 && ch as libc::c_int == ',' as i32 {
*s.offset(-1i32 as isize) = '\u{0}' as i32 as libc::c_char;
rcptto(angle_address(str));
str = s
}
prev = ch
}
if prev as libc::c_int != ',' as i32 {
rcptto(angle_address(str));
}
free(free_me as *mut libc::c_void);
}
#[no_mangle]
pub unsafe extern "C" fn sendmail_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut current_block: u64;
let mut opt_connect: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt_from: *mut libc::c_char = 0 as *mut libc::c_char;
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut list: *mut llist_t = 0 as *mut llist_t;
let mut host: *mut libc::c_char = sane_address(safe_gethostname());
let mut nheaders: libc::c_uint = 0i32 as libc::c_uint;
let mut code: libc::c_int = 0;
let mut last_hdr: C2RustUnnamed = HDR_OTHER;
let mut check_hdr: libc::c_int = 0;
let mut has_to: libc::c_int = 0i32;
// init global variables
let ref mut fresh1 = *(not_const_pp(&ptr_to_globals as *const *mut globals as *const libc::c_void)
as *mut *mut globals);
*fresh1 = xzalloc(::std::mem::size_of::<globals>() as libc::c_ulong) as *mut globals;
asm!("" : : : "memory" : "volatile");
(*ptr_to_globals).opt_charset =
b"us-ascii\x00" as *const u8 as *const libc::c_char as *mut libc::c_char;
// default HOST[:PORT] is $SMTPHOST, or localhost
opt_connect = getenv(b"SMTPHOST\x00" as *const u8 as *const libc::c_char);
if opt_connect.is_null() {
opt_connect = b"127.0.0.1\x00" as *const u8 as *const libc::c_char as *mut libc::c_char
}
// save initial stdin since body is piped!
xdup2(0i32, 3i32);
(*ptr_to_globals).fp0 = xfdopen_for_read(3i32);
// parse options
// N.B. since -H and -S are mutually exclusive they do not interfere in opt_connect
// -a is for ssmtp (http://downloads.openwrt.org/people/nico/man/man8/ssmtp.8.html) compatibility,
// it is still under development.
(*ptr_to_globals).opts = getopt32(
argv,
b"^tf:o:iw:+H:S:a:*:v\x00vv:H--S:S--H\x00" as *const u8 as *const libc::c_char,
&mut opt_from as *mut *mut libc::c_char,
0 as *mut libc::c_void,
&mut (*ptr_to_globals).timeout as *mut libc::c_uint,
&mut opt_connect as *mut *mut | rcptto_list | identifier_name |
sendmail.rs | 32 {
bb_simple_error_msg_and_die(b"SMTP init failed\x00" as *const u8 as *const libc::c_char);
}
} else {
// vanilla connection
let mut fd: libc::c_int = 0;
fd = create_and_connect_stream_or_die(opt_connect, 25i32);
// and make ourselves a simple IO filter
xmove_fd(fd, 0i32);
xdup2(0i32, 1i32);
// Wait for initial server 220 message
smtp_check(0 as *const libc::c_char, 220i32);
}
// we should start with modern EHLO
if 250i32
!= smtp_checkp(
b"EHLO %s\x00" as *const u8 as *const libc::c_char,
host,
-1i32,
)
{
smtp_checkp(
b"HELO %s\x00" as *const u8 as *const libc::c_char,
host,
250i32,
);
}
// perform authentication
if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint != 0 {
// read credentials unless they are given via -a[up] options
if (*ptr_to_globals).user.is_null() || (*ptr_to_globals).pass.is_null() {
get_cred_or_die(4i32);
}
if (*ptr_to_globals).opts & OPT_am_plain as libc::c_int as libc::c_uint != 0 {
// C: AUTH PLAIN
// S: 334
// C: base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
//Note: a shorter format is allowed:
// C: AUTH PLAIN base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH PLAIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
let mut user_len: libc::c_uint = strlen((*ptr_to_globals).user) as libc::c_uint;
let mut pass_len: libc::c_uint = strlen((*ptr_to_globals).pass) as libc::c_uint;
let mut sz: libc::c_uint = (1i32 as libc::c_uint)
.wrapping_add(user_len)
.wrapping_add(1i32 as libc::c_uint)
.wrapping_add(pass_len);
let vla = sz.wrapping_add(1i32 as libc::c_uint) as usize;
let mut plain_auth: Vec<libc::c_char> = ::std::vec::from_elem(0, vla);
// the format is:
// "authorization identity<NUL>username<NUL>password"
// authorization identity is empty.
*plain_auth.as_mut_ptr().offset(0) = '\u{0}' as i32 as libc::c_char;
strcpy(
stpcpy(plain_auth.as_mut_ptr().offset(1), (*ptr_to_globals).user).offset(1),
(*ptr_to_globals).pass,
);
printbuf_base64(plain_auth.as_mut_ptr(), sz);
} else {
// C: AUTH LOGIN
// S: 334 VXNlcm5hbWU6
// ^^^^^^^^^^^^ server says "Username:"
// C: base64encoded(user)
// S: 334 UGFzc3dvcmQ6
// ^^^^^^^^^^^^ server says "Password:"
// C: base64encoded(pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH LOGIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
printstr_base64((*ptr_to_globals).user);
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 334i32);
printstr_base64((*ptr_to_globals).pass);
}
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 235i32);
}
// set sender
// N.B. we have here a very loosely defined algorythm
// since sendmail historically offers no means to specify secrets on cmdline.
// 1) server can require no authentication ->
// we must just provide a (possibly fake) reply address.
// 2) server can require AUTH ->
// we must provide valid username and password along with a (possibly fake) reply address.
// For the sake of security username and password are to be read either from console or from a secured file.
// Since reading from console may defeat usability, the solution is either to read from a predefined
// file descriptor (e.g. 4), or again from a secured file.
// got no sender address? use auth name, then UID username as a last resort
if opt_from.is_null() {
opt_from = xasprintf(
b"%s@%s\x00" as *const u8 as *const libc::c_char,
if !(*ptr_to_globals).user.is_null() {
(*ptr_to_globals).user
} else {
xuid2uname(getuid())
},
(*xgethostbyname(host)).h_name,
)
}
free(host as *mut libc::c_void);
smtp_checkp(
b"MAIL FROM:<%s>\x00" as *const u8 as *const libc::c_char,
opt_from,
250i32,
);
// process message
// read recipients from message and add them to those given on cmdline.
// this means we scan stdin for To:, Cc:, Bcc: lines until an empty line
// and then use the rest of stdin as message body
code = 0i32; // set "analyze headers" mode
's_369: loop {
s = xmalloc_fgetline((*ptr_to_globals).fp0);
if !s.is_null() {
current_block = 16252544171633782868;
} else {
current_block = 228501038991332163;
}
loop {
match current_block {
228501038991332163 =>
// odd case: we didn't stop "analyze headers" mode -> message body is empty. Reenter the loop
// N.B. after reenter code will be > 0
{
if !(code == 0) {
// finalize the message
smtp_check(b".\x00" as *const u8 as *const libc::c_char, 250i32);
break 's_369;
}
}
_ =>
// put message lines doubling leading dots
{
if code != 0 {
// escape leading dots
// N.B. this feature is implied even if no -i (-oi) switch given
// N.B. we need to escape the leading dot regardless of
// whether it is single or not character on the line
if '.' as i32 == *s.offset(0) as libc::c_int {
/*&& '\0' == s[1] */
bb_putchar('.' as i32);
}
// dump read line
send_r_n(s);
free(s as *mut libc::c_void);
continue 's_369;
} else {
// analyze headers
// To: or Cc: headers add recipients
check_hdr = (0i32
== strncasecmp(
b"To:\x00" as *const u8 as *const libc::c_char,
s,
3i32 as libc::c_ulong,
)) as libc::c_int;
has_to |= check_hdr;
if (*ptr_to_globals).opts & OPT_t as libc::c_int as libc::c_uint != 0 {
if check_hdr != 0
|| 0i32
== strncasecmp(
(b"Bcc:\x00" as *const u8 as *const libc::c_char).offset(1),
s,
3i32 as libc::c_ulong,
)
{
rcptto_list(s.offset(3));
last_hdr = HDR_TOCC;
current_block = 2265380199544777579;
break;
} else if 0i32
== strncasecmp( | b"Bcc:\x00" as *const u8 as *const libc::c_char,
s,
4i32 as libc::c_ulong,
) | random_line_split | |
replicaset.go |
// the controller needs to create a new template status for it
newTemplate := v1alpha1.TemplateStatus{
Name: template.Name,
CollisionCount: new(int32),
}
// Matching ReplicaSet is not equal - increment the collisionCount in the ExperimentStatus
// and requeue the Experiment.
preCollisionCount := *newTemplate.CollisionCount
*newTemplate.CollisionCount++
statusCpy := ec.ex.Status.DeepCopy()
statusCpy.TemplateStatuses = append(statusCpy.TemplateStatuses, newTemplate)
templateStatusBytes, marshalErr := json.Marshal(statusCpy.TemplateStatuses)
if marshalErr != nil {
return nil, marshalErr
}
patch := fmt.Sprintf(CollisionCountPatch, string(templateStatusBytes))
_, patchErr := ec.argoProjClientset.ArgoprojV1alpha1().Experiments(ec.ex.Namespace).Patch(ctx, ec.ex.Name, patchtypes.MergePatchType, []byte(patch), metav1.PatchOptions{})
ec.log.WithField("patch", patch).Debug("Applied Patch")
if patchErr != nil {
ec.log.Errorf("Error patching service %s", err.Error())
return nil, patchErr
}
ec.log.Warnf("Found a hash collision - bumped collisionCount (%d->%d) to resolve it", preCollisionCount, *newTemplate.CollisionCount)
return nil, err
case err != nil:
msg := fmt.Sprintf(conditions.FailedRSCreateMessage, newRS.Name, err)
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.FailedRSCreateReason}, msg)
return nil, err
default:
ec.log.Infof("Created ReplicaSet %s", createdRS.Name)
}
if !alreadyExists && newReplicasCount > int32(0) {
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.NewReplicaSetReason}, conditions.NewReplicaSetMessage+" with size %d", createdRS.Name, newReplicasCount)
}
return createdRS, nil
}
// newReplicaSetFromTemplate is a helper to formulate a replicaset from an experiment's template
func newReplicaSetFromTemplate(experiment *v1alpha1.Experiment, template v1alpha1.TemplateSpec, collisionCount *int32) appsv1.ReplicaSet {
newRSTemplate := *template.Template.DeepCopy()
replicaSetAnnotations := newReplicaSetAnnotations(experiment.Name, template.Name)
if newRSTemplate.Labels != nil {
if _, ok := newRSTemplate.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]; ok {
delete(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey)
}
}
podHash := hash.ComputePodTemplateHash(&newRSTemplate, collisionCount)
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey, podHash)
// Add podTemplateHash label to selector.
newRSSelector := labelsutil.CloneSelectorAndAddLabel(template.Selector, v1alpha1.DefaultRolloutUniqueLabelKey, podHash)
// The annotations must be different for each template because annotations are used to match
// replicasets to templates. We inject the experiment and template name in the replicaset
// annotations to ensure uniqueness.
rs := appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", experiment.Name, template.Name),
Namespace: experiment.Namespace,
Labels: map[string]string{
v1alpha1.DefaultRolloutUniqueLabelKey: podHash,
},
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(experiment, controllerKind)},
Annotations: replicaSetAnnotations,
},
Spec: appsv1.ReplicaSetSpec{
Replicas: new(int32),
MinReadySeconds: template.MinReadySeconds,
Selector: newRSSelector,
Template: newRSTemplate,
},
}
return rs
}
// isReplicaSetSemanticallyEqual checks to see if an existing ReplicaSet is semantically equal
// to the ReplicaSet we are trying to create
func (ec *experimentContext) isReplicaSetSemanticallyEqual(newRS, existingRS *appsv1.ReplicaSet) bool {
controllerRef := metav1.GetControllerOf(existingRS)
podTemplatesEqual := replicasetutil.PodTemplateEqualIgnoreHash(&existingRS.Spec.Template, &newRS.Spec.Template)
existingAnnotations := existingRS.GetAnnotations()
newAnnotations := newRS.GetAnnotations()
return controllerRef != nil &&
controllerRef.UID == ec.ex.UID &&
podTemplatesEqual &&
existingAnnotations != nil &&
existingAnnotations[v1alpha1.ExperimentNameAnnotationKey] == newAnnotations[v1alpha1.ExperimentNameAnnotationKey] &&
existingAnnotations[v1alpha1.ExperimentTemplateNameAnnotationKey] == newAnnotations[v1alpha1.ExperimentTemplateNameAnnotationKey]
}
// addScaleDownDelay injects the `scale-down-deadline` annotation to the ReplicaSet, or if
// scaleDownDelaySeconds is zero, removes it if it exists
// returns True if ReplicaSet is patched, otherwise False
func (ec *experimentContext) addScaleDownDelay(rs *appsv1.ReplicaSet) (bool, error) {
rsIsUpdated := false
if rs == nil {
return rsIsUpdated, nil
}
ctx := context.TODO()
scaleDownDelaySeconds := time.Duration(defaults.GetExperimentScaleDownDelaySecondsOrDefault(ec.ex))
if scaleDownDelaySeconds == 0 {
// If scaledown deadline is zero, it means we need to remove any replicasets with the delay
if replicasetutil.HasScaleDownDeadline(rs) {
return ec.removeScaleDownDelay(rs)
}
return rsIsUpdated, nil
} else {
// If RS already has non-zero scaleDownDelayDeadline set, then we don't do anything
if replicasetutil.HasScaleDownDeadline(rs) {
return rsIsUpdated, nil
}
}
deadline := timeutil.MetaNow().Add(scaleDownDelaySeconds * time.Second).UTC().Format(time.RFC3339)
patch := fmt.Sprintf(addScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, deadline)
_, err := ec.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{})
if err == nil {
ec.log.Infof("Set '%s' annotation on '%s' to %s (%s)", v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, rs.Name, deadline, scaleDownDelaySeconds)
rsIsUpdated = true
}
return rsIsUpdated, err
}
// removeScaleDownDelay removes the `scale-down-deadline` annotation from the ReplicaSet (if it exists)
// returns True if ReplicaSet is patched, otherwise False
func (ec *experimentContext) removeScaleDownDelay(rs *appsv1.ReplicaSet) (bool, error) {
ctx := context.TODO()
rsIsUpdated := false
if !replicasetutil.HasScaleDownDeadline(rs) {
return rsIsUpdated, nil
}
patch := fmt.Sprintf(removeScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey)
_, err := ec.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{})
if err == nil {
ec.log.Infof("Removed '%s' annotation from RS '%s'", v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, rs.Name)
rsIsUpdated = true
}
return rsIsUpdated, err
}
func (ec *experimentContext) scaleReplicaSetAndRecordEvent(rs *appsv1.ReplicaSet, newScale int32) (bool, *appsv1.ReplicaSet, error) {
// No need to scale
if *(rs.Spec.Replicas) == newScale {
return false, rs, nil
}
var scalingOperation string
if *(rs.Spec.Replicas) < newScale {
scalingOperation = "up"
} else {
scalingOperation = "down"
}
scaled, newRS, err := ec.scaleReplicaSet(rs, newScale, scalingOperation)
if err != nil {
if k8serrors.IsConflict(err) {
ec.log.Warnf("Retrying scaling of ReplicaSet '%s': %s", rs.Name, err)
ec.enqueueExperimentAfter(ec.ex, time.Second)
return false, nil, nil
}
msg := fmt.Sprintf("Failed to scale %s %s: %v", rs.Name, scalingOperation, err)
ec.recorder.Warnf(ec.ex, record.EventOptions{EventReason: "ReplicaSetUpdateError"}, msg)
} else {
ec.log.Infof("Scaled %s ReplicaSet %s from %d to %d", scalingOperation, rs.Name, *(rs.Spec.Replicas), newScale)
}
return scaled, newRS, err
}
func (ec *experimentContext) | scaleReplicaSet | identifier_name | |
replicaset.go | "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
"github.com/argoproj/argo-rollouts/utils/conditions"
"github.com/argoproj/argo-rollouts/utils/defaults"
experimentutil "github.com/argoproj/argo-rollouts/utils/experiment"
"github.com/argoproj/argo-rollouts/utils/hash"
logutil "github.com/argoproj/argo-rollouts/utils/log"
"github.com/argoproj/argo-rollouts/utils/record"
replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset"
timeutil "github.com/argoproj/argo-rollouts/utils/time"
)
const (
CollisionCountPatch = `{
"status" : {
"templateStatuses" : %s
}
}`
addScaleDownAtAnnotationsPatch = `[{ "op": "add", "path": "/metadata/annotations/%s", "value": "%s"}]`
removeScaleDownAtAnnotationsPatch = `[{ "op": "remove", "path": "/metadata/annotations/%s"}]`
)
var controllerKind = v1alpha1.SchemeGroupVersion.WithKind("Experiment")
func (c *Controller) getReplicaSetsForExperiment(experiment *v1alpha1.Experiment) (map[string]*appsv1.ReplicaSet, error) {
rsList, err := c.replicaSetLister.ReplicaSets(experiment.Namespace).List(labels.Everything())
if err != nil {
return nil, err
}
templateDefined := func(name string) bool {
for _, tmpl := range experiment.Spec.Templates {
if tmpl.Name == name {
return true
}
}
return false
}
templateToRS := make(map[string]*appsv1.ReplicaSet)
for _, rs := range rsList {
controllerRef := metav1.GetControllerOf(rs)
if controllerRef == nil || controllerRef.UID != experiment.UID || rs.Annotations == nil || rs.Annotations[v1alpha1.ExperimentNameAnnotationKey] != experiment.Name {
continue
}
if templateName := rs.Annotations[v1alpha1.ExperimentTemplateNameAnnotationKey]; templateName != "" {
if _, ok := templateToRS[templateName]; ok {
return nil, fmt.Errorf("multiple ReplicaSets match single experiment template: %s", templateName)
}
if templateDefined(templateName) {
templateToRS[templateName] = rs
logCtx := log.WithField(logutil.ExperimentKey, experiment.Name).WithField(logutil.NamespaceKey, experiment.Namespace)
logCtx.Infof("Claimed ReplicaSet '%s' for template '%s'", rs.Name, templateName)
}
}
}
return templateToRS, nil
}
// createReplicaSet creates a new replicaset based on the template
func (ec *experimentContext) createReplicaSet(template v1alpha1.TemplateSpec, collisionCount *int32) (*appsv1.ReplicaSet, error) {
ctx := context.TODO()
newRS := newReplicaSetFromTemplate(ec.ex, template, collisionCount)
newReplicasCount := experimentutil.CalculateTemplateReplicasCount(ec.ex, template)
*(newRS.Spec.Replicas) = newReplicasCount
// Create the new ReplicaSet. If it already exists, then we need to check for possible
// hash collisions. If there is any other error, we need to report it in the status of
// the Experiment.
alreadyExists := false
createdRS, err := ec.kubeclientset.AppsV1().ReplicaSets(ec.ex.Namespace).Create(ctx, &newRS, metav1.CreateOptions{})
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Experiment.
case errors.IsAlreadyExists(err):
alreadyExists = true
// Fetch a copy of the ReplicaSet.
rs, rsErr := ec.replicaSetLister.ReplicaSets(newRS.Namespace).Get(newRS.Name)
if rsErr != nil {
return nil, rsErr
}
// If the Experiment owns the ReplicaSet and the ReplicaSet's PodTemplateSpec is semantically
// deep equal to the PodTemplateSpec of the Experiment, it's the Experiment's new ReplicaSet.
// Otherwise, this is a hash collision and we need to increment the collisionCount field in
// the status of the Experiment and requeue to try the creation in the next sync.
if ec.isReplicaSetSemanticallyEqual(&newRS, rs) {
// NOTE: we should only get here when the informer cache is stale and we already
// succeeded in creating this replicaset
createdRS = rs
err = nil
ec.log.Warnf("Claimed existing ReplicaSet %s with equivalent template spec", createdRS.Name)
break
}
// Since the replicaset is a collision, the experiment will not have a status for that rs and
// the controller needs to create a new template status for it
newTemplate := v1alpha1.TemplateStatus{
Name: template.Name,
CollisionCount: new(int32),
}
// Matching ReplicaSet is not equal - increment the collisionCount in the ExperimentStatus
// and requeue the Experiment.
preCollisionCount := *newTemplate.CollisionCount
*newTemplate.CollisionCount++
statusCpy := ec.ex.Status.DeepCopy()
statusCpy.TemplateStatuses = append(statusCpy.TemplateStatuses, newTemplate)
templateStatusBytes, marshalErr := json.Marshal(statusCpy.TemplateStatuses)
if marshalErr != nil {
return nil, marshalErr
}
patch := fmt.Sprintf(CollisionCountPatch, string(templateStatusBytes))
_, patchErr := ec.argoProjClientset.ArgoprojV1alpha1().Experiments(ec.ex.Namespace).Patch(ctx, ec.ex.Name, patchtypes.MergePatchType, []byte(patch), metav1.PatchOptions{})
ec.log.WithField("patch", patch).Debug("Applied Patch")
if patchErr != nil {
ec.log.Errorf("Error patching service %s", err.Error())
return nil, patchErr
}
ec.log.Warnf("Found a hash collision - bumped collisionCount (%d->%d) to resolve it", preCollisionCount, *newTemplate.CollisionCount)
return nil, err
case err != nil:
msg := fmt.Sprintf(conditions.FailedRSCreateMessage, newRS.Name, err)
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.FailedRSCreateReason}, msg)
return nil, err
default:
ec.log.Infof("Created ReplicaSet %s", createdRS.Name)
}
if !alreadyExists && newReplicasCount > int32(0) {
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.NewReplicaSetReason}, conditions.NewReplicaSetMessage+" with size %d", createdRS.Name, newReplicasCount)
}
return createdRS, nil
}
// newReplicaSetFromTemplate is a helper to formulate a replicaset from an experiment's template
func newReplicaSetFromTemplate(experiment *v1alpha1.Experiment, template v1alpha1.TemplateSpec, collisionCount *int32) appsv1.ReplicaSet {
newRSTemplate := *template.Template.DeepCopy()
replicaSetAnnotations := newReplicaSetAnnotations(experiment.Name, template.Name)
if newRSTemplate.Labels != nil {
if _, ok := newRSTemplate.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]; ok {
delete(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey)
}
}
podHash := hash.ComputePodTemplateHash(&newRSTemplate, collisionCount)
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey, podHash)
// Add podTemplateHash label to selector.
newRSSelector := labelsutil.CloneSelectorAndAddLabel(template.Selector, v1alpha1.DefaultRolloutUniqueLabelKey, podHash)
// The annotations must be different for each template because annotations are used to match
// replicasets to templates. We inject the experiment and template name in the replicaset
// annotations to ensure uniqueness.
rs := appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", experiment.Name, template.Name),
Namespace: experiment.Namespace,
Labels: map[string]string{
v1alpha1.DefaultRolloutUniqueLabelKey: podHash,
},
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(experiment, controllerKind)},
Annotations: replicaSetAnnotations,
},
Spec: appsv1.ReplicaSetSpec{
Replicas: new(int32),
MinReadySeconds: template.MinReadySeconds,
Selector: newRSSelector,
Template: newRSTemplate,
},
}
return rs
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
patchtypes "k8s.io/apimachinery/pkg/types"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
| random_line_split | |
replicaset.go | .MergePatchType, []byte(patch), metav1.PatchOptions{})
ec.log.WithField("patch", patch).Debug("Applied Patch")
if patchErr != nil {
ec.log.Errorf("Error patching service %s", err.Error())
return nil, patchErr
}
ec.log.Warnf("Found a hash collision - bumped collisionCount (%d->%d) to resolve it", preCollisionCount, *newTemplate.CollisionCount)
return nil, err
case err != nil:
msg := fmt.Sprintf(conditions.FailedRSCreateMessage, newRS.Name, err)
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.FailedRSCreateReason}, msg)
return nil, err
default:
ec.log.Infof("Created ReplicaSet %s", createdRS.Name)
}
if !alreadyExists && newReplicasCount > int32(0) {
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.NewReplicaSetReason}, conditions.NewReplicaSetMessage+" with size %d", createdRS.Name, newReplicasCount)
}
return createdRS, nil
}
// newReplicaSetFromTemplate is a helper to formulate a replicaset from an experiment's template
func newReplicaSetFromTemplate(experiment *v1alpha1.Experiment, template v1alpha1.TemplateSpec, collisionCount *int32) appsv1.ReplicaSet {
newRSTemplate := *template.Template.DeepCopy()
replicaSetAnnotations := newReplicaSetAnnotations(experiment.Name, template.Name)
if newRSTemplate.Labels != nil {
if _, ok := newRSTemplate.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]; ok {
delete(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey)
}
}
podHash := hash.ComputePodTemplateHash(&newRSTemplate, collisionCount)
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey, podHash)
// Add podTemplateHash label to selector.
newRSSelector := labelsutil.CloneSelectorAndAddLabel(template.Selector, v1alpha1.DefaultRolloutUniqueLabelKey, podHash)
// The annotations must be different for each template because annotations are used to match
// replicasets to templates. We inject the experiment and template name in the replicaset
// annotations to ensure uniqueness.
rs := appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", experiment.Name, template.Name),
Namespace: experiment.Namespace,
Labels: map[string]string{
v1alpha1.DefaultRolloutUniqueLabelKey: podHash,
},
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(experiment, controllerKind)},
Annotations: replicaSetAnnotations,
},
Spec: appsv1.ReplicaSetSpec{
Replicas: new(int32),
MinReadySeconds: template.MinReadySeconds,
Selector: newRSSelector,
Template: newRSTemplate,
},
}
return rs
}
// isReplicaSetSemanticallyEqual checks to see if an existing ReplicaSet is semantically equal
// to the ReplicaSet we are trying to create
func (ec *experimentContext) isReplicaSetSemanticallyEqual(newRS, existingRS *appsv1.ReplicaSet) bool {
controllerRef := metav1.GetControllerOf(existingRS)
podTemplatesEqual := replicasetutil.PodTemplateEqualIgnoreHash(&existingRS.Spec.Template, &newRS.Spec.Template)
existingAnnotations := existingRS.GetAnnotations()
newAnnotations := newRS.GetAnnotations()
return controllerRef != nil &&
controllerRef.UID == ec.ex.UID &&
podTemplatesEqual &&
existingAnnotations != nil &&
existingAnnotations[v1alpha1.ExperimentNameAnnotationKey] == newAnnotations[v1alpha1.ExperimentNameAnnotationKey] &&
existingAnnotations[v1alpha1.ExperimentTemplateNameAnnotationKey] == newAnnotations[v1alpha1.ExperimentTemplateNameAnnotationKey]
}
// addScaleDownDelay injects the `scale-down-deadline` annotation to the ReplicaSet, or if
// scaleDownDelaySeconds is zero, removes it if it exists
// returns True if ReplicaSet is patched, otherwise False
func (ec *experimentContext) addScaleDownDelay(rs *appsv1.ReplicaSet) (bool, error) {
rsIsUpdated := false
if rs == nil {
return rsIsUpdated, nil
}
ctx := context.TODO()
scaleDownDelaySeconds := time.Duration(defaults.GetExperimentScaleDownDelaySecondsOrDefault(ec.ex))
if scaleDownDelaySeconds == 0 {
// If scaledown deadline is zero, it means we need to remove any replicasets with the delay
if replicasetutil.HasScaleDownDeadline(rs) {
return ec.removeScaleDownDelay(rs)
}
return rsIsUpdated, nil
} else {
// If RS already has non-zero scaleDownDelayDeadline set, then we don't do anything
if replicasetutil.HasScaleDownDeadline(rs) {
return rsIsUpdated, nil
}
}
deadline := timeutil.MetaNow().Add(scaleDownDelaySeconds * time.Second).UTC().Format(time.RFC3339)
patch := fmt.Sprintf(addScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, deadline)
_, err := ec.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{})
if err == nil {
ec.log.Infof("Set '%s' annotation on '%s' to %s (%s)", v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, rs.Name, deadline, scaleDownDelaySeconds)
rsIsUpdated = true
}
return rsIsUpdated, err
}
// removeScaleDownDelay removes the `scale-down-deadline` annotation from the ReplicaSet (if it exists)
// returns True if ReplicaSet is patched, otherwise False
func (ec *experimentContext) removeScaleDownDelay(rs *appsv1.ReplicaSet) (bool, error) {
ctx := context.TODO()
rsIsUpdated := false
if !replicasetutil.HasScaleDownDeadline(rs) {
return rsIsUpdated, nil
}
patch := fmt.Sprintf(removeScaleDownAtAnnotationsPatch, v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey)
_, err := ec.kubeclientset.AppsV1().ReplicaSets(rs.Namespace).Patch(ctx, rs.Name, patchtypes.JSONPatchType, []byte(patch), metav1.PatchOptions{})
if err == nil {
ec.log.Infof("Removed '%s' annotation from RS '%s'", v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey, rs.Name)
rsIsUpdated = true
}
return rsIsUpdated, err
}
func (ec *experimentContext) scaleReplicaSetAndRecordEvent(rs *appsv1.ReplicaSet, newScale int32) (bool, *appsv1.ReplicaSet, error) {
// No need to scale
if *(rs.Spec.Replicas) == newScale {
return false, rs, nil
}
var scalingOperation string
if *(rs.Spec.Replicas) < newScale {
scalingOperation = "up"
} else {
scalingOperation = "down"
}
scaled, newRS, err := ec.scaleReplicaSet(rs, newScale, scalingOperation)
if err != nil {
if k8serrors.IsConflict(err) {
ec.log.Warnf("Retrying scaling of ReplicaSet '%s': %s", rs.Name, err)
ec.enqueueExperimentAfter(ec.ex, time.Second)
return false, nil, nil
}
msg := fmt.Sprintf("Failed to scale %s %s: %v", rs.Name, scalingOperation, err)
ec.recorder.Warnf(ec.ex, record.EventOptions{EventReason: "ReplicaSetUpdateError"}, msg)
} else {
ec.log.Infof("Scaled %s ReplicaSet %s from %d to %d", scalingOperation, rs.Name, *(rs.Spec.Replicas), newScale)
}
return scaled, newRS, err
}
func (ec *experimentContext) scaleReplicaSet(rs *appsv1.ReplicaSet, newScale int32, scalingOperation string) (bool, *appsv1.ReplicaSet, error) | {
ctx := context.TODO()
oldScale := *(rs.Spec.Replicas)
sizeNeedsUpdate := oldScale != newScale
scaled := false
var err error
if sizeNeedsUpdate {
rsCopy := rs.DeepCopy()
*(rsCopy.Spec.Replicas) = newScale
rs, err = ec.kubeclientset.AppsV1().ReplicaSets(rsCopy.Namespace).Update(ctx, rsCopy, metav1.UpdateOptions{})
if err == nil && sizeNeedsUpdate {
scaled = true
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.ScalingReplicaSetReason}, "Scaled %s ReplicaSet %s from %d to %d", scalingOperation, rs.Name, oldScale, newScale)
}
}
return scaled, rs, err
} | identifier_body | |
replicaset.go | != nil {
return nil, err
}
templateDefined := func(name string) bool {
for _, tmpl := range experiment.Spec.Templates {
if tmpl.Name == name {
return true
}
}
return false
}
templateToRS := make(map[string]*appsv1.ReplicaSet)
for _, rs := range rsList {
controllerRef := metav1.GetControllerOf(rs)
if controllerRef == nil || controllerRef.UID != experiment.UID || rs.Annotations == nil || rs.Annotations[v1alpha1.ExperimentNameAnnotationKey] != experiment.Name {
continue
}
if templateName := rs.Annotations[v1alpha1.ExperimentTemplateNameAnnotationKey]; templateName != "" {
if _, ok := templateToRS[templateName]; ok |
if templateDefined(templateName) {
templateToRS[templateName] = rs
logCtx := log.WithField(logutil.ExperimentKey, experiment.Name).WithField(logutil.NamespaceKey, experiment.Namespace)
logCtx.Infof("Claimed ReplicaSet '%s' for template '%s'", rs.Name, templateName)
}
}
}
return templateToRS, nil
}
// createReplicaSet creates a new replicaset based on the template
func (ec *experimentContext) createReplicaSet(template v1alpha1.TemplateSpec, collisionCount *int32) (*appsv1.ReplicaSet, error) {
ctx := context.TODO()
newRS := newReplicaSetFromTemplate(ec.ex, template, collisionCount)
newReplicasCount := experimentutil.CalculateTemplateReplicasCount(ec.ex, template)
*(newRS.Spec.Replicas) = newReplicasCount
// Create the new ReplicaSet. If it already exists, then we need to check for possible
// hash collisions. If there is any other error, we need to report it in the status of
// the Experiment.
alreadyExists := false
createdRS, err := ec.kubeclientset.AppsV1().ReplicaSets(ec.ex.Namespace).Create(ctx, &newRS, metav1.CreateOptions{})
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Experiment.
case errors.IsAlreadyExists(err):
alreadyExists = true
// Fetch a copy of the ReplicaSet.
rs, rsErr := ec.replicaSetLister.ReplicaSets(newRS.Namespace).Get(newRS.Name)
if rsErr != nil {
return nil, rsErr
}
// If the Experiment owns the ReplicaSet and the ReplicaSet's PodTemplateSpec is semantically
// deep equal to the PodTemplateSpec of the Experiment, it's the Experiment's new ReplicaSet.
// Otherwise, this is a hash collision and we need to increment the collisionCount field in
// the status of the Experiment and requeue to try the creation in the next sync.
if ec.isReplicaSetSemanticallyEqual(&newRS, rs) {
// NOTE: we should only get here when the informer cache is stale and we already
// succeeded in creating this replicaset
createdRS = rs
err = nil
ec.log.Warnf("Claimed existing ReplicaSet %s with equivalent template spec", createdRS.Name)
break
}
// Since the replicaset is a collision, the experiment will not have a status for that rs and
// the controller needs to create a new template status for it
newTemplate := v1alpha1.TemplateStatus{
Name: template.Name,
CollisionCount: new(int32),
}
// Matching ReplicaSet is not equal - increment the collisionCount in the ExperimentStatus
// and requeue the Experiment.
preCollisionCount := *newTemplate.CollisionCount
*newTemplate.CollisionCount++
statusCpy := ec.ex.Status.DeepCopy()
statusCpy.TemplateStatuses = append(statusCpy.TemplateStatuses, newTemplate)
templateStatusBytes, marshalErr := json.Marshal(statusCpy.TemplateStatuses)
if marshalErr != nil {
return nil, marshalErr
}
patch := fmt.Sprintf(CollisionCountPatch, string(templateStatusBytes))
_, patchErr := ec.argoProjClientset.ArgoprojV1alpha1().Experiments(ec.ex.Namespace).Patch(ctx, ec.ex.Name, patchtypes.MergePatchType, []byte(patch), metav1.PatchOptions{})
ec.log.WithField("patch", patch).Debug("Applied Patch")
if patchErr != nil {
ec.log.Errorf("Error patching service %s", err.Error())
return nil, patchErr
}
ec.log.Warnf("Found a hash collision - bumped collisionCount (%d->%d) to resolve it", preCollisionCount, *newTemplate.CollisionCount)
return nil, err
case err != nil:
msg := fmt.Sprintf(conditions.FailedRSCreateMessage, newRS.Name, err)
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.FailedRSCreateReason}, msg)
return nil, err
default:
ec.log.Infof("Created ReplicaSet %s", createdRS.Name)
}
if !alreadyExists && newReplicasCount > int32(0) {
ec.recorder.Eventf(ec.ex, record.EventOptions{EventReason: conditions.NewReplicaSetReason}, conditions.NewReplicaSetMessage+" with size %d", createdRS.Name, newReplicasCount)
}
return createdRS, nil
}
// newReplicaSetFromTemplate is a helper to formulate a replicaset from an experiment's template
func newReplicaSetFromTemplate(experiment *v1alpha1.Experiment, template v1alpha1.TemplateSpec, collisionCount *int32) appsv1.ReplicaSet {
newRSTemplate := *template.Template.DeepCopy()
replicaSetAnnotations := newReplicaSetAnnotations(experiment.Name, template.Name)
if newRSTemplate.Labels != nil {
if _, ok := newRSTemplate.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]; ok {
delete(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey)
}
}
podHash := hash.ComputePodTemplateHash(&newRSTemplate, collisionCount)
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(newRSTemplate.Labels, v1alpha1.DefaultRolloutUniqueLabelKey, podHash)
// Add podTemplateHash label to selector.
newRSSelector := labelsutil.CloneSelectorAndAddLabel(template.Selector, v1alpha1.DefaultRolloutUniqueLabelKey, podHash)
// The annotations must be different for each template because annotations are used to match
// replicasets to templates. We inject the experiment and template name in the replicaset
// annotations to ensure uniqueness.
rs := appsv1.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", experiment.Name, template.Name),
Namespace: experiment.Namespace,
Labels: map[string]string{
v1alpha1.DefaultRolloutUniqueLabelKey: podHash,
},
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(experiment, controllerKind)},
Annotations: replicaSetAnnotations,
},
Spec: appsv1.ReplicaSetSpec{
Replicas: new(int32),
MinReadySeconds: template.MinReadySeconds,
Selector: newRSSelector,
Template: newRSTemplate,
},
}
return rs
}
// isReplicaSetSemanticallyEqual checks to see if an existing ReplicaSet is semantically equal
// to the ReplicaSet we are trying to create
func (ec *experimentContext) isReplicaSetSemanticallyEqual(newRS, existingRS *appsv1.ReplicaSet) bool {
controllerRef := metav1.GetControllerOf(existingRS)
podTemplatesEqual := replicasetutil.PodTemplateEqualIgnoreHash(&existingRS.Spec.Template, &newRS.Spec.Template)
existingAnnotations := existingRS.GetAnnotations()
newAnnotations := newRS.GetAnnotations()
return controllerRef != nil &&
controllerRef.UID == ec.ex.UID &&
podTemplatesEqual &&
existingAnnotations != nil &&
existingAnnotations[v1alpha1.ExperimentNameAnnotationKey] == newAnnotations[v1alpha1.ExperimentNameAnnotationKey] &&
existingAnnotations[v1alpha1.ExperimentTemplateNameAnnotationKey] == newAnnotations[v1alpha1.ExperimentTemplateNameAnnotationKey]
}
// addScaleDownDelay injects the `scale-down-deadline` annotation to the ReplicaSet, or if
// scaleDownDelaySeconds is zero, removes it if it exists
// returns True if ReplicaSet is patched, otherwise False
func (ec *experimentContext) addScaleDownDelay(rs *appsv1.ReplicaSet) (bool, error) {
rsIsUpdated := false
if rs == nil {
return rsIsUpdated, nil
}
ctx := context.TODO()
scaleDownDelaySeconds := time.Duration(defaults.GetExperimentScaleDownDelaySecondsOrDefault(ec.ex))
if scaleDownDelaySeconds == 0 {
// If scaledown deadline is zero, it means we need to remove any replicasets with the delay
if replicasetutil.HasScaleDownDeadline(rs) {
return ec.remove | {
return nil, fmt.Errorf("multiple ReplicaSets match single experiment template: %s", templateName)
} | conditional_block |
TongitzApi.ts | store in var
let players = p.map((x,i) => {
return {
id:i+1,
name:x,
turn:i+1,
hand:[]
} as domain.playerStatus
})
//make deck, shuffle, store in var
let deck = this.shuffle(this.generateDeck());
//distribute/splice cards
for(let x = 0;x<12;x++)
players.forEach(x => x.hand.push(deck.splice(0,1)[0]));
players[0].hand.push(deck.splice(0,1)[0])
//save deck
this._svc.setDeck(gameId,deck)
//save players
this._svc.addPlayers(gameId,...players)
//save changes
this._svc.applyState(gameId);
}
/**
* desc: get gameStat from requester's perspective
* params:
* -gameId: id of the game retreiving. for now, it defaults to 1
* -playerId: id of the player retreiving the gameState
*/
public GetState (gameId: number, playerId: number) : resource.gameStateResource{
gameId = gameId || 1;
//service get whole gameState
return mapper.gameStateToResource(this._svc.fetchState(gameId),playerId);
}
/**
* desc:
* get gameState poll method. returns gameState if parameter:turn is less than current gameState turn
* params:
* -turn: turn of last gameState fetched by player
* -playerId: id of the player retreiving the gameState
*/
public CheckState (gameId: number, playerId: number, turn: number) : resource.gameStateResource{
gameId = gameId || 1;
//return only when gameState.turn is different than passed turn
//return gameState with turn only, when turn is same. else, gameStatus
//getState
let gameTurn:number = this._svc.getTurn(gameId);
if(turn < gameTurn){
return mapper.gameStateToResource(this._svc.fetchState(gameId), playerId);
}
else{
let emptyState = new domain.gameState();
emptyState.turn = gameTurn;
return mapper.gameStateToResource(emptyState, playerId);
}
}
/**
* could check if deck.length > 0, but if the game can be marked as ended (if really ended) every chow and play, then just check that
*/
public Draw(gameId:number,playerId:number):resource.cardResource {
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
//check if turn of player
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
//check if chowed or drawn already
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.drawOrChow);
//should store deck to var and save it later, but if splice apply to service's stored gameState's deck,
//then it will just be saved when applyState is called
//splice card from deck
let drawnCard = this._svc.getDeck(gameId).splice(0,1)[0];
//push spliced card to player hand
gamePlayer.hand.push(drawnCard);//TODO:: addHand
//update phase
gamePhase = domain.turnPhaseEnum.play;//TODO:: setPhase
//apply save
this._svc.applyState(gameId)
//return spliced card
return mapper.cardToResource(drawnCard);
}
/**
* check parameters
* -cards must be at least 2 to chow
* check if player's turn and if hasn't drawn or chowed
* check client authenticity/sync to server gameState cards
* check if cards form a valid house
* Create house from player's hand
* Update phase
* SaveState
*/
public Chow(gameId:number,playerId:number,playerCardIds:number[]) {
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
if(playerCardIds.length < 2) //check if there are at least 2 cards
{
throw "badRequest:can't form a house with less than 3 cards"
}
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.drawOrChow);
this.validateCards(gamePlayer.hand.map(x => x.id),playerCardIds)//gameState,playerId,cardIds);
//check combination if possible
let proposedHand: domain.card[] = []; | let gameDiscards = this._svc.getDiscards(gameId);
proposedHand.push(gameDiscards[gameDiscards.length-1]);//last discard
gamePlayer.hand.filter(x => playerCardIds.indexOf(x.id) != -1).forEach(x => proposedHand.push(x));//hand card to combine
// playerCardIds.map(x => gamePlayer.hand[gamePlayer.hand.map(x => x.id).indexOf(x)]).forEach(x => proposedHand.push(x));// same, just cool
this.validateCombination(proposedHand);
//new house
let gameHouses = this._svc.getHouses(gameId);
let newHouse:domain.house = new domain.house();
newHouse.id = gameHouses.length + 1;
newHouse.playerId = playerId;
//splice last discard to new house
newHouse.cards.push(gameDiscards.splice(gameDiscards.length - 1,1)[0]);
//splice from hand to new house
//map hand cards to id, map playerCardIds to it's index //TOOK some secs to do, some secs to lose, and 1 hour to remake
playerCardIds.forEach((x,i) => {
let cardIndexFromHand = gamePlayer.hand.map(hc => hc.id).indexOf(x);
let cardTaken = gamePlayer.hand.splice(cardIndexFromHand,1)[0];
newHouse.cards.push(new domain.playedCard(cardTaken,playerId,gameTurn))
})
this._svc.addHouse(gameId,newHouse);
//update phase
gamePhase = domain.turnPhaseEnum.play;//TODO:: setPhase
//check if won. condition: gamePlayer.hand.length == 0
if(gamePlayer.hand.length == 0)
{
this._svc.setWinner(gameId,playerId,domain.winMethodEnum.noHand);
}
this._svc.applyState(gameId);
}
public Play(gameId:number, playerId: number, playCards: resource.playRequestResource){
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
//discard is required
if(!(playCards ? !isNaN(playCards.discard) ? true : false : false))
return;
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
let playerCardIds = [];
playerCardIds.push(...this.flatten(playCards));
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.play);
this.validateCards(gamePlayer.hand.map(x => x.id),playerCardIds)//gameState,playerId,cardIds);
//validate each request part
//sapaw; forEach get house cards from game houses and player's card and validateCombination
let gameHouses = this._svc.getHouses(gameId);
playCards.sapaw.forEach(s => {
let proposedSapaw:domain.card[] = [];
let gameHouse = gameHouses.filter(gh => gh.id == s.houseId)[0].cards
let pHandCards = s.cardId.map(cid => gamePlayer.hand.filter(c => c.id == cid)[0])
proposedSapaw.push(...gameHouse,...pHandCards)
this.validateCombination(proposedSapaw);
})
//house; forEach, get house cards from request, validateCombination
playCards.houses.forEach(h => {
let proposedHouse:domain.card[] = [];
let house = h.map(x => gamePlayer.hand.filter(hc => hc.id == x)[0])
proposedHouse.push(...house);
this.validateCombination(proposedHouse);
})
//splice from hand, put to respective set of cards(list of sapaw, list of houses, discards)
playCards.sapaw.forEach(s => {
s.cardId.forEach(cid => {
let handCardIndex = gamePlayer.hand.map(hc => hc.id).indexOf(cid);
new domain.playedCard(gamePlayer.hand.splice(handCardIndex,1)[0],playerId,gameTurn)
})
})
playCards.houses.forEach(hs => {
let newHouse = new domain.house();
new | //get lastDiscard as card | random_line_split |
TongitzApi.ts | (gameId?:number,...p: string[]) : void//: gameStateResource {
{
gameId = gameId || 1;
if(p.length < 2 || p.length > 3) throw "500","error";
//set gameId, turn, phase
this._svc.addGame(gameId,1,domain.turnPhaseEnum.play);
//make players, set name,turn,id , store in var
let players = p.map((x,i) => {
return {
id:i+1,
name:x,
turn:i+1,
hand:[]
} as domain.playerStatus
})
//make deck, shuffle, store in var
let deck = this.shuffle(this.generateDeck());
//distribute/splice cards
for(let x = 0;x<12;x++)
players.forEach(x => x.hand.push(deck.splice(0,1)[0]));
players[0].hand.push(deck.splice(0,1)[0])
//save deck
this._svc.setDeck(gameId,deck)
//save players
this._svc.addPlayers(gameId,...players)
//save changes
this._svc.applyState(gameId);
}
/**
* desc: get gameStat from requester's perspective
* params:
* -gameId: id of the game retreiving. for now, it defaults to 1
* -playerId: id of the player retreiving the gameState
*/
public GetState (gameId: number, playerId: number) : resource.gameStateResource{
gameId = gameId || 1;
//service get whole gameState
return mapper.gameStateToResource(this._svc.fetchState(gameId),playerId);
}
/**
* desc:
* get gameState poll method. returns gameState if parameter:turn is less than current gameState turn
* params:
* -turn: turn of last gameState fetched by player
* -playerId: id of the player retreiving the gameState
*/
public CheckState (gameId: number, playerId: number, turn: number) : resource.gameStateResource{
gameId = gameId || 1;
//return only when gameState.turn is different than passed turn
//return gameState with turn only, when turn is same. else, gameStatus
//getState
let gameTurn:number = this._svc.getTurn(gameId);
if(turn < gameTurn){
return mapper.gameStateToResource(this._svc.fetchState(gameId), playerId);
}
else{
let emptyState = new domain.gameState();
emptyState.turn = gameTurn;
return mapper.gameStateToResource(emptyState, playerId);
}
}
/**
* could check if deck.length > 0, but if the game can be marked as ended (if really ended) every chow and play, then just check that
*/
public Draw(gameId:number,playerId:number):resource.cardResource {
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
//check if turn of player
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
//check if chowed or drawn already
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.drawOrChow);
//should store deck to var and save it later, but if splice apply to service's stored gameState's deck,
//then it will just be saved when applyState is called
//splice card from deck
let drawnCard = this._svc.getDeck(gameId).splice(0,1)[0];
//push spliced card to player hand
gamePlayer.hand.push(drawnCard);//TODO:: addHand
//update phase
gamePhase = domain.turnPhaseEnum.play;//TODO:: setPhase
//apply save
this._svc.applyState(gameId)
//return spliced card
return mapper.cardToResource(drawnCard);
}
/**
* check parameters
* -cards must be at least 2 to chow
* check if player's turn and if hasn't drawn or chowed
* check client authenticity/sync to server gameState cards
* check if cards form a valid house
* Create house from player's hand
* Update phase
* SaveState
*/
public Chow(gameId:number,playerId:number,playerCardIds:number[]) {
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
if(playerCardIds.length < 2) //check if there are at least 2 cards
{
throw "badRequest:can't form a house with less than 3 cards"
}
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.drawOrChow);
this.validateCards(gamePlayer.hand.map(x => x.id),playerCardIds)//gameState,playerId,cardIds);
//check combination if possible
let proposedHand: domain.card[] = [];
//get lastDiscard as card
let gameDiscards = this._svc.getDiscards(gameId);
proposedHand.push(gameDiscards[gameDiscards.length-1]);//last discard
gamePlayer.hand.filter(x => playerCardIds.indexOf(x.id) != -1).forEach(x => proposedHand.push(x));//hand card to combine
// playerCardIds.map(x => gamePlayer.hand[gamePlayer.hand.map(x => x.id).indexOf(x)]).forEach(x => proposedHand.push(x));// same, just cool
this.validateCombination(proposedHand);
//new house
let gameHouses = this._svc.getHouses(gameId);
let newHouse:domain.house = new domain.house();
newHouse.id = gameHouses.length + 1;
newHouse.playerId = playerId;
//splice last discard to new house
newHouse.cards.push(gameDiscards.splice(gameDiscards.length - 1,1)[0]);
//splice from hand to new house
//map hand cards to id, map playerCardIds to it's index //TOOK some secs to do, some secs to lose, and 1 hour to remake
playerCardIds.forEach((x,i) => {
let cardIndexFromHand = gamePlayer.hand.map(hc => hc.id).indexOf(x);
let cardTaken = gamePlayer.hand.splice(cardIndexFromHand,1)[0];
newHouse.cards.push(new domain.playedCard(cardTaken,playerId,gameTurn))
})
this._svc.addHouse(gameId,newHouse);
//update phase
gamePhase = domain.turnPhaseEnum.play;//TODO:: setPhase
//check if won. condition: gamePlayer.hand.length == 0
if(gamePlayer.hand.length == 0)
{
this._svc.setWinner(gameId,playerId,domain.winMethodEnum.noHand);
}
this._svc.applyState(gameId);
}
public Play(gameId:number, playerId: number, playCards: resource.playRequestResource){
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
//discard is required
if(!(playCards ? !isNaN(playCards.discard) ? true : false : false))
return;
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
let playerCardIds = [];
playerCardIds.push(...this.flatten(playCards));
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.play);
this.validateCards(gamePlayer.hand.map(x => x.id),playerCardIds)//gameState,playerId,cardIds);
//validate each request part
//sapaw; forEach get house cards from game houses and player's card and validateCombination
let gameHouses = this._svc.getHouses(gameId);
playCards.sapaw.forEach(s => {
let proposedSapaw:domain.card[] = [];
let gameHouse = gameHouses.filter(gh => gh.id == s.houseId)[0].cards
let pHandCards = s.cardId.map(cid => gamePlayer.hand.filter(c => c.id == cid)[0])
proposedSapaw.push(...gameHouse,...pHandCards)
this.validateCombination(proposedSapaw);
})
//house; forEach, get house cards from request, validateCombination
playCards.houses.forEach(h => {
let proposedHouse:domain.card[] = [];
let house = h.map(x => gamePlayer.hand.filter(hc => hc.id == x)[0])
proposedHouse.push(...house);
this.validateCombination(proposedHouse);
})
//splice from hand, put to respective set of cards(list of sapaw, list of houses, | NewGame | identifier_name | |
TongitzApi.ts | in var
let players = p.map((x,i) => {
return {
id:i+1,
name:x,
turn:i+1,
hand:[]
} as domain.playerStatus
})
//make deck, shuffle, store in var
let deck = this.shuffle(this.generateDeck());
//distribute/splice cards
for(let x = 0;x<12;x++)
players.forEach(x => x.hand.push(deck.splice(0,1)[0]));
players[0].hand.push(deck.splice(0,1)[0])
//save deck
this._svc.setDeck(gameId,deck)
//save players
this._svc.addPlayers(gameId,...players)
//save changes
this._svc.applyState(gameId);
}
/**
* desc: get gameStat from requester's perspective
* params:
* -gameId: id of the game retreiving. for now, it defaults to 1
* -playerId: id of the player retreiving the gameState
*/
public GetState (gameId: number, playerId: number) : resource.gameStateResource{
gameId = gameId || 1;
//service get whole gameState
return mapper.gameStateToResource(this._svc.fetchState(gameId),playerId);
}
/**
* desc:
* get gameState poll method. returns gameState if parameter:turn is less than current gameState turn
* params:
* -turn: turn of last gameState fetched by player
* -playerId: id of the player retreiving the gameState
*/
public CheckState (gameId: number, playerId: number, turn: number) : resource.gameStateResource{
gameId = gameId || 1;
//return only when gameState.turn is different than passed turn
//return gameState with turn only, when turn is same. else, gameStatus
//getState
let gameTurn:number = this._svc.getTurn(gameId);
if(turn < gameTurn){
return mapper.gameStateToResource(this._svc.fetchState(gameId), playerId);
}
else{
let emptyState = new domain.gameState();
emptyState.turn = gameTurn;
return mapper.gameStateToResource(emptyState, playerId);
}
}
/**
* could check if deck.length > 0, but if the game can be marked as ended (if really ended) every chow and play, then just check that
*/
public Draw(gameId:number,playerId:number):resource.cardResource {
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
//check if turn of player
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
//check if chowed or drawn already
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.drawOrChow);
//should store deck to var and save it later, but if splice apply to service's stored gameState's deck,
//then it will just be saved when applyState is called
//splice card from deck
let drawnCard = this._svc.getDeck(gameId).splice(0,1)[0];
//push spliced card to player hand
gamePlayer.hand.push(drawnCard);//TODO:: addHand
//update phase
gamePhase = domain.turnPhaseEnum.play;//TODO:: setPhase
//apply save
this._svc.applyState(gameId)
//return spliced card
return mapper.cardToResource(drawnCard);
}
/**
* check parameters
* -cards must be at least 2 to chow
* check if player's turn and if hasn't drawn or chowed
* check client authenticity/sync to server gameState cards
* check if cards form a valid house
* Create house from player's hand
* Update phase
* SaveState
*/
public Chow(gameId:number,playerId:number,playerCardIds:number[]) | // playerCardIds.map(x => gamePlayer.hand[gamePlayer.hand.map(x => x.id).indexOf(x)]).forEach(x => proposedHand.push(x));// same, just cool
this.validateCombination(proposedHand);
//new house
let gameHouses = this._svc.getHouses(gameId);
let newHouse:domain.house = new domain.house();
newHouse.id = gameHouses.length + 1;
newHouse.playerId = playerId;
//splice last discard to new house
newHouse.cards.push(gameDiscards.splice(gameDiscards.length - 1,1)[0]);
//splice from hand to new house
//map hand cards to id, map playerCardIds to it's index //TOOK some secs to do, some secs to lose, and 1 hour to remake
playerCardIds.forEach((x,i) => {
let cardIndexFromHand = gamePlayer.hand.map(hc => hc.id).indexOf(x);
let cardTaken = gamePlayer.hand.splice(cardIndexFromHand,1)[0];
newHouse.cards.push(new domain.playedCard(cardTaken,playerId,gameTurn))
})
this._svc.addHouse(gameId,newHouse);
//update phase
gamePhase = domain.turnPhaseEnum.play;//TODO:: setPhase
//check if won. condition: gamePlayer.hand.length == 0
if(gamePlayer.hand.length == 0)
{
this._svc.setWinner(gameId,playerId,domain.winMethodEnum.noHand);
}
this._svc.applyState(gameId);
}
public Play(gameId:number, playerId: number, playCards: resource.playRequestResource){
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
//discard is required
if(!(playCards ? !isNaN(playCards.discard) ? true : false : false))
return;
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
let playerCardIds = [];
playerCardIds.push(...this.flatten(playCards));
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.play);
this.validateCards(gamePlayer.hand.map(x => x.id),playerCardIds)//gameState,playerId,cardIds);
//validate each request part
//sapaw; forEach get house cards from game houses and player's card and validateCombination
let gameHouses = this._svc.getHouses(gameId);
playCards.sapaw.forEach(s => {
let proposedSapaw:domain.card[] = [];
let gameHouse = gameHouses.filter(gh => gh.id == s.houseId)[0].cards
let pHandCards = s.cardId.map(cid => gamePlayer.hand.filter(c => c.id == cid)[0])
proposedSapaw.push(...gameHouse,...pHandCards)
this.validateCombination(proposedSapaw);
})
//house; forEach, get house cards from request, validateCombination
playCards.houses.forEach(h => {
let proposedHouse:domain.card[] = [];
let house = h.map(x => gamePlayer.hand.filter(hc => hc.id == x)[0])
proposedHouse.push(...house);
this.validateCombination(proposedHouse);
})
//splice from hand, put to respective set of cards(list of sapaw, list of houses, discards)
playCards.sapaw.forEach(s => {
s.cardId.forEach(cid => {
let handCardIndex = gamePlayer.hand.map(hc => hc.id).indexOf(cid);
new domain.playedCard(gamePlayer.hand.splice(handCardIndex,1)[0],playerId,gameTurn)
})
})
playCards.houses.forEach(hs => {
let newHouse = new domain.house();
new | {
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
if(playerCardIds.length < 2) //check if there are at least 2 cards
{
throw "badRequest:can't form a house with less than 3 cards"
}
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.drawOrChow);
this.validateCards(gamePlayer.hand.map(x => x.id),playerCardIds)//gameState,playerId,cardIds);
//check combination if possible
let proposedHand: domain.card[] = [];
//get lastDiscard as card
let gameDiscards = this._svc.getDiscards(gameId);
proposedHand.push(gameDiscards[gameDiscards.length-1]);//last discard
gamePlayer.hand.filter(x => playerCardIds.indexOf(x.id) != -1).forEach(x => proposedHand.push(x));//hand card to combine | identifier_body |
TongitzApi.ts | in var
let players = p.map((x,i) => {
return {
id:i+1,
name:x,
turn:i+1,
hand:[]
} as domain.playerStatus
})
//make deck, shuffle, store in var
let deck = this.shuffle(this.generateDeck());
//distribute/splice cards
for(let x = 0;x<12;x++)
players.forEach(x => x.hand.push(deck.splice(0,1)[0]));
players[0].hand.push(deck.splice(0,1)[0])
//save deck
this._svc.setDeck(gameId,deck)
//save players
this._svc.addPlayers(gameId,...players)
//save changes
this._svc.applyState(gameId);
}
/**
* desc: get gameStat from requester's perspective
* params:
* -gameId: id of the game retreiving. for now, it defaults to 1
* -playerId: id of the player retreiving the gameState
*/
public GetState (gameId: number, playerId: number) : resource.gameStateResource{
gameId = gameId || 1;
//service get whole gameState
return mapper.gameStateToResource(this._svc.fetchState(gameId),playerId);
}
/**
* desc:
* get gameState poll method. returns gameState if parameter:turn is less than current gameState turn
* params:
* -turn: turn of last gameState fetched by player
* -playerId: id of the player retreiving the gameState
*/
public CheckState (gameId: number, playerId: number, turn: number) : resource.gameStateResource{
gameId = gameId || 1;
//return only when gameState.turn is different than passed turn
//return gameState with turn only, when turn is same. else, gameStatus
//getState
let gameTurn:number = this._svc.getTurn(gameId);
if(turn < gameTurn){
return mapper.gameStateToResource(this._svc.fetchState(gameId), playerId);
}
else{
let emptyState = new domain.gameState();
emptyState.turn = gameTurn;
return mapper.gameStateToResource(emptyState, playerId);
}
}
/**
* could check if deck.length > 0, but if the game can be marked as ended (if really ended) every chow and play, then just check that
*/
public Draw(gameId:number,playerId:number):resource.cardResource {
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
//check if turn of player
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
//check if chowed or drawn already
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.drawOrChow);
//should store deck to var and save it later, but if splice apply to service's stored gameState's deck,
//then it will just be saved when applyState is called
//splice card from deck
let drawnCard = this._svc.getDeck(gameId).splice(0,1)[0];
//push spliced card to player hand
gamePlayer.hand.push(drawnCard);//TODO:: addHand
//update phase
gamePhase = domain.turnPhaseEnum.play;//TODO:: setPhase
//apply save
this._svc.applyState(gameId)
//return spliced card
return mapper.cardToResource(drawnCard);
}
/**
* check parameters
* -cards must be at least 2 to chow
* check if player's turn and if hasn't drawn or chowed
* check client authenticity/sync to server gameState cards
* check if cards form a valid house
* Create house from player's hand
* Update phase
* SaveState
*/
public Chow(gameId:number,playerId:number,playerCardIds:number[]) {
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
if(playerCardIds.length < 2) //check if there are at least 2 cards
{
throw "badRequest:can't form a house with less than 3 cards"
}
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.drawOrChow);
this.validateCards(gamePlayer.hand.map(x => x.id),playerCardIds)//gameState,playerId,cardIds);
//check combination if possible
let proposedHand: domain.card[] = [];
//get lastDiscard as card
let gameDiscards = this._svc.getDiscards(gameId);
proposedHand.push(gameDiscards[gameDiscards.length-1]);//last discard
gamePlayer.hand.filter(x => playerCardIds.indexOf(x.id) != -1).forEach(x => proposedHand.push(x));//hand card to combine
// playerCardIds.map(x => gamePlayer.hand[gamePlayer.hand.map(x => x.id).indexOf(x)]).forEach(x => proposedHand.push(x));// same, just cool
this.validateCombination(proposedHand);
//new house
let gameHouses = this._svc.getHouses(gameId);
let newHouse:domain.house = new domain.house();
newHouse.id = gameHouses.length + 1;
newHouse.playerId = playerId;
//splice last discard to new house
newHouse.cards.push(gameDiscards.splice(gameDiscards.length - 1,1)[0]);
//splice from hand to new house
//map hand cards to id, map playerCardIds to it's index //TOOK some secs to do, some secs to lose, and 1 hour to remake
playerCardIds.forEach((x,i) => {
let cardIndexFromHand = gamePlayer.hand.map(hc => hc.id).indexOf(x);
let cardTaken = gamePlayer.hand.splice(cardIndexFromHand,1)[0];
newHouse.cards.push(new domain.playedCard(cardTaken,playerId,gameTurn))
})
this._svc.addHouse(gameId,newHouse);
//update phase
gamePhase = domain.turnPhaseEnum.play;//TODO:: setPhase
//check if won. condition: gamePlayer.hand.length == 0
if(gamePlayer.hand.length == 0)
|
this._svc.applyState(gameId);
}
public Play(gameId:number, playerId: number, playCards: resource.playRequestResource){
this.validateGameOnGoing(this._svc.getWinMethod(gameId));
//discard is required
if(!(playCards ? !isNaN(playCards.discard) ? true : false : false))
return;
let gameTurn = this._svc.getTurn(gameId);
let gamePhase = this._svc.getPhase(gameId);
let gamePlayer = this._svc.getPlayer(gameId,playerId);
let gamePlayerTurn = gamePlayer.turn;
let gamePlayerCount = this._svc.getPlayerCount(gameId);
let playerCardIds = [];
playerCardIds.push(...this.flatten(playCards));
this.validateTurnAndPhase(gameTurn,gamePlayerTurn,gamePlayerCount,gamePhase,domain.turnPhaseEnum.play);
this.validateCards(gamePlayer.hand.map(x => x.id),playerCardIds)//gameState,playerId,cardIds);
//validate each request part
//sapaw; forEach get house cards from game houses and player's card and validateCombination
let gameHouses = this._svc.getHouses(gameId);
playCards.sapaw.forEach(s => {
let proposedSapaw:domain.card[] = [];
let gameHouse = gameHouses.filter(gh => gh.id == s.houseId)[0].cards
let pHandCards = s.cardId.map(cid => gamePlayer.hand.filter(c => c.id == cid)[0])
proposedSapaw.push(...gameHouse,...pHandCards)
this.validateCombination(proposedSapaw);
})
//house; forEach, get house cards from request, validateCombination
playCards.houses.forEach(h => {
let proposedHouse:domain.card[] = [];
let house = h.map(x => gamePlayer.hand.filter(hc => hc.id == x)[0])
proposedHouse.push(...house);
this.validateCombination(proposedHouse);
})
//splice from hand, put to respective set of cards(list of sapaw, list of houses, discards)
playCards.sapaw.forEach(s => {
s.cardId.forEach(cid => {
let handCardIndex = gamePlayer.hand.map(hc => hc.id).indexOf(cid);
new domain.playedCard(gamePlayer.hand.splice(handCardIndex,1)[0],playerId,gameTurn)
})
})
playCards.houses.forEach(hs => {
let newHouse = new domain.house();
| {
this._svc.setWinner(gameId,playerId,domain.winMethodEnum.noHand);
} | conditional_block |
cookie_article_parser.py | if short_date == "Jun": return 6
if short_date == "Jul": return 7
if short_date == "Aug": return 8
if short_date == "Sep": return 9
if short_date == "Oct": return 10
if short_date == "Nov": return 11
if short_date == "Dec": return 12
return -1
def load_secret():
fname = "secret_sauce.dat"
f = open(fname,"r")
global secret_login
secret_login = f.readline().split(' ')[1].rstrip()
global secret_comments
secret_comments = f.readline().split(' ')[1].rstrip()
print secret_comments
f.close()
def load_db():
fname="dbcreds.dat"
f = open(fname,"r")
#rstrip() is needde here or else it includse the newline from the creds file
dbhost = f.readline().split(' ')[1].rstrip()
dbuser = f.readline().split(' ')[1].rstrip()
dbpw = f.readline().split(' ')[1].rstrip()
dbdb = f.readline().split(' ')[1].rstrip()
f.close()
db = my.connect(host=dbhost,
user=dbuser,
passwd=dbpw,
db=dbdb
)
return db
def close_db(db):
|
def load_creds_login():
fname="wsjcreds.dat"
f = open(fname,"r")
uname = f.readline().split(' ')[1]
pw = f.readline().split(' ')[1]
f.close()
# Input username
loginID = browser.find_element_by_id("username").send_keys(uname)
# Input password
loginPass = browser.find_element_by_id("password").send_keys(pw)
loginReady = browser.find_element_by_class_name("basic-login-submit")
loginReady.submit()
time.sleep(10) #this is needed because it takes time to login
def sel_init():
options = Options()
#options.add_argument("--headless")
browser = webdriver.Firefox(firefox_options=options, executable_path='/home/shelbyt/geckodriver')
return browser
def profile_comment_load(browser):
#for i in range(0,3):
# print "range"
# load_more = browser.find_element_by_class_name("load-more").click()
# #load_more.submit()
# time.sleep(3)
# Use this to press the 'load more' button until there is nothing left'
# can also use the 'page num' selector from the page itself to find how many pages to iterate
index = 0
while True:
try:
load_more = browser.find_element_by_class_name("load-more").click()
time.sleep(3)
index = index + 1
if (index%50) == 0:
print "50 Pages loaded"
except Exception as e:
print e
print index
break
print "Complete Loading User Comment Page"
def gather_comments(browser,db,cursor):
# Need to store date_time processing
t_arr= []
date_time = []
############# OBTAIN THESE ONCE PER PROFILE ##################
# username appears once
db_user_name= str(browser.find_elements_by_class_name("info-username")[0].text)
# get the uid returned using URL method
#url = browser.current_url
#match = re.search('.*\/(.*)\?',url)
#db_user_id = str(match.group(1))
# get the uid returned using css method
db_user_id = str(browser.find_element_by_css_selector("div.module.module-profile").get_attribute("data-vxid"))
# If we just grab this module we get EVERYTHING we need excluding the userID
# uid = browser.find_elements_by_class_name("module")
# print uid
# for elem in uid:
# print elem.text
############# ITERATE EACH COMMENT BLOCK ##################
for cblock in browser.find_elements_by_class_name("module-comments"):
likes = cblock.find_elements_by_class_name("comments-recommendations")[0]
# need to convert it to utf-8 or str format or else can't use string operators like split
likes= str(likes.text)
#The likes text looks like " 7 likes" so i just want the 7 part
likes = likes.split(' ')[0]
#ignore the header child and get the a-href text
headline = cblock.find_elements_by_class_name("comments-headline")[0].find_element_by_tag_name("a")
comment = cblock.find_elements_by_class_name("comments-content")[0]
time = cblock.find_elements_by_class_name("comments-time")[0]
real_t = time.text
real_t_arr = [x.encode('utf-8') for x in real_t.split()]
real_t_arr[1] = real_t_arr[1][:-1] # remove last comma
[hour,mins] = [int (x) for x in real_t_arr[3].split(':')]
t_format = datetime.timedelta(hours=hour,minutes=mins)
date_time.insert(0, datetime.datetime(int(real_t_arr[2]),
int(month_to_num(real_t_arr[0])),int(real_t_arr[1])))
if real_t_arr[4] == "PM": # for some reason using "IS" doesn't work here
if real_t_arr[3].split(':')[0] != "12":
t_format = t_format + datetime.timedelta(hours=12) # add 12 hours if it is PM
# I dont think this format() call is needed
date_time.insert(1,format(t_format))
t_arr.append(date_time) # append into final time array the sequence of times
################## INSERT EVERYTHING INTO THE GLOBAL DB STRINGS ##############
# just use the date part of date_time
db_date = datetime.datetime(int(real_t_arr[2]),
int(month_to_num(real_t_arr[0])),int(real_t_arr[1])).date().strftime("%Y-%m-%d")
db_time=t_format
# without this str encoding i get a 'latin-1 codec cant encode character'
# use ascii ignore to remove any weird characters
db_comment = str(comment.text).encode('ascii','ignore')
db_article_name = str(headline.text).encode('ascii','ignore')
db_likes = likes
################## PERFORM SQL QUERY ##############
# This works but it causes weird formatting issues because I'm not specifiyng the column
# sql="insert into Members values ('%s', '%s','%s','%s','%s','%s','%s')"
# % (db_user_id,db_user_name,db_date,db_time,db_comment,db_article_name,db_likes)
sql="insert into Members(user_id,user_name,date,time,comment,article_name,likes) VALUES (%s, %s,%s,%s,%s,%s,%s)"
num_rows=cursor.execute(sql,(db_user_id,db_user_name,db_date,db_time,db_comment,db_article_name,db_likes))
db.commit()
date_time = []
def article_comments_load(browser,article_url):
global profile_url
# Access comments
print secret_comments
comment_url = secret_comments+str(article_url)
print comment_url
# Load browser
browser.get(comment_url)
time.sleep(10)
# First load the entire page by clicking the button, works for up to 1000 comments
while True:
try:
loader = browser.find_elements_by_xpath("//*[@class='talk-load-more']/button")
# The last button on the page will be the one to load more. Other buttons on the page
# Are for loading replies. Don't think replies are that useful
loader[len(loader)-1].click()
time.sleep(5)
except Exception as e:
print e
break
print "Complete Loading Article Comments"
# We want to keep a list of the profile urls so we can access later
url_list = browser.find_elements_by_xpath("//*[@class='talk-stream-comment-container']/div/div/div/a")
# Insert into global list here
for user in url_list:
# Need to test if this works to maintain a unique list within article
# This is useful especially if we're getting replies
# If a user comments twice, only keep one.
if user not in profile_url:
# Get the actual users profile url here
# TODO(shelbyt): Fix bad naming user is a userblock
match = re.search('.*\/(.*)\?',user.get_attribute("href"))
# Returns a user_id which we can match with the database list
article_user_id = str(match.group(1))
# If the user_id from the url isn't found in the database list then
# we can insert it.
if article_user_id not in unique_users:
profile_url.append(user.get_attribute("href"))
return len(profile_url)
def insert_article_user_comments(browser, db, cursor):
login_flag = 0
for profile in profile_url:
print "Trying to load = " + profile
browser.get(profile)
if login_flag == 0:
load_creds_login()
login_flag = 1
profile_comment | db.close() | identifier_body |
cookie_article_parser.py | if short_date == "Jun": return 6
if short_date == "Jul": return 7
if short_date == "Aug": return 8
if short_date == "Sep": return 9
if short_date == "Oct": return 10
if short_date == "Nov": return 11
if short_date == "Dec": return 12
return -1
def load_secret():
fname = "secret_sauce.dat"
f = open(fname,"r")
global secret_login
secret_login = f.readline().split(' ')[1].rstrip()
global secret_comments
secret_comments = f.readline().split(' ')[1].rstrip()
print secret_comments
f.close()
def load_db():
fname="dbcreds.dat"
f = open(fname,"r")
#rstrip() is needde here or else it includse the newline from the creds file
dbhost = f.readline().split(' ')[1].rstrip()
dbuser = f.readline().split(' ')[1].rstrip()
dbpw = f.readline().split(' ')[1].rstrip()
dbdb = f.readline().split(' ')[1].rstrip()
f.close()
db = my.connect(host=dbhost,
user=dbuser,
passwd=dbpw,
db=dbdb
)
return db
def close_db(db):
db.close()
def load_creds_login():
fname="wsjcreds.dat"
f = open(fname,"r")
uname = f.readline().split(' ')[1]
pw = f.readline().split(' ')[1]
f.close()
# Input username
loginID = browser.find_element_by_id("username").send_keys(uname)
# Input password
loginPass = browser.find_element_by_id("password").send_keys(pw)
loginReady = browser.find_element_by_class_name("basic-login-submit")
loginReady.submit()
time.sleep(10) #this is needed because it takes time to login
def | ():
options = Options()
#options.add_argument("--headless")
browser = webdriver.Firefox(firefox_options=options, executable_path='/home/shelbyt/geckodriver')
return browser
def profile_comment_load(browser):
#for i in range(0,3):
# print "range"
# load_more = browser.find_element_by_class_name("load-more").click()
# #load_more.submit()
# time.sleep(3)
# Use this to press the 'load more' button until there is nothing left'
# can also use the 'page num' selector from the page itself to find how many pages to iterate
index = 0
while True:
try:
load_more = browser.find_element_by_class_name("load-more").click()
time.sleep(3)
index = index + 1
if (index%50) == 0:
print "50 Pages loaded"
except Exception as e:
print e
print index
break
print "Complete Loading User Comment Page"
def gather_comments(browser,db,cursor):
# Need to store date_time processing
t_arr= []
date_time = []
############# OBTAIN THESE ONCE PER PROFILE ##################
# username appears once
db_user_name= str(browser.find_elements_by_class_name("info-username")[0].text)
# get the uid returned using URL method
#url = browser.current_url
#match = re.search('.*\/(.*)\?',url)
#db_user_id = str(match.group(1))
# get the uid returned using css method
db_user_id = str(browser.find_element_by_css_selector("div.module.module-profile").get_attribute("data-vxid"))
# If we just grab this module we get EVERYTHING we need excluding the userID
# uid = browser.find_elements_by_class_name("module")
# print uid
# for elem in uid:
# print elem.text
############# ITERATE EACH COMMENT BLOCK ##################
for cblock in browser.find_elements_by_class_name("module-comments"):
likes = cblock.find_elements_by_class_name("comments-recommendations")[0]
# need to convert it to utf-8 or str format or else can't use string operators like split
likes= str(likes.text)
#The likes text looks like " 7 likes" so i just want the 7 part
likes = likes.split(' ')[0]
#ignore the header child and get the a-href text
headline = cblock.find_elements_by_class_name("comments-headline")[0].find_element_by_tag_name("a")
comment = cblock.find_elements_by_class_name("comments-content")[0]
time = cblock.find_elements_by_class_name("comments-time")[0]
real_t = time.text
real_t_arr = [x.encode('utf-8') for x in real_t.split()]
real_t_arr[1] = real_t_arr[1][:-1] # remove last comma
[hour,mins] = [int (x) for x in real_t_arr[3].split(':')]
t_format = datetime.timedelta(hours=hour,minutes=mins)
date_time.insert(0, datetime.datetime(int(real_t_arr[2]),
int(month_to_num(real_t_arr[0])),int(real_t_arr[1])))
if real_t_arr[4] == "PM": # for some reason using "IS" doesn't work here
if real_t_arr[3].split(':')[0] != "12":
t_format = t_format + datetime.timedelta(hours=12) # add 12 hours if it is PM
# I dont think this format() call is needed
date_time.insert(1,format(t_format))
t_arr.append(date_time) # append into final time array the sequence of times
################## INSERT EVERYTHING INTO THE GLOBAL DB STRINGS ##############
# just use the date part of date_time
db_date = datetime.datetime(int(real_t_arr[2]),
int(month_to_num(real_t_arr[0])),int(real_t_arr[1])).date().strftime("%Y-%m-%d")
db_time=t_format
# without this str encoding i get a 'latin-1 codec cant encode character'
# use ascii ignore to remove any weird characters
db_comment = str(comment.text).encode('ascii','ignore')
db_article_name = str(headline.text).encode('ascii','ignore')
db_likes = likes
################## PERFORM SQL QUERY ##############
# This works but it causes weird formatting issues because I'm not specifiyng the column
# sql="insert into Members values ('%s', '%s','%s','%s','%s','%s','%s')"
# % (db_user_id,db_user_name,db_date,db_time,db_comment,db_article_name,db_likes)
sql="insert into Members(user_id,user_name,date,time,comment,article_name,likes) VALUES (%s, %s,%s,%s,%s,%s,%s)"
num_rows=cursor.execute(sql,(db_user_id,db_user_name,db_date,db_time,db_comment,db_article_name,db_likes))
db.commit()
date_time = []
def article_comments_load(browser,article_url):
global profile_url
# Access comments
print secret_comments
comment_url = secret_comments+str(article_url)
print comment_url
# Load browser
browser.get(comment_url)
time.sleep(10)
# First load the entire page by clicking the button, works for up to 1000 comments
while True:
try:
loader = browser.find_elements_by_xpath("//*[@class='talk-load-more']/button")
# The last button on the page will be the one to load more. Other buttons on the page
# Are for loading replies. Don't think replies are that useful
loader[len(loader)-1].click()
time.sleep(5)
except Exception as e:
print e
break
print "Complete Loading Article Comments"
# We want to keep a list of the profile urls so we can access later
url_list = browser.find_elements_by_xpath("//*[@class='talk-stream-comment-container']/div/div/div/a")
# Insert into global list here
for user in url_list:
# Need to test if this works to maintain a unique list within article
# This is useful especially if we're getting replies
# If a user comments twice, only keep one.
if user not in profile_url:
# Get the actual users profile url here
# TODO(shelbyt): Fix bad naming user is a userblock
match = re.search('.*\/(.*)\?',user.get_attribute("href"))
# Returns a user_id which we can match with the database list
article_user_id = str(match.group(1))
# If the user_id from the url isn't found in the database list then
# we can insert it.
if article_user_id not in unique_users:
profile_url.append(user.get_attribute("href"))
return len(profile_url)
def insert_article_user_comments(browser, db, cursor):
login_flag = 0
for profile in profile_url:
print "Trying to load = " + profile
browser.get(profile)
if login_flag == 0:
load_creds_login()
login_flag = 1
profile_comment | sel_init | identifier_name |
cookie_article_parser.py | if short_date == "Jun": return 6
if short_date == "Jul": return 7
if short_date == "Aug": return 8
if short_date == "Sep": return 9
if short_date == "Oct": return 10
if short_date == "Nov": return 11
if short_date == "Dec": return 12
return -1
def load_secret():
fname = "secret_sauce.dat"
f = open(fname,"r")
global secret_login
secret_login = f.readline().split(' ')[1].rstrip()
global secret_comments
secret_comments = f.readline().split(' ')[1].rstrip()
print secret_comments
f.close()
def load_db():
fname="dbcreds.dat"
f = open(fname,"r")
#rstrip() is needde here or else it includse the newline from the creds file
dbhost = f.readline().split(' ')[1].rstrip()
dbuser = f.readline().split(' ')[1].rstrip()
dbpw = f.readline().split(' ')[1].rstrip()
dbdb = f.readline().split(' ')[1].rstrip()
f.close()
db = my.connect(host=dbhost,
user=dbuser,
passwd=dbpw,
db=dbdb
)
return db
def close_db(db):
db.close()
def load_creds_login():
fname="wsjcreds.dat"
f = open(fname,"r")
uname = f.readline().split(' ')[1]
pw = f.readline().split(' ')[1]
f.close()
# Input username
loginID = browser.find_element_by_id("username").send_keys(uname)
# Input password
loginPass = browser.find_element_by_id("password").send_keys(pw)
loginReady = browser.find_element_by_class_name("basic-login-submit")
loginReady.submit()
time.sleep(10) #this is needed because it takes time to login
def sel_init():
options = Options()
#options.add_argument("--headless")
browser = webdriver.Firefox(firefox_options=options, executable_path='/home/shelbyt/geckodriver')
return browser
def profile_comment_load(browser):
#for i in range(0,3):
# print "range"
# load_more = browser.find_element_by_class_name("load-more").click()
# #load_more.submit()
# time.sleep(3)
# Use this to press the 'load more' button until there is nothing left'
# can also use the 'page num' selector from the page itself to find how many pages to iterate
index = 0
while True:
try:
load_more = browser.find_element_by_class_name("load-more").click()
time.sleep(3)
index = index + 1
if (index%50) == 0:
print "50 Pages loaded"
except Exception as e:
print e
print index
break
print "Complete Loading User Comment Page"
def gather_comments(browser,db,cursor):
# Need to store date_time processing
t_arr= []
date_time = []
############# OBTAIN THESE ONCE PER PROFILE ##################
# username appears once
db_user_name= str(browser.find_elements_by_class_name("info-username")[0].text)
# get the uid returned using URL method
#url = browser.current_url
#match = re.search('.*\/(.*)\?',url)
#db_user_id = str(match.group(1))
# get the uid returned using css method
db_user_id = str(browser.find_element_by_css_selector("div.module.module-profile").get_attribute("data-vxid"))
# If we just grab this module we get EVERYTHING we need excluding the userID
# uid = browser.find_elements_by_class_name("module")
# print uid
# for elem in uid:
# print elem.text
############# ITERATE EACH COMMENT BLOCK ##################
for cblock in browser.find_elements_by_class_name("module-comments"):
likes = cblock.find_elements_by_class_name("comments-recommendations")[0]
# need to convert it to utf-8 or str format or else can't use string operators like split
likes= str(likes.text)
#The likes text looks like " 7 likes" so i just want the 7 part
likes = likes.split(' ')[0]
#ignore the header child and get the a-href text
headline = cblock.find_elements_by_class_name("comments-headline")[0].find_element_by_tag_name("a")
comment = cblock.find_elements_by_class_name("comments-content")[0]
time = cblock.find_elements_by_class_name("comments-time")[0]
real_t = time.text
real_t_arr = [x.encode('utf-8') for x in real_t.split()]
real_t_arr[1] = real_t_arr[1][:-1] # remove last comma
[hour,mins] = [int (x) for x in real_t_arr[3].split(':')]
t_format = datetime.timedelta(hours=hour,minutes=mins)
date_time.insert(0, datetime.datetime(int(real_t_arr[2]),
int(month_to_num(real_t_arr[0])),int(real_t_arr[1])))
if real_t_arr[4] == "PM": # for some reason using "IS" doesn't work here
if real_t_arr[3].split(':')[0] != "12":
t_format = t_format + datetime.timedelta(hours=12) # add 12 hours if it is PM
# I dont think this format() call is needed
date_time.insert(1,format(t_format))
t_arr.append(date_time) # append into final time array the sequence of times
################## INSERT EVERYTHING INTO THE GLOBAL DB STRINGS ##############
# just use the date part of date_time
db_date = datetime.datetime(int(real_t_arr[2]),
int(month_to_num(real_t_arr[0])),int(real_t_arr[1])).date().strftime("%Y-%m-%d")
db_time=t_format
# without this str encoding i get a 'latin-1 codec cant encode character'
# use ascii ignore to remove any weird characters
db_comment = str(comment.text).encode('ascii','ignore')
db_article_name = str(headline.text).encode('ascii','ignore')
db_likes = likes
################## PERFORM SQL QUERY ##############
# This works but it causes weird formatting issues because I'm not specifiyng the column
# sql="insert into Members values ('%s', '%s','%s','%s','%s','%s','%s')"
# % (db_user_id,db_user_name,db_date,db_time,db_comment,db_article_name,db_likes)
sql="insert into Members(user_id,user_name,date,time,comment,article_name,likes) VALUES (%s, %s,%s,%s,%s,%s,%s)"
num_rows=cursor.execute(sql,(db_user_id,db_user_name,db_date,db_time,db_comment,db_article_name,db_likes))
db.commit()
date_time = []
def article_comments_load(browser,article_url):
global profile_url
# Access comments
print secret_comments
comment_url = secret_comments+str(article_url)
print comment_url
# Load browser
browser.get(comment_url)
time.sleep(10)
# First load the entire page by clicking the button, works for up to 1000 comments
while True:
try:
loader = browser.find_elements_by_xpath("//*[@class='talk-load-more']/button")
# The last button on the page will be the one to load more. Other buttons on the page
# Are for loading replies. Don't think replies are that useful
loader[len(loader)-1].click()
time.sleep(5)
except Exception as e:
print e
break
print "Complete Loading Article Comments"
# We want to keep a list of the profile urls so we can access later
url_list = browser.find_elements_by_xpath("//*[@class='talk-stream-comment-container']/div/div/div/a")
# Insert into global list here
for user in url_list:
# Need to test if this works to maintain a unique list within article
# This is useful especially if we're getting replies
# If a user comments twice, only keep one.
|
return len(profile_url)
def insert_article_user_comments(browser, db, cursor):
login_flag = 0
for profile in profile_url:
print "Trying to load = " + profile
browser.get(profile)
if login_flag == 0:
load_creds_login()
login_flag = 1
profile_comment | if user not in profile_url:
# Get the actual users profile url here
# TODO(shelbyt): Fix bad naming user is a userblock
match = re.search('.*\/(.*)\?',user.get_attribute("href"))
# Returns a user_id which we can match with the database list
article_user_id = str(match.group(1))
# If the user_id from the url isn't found in the database list then
# we can insert it.
if article_user_id not in unique_users:
profile_url.append(user.get_attribute("href")) | conditional_block |
cookie_article_parser.py | if short_date == "Jun": return 6
if short_date == "Jul": return 7
if short_date == "Aug": return 8
if short_date == "Sep": return 9
if short_date == "Oct": return 10
if short_date == "Nov": return 11
if short_date == "Dec": return 12
return -1
def load_secret():
fname = "secret_sauce.dat"
f = open(fname,"r")
global secret_login
secret_login = f.readline().split(' ')[1].rstrip()
global secret_comments
secret_comments = f.readline().split(' ')[1].rstrip()
print secret_comments
f.close()
def load_db():
fname="dbcreds.dat"
f = open(fname,"r")
#rstrip() is needde here or else it includse the newline from the creds file
dbhost = f.readline().split(' ')[1].rstrip()
dbuser = f.readline().split(' ')[1].rstrip()
dbpw = f.readline().split(' ')[1].rstrip()
dbdb = f.readline().split(' ')[1].rstrip()
f.close()
db = my.connect(host=dbhost,
user=dbuser,
passwd=dbpw,
db=dbdb
)
return db
def close_db(db):
db.close()
def load_creds_login():
fname="wsjcreds.dat"
f = open(fname,"r")
uname = f.readline().split(' ')[1]
pw = f.readline().split(' ')[1]
f.close()
# Input username
loginID = browser.find_element_by_id("username").send_keys(uname)
# Input password
loginPass = browser.find_element_by_id("password").send_keys(pw)
loginReady = browser.find_element_by_class_name("basic-login-submit")
loginReady.submit()
time.sleep(10) #this is needed because it takes time to login
def sel_init():
options = Options()
#options.add_argument("--headless")
browser = webdriver.Firefox(firefox_options=options, executable_path='/home/shelbyt/geckodriver')
return browser
def profile_comment_load(browser):
#for i in range(0,3):
# print "range"
# load_more = browser.find_element_by_class_name("load-more").click()
# #load_more.submit()
# time.sleep(3)
# Use this to press the 'load more' button until there is nothing left'
# can also use the 'page num' selector from the page itself to find how many pages to iterate
index = 0
while True:
try:
load_more = browser.find_element_by_class_name("load-more").click()
time.sleep(3)
index = index + 1
if (index%50) == 0:
print "50 Pages loaded"
except Exception as e:
print e
print index
break
print "Complete Loading User Comment Page"
def gather_comments(browser,db,cursor):
# Need to store date_time processing
t_arr= []
date_time = []
############# OBTAIN THESE ONCE PER PROFILE ##################
# username appears once
db_user_name= str(browser.find_elements_by_class_name("info-username")[0].text)
# get the uid returned using URL method
#url = browser.current_url
#match = re.search('.*\/(.*)\?',url)
#db_user_id = str(match.group(1))
# get the uid returned using css method
db_user_id = str(browser.find_element_by_css_selector("div.module.module-profile").get_attribute("data-vxid"))
# If we just grab this module we get EVERYTHING we need excluding the userID
# uid = browser.find_elements_by_class_name("module")
# print uid
# for elem in uid:
# print elem.text
############# ITERATE EACH COMMENT BLOCK ##################
for cblock in browser.find_elements_by_class_name("module-comments"):
likes = cblock.find_elements_by_class_name("comments-recommendations")[0]
# need to convert it to utf-8 or str format or else can't use string operators like split
likes= str(likes.text)
#The likes text looks like " 7 likes" so i just want the 7 part
likes = likes.split(' ')[0]
#ignore the header child and get the a-href text
headline = cblock.find_elements_by_class_name("comments-headline")[0].find_element_by_tag_name("a")
comment = cblock.find_elements_by_class_name("comments-content")[0]
time = cblock.find_elements_by_class_name("comments-time")[0]
real_t = time.text
real_t_arr = [x.encode('utf-8') for x in real_t.split()]
real_t_arr[1] = real_t_arr[1][:-1] # remove last comma
[hour,mins] = [int (x) for x in real_t_arr[3].split(':')]
t_format = datetime.timedelta(hours=hour,minutes=mins)
date_time.insert(0, datetime.datetime(int(real_t_arr[2]),
int(month_to_num(real_t_arr[0])),int(real_t_arr[1])))
if real_t_arr[4] == "PM": # for some reason using "IS" doesn't work here
if real_t_arr[3].split(':')[0] != "12":
t_format = t_format + datetime.timedelta(hours=12) # add 12 hours if it is PM
# I dont think this format() call is needed
date_time.insert(1,format(t_format))
t_arr.append(date_time) # append into final time array the sequence of times
| db_date = datetime.datetime(int(real_t_arr[2]),
int(month_to_num(real_t_arr[0])),int(real_t_arr[1])).date().strftime("%Y-%m-%d")
db_time=t_format
# without this str encoding i get a 'latin-1 codec cant encode character'
# use ascii ignore to remove any weird characters
db_comment = str(comment.text).encode('ascii','ignore')
db_article_name = str(headline.text).encode('ascii','ignore')
db_likes = likes
################## PERFORM SQL QUERY ##############
# This works but it causes weird formatting issues because I'm not specifiyng the column
# sql="insert into Members values ('%s', '%s','%s','%s','%s','%s','%s')"
# % (db_user_id,db_user_name,db_date,db_time,db_comment,db_article_name,db_likes)
sql="insert into Members(user_id,user_name,date,time,comment,article_name,likes) VALUES (%s, %s,%s,%s,%s,%s,%s)"
num_rows=cursor.execute(sql,(db_user_id,db_user_name,db_date,db_time,db_comment,db_article_name,db_likes))
db.commit()
date_time = []
def article_comments_load(browser,article_url):
global profile_url
# Access comments
print secret_comments
comment_url = secret_comments+str(article_url)
print comment_url
# Load browser
browser.get(comment_url)
time.sleep(10)
# First load the entire page by clicking the button, works for up to 1000 comments
while True:
try:
loader = browser.find_elements_by_xpath("//*[@class='talk-load-more']/button")
# The last button on the page will be the one to load more. Other buttons on the page
# Are for loading replies. Don't think replies are that useful
loader[len(loader)-1].click()
time.sleep(5)
except Exception as e:
print e
break
print "Complete Loading Article Comments"
# We want to keep a list of the profile urls so we can access later
url_list = browser.find_elements_by_xpath("//*[@class='talk-stream-comment-container']/div/div/div/a")
# Insert into global list here
for user in url_list:
# Need to test if this works to maintain a unique list within article
# This is useful especially if we're getting replies
# If a user comments twice, only keep one.
if user not in profile_url:
# Get the actual users profile url here
# TODO(shelbyt): Fix bad naming user is a userblock
match = re.search('.*\/(.*)\?',user.get_attribute("href"))
# Returns a user_id which we can match with the database list
article_user_id = str(match.group(1))
# If the user_id from the url isn't found in the database list then
# we can insert it.
if article_user_id not in unique_users:
profile_url.append(user.get_attribute("href"))
return len(profile_url)
def insert_article_user_comments(browser, db, cursor):
login_flag = 0
for profile in profile_url:
print "Trying to load = " + profile
browser.get(profile)
if login_flag == 0:
load_creds_login()
login_flag = 1
profile_comment | ################## INSERT EVERYTHING INTO THE GLOBAL DB STRINGS ##############
# just use the date part of date_time | random_line_split |
pylon.go | nil {
return nil, err
}
m.Route = RegexRoute{
Regex: reg,
}
} else if s.Prefix != "" {
m.Route = PrefixRoute{
Prefix: s.Prefix,
}
} else {
return nil, ErrServiceNoRoute
}
maxCon := defaultMaxCon
if s.MaxCon > 0 {
maxCon = s.MaxCon
}
var weightSum float32 = 0.0
for _, inst := range s.Instances {
var weight float32 = 1
if inst.Weight > 0 {
weight = inst.Weight
}
weightSum += weight
m.Instances = append(m.Instances, &Instance{
inst.Host,
weight,
make(chan int, maxCon),
NewSharedInt(0),
})
}
m.Name = s.Name
m.Strategy = s.Strategy
m.Mutex = &sync.RWMutex{}
m.BlackList = make(map[int]bool, len(s.Instances))
m.LastUsedIdx = NewSharedInt(0)
m.ReqCount = make(chan int, maxCon)
m.WeightSum = weightSum
m.HealthCheck = s.HealthCheck
if m.HealthCheck.Interval == 0 {
m.HealthCheck.Interval = defaultHealthCheckInterval
}
return m, nil
}
// serve serves a Pylon with all of its MicroServices given
// a port to listen to and a route that will be used to access
// some stats about this very Pylon
func serve(p *Pylon, port int, healthRoute string) {
mux := http.NewServeMux()
mux.Handle("/", NewPylonHandler(p))
mux.Handle(healthRoute, NewPylonHealthHandler(p))
server := &http.Server{
Addr: ":" + strconv.Itoa(port),
Handler: mux,
ReadTimeout: 20 * time.Second,
WriteTimeout: 20 * time.Second,
MaxHeaderBytes: 1 << 20,
}
for _, s := range p.Services {
logDebug("Starting initial health check of service: " + s.Name)
d := &net.Dialer{
Timeout: defaultDialerTimeout,
}
if s.HealthCheck.DialTO != 0 {
d.Timeout = time.Second * time.Duration(s.HealthCheck.DialTO)
}
// Do an initial health check
go handleHealthCheck(s, d)
if s.HealthCheck.Enabled {
go startPeriodicHealthCheck(s, time.Second * time.Duration(s.HealthCheck.Interval), d)
logDebug("Periodic Health checks started for service: " + s.Name)
}
}
logInfo("Serving on " + strconv.Itoa(port))
server.ListenAndServe()
}
// startPeriodicHealthCheck starts a timer that will check
// the health of the given MicroService given an interval and
// a dialer which is used to ping the instances/endpoints
func startPeriodicHealthCheck(m *MicroService, interval time.Duration, d *net.Dialer) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for t := range ticker.C {
logVerbose("Checking health of Service:", m.Route, " ---tick:", t)
handleHealthCheck(m, d)
}
}
// handleHealthCheck checks whether every instance of the given
// MicroService is UP or DOWN. Performed by the given Dialer
func handleHealthCheck(m *MicroService, d *net.Dialer) bool {
change := false
for i, inst := range m.Instances {
_, err := d.Dial("tcp", inst.Host)
if err != nil {
if !m.isBlacklisted(i) {
m.blackList(i, true)
logInfo("Instance: " + inst.Host + " is now marked as DOWN")
change = true
}
} else {
if m.isBlacklisted(i) {
m.blackList(i, false)
logInfo("Instance: " + inst.Host + " is now marked as UP")
change = true
}
}
}
return change
}
// NewPylonHandler returns a func(w http.ResponseWriter, r *http.Request)
// that will handle incoming requests to the given Pylon
func NewPylonHandler(p *Pylon) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
//route, err := p.getRoute(r.URL.Path)
//if err != nil {
// logError(err)
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
//}
m, err := p.getMicroServiceFromRoute(r.URL.Path)
if err != nil || m == nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
inst, _, err := m.getLoadBalancedInstance()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
m.ReqCount <- 1
inst.ReqCount <- 1
logVerbose("Serving " + r.URL.Path + r.URL.RawQuery + ", current request count: " + strconv.Itoa(len(m.ReqCount)))
logVerbose("Instance is " + inst.Host)
proxy := proxyPool.Get()
setUpProxy(proxy, m, inst.Host)
proxy.ServeHTTP(w, r)
proxyPool.Put(proxy)
<-inst.ReqCount
<-m.ReqCount
logVerbose("Request served, count: " + strconv.Itoa(len(m.ReqCount)))
}
}
// NewPylonHealthHandler returns a func(w http.ResponseWriter, r *http.Request)
// that will collect and render some stats about the given Pylon:
// (Name / Strategy / Current request count)
// For every instance: (UP or DOWN / Host / Weight / Current request count)
func NewPylonHealthHandler(p *Pylon) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
t, err := template.New("PylonHealthTemplate").Parse(pylonTemplate)
if err != nil {
logError(err.Error())
}
if err := t.Execute(w, getRenders(p)); err != nil {
logError("Could not render the HTML template")
}
logDebug("Served heath page HTML")
}
}
// getMicroServiceFromRoute returns the first MicroService
// that matches the given route (nil and an error if no
// MicroService could match that route)
func (p *Pylon) getMicroServiceFromRoute(path string) (*MicroService, error) {
for _, ser := range p.Services {
switch ser.Route.Type() {
case Regex:
reg := ser.Route.Data().(*regexp.Regexp)
if reg.Match([]byte(path)) {
return ser, nil
}
case Prefix:
pref := ser.Route.Data().(string)
if strings.HasPrefix(path, pref) {
return ser, nil
}
default:
return nil, ErrInvalidRouteType
}
}
return nil, NewError(ErrRouteNoRouteCode, "No route available for path " + path)
}
// getLoadBalancedInstance will return a load balanced Instance
// according to the MicroService strategy and current state
func (m *MicroService) getLoadBalancedInstance() (*Instance, int, error) {
instCount := len(m.Instances)
if instCount == 0 {
return nil, -1, ErrServiceNoInstance
}
if len(m.BlackList) == instCount {
return nil, -1, ErrAllInstancesDown
}
instances := make([]*Instance, instCount)
copy(instances, m.Instances)
var idx int
var err error
for {
switch m.Strategy {
case RoundRobin:
idx, err = getRoundRobinInstIdx(instances, m.LastUsedIdx.Get())
case LeastConnected:
idx = getLeastConInstIdx(instances)
case Random:
idx = getRandomInstIdx(instances)
default:
return nil, -1, NewError(ErrInvalidStrategyCode, "Unexpected strategy " + string(m.Strategy))
}
if err != nil {
return nil, -1, err
}
if m.isBlacklisted(idx) {
instances[idx] = nil
} else {
m.LastUsedIdx.Set(idx)
return instances[idx], idx, nil
}
}
}
// getRoundRobinInstIdx returns the index of the Instance that should be
// picked according to round robin rules and a given slice of Instance
func getRoundRobinInstIdx(instances []*Instance, idx int) (int, error) {
tryCount := 1
instCount := len(instances)
lastNonNil := -1
for {
inst := instances[idx]
if inst != nil {
if inst.isRoundRobinPicked() {
break
}
lastNonNil = idx
}
idx++
tryCount++
if tryCount > instCount {
if lastNonNil != -1 {
return lastNonNil, nil
}
return -1, ErrFailedRoundRobin
}
if idx >= instCount {
idx = 0
}
}
return idx, nil
}
// getLeastConInstIdx returns the index of the Instance that should be
// picked according to the least connected rules and a given slice of Instance
| random_line_split | ||
pylon.go | MicroService returns a new MicroService object given a Service
func NewMicroService(s *Service) (*MicroService, error) {
m := &MicroService{}
if s.Pattern != "" {
reg, err := regexp.Compile(s.Pattern)
if err != nil {
return nil, err
}
m.Route = RegexRoute{
Regex: reg,
}
} else if s.Prefix != "" {
m.Route = PrefixRoute{
Prefix: s.Prefix,
}
} else {
return nil, ErrServiceNoRoute
}
maxCon := defaultMaxCon
if s.MaxCon > 0 {
maxCon = s.MaxCon
}
var weightSum float32 = 0.0
for _, inst := range s.Instances {
var weight float32 = 1
if inst.Weight > 0 {
weight = inst.Weight
}
weightSum += weight
m.Instances = append(m.Instances, &Instance{
inst.Host,
weight,
make(chan int, maxCon),
NewSharedInt(0),
})
}
m.Name = s.Name
m.Strategy = s.Strategy
m.Mutex = &sync.RWMutex{}
m.BlackList = make(map[int]bool, len(s.Instances))
m.LastUsedIdx = NewSharedInt(0)
m.ReqCount = make(chan int, maxCon)
m.WeightSum = weightSum
m.HealthCheck = s.HealthCheck
if m.HealthCheck.Interval == 0 {
m.HealthCheck.Interval = defaultHealthCheckInterval
}
return m, nil
}
// serve serves a Pylon with all of its MicroServices given
// a port to listen to and a route that will be used to access
// some stats about this very Pylon
func serve(p *Pylon, port int, healthRoute string) {
mux := http.NewServeMux()
mux.Handle("/", NewPylonHandler(p))
mux.Handle(healthRoute, NewPylonHealthHandler(p))
server := &http.Server{
Addr: ":" + strconv.Itoa(port),
Handler: mux,
ReadTimeout: 20 * time.Second,
WriteTimeout: 20 * time.Second,
MaxHeaderBytes: 1 << 20,
}
for _, s := range p.Services {
logDebug("Starting initial health check of service: " + s.Name)
d := &net.Dialer{
Timeout: defaultDialerTimeout,
}
if s.HealthCheck.DialTO != 0 {
d.Timeout = time.Second * time.Duration(s.HealthCheck.DialTO)
}
// Do an initial health check
go handleHealthCheck(s, d)
if s.HealthCheck.Enabled {
go startPeriodicHealthCheck(s, time.Second * time.Duration(s.HealthCheck.Interval), d)
logDebug("Periodic Health checks started for service: " + s.Name)
}
}
logInfo("Serving on " + strconv.Itoa(port))
server.ListenAndServe()
}
// startPeriodicHealthCheck starts a timer that will check
// the health of the given MicroService given an interval and
// a dialer which is used to ping the instances/endpoints
func startPeriodicHealthCheck(m *MicroService, interval time.Duration, d *net.Dialer) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for t := range ticker.C {
logVerbose("Checking health of Service:", m.Route, " ---tick:", t)
handleHealthCheck(m, d)
}
}
// handleHealthCheck checks whether every instance of the given
// MicroService is UP or DOWN. Performed by the given Dialer
func handleHealthCheck(m *MicroService, d *net.Dialer) bool {
change := false
for i, inst := range m.Instances {
_, err := d.Dial("tcp", inst.Host)
if err != nil {
if !m.isBlacklisted(i) {
m.blackList(i, true)
logInfo("Instance: " + inst.Host + " is now marked as DOWN")
change = true
}
} else {
if m.isBlacklisted(i) {
m.blackList(i, false)
logInfo("Instance: " + inst.Host + " is now marked as UP")
change = true
}
}
}
return change
}
// NewPylonHandler returns a func(w http.ResponseWriter, r *http.Request)
// that will handle incoming requests to the given Pylon
func NewPylonHandler(p *Pylon) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
//route, err := p.getRoute(r.URL.Path)
//if err != nil {
// logError(err)
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
//}
m, err := p.getMicroServiceFromRoute(r.URL.Path)
if err != nil || m == nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
inst, _, err := m.getLoadBalancedInstance()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
m.ReqCount <- 1
inst.ReqCount <- 1
logVerbose("Serving " + r.URL.Path + r.URL.RawQuery + ", current request count: " + strconv.Itoa(len(m.ReqCount)))
logVerbose("Instance is " + inst.Host)
proxy := proxyPool.Get()
setUpProxy(proxy, m, inst.Host)
proxy.ServeHTTP(w, r)
proxyPool.Put(proxy)
<-inst.ReqCount
<-m.ReqCount
logVerbose("Request served, count: " + strconv.Itoa(len(m.ReqCount)))
}
}
// NewPylonHealthHandler returns a func(w http.ResponseWriter, r *http.Request)
// that will collect and render some stats about the given Pylon:
// (Name / Strategy / Current request count)
// For every instance: (UP or DOWN / Host / Weight / Current request count)
func NewPylonHealthHandler(p *Pylon) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
t, err := template.New("PylonHealthTemplate").Parse(pylonTemplate)
if err != nil {
logError(err.Error())
}
if err := t.Execute(w, getRenders(p)); err != nil {
logError("Could not render the HTML template")
}
logDebug("Served heath page HTML")
}
}
// getMicroServiceFromRoute returns the first MicroService
// that matches the given route (nil and an error if no
// MicroService could match that route)
func (p *Pylon) getMicroServiceFromRoute(path string) (*MicroService, error) {
for _, ser := range p.Services {
switch ser.Route.Type() {
case Regex:
reg := ser.Route.Data().(*regexp.Regexp)
if reg.Match([]byte(path)) {
return ser, nil
}
case Prefix:
pref := ser.Route.Data().(string)
if strings.HasPrefix(path, pref) {
return ser, nil
}
default:
return nil, ErrInvalidRouteType
}
}
return nil, NewError(ErrRouteNoRouteCode, "No route available for path " + path)
}
// getLoadBalancedInstance will return a load balanced Instance
// according to the MicroService strategy and current state
func (m *MicroService) getLoadBalancedInstance() (*Instance, int, error) {
instCount := len(m.Instances)
if instCount == 0 {
return nil, -1, ErrServiceNoInstance
}
if len(m.BlackList) == instCount {
return nil, -1, ErrAllInstancesDown
}
instances := make([]*Instance, instCount)
copy(instances, m.Instances)
var idx int
var err error
for {
switch m.Strategy {
case RoundRobin:
idx, err = getRoundRobinInstIdx(instances, m.LastUsedIdx.Get())
case LeastConnected:
idx = getLeastConInstIdx(instances)
case Random:
idx = getRandomInstIdx(instances)
default:
return nil, -1, NewError(ErrInvalidStrategyCode, "Unexpected strategy " + string(m.Strategy))
}
if err != nil {
return nil, -1, err
}
if m.isBlacklisted(idx) {
instances[idx] = nil
} else {
m.LastUsedIdx.Set(idx)
return instances[idx], idx, nil
}
}
}
// getRoundRobinInstIdx returns the index of the Instance that should be
// picked according to round robin rules and a given slice of Instance
func getRoundRobinInstIdx(instances []*Instance, idx int) (int, error) {
tryCount := 1
instCount := len(instances)
lastNonNil := -1
for {
inst := instances[idx]
if inst != nil {
if inst.isRoundRobinPicked() {
break
}
lastNonNil = idx
}
idx++
tryCount++
if tryCount > instCount | {
if lastNonNil != -1 {
return lastNonNil, nil
}
return -1, ErrFailedRoundRobin
} | conditional_block | |
pylon.go | var weight float32 = 1
if inst.Weight > 0 {
weight = inst.Weight
}
weightSum += weight
m.Instances = append(m.Instances, &Instance{
inst.Host,
weight,
make(chan int, maxCon),
NewSharedInt(0),
})
}
m.Name = s.Name
m.Strategy = s.Strategy
m.Mutex = &sync.RWMutex{}
m.BlackList = make(map[int]bool, len(s.Instances))
m.LastUsedIdx = NewSharedInt(0)
m.ReqCount = make(chan int, maxCon)
m.WeightSum = weightSum
m.HealthCheck = s.HealthCheck
if m.HealthCheck.Interval == 0 {
m.HealthCheck.Interval = defaultHealthCheckInterval
}
return m, nil
}
// serve serves a Pylon with all of its MicroServices given
// a port to listen to and a route that will be used to access
// some stats about this very Pylon
func serve(p *Pylon, port int, healthRoute string) {
mux := http.NewServeMux()
mux.Handle("/", NewPylonHandler(p))
mux.Handle(healthRoute, NewPylonHealthHandler(p))
server := &http.Server{
Addr: ":" + strconv.Itoa(port),
Handler: mux,
ReadTimeout: 20 * time.Second,
WriteTimeout: 20 * time.Second,
MaxHeaderBytes: 1 << 20,
}
for _, s := range p.Services {
logDebug("Starting initial health check of service: " + s.Name)
d := &net.Dialer{
Timeout: defaultDialerTimeout,
}
if s.HealthCheck.DialTO != 0 {
d.Timeout = time.Second * time.Duration(s.HealthCheck.DialTO)
}
// Do an initial health check
go handleHealthCheck(s, d)
if s.HealthCheck.Enabled {
go startPeriodicHealthCheck(s, time.Second * time.Duration(s.HealthCheck.Interval), d)
logDebug("Periodic Health checks started for service: " + s.Name)
}
}
logInfo("Serving on " + strconv.Itoa(port))
server.ListenAndServe()
}
// startPeriodicHealthCheck starts a timer that will check
// the health of the given MicroService given an interval and
// a dialer which is used to ping the instances/endpoints
func startPeriodicHealthCheck(m *MicroService, interval time.Duration, d *net.Dialer) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for t := range ticker.C {
logVerbose("Checking health of Service:", m.Route, " ---tick:", t)
handleHealthCheck(m, d)
}
}
// handleHealthCheck checks whether every instance of the given
// MicroService is UP or DOWN. Performed by the given Dialer
func handleHealthCheck(m *MicroService, d *net.Dialer) bool {
change := false
for i, inst := range m.Instances {
_, err := d.Dial("tcp", inst.Host)
if err != nil {
if !m.isBlacklisted(i) {
m.blackList(i, true)
logInfo("Instance: " + inst.Host + " is now marked as DOWN")
change = true
}
} else {
if m.isBlacklisted(i) {
m.blackList(i, false)
logInfo("Instance: " + inst.Host + " is now marked as UP")
change = true
}
}
}
return change
}
// NewPylonHandler returns a func(w http.ResponseWriter, r *http.Request)
// that will handle incoming requests to the given Pylon
func NewPylonHandler(p *Pylon) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
//route, err := p.getRoute(r.URL.Path)
//if err != nil {
// logError(err)
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
//}
m, err := p.getMicroServiceFromRoute(r.URL.Path)
if err != nil || m == nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
inst, _, err := m.getLoadBalancedInstance()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
m.ReqCount <- 1
inst.ReqCount <- 1
logVerbose("Serving " + r.URL.Path + r.URL.RawQuery + ", current request count: " + strconv.Itoa(len(m.ReqCount)))
logVerbose("Instance is " + inst.Host)
proxy := proxyPool.Get()
setUpProxy(proxy, m, inst.Host)
proxy.ServeHTTP(w, r)
proxyPool.Put(proxy)
<-inst.ReqCount
<-m.ReqCount
logVerbose("Request served, count: " + strconv.Itoa(len(m.ReqCount)))
}
}
// NewPylonHealthHandler returns a func(w http.ResponseWriter, r *http.Request)
// that will collect and render some stats about the given Pylon:
// (Name / Strategy / Current request count)
// For every instance: (UP or DOWN / Host / Weight / Current request count)
func NewPylonHealthHandler(p *Pylon) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
t, err := template.New("PylonHealthTemplate").Parse(pylonTemplate)
if err != nil {
logError(err.Error())
}
if err := t.Execute(w, getRenders(p)); err != nil {
logError("Could not render the HTML template")
}
logDebug("Served heath page HTML")
}
}
// getMicroServiceFromRoute returns the first MicroService
// that matches the given route (nil and an error if no
// MicroService could match that route)
func (p *Pylon) getMicroServiceFromRoute(path string) (*MicroService, error) {
for _, ser := range p.Services {
switch ser.Route.Type() {
case Regex:
reg := ser.Route.Data().(*regexp.Regexp)
if reg.Match([]byte(path)) {
return ser, nil
}
case Prefix:
pref := ser.Route.Data().(string)
if strings.HasPrefix(path, pref) {
return ser, nil
}
default:
return nil, ErrInvalidRouteType
}
}
return nil, NewError(ErrRouteNoRouteCode, "No route available for path " + path)
}
// getLoadBalancedInstance will return a load balanced Instance
// according to the MicroService strategy and current state
func (m *MicroService) getLoadBalancedInstance() (*Instance, int, error) {
instCount := len(m.Instances)
if instCount == 0 {
return nil, -1, ErrServiceNoInstance
}
if len(m.BlackList) == instCount {
return nil, -1, ErrAllInstancesDown
}
instances := make([]*Instance, instCount)
copy(instances, m.Instances)
var idx int
var err error
for {
switch m.Strategy {
case RoundRobin:
idx, err = getRoundRobinInstIdx(instances, m.LastUsedIdx.Get())
case LeastConnected:
idx = getLeastConInstIdx(instances)
case Random:
idx = getRandomInstIdx(instances)
default:
return nil, -1, NewError(ErrInvalidStrategyCode, "Unexpected strategy " + string(m.Strategy))
}
if err != nil {
return nil, -1, err
}
if m.isBlacklisted(idx) {
instances[idx] = nil
} else {
m.LastUsedIdx.Set(idx)
return instances[idx], idx, nil
}
}
}
// getRoundRobinInstIdx returns the index of the Instance that should be
// picked according to round robin rules and a given slice of Instance
func getRoundRobinInstIdx(instances []*Instance, idx int) (int, error) {
tryCount := 1
instCount := len(instances)
lastNonNil := -1
for {
inst := instances[idx]
if inst != nil {
if inst.isRoundRobinPicked() {
break
}
lastNonNil = idx
}
idx++
tryCount++
if tryCount > instCount {
if lastNonNil != -1 {
return lastNonNil, nil
}
return -1, ErrFailedRoundRobin
}
if idx >= instCount {
idx = 0
}
}
return idx, nil
}
// getLeastConInstIdx returns the index of the Instance that should be
// picked according to the least connected rules and a given slice of Instance
// Least Connected returns the least loaded instance, which is
// computed by the current request count divided by the weight of the instance
func getLeastConInstIdx(instances []*Instance) int | {
minLoad := maxFloat32
idx := 0
for i, inst := range instances {
if inst == nil {
continue
}
load := float32(len(inst.ReqCount)) / inst.Weight
if load < minLoad {
minLoad = load
idx = i
}
}
return idx
} | identifier_body | |
pylon.go | RegexRoute struct {
Regex *regexp.Regexp
}
type PrefixRoute struct {
Prefix string
}
func (r RegexRoute) Type() RouteType {
return Regex
}
func (r RegexRoute) Data() interface{} {
return r.Regex
}
func (p PrefixRoute) | () RouteType {
return Prefix
}
func (p PrefixRoute) Data() interface{} {
return p.Prefix
}
// ListenAndServe tries to parse the config at the given path and serve it
func ListenAndServe(p string) error {
jsonParser := JSONConfigParser{}
c, err := jsonParser.ParseFromPath(p)
if err != nil {
return err
}
return ListenAndServeConfig(c)
}
// ListenAndServeConfig converts a given config to an exploitable
// structure (MicroService) and serves them
func ListenAndServeConfig(c *Config) error {
wg := sync.WaitGroup{}
// Initializing the pool before hand in case one server
// Gets a request as soon as it's served
poolSize := 0
for _, s := range c.Servers {
for _, ser := range s.Services {
if ser.MaxCon == 0 {
poolSize += defaultProxyPoolCapacity
} else {
poolSize += ser.MaxCon
}
}
}
logDebug("Pool size is", poolSize)
proxyPool = NewProxyPool(poolSize)
wg.Add(len(c.Servers))
for _, s := range c.Servers {
p, err := NewPylon(&s)
if err != nil {
return err
}
healthRoute := defaultHealthRoute
if s.HealthRoute != "" {
healthRoute = s.HealthRoute
}
go func() {
defer wg.Done()
serve(p, s.Port, healthRoute)
}()
}
wg.Wait()
return nil
}
// NewPylon returns a new Pylon object given a Server
func NewPylon(s *Server) (*Pylon, error) {
p := &Pylon{}
for _, ser := range s.Services {
m, err := NewMicroService(&ser)
if err != nil {
return nil, err
}
p.Services = append(p.Services, m)
}
return p, nil
}
// NewMicroService returns a new MicroService object given a Service
func NewMicroService(s *Service) (*MicroService, error) {
m := &MicroService{}
if s.Pattern != "" {
reg, err := regexp.Compile(s.Pattern)
if err != nil {
return nil, err
}
m.Route = RegexRoute{
Regex: reg,
}
} else if s.Prefix != "" {
m.Route = PrefixRoute{
Prefix: s.Prefix,
}
} else {
return nil, ErrServiceNoRoute
}
maxCon := defaultMaxCon
if s.MaxCon > 0 {
maxCon = s.MaxCon
}
var weightSum float32 = 0.0
for _, inst := range s.Instances {
var weight float32 = 1
if inst.Weight > 0 {
weight = inst.Weight
}
weightSum += weight
m.Instances = append(m.Instances, &Instance{
inst.Host,
weight,
make(chan int, maxCon),
NewSharedInt(0),
})
}
m.Name = s.Name
m.Strategy = s.Strategy
m.Mutex = &sync.RWMutex{}
m.BlackList = make(map[int]bool, len(s.Instances))
m.LastUsedIdx = NewSharedInt(0)
m.ReqCount = make(chan int, maxCon)
m.WeightSum = weightSum
m.HealthCheck = s.HealthCheck
if m.HealthCheck.Interval == 0 {
m.HealthCheck.Interval = defaultHealthCheckInterval
}
return m, nil
}
// serve serves a Pylon with all of its MicroServices given
// a port to listen to and a route that will be used to access
// some stats about this very Pylon
func serve(p *Pylon, port int, healthRoute string) {
mux := http.NewServeMux()
mux.Handle("/", NewPylonHandler(p))
mux.Handle(healthRoute, NewPylonHealthHandler(p))
server := &http.Server{
Addr: ":" + strconv.Itoa(port),
Handler: mux,
ReadTimeout: 20 * time.Second,
WriteTimeout: 20 * time.Second,
MaxHeaderBytes: 1 << 20,
}
for _, s := range p.Services {
logDebug("Starting initial health check of service: " + s.Name)
d := &net.Dialer{
Timeout: defaultDialerTimeout,
}
if s.HealthCheck.DialTO != 0 {
d.Timeout = time.Second * time.Duration(s.HealthCheck.DialTO)
}
// Do an initial health check
go handleHealthCheck(s, d)
if s.HealthCheck.Enabled {
go startPeriodicHealthCheck(s, time.Second * time.Duration(s.HealthCheck.Interval), d)
logDebug("Periodic Health checks started for service: " + s.Name)
}
}
logInfo("Serving on " + strconv.Itoa(port))
server.ListenAndServe()
}
// startPeriodicHealthCheck starts a timer that will check
// the health of the given MicroService given an interval and
// a dialer which is used to ping the instances/endpoints
func startPeriodicHealthCheck(m *MicroService, interval time.Duration, d *net.Dialer) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for t := range ticker.C {
logVerbose("Checking health of Service:", m.Route, " ---tick:", t)
handleHealthCheck(m, d)
}
}
// handleHealthCheck checks whether every instance of the given
// MicroService is UP or DOWN. Performed by the given Dialer
func handleHealthCheck(m *MicroService, d *net.Dialer) bool {
change := false
for i, inst := range m.Instances {
_, err := d.Dial("tcp", inst.Host)
if err != nil {
if !m.isBlacklisted(i) {
m.blackList(i, true)
logInfo("Instance: " + inst.Host + " is now marked as DOWN")
change = true
}
} else {
if m.isBlacklisted(i) {
m.blackList(i, false)
logInfo("Instance: " + inst.Host + " is now marked as UP")
change = true
}
}
}
return change
}
// NewPylonHandler returns a func(w http.ResponseWriter, r *http.Request)
// that will handle incoming requests to the given Pylon
func NewPylonHandler(p *Pylon) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
//route, err := p.getRoute(r.URL.Path)
//if err != nil {
// logError(err)
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
//}
m, err := p.getMicroServiceFromRoute(r.URL.Path)
if err != nil || m == nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
inst, _, err := m.getLoadBalancedInstance()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
m.ReqCount <- 1
inst.ReqCount <- 1
logVerbose("Serving " + r.URL.Path + r.URL.RawQuery + ", current request count: " + strconv.Itoa(len(m.ReqCount)))
logVerbose("Instance is " + inst.Host)
proxy := proxyPool.Get()
setUpProxy(proxy, m, inst.Host)
proxy.ServeHTTP(w, r)
proxyPool.Put(proxy)
<-inst.ReqCount
<-m.ReqCount
logVerbose("Request served, count: " + strconv.Itoa(len(m.ReqCount)))
}
}
// NewPylonHealthHandler returns a func(w http.ResponseWriter, r *http.Request)
// that will collect and render some stats about the given Pylon:
// (Name / Strategy / Current request count)
// For every instance: (UP or DOWN / Host / Weight / Current request count)
func NewPylonHealthHandler(p *Pylon) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
t, err := template.New("PylonHealthTemplate").Parse(pylonTemplate)
if err != nil {
logError(err.Error())
}
if err := t.Execute(w, getRenders(p)); err != nil {
logError("Could not render the HTML template")
}
logDebug("Served heath page HTML")
}
}
// getMicroServiceFromRoute returns the first MicroService
// that matches the given route (nil and an error if no
// MicroService could match that route)
func (p *Pylon) getMicroServiceFromRoute(path string) (*MicroService, error) {
for _, ser := range p.Services {
switch ser.Route.Type() {
case Regex:
reg := ser.Route.Data().(*regexp.Regexp)
if reg.Match([]byte(path)) {
return ser, nil
}
case Prefix:
pref := ser.Route.Data().(string)
if strings.HasPrefix(path, pref) {
| Type | identifier_name |
trial_purpose.py | plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'trialPurpose_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&trial_purpose')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'trialPurpose')
self.toolbar.setObjectName(u'trialPurpose')
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('trialPurpose', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
# Create the dialog (after translation) and keep reference
self.dlg = trialPurposeDialog()
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToVectorMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/trialPurpose/icon.png'
self.add_action(
icon_path,
text=self.tr(u'trial'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginVectorMenu(
self.tr(u'&trial_purpose'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def run(self):
layers = QgsMapLayerRegistry.instance().mapLayers().values()
layer_list = []
for layer in layers:
layer_list.append(layer.name())
print(layer_list)
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
# Run the dialog event loop
self.dlg.qlkbtn1.clicked.connect(self.onClickBtn1)
self.dlg.qlkbtn2.clicked.connect(self.onClickBtn2)
self.dlg.qlkbtn3.clicked.connect(self.onClickBtn3)
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
polygonNameList=[]
polygonTypeList = []
placemarksNameList = []
if db.open():
query = db.exec_(""" SELECT name FROM Final_poly ORDER BY name """)
while query.next():
record = query.record()
polygonNameList.append(record.value(0))
query = db.exec_(""" SELECT distinct btype FROM final_poly where btype NOT NULL """)
while query.next():
record = query.record()
polygonTypeList.append(record.value(0))
#print(polygonTypeList)
query = db.exec_(""" SELECT distinct name FROM Final_point ORDER BY name """)
while query.next():
record = query.record()
placemarksNameList.append(record.value(0))
#self.dlg.polygonCombo.clear()
self.dlg.qcombo1.clear()
self.dlg.qcombo2.clear()
self.dlg.q2combo.clear()
self.dlg.q3combo1.clear()
self.dlg.q3combo2.clear()
self.dlg.qcombo1.addItems(polygonNameList)
self.dlg.qcombo2.addItems(polygonTypeList)
self.dlg.q2combo.addItems(polygonNameList)
self.dlg.q3combo1.addItems(placemarksNameList)
self.dlg.q3combo2.addItems(placemarksNameList)
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
# substitute with your code.
pass
def onClickBtn1(self):
print('onClickBtn1')
dist_range = self.dlg.numberOfElements.text()
aptName = self.dlg.qcombo1.currentText()
aptType = self.dlg.qcombo2.currentText()
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
myList=[]
if db.open():
query = db.exec_("SELECT src.id,src.name From Final_poly src, Final_poly dest where st_contains(st_buffer(st_transform(dest.geom, 3857), {0}), st_transform(src.geom,3857)) AND dest.name='{1}' AND src.Btype='{2}' ".format(dist_range,aptName,aptType));
while query.next():
record = query.record()
myList.append(record.value(0))
print(myList)
selectedLayerIndex = "final_poly"
self.iface.setActiveLayer(QgsMapLayerRegistry.instance().mapLayersByName( selectedLayerIndex )[0])
layer = self.iface.activeLayer()
self.iface.mapCanvas().setSelectionColor( QColor('red'))
layer.selectByIds(myList)
def onClickBtn2(self):
print('onClickBtn2')
aptName = self.dlg.q2combo.currentText()
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
myList=[]
if db.open():
query = db.exec_("Select id from Final_poly where name='{0}'".format(aptName));
while query.next():
record = query.record()
myList.append(record.value(0))
print(myList)
selectedLayerIndex = "final_poly"
self.iface.setActiveLayer(QgsMapLayerRegistry.instance().mapLayersByName( selectedLayerIndex )[0])
layer = self.iface.activeLayer()
self.iface.mapCanvas().setSelectionColor( QColor('green'))
layer.selectByIds(myList)
def find_shortest_path(self,graph, start, end, path =[]):
path = path + [start]
if start == end:
return path
shortest = None
for node in graph[start]:
if node not in path:
newpath = self.find_shortest_path(graph, node, end, path)
if newpath:
if not shortest or len(newpath) < len(shortest):
shortest = newpath | return shortest
| random_line_split | |
trial_purpose.py | .py
import resources
# Import the code for the dialog
from trial_purpose_dialog import trialPurposeDialog
import os.path
from qgis.core import QgsMapLayerRegistry
from qgis.core import QgsVectorLayer, QgsDataSourceURI
from qgis.core import *
from PyQt4.QtGui import *
import qgis
from PyQt4.QtSql import QSqlDatabase
from qgis.gui import *
from qgis.networkanalysis import *
from collections import *
try:
from PyQt4.QtCore import QString
except ImportError:
# we are using Python3 so QString is not defined
QString = type("")
rbDict = {}
class trialPurpose:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgisInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'trialPurpose_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&trial_purpose')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'trialPurpose')
self.toolbar.setObjectName(u'trialPurpose')
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('trialPurpose', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
# Create the dialog (after translation) and keep reference
self.dlg = trialPurposeDialog()
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToVectorMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/trialPurpose/icon.png'
self.add_action(
icon_path,
text=self.tr(u'trial'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginVectorMenu(
self.tr(u'&trial_purpose'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def run(self):
layers = QgsMapLayerRegistry.instance().mapLayers().values()
layer_list = []
for layer in layers:
layer_list.append(layer.name())
print(layer_list)
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
# Run the dialog event loop
self.dlg.qlkbtn1.clicked.connect(self.onClickBtn1)
self.dlg.qlkbtn2.clicked.connect(self.onClickBtn2)
self.dlg.qlkbtn3.clicked.connect(self.onClickBtn3)
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
polygonNameList=[]
polygonTypeList = []
placemarksNameList = []
if db.open():
query = db.exec_(""" SELECT name FROM Final_poly ORDER BY name """)
while query.next():
record = query.record()
polygonNameList.append(record.value(0))
query = db.exec_(""" SELECT distinct btype FROM final_poly where btype NOT NULL """)
while query.next():
record = query.record()
polygonTypeList.append(record.value(0))
#print(polygonTypeList)
query = db.exec_(""" SELECT distinct name FROM Final_point ORDER BY name """)
while query.next():
record = query.record()
placemarksNameList.append(record.value(0))
#self.dlg.polygonCombo.clear()
self.dlg.qcombo1.clear()
self.dlg.qcombo2.clear()
self.dlg.q2combo.clear()
self.dlg.q3combo1.clear()
self.dlg.q3combo2.clear()
self.dlg.qcombo1.addItems(polygonNameList)
self.dlg.qcombo2.addItems(polygonTypeList)
self.dlg.q2combo.addItems(polygonNameList)
self.dlg.q3combo1.addItems(placemarksNameList)
self.dlg.q3combo2.addItems(placemarksNameList)
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
# substitute with your code.
pass
def onClickBtn1(self):
print('onClickBtn1')
dist_range = self.dlg.numberOfElements.text()
aptName = self.dlg.qcombo1.currentText()
aptType = self.dlg.qcombo2.currentText()
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
myList=[]
if db.open():
query = db.exec_("SELECT src.id,src.name From Final_poly src, Final_poly dest where st_contains(st_buffer(st_transform(dest.geom, 3857), {0}), st_transform(src.geom,3857)) AND dest.name='{1}' AND src.Btype='{2}' ".format(dist_range,aptName,aptType));
while query.next():
record = query.record()
myList.append(record.value(0))
print(myList)
selectedLayerIndex = "final_poly"
self.iface.setActiveLayer(QgsMapLayerRegistry.instance().mapLayersByName( selectedLayerIndex )[0])
layer = self.iface.activeLayer()
self.iface.mapCanvas().setSelectionColor( QColor('red'))
layer.selectByIds(myList)
def | (self):
print('onClickBtn2')
aptName = self.dlg.q2combo.currentText()
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
myList=[]
if db.open | onClickBtn2 | identifier_name |
trial_purpose.py | .py
import resources
# Import the code for the dialog
from trial_purpose_dialog import trialPurposeDialog
import os.path
from qgis.core import QgsMapLayerRegistry
from qgis.core import QgsVectorLayer, QgsDataSourceURI
from qgis.core import *
from PyQt4.QtGui import *
import qgis
from PyQt4.QtSql import QSqlDatabase
from qgis.gui import *
from qgis.networkanalysis import *
from collections import *
try:
from PyQt4.QtCore import QString
except ImportError:
# we are using Python3 so QString is not defined
QString = type("")
rbDict = {}
class trialPurpose:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgisInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'trialPurpose_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&trial_purpose')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'trialPurpose')
self.toolbar.setObjectName(u'trialPurpose')
# noinspection PyMethodMayBeStatic
def tr(self, message):
|
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
# Create the dialog (after translation) and keep reference
self.dlg = trialPurposeDialog()
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToVectorMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/trialPurpose/icon.png'
self.add_action(
icon_path,
text=self.tr(u'trial'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginVectorMenu(
self.tr(u'&trial_purpose'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def run(self):
layers = QgsMapLayerRegistry.instance().mapLayers().values()
layer_list = []
for layer in layers:
layer_list.append(layer.name())
print(layer_list)
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
# Run the dialog event loop
self.dlg.qlkbtn1.clicked.connect(self.onClickBtn1)
self.dlg.qlkbtn2.clicked.connect(self.onClickBtn2)
self.dlg.qlkbtn3.clicked.connect(self.onClickBtn3)
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
polygonNameList=[]
polygonTypeList = []
placemarksNameList = []
if db.open():
query = db.exec_(""" SELECT name FROM Final_poly ORDER BY name """)
while query.next():
record = query.record()
polygonNameList.append(record.value(0))
query = db.exec_(""" SELECT distinct btype FROM final_poly where btype NOT NULL """)
while query.next():
record = query.record()
polygonTypeList.append(record.value(0))
#print(polygonTypeList)
query = db.exec_(""" SELECT distinct name FROM Final_point ORDER BY name """)
while query.next():
record = query.record()
placemarksNameList.append(record.value(0))
#self.dlg.polygonCombo.clear()
self.dlg.qcombo1.clear()
self.dlg.qcombo2.clear()
self.dlg.q2combo.clear()
self.dlg.q3combo1.clear()
self.dlg.q3combo2.clear()
self.dlg.qcombo1.addItems(polygonNameList)
self.dlg.qcombo2.addItems(polygonTypeList)
self.dlg.q2combo.addItems(polygonNameList)
self.dlg.q3combo1.addItems(placemarksNameList)
self.dlg.q3combo2.addItems(placemarksNameList)
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
# substitute with your code.
pass
def onClickBtn1(self):
print('onClickBtn1')
dist_range = self.dlg.numberOfElements.text()
aptName = self.dlg.qcombo1.currentText()
aptType = self.dlg.qcombo2.currentText()
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
myList=[]
if db.open():
query = db.exec_("SELECT src.id,src.name From Final_poly src, Final_poly dest where st_contains(st_buffer(st_transform(dest.geom, 3857), {0}), st_transform(src.geom,3857)) AND dest.name='{1}' AND src.Btype='{2}' ".format(dist_range,aptName,aptType));
while query.next():
record = query.record()
myList.append(record.value(0))
print(myList)
selectedLayerIndex = "final_poly"
self.iface.setActiveLayer(QgsMapLayerRegistry.instance().mapLayersByName( selectedLayerIndex )[0])
layer = self.iface.activeLayer()
self.iface.mapCanvas().setSelectionColor( QColor('red'))
layer.selectByIds(myList)
def onClickBtn2(self):
print('onClickBtn2')
aptName = self.dlg.q2combo.currentText()
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
myList=[]
if db.open | """Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('trialPurpose', message) | identifier_body |
trial_purpose.py | using Python3 so QString is not defined
QString = type("")
rbDict = {}
class trialPurpose:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgisInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'trialPurpose_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&trial_purpose')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'trialPurpose')
self.toolbar.setObjectName(u'trialPurpose')
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('trialPurpose', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
# Create the dialog (after translation) and keep reference
self.dlg = trialPurposeDialog()
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToVectorMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/trialPurpose/icon.png'
self.add_action(
icon_path,
text=self.tr(u'trial'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginVectorMenu(
self.tr(u'&trial_purpose'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def run(self):
layers = QgsMapLayerRegistry.instance().mapLayers().values()
layer_list = []
for layer in layers:
layer_list.append(layer.name())
print(layer_list)
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
# Run the dialog event loop
self.dlg.qlkbtn1.clicked.connect(self.onClickBtn1)
self.dlg.qlkbtn2.clicked.connect(self.onClickBtn2)
self.dlg.qlkbtn3.clicked.connect(self.onClickBtn3)
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
polygonNameList=[]
polygonTypeList = []
placemarksNameList = []
if db.open():
query = db.exec_(""" SELECT name FROM Final_poly ORDER BY name """)
while query.next():
record = query.record()
polygonNameList.append(record.value(0))
query = db.exec_(""" SELECT distinct btype FROM final_poly where btype NOT NULL """)
while query.next():
record = query.record()
polygonTypeList.append(record.value(0))
#print(polygonTypeList)
query = db.exec_(""" SELECT distinct name FROM Final_point ORDER BY name """)
while query.next():
record = query.record()
placemarksNameList.append(record.value(0))
#self.dlg.polygonCombo.clear()
self.dlg.qcombo1.clear()
self.dlg.qcombo2.clear()
self.dlg.q2combo.clear()
self.dlg.q3combo1.clear()
self.dlg.q3combo2.clear()
self.dlg.qcombo1.addItems(polygonNameList)
self.dlg.qcombo2.addItems(polygonTypeList)
self.dlg.q2combo.addItems(polygonNameList)
self.dlg.q3combo1.addItems(placemarksNameList)
self.dlg.q3combo2.addItems(placemarksNameList)
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
# substitute with your code.
pass
def onClickBtn1(self):
print('onClickBtn1')
dist_range = self.dlg.numberOfElements.text()
aptName = self.dlg.qcombo1.currentText()
aptType = self.dlg.qcombo2.currentText()
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
myList=[]
if db.open():
query = db.exec_("SELECT src.id,src.name From Final_poly src, Final_poly dest where st_contains(st_buffer(st_transform(dest.geom, 3857), {0}), st_transform(src.geom,3857)) AND dest.name='{1}' AND src.Btype='{2}' ".format(dist_range,aptName,aptType));
while query.next():
record = query.record()
myList.append(record.value(0))
print(myList)
selectedLayerIndex = "final_poly"
self.iface.setActiveLayer(QgsMapLayerRegistry.instance().mapLayersByName( selectedLayerIndex )[0])
layer = self.iface.activeLayer()
self.iface.mapCanvas().setSelectionColor( QColor('red'))
layer.selectByIds(myList)
def onClickBtn2(self):
print('onClickBtn2')
aptName = self.dlg.q2combo.currentText()
# Reuse the path to DB to set database name
uri = QgsDataSourceURI()
uri.setDatabase('F:/UTA/SEM 5/Adv DB/UTA_Final_DB/test.sqlite')
db = QSqlDatabase.addDatabase('QSPATIALITE');
db.setDatabaseName(uri.database())
myList=[]
if db.open():
| query = db.exec_("Select id from Final_poly where name='{0}'".format(aptName));
while query.next():
record = query.record()
myList.append(record.value(0))
print(myList)
selectedLayerIndex = "final_poly"
self.iface.setActiveLayer(QgsMapLayerRegistry.instance().mapLayersByName( selectedLayerIndex )[0])
layer = self.iface.activeLayer()
self.iface.mapCanvas().setSelectionColor( QColor('green'))
layer.selectByIds(myList) | conditional_block | |
key_bundle_v2.go | bfscrypto.TLFEphemeralPublicKey{},
fmt.Errorf("Invalid key in %s with index %d >= %d",
keyLocation, index, keyCount)
}
return keyLocation, index, publicKeys[index], nil
}
// DeviceKeyInfoMapV2 is a map from a user devices (identified by the
// KID of the corresponding device CryptPublicKey) to the
// TLF's symmetric secret key information.
type DeviceKeyInfoMapV2 map[keybase1.KID]TLFCryptKeyInfo
func (dkimV2 DeviceKeyInfoMapV2) fillInDeviceInfos(
uid keybase1.UID, tlfCryptKey kbfscrypto.TLFCryptKey,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey, ePubIndex int,
updatedDeviceKeys DevicePublicKeys) (
serverHalves DeviceKeyServerHalves, err error) {
serverHalves = make(DeviceKeyServerHalves, len(updatedDeviceKeys))
// TODO: parallelize
for k := range updatedDeviceKeys {
// Skip existing entries, and only fill in new ones.
if _, ok := dkimV2[k.KID()]; ok {
continue
}
clientInfo, serverHalf, err := splitTLFCryptKey(
uid, tlfCryptKey, ePrivKey, ePubIndex, k)
if err != nil {
return nil, err
}
dkimV2[k.KID()] = clientInfo
serverHalves[k] = serverHalf
}
return serverHalves, nil
}
func (dkimV2 DeviceKeyInfoMapV2) | () DevicePublicKeys {
publicKeys := make(DevicePublicKeys, len(dkimV2))
for kid := range dkimV2 {
publicKeys[kbfscrypto.MakeCryptPublicKey(kid)] = true
}
return publicKeys
}
// UserDeviceKeyInfoMapV2 maps a user's keybase UID to their
// DeviceKeyInfoMapV2.
type UserDeviceKeyInfoMapV2 map[keybase1.UID]DeviceKeyInfoMapV2
// ToPublicKeys converts this object to a UserDevicePublicKeys object.
func (udkimV2 UserDeviceKeyInfoMapV2) ToPublicKeys() UserDevicePublicKeys {
publicKeys := make(UserDevicePublicKeys, len(udkimV2))
for u, dkimV2 := range udkimV2 {
publicKeys[u] = dkimV2.toPublicKeys()
}
return publicKeys
}
// RemoveDevicesNotIn removes any info for any device that is not
// contained in the given map of users and devices.
func (udkimV2 UserDeviceKeyInfoMapV2) RemoveDevicesNotIn(
updatedUserKeys UserDevicePublicKeys) ServerHalfRemovalInfo {
removalInfo := make(ServerHalfRemovalInfo)
for uid, dkim := range udkimV2 {
userRemoved := false
deviceServerHalfIDs := make(DeviceServerHalfRemovalInfo)
if deviceKeys, ok := updatedUserKeys[uid]; ok {
for kid, info := range dkim {
key := kbfscrypto.MakeCryptPublicKey(kid)
if !deviceKeys[key] {
delete(dkim, kid)
deviceServerHalfIDs[key] = append(
deviceServerHalfIDs[key],
info.ServerHalfID)
}
}
if len(deviceServerHalfIDs) == 0 {
continue
}
} else {
// The user was completely removed, which
// shouldn't happen but might as well make it
// work just in case.
userRemoved = true
for kid, info := range dkim {
key := kbfscrypto.MakeCryptPublicKey(kid)
deviceServerHalfIDs[key] = append(
deviceServerHalfIDs[key],
info.ServerHalfID)
}
delete(udkimV2, uid)
}
removalInfo[uid] = UserServerHalfRemovalInfo{
UserRemoved: userRemoved,
DeviceServerHalfIDs: deviceServerHalfIDs,
}
}
return removalInfo
}
// FillInUserInfos fills in this map from the given info.
func (udkimV2 UserDeviceKeyInfoMapV2) FillInUserInfos(
newIndex int, updatedUserKeys UserDevicePublicKeys,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey,
tlfCryptKey kbfscrypto.TLFCryptKey) (
serverHalves UserDeviceKeyServerHalves, err error) {
serverHalves = make(UserDeviceKeyServerHalves, len(updatedUserKeys))
for u, updatedDeviceKeys := range updatedUserKeys {
if _, ok := udkimV2[u]; !ok {
udkimV2[u] = DeviceKeyInfoMapV2{}
}
deviceServerHalves, err := udkimV2[u].fillInDeviceInfos(
u, tlfCryptKey, ePrivKey, newIndex,
updatedDeviceKeys)
if err != nil {
return nil, err
}
if len(deviceServerHalves) > 0 {
serverHalves[u] = deviceServerHalves
}
}
return serverHalves, nil
}
// All section references below are to https://keybase.io/docs/crypto/kbfs
// (version 1.8).
// TLFWriterKeyBundleV2 is a bundle of all the writer keys for a top-level
// folder.
type TLFWriterKeyBundleV2 struct {
// Maps from each writer to their crypt key bundle.
WKeys UserDeviceKeyInfoMapV2
// M_f as described in § 4.1.1.
TLFPublicKey kbfscrypto.TLFPublicKey `codec:"pubKey"`
// M_e as described in § 4.1.1. Because devices can be added
// into the key generation after it is initially created (so
// those devices can get access to existing data), we track
// multiple ephemeral public keys; the one used by a
// particular device is specified by EPubKeyIndex in its
// TLFCryptoKeyInfo struct.
TLFEphemeralPublicKeys kbfscrypto.TLFEphemeralPublicKeys `codec:"ePubKey"`
codec.UnknownFieldSetHandler
}
// IsWriter returns true if the given user device is in the writer set.
func (wkb TLFWriterKeyBundleV2) IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
_, ok := wkb.WKeys[user][deviceKey.KID()]
return ok
}
// TLFWriterKeyGenerationsV2 stores a slice of TLFWriterKeyBundleV2,
// where the last element is the current generation.
type TLFWriterKeyGenerationsV2 []TLFWriterKeyBundleV2
// LatestKeyGeneration returns the current key generation for this TLF.
func (wkg TLFWriterKeyGenerationsV2) LatestKeyGeneration() KeyGen {
return KeyGen(len(wkg))
}
// IsWriter returns whether or not the user+device is an authorized writer
// for the latest generation.
func (wkg TLFWriterKeyGenerationsV2) IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
keyGen := wkg.LatestKeyGeneration()
if keyGen < 1 {
return false
}
return wkg[keyGen-1].IsWriter(user, deviceKey)
}
// ToTLFWriterKeyBundleV3 converts a TLFWriterKeyGenerationsV2 to a TLFWriterKeyBundleV3.
func (wkg TLFWriterKeyGenerationsV2) ToTLFWriterKeyBundleV3(
codec kbfscodec.Codec,
tlfCryptKeyGetter func() ([]kbfscrypto.TLFCryptKey, error)) (
TLFWriterKeyBundleV2, TLFWriterKeyBundleV3, error) {
keyGen := wkg.LatestKeyGeneration()
if keyGen < FirstValidKeyGen {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{},
errors.New("No key generations to convert")
}
// Copy the latest UserDeviceKeyInfoMap.
wkbV2 := wkg[keyGen-FirstValidKeyGen]
ePubKeyCount := len(wkbV2.TLFEphemeralPublicKeys)
udkimV3, err := writerUDKIMV2ToV3(codec, wkbV2.WKeys, ePubKeyCount)
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
wkbV3 := TLFWriterKeyBundleV3{
Keys: udkimV3,
TLFEphemeralPublicKeys: make(
kbfscrypto.TLFEphemeralPublicKeys, ePubKeyCount),
TLFPublicKey: wkbV2.TLFPublicKey,
}
// Copy all of the TLFEphemeralPublicKeys at this generation.
copy(wkbV3.TLFEphemeralPublicKeys[:], wkbV2.TLFEphemeralPublicKeys)
if keyGen > FirstValidKeyGen {
// Fetch all of the TLFCryptKeys.
keys, err := tlfCryptKeyGetter()
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriter | toPublicKeys | identifier_name |
key_bundle_v2.go | ) {
serverHalves = make(DeviceKeyServerHalves, len(updatedDeviceKeys))
// TODO: parallelize
for k := range updatedDeviceKeys {
// Skip existing entries, and only fill in new ones.
if _, ok := dkimV2[k.KID()]; ok {
continue
}
clientInfo, serverHalf, err := splitTLFCryptKey(
uid, tlfCryptKey, ePrivKey, ePubIndex, k)
if err != nil {
return nil, err
}
dkimV2[k.KID()] = clientInfo
serverHalves[k] = serverHalf
}
return serverHalves, nil
}
func (dkimV2 DeviceKeyInfoMapV2) toPublicKeys() DevicePublicKeys {
publicKeys := make(DevicePublicKeys, len(dkimV2))
for kid := range dkimV2 {
publicKeys[kbfscrypto.MakeCryptPublicKey(kid)] = true
}
return publicKeys
}
// UserDeviceKeyInfoMapV2 maps a user's keybase UID to their
// DeviceKeyInfoMapV2.
type UserDeviceKeyInfoMapV2 map[keybase1.UID]DeviceKeyInfoMapV2
// ToPublicKeys converts this object to a UserDevicePublicKeys object.
func (udkimV2 UserDeviceKeyInfoMapV2) ToPublicKeys() UserDevicePublicKeys {
publicKeys := make(UserDevicePublicKeys, len(udkimV2))
for u, dkimV2 := range udkimV2 {
publicKeys[u] = dkimV2.toPublicKeys()
}
return publicKeys
}
// RemoveDevicesNotIn removes any info for any device that is not
// contained in the given map of users and devices.
func (udkimV2 UserDeviceKeyInfoMapV2) RemoveDevicesNotIn(
updatedUserKeys UserDevicePublicKeys) ServerHalfRemovalInfo {
removalInfo := make(ServerHalfRemovalInfo)
for uid, dkim := range udkimV2 {
userRemoved := false
deviceServerHalfIDs := make(DeviceServerHalfRemovalInfo)
if deviceKeys, ok := updatedUserKeys[uid]; ok {
for kid, info := range dkim {
key := kbfscrypto.MakeCryptPublicKey(kid)
if !deviceKeys[key] {
delete(dkim, kid)
deviceServerHalfIDs[key] = append(
deviceServerHalfIDs[key],
info.ServerHalfID)
}
}
if len(deviceServerHalfIDs) == 0 {
continue
}
} else {
// The user was completely removed, which
// shouldn't happen but might as well make it
// work just in case.
userRemoved = true
for kid, info := range dkim {
key := kbfscrypto.MakeCryptPublicKey(kid)
deviceServerHalfIDs[key] = append(
deviceServerHalfIDs[key],
info.ServerHalfID)
}
delete(udkimV2, uid)
}
removalInfo[uid] = UserServerHalfRemovalInfo{
UserRemoved: userRemoved,
DeviceServerHalfIDs: deviceServerHalfIDs,
}
}
return removalInfo
}
// FillInUserInfos fills in this map from the given info.
func (udkimV2 UserDeviceKeyInfoMapV2) FillInUserInfos(
newIndex int, updatedUserKeys UserDevicePublicKeys,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey,
tlfCryptKey kbfscrypto.TLFCryptKey) (
serverHalves UserDeviceKeyServerHalves, err error) {
serverHalves = make(UserDeviceKeyServerHalves, len(updatedUserKeys))
for u, updatedDeviceKeys := range updatedUserKeys {
if _, ok := udkimV2[u]; !ok {
udkimV2[u] = DeviceKeyInfoMapV2{}
}
deviceServerHalves, err := udkimV2[u].fillInDeviceInfos(
u, tlfCryptKey, ePrivKey, newIndex,
updatedDeviceKeys)
if err != nil {
return nil, err
}
if len(deviceServerHalves) > 0 {
serverHalves[u] = deviceServerHalves
}
}
return serverHalves, nil
}
// All section references below are to https://keybase.io/docs/crypto/kbfs
// (version 1.8).
// TLFWriterKeyBundleV2 is a bundle of all the writer keys for a top-level
// folder.
type TLFWriterKeyBundleV2 struct {
// Maps from each writer to their crypt key bundle.
WKeys UserDeviceKeyInfoMapV2
// M_f as described in § 4.1.1.
TLFPublicKey kbfscrypto.TLFPublicKey `codec:"pubKey"`
// M_e as described in § 4.1.1. Because devices can be added
// into the key generation after it is initially created (so
// those devices can get access to existing data), we track
// multiple ephemeral public keys; the one used by a
// particular device is specified by EPubKeyIndex in its
// TLFCryptoKeyInfo struct.
TLFEphemeralPublicKeys kbfscrypto.TLFEphemeralPublicKeys `codec:"ePubKey"`
codec.UnknownFieldSetHandler
}
// IsWriter returns true if the given user device is in the writer set.
func (wkb TLFWriterKeyBundleV2) IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
_, ok := wkb.WKeys[user][deviceKey.KID()]
return ok
}
// TLFWriterKeyGenerationsV2 stores a slice of TLFWriterKeyBundleV2,
// where the last element is the current generation.
type TLFWriterKeyGenerationsV2 []TLFWriterKeyBundleV2
// LatestKeyGeneration returns the current key generation for this TLF.
func (wkg TLFWriterKeyGenerationsV2) LatestKeyGeneration() KeyGen {
return KeyGen(len(wkg))
}
// IsWriter returns whether or not the user+device is an authorized writer
// for the latest generation.
func (wkg TLFWriterKeyGenerationsV2) IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
keyGen := wkg.LatestKeyGeneration()
if keyGen < 1 {
return false
}
return wkg[keyGen-1].IsWriter(user, deviceKey)
}
// ToTLFWriterKeyBundleV3 converts a TLFWriterKeyGenerationsV2 to a TLFWriterKeyBundleV3.
func (wkg TLFWriterKeyGenerationsV2) ToTLFWriterKeyBundleV3(
codec kbfscodec.Codec,
tlfCryptKeyGetter func() ([]kbfscrypto.TLFCryptKey, error)) (
TLFWriterKeyBundleV2, TLFWriterKeyBundleV3, error) {
keyGen := wkg.LatestKeyGeneration()
if keyGen < FirstValidKeyGen {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{},
errors.New("No key generations to convert")
}
// Copy the latest UserDeviceKeyInfoMap.
wkbV2 := wkg[keyGen-FirstValidKeyGen]
ePubKeyCount := len(wkbV2.TLFEphemeralPublicKeys)
udkimV3, err := writerUDKIMV2ToV3(codec, wkbV2.WKeys, ePubKeyCount)
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
wkbV3 := TLFWriterKeyBundleV3{
Keys: udkimV3,
TLFEphemeralPublicKeys: make(
kbfscrypto.TLFEphemeralPublicKeys, ePubKeyCount),
TLFPublicKey: wkbV2.TLFPublicKey,
}
// Copy all of the TLFEphemeralPublicKeys at this generation.
copy(wkbV3.TLFEphemeralPublicKeys[:], wkbV2.TLFEphemeralPublicKeys)
if keyGen > FirstValidKeyGen {
// Fetch all of the TLFCryptKeys.
keys, err := tlfCryptKeyGetter()
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
// Sanity check.
if len(keys) != int(keyGen) {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{},
fmt.Errorf("expected %d keys, found %d", keyGen, len(keys))
}
// Save the current key.
currKey := keys[len(keys)-1]
// Get rid of the most current generation as that's in the UserDeviceKeyInfoMap already.
keys = keys[:len(keys)-1]
// Encrypt the historic keys with the current key.
wkbV3.EncryptedHistoricTLFCryptKeys, err = kbfscrypto.EncryptTLFCryptKeys(codec, keys, currKey) | if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
} | random_line_split | |
key_bundle_v2.go | info for any device that is not
// contained in the given map of users and devices.
func (udkimV2 UserDeviceKeyInfoMapV2) RemoveDevicesNotIn(
updatedUserKeys UserDevicePublicKeys) ServerHalfRemovalInfo {
removalInfo := make(ServerHalfRemovalInfo)
for uid, dkim := range udkimV2 {
userRemoved := false
deviceServerHalfIDs := make(DeviceServerHalfRemovalInfo)
if deviceKeys, ok := updatedUserKeys[uid]; ok {
for kid, info := range dkim {
key := kbfscrypto.MakeCryptPublicKey(kid)
if !deviceKeys[key] {
delete(dkim, kid)
deviceServerHalfIDs[key] = append(
deviceServerHalfIDs[key],
info.ServerHalfID)
}
}
if len(deviceServerHalfIDs) == 0 {
continue
}
} else {
// The user was completely removed, which
// shouldn't happen but might as well make it
// work just in case.
userRemoved = true
for kid, info := range dkim {
key := kbfscrypto.MakeCryptPublicKey(kid)
deviceServerHalfIDs[key] = append(
deviceServerHalfIDs[key],
info.ServerHalfID)
}
delete(udkimV2, uid)
}
removalInfo[uid] = UserServerHalfRemovalInfo{
UserRemoved: userRemoved,
DeviceServerHalfIDs: deviceServerHalfIDs,
}
}
return removalInfo
}
// FillInUserInfos fills in this map from the given info.
func (udkimV2 UserDeviceKeyInfoMapV2) FillInUserInfos(
newIndex int, updatedUserKeys UserDevicePublicKeys,
ePrivKey kbfscrypto.TLFEphemeralPrivateKey,
tlfCryptKey kbfscrypto.TLFCryptKey) (
serverHalves UserDeviceKeyServerHalves, err error) {
serverHalves = make(UserDeviceKeyServerHalves, len(updatedUserKeys))
for u, updatedDeviceKeys := range updatedUserKeys {
if _, ok := udkimV2[u]; !ok {
udkimV2[u] = DeviceKeyInfoMapV2{}
}
deviceServerHalves, err := udkimV2[u].fillInDeviceInfos(
u, tlfCryptKey, ePrivKey, newIndex,
updatedDeviceKeys)
if err != nil {
return nil, err
}
if len(deviceServerHalves) > 0 {
serverHalves[u] = deviceServerHalves
}
}
return serverHalves, nil
}
// All section references below are to https://keybase.io/docs/crypto/kbfs
// (version 1.8).
// TLFWriterKeyBundleV2 is a bundle of all the writer keys for a top-level
// folder.
type TLFWriterKeyBundleV2 struct {
// Maps from each writer to their crypt key bundle.
WKeys UserDeviceKeyInfoMapV2
// M_f as described in § 4.1.1.
TLFPublicKey kbfscrypto.TLFPublicKey `codec:"pubKey"`
// M_e as described in § 4.1.1. Because devices can be added
// into the key generation after it is initially created (so
// those devices can get access to existing data), we track
// multiple ephemeral public keys; the one used by a
// particular device is specified by EPubKeyIndex in its
// TLFCryptoKeyInfo struct.
TLFEphemeralPublicKeys kbfscrypto.TLFEphemeralPublicKeys `codec:"ePubKey"`
codec.UnknownFieldSetHandler
}
// IsWriter returns true if the given user device is in the writer set.
func (wkb TLFWriterKeyBundleV2) IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
_, ok := wkb.WKeys[user][deviceKey.KID()]
return ok
}
// TLFWriterKeyGenerationsV2 stores a slice of TLFWriterKeyBundleV2,
// where the last element is the current generation.
type TLFWriterKeyGenerationsV2 []TLFWriterKeyBundleV2
// LatestKeyGeneration returns the current key generation for this TLF.
func (wkg TLFWriterKeyGenerationsV2) LatestKeyGeneration() KeyGen {
return KeyGen(len(wkg))
}
// IsWriter returns whether or not the user+device is an authorized writer
// for the latest generation.
func (wkg TLFWriterKeyGenerationsV2) IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
keyGen := wkg.LatestKeyGeneration()
if keyGen < 1 {
return false
}
return wkg[keyGen-1].IsWriter(user, deviceKey)
}
// ToTLFWriterKeyBundleV3 converts a TLFWriterKeyGenerationsV2 to a TLFWriterKeyBundleV3.
func (wkg TLFWriterKeyGenerationsV2) ToTLFWriterKeyBundleV3(
codec kbfscodec.Codec,
tlfCryptKeyGetter func() ([]kbfscrypto.TLFCryptKey, error)) (
TLFWriterKeyBundleV2, TLFWriterKeyBundleV3, error) {
keyGen := wkg.LatestKeyGeneration()
if keyGen < FirstValidKeyGen {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{},
errors.New("No key generations to convert")
}
// Copy the latest UserDeviceKeyInfoMap.
wkbV2 := wkg[keyGen-FirstValidKeyGen]
ePubKeyCount := len(wkbV2.TLFEphemeralPublicKeys)
udkimV3, err := writerUDKIMV2ToV3(codec, wkbV2.WKeys, ePubKeyCount)
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
wkbV3 := TLFWriterKeyBundleV3{
Keys: udkimV3,
TLFEphemeralPublicKeys: make(
kbfscrypto.TLFEphemeralPublicKeys, ePubKeyCount),
TLFPublicKey: wkbV2.TLFPublicKey,
}
// Copy all of the TLFEphemeralPublicKeys at this generation.
copy(wkbV3.TLFEphemeralPublicKeys[:], wkbV2.TLFEphemeralPublicKeys)
if keyGen > FirstValidKeyGen {
// Fetch all of the TLFCryptKeys.
keys, err := tlfCryptKeyGetter()
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
// Sanity check.
if len(keys) != int(keyGen) {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{},
fmt.Errorf("expected %d keys, found %d", keyGen, len(keys))
}
// Save the current key.
currKey := keys[len(keys)-1]
// Get rid of the most current generation as that's in the UserDeviceKeyInfoMap already.
keys = keys[:len(keys)-1]
// Encrypt the historic keys with the current key.
wkbV3.EncryptedHistoricTLFCryptKeys, err = kbfscrypto.EncryptTLFCryptKeys(codec, keys, currKey)
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
}
return wkbV2, wkbV3, nil
}
// TLFReaderKeyBundleV2 stores all the reader keys with reader
// permissions on a TLF.
type TLFReaderKeyBundleV2 struct {
RKeys UserDeviceKeyInfoMapV2
// M_e as described in § 4.1.1. Because devices can be added
// into the key generation after it is initially created (so
// those devices can get access to existing data), we track
// multiple ephemeral public keys; the one used by a
// particular device is specified by EPubKeyIndex in its
// TLFCryptoKeyInfo struct. This list is needed so a reader
// rekey doesn't modify the writer metadata.
TLFReaderEphemeralPublicKeys kbfscrypto.TLFEphemeralPublicKeys `codec:"readerEPubKey,omitempty"`
codec.UnknownFieldSetHandler
}
// IsReader returns true if the given user device is in the reader set.
func (trb TLFReaderKeyBundleV2) IsReader(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
_, ok := trb.RKeys[user][deviceKey.KID()]
return ok
}
// TLFReaderKeyGenerationsV2 stores a slice of TLFReaderKeyBundleV2,
// where the last element is the current generation.
type TLFReaderKeyGenerationsV2 []TLFReaderKeyBundleV2
// LatestKeyGeneration returns the current key generation for this TLF.
func (rkg TLFReaderKeyGenerationsV2) LatestKeyGeneration() KeyGen {
| return KeyGen(len(rkg))
}
/ | identifier_body | |
key_bundle_v2.go | it is initially created (so
// those devices can get access to existing data), we track
// multiple ephemeral public keys; the one used by a
// particular device is specified by EPubKeyIndex in its
// TLFCryptoKeyInfo struct.
TLFEphemeralPublicKeys kbfscrypto.TLFEphemeralPublicKeys `codec:"ePubKey"`
codec.UnknownFieldSetHandler
}
// IsWriter returns true if the given user device is in the writer set.
func (wkb TLFWriterKeyBundleV2) IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
_, ok := wkb.WKeys[user][deviceKey.KID()]
return ok
}
// TLFWriterKeyGenerationsV2 stores a slice of TLFWriterKeyBundleV2,
// where the last element is the current generation.
type TLFWriterKeyGenerationsV2 []TLFWriterKeyBundleV2
// LatestKeyGeneration returns the current key generation for this TLF.
func (wkg TLFWriterKeyGenerationsV2) LatestKeyGeneration() KeyGen {
return KeyGen(len(wkg))
}
// IsWriter returns whether or not the user+device is an authorized writer
// for the latest generation.
func (wkg TLFWriterKeyGenerationsV2) IsWriter(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
keyGen := wkg.LatestKeyGeneration()
if keyGen < 1 {
return false
}
return wkg[keyGen-1].IsWriter(user, deviceKey)
}
// ToTLFWriterKeyBundleV3 converts a TLFWriterKeyGenerationsV2 to a TLFWriterKeyBundleV3.
func (wkg TLFWriterKeyGenerationsV2) ToTLFWriterKeyBundleV3(
codec kbfscodec.Codec,
tlfCryptKeyGetter func() ([]kbfscrypto.TLFCryptKey, error)) (
TLFWriterKeyBundleV2, TLFWriterKeyBundleV3, error) {
keyGen := wkg.LatestKeyGeneration()
if keyGen < FirstValidKeyGen {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{},
errors.New("No key generations to convert")
}
// Copy the latest UserDeviceKeyInfoMap.
wkbV2 := wkg[keyGen-FirstValidKeyGen]
ePubKeyCount := len(wkbV2.TLFEphemeralPublicKeys)
udkimV3, err := writerUDKIMV2ToV3(codec, wkbV2.WKeys, ePubKeyCount)
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
wkbV3 := TLFWriterKeyBundleV3{
Keys: udkimV3,
TLFEphemeralPublicKeys: make(
kbfscrypto.TLFEphemeralPublicKeys, ePubKeyCount),
TLFPublicKey: wkbV2.TLFPublicKey,
}
// Copy all of the TLFEphemeralPublicKeys at this generation.
copy(wkbV3.TLFEphemeralPublicKeys[:], wkbV2.TLFEphemeralPublicKeys)
if keyGen > FirstValidKeyGen {
// Fetch all of the TLFCryptKeys.
keys, err := tlfCryptKeyGetter()
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
// Sanity check.
if len(keys) != int(keyGen) {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{},
fmt.Errorf("expected %d keys, found %d", keyGen, len(keys))
}
// Save the current key.
currKey := keys[len(keys)-1]
// Get rid of the most current generation as that's in the UserDeviceKeyInfoMap already.
keys = keys[:len(keys)-1]
// Encrypt the historic keys with the current key.
wkbV3.EncryptedHistoricTLFCryptKeys, err = kbfscrypto.EncryptTLFCryptKeys(codec, keys, currKey)
if err != nil {
return TLFWriterKeyBundleV2{}, TLFWriterKeyBundleV3{}, err
}
}
return wkbV2, wkbV3, nil
}
// TLFReaderKeyBundleV2 stores all the reader keys with reader
// permissions on a TLF.
type TLFReaderKeyBundleV2 struct {
RKeys UserDeviceKeyInfoMapV2
// M_e as described in § 4.1.1. Because devices can be added
// into the key generation after it is initially created (so
// those devices can get access to existing data), we track
// multiple ephemeral public keys; the one used by a
// particular device is specified by EPubKeyIndex in its
// TLFCryptoKeyInfo struct. This list is needed so a reader
// rekey doesn't modify the writer metadata.
TLFReaderEphemeralPublicKeys kbfscrypto.TLFEphemeralPublicKeys `codec:"readerEPubKey,omitempty"`
codec.UnknownFieldSetHandler
}
// IsReader returns true if the given user device is in the reader set.
func (trb TLFReaderKeyBundleV2) IsReader(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
_, ok := trb.RKeys[user][deviceKey.KID()]
return ok
}
// TLFReaderKeyGenerationsV2 stores a slice of TLFReaderKeyBundleV2,
// where the last element is the current generation.
type TLFReaderKeyGenerationsV2 []TLFReaderKeyBundleV2
// LatestKeyGeneration returns the current key generation for this TLF.
func (rkg TLFReaderKeyGenerationsV2) LatestKeyGeneration() KeyGen {
return KeyGen(len(rkg))
}
// IsReader returns whether or not the user+device is an authorized reader
// for the latest generation.
func (rkg TLFReaderKeyGenerationsV2) IsReader(user keybase1.UID, deviceKey kbfscrypto.CryptPublicKey) bool {
keyGen := rkg.LatestKeyGeneration()
if keyGen < 1 {
return false
}
return rkg[keyGen-1].IsReader(user, deviceKey)
}
// ToTLFReaderKeyBundleV3 converts a TLFReaderKeyGenerationsV2 to a TLFReaderkeyBundleV3.
func (rkg TLFReaderKeyGenerationsV2) ToTLFReaderKeyBundleV3(
codec kbfscodec.Codec, wkb TLFWriterKeyBundleV2) (
TLFReaderKeyBundleV3, error) {
keyGen := rkg.LatestKeyGeneration()
if keyGen < FirstValidKeyGen {
return TLFReaderKeyBundleV3{}, errors.New("No key generations to convert")
}
rkbV3 := TLFReaderKeyBundleV3{
Keys: make(UserDeviceKeyInfoMapV3),
}
// Copy the latest UserDeviceKeyInfoMap.
rkb := rkg[keyGen-FirstValidKeyGen]
// Copy all of the TLFReaderEphemeralPublicKeys.
rkbV3.TLFEphemeralPublicKeys = make(kbfscrypto.TLFEphemeralPublicKeys,
len(rkb.TLFReaderEphemeralPublicKeys))
copy(rkbV3.TLFEphemeralPublicKeys[:], rkb.TLFReaderEphemeralPublicKeys)
// Track a mapping of old writer ephemeral pubkey index to new
// reader ephemeral pubkey index.
pubKeyIndicesMap := make(map[int]int)
// We need to copy these in a slightly annoying way to work around
// the negative index hack. In V3 readers always have their ePubKey
// in the TLFReaderEphemeralPublicKeys list. In V2 they only do if
// the index is negative. Otherwise it's in the writer's list.
for uid, dkim := range rkb.RKeys {
dkimV3 := make(DeviceKeyInfoMapV3)
for kid, info := range dkim {
var infoCopy TLFCryptKeyInfo
err := kbfscodec.Update(codec, &infoCopy, info)
if err != nil {
return TLFReaderKeyBundleV3{}, err
}
keyLocation, index, ePubKey, err :=
GetEphemeralPublicKeyInfoV2(info, wkb, rkb)
if err != nil {
return TLFReaderKeyBundleV3{}, err
}
switch keyLocation {
case WriterEPubKeys:
// Map the old index in the writer list to a new index
// at the end of the reader list.
newIndex, ok := pubKeyIndicesMap[index]
if !ok {
| rkbV3.TLFEphemeralPublicKeys =
append(rkbV3.TLFEphemeralPublicKeys, ePubKey)
// TODO: This index depends on
// map iteration order, which
// varies. Impose a consistent
// order on these indices.
newIndex = len(rkbV3.TLFEphemeralPublicKeys) - 1
pubKeyIndicesMap[index] = newIndex
}
| conditional_block | |
redisstore.go |
conn := rs.GetConn()
defer conn.Close()
//Todo protect for duped names
memberKey := rs.keyJoiner(rs.runContext, "_redis", "_member", config.MyMachineID)
leader, err := rs.LeaderName()
utils.HandleError(err)
if leader == config.MyMachineID {
rs.runContext.Lock()
config.MyType = types.TypeSporeLeader
rs.runContext.Config.MyType = types.TypeSporeLeader
rs.runContext.Unlock()
}
spore := types.Spore{ID: config.MyMachineID, MemberIP: config.MyIP.String(), MemberType: config.MyType}
err = rs.Set(spore, spore.ID, types.SentinelEnd)
if err == types.ErrIDExists {
err = rs.Update(spore, spore.ID, types.SentinelEnd)
utils.HandleError(err)
} else if err == nil {
types.EventStoreSporeAdded.EmitAll(rs.runContext)
} else {
utils.HandleError(err)
}
_, err = conn.Do("SET", memberKey, config.MyMachineID, "PX", CheckinExpireMs)
utils.HandleError(err)
}
func newRedisConnPool(server string) *redis.Pool {
return &redis.Pool{
MaxIdle: 10,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", server)
if err != nil {
return nil, err
}
return c, err
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
}
}
func (rs *RedisStore) Run(context *types.RunContext) {
rs.mu.Lock()
rs.stopCast = utils.SignalCast{}
exit, _ := rs.stopCast.Listen()
rs.mu.Unlock()
rs.runLeaderElection()
rs.runCheckIn()
for {
select {
case <-time.After(time.Millisecond * CheckinEveryMs):
rs.runLeaderElection()
rs.runCheckIn()
rs.runPruning()
case <-exit:
return
}
}
}
func (rs *RedisStore) Init(runContext *types.RunContext) {
rs.initOnce.Do(func() {
rs.runContext = runContext
if runContext.Config.ConnectionString == "" {
utils.HandleError(types.ErrConnectionStringNotSet)
}
connectionString := strings.TrimPrefix(runContext.Config.ConnectionString, "redis://")
rs.connPool = newRedisConnPool(connectionString)
runContext.Lock()
runContext.Store = rs
runContext.Unlock()
})
return
}
func (rs *RedisStore) GetConn() redis.Conn {
conn := rs.connPool.Get()
if conn.Err() != nil {
utils.HandleError(conn.Err())
}
return conn
}
func (rs *RedisStore) Stop() {
rs.stopCastMu.Lock()
defer rs.stopCastMu.Unlock()
rs.stopCast.Signal()
}
func (rs *RedisStore) ProcName() string {
return "RedisStore"
}
func (rs *RedisStore) ShouldRun(context *types.RunContext) bool {
return true
}
func (rs *RedisStore) Get(v interface{}, id string) error {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HGET", rs.typeKey(rs.runContext, v), id)
data, err := redis.String(resp, err)
if err != nil {
return wrapError(err)
}
err = utils.Unmarshall(data, v)
if err != nil {
return wrapError(err)
}
return nil
}
func (rs *RedisStore) Exists(v interface{}, id string) (bool, error) {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HEXISTS", rs.typeKey(rs.runContext, v), id)
exists, err := redis.Bool(resp, err)
if err != nil {
return false, wrapError(err)
}
return exists, nil
}
func (rs *RedisStore) GetAll(v interface{}, start int, end int) error {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HVALS", rs.typeKey(rs.runContext, v))
data, err := redis.Strings(resp, err)
if err != nil {
return wrapError(err)
}
if end == types.SentinelEnd {
end = len(data)
}
joined := utils.JsonListFromObjects(data[start:end]...)
err = utils.Unmarshall(joined, v)
if err != nil {
return err
}
return nil
}
func (rs *RedisStore) safeSet(v interface{}, id string, logTrim int, update bool) error {
conn := rs.GetConn()
defer conn.Close()
typeKey := rs.typeKey(rs.runContext, v)
data, err := utils.Marshall(v)
if err != nil {
return wrapError(err)
}
if id == "" {
return types.ErrIDEmpty
}
op := "HSET"
if !update {
op = "HSETNX"
}
wasSet, err := redis.Int(conn.Do(op, typeKey, id, data))
if err != nil {
return wrapError(err)
}
if wasSet == 1 || update {
meta, err := types.NewMeta(v)
utils.HandleError(err)
action := types.StoreActionUpdate
if !update {
action = types.StoreActionCreate
}
types.StoreEvent(action, meta).EmitAll(rs.runContext)
logKey := rs.typeKey(rs.runContext, v, "__log")
_, err = conn.Do("LPUSH", logKey, data)
if err != nil {
return wrapError(err)
}
if logTrim != types.SentinelEnd {
_, err = conn.Do("LTRIM", logKey, 0, logTrim)
if err != nil {
return wrapError(err)
}
}
return nil
} else {
return types.ErrIDExists
}
}
func (rs *RedisStore) Set(v interface{}, id string, logTrim int) error {
return rs.safeSet(v, id, logTrim, false)
}
func (rs *RedisStore) Update(v interface{}, id string, logTrim int) error {
return rs.safeSet(v, id, logTrim, true)
}
func (rs *RedisStore) Delete(v interface{}, id string) error {
conn := rs.GetConn()
defer conn.Close()
exists, err := redis.Int(conn.Do("HDEL", rs.typeKey(rs.runContext, v), id))
if exists != 1 {
return types.ErrNoneFound
}
if err != nil {
return wrapError(err)
}
meta, err := types.NewMeta(v)
utils.HandleError(err)
types.StoreEvent(types.StoreActionDelete, meta).EmitAll(rs.runContext)
return nil
}
func (rs *RedisStore) DeleteAll(v interface{}) error {
conn := rs.GetConn()
defer conn.Close()
_, err := conn.Do("DEL", rs.typeKey(rs.runContext, v))
if err != nil {
return wrapError(err)
}
meta, err := types.NewMeta(v)
utils.HandleError(err)
types.StoreEvent(types.StoreActionDeleteAll, meta).EmitAll(rs.runContext)
return nil
}
func (rs *RedisStore) IsHealthy(sporeName string) (bool, error) {
conn := rs.GetConn()
defer conn.Close()
memberKey := rs.keyJoiner(rs.runContext, "_redis", "_member", sporeName)
resp, err := conn.Do("EXISTS", memberKey)
exists, err := redis.Bool(resp, err)
if err != nil {
return false, wrapError(err)
}
return exists, nil
}
func (rs *RedisStore) LeaderName() (string, error) {
leaderKey := rs.keyJoiner(rs.runContext, "_redis", "_leader")
conn := rs.GetConn()
defer conn.Close()
name, err := redis.String(conn.Do("GET", leaderKey))
return name, wrapError(err)
}
func (rs *RedisStore) Publish(v interface{}, channels ...string) error {
// TODO(parham): if no listeners send later.
dump, err := utils.Marshall(v)
if err != nil {
return err
}
conn := rs.GetConn()
defer conn.Close()
for _, channel := range channels {
fullChanName := rs.keyJoiner(rs.runContext, PubSubChannelNamePrefix, channel)
conn.Send("PUBLISH", fullChanName, dump)
}
conn.Flush()
return err
}
func (rs *RedisStore) Subscribe(channel string) (*types.SubscriptionManager, error) {
messages := make(chan string)
sm := &types.SubscriptionManager{ID: utils.GenGuid(), Messages: messages, Exit: utils.SignalCast{}}
conn := rs.GetConn()
psc := redis.PubSubConn{conn}
fullChanName := rs.keyJoiner(rs.runContext, PubSubChannelNamePrefix, channel)
err := psc.Subscribe(fullChanName)
if err != nil {
return nil, err
}
go func() {
defer psc.Close()
data := make(chan interface{})
go func() {
exit, _ := sm.Exit.Listen()
for {
select {
case <-time.Tick(time.Millisecond * 200):
dat := psc.Receive()
select {
case data <- dat:
default:
return
}
case <-exit: | return
} | random_line_split | |
redisstore.go | string {
items := append(runContext.NamespacePrefixParts(), parts...)
return strings.Join(items, ":")
}
func (rs RedisStore) typeKey(runContext *types.RunContext, v interface{}, parts ...string) string {
meta, err := types.NewMeta(v)
utils.HandleError(err)
parts = append([]string{meta.TypeName}, parts...)
return rs.keyJoiner(runContext, parts...)
}
func (rs *RedisStore) runLeaderElection() {
config := rs.runContext.Config
if config.MyType != types.TypeSporeWatcher {
leaderKey := rs.keyJoiner(rs.runContext, "_redis", "_leader")
conn := rs.GetConn()
defer conn.Close()
leaderChange, err := redis.String(conn.Do("SET", leaderKey, config.MyMachineID, "NX", "PX", LeadershipExpireMs))
if err != redis.ErrNil {
utils.HandleError(err)
}
if leaderChange == "OK" {
types.EventStoreLeaderChange.EmitAll(rs.runContext)
}
leader, err := rs.LeaderName()
utils.HandleError(err)
if leader == config.MyMachineID {
_, err = conn.Do("PEXPIRE", leaderKey, LeadershipExpireMs)
utils.HandleError(err)
}
}
}
func (rs *RedisStore) runPruning() {
spores := []types.Spore{}
err := rs.GetAll(&spores, 0, types.SentinelEnd)
utils.HandleError(err)
for _, spore := range spores {
healthy, err := rs.IsHealthy(spore.ID)
utils.HandleError(err)
if !healthy {
utils.LogWarn("Spore" + spore.ID + "looks dead, purning.")
err := rs.Delete(spore, spore.ID)
utils.HandleError(err)
types.EventStoreSporeExit.EmitAll(rs.runContext)
}
}
}
func (rs *RedisStore) runCheckIn() {
config := rs.runContext.Config
conn := rs.GetConn()
defer conn.Close()
//Todo protect for duped names
memberKey := rs.keyJoiner(rs.runContext, "_redis", "_member", config.MyMachineID)
leader, err := rs.LeaderName()
utils.HandleError(err)
if leader == config.MyMachineID {
rs.runContext.Lock()
config.MyType = types.TypeSporeLeader
rs.runContext.Config.MyType = types.TypeSporeLeader
rs.runContext.Unlock()
}
spore := types.Spore{ID: config.MyMachineID, MemberIP: config.MyIP.String(), MemberType: config.MyType}
err = rs.Set(spore, spore.ID, types.SentinelEnd)
if err == types.ErrIDExists {
err = rs.Update(spore, spore.ID, types.SentinelEnd)
utils.HandleError(err)
} else if err == nil {
types.EventStoreSporeAdded.EmitAll(rs.runContext)
} else {
utils.HandleError(err)
}
_, err = conn.Do("SET", memberKey, config.MyMachineID, "PX", CheckinExpireMs)
utils.HandleError(err)
}
func | (server string) *redis.Pool {
return &redis.Pool{
MaxIdle: 10,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", server)
if err != nil {
return nil, err
}
return c, err
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
}
}
func (rs *RedisStore) Run(context *types.RunContext) {
rs.mu.Lock()
rs.stopCast = utils.SignalCast{}
exit, _ := rs.stopCast.Listen()
rs.mu.Unlock()
rs.runLeaderElection()
rs.runCheckIn()
for {
select {
case <-time.After(time.Millisecond * CheckinEveryMs):
rs.runLeaderElection()
rs.runCheckIn()
rs.runPruning()
case <-exit:
return
}
}
}
func (rs *RedisStore) Init(runContext *types.RunContext) {
rs.initOnce.Do(func() {
rs.runContext = runContext
if runContext.Config.ConnectionString == "" {
utils.HandleError(types.ErrConnectionStringNotSet)
}
connectionString := strings.TrimPrefix(runContext.Config.ConnectionString, "redis://")
rs.connPool = newRedisConnPool(connectionString)
runContext.Lock()
runContext.Store = rs
runContext.Unlock()
})
return
}
func (rs *RedisStore) GetConn() redis.Conn {
conn := rs.connPool.Get()
if conn.Err() != nil {
utils.HandleError(conn.Err())
}
return conn
}
func (rs *RedisStore) Stop() {
rs.stopCastMu.Lock()
defer rs.stopCastMu.Unlock()
rs.stopCast.Signal()
}
func (rs *RedisStore) ProcName() string {
return "RedisStore"
}
func (rs *RedisStore) ShouldRun(context *types.RunContext) bool {
return true
}
func (rs *RedisStore) Get(v interface{}, id string) error {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HGET", rs.typeKey(rs.runContext, v), id)
data, err := redis.String(resp, err)
if err != nil {
return wrapError(err)
}
err = utils.Unmarshall(data, v)
if err != nil {
return wrapError(err)
}
return nil
}
func (rs *RedisStore) Exists(v interface{}, id string) (bool, error) {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HEXISTS", rs.typeKey(rs.runContext, v), id)
exists, err := redis.Bool(resp, err)
if err != nil {
return false, wrapError(err)
}
return exists, nil
}
func (rs *RedisStore) GetAll(v interface{}, start int, end int) error {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HVALS", rs.typeKey(rs.runContext, v))
data, err := redis.Strings(resp, err)
if err != nil {
return wrapError(err)
}
if end == types.SentinelEnd {
end = len(data)
}
joined := utils.JsonListFromObjects(data[start:end]...)
err = utils.Unmarshall(joined, v)
if err != nil {
return err
}
return nil
}
func (rs *RedisStore) safeSet(v interface{}, id string, logTrim int, update bool) error {
conn := rs.GetConn()
defer conn.Close()
typeKey := rs.typeKey(rs.runContext, v)
data, err := utils.Marshall(v)
if err != nil {
return wrapError(err)
}
if id == "" {
return types.ErrIDEmpty
}
op := "HSET"
if !update {
op = "HSETNX"
}
wasSet, err := redis.Int(conn.Do(op, typeKey, id, data))
if err != nil {
return wrapError(err)
}
if wasSet == 1 || update {
meta, err := types.NewMeta(v)
utils.HandleError(err)
action := types.StoreActionUpdate
if !update {
action = types.StoreActionCreate
}
types.StoreEvent(action, meta).EmitAll(rs.runContext)
logKey := rs.typeKey(rs.runContext, v, "__log")
_, err = conn.Do("LPUSH", logKey, data)
if err != nil {
return wrapError(err)
}
if logTrim != types.SentinelEnd {
_, err = conn.Do("LTRIM", logKey, 0, logTrim)
if err != nil {
return wrapError(err)
}
}
return nil
} else {
return types.ErrIDExists
}
}
func (rs *RedisStore) Set(v interface{}, id string, logTrim int) error {
return rs.safeSet(v, id, logTrim, false)
}
func (rs *RedisStore) Update(v interface{}, id string, logTrim int) error {
return rs.safeSet(v, id, logTrim, true)
}
func (rs *RedisStore) Delete(v interface{}, id string) error {
conn := rs.GetConn()
defer conn.Close()
exists, err := redis.Int(conn.Do("HDEL", rs.typeKey(rs.runContext, v), id))
if exists != 1 {
return types.ErrNoneFound
}
if err != nil {
return wrapError(err)
}
meta, err := types.NewMeta(v)
utils.HandleError(err)
types.StoreEvent(types.StoreActionDelete, meta).EmitAll(rs.runContext)
return nil
}
func (rs *RedisStore) DeleteAll(v interface{}) error {
conn := rs.GetConn()
defer conn.Close()
_, err := conn.Do("DEL", rs.typeKey(rs.runContext, v))
if err != nil {
return wrapError(err)
}
meta, err := types.NewMeta(v)
utils.HandleError(err)
types.StoreEvent(types.StoreActionDeleteAll, meta).EmitAll(rs.runContext)
return nil
}
func (rs *RedisStore) IsHealthy(sporeName string) (bool, error) {
conn := rs.GetConn()
defer conn.Close()
memberKey := rs.keyJoiner(rs.runContext, | newRedisConnPool | identifier_name |
redisstore.go | string |
func (rs RedisStore) typeKey(runContext *types.RunContext, v interface{}, parts ...string) string {
meta, err := types.NewMeta(v)
utils.HandleError(err)
parts = append([]string{meta.TypeName}, parts...)
return rs.keyJoiner(runContext, parts...)
}
func (rs *RedisStore) runLeaderElection() {
config := rs.runContext.Config
if config.MyType != types.TypeSporeWatcher {
leaderKey := rs.keyJoiner(rs.runContext, "_redis", "_leader")
conn := rs.GetConn()
defer conn.Close()
leaderChange, err := redis.String(conn.Do("SET", leaderKey, config.MyMachineID, "NX", "PX", LeadershipExpireMs))
if err != redis.ErrNil {
utils.HandleError(err)
}
if leaderChange == "OK" {
types.EventStoreLeaderChange.EmitAll(rs.runContext)
}
leader, err := rs.LeaderName()
utils.HandleError(err)
if leader == config.MyMachineID {
_, err = conn.Do("PEXPIRE", leaderKey, LeadershipExpireMs)
utils.HandleError(err)
}
}
}
func (rs *RedisStore) runPruning() {
spores := []types.Spore{}
err := rs.GetAll(&spores, 0, types.SentinelEnd)
utils.HandleError(err)
for _, spore := range spores {
healthy, err := rs.IsHealthy(spore.ID)
utils.HandleError(err)
if !healthy {
utils.LogWarn("Spore" + spore.ID + "looks dead, purning.")
err := rs.Delete(spore, spore.ID)
utils.HandleError(err)
types.EventStoreSporeExit.EmitAll(rs.runContext)
}
}
}
func (rs *RedisStore) runCheckIn() {
config := rs.runContext.Config
conn := rs.GetConn()
defer conn.Close()
//Todo protect for duped names
memberKey := rs.keyJoiner(rs.runContext, "_redis", "_member", config.MyMachineID)
leader, err := rs.LeaderName()
utils.HandleError(err)
if leader == config.MyMachineID {
rs.runContext.Lock()
config.MyType = types.TypeSporeLeader
rs.runContext.Config.MyType = types.TypeSporeLeader
rs.runContext.Unlock()
}
spore := types.Spore{ID: config.MyMachineID, MemberIP: config.MyIP.String(), MemberType: config.MyType}
err = rs.Set(spore, spore.ID, types.SentinelEnd)
if err == types.ErrIDExists {
err = rs.Update(spore, spore.ID, types.SentinelEnd)
utils.HandleError(err)
} else if err == nil {
types.EventStoreSporeAdded.EmitAll(rs.runContext)
} else {
utils.HandleError(err)
}
_, err = conn.Do("SET", memberKey, config.MyMachineID, "PX", CheckinExpireMs)
utils.HandleError(err)
}
func newRedisConnPool(server string) *redis.Pool {
return &redis.Pool{
MaxIdle: 10,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", server)
if err != nil {
return nil, err
}
return c, err
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
}
}
func (rs *RedisStore) Run(context *types.RunContext) {
rs.mu.Lock()
rs.stopCast = utils.SignalCast{}
exit, _ := rs.stopCast.Listen()
rs.mu.Unlock()
rs.runLeaderElection()
rs.runCheckIn()
for {
select {
case <-time.After(time.Millisecond * CheckinEveryMs):
rs.runLeaderElection()
rs.runCheckIn()
rs.runPruning()
case <-exit:
return
}
}
}
func (rs *RedisStore) Init(runContext *types.RunContext) {
rs.initOnce.Do(func() {
rs.runContext = runContext
if runContext.Config.ConnectionString == "" {
utils.HandleError(types.ErrConnectionStringNotSet)
}
connectionString := strings.TrimPrefix(runContext.Config.ConnectionString, "redis://")
rs.connPool = newRedisConnPool(connectionString)
runContext.Lock()
runContext.Store = rs
runContext.Unlock()
})
return
}
func (rs *RedisStore) GetConn() redis.Conn {
conn := rs.connPool.Get()
if conn.Err() != nil {
utils.HandleError(conn.Err())
}
return conn
}
func (rs *RedisStore) Stop() {
rs.stopCastMu.Lock()
defer rs.stopCastMu.Unlock()
rs.stopCast.Signal()
}
func (rs *RedisStore) ProcName() string {
return "RedisStore"
}
func (rs *RedisStore) ShouldRun(context *types.RunContext) bool {
return true
}
func (rs *RedisStore) Get(v interface{}, id string) error {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HGET", rs.typeKey(rs.runContext, v), id)
data, err := redis.String(resp, err)
if err != nil {
return wrapError(err)
}
err = utils.Unmarshall(data, v)
if err != nil {
return wrapError(err)
}
return nil
}
func (rs *RedisStore) Exists(v interface{}, id string) (bool, error) {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HEXISTS", rs.typeKey(rs.runContext, v), id)
exists, err := redis.Bool(resp, err)
if err != nil {
return false, wrapError(err)
}
return exists, nil
}
func (rs *RedisStore) GetAll(v interface{}, start int, end int) error {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HVALS", rs.typeKey(rs.runContext, v))
data, err := redis.Strings(resp, err)
if err != nil {
return wrapError(err)
}
if end == types.SentinelEnd {
end = len(data)
}
joined := utils.JsonListFromObjects(data[start:end]...)
err = utils.Unmarshall(joined, v)
if err != nil {
return err
}
return nil
}
func (rs *RedisStore) safeSet(v interface{}, id string, logTrim int, update bool) error {
conn := rs.GetConn()
defer conn.Close()
typeKey := rs.typeKey(rs.runContext, v)
data, err := utils.Marshall(v)
if err != nil {
return wrapError(err)
}
if id == "" {
return types.ErrIDEmpty
}
op := "HSET"
if !update {
op = "HSETNX"
}
wasSet, err := redis.Int(conn.Do(op, typeKey, id, data))
if err != nil {
return wrapError(err)
}
if wasSet == 1 || update {
meta, err := types.NewMeta(v)
utils.HandleError(err)
action := types.StoreActionUpdate
if !update {
action = types.StoreActionCreate
}
types.StoreEvent(action, meta).EmitAll(rs.runContext)
logKey := rs.typeKey(rs.runContext, v, "__log")
_, err = conn.Do("LPUSH", logKey, data)
if err != nil {
return wrapError(err)
}
if logTrim != types.SentinelEnd {
_, err = conn.Do("LTRIM", logKey, 0, logTrim)
if err != nil {
return wrapError(err)
}
}
return nil
} else {
return types.ErrIDExists
}
}
func (rs *RedisStore) Set(v interface{}, id string, logTrim int) error {
return rs.safeSet(v, id, logTrim, false)
}
func (rs *RedisStore) Update(v interface{}, id string, logTrim int) error {
return rs.safeSet(v, id, logTrim, true)
}
func (rs *RedisStore) Delete(v interface{}, id string) error {
conn := rs.GetConn()
defer conn.Close()
exists, err := redis.Int(conn.Do("HDEL", rs.typeKey(rs.runContext, v), id))
if exists != 1 {
return types.ErrNoneFound
}
if err != nil {
return wrapError(err)
}
meta, err := types.NewMeta(v)
utils.HandleError(err)
types.StoreEvent(types.StoreActionDelete, meta).EmitAll(rs.runContext)
return nil
}
func (rs *RedisStore) DeleteAll(v interface{}) error {
conn := rs.GetConn()
defer conn.Close()
_, err := conn.Do("DEL", rs.typeKey(rs.runContext, v))
if err != nil {
return wrapError(err)
}
meta, err := types.NewMeta(v)
utils.HandleError(err)
types.StoreEvent(types.StoreActionDeleteAll, meta).EmitAll(rs.runContext)
return nil
}
func (rs *RedisStore) IsHealthy(sporeName string) (bool, error) {
conn := rs.GetConn()
defer conn.Close()
memberKey := rs.keyJoiner(rs.runContext | {
items := append(runContext.NamespacePrefixParts(), parts...)
return strings.Join(items, ":")
} | identifier_body |
redisstore.go | string {
items := append(runContext.NamespacePrefixParts(), parts...)
return strings.Join(items, ":")
}
func (rs RedisStore) typeKey(runContext *types.RunContext, v interface{}, parts ...string) string {
meta, err := types.NewMeta(v)
utils.HandleError(err)
parts = append([]string{meta.TypeName}, parts...)
return rs.keyJoiner(runContext, parts...)
}
func (rs *RedisStore) runLeaderElection() {
config := rs.runContext.Config
if config.MyType != types.TypeSporeWatcher {
leaderKey := rs.keyJoiner(rs.runContext, "_redis", "_leader")
conn := rs.GetConn()
defer conn.Close()
leaderChange, err := redis.String(conn.Do("SET", leaderKey, config.MyMachineID, "NX", "PX", LeadershipExpireMs))
if err != redis.ErrNil {
utils.HandleError(err)
}
if leaderChange == "OK" {
types.EventStoreLeaderChange.EmitAll(rs.runContext)
}
leader, err := rs.LeaderName()
utils.HandleError(err)
if leader == config.MyMachineID {
_, err = conn.Do("PEXPIRE", leaderKey, LeadershipExpireMs)
utils.HandleError(err)
}
}
}
func (rs *RedisStore) runPruning() {
spores := []types.Spore{}
err := rs.GetAll(&spores, 0, types.SentinelEnd)
utils.HandleError(err)
for _, spore := range spores {
healthy, err := rs.IsHealthy(spore.ID)
utils.HandleError(err)
if !healthy {
utils.LogWarn("Spore" + spore.ID + "looks dead, purning.")
err := rs.Delete(spore, spore.ID)
utils.HandleError(err)
types.EventStoreSporeExit.EmitAll(rs.runContext)
}
}
}
func (rs *RedisStore) runCheckIn() {
config := rs.runContext.Config
conn := rs.GetConn()
defer conn.Close()
//Todo protect for duped names
memberKey := rs.keyJoiner(rs.runContext, "_redis", "_member", config.MyMachineID)
leader, err := rs.LeaderName()
utils.HandleError(err)
if leader == config.MyMachineID {
rs.runContext.Lock()
config.MyType = types.TypeSporeLeader
rs.runContext.Config.MyType = types.TypeSporeLeader
rs.runContext.Unlock()
}
spore := types.Spore{ID: config.MyMachineID, MemberIP: config.MyIP.String(), MemberType: config.MyType}
err = rs.Set(spore, spore.ID, types.SentinelEnd)
if err == types.ErrIDExists {
err = rs.Update(spore, spore.ID, types.SentinelEnd)
utils.HandleError(err)
} else if err == nil {
types.EventStoreSporeAdded.EmitAll(rs.runContext)
} else {
utils.HandleError(err)
}
_, err = conn.Do("SET", memberKey, config.MyMachineID, "PX", CheckinExpireMs)
utils.HandleError(err)
}
func newRedisConnPool(server string) *redis.Pool {
return &redis.Pool{
MaxIdle: 10,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", server)
if err != nil {
return nil, err
}
return c, err
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
}
}
func (rs *RedisStore) Run(context *types.RunContext) {
rs.mu.Lock()
rs.stopCast = utils.SignalCast{}
exit, _ := rs.stopCast.Listen()
rs.mu.Unlock()
rs.runLeaderElection()
rs.runCheckIn()
for {
select {
case <-time.After(time.Millisecond * CheckinEveryMs):
rs.runLeaderElection()
rs.runCheckIn()
rs.runPruning()
case <-exit:
return
}
}
}
func (rs *RedisStore) Init(runContext *types.RunContext) {
rs.initOnce.Do(func() {
rs.runContext = runContext
if runContext.Config.ConnectionString == "" {
utils.HandleError(types.ErrConnectionStringNotSet)
}
connectionString := strings.TrimPrefix(runContext.Config.ConnectionString, "redis://")
rs.connPool = newRedisConnPool(connectionString)
runContext.Lock()
runContext.Store = rs
runContext.Unlock()
})
return
}
func (rs *RedisStore) GetConn() redis.Conn {
conn := rs.connPool.Get()
if conn.Err() != nil {
utils.HandleError(conn.Err())
}
return conn
}
func (rs *RedisStore) Stop() {
rs.stopCastMu.Lock()
defer rs.stopCastMu.Unlock()
rs.stopCast.Signal()
}
func (rs *RedisStore) ProcName() string {
return "RedisStore"
}
func (rs *RedisStore) ShouldRun(context *types.RunContext) bool {
return true
}
func (rs *RedisStore) Get(v interface{}, id string) error {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HGET", rs.typeKey(rs.runContext, v), id)
data, err := redis.String(resp, err)
if err != nil {
return wrapError(err)
}
err = utils.Unmarshall(data, v)
if err != nil {
return wrapError(err)
}
return nil
}
func (rs *RedisStore) Exists(v interface{}, id string) (bool, error) {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HEXISTS", rs.typeKey(rs.runContext, v), id)
exists, err := redis.Bool(resp, err)
if err != nil {
return false, wrapError(err)
}
return exists, nil
}
func (rs *RedisStore) GetAll(v interface{}, start int, end int) error {
conn := rs.GetConn()
defer conn.Close()
resp, err := conn.Do("HVALS", rs.typeKey(rs.runContext, v))
data, err := redis.Strings(resp, err)
if err != nil {
return wrapError(err)
}
if end == types.SentinelEnd {
end = len(data)
}
joined := utils.JsonListFromObjects(data[start:end]...)
err = utils.Unmarshall(joined, v)
if err != nil {
return err
}
return nil
}
func (rs *RedisStore) safeSet(v interface{}, id string, logTrim int, update bool) error {
conn := rs.GetConn()
defer conn.Close()
typeKey := rs.typeKey(rs.runContext, v)
data, err := utils.Marshall(v)
if err != nil {
return wrapError(err)
}
if id == "" {
return types.ErrIDEmpty
}
op := "HSET"
if !update |
wasSet, err := redis.Int(conn.Do(op, typeKey, id, data))
if err != nil {
return wrapError(err)
}
if wasSet == 1 || update {
meta, err := types.NewMeta(v)
utils.HandleError(err)
action := types.StoreActionUpdate
if !update {
action = types.StoreActionCreate
}
types.StoreEvent(action, meta).EmitAll(rs.runContext)
logKey := rs.typeKey(rs.runContext, v, "__log")
_, err = conn.Do("LPUSH", logKey, data)
if err != nil {
return wrapError(err)
}
if logTrim != types.SentinelEnd {
_, err = conn.Do("LTRIM", logKey, 0, logTrim)
if err != nil {
return wrapError(err)
}
}
return nil
} else {
return types.ErrIDExists
}
}
func (rs *RedisStore) Set(v interface{}, id string, logTrim int) error {
return rs.safeSet(v, id, logTrim, false)
}
func (rs *RedisStore) Update(v interface{}, id string, logTrim int) error {
return rs.safeSet(v, id, logTrim, true)
}
func (rs *RedisStore) Delete(v interface{}, id string) error {
conn := rs.GetConn()
defer conn.Close()
exists, err := redis.Int(conn.Do("HDEL", rs.typeKey(rs.runContext, v), id))
if exists != 1 {
return types.ErrNoneFound
}
if err != nil {
return wrapError(err)
}
meta, err := types.NewMeta(v)
utils.HandleError(err)
types.StoreEvent(types.StoreActionDelete, meta).EmitAll(rs.runContext)
return nil
}
func (rs *RedisStore) DeleteAll(v interface{}) error {
conn := rs.GetConn()
defer conn.Close()
_, err := conn.Do("DEL", rs.typeKey(rs.runContext, v))
if err != nil {
return wrapError(err)
}
meta, err := types.NewMeta(v)
utils.HandleError(err)
types.StoreEvent(types.StoreActionDeleteAll, meta).EmitAll(rs.runContext)
return nil
}
func (rs *RedisStore) IsHealthy(sporeName string) (bool, error) {
conn := rs.GetConn()
defer conn.Close()
memberKey := rs.keyJoiner(rs.runContext | {
op = "HSETNX"
} | conditional_block |
InteractiveFormsTest.go | Page.AnnotPushBack(signature1)
// CheckBox Widget Creation
// Create a check box widget that is not checked.
check1 := CheckBoxWidgetCreate(doc, NewRect(140.0, 490.0, 170.0, 520.0))
check1.RefreshAppearance()
blankPage.AnnotPushBack(check1)
// Create a check box widget that is checked.
check2 := CheckBoxWidgetCreate(doc, NewRect(190.0, 490.0, 250.0, 540.0), "employee.name.check1")
check2.SetBackgroundColor(NewColorPt(1.0, 1.0, 1.0), 3)
check2.SetBorderColor(NewColorPt(0.0, 0.0, 0.0), 3)
// Check the widget (by default it is unchecked).
check2.SetChecked(true)
check2.RefreshAppearance()
blankPage.AnnotPushBack(check2)
// PushButton Widget Creation
pushbutton1 := PushButtonWidgetCreate(doc, NewRect(380.0, 490.0, 520.0, 540.0))
pushbutton1.SetTextColor(NewColorPt(1.0, 1.0, 1.0), 3)
pushbutton1.SetFontSize(36)
pushbutton1.SetBackgroundColor(NewColorPt(0.0, 0.0, 0.0), 3)
// Add a caption for the pushbutton.
pushbutton1.SetStaticCaptionText("PushButton")
pushbutton1.RefreshAppearance()
blankPage.AnnotPushBack(pushbutton1)
// ComboBox Widget Creation
combo1 := ComboBoxWidgetCreate(doc, NewRect(280.0, 560.0, 580.0, 610.0));
// Add options to the combobox widget.
combo1.AddOption("Combo Box No.1")
combo1.AddOption("Combo Box No.2")
combo1.AddOption("Combo Box No.3")
// Make one of the options in the combo box selected by default.
combo1.SetSelectedOption("Combo Box No.2")
combo1.SetTextColor(NewColorPt(1.0, 0.0, 0.0), 3)
combo1.SetFontSize(28)
combo1.RefreshAppearance()
blankPage.AnnotPushBack(combo1)
// ListBox Widget Creation
list1 := ListBoxWidgetCreate(doc, NewRect(400.0, 620.0, 580.0, 730.0))
// Add one option to the listbox widget.
list1.AddOption("List Box No.1")
// Add multiple options to the listbox widget in a batch.
listOptions := NewVectorString()
listOptions.Add("List Box No.2")
listOptions.Add("List Box No.3")
list1.AddOptions(listOptions)
// Select some of the options in list box as default options
list1.SetSelectedOptions(listOptions)
// Enable list box to have multi-select when editing.
list1.GetField().SetFlag(FieldE_multiselect, true)
list1.SetFont(FontCreate(doc.GetSDFDoc(),FontE_times_italic))
list1.SetTextColor(NewColorPt(1.0, 0.0, 0.0), 3)
list1.SetFontSize(28)
list1.SetBackgroundColor(NewColorPt(1.0, 1.0, 1.0), 3)
list1.RefreshAppearance()
blankPage.AnnotPushBack(list1)
// RadioButton Widget Creation
// Create a radio button group and Add three radio buttons in it.
radioGroup := RadioButtonGroupCreate(doc, "RadioGroup")
radiobutton1 := radioGroup.Add(NewRect(140.0, 410.0, 190.0, 460.0))
radiobutton1.SetBackgroundColor(NewColorPt(1.0, 1.0, 0.0), 3)
radiobutton1.RefreshAppearance()
radiobutton2 := radioGroup.Add(NewRect(310.0, 410.0, 360.0, 460.0))
radiobutton2.SetBackgroundColor(NewColorPt(0.0, 1.0, 0.0), 3)
radiobutton2.RefreshAppearance()
radiobutton3 := radioGroup.Add(NewRect(480.0, 410.0, 530.0, 460.0))
// Enable the third radio button. By default the first one is selected
radiobutton3.EnableButton()
radiobutton3.SetBackgroundColor(NewColorPt(0.0, 1.0, 1.0), 3)
radiobutton3.RefreshAppearance()
radioGroup.AddGroupButtonsToPage(blankPage)
// Custom push button annotation creation
customPushbutton1 := PushButtonWidgetCreate(doc, NewRect(260.0, 320.0, 360.0, 360.0))
// Set the annotation appearance.
customPushbutton1.SetAppearance(CreateCustomButtonAppearance(doc, false), AnnotE_normal)
// Create 'SubmitForm' action. The action will be linked to the button.
url := FileSpecCreateURL(doc.GetSDFDoc(), "http://www.pdftron.com")
buttonAction := ActionCreateSubmitForm(url)
// Associate the above action with 'Down' event in annotations action dictionary.
annotAction := customPushbutton1.GetSDFObj().PutDict("AA")
annotAction.Put("D", buttonAction.GetSDFObj())
blankPage.AnnotPushBack(customPushbutton1)
// Add the page as the last page in the document.
doc.PagePushBack(blankPage)
// If you are not satisfied with the look of default auto-generated appearance
// streams you can delete "AP" entry from the Widget annotation and set
// "NeedAppearances" flag in AcroForm dictionary:
// doc.GetAcroForm().PutBool("NeedAppearances", true);
// This will force the viewer application to auto-generate new appearance streams
// every time the document is opened.
//
// Alternatively you can generate custom annotation appearance using ElementWriter
// and then set the "AP" entry in the widget dictionary to the new appearance
// stream.
//
// Yet another option is to pre-populate field entries with dummy text. When
// you edit the field values using PDFNet the new field appearances will match
// the old ones.
//doc.GetAcroForm().PutBool("NeedAppearances", true)
doc.RefreshFieldAppearances()
doc.Save(outputPath + "forms_test1.pdf", uint(0))
doc.Close()
fmt.Println("Done.")
//----------------------------------------------------------------------------------
// Example 2:
// Fill-in forms / Modify values of existing fields.
// Traverse all form fields in the document (and sys.stdout.write(out their names).
// Search for specific fields in the document.
//----------------------------------------------------------------------------------
doc = NewPDFDoc(outputPath + "forms_test1.pdf")
doc.InitSecurityHandler()
itr := doc.GetFieldIterator()
for itr.HasNext(){
curFieldName := itr.Current().GetName()
// Add one to the count for this field name for later processing
if val, found := fieldNames[curFieldName]; found{
fieldNames[curFieldName] = val + 1
}else{
fieldNames[curFieldName] = 1
}
fmt.Println("Field name: " + itr.Current().GetName())
fmt.Println("Field partial name: " + itr.Current().GetPartialName())
os.Stdout.Write([]byte("Field type: "))
fieldType := itr.Current().GetType()
strVal := itr.Current().GetValueAsString()
if (fieldType == FieldE_button){
os.Stdout.Write([]byte("Button\n"))
}else if (fieldType == FieldE_radio){
os.Stdout.Write([]byte("Radio button: Value = " + strVal + "\n"))
}else if (fieldType == FieldE_check){
itr.Current().SetValue(true)
os.Stdout.Write([]byte("Check box: Value = " + strVal + "\n"))
}else if (fieldType == FieldE_text){
os.Stdout.Write([]byte("Text" + "\n"))
// Edit all variable text in the document
if itr.Current().GetValue().GetMp_obj().Swigcptr() != 0 {
old_value := itr.Current().GetValueAsString();
itr.Current().SetValue("This is a new value. The old one was: " + old_value)
}
}else if (fieldType == FieldE_choice){
os.Stdout.Write([]byte("Choice" + "\n"))
}else if (fieldType == FieldE_signature){
os.Stdout.Write([]byte("Signature" + "\n"))
}
fmt.Println("------------------------------")
itr.Next()
}
// Search for a specific field
f := doc.GetField("employee.name.first")
if f.GetMp_field().Swigcptr() != 0 | {
fmt.Println("Field search for " + f.GetName() + " was successful")
} | conditional_block | |
InteractiveFormsTest.go | ())
stm := writer.End()
// Set the bounding box
stm.PutRect("BBox", 0.0, 0.0, 101.0, 37.0)
stm.PutName("Subtype","Form")
return stm
}
func | (){
PDFNetInitialize()
// The map (vector) used to store the name and count of all fields.
// This is used later on to clone the fields
fieldNames:= make(map[string]int)
//----------------------------------------------------------------------------------
// Example 1: Programatically create new Form Fields and Widget Annotations.
//----------------------------------------------------------------------------------
doc := NewPDFDoc()
// Create a blank new page and Add some form fields.
blankPage := doc.PageCreate()
// Text Widget Creation
// Create an empty text widget with black text.
text1 := TextWidgetCreate(doc, NewRect(110.0, 700.0, 380.0, 730.0))
text1.SetText("Basic Text Field")
text1.RefreshAppearance()
blankPage.AnnotPushBack(text1)
// Create a vertical text widget with blue text and a yellow background.
text2 := TextWidgetCreate(doc, NewRect(50.0, 400.0, 90.0, 730.0))
text2.SetRotation(90)
// Set the text content.
text2.SetText(" ****Lucky Stars!****");
// Set the font type, text color, font size, border color and background color.
text2.SetFont(FontCreate(doc.GetSDFDoc(), FontE_helvetica_oblique))
text2.SetFontSize(28)
text2.SetTextColor(NewColorPt(0.0, 0.0, 1.0), 3)
text2.SetBorderColor(NewColorPt(0.0, 0.0, 0.0), 3)
text2.SetBackgroundColor(NewColorPt(1.0, 1.0, 0.0), 3)
text2.RefreshAppearance()
// Add the annotation to the page.
blankPage.AnnotPushBack(text2)
// Create two new text widget with Field names employee.name.first and employee.name.last
// This logic shows how these widgets can be created using either a field name string or
// a Field object
text3 := TextWidgetCreate(doc, NewRect(110.0, 660.0, 380.0, 690.0), "employee.name.first")
text3.SetText("Levi")
text3.SetFont(FontCreate(doc.GetSDFDoc(), FontE_times_bold))
text3.RefreshAppearance()
blankPage.AnnotPushBack(text3)
empLastName := doc.FieldCreate("employee.name.last", FieldE_text, "Ackerman")
text4 := TextWidgetCreate(doc, NewRect(110.0, 620.0, 380.0, 650.0), empLastName)
text4.SetFont(FontCreate(doc.GetSDFDoc(), FontE_times_bold))
text4.RefreshAppearance()
blankPage.AnnotPushBack(text4)
// Signature Widget Creation (unsigned)
signature1 := SignatureWidgetCreate(doc, NewRect(110.0, 560.0, 260.0, 610.0))
signature1.RefreshAppearance()
blankPage.AnnotPushBack(signature1)
// CheckBox Widget Creation
// Create a check box widget that is not checked.
check1 := CheckBoxWidgetCreate(doc, NewRect(140.0, 490.0, 170.0, 520.0))
check1.RefreshAppearance()
blankPage.AnnotPushBack(check1)
// Create a check box widget that is checked.
check2 := CheckBoxWidgetCreate(doc, NewRect(190.0, 490.0, 250.0, 540.0), "employee.name.check1")
check2.SetBackgroundColor(NewColorPt(1.0, 1.0, 1.0), 3)
check2.SetBorderColor(NewColorPt(0.0, 0.0, 0.0), 3)
// Check the widget (by default it is unchecked).
check2.SetChecked(true)
check2.RefreshAppearance()
blankPage.AnnotPushBack(check2)
// PushButton Widget Creation
pushbutton1 := PushButtonWidgetCreate(doc, NewRect(380.0, 490.0, 520.0, 540.0))
pushbutton1.SetTextColor(NewColorPt(1.0, 1.0, 1.0), 3)
pushbutton1.SetFontSize(36)
pushbutton1.SetBackgroundColor(NewColorPt(0.0, 0.0, 0.0), 3)
// Add a caption for the pushbutton.
pushbutton1.SetStaticCaptionText("PushButton")
pushbutton1.RefreshAppearance()
blankPage.AnnotPushBack(pushbutton1)
// ComboBox Widget Creation
combo1 := ComboBoxWidgetCreate(doc, NewRect(280.0, 560.0, 580.0, 610.0));
// Add options to the combobox widget.
combo1.AddOption("Combo Box No.1")
combo1.AddOption("Combo Box No.2")
combo1.AddOption("Combo Box No.3")
// Make one of the options in the combo box selected by default.
combo1.SetSelectedOption("Combo Box No.2")
combo1.SetTextColor(NewColorPt(1.0, 0.0, 0.0), 3)
combo1.SetFontSize(28)
combo1.RefreshAppearance()
blankPage.AnnotPushBack(combo1)
// ListBox Widget Creation
list1 := ListBoxWidgetCreate(doc, NewRect(400.0, 620.0, 580.0, 730.0))
// Add one option to the listbox widget.
list1.AddOption("List Box No.1")
// Add multiple options to the listbox widget in a batch.
listOptions := NewVectorString()
listOptions.Add("List Box No.2")
listOptions.Add("List Box No.3")
list1.AddOptions(listOptions)
// Select some of the options in list box as default options
list1.SetSelectedOptions(listOptions)
// Enable list box to have multi-select when editing.
list1.GetField().SetFlag(FieldE_multiselect, true)
list1.SetFont(FontCreate(doc.GetSDFDoc(),FontE_times_italic))
list1.SetTextColor(NewColorPt(1.0, 0.0, 0.0), 3)
list1.SetFontSize(28)
list1.SetBackgroundColor(NewColorPt(1.0, 1.0, 1.0), 3)
list1.RefreshAppearance()
blankPage.AnnotPushBack(list1)
// RadioButton Widget Creation
// Create a radio button group and Add three radio buttons in it.
radioGroup := RadioButtonGroupCreate(doc, "RadioGroup")
radiobutton1 := radioGroup.Add(NewRect(140.0, 410.0, 190.0, 460.0))
radiobutton1.SetBackgroundColor(NewColorPt(1.0, 1.0, 0.0), 3)
radiobutton1.RefreshAppearance()
radiobutton2 := radioGroup.Add(NewRect(310.0, 410.0, 360.0, 460.0))
radiobutton2.SetBackgroundColor(NewColorPt(0.0, 1.0, 0.0), 3)
radiobutton2.RefreshAppearance()
radiobutton3 := radioGroup.Add(NewRect(480.0, 410.0, 530.0, 460.0))
// Enable the third radio button. By default the first one is selected
radiobutton3.EnableButton()
radiobutton3.SetBackgroundColor(NewColorPt(0.0, 1.0, 1.0), 3)
radiobutton3.RefreshAppearance()
radioGroup.AddGroupButtonsToPage(blankPage)
// Custom push button annotation creation
customPushbutton1 := PushButtonWidgetCreate(doc, NewRect(260.0, 320.0, 360.0, 360.0))
// Set the annotation appearance.
customPushbutton1.SetAppearance(CreateCustomButtonAppearance(doc, false), AnnotE_normal)
// Create 'SubmitForm' action. The action will be linked to the button.
url := FileSpecCreateURL(doc.GetSDFDoc(), "http://www.pdftron.com")
buttonAction := ActionCreateSubmitForm(url)
// Associate the above action with 'Down' event in annotations action dictionary.
annotAction := customPushbutton1.GetSDFObj().PutDict("AA")
annotAction.Put("D", buttonAction.GetSDFObj())
blankPage.AnnotPushBack(customPushbutton1)
// Add the page as the last | main | identifier_name |
InteractiveFormsTest.go | ())
stm := writer.End()
// Set the bounding box
stm.PutRect("BBox", 0.0, 0.0, 101.0, 37.0)
stm.PutName("Subtype","Form")
return stm
}
func main(){
PDFNetInitialize()
// The map (vector) used to store the name and count of all fields.
// This is used later on to clone the fields
fieldNames:= make(map[string]int)
//----------------------------------------------------------------------------------
// Example 1: Programatically create new Form Fields and Widget Annotations.
//----------------------------------------------------------------------------------
doc := NewPDFDoc()
// Create a blank new page and Add some form fields.
blankPage := doc.PageCreate()
// Text Widget Creation
// Create an empty text widget with black text.
text1 := TextWidgetCreate(doc, NewRect(110.0, 700.0, 380.0, 730.0))
text1.SetText("Basic Text Field")
text1.RefreshAppearance()
blankPage.AnnotPushBack(text1)
// Create a vertical text widget with blue text and a yellow background.
text2 := TextWidgetCreate(doc, NewRect(50.0, 400.0, 90.0, 730.0))
text2.SetRotation(90)
// Set the text content.
text2.SetText(" ****Lucky Stars!****");
// Set the font type, text color, font size, border color and background color.
text2.SetFont(FontCreate(doc.GetSDFDoc(), FontE_helvetica_oblique))
text2.SetFontSize(28)
text2.SetTextColor(NewColorPt(0.0, 0.0, 1.0), 3)
text2.SetBorderColor(NewColorPt(0.0, 0.0, 0.0), 3)
text2.SetBackgroundColor(NewColorPt(1.0, 1.0, 0.0), 3)
text2.RefreshAppearance()
// Add the annotation to the page.
blankPage.AnnotPushBack(text2)
// Create two new text widget with Field names employee.name.first and employee.name.last
// This logic shows how these widgets can be created using either a field name string or
// a Field object
text3 := TextWidgetCreate(doc, NewRect(110.0, 660.0, 380.0, 690.0), "employee.name.first")
text3.SetText("Levi")
text3.SetFont(FontCreate(doc.GetSDFDoc(), FontE_times_bold))
text3.RefreshAppearance()
blankPage.AnnotPushBack(text3)
empLastName := doc.FieldCreate("employee.name.last", FieldE_text, "Ackerman")
text4 := TextWidgetCreate(doc, NewRect(110.0, 620.0, 380.0, 650.0), empLastName)
text4.SetFont(FontCreate(doc.GetSDFDoc(), FontE_times_bold))
text4.RefreshAppearance()
blankPage.AnnotPushBack(text4)
// Signature Widget Creation (unsigned)
signature1 := SignatureWidgetCreate(doc, NewRect(110.0, 560.0, 260.0, 610.0))
signature1.RefreshAppearance()
blankPage.AnnotPushBack(signature1)
// CheckBox Widget Creation
// Create a check box widget that is not checked.
check1 := CheckBoxWidgetCreate(doc, NewRect(140.0, 490.0, 170.0, 520.0))
check1.RefreshAppearance()
blankPage.AnnotPushBack(check1)
// Create a check box widget that is checked.
check2 := CheckBoxWidgetCreate(doc, NewRect(190.0, 490.0, 250.0, 540.0), "employee.name.check1")
check2.SetBackgroundColor(NewColorPt(1.0, 1.0, 1.0), 3)
check2.SetBorderColor(NewColorPt(0.0, 0.0, 0.0), 3)
// Check the widget (by default it is unchecked).
check2.SetChecked(true)
check2.RefreshAppearance()
blankPage.AnnotPushBack(check2)
// PushButton Widget Creation
pushbutton1 := PushButtonWidgetCreate(doc, NewRect(380.0, 490.0, 520.0, 540.0))
pushbutton1.SetTextColor(NewColorPt(1.0, 1.0, 1.0), 3)
pushbutton1.SetFontSize(36)
pushbutton1.SetBackgroundColor(NewColorPt(0.0, 0.0, 0.0), 3)
// Add a caption for the pushbutton.
pushbutton1.SetStaticCaptionText("PushButton")
pushbutton1.RefreshAppearance()
blankPage.AnnotPushBack(pushbutton1)
// ComboBox Widget Creation
combo1 := ComboBoxWidgetCreate(doc, NewRect(280.0, 560.0, 580.0, 610.0));
// Add options to the combobox widget.
combo1.AddOption("Combo Box No.1")
combo1.AddOption("Combo Box No.2")
combo1.AddOption("Combo Box No.3")
// Make one of the options in the combo box selected by default.
combo1.SetSelectedOption("Combo Box No.2")
combo1.SetTextColor(NewColorPt(1.0, 0.0, 0.0), 3)
combo1.SetFontSize(28)
combo1.RefreshAppearance()
blankPage.AnnotPushBack(combo1)
// ListBox Widget Creation
list1 := ListBoxWidgetCreate(doc, NewRect(400.0, 620.0, 580.0, 730.0))
// Add one option to the listbox widget.
list1.AddOption("List Box No.1")
// Add multiple options to the listbox widget in a batch.
listOptions := NewVectorString()
listOptions.Add("List Box No.2")
listOptions.Add("List Box No.3")
list1.AddOptions(listOptions)
// Select some of the options in list box as default options
list1.SetSelectedOptions(listOptions)
// Enable list box to have multi-select when editing.
list1.GetField().SetFlag(FieldE_multiselect, true)
list1.SetFont(FontCreate(doc.GetSDFDoc(),FontE_times_italic))
list1.SetTextColor(NewColorPt(1.0, 0.0, 0.0), 3)
list1.SetFontSize(28)
list1.SetBackgroundColor(NewColorPt(1.0, 1.0, 1.0), 3)
list1.RefreshAppearance()
blankPage.AnnotPushBack(list1)
| radiobutton1.RefreshAppearance()
radiobutton2 := radioGroup.Add(NewRect(310.0, 410.0, 360.0, 460.0))
radiobutton2.SetBackgroundColor(NewColorPt(0.0, 1.0, 0.0), 3)
radiobutton2.RefreshAppearance()
radiobutton3 := radioGroup.Add(NewRect(480.0, 410.0, 530.0, 460.0))
// Enable the third radio button. By default the first one is selected
radiobutton3.EnableButton()
radiobutton3.SetBackgroundColor(NewColorPt(0.0, 1.0, 1.0), 3)
radiobutton3.RefreshAppearance()
radioGroup.AddGroupButtonsToPage(blankPage)
// Custom push button annotation creation
customPushbutton1 := PushButtonWidgetCreate(doc, NewRect(260.0, 320.0, 360.0, 360.0))
// Set the annotation appearance.
customPushbutton1.SetAppearance(CreateCustomButtonAppearance(doc, false), AnnotE_normal)
// Create 'SubmitForm' action. The action will be linked to the button.
url := FileSpecCreateURL(doc.GetSDFDoc(), "http://www.pdftron.com")
buttonAction := ActionCreateSubmitForm(url)
// Associate the above action with 'Down' event in annotations action dictionary.
annotAction := customPushbutton1.GetSDFObj().PutDict("AA")
annotAction.Put("D", buttonAction.GetSDFObj())
blankPage.AnnotPushBack(customPushbutton1)
// Add the page as the last page | // RadioButton Widget Creation
// Create a radio button group and Add three radio buttons in it.
radioGroup := RadioButtonGroupCreate(doc, "RadioGroup")
radiobutton1 := radioGroup.Add(NewRect(140.0, 410.0, 190.0, 460.0))
radiobutton1.SetBackgroundColor(NewColorPt(1.0, 1.0, 0.0), 3) | random_line_split |
InteractiveFormsTest.go | if buttonDown{
element.SetTextMatrix(1.0, 0.0, 0.0, 1.0, 33.0, 10.0)
}else{
element.SetTextMatrix(1.0, 0.0, 0.0, 1.0, 30.0, 13.0)
}
writer.WriteElement(element)
writer.WritePlacedElement(build.CreateTextEnd())
stm := writer.End()
// Set the bounding box
stm.PutRect("BBox", 0.0, 0.0, 101.0, 37.0)
stm.PutName("Subtype","Form")
return stm
}
func main(){
PDFNetInitialize()
// The map (vector) used to store the name and count of all fields.
// This is used later on to clone the fields
fieldNames:= make(map[string]int)
//----------------------------------------------------------------------------------
// Example 1: Programatically create new Form Fields and Widget Annotations.
//----------------------------------------------------------------------------------
doc := NewPDFDoc()
// Create a blank new page and Add some form fields.
blankPage := doc.PageCreate()
// Text Widget Creation
// Create an empty text widget with black text.
text1 := TextWidgetCreate(doc, NewRect(110.0, 700.0, 380.0, 730.0))
text1.SetText("Basic Text Field")
text1.RefreshAppearance()
blankPage.AnnotPushBack(text1)
// Create a vertical text widget with blue text and a yellow background.
text2 := TextWidgetCreate(doc, NewRect(50.0, 400.0, 90.0, 730.0))
text2.SetRotation(90)
// Set the text content.
text2.SetText(" ****Lucky Stars!****");
// Set the font type, text color, font size, border color and background color.
text2.SetFont(FontCreate(doc.GetSDFDoc(), FontE_helvetica_oblique))
text2.SetFontSize(28)
text2.SetTextColor(NewColorPt(0.0, 0.0, 1.0), 3)
text2.SetBorderColor(NewColorPt(0.0, 0.0, 0.0), 3)
text2.SetBackgroundColor(NewColorPt(1.0, 1.0, 0.0), 3)
text2.RefreshAppearance()
// Add the annotation to the page.
blankPage.AnnotPushBack(text2)
// Create two new text widget with Field names employee.name.first and employee.name.last
// This logic shows how these widgets can be created using either a field name string or
// a Field object
text3 := TextWidgetCreate(doc, NewRect(110.0, 660.0, 380.0, 690.0), "employee.name.first")
text3.SetText("Levi")
text3.SetFont(FontCreate(doc.GetSDFDoc(), FontE_times_bold))
text3.RefreshAppearance()
blankPage.AnnotPushBack(text3)
empLastName := doc.FieldCreate("employee.name.last", FieldE_text, "Ackerman")
text4 := TextWidgetCreate(doc, NewRect(110.0, 620.0, 380.0, 650.0), empLastName)
text4.SetFont(FontCreate(doc.GetSDFDoc(), FontE_times_bold))
text4.RefreshAppearance()
blankPage.AnnotPushBack(text4)
// Signature Widget Creation (unsigned)
signature1 := SignatureWidgetCreate(doc, NewRect(110.0, 560.0, 260.0, 610.0))
signature1.RefreshAppearance()
blankPage.AnnotPushBack(signature1)
// CheckBox Widget Creation
// Create a check box widget that is not checked.
check1 := CheckBoxWidgetCreate(doc, NewRect(140.0, 490.0, 170.0, 520.0))
check1.RefreshAppearance()
blankPage.AnnotPushBack(check1)
// Create a check box widget that is checked.
check2 := CheckBoxWidgetCreate(doc, NewRect(190.0, 490.0, 250.0, 540.0), "employee.name.check1")
check2.SetBackgroundColor(NewColorPt(1.0, 1.0, 1.0), 3)
check2.SetBorderColor(NewColorPt(0.0, 0.0, 0.0), 3)
// Check the widget (by default it is unchecked).
check2.SetChecked(true)
check2.RefreshAppearance()
blankPage.AnnotPushBack(check2)
// PushButton Widget Creation
pushbutton1 := PushButtonWidgetCreate(doc, NewRect(380.0, 490.0, 520.0, 540.0))
pushbutton1.SetTextColor(NewColorPt(1.0, 1.0, 1.0), 3)
pushbutton1.SetFontSize(36)
pushbutton1.SetBackgroundColor(NewColorPt(0.0, 0.0, 0.0), 3)
// Add a caption for the pushbutton.
pushbutton1.SetStaticCaptionText("PushButton")
pushbutton1.RefreshAppearance()
blankPage.AnnotPushBack(pushbutton1)
// ComboBox Widget Creation
combo1 := ComboBoxWidgetCreate(doc, NewRect(280.0, 560.0, 580.0, 610.0));
// Add options to the combobox widget.
combo1.AddOption("Combo Box No.1")
combo1.AddOption("Combo Box No.2")
combo1.AddOption("Combo Box No.3")
// Make one of the options in the combo box selected by default.
combo1.SetSelectedOption("Combo Box No.2")
combo1.SetTextColor(NewColorPt(1.0, 0.0, 0.0), 3)
combo1.SetFontSize(28)
combo1.RefreshAppearance()
blankPage.AnnotPushBack(combo1)
// ListBox Widget Creation
list1 := ListBoxWidgetCreate(doc, NewRect(400.0, 620.0, 580.0, 730.0))
// Add one option to the listbox widget.
list1.AddOption("List Box No.1")
// Add multiple options to the listbox widget in a batch.
listOptions := NewVectorString()
listOptions.Add("List Box No.2")
listOptions.Add("List Box No.3")
list1.AddOptions(listOptions)
// Select some of the options in list box as default options
list1.SetSelectedOptions(listOptions)
// Enable list box to have multi-select when editing.
list1.GetField().SetFlag(FieldE_multiselect, true)
list1.SetFont(FontCreate(doc.GetSDFDoc(),FontE_times_italic))
list1.SetTextColor(NewColorPt(1.0, 0.0, 0.0), 3)
list1.SetFontSize(28)
list1.SetBackgroundColor(NewColorPt(1.0, 1.0, 1.0), 3)
list1.RefreshAppearance()
blankPage.AnnotPushBack(list1)
// RadioButton Widget Creation
// Create a radio button group and Add three radio buttons in it.
radioGroup := RadioButtonGroupCreate(doc, "RadioGroup")
radiobutton1 := radioGroup.Add(NewRect(140.0, 410.0, 190.0, 460.0))
radiobutton1.SetBackgroundColor(NewColorPt(1.0, 1.0, 0.0), 3)
radiobutton1.RefreshAppearance()
radiobutton2 := radioGroup.Add(NewRect(310.0, 410.0, 360.0, 460.0))
radiobutton2.SetBackgroundColor(NewColorPt(0.0, 1.0, 0.0), | {
// Create a button appearance stream ------------------------------------
build := NewElementBuilder()
writer := NewElementWriter()
writer.Begin(doc.GetSDFDoc())
// Draw background
element := build.CreateRect(0.0, 0.0, 101.0, 37.0)
element.SetPathFill(true)
element.SetPathStroke(false)
element.GetGState().SetFillColorSpace(ColorSpaceCreateDeviceGray())
element.GetGState().SetFillColor(NewColorPt(0.75, 0.0, 0.0))
writer.WriteElement(element)
// Draw 'Submit' text
writer.WriteElement(build.CreateTextBegin())
text := "Submit"
element = build.CreateTextRun(text, FontCreate(doc.GetSDFDoc(), FontE_helvetica_bold), 12.0)
element.GetGState().SetFillColor(NewColorPt(0.0, 0.0, 0.0))
| identifier_body | |
agollo.go | ReleaseKey(cachedReleaseKey.(string)),
)
if err != nil {
return true
}
if status == http.StatusOK {
oldValue := a.getNamespace(namespaceStr)
a.cache.Store(namespace, config.Configurations)
a.releaseKeyMap.Store(namespace, config.ReleaseKey)
if err = a.backup(namespaceStr, config.Configurations); err != nil {
a.log("BackupFile", a.opts.BackupFile, "Namespace", namespace,
"Action", "Backup", "Error", err)
}
a.sendWatchCh(namespaceStr, oldValue, config.Configurations)
a.notificationMap.Store(namespaceStr, config.ReleaseKey)
}
return true
})
}
// shouldStop reports whether Stop has been requested, by checking whether
// the stop channel has already been closed. It never blocks.
func (a *agollo) shouldStop() bool {
	select {
	case <-a.stopCh:
		return true
	default:
	}
	return false
}
// longPoll performs one round of Apollo's long-polling notification check:
// it reports the locally known notification IDs to the server and, for each
// namespace the server marks as changed, reloads that namespace and fans the
// change out to watchers.
func (a *agollo) longPoll() {
	localNotifications := a.getLocalNotifications()
	// Known issue: a namespace that is not preloaded and only gets
	// initNamespace'd after Start has begun listening must wait for the next
	// polling round (~90s later) before it can receive change notifications.
	notifications, err := a.getRemoteNotifications(localNotifications)
	if err != nil {
		a.sendErrorsCh("", nil, "", err)
		return
	}
	// HTTP Status 200: notifications holds the namespaces that need updating,
	// each with its new NotificationID.
	// HTTP Status 304: none of the reported namespaces changed; notifications
	// is an empty slice and the loop below is skipped.
	for _, notification := range notifications {
		// Capture the old cached value so watchers can diff old vs. new.
		oldValue := a.getNamespace(notification.NamespaceName)
		// Re-fetch and re-cache the namespace's configuration.
		status, newValue, err := a.reloadNamespace(notification.NamespaceName)
		if err == nil {
			// Notifications reported an update, yet GetConfigsFromNonCache
			// returned 304 — the request may have hit a node that has not
			// synced yet. Leave the NotificationID untouched and retry on the
			// next round.
			if status == http.StatusNotModified {
				continue
			}
			if len(oldValue.Different(newValue)) == 0 {
				// Possible Apollo cluster setup problem:
				// GetConfigsFromNonCache returned byte-identical data with
				// http.StatusOK. If we advanced the NotificationID here, the
				// real config change would never be fetched unless another
				// change triggered it later — so skip this notification.
				continue
			}
			// Notify the watch channels of the change.
			a.sendWatchCh(notification.NamespaceName, oldValue, newValue)
			// Advance the NotificationID only when everything above succeeded.
			// Storing it earlier could leave the ID current while
			// reloadNamespace had not yet updated/backed up the config — an
			// Apollo failure at that point would strand a stale config behind
			// an up-to-date notification ID.
			a.notificationMap.Store(notification.NamespaceName, notification.NotificationID)
		} else {
			a.sendErrorsCh("", notifications, notification.NamespaceName, err)
		}
	}
}
// Stop shuts the client down: it stops the balancer (when one is configured)
// and closes the stop channel so background goroutines exit. It is safe to
// call multiple times; only the first call has any effect.
func (a *agollo) Stop() {
	a.stopLock.Lock()
	defer a.stopLock.Unlock()
	if a.stop {
		return
	}
	if balancer := a.opts.Balancer; balancer != nil {
		balancer.Stop()
	}
	a.stop = true
	close(a.stopCh)
}
// Watch returns the channel that receives change events for all namespaces,
// creating it lazily on the first call.
//
// NOTE(review): the lazy initialization below is not guarded by a lock, so
// two concurrent first calls to Watch could race — confirm callers invoke
// this from a single goroutine before relying on it concurrently.
func (a *agollo) Watch() <-chan *ApolloResponse {
	if a.watchCh != nil {
		return a.watchCh
	}
	a.watchCh = make(chan *ApolloResponse)
	return a.watchCh
}
func (a *agollo) WatchNamespace(namespace string, stop chan bool) <-chan *ApolloResponse {
watchNamespace := fixWatchNamespace(namespace)
watchCh, exists := a.watchNamespaceChMap.LoadOrStore(watchNamespace, make(chan *ApolloResponse))
if !exists {
go func() {
// 非预加载以外的namespace,初始化基础meta信息,否则没有longpoll
err := a.initNamespace(namespace)
if err != nil {
watchCh.(chan *ApolloResponse) <- &ApolloResponse{
Namespace: namespace,
Error: err,
}
}
if stop != nil {
select {
case <-a.stopCh:
case <-stop:
}
a.watchNamespaceChMap.Delete(watchNamespace)
}
}()
}
return watchCh.(chan *ApolloResponse)
}
func fixWatchNamespace(namespace string) string {
// fix: 传给apollo类似test.properties这种namespace
// 通知回来的NamespaceName却没有.properties后缀,追加.properties后缀来修正此问题
ext := path.Ext(namespace)
if ext == "" {
namespace = namespace + "." + defaultConfigType
}
return namespace
}
func (a *agollo) sendWatchCh(namespace string, oldVal, newVal Configurations) {
changes := oldVal.Different(newVal)
if len(changes) == 0 {
return
}
resp := &ApolloResponse{
Namespace: namespace,
OldValue: oldVal,
NewValue: newVal,
Changes: changes,
}
timer := time.NewTimer(defaultWatchTimeout)
for _, watchCh := range a.getWatchChs(namespace) {
select {
case watchCh <- resp:
case <-timer.C: // 防止创建全局监听或者某个namespace监听却不消费死锁问题
timer.Reset(defaultWatchTimeout)
}
}
}
func (a *agollo) getWatchChs(namespace string) []chan *ApolloResponse {
var chs []chan *ApolloResponse
if a.watchCh != nil {
chs = append(chs, a.watchCh)
}
watchNamespace := fixWatchNamespace(namespace)
if watchNamespaceCh, found := a.watchNamespaceChMap.Load(watchNamespace); found {
chs = append(chs, watchNamespaceCh.(chan *ApolloResponse))
}
return chs
}
// sendErrorsCh 发送轮训时发生的错误信息channel,如果使用者不监听消费channel,错误会被丢弃
// 改成负载均衡机制后,不太好获取每个api使用的configServerURL有点蛋疼
func (a *agollo) sendErrorsCh(configServerURL string, notifications []Notification, namespace string, err error) {
longPollerError := &LongPollerError{
ConfigServerURL: configServerURL,
AppID: a.opts.AppID,
Cluster: a.opts.Cluster,
Notifications: notifications,
Namespace: namespace,
Err: err,
}
select {
case a.errorsCh <- longPollerError:
default:
}
}
func (a *agollo) log(kvs ...interface{}) {
a.opts.Logger.Log(
append([]interface{}{
"[Agollo]", "",
"AppID", a.opts.AppID,
"Cluster", a.opts.Cluster,
},
kvs...,
)...,
)
}
func (a *agollo) backup(namespace string, config Configurations) error {
backup, err := a.loadBackup()
if err != nil {
backup = map[string]Configurations{}
}
a.backupLock.Lock()
defer a.backupLock.Unlock()
backup[namespace] = config
data, err := json.Marshal(backup)
if err != nil {
return err
}
dir := filepath.Dir(a.opts.BackupFile)
if _, err = os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, 0755)
if err != nil && !os.IsExist(err) {
return err
}
}
return ioutil.WriteFile(a.opts.BackupFile, data, 0644)
}
func (a *agollo) loadBackup() (map[string]Configurations, error) {
a.backupLock.RLock()
defer a.backupLock.RUnlock()
if _, err := os.Stat(a.opts.BackupFile); err != nil {
return nil, err
}
data, err := ioutil.ReadFile(a.opts.BackupFile)
if err != nil {
return nil, err
}
backup := map[string]Configurations{}
err = json.Unmarshal(data, &backup)
if err != nil {
return nil, err
}
return backup, nil
}
func (a *agollo) loadBackupByNamespace(namespace string) (Configurations, error) {
backup, err := a.loadBackup()
if err != nil {
return nil, err
}
return backup[namespace], nil
}
// getRemoteNotifications
// 立即返回的情况:
// 1. 请求中的namespace任意一个在apollo服务器中有更新的ID会立即返回结果
// 请求被hold 90秒的情况:
// 1. 请求的notificationID和apollo服务器中的ID相等
// 2. 请求的namespace都是在apollo中不存在的
func (a *agollo) getRemoteNotifications(req []Notification) ([]Notification, error) {
configServerURL, err := a.opts.Balancer.Select()
if err != nil {
a.log("ConfigServerUrl", configServerURL, "Error", err, "Action", "Balancer.Select")
return nil, err
}
status, notifications, err := a.opts.ApolloClient.Notifications(
configServerURL,
a.opts.AppID,
a.opts.Cluster,
req,
)
if err != nil {
a.log("ConfigServerUrl", configServerURL,
"Notifications", | req, "ServerResponseStatus", status,
"Error", err, "Action", "LongPoll")
return nil, | conditional_block | |
agollo.go | Namespace", namespace,
"Action", "Backup", "Error", err)
}
a.sendWatchCh(namespaceStr, oldValue, config.Configurations)
a.notificationMap.Store(namespaceStr, config.ReleaseKey)
}
return true
})
}
func (a *agollo) shouldStop() bool {
select {
case <-a.stopCh:
return true
default:
return false
}
}
func (a *agollo) longPoll() {
localNotifications := a.getLocalNotifications()
// 这里有个问题是非预加载的namespace,如果在Start开启监听后才被initNamespace
// 需要等待90秒后的下一次轮训才能收到事件通知
notifications, err := a.getRemoteNotifications(localNotifications)
if err != nil {
a.sendErrorsCh("", nil, "", err)
return
}
// HTTP Status: 200时,正常返回notifications数据,数组含有需要更新namespace和notificationID
// HTTP Status: 304时,上报的namespace没有更新的修改,返回notifications为空数组,遍历空数组跳过
for _, notification := range notifications {
// 读取旧缓存用来给监听队列
oldValue := a.getNamespace(notification.NamespaceName)
// 更新namespace
status, newValue, err := a.reloadNamespace(notification.NamespaceName)
if err == nil {
// Notifications 有更新,但是 GetConfigsFromNonCache 返回 304,
// 可能是请求恰好打在尚未同步的节点上,不更新 NotificationID,等待下次再更新
if status == http.StatusNotModified {
continue
}
if len(oldValue.Different(newValue)) == 0 {
// case 可能是apollo集群搭建问题
// GetConfigsFromNonCache 返回了了一模一样的数据,但是http.status code == 200
// 导致NotificationID更新了,但是真实的配置没有更新,而后续也不会获取到新配置,除非有新的变更触发
continue
}
// 发送到监听channel
a.sendWatchCh(notification.NamespaceName, oldValue, newValue)
// 仅在无异常的情况下更新NotificationID,
// 极端情况下,提前设置notificationID,reloadNamespace还未更新配置并将配置备份,
// 访问apollo失败导致notificationid已是最新,而配置不是最新
a.notificationMap.Store(notification.NamespaceName, notification.NotificationID)
} else {
a.sendErrorsCh("", notifications, notification.NamespaceName, err)
}
}
}
func (a *agollo) Stop() {
a.stopLock.Lock()
defer a.stopLock.Unlock()
if a.stop {
return
}
if a.opts.Balancer != nil {
a.opts.Balancer.Stop()
}
a.stop = true
close(a.stopCh)
}
func (a *agollo) Watch() <-chan *ApolloResponse {
if a.watchCh == nil {
a.watchCh = make(chan *ApolloResponse)
}
return a.watchCh
}
func (a *agollo) WatchNamespace(namespace string, stop chan bool) <-chan *ApolloResponse {
watchNamespace := fixWatchNamespace(namespace)
watchCh, exists := a.watchNamespaceChMap.LoadOrStore(watchNamespace, make(chan *ApolloResponse))
if !exists {
go func() {
// 非预加载以外的namespace,初始化基础meta信息,否则没有longpoll
err := a.initNamespace(namespace)
if err != nil {
watchCh.(chan *ApolloResponse) <- &ApolloResponse{
Namespace: namespace,
Error: err,
}
}
if stop != nil {
select {
case <-a.stopCh:
case <-stop:
}
a.watchNamespaceChMap.Delete(watchNamespace)
}
}()
}
return watchCh.(chan *ApolloResponse)
}
func fixWatchNamespace(namespace string) string {
// fix: 传给apollo类似test.properties这种namespace
// 通知回来的NamespaceName却没有.properties后缀,追加.properties后缀来修正此问题
ext := path.Ext(namespace)
if ext == "" {
namespace = namespace + "." + defaultConfigType
}
return namespace
}
func (a *agollo) sendWatchCh(namespace string, oldVal, newVal Configurations) {
changes := oldVal.Different(newVal)
if len(changes) == 0 {
return
}
resp := &ApolloResponse{
Namespace: namespace,
OldValue: oldVal,
NewValue: newVal,
Changes: changes,
}
timer := time.NewTimer(defaultWatchTimeout)
for _, watchCh := range a.getWatchChs(namespace) {
select {
case watchCh <- resp:
case <-timer.C: // 防止创建全局监听或者某个namespace监听却不消费死锁问题
timer.Reset(defaultWatchTimeout)
}
}
}
func (a *agollo) getWatchChs(namespace string) []chan *ApolloResponse {
var chs []chan *ApolloResponse
if a.watchCh != nil {
chs = append(chs, a.watchCh)
}
watchNamespace := fixWatchNamespace(namespace)
if watchNamespaceCh, found := a.watchNamespaceChMap.Load(watchNamespace); found {
chs = append(chs, watchNamespaceCh.(chan *ApolloResponse))
}
return chs
}
// sendErrorsCh 发送轮训时发生的错误信息channel,如果使用者不监听消费channel,错误会被丢弃
// 改成负载均衡机制后,不太好获取每个api使用的configServerURL有点蛋疼
func (a *agollo) sendErrorsCh(configServerURL string, notifications []Notification, namespace string, err error) {
longPollerError := &LongPollerError{
ConfigServerURL: configServerURL,
AppID: a.opts.AppID,
Cluster: a.opts.Cluster,
Notifications: notifications,
Namespace: namespace,
Err: err,
}
select {
case a.errorsCh <- longPollerError:
default:
}
}
func (a *agollo) log(kvs ...interface{}) {
a.opts.Logger.Log(
append([]interface{}{
"[Agollo]", "",
"AppID", a.opts.AppID,
"Cluster", a.opts.Cluster,
},
kvs...,
)...,
)
}
func (a *agollo) backup(namespace string, config Configurations) error {
backup, err := a.loadBackup()
if err != nil {
backup = map[string]Configurations{}
}
a.backupLock.Lock()
defer a.backupLock.Unlock()
backup[namespace] = config
data, err := json.Marshal(backup)
if err != nil {
return err
}
dir := filepath.Dir(a.opts.BackupFile)
if _, err = os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, 0755)
if err != nil && !os.IsExist(err) {
return err
}
}
return ioutil.WriteFile(a.opts.BackupFile, data, 0644)
}
func (a *agollo) loadBackup() (map[string]Configurations, error) {
a.backupLock.RLock()
defer a.backupLock.RUnlock()
if _, err := os.Stat(a.opts.BackupFile); err != nil {
return nil, err
}
data, err := ioutil.ReadFile(a.opts.BackupFile)
if err != nil {
return nil, err
}
backup := map[string]Configurations{}
err = json.Unmarshal(data, &backup)
if err != nil {
return nil, err
}
return backup, nil
}
func (a *agollo) loadBackupByNamespace(namespace string) (Configurations, error) {
backup, err := a.loadBackup()
if err != nil {
return nil, err
}
return backup[namespace], nil
}
// getRemoteNotifications
// 立即返回的情况:
// 1. 请求中的namespace任意一个在apollo服务器中有更新的ID会立即返回结果
// 请求被hold 90秒的情况:
// 1. 请求的notificationID和apollo服务器中的ID相等
// 2. 请求的namespace都是在apollo中不存在的
func (a *agollo) getRemoteNotifications(req []Notification) ([]Notification, error) {
configServerURL, err := a.opts.Balancer.Select()
if err != nil {
a.log("ConfigServerUrl", configServerURL, "Error", err, "Action", "Balancer.Select")
return nil, err
}
status, notifications, err := a.opts.ApolloClient.Notifications(
configServerURL,
a.opts.AppID,
a.opts.Cluster,
req,
)
if err != nil {
a.log("ConfigServerUrl", configServerURL,
"Notifications", req, "ServerResponseStatus", status,
"Error", err, "Action", "LongPoll")
return nil, err
}
return notifications, nil
}
func (a *agollo) getLocalNotifications() []Notification {
var notifications []Notification
a.notificationMap.Range(func(key, val interface{}) bool {
k, _ := key.(string)
v, _ := val.(int)
notifications = append(notifications, Notification{
NamespaceName: k,
NotificationID: v,
})
return true
})
return notifications
} | random_line_split | ||
agollo.go | 取备份
if a.opts.FailTolerantOnBackupExists {
backupConfig, lerr := a.loadBackupByNamespace(namespace)
if lerr != nil {
a.log("BackupFile", a.opts.BackupFile, "Namespace", namespace,
"Action", "loadBackupByNamespace", "Error", lerr)
return
}
a.cache.Store(namespace, backupConfig)
conf = backupConfig
err = nil
return
}
}
return
}
func (a *agollo) Get(key string, opts ...GetOption) string {
getOpts := a.opts.newGetOptions(opts...)
val, found := a.GetNameSpace(getOpts.Namespace)[key]
if !found {
return getOpts.DefaultValue
}
v, _ := ToStringE(val)
return v
}
func (a *agollo) GetNameSpace(namespace string) Configurations {
config, found := a.cache.LoadOrStore(namespace, Configurations{})
if !found && a.opts.AutoFetchOnCacheMiss {
err := a.initNamespace(namespace)
if err != nil {
a.log("Action", "InitNamespace", "Error", err)
}
return a.getNamespace(namespace)
}
return config.(Configurations)
}
func (a *agollo) getNamespace(namespace string) Configurations {
v, ok := a.cache.Load(namespace)
if !ok {
return Configurations{}
}
return v.(Configurations)
}
func (a *agollo) Options() Options {
return a.opts
}
// 启动goroutine去轮训apollo通知接口
func (a *agollo) Start() <-chan *LongPollerError {
a.runOnce.Do(func() {
go func() {
timer := time.NewTimer(a.opts.LongPollerInterval)
defer timer.Stop()
for !a.shouldStop() {
select {
case <-timer.C:
a.longPoll()
timer.Reset(a.opts.LongPollerInterval)
case <-a.stopCh:
return
}
}
}()
})
if a.opts.EnableHeartBeat {
a.runHeartBeat.Do(func() {
go func() {
timer := time.NewTimer(a.opts.HeartBeatInterval)
defer timer.Stop()
for !a.shouldStop() {
select {
case <-timer.C:
a.heartBeat()
timer.Reset(a.opts.HeartBeatInterval)
case <-a.stopCh:
return
}
}
}()
})
}
return a.errorsCh
}
func (a *agollo) heartBeat() {
var configServerURL string
configServerURL, err := a.opts.Balancer.Select()
if err != nil {
a.log("Action", "BalancerSelect", "Error", err)
return
}
a.releaseKeyMap.Range(func(namespace, cachedReleaseKey interface{}) bool {
var config *Config
namespaceStr := namespace.(string)
status, config, err := a.opts.ApolloClient.GetConfigsFromNonCache(
configServerURL,
a.opts.AppID,
a.opts.Cluster,
namespaceStr,
ReleaseKey(cachedReleaseKey.(string)),
)
if err != nil {
return true
}
if status == http.StatusOK {
oldValue := a.getNamespace(namespaceStr)
a.cache.Store(namespace, config.Configurations)
a.releaseKeyMap.Store(namespace, config.ReleaseKey)
if err = a.backup(namespaceStr, config.Configurations); err != nil {
a.log("BackupFile", a.opts.BackupFile, "Namespace", namespace,
"Action", "Backup", "Error", err)
}
a.sendWatchCh(namespaceStr, oldValue, config.Configurations)
a.notificationMap.Store(namespaceStr, config.ReleaseKey)
}
return true
})
}
func (a *agollo) shouldStop() bool {
select {
case <-a.stopCh:
return true
default:
return false
}
}
func (a *agollo) longPoll() {
localNotifications := a.getLocalNotifications()
// 这里有个问题是非预加载的namespace,如果在Start开启监听后才被initNamespace
// 需要等待90秒后的下一次轮训才能收到事件通知
notifications, err := a.getRemoteNotifications(localNotifications)
if err != nil {
a.sendErrorsCh("", nil, "", err)
return
}
// HTTP Status: 200时,正常返回notifications数据,数组含有需要更新namespace和notificationID
// HTTP Status: 304时,上报的namespace没有更新的修改,返回notifications为空数组,遍历空数组跳过
for _, notification := range notifications {
// 读取旧缓存用来给监听队列
oldValue := a.getNamespace(notification.NamespaceName)
// 更新namespace
status, newValue, err := a.reloadNamespace(notification.NamespaceName)
if err == nil {
// Notifications 有更新,但是 GetConfigsFromNonCache 返回 304,
// 可能是请求恰好打在尚未同步的节点上,不更新 NotificationID,等待下次再更新
if status == http.StatusNotModified {
continue
}
if len(oldValue.Different(newValue)) == 0 {
// case 可能是apollo集群搭建问题
// GetConfigsFromNonCache 返回了了一模一样的数据,但是http.status code == 200
// 导致NotificationID更新了,但是真实的配置没有更新,而后续也不会获取到新配置,除非有新的变更触发
continue
}
// 发送到监听channel
a.sendWatchCh(notification.NamespaceName, oldValue, newValue)
// 仅在无异常的情况下更新NotificationID,
// 极端情况下,提前设置notificationID,reloadNamespace还未更新配置并将配置备份,
// 访问apollo失败导致notificationid已是最新,而配置不是最新
a.notificationMap.Store(notification.NamespaceName, notification.NotificationID)
} else {
a.sendErrorsCh("", notifications, notification.NamespaceName, err)
}
}
}
func (a *agollo) Stop() {
a.stopLock.Lock()
defer a.stopLock.Unlock()
if a.stop {
return
}
if a.opts.Balancer != nil {
a.opts.Balancer.Stop()
}
a.stop = true
close(a.stopCh)
}
func (a *agollo) Watch() <-chan *ApolloResponse {
if a.watchCh == nil {
a.watchCh = make(chan *ApolloResponse)
}
return a.watchCh
}
func (a *agollo) WatchNamespace(namespace string, stop chan bool) <-chan *ApolloResponse {
watchNamespace := fixWatchNamespace(namespace)
watchCh, exists := a.watchNamespaceChMap.LoadOrStore(watchNamespace, make(chan *ApolloResponse))
if !exists {
go func() {
// 非预加载以外的namespace,初始化基础meta信息,否则没有longpoll
err := a.initNamespace(namespace)
if err != nil {
watchCh.(chan *ApolloResponse) <- &ApolloResponse{
Namespace: namespace,
Error: err,
}
}
if stop != nil {
select {
case <-a.stopCh:
case <-stop:
}
a.watchNamespaceChMap.Delete(watchNamespace)
}
}()
}
return watchCh.(chan *ApolloResponse)
}
func fixWatchNamespace(namespace string) string {
// fix: 传给apollo类似test.properties这种namespace
// 通知回来的NamespaceName却没有.properties后缀,追加.properties后缀来修正此问题
ext := path.Ext(namespace)
if ext == "" {
namespace = namespace + "." + defaultConfigType
}
return namespace
}
func (a *agollo) sendWatchCh(namespace string, oldVal, newVal Configurations) {
changes := oldVal.Different(newVal)
if len(changes) == 0 {
return
}
resp := &ApolloResponse{
Namespace: namespace,
OldValue: oldVal,
NewValue: newVal,
Changes: changes,
}
timer := time.NewTimer(defaultWatchTimeout)
for _, watchCh := range a.getWatchChs(namespace) {
select {
case watchCh <- resp:
case <-timer.C: // 防止创建全局监听或者某个namespace监听却不消费死锁问题
timer.Reset(defaultWatchTimeout)
}
}
}
func (a *agollo) getWatchChs(namespace string) []chan *ApolloResponse {
var chs []chan *ApolloResponse
if a.watchCh != nil {
chs = append(chs, a.watchCh)
}
watchNamespace := fixWatchNamespace(namespace)
if watchNamespaceCh, found := a.watchNamespaceChMap.Load(watchNamespace); found {
chs = append(chs, watchNamespaceCh.(chan *ApolloResponse))
}
return chs
}
// sendErrorsCh 发送轮训时发生的错误信息channel,如果使用者不监听消费channel,错误会被丢弃
// 改成负载均衡机制后,不太好获取每个api使用的configServerU | RL有点蛋疼
func (a *agollo) sendErrorsCh(configServerURL string, notifications []Notification, namespace string, err error) {
longPollerError := &LongPollerError{
ConfigServerURL: configServerURL,
AppID: a.opts.AppID,
Cluster: a.opts.Cluster,
Notifications | identifier_body | |
agollo.go | 的修改,返回notifications为空数组,遍历空数组跳过
for _, notification := range notifications {
// 读取旧缓存用来给监听队列
oldValue := a.getNamespace(notification.NamespaceName)
// 更新namespace
status, newValue, err := a.reloadNamespace(notification.NamespaceName)
if err == nil {
// Notifications 有更新,但是 GetConfigsFromNonCache 返回 304,
// 可能是请求恰好打在尚未同步的节点上,不更新 NotificationID,等待下次再更新
if status == http.StatusNotModified {
continue
}
if len(oldValue.Different(newValue)) == 0 {
// case 可能是apollo集群搭建问题
// GetConfigsFromNonCache 返回了了一模一样的数据,但是http.status code == 200
// 导致NotificationID更新了,但是真实的配置没有更新,而后续也不会获取到新配置,除非有新的变更触发
continue
}
// 发送到监听channel
a.sendWatchCh(notification.NamespaceName, oldValue, newValue)
// 仅在无异常的情况下更新NotificationID,
// 极端情况下,提前设置notificationID,reloadNamespace还未更新配置并将配置备份,
// 访问apollo失败导致notificationid已是最新,而配置不是最新
a.notificationMap.Store(notification.NamespaceName, notification.NotificationID)
} else {
a.sendErrorsCh("", notifications, notification.NamespaceName, err)
}
}
}
func (a *agollo) Stop() {
a.stopLock.Lock()
defer a.stopLock.Unlock()
if a.stop {
return
}
if a.opts.Balancer != nil {
a.opts.Balancer.Stop()
}
a.stop = true
close(a.stopCh)
}
func (a *agollo) Watch() <-chan *ApolloResponse {
if a.watchCh == nil {
a.watchCh = make(chan *ApolloResponse)
}
return a.watchCh
}
func (a *agollo) WatchNamespace(namespace string, stop chan bool) <-chan *ApolloResponse {
watchNamespace := fixWatchNamespace(namespace)
watchCh, exists := a.watchNamespaceChMap.LoadOrStore(watchNamespace, make(chan *ApolloResponse))
if !exists {
go func() {
// 非预加载以外的namespace,初始化基础meta信息,否则没有longpoll
err := a.initNamespace(namespace)
if err != nil {
watchCh.(chan *ApolloResponse) <- &ApolloResponse{
Namespace: namespace,
Error: err,
}
}
if stop != nil {
select {
case <-a.stopCh:
case <-stop:
}
a.watchNamespaceChMap.Delete(watchNamespace)
}
}()
}
return watchCh.(chan *ApolloResponse)
}
func fixWatchNamespace(namespace string) string {
// fix: 传给apollo类似test.properties这种namespace
// 通知回来的NamespaceName却没有.properties后缀,追加.properties后缀来修正此问题
ext := path.Ext(namespace)
if ext == "" {
namespace = namespace + "." + defaultConfigType
}
return namespace
}
func (a *agollo) sendWatchCh(namespace string, oldVal, newVal Configurations) {
changes := oldVal.Different(newVal)
if len(changes) == 0 {
return
}
resp := &ApolloResponse{
Namespace: namespace,
OldValue: oldVal,
NewValue: newVal,
Changes: changes,
}
timer := time.NewTimer(defaultWatchTimeout)
for _, watchCh := range a.getWatchChs(namespace) {
select {
case watchCh <- resp:
case <-timer.C: // 防止创建全局监听或者某个namespace监听却不消费死锁问题
timer.Reset(defaultWatchTimeout)
}
}
}
func (a *agollo) getWatchChs(namespace string) []chan *ApolloResponse {
var chs []chan *ApolloResponse
if a.watchCh != nil {
chs = append(chs, a.watchCh)
}
watchNamespace := fixWatchNamespace(namespace)
if watchNamespaceCh, found := a.watchNamespaceChMap.Load(watchNamespace); found {
chs = append(chs, watchNamespaceCh.(chan *ApolloResponse))
}
return chs
}
// sendErrorsCh 发送轮训时发生的错误信息channel,如果使用者不监听消费channel,错误会被丢弃
// 改成负载均衡机制后,不太好获取每个api使用的configServerURL有点蛋疼
func (a *agollo) sendErrorsCh(configServerURL string, notifications []Notification, namespace string, err error) {
longPollerError := &LongPollerError{
ConfigServerURL: configServerURL,
AppID: a.opts.AppID,
Cluster: a.opts.Cluster,
Notifications: notifications,
Namespace: namespace,
Err: err,
}
select {
case a.errorsCh <- longPollerError:
default:
}
}
func (a *agollo) log(kvs ...interface{}) {
a.opts.Logger.Log(
append([]interface{}{
"[Agollo]", "",
"AppID", a.opts.AppID,
"Cluster", a.opts.Cluster,
},
kvs...,
)...,
)
}
func (a *agollo) backup(namespace string, config Configurations) error {
backup, err := a.loadBackup()
if err != nil {
backup = map[string]Configurations{}
}
a.backupLock.Lock()
defer a.backupLock.Unlock()
backup[namespace] = config
data, err := json.Marshal(backup)
if err != nil {
return err
}
dir := filepath.Dir(a.opts.BackupFile)
if _, err = os.Stat(dir); os.IsNotExist(err) {
err = os.MkdirAll(dir, 0755)
if err != nil && !os.IsExist(err) {
return err
}
}
return ioutil.WriteFile(a.opts.BackupFile, data, 0644)
}
func (a *agollo) loadBackup() (map[string]Configurations, error) {
a.backupLock.RLock()
defer a.backupLock.RUnlock()
if _, err := os.Stat(a.opts.BackupFile); err != nil {
return nil, err
}
data, err := ioutil.ReadFile(a.opts.BackupFile)
if err != nil {
return nil, err
}
backup := map[string]Configurations{}
err = json.Unmarshal(data, &backup)
if err != nil {
return nil, err
}
return backup, nil
}
func (a *agollo) loadBackupByNamespace(namespace string) (Configurations, error) {
backup, err := a.loadBackup()
if err != nil {
return nil, err
}
return backup[namespace], nil
}
// getRemoteNotifications
// 立即返回的情况:
// 1. 请求中的namespace任意一个在apollo服务器中有更新的ID会立即返回结果
// 请求被hold 90秒的情况:
// 1. 请求的notificationID和apollo服务器中的ID相等
// 2. 请求的namespace都是在apollo中不存在的
func (a *agollo) getRemoteNotifications(req []Notification) ([]Notification, error) {
configServerURL, err := a.opts.Balancer.Select()
if err != nil {
a.log("ConfigServerUrl", configServerURL, "Error", err, "Action", "Balancer.Select")
return nil, err
}
status, notifications, err := a.opts.ApolloClient.Notifications(
configServerURL,
a.opts.AppID,
a.opts.Cluster,
req,
)
if err != nil {
a.log("ConfigServerUrl", configServerURL,
"Notifications", req, "ServerResponseStatus", status,
"Error", err, "Action", "LongPoll")
return nil, err
}
return notifications, nil
}
func (a *agollo) getLocalNotifications() []Notification {
var notifications []Notification
a.notificationMap.Range(func(key, val interface{}) bool {
k, _ := key.(string)
v, _ := val.(int)
notifications = append(notifications, Notification{
NamespaceName: k,
NotificationID: v,
})
return true
})
return notifications
}
func Init(configServerURL, appID string, opts ...Option) (err error) {
defaultAgollo, err = New(configServerURL, appID, opts...)
return
}
func InitWithConfigFile(configFilePath string, opts ...Option) (err error) {
defaultAgollo, err = NewWithConfigFile(configFilePath, opts...)
return
}
func InitWithDefaultConfigFile(opts ...Option) error {
return InitWithConfigFile(defaultConfigFilePath, opts...)
}
func Start() <-chan *LongPollerError {
return defaultAgollo.Start()
}
func Stop() {
defaultAgollo.Stop()
}
func Get(key string, opts ...GetOption) string {
return defaultAgollo.Get(key, opts...)
}
func GetNameSpace(namespace string) Configurations {
return defaultAgollo.GetNameSpace(namespace)
}
func Watch() <-chan *ApolloResponse {
return defaultAgollo.Watch()
}
func WatchNamespace(namespace string, stop chan bool) <-chan *ApolloResponse {
return defaultAgollo.WatchNamespace(namespace, stop)
}
func GetAgollo() Agollo {
return defaultAgollo
}
| identifier_name | ||
main.js | a plugin (remember note) */
//Validation - Email
$('input.email').each(function() {
initializeInput($('input.email'), $("input.email").formance('validate_email'));
});
$('input.email').on('keyup change', function() {
validate($(this), $(this).formance('validate_email'), "has-warning", $("#invalid-email").text());
$('div.alert-error').fadeOut();
});
$('input.email').on('blur', function () {
validate($(this), $(this).formance('validate_email'), "has-error", $("#invalid-email").text());
});
//Validation - Minimum characters: 1
if ($('input.min-1').get().length > 0)
initializeInput($('input.min-1'), inBounds($("input.min-1").val().length, 1));
$('input.min-1').on('keyup change', function () {
validate($(this), inBounds($(this).val().length, 1), "has-warning", $("#min-1").text());
$('div.alert-error').fadeOut();
});
$('input.min-1').on('blur', function () {
validate($(this), inBounds($(this).val().length, 1), "has-error", $("#min-1").text());
});
//Validation - Minimum characters: 4
$('input.min-4').each(function() {
initializeInput($(this), inBounds($(this).val().length, 4));
});
$('input.min-4').on('keyup change', function () {
validate($(this), inBounds($(this).val().length, 4), "has-warning", $("#min-4").text());
$('div.alert-error').fadeOut();
});
$('input.min-4').on('blur', function () {
validate($(this), inBounds($(this).val().length, 4), "has-error", $("#min-4").text());
});
//Validation - Password confirmation
$('input.pass-confirm').each(function() {
$(this).parent().removeClass("has-success has-warning has-error");
$(this).parent().prev().hide();
if ($(this).parent().parent().parent().find("p.error").children().text() == $("#diff-pass").text()) {
initializeInput($(this), false);
$(this).parent().prev().html($("#diff-pass").text());
}
//is("p.error")
});
$('input.pass-confirm').on('keyup change', function () {
validate($(this), confirmPass($(this).val(), $(this).parent().parent().prev().find("input").val()), "has-warning", $("#diff-pass").text());
$('div.alert-error').fadeOut();
});
$('input.pass-confirm').on('blur', function () {
validate($(this), confirmPass($(this).val(), $(this).parent().parent().prev().find("input").val()), "has-error", $("#diff-pass").text());
});
//Validation - Password
$('input.password').each(function() {
$(this).parent().removeClass("has-success has-warning has-error");
$(this).parent().prev().hide();
if ($(this).parent().prev().text().length > 0) {
initializeInput($(this), false);
}
//is("p.error")
});
});
function validate(input, valid, invalid, message) {
input.parent().removeClass("has-success has-warning has-error");
input.parent().prev().fadeOut();
if (valid) {
input.parent().addClass("has-success");
input.siblings().fadeIn();
} else {
input.parent().addClass(invalid);
input.siblings().fadeOut();
if (invalid === "has-error") {
input.parent().prev().html(message);
input.parent().prev().fadeIn();
}
}
}
function initializeInput(input, isValid) {
if (input.parent().parent().parent().children().hasClass("error")) {
if (isValid) {
input.parent().addClass("has-success");
input.siblings().fadeIn();
} else {
input.parent().prev().show();
input.parent().addClass('has-error');
input.siblings().fadeOut();
}
}
}
// Returns boolean whether the input has the minimum size required
function inBounds(actual, min) {
if (actual < min)
return false;
else
return true;
}
function confirmPass(actual, current) {
if (actual != current || current == "")
return false;
else
return true;
}
//Fills options with flags only
function teamOption(element) {
var ddData = [];
for (var i in teams) {
if (teams[i].id == 7)
ddData.push( { value: teams[i].id, imageSrc: "/assets/images/external/flags/" + teams[i].logo, selected: true } );
else
ddData.push( { value: teams[i].id, imageSrc: "/assets/images/external/flags/" + teams[i].logo } );
}
element.ddslick({
data: ddData,
width: 100,
imagePosition: "left"
});
}
//Fills options with flags and label
function teamOptionLabel(element) {
var ddData = [];
for (var i in teams) {
if (teams[i].id == 7)
ddData.push( { text: teams[i].name, value: teams[i].id, imageSrc: "assets/images/external/flags/" + teams[i].logo, selected: true } );
else
ddData.push( { text: teams[i].name, value: teams[i].id, imageSrc: "assets/images/external/flags/" + teams[i].logo } );
}
element.ddslick({
data: ddData,
width: 300,
imagePosition: "left"
});
}
function optionLabel(element, data) {
for (var i in data) {
element
.append($("<option></option>")
.attr("value", data[i].id)
.text(data[i].name));
}
element.selectric();
}
function matchList(element, data) {
var matches = data;
$(".inner-separator").css("height", Object.size(matches) * 120 + "px");
element.empty();
for (var i in matches) {
var match = matches[i]
var datetime = Date.parseString(match.datetime, "dd/MM/yyyy HH:mm");
var $match = $("<li>", {id: match.id, class: "match col-1-1"});
var $datetime = $("<div>", {class: "date col-1-3"});
var $day = $("<div>", {class: "day", text: datetime.getUTCDate()});
var $monthYear = $("<div>", {class: "month-year"});
var $month = $("<div>", {class: "month", text: monthsEN[datetime.getUTCMonth()]});
var $year = $("<div>", {class: "year", text: datetime.getUTCFullYear()});
var hours = datetime.getHours();
if (hours < 10)
hours = "0" + hours;
var minutes = datetime.getMinutes();
if (minutes < 10)
minutes = "0" + minutes;
var $time = $("<div>", {class: "time", text: hours + "h" + minutes})
$monthYear.append($month, $year);
$datetime.append($day, $monthYear, $time);
var title = stages[match.stage].name;
if (matches[i].group != "")
title += " " + groups[match.group].name;
var $stage = $("<div>", {class: "stage col-2-3", text: title});
var teamA = teams[match.matchTeamA.team];
var teamB = teams[match.matchTeamB.team];
var $teams = $("<div>", {class: "teams col-1-1"});
var $teamAImage = $("<img>", {src: "/assets/images/external/flags/" + teamA.logo});
$teamAImage.tooltip({
placement: "bottom",
title: teamA.name
});
var $teamBImage = $("<img>", {src: "/assets/images/external/flags/" + teamB.logo});
var $teamA = $("<div>", {class: "teamA"});
var $teamB = $("<div>", {class: "teamB"});
$teamBImage.tooltip({
placement: "bottom",
title: teamB.name
});
$teamA.append($teamAImage);
$teamB.append($teamBImage);
$teams.append($teamA, "X", $teamB);
//
$match.append($datetime, $stage, $teams); | element.append($match);
}
}
//Dialog
function messageDialog(type, message) {
switch(type) {
case "success":
$("#dialog-message")
.removeClass("error")
.text(message)
.addClass("success");
$(".bs-example-modal-sm").modal("show");
break;
case "error":
$("#dialog-message")
.removeClass("success")
.text(message)
.addClass("error");
$(".bs-example-modal-sm").modal("show");
break;
default:
alert("error");
}
}
//Ajax call + loaders (GET)
function getTeams() {
var teams = | random_line_split | |
main.js | plugin (remember note) */
//Validation - Email
$('input.email').each(function() {
initializeInput($('input.email'), $("input.email").formance('validate_email'));
});
$('input.email').on('keyup change', function() {
validate($(this), $(this).formance('validate_email'), "has-warning", $("#invalid-email").text());
$('div.alert-error').fadeOut();
});
$('input.email').on('blur', function () {
validate($(this), $(this).formance('validate_email'), "has-error", $("#invalid-email").text());
});
//Validation - Minimum characters: 1
if ($('input.min-1').get().length > 0)
initializeInput($('input.min-1'), inBounds($("input.min-1").val().length, 1));
$('input.min-1').on('keyup change', function () {
validate($(this), inBounds($(this).val().length, 1), "has-warning", $("#min-1").text());
$('div.alert-error').fadeOut();
});
$('input.min-1').on('blur', function () {
validate($(this), inBounds($(this).val().length, 1), "has-error", $("#min-1").text());
});
//Validation - Minimum characters: 4
$('input.min-4').each(function() {
initializeInput($(this), inBounds($(this).val().length, 4));
});
$('input.min-4').on('keyup change', function () {
validate($(this), inBounds($(this).val().length, 4), "has-warning", $("#min-4").text());
$('div.alert-error').fadeOut();
});
$('input.min-4').on('blur', function () {
validate($(this), inBounds($(this).val().length, 4), "has-error", $("#min-4").text());
});
//Validation - Password confirmation
$('input.pass-confirm').each(function() {
$(this).parent().removeClass("has-success has-warning has-error");
$(this).parent().prev().hide();
if ($(this).parent().parent().parent().find("p.error").children().text() == $("#diff-pass").text()) {
initializeInput($(this), false);
$(this).parent().prev().html($("#diff-pass").text());
}
//is("p.error")
});
$('input.pass-confirm').on('keyup change', function () {
validate($(this), confirmPass($(this).val(), $(this).parent().parent().prev().find("input").val()), "has-warning", $("#diff-pass").text());
$('div.alert-error').fadeOut();
});
$('input.pass-confirm').on('blur', function () {
validate($(this), confirmPass($(this).val(), $(this).parent().parent().prev().find("input").val()), "has-error", $("#diff-pass").text());
});
//Validation - Password
$('input.password').each(function() {
$(this).parent().removeClass("has-success has-warning has-error");
$(this).parent().prev().hide();
if ($(this).parent().prev().text().length > 0) {
initializeInput($(this), false);
}
//is("p.error")
});
});
function validate(input, valid, invalid, message) {
input.parent().removeClass("has-success has-warning has-error");
input.parent().prev().fadeOut();
if (valid) {
input.parent().addClass("has-success");
input.siblings().fadeIn();
} else {
input.parent().addClass(invalid);
input.siblings().fadeOut();
if (invalid === "has-error") |
}
}
function initializeInput(input, isValid) {
if (input.parent().parent().parent().children().hasClass("error")) {
if (isValid) {
input.parent().addClass("has-success");
input.siblings().fadeIn();
} else {
input.parent().prev().show();
input.parent().addClass('has-error');
input.siblings().fadeOut();
}
}
}
// Returns boolean whether the input has the minimum size required
function inBounds(actual, min) {
if (actual < min)
return false;
else
return true;
}
function confirmPass(actual, current) {
if (actual != current || current == "")
return false;
else
return true;
}
//Fills options with flags only
function teamOption(element) {
var ddData = [];
for (var i in teams) {
if (teams[i].id == 7)
ddData.push( { value: teams[i].id, imageSrc: "/assets/images/external/flags/" + teams[i].logo, selected: true } );
else
ddData.push( { value: teams[i].id, imageSrc: "/assets/images/external/flags/" + teams[i].logo } );
}
element.ddslick({
data: ddData,
width: 100,
imagePosition: "left"
});
}
//Fills options with flags and label
function teamOptionLabel(element) {
var ddData = [];
for (var i in teams) {
if (teams[i].id == 7)
ddData.push( { text: teams[i].name, value: teams[i].id, imageSrc: "assets/images/external/flags/" + teams[i].logo, selected: true } );
else
ddData.push( { text: teams[i].name, value: teams[i].id, imageSrc: "assets/images/external/flags/" + teams[i].logo } );
}
element.ddslick({
data: ddData,
width: 300,
imagePosition: "left"
});
}
function optionLabel(element, data) {
for (var i in data) {
element
.append($("<option></option>")
.attr("value", data[i].id)
.text(data[i].name));
}
element.selectric();
}
function matchList(element, data) {
var matches = data;
$(".inner-separator").css("height", Object.size(matches) * 120 + "px");
element.empty();
for (var i in matches) {
var match = matches[i]
var datetime = Date.parseString(match.datetime, "dd/MM/yyyy HH:mm");
var $match = $("<li>", {id: match.id, class: "match col-1-1"});
var $datetime = $("<div>", {class: "date col-1-3"});
var $day = $("<div>", {class: "day", text: datetime.getUTCDate()});
var $monthYear = $("<div>", {class: "month-year"});
var $month = $("<div>", {class: "month", text: monthsEN[datetime.getUTCMonth()]});
var $year = $("<div>", {class: "year", text: datetime.getUTCFullYear()});
var hours = datetime.getHours();
if (hours < 10)
hours = "0" + hours;
var minutes = datetime.getMinutes();
if (minutes < 10)
minutes = "0" + minutes;
var $time = $("<div>", {class: "time", text: hours + "h" + minutes})
$monthYear.append($month, $year);
$datetime.append($day, $monthYear, $time);
var title = stages[match.stage].name;
if (matches[i].group != "")
title += " " + groups[match.group].name;
var $stage = $("<div>", {class: "stage col-2-3", text: title});
var teamA = teams[match.matchTeamA.team];
var teamB = teams[match.matchTeamB.team];
var $teams = $("<div>", {class: "teams col-1-1"});
var $teamAImage = $("<img>", {src: "/assets/images/external/flags/" + teamA.logo});
$teamAImage.tooltip({
placement: "bottom",
title: teamA.name
});
var $teamBImage = $("<img>", {src: "/assets/images/external/flags/" + teamB.logo});
var $teamA = $("<div>", {class: "teamA"});
var $teamB = $("<div>", {class: "teamB"});
$teamBImage.tooltip({
placement: "bottom",
title: teamB.name
});
$teamA.append($teamAImage);
$teamB.append($teamBImage);
$teams.append($teamA, "X", $teamB);
//
$match.append($datetime, $stage, $teams);
element.append($match);
}
}
//Dialog
function messageDialog(type, message) {
switch(type) {
case "success":
$("#dialog-message")
.removeClass("error")
.text(message)
.addClass("success");
$(".bs-example-modal-sm").modal("show");
break;
case "error":
$("#dialog-message")
.removeClass("success")
.text(message)
.addClass("error");
$(".bs-example-modal-sm").modal("show");
break;
default:
alert("error");
}
}
//Ajax call + loaders (GET)
function getTeams() {
var teams | {
input.parent().prev().html(message);
input.parent().prev().fadeIn();
} | conditional_block |
main.js | plugin (remember note) */
//Validation - Email
$('input.email').each(function() {
initializeInput($('input.email'), $("input.email").formance('validate_email'));
});
$('input.email').on('keyup change', function() {
validate($(this), $(this).formance('validate_email'), "has-warning", $("#invalid-email").text());
$('div.alert-error').fadeOut();
});
$('input.email').on('blur', function () {
validate($(this), $(this).formance('validate_email'), "has-error", $("#invalid-email").text());
});
//Validation - Minimum characters: 1
if ($('input.min-1').get().length > 0)
initializeInput($('input.min-1'), inBounds($("input.min-1").val().length, 1));
$('input.min-1').on('keyup change', function () {
validate($(this), inBounds($(this).val().length, 1), "has-warning", $("#min-1").text());
$('div.alert-error').fadeOut();
});
$('input.min-1').on('blur', function () {
validate($(this), inBounds($(this).val().length, 1), "has-error", $("#min-1").text());
});
//Validation - Minimum characters: 4
$('input.min-4').each(function() {
initializeInput($(this), inBounds($(this).val().length, 4));
});
$('input.min-4').on('keyup change', function () {
validate($(this), inBounds($(this).val().length, 4), "has-warning", $("#min-4").text());
$('div.alert-error').fadeOut();
});
$('input.min-4').on('blur', function () {
validate($(this), inBounds($(this).val().length, 4), "has-error", $("#min-4").text());
});
//Validation - Password confirmation
$('input.pass-confirm').each(function() {
$(this).parent().removeClass("has-success has-warning has-error");
$(this).parent().prev().hide();
if ($(this).parent().parent().parent().find("p.error").children().text() == $("#diff-pass").text()) {
initializeInput($(this), false);
$(this).parent().prev().html($("#diff-pass").text());
}
//is("p.error")
});
$('input.pass-confirm').on('keyup change', function () {
validate($(this), confirmPass($(this).val(), $(this).parent().parent().prev().find("input").val()), "has-warning", $("#diff-pass").text());
$('div.alert-error').fadeOut();
});
$('input.pass-confirm').on('blur', function () {
validate($(this), confirmPass($(this).val(), $(this).parent().parent().prev().find("input").val()), "has-error", $("#diff-pass").text());
});
//Validation - Password
$('input.password').each(function() {
$(this).parent().removeClass("has-success has-warning has-error");
$(this).parent().prev().hide();
if ($(this).parent().prev().text().length > 0) {
initializeInput($(this), false);
}
//is("p.error")
});
});
function validate(input, valid, invalid, message) {
input.parent().removeClass("has-success has-warning has-error");
input.parent().prev().fadeOut();
if (valid) {
input.parent().addClass("has-success");
input.siblings().fadeIn();
} else {
input.parent().addClass(invalid);
input.siblings().fadeOut();
if (invalid === "has-error") {
input.parent().prev().html(message);
input.parent().prev().fadeIn();
}
}
}
function initializeInput(input, isValid) |
// Returns boolean whether the input has the minimum size required
function inBounds(actual, min) {
if (actual < min)
return false;
else
return true;
}
function confirmPass(actual, current) {
if (actual != current || current == "")
return false;
else
return true;
}
//Fills options with flags only
function teamOption(element) {
var ddData = [];
for (var i in teams) {
if (teams[i].id == 7)
ddData.push( { value: teams[i].id, imageSrc: "/assets/images/external/flags/" + teams[i].logo, selected: true } );
else
ddData.push( { value: teams[i].id, imageSrc: "/assets/images/external/flags/" + teams[i].logo } );
}
element.ddslick({
data: ddData,
width: 100,
imagePosition: "left"
});
}
//Fills options with flags and label
function teamOptionLabel(element) {
var ddData = [];
for (var i in teams) {
if (teams[i].id == 7)
ddData.push( { text: teams[i].name, value: teams[i].id, imageSrc: "assets/images/external/flags/" + teams[i].logo, selected: true } );
else
ddData.push( { text: teams[i].name, value: teams[i].id, imageSrc: "assets/images/external/flags/" + teams[i].logo } );
}
element.ddslick({
data: ddData,
width: 300,
imagePosition: "left"
});
}
function optionLabel(element, data) {
for (var i in data) {
element
.append($("<option></option>")
.attr("value", data[i].id)
.text(data[i].name));
}
element.selectric();
}
function matchList(element, data) {
var matches = data;
$(".inner-separator").css("height", Object.size(matches) * 120 + "px");
element.empty();
for (var i in matches) {
var match = matches[i]
var datetime = Date.parseString(match.datetime, "dd/MM/yyyy HH:mm");
var $match = $("<li>", {id: match.id, class: "match col-1-1"});
var $datetime = $("<div>", {class: "date col-1-3"});
var $day = $("<div>", {class: "day", text: datetime.getUTCDate()});
var $monthYear = $("<div>", {class: "month-year"});
var $month = $("<div>", {class: "month", text: monthsEN[datetime.getUTCMonth()]});
var $year = $("<div>", {class: "year", text: datetime.getUTCFullYear()});
var hours = datetime.getHours();
if (hours < 10)
hours = "0" + hours;
var minutes = datetime.getMinutes();
if (minutes < 10)
minutes = "0" + minutes;
var $time = $("<div>", {class: "time", text: hours + "h" + minutes})
$monthYear.append($month, $year);
$datetime.append($day, $monthYear, $time);
var title = stages[match.stage].name;
if (matches[i].group != "")
title += " " + groups[match.group].name;
var $stage = $("<div>", {class: "stage col-2-3", text: title});
var teamA = teams[match.matchTeamA.team];
var teamB = teams[match.matchTeamB.team];
var $teams = $("<div>", {class: "teams col-1-1"});
var $teamAImage = $("<img>", {src: "/assets/images/external/flags/" + teamA.logo});
$teamAImage.tooltip({
placement: "bottom",
title: teamA.name
});
var $teamBImage = $("<img>", {src: "/assets/images/external/flags/" + teamB.logo});
var $teamA = $("<div>", {class: "teamA"});
var $teamB = $("<div>", {class: "teamB"});
$teamBImage.tooltip({
placement: "bottom",
title: teamB.name
});
$teamA.append($teamAImage);
$teamB.append($teamBImage);
$teams.append($teamA, "X", $teamB);
//
$match.append($datetime, $stage, $teams);
element.append($match);
}
}
//Dialog
function messageDialog(type, message) {
switch(type) {
case "success":
$("#dialog-message")
.removeClass("error")
.text(message)
.addClass("success");
$(".bs-example-modal-sm").modal("show");
break;
case "error":
$("#dialog-message")
.removeClass("success")
.text(message)
.addClass("error");
$(".bs-example-modal-sm").modal("show");
break;
default:
alert("error");
}
}
//Ajax call + loaders (GET)
function getTeams() {
var teams | {
if (input.parent().parent().parent().children().hasClass("error")) {
if (isValid) {
input.parent().addClass("has-success");
input.siblings().fadeIn();
} else {
input.parent().prev().show();
input.parent().addClass('has-error');
input.siblings().fadeOut();
}
}
} | identifier_body |
main.js | plugin (remember note) */
//Validation - Email
$('input.email').each(function() {
initializeInput($('input.email'), $("input.email").formance('validate_email'));
});
$('input.email').on('keyup change', function() {
validate($(this), $(this).formance('validate_email'), "has-warning", $("#invalid-email").text());
$('div.alert-error').fadeOut();
});
$('input.email').on('blur', function () {
validate($(this), $(this).formance('validate_email'), "has-error", $("#invalid-email").text());
});
//Validation - Minimum characters: 1
if ($('input.min-1').get().length > 0)
initializeInput($('input.min-1'), inBounds($("input.min-1").val().length, 1));
$('input.min-1').on('keyup change', function () {
validate($(this), inBounds($(this).val().length, 1), "has-warning", $("#min-1").text());
$('div.alert-error').fadeOut();
});
$('input.min-1').on('blur', function () {
validate($(this), inBounds($(this).val().length, 1), "has-error", $("#min-1").text());
});
//Validation - Minimum characters: 4
$('input.min-4').each(function() {
initializeInput($(this), inBounds($(this).val().length, 4));
});
$('input.min-4').on('keyup change', function () {
validate($(this), inBounds($(this).val().length, 4), "has-warning", $("#min-4").text());
$('div.alert-error').fadeOut();
});
$('input.min-4').on('blur', function () {
validate($(this), inBounds($(this).val().length, 4), "has-error", $("#min-4").text());
});
//Validation - Password confirmation
$('input.pass-confirm').each(function() {
$(this).parent().removeClass("has-success has-warning has-error");
$(this).parent().prev().hide();
if ($(this).parent().parent().parent().find("p.error").children().text() == $("#diff-pass").text()) {
initializeInput($(this), false);
$(this).parent().prev().html($("#diff-pass").text());
}
//is("p.error")
});
$('input.pass-confirm').on('keyup change', function () {
validate($(this), confirmPass($(this).val(), $(this).parent().parent().prev().find("input").val()), "has-warning", $("#diff-pass").text());
$('div.alert-error').fadeOut();
});
$('input.pass-confirm').on('blur', function () {
validate($(this), confirmPass($(this).val(), $(this).parent().parent().prev().find("input").val()), "has-error", $("#diff-pass").text());
});
//Validation - Password
$('input.password').each(function() {
$(this).parent().removeClass("has-success has-warning has-error");
$(this).parent().prev().hide();
if ($(this).parent().prev().text().length > 0) {
initializeInput($(this), false);
}
//is("p.error")
});
});
function validate(input, valid, invalid, message) {
input.parent().removeClass("has-success has-warning has-error");
input.parent().prev().fadeOut();
if (valid) {
input.parent().addClass("has-success");
input.siblings().fadeIn();
} else {
input.parent().addClass(invalid);
input.siblings().fadeOut();
if (invalid === "has-error") {
input.parent().prev().html(message);
input.parent().prev().fadeIn();
}
}
}
function initializeInput(input, isValid) {
if (input.parent().parent().parent().children().hasClass("error")) {
if (isValid) {
input.parent().addClass("has-success");
input.siblings().fadeIn();
} else {
input.parent().prev().show();
input.parent().addClass('has-error');
input.siblings().fadeOut();
}
}
}
// Returns boolean whether the input has the minimum size required
function inBounds(actual, min) {
if (actual < min)
return false;
else
return true;
}
function confirmPass(actual, current) {
if (actual != current || current == "")
return false;
else
return true;
}
//Fills options with flags only
function teamOption(element) {
var ddData = [];
for (var i in teams) {
if (teams[i].id == 7)
ddData.push( { value: teams[i].id, imageSrc: "/assets/images/external/flags/" + teams[i].logo, selected: true } );
else
ddData.push( { value: teams[i].id, imageSrc: "/assets/images/external/flags/" + teams[i].logo } );
}
element.ddslick({
data: ddData,
width: 100,
imagePosition: "left"
});
}
//Fills options with flags and label
function | (element) {
var ddData = [];
for (var i in teams) {
if (teams[i].id == 7)
ddData.push( { text: teams[i].name, value: teams[i].id, imageSrc: "assets/images/external/flags/" + teams[i].logo, selected: true } );
else
ddData.push( { text: teams[i].name, value: teams[i].id, imageSrc: "assets/images/external/flags/" + teams[i].logo } );
}
element.ddslick({
data: ddData,
width: 300,
imagePosition: "left"
});
}
function optionLabel(element, data) {
for (var i in data) {
element
.append($("<option></option>")
.attr("value", data[i].id)
.text(data[i].name));
}
element.selectric();
}
function matchList(element, data) {
var matches = data;
$(".inner-separator").css("height", Object.size(matches) * 120 + "px");
element.empty();
for (var i in matches) {
var match = matches[i]
var datetime = Date.parseString(match.datetime, "dd/MM/yyyy HH:mm");
var $match = $("<li>", {id: match.id, class: "match col-1-1"});
var $datetime = $("<div>", {class: "date col-1-3"});
var $day = $("<div>", {class: "day", text: datetime.getUTCDate()});
var $monthYear = $("<div>", {class: "month-year"});
var $month = $("<div>", {class: "month", text: monthsEN[datetime.getUTCMonth()]});
var $year = $("<div>", {class: "year", text: datetime.getUTCFullYear()});
var hours = datetime.getHours();
if (hours < 10)
hours = "0" + hours;
var minutes = datetime.getMinutes();
if (minutes < 10)
minutes = "0" + minutes;
var $time = $("<div>", {class: "time", text: hours + "h" + minutes})
$monthYear.append($month, $year);
$datetime.append($day, $monthYear, $time);
var title = stages[match.stage].name;
if (matches[i].group != "")
title += " " + groups[match.group].name;
var $stage = $("<div>", {class: "stage col-2-3", text: title});
var teamA = teams[match.matchTeamA.team];
var teamB = teams[match.matchTeamB.team];
var $teams = $("<div>", {class: "teams col-1-1"});
var $teamAImage = $("<img>", {src: "/assets/images/external/flags/" + teamA.logo});
$teamAImage.tooltip({
placement: "bottom",
title: teamA.name
});
var $teamBImage = $("<img>", {src: "/assets/images/external/flags/" + teamB.logo});
var $teamA = $("<div>", {class: "teamA"});
var $teamB = $("<div>", {class: "teamB"});
$teamBImage.tooltip({
placement: "bottom",
title: teamB.name
});
$teamA.append($teamAImage);
$teamB.append($teamBImage);
$teams.append($teamA, "X", $teamB);
//
$match.append($datetime, $stage, $teams);
element.append($match);
}
}
//Dialog
function messageDialog(type, message) {
switch(type) {
case "success":
$("#dialog-message")
.removeClass("error")
.text(message)
.addClass("success");
$(".bs-example-modal-sm").modal("show");
break;
case "error":
$("#dialog-message")
.removeClass("success")
.text(message)
.addClass("error");
$(".bs-example-modal-sm").modal("show");
break;
default:
alert("error");
}
}
//Ajax call + loaders (GET)
function getTeams() {
var teams = | teamOptionLabel | identifier_name |
atm.py | else :
print('Masukan tidak valid.')
else:
print('Saldo tidak cukup')
return saldo
#Deklarasi Fungsi Pulsa
def pulsa(saldo,history,namakartu):
#Program menampilkan menu pilihan pengisian pulsa
#KAMUS LOKAL
#saldo, kodepulsa : int
#history : 3d matriks of integer and string
#namakartun: string
#ALGORITMA FUNGSI
print('Anda akan mengisi pulsa kartu',namakartu)
print('Pilih nominal yang anda inginkan')
print('1. 50.000')
print('2. 100.000')
print('3. 150.000')
print('4. 200.000')
kodepulsa=int(input())
print('Masukkan nomor HP Anda')
nohp=int(input())
for i in range (4):
if i+1==kodepulsa :
saldo=konfirmasipulsa(saldo,history,((i+1)*50000),nohp)
return saldo
# ALGORITMA UTAMA
jumlahorang=4
data=[[0 for j in range (3)] for i in range (jumlahorang)]
history=[[[0 for j in range (2)] for i in range (3)] for k in range(jumlahorang)]
atmbarunyala=True
#Kolom pertama berisi nama
#Kolom kedua berisi PIN
#Kolom ketiga berisi saldo
#Deklarasi database awal
data[0][0]='Theodore Jonathan'
data[0][1]=519207
data[0][2]=8000000
data[1][0]='Reynaldo Averill'
data[1][1]=519097
data[1][2]=9000000
data[2][0]='Allief Nuriman'
data[2][1]=519117
data[2][2]=8500000
data[3][0]='Mohammad Sheva'
data[3][1]=519217
data[3][2]=9500000
# Layar login awal (Reynaldo Averill)
while True:
print('ATM Bank TARS')
print('Tekan 0 jika kartu ATM anda adalah kartu ATM TARS')
print('Tekan 1 jika kartu ATM anda berasal dari bank lain dan anda belum mengisi database di ATM ini')
print('Tekan 2 jika anda ingin registrasi data baru, dan kartu ATM anda adalah kartu ATM TARS')
if atmbarunyala==False:
print('Tekan 3 jika kartu ATM anda berasal dari bank lain dan ada sudah mengisi database di ATM ini')
jeniskartu=int(input())
cobapertamakali=True
# Registrasi kartu ATM non TARS
if(jeniskartu==1):
print('Maaf, kami tidak memiliki database anda. Silahkan isi database berikut: ')
registrasi(data)
jumlahorang+=1
#Memperluas array history karena jumlah orang bertambah
historybaru=[[[0 for j in range (2)] for i in range (3)] for k in range (jumlahorang)]
for k in range(jumlahorang-1):
for i in range (3):
for j in range(2):
historybaru[k][i][j]=history[k][i][j]
history=historybaru
print('Registrasi data selesai')
# Registrasi kartu ATM TARS
elif(jeniskartu==2):
regisulang=1
while(regisulang==1):
print('Silahkan lengkapi data berikut')
registrasi(data)
jumlahorang+=1
print('Registrasi data selesai. Apakah anda ingin melakukan registrasi data lagi ?')
print('Tekan 1 untuk ya')
print('Tekan 0 untuk tidak')
regisulang=int(input())
elif(jeniskartu==3):
jeniskartu=1
#Menghentikan program ATM
elif(jeniskartu==16519):
print('Program ATM akan dihentikan')
exit()
#Menu Utama (Mohammad Sheva)
ModeTransaksi = 1
while ModeTransaksi == 1 :
# PIN kartu
print('Silakan masukkan PIN anda')
print('Jagalah kerahasiaan PIN anda')
coba=0
pinadadidatabase=False
pinangkasemua=False
while((coba<3)and((pinangkasemua!=True)or(pinadadidatabase!=True))):
pin=input()
pinadadidatabase=False
pinangkasemua=False
if((pin.isdigit()==True) and (len(pin)==6)):
pinangkasemua=True
if cobapertamakali==True:
for i in range (jumlahorang):
if(int(data[i][1])==int(pin)):
pinadadidatabase=True
indeksorang=i
else:
if(int(data[indeksorang][1])==int(pin)):
pinadadidatabase=True
if(pinangkasemua==False)and (coba<2):
print('Pin harus berupa kombinasi dari 6 digit angka')
print('Silahkan masukkan ulang PIN anda')
elif((pinadadidatabase==False)and(coba<2)):
print('Pin anda salah, silahkan masukkan ulang PIN anda')
coba+=1
if((coba==3)and(pinangkasemua!=True)and(pinadadidatabase!=True)):
print('Anda telah memasukkan pin sebanyak 3 kali. Kartu ATM anda akan diblokir')
print('Silahkan datangi kantor cabang Bank TARS terdekat di daerah anda')
exit()
saldo = int(data[indeksorang][2])
if jeniskartu == 1 :
print("Transaksi di ATM ini akan dikenakan biaya administrasi sebesar Rp 6.500")
print("Pilihan transaksi :")
print("1. Setoran tunai dan Informasi Rekening")
print("2. Pembayaran")
print("3. Transfer")
transaksi = int(input("Masukkan pilihan transaksi : "))
# Setor tunai & Informasi Rekening
if transaksi == 1 :
print("Pilihan transaksi : ")
print("1. Setoran Tunai")
print("2. Informasi Saldo")
print("3. Transaksi Terakhir")
pilihan = int(input("Pilihan : "))
#Setoran tunai
if pilihan == 1 :
setor = int(input("Masukkan jumlah setoran : "))
print("Apakah Anda yakin akan menyetor sebanyak", setor, "?")
print("0. Tidak")
print("1. Ya")
yakin = int(input("Jawaban : "))
if yakin == 1 :
saldo = saldo + setor
jejaktransaksi(history,setor,'Setor Tunai')
print('Setor Tunai Berhasil')
#Informasi saldo
elif pilihan == 2 :
print("Saldo Anda tersisa", saldo)
#Transaksi terakhir
elif pilihan == 3 :
print('Tiga Transaksi Terakhir Anda: ')
print('| Nominal | Jenis Transaksi |')
for i in range (3):
print('|',end=' ')
for j in range (2): | # Pembayaran listrik, air, pendidikan, pulsa(Allief Nuriman)
elif transaksi == 2 :
print("1. Listrik")
print("2. Air")
print("3. Pendidikan")
print("4. Pulsa")
tujuantrf = int(input("Masukkan kode tujuan transfer : "))
#Pembayaran Listrik
if tujuantrf == 1 :
norek = int(input("Silakan masukkan 12 digit kode pelanggan : "))
print("Pastikan Anda membawa gawai Anda")
notelpon = int(input("Masukkan nomor handphone Anda : "))
print ("Kami telah mengirim SMS jumlah tagihan, denda, nama pelanggan ke nomor Anda, silakan dicek")
print("Silakan masukkan jumlah uang yang harus Anda bayar")
jumlahtransfer = int(input())
print("Apakah Anda yakin akan transfer sebanyak",jumlahtransfer,"?")
print("0. Tidak")
print("1. Ya")
yakin = int(input())
if yakin == 1 :
if | print(history[indeksorang][i][j],end=' | ' )
print()
| random_line_split |
atm.py | (data):
#Registrasi digunakan untuk menambahkan user baru pada database
#KAMUS LOKAL
#databaru : array of integer
#ALGORITMA FUNGSI
databaru=[0 for i in range (3)]
databaru[0]=input('Masukkan nama: ')
databaru[1]=input('Masukkan PIN anda (Terdiri dari 6 digit angka): ')
while((databaru[1].isdigit()==False)or(len(databaru[1])!=6)):
print('PIN harus terdiri dari 6 digit angka')
databaru[1]=input('Masukkan ulang PIN anda: ')
databaru[2]=input('Masukkan saldo: ')
while(databaru[2].isdigit()==False):
print('Saldo harus berupa bilangan bulat')
databaru[2]=input('Masukkan setoran awal anda: ')
data.append(databaru)
return data
#Deklarasi Fungsi jejaktransaksi
def jejaktransaksi(history,nominal,jenistransaksi):
#jejaktransaksi digunakan untuk mencatat transaksi yang dilakukan suatu akun
#KAMUS LOKAL
#history : matriks 3d of integer
#nominal : int
#jenistransaksi : str
#ALGORITMA FUNGSI
history[indeksorang][2][0]=history[indeksorang][1][0]
history[indeksorang][2][1]=history[indeksorang][1][1]
history[indeksorang][1][0]=history[indeksorang][0][0]
history[indeksorang][1][1]=history[indeksorang][0][1]
history[indeksorang][0][0]=nominal
history[indeksorang][0][1]=jenistransaksi
return history
#Deklarasi Fungsi Konfirmasi Pulsa
def konfirmasipulsa(saldo,history,nominalpulsa,nohp) :
#Program menampilkan layar konfirmasi saat melakukan pembelian pulsa
#KAMUS LOKAL
#saldo, nominal pulsa, nohp : int
#history : 3d matriks of integer and string
#ALGORITMA FUNGSI
if saldo >= nominalpulsa:
print('Anda yakin ingin isi pulsa',nominalpulsa,'ke nomor',nohp,'?')
print('0. Tidak')
print('1. Ya')
yakin=int(input())
if yakin == 1 :
print('Isi ulang berhasil.')
saldo=saldo-nominalpulsa
jejaktransaksi(history,nominalpulsa,"Pembayaran Pulsa")
else :
print('Masukan tidak valid.')
else:
print('Saldo tidak cukup')
return saldo
#Deklarasi Fungsi Pulsa
def pulsa(saldo,history,namakartu):
#Program menampilkan menu pilihan pengisian pulsa
#KAMUS LOKAL
#saldo, kodepulsa : int
#history : 3d matriks of integer and string
#namakartun: string
#ALGORITMA FUNGSI
print('Anda akan mengisi pulsa kartu',namakartu)
print('Pilih nominal yang anda inginkan')
print('1. 50.000')
print('2. 100.000')
print('3. 150.000')
print('4. 200.000')
kodepulsa=int(input())
print('Masukkan nomor HP Anda')
nohp=int(input())
for i in range (4):
if i+1==kodepulsa :
saldo=konfirmasipulsa(saldo,history,((i+1)*50000),nohp)
return saldo
# ALGORITMA UTAMA
jumlahorang=4
data=[[0 for j in range (3)] for i in range (jumlahorang)]
history=[[[0 for j in range (2)] for i in range (3)] for k in range(jumlahorang)]
atmbarunyala=True
#Kolom pertama berisi nama
#Kolom kedua berisi PIN
#Kolom ketiga berisi saldo
#Deklarasi database awal
data[0][0]='Theodore Jonathan'
data[0][1]=519207
data[0][2]=8000000
data[1][0]='Reynaldo Averill'
data[1][1]=519097
data[1][2]=9000000
data[2][0]='Allief Nuriman'
data[2][1]=519117
data[2][2]=8500000
data[3][0]='Mohammad Sheva'
data[3][1]=519217
data[3][2]=9500000
# Layar login awal (Reynaldo Averill)
while True:
print('ATM Bank TARS')
print('Tekan 0 jika kartu ATM anda adalah kartu ATM TARS')
print('Tekan 1 jika kartu ATM anda berasal dari bank lain dan anda belum mengisi database di ATM ini')
print('Tekan 2 jika anda ingin registrasi data baru, dan kartu ATM anda adalah kartu ATM TARS')
if atmbarunyala==False:
print('Tekan 3 jika kartu ATM anda berasal dari bank lain dan ada sudah mengisi database di ATM ini')
jeniskartu=int(input())
cobapertamakali=True
# Registrasi kartu ATM non TARS
if(jeniskartu==1):
print('Maaf, kami tidak memiliki database anda. Silahkan isi database berikut: ')
registrasi(data)
jumlahorang+=1
#Memperluas array history karena jumlah orang bertambah
historybaru=[[[0 for j in range (2)] for i in range (3)] for k in range (jumlahorang)]
for k in range(jumlahorang-1):
for i in range (3):
for j in range(2):
historybaru[k][i][j]=history[k][i][j]
history=historybaru
print('Registrasi data selesai')
# Registrasi kartu ATM TARS
elif(jeniskartu==2):
regisulang=1
while(regisulang==1):
print('Silahkan lengkapi data berikut')
registrasi(data)
jumlahorang+=1
print('Registrasi data selesai. Apakah anda ingin melakukan registrasi data lagi ?')
print('Tekan 1 untuk ya')
print('Tekan 0 untuk tidak')
regisulang=int(input())
elif(jeniskartu==3):
jeniskartu=1
#Menghentikan program ATM
elif(jeniskartu==16519):
print('Program ATM akan dihentikan')
exit()
#Menu Utama (Mohammad Sheva)
ModeTransaksi = 1
while ModeTransaksi == 1 :
# PIN kartu
print('Silakan masukkan PIN anda')
print('Jagalah kerahasiaan PIN anda')
coba=0
pinadadidatabase=False
pinangkasemua=False
while((coba<3)and((pinangkasemua!=True)or(pinadadidatabase!=True))):
pin=input()
pinadadidatabase=False
pinangkasemua=False
if((pin.isdigit()==True) and (len(pin)==6)):
pinangkasemua=True
if cobapertamakali==True:
for i in range (jumlahorang):
if(int(data[i][1])==int(pin)):
pinadadidatabase=True
indeksorang=i
else:
if(int(data[indeksorang][1])==int(pin)):
pinadadidatabase=True
if(pinangkasemua==False)and (coba<2):
print('Pin harus berupa kombinasi dari 6 digit angka')
print('Silahkan masukkan ulang PIN anda')
elif((pinadadidatabase==False)and(coba<2)):
print('Pin anda salah, silahkan masukkan ulang PIN anda')
coba+=1
if((coba==3)and(pinangkasemua!=True)and(pinadadidatabase!=True)):
print('Anda telah memasukkan pin sebanyak 3 kali. Kartu ATM anda akan diblokir')
print('Silahkan datangi kantor cabang Bank TARS terdekat di daerah anda')
exit()
saldo = int(data[indeksorang][2])
if jeniskartu == 1 | registrasi | identifier_name | |
atm.py | else :
print('Masukan tidak valid.')
else:
print('Saldo tidak cukup')
return saldo
#Deklarasi Fungsi Pulsa
def pulsa(saldo,history,namakartu):
#Program menampilkan menu pilihan pengisian pulsa
#KAMUS LOKAL
#saldo, kodepulsa : int
#history : 3d matriks of integer and string
#namakartun: string
#ALGORITMA FUNGSI
|
# ALGORITMA UTAMA
jumlahorang=4
data=[[0 for j in range (3)] for i in range (jumlahorang)]
history=[[[0 for j in range (2)] for i in range (3)] for k in range(jumlahorang)]
atmbarunyala=True
#Kolom pertama berisi nama
#Kolom kedua berisi PIN
#Kolom ketiga berisi saldo
#Deklarasi database awal
data[0][0]='Theodore Jonathan'
data[0][1]=519207
data[0][2]=8000000
data[1][0]='Reynaldo Averill'
data[1][1]=519097
data[1][2]=9000000
data[2][0]='Allief Nuriman'
data[2][1]=519117
data[2][2]=8500000
data[3][0]='Mohammad Sheva'
data[3][1]=519217
data[3][2]=9500000
# Layar login awal (Reynaldo Averill)
while True:
print('ATM Bank TARS')
print('Tekan 0 jika kartu ATM anda adalah kartu ATM TARS')
print('Tekan 1 jika kartu ATM anda berasal dari bank lain dan anda belum mengisi database di ATM ini')
print('Tekan 2 jika anda ingin registrasi data baru, dan kartu ATM anda adalah kartu ATM TARS')
if atmbarunyala==False:
print('Tekan 3 jika kartu ATM anda berasal dari bank lain dan ada sudah mengisi database di ATM ini')
jeniskartu=int(input())
cobapertamakali=True
# Registrasi kartu ATM non TARS
if(jeniskartu==1):
print('Maaf, kami tidak memiliki database anda. Silahkan isi database berikut: ')
registrasi(data)
jumlahorang+=1
#Memperluas array history karena jumlah orang bertambah
historybaru=[[[0 for j in range (2)] for i in range (3)] for k in range (jumlahorang)]
for k in range(jumlahorang-1):
for i in range (3):
for j in range(2):
historybaru[k][i][j]=history[k][i][j]
history=historybaru
print('Registrasi data selesai')
# Registrasi kartu ATM TARS
elif(jeniskartu==2):
regisulang=1
while(regisulang==1):
print('Silahkan lengkapi data berikut')
registrasi(data)
jumlahorang+=1
print('Registrasi data selesai. Apakah anda ingin melakukan registrasi data lagi ?')
print('Tekan 1 untuk ya')
print('Tekan 0 untuk tidak')
regisulang=int(input())
elif(jeniskartu==3):
jeniskartu=1
#Menghentikan program ATM
elif(jeniskartu==16519):
print('Program ATM akan dihentikan')
exit()
#Menu Utama (Mohammad Sheva)
ModeTransaksi = 1
while ModeTransaksi == 1 :
# PIN kartu
print('Silakan masukkan PIN anda')
print('Jagalah kerahasiaan PIN anda')
coba=0
pinadadidatabase=False
pinangkasemua=False
while((coba<3)and((pinangkasemua!=True)or(pinadadidatabase!=True))):
pin=input()
pinadadidatabase=False
pinangkasemua=False
if((pin.isdigit()==True) and (len(pin)==6)):
pinangkasemua=True
if cobapertamakali==True:
for i in range (jumlahorang):
if(int(data[i][1])==int(pin)):
pinadadidatabase=True
indeksorang=i
else:
if(int(data[indeksorang][1])==int(pin)):
pinadadidatabase=True
if(pinangkasemua==False)and (coba<2):
print('Pin harus berupa kombinasi dari 6 digit angka')
print('Silahkan masukkan ulang PIN anda')
elif((pinadadidatabase==False)and(coba<2)):
print('Pin anda salah, silahkan masukkan ulang PIN anda')
coba+=1
if((coba==3)and(pinangkasemua!=True)and(pinadadidatabase!=True)):
print('Anda telah memasukkan pin sebanyak 3 kali. Kartu ATM anda akan diblokir')
print('Silahkan datangi kantor cabang Bank TARS terdekat di daerah anda')
exit()
saldo = int(data[indeksorang][2])
if jeniskartu == 1 :
print("Transaksi di ATM ini akan dikenakan biaya administrasi sebesar Rp 6.500")
print("Pilihan transaksi :")
print("1. Setoran tunai dan Informasi Rekening")
print("2. Pembayaran")
print("3. Transfer")
transaksi = int(input("Masukkan pilihan transaksi : "))
# Setor tunai & Informasi Rekening
if transaksi == 1 :
print("Pilihan transaksi : ")
print("1. Setoran Tunai")
print("2. Informasi Saldo")
print("3. Transaksi Terakhir")
pilihan = int(input("Pilihan : "))
#Setoran tunai
if pilihan == 1 :
setor = int(input("Masukkan jumlah setoran : "))
print("Apakah Anda yakin akan menyetor sebanyak", setor, "?")
print("0. Tidak")
print("1. Ya")
yakin = int(input("Jawaban : "))
if yakin == 1 :
saldo = saldo + setor
jejaktransaksi(history,setor,'Setor Tunai')
print('Setor Tunai Berhasil')
#Informasi saldo
elif pilihan == 2 :
print("Saldo Anda tersisa", saldo)
#Transaksi terakhir
elif pilihan == 3 :
print('Tiga Transaksi Terakhir Anda: ')
print('| Nominal | Jenis Transaksi |')
for i in range (3):
print('|',end=' ')
for j in range (2):
print(history[indeksorang][i][j],end=' | ' )
print()
# Pembayaran listrik, air, pendidikan, pulsa(Allief Nuriman)
elif transaksi == 2 :
print("1. Listrik")
print("2. Air")
print("3. Pendidikan")
print("4. Pulsa")
tujuantrf = int(input("Masukkan kode tujuan transfer : "))
#Pembayaran Listrik
if tujuantrf == 1 :
norek = int(input("Silakan masukkan 12 digit kode pelanggan : "))
print("Pastikan Anda membawa gawai Anda")
notelpon = int(input("Masukkan nomor handphone Anda : "))
print ("Kami telah mengirim SMS jumlah tagihan, denda, nama pelanggan ke nomor Anda, silakan dicek")
print("Silakan masukkan jumlah uang yang harus Anda bayar")
jumlahtransfer = int(input())
print("Apakah Anda yakin akan transfer sebanyak",jumlahtransfer,"?")
print("0. Tidak")
print("1. Ya")
yakin = int(input())
if yakin == 1 :
| print('Anda akan mengisi pulsa kartu',namakartu)
print('Pilih nominal yang anda inginkan')
print('1. 50.000')
print('2. 100.000')
print('3. 150.000')
print('4. 200.000')
kodepulsa=int(input())
print('Masukkan nomor HP Anda')
nohp=int(input())
for i in range (4):
if i+1==kodepulsa :
saldo=konfirmasipulsa(saldo,history,((i+1)*50000),nohp)
return saldo | identifier_body |
atm.py | else :
print('Masukan tidak valid.')
else:
print('Saldo tidak cukup')
return saldo
#Deklarasi Fungsi Pulsa
def pulsa(saldo,history,namakartu):
#Program menampilkan menu pilihan pengisian pulsa
#KAMUS LOKAL
#saldo, kodepulsa : int
#history : 3d matriks of integer and string
#namakartun: string
#ALGORITMA FUNGSI
print('Anda akan mengisi pulsa kartu',namakartu)
print('Pilih nominal yang anda inginkan')
print('1. 50.000')
print('2. 100.000')
print('3. 150.000')
print('4. 200.000')
kodepulsa=int(input())
print('Masukkan nomor HP Anda')
nohp=int(input())
for i in range (4):
if i+1==kodepulsa :
saldo=konfirmasipulsa(saldo,history,((i+1)*50000),nohp)
return saldo
# ALGORITMA UTAMA
jumlahorang=4
data=[[0 for j in range (3)] for i in range (jumlahorang)]
history=[[[0 for j in range (2)] for i in range (3)] for k in range(jumlahorang)]
atmbarunyala=True
#Kolom pertama berisi nama
#Kolom kedua berisi PIN
#Kolom ketiga berisi saldo
#Deklarasi database awal
data[0][0]='Theodore Jonathan'
data[0][1]=519207
data[0][2]=8000000
data[1][0]='Reynaldo Averill'
data[1][1]=519097
data[1][2]=9000000
data[2][0]='Allief Nuriman'
data[2][1]=519117
data[2][2]=8500000
data[3][0]='Mohammad Sheva'
data[3][1]=519217
data[3][2]=9500000
# Layar login awal (Reynaldo Averill)
while True:
print('ATM Bank TARS')
print('Tekan 0 jika kartu ATM anda adalah kartu ATM TARS')
print('Tekan 1 jika kartu ATM anda berasal dari bank lain dan anda belum mengisi database di ATM ini')
print('Tekan 2 jika anda ingin registrasi data baru, dan kartu ATM anda adalah kartu ATM TARS')
if atmbarunyala==False:
print('Tekan 3 jika kartu ATM anda berasal dari bank lain dan ada sudah mengisi database di ATM ini')
jeniskartu=int(input())
cobapertamakali=True
# Registrasi kartu ATM non TARS
if(jeniskartu==1):
print('Maaf, kami tidak memiliki database anda. Silahkan isi database berikut: ')
registrasi(data)
jumlahorang+=1
#Memperluas array history karena jumlah orang bertambah
historybaru=[[[0 for j in range (2)] for i in range (3)] for k in range (jumlahorang)]
for k in range(jumlahorang-1):
for i in range (3):
for j in range(2):
historybaru[k][i][j]=history[k][i][j]
history=historybaru
print('Registrasi data selesai')
# Registrasi kartu ATM TARS
elif(jeniskartu==2):
regisulang=1
while(regisulang==1):
print('Silahkan lengkapi data berikut')
registrasi(data)
jumlahorang+=1
print('Registrasi data selesai. Apakah anda ingin melakukan registrasi data lagi ?')
print('Tekan 1 untuk ya')
print('Tekan 0 untuk tidak')
regisulang=int(input())
elif(jeniskartu==3):
jeniskartu=1
#Menghentikan program ATM
elif(jeniskartu==16519):
print('Program ATM akan dihentikan')
exit()
#Menu Utama (Mohammad Sheva)
ModeTransaksi = 1
while ModeTransaksi == 1 :
# PIN kartu
print('Silakan masukkan PIN anda')
print('Jagalah kerahasiaan PIN anda')
coba=0
pinadadidatabase=False
pinangkasemua=False
while((coba<3)and((pinangkasemua!=True)or(pinadadidatabase!=True))):
pin=input()
pinadadidatabase=False
pinangkasemua=False
if((pin.isdigit()==True) and (len(pin)==6)):
pinangkasemua=True
if cobapertamakali==True:
for i in range (jumlahorang):
if(int(data[i][1])==int(pin)):
pinadadidatabase=True
indeksorang=i
else:
|
if(pinangkasemua==False)and (coba<2):
print('Pin harus berupa kombinasi dari 6 digit angka')
print('Silahkan masukkan ulang PIN anda')
elif((pinadadidatabase==False)and(coba<2)):
print('Pin anda salah, silahkan masukkan ulang PIN anda')
coba+=1
if((coba==3)and(pinangkasemua!=True)and(pinadadidatabase!=True)):
print('Anda telah memasukkan pin sebanyak 3 kali. Kartu ATM anda akan diblokir')
print('Silahkan datangi kantor cabang Bank TARS terdekat di daerah anda')
exit()
saldo = int(data[indeksorang][2])
if jeniskartu == 1 :
print("Transaksi di ATM ini akan dikenakan biaya administrasi sebesar Rp 6.500")
print("Pilihan transaksi :")
print("1. Setoran tunai dan Informasi Rekening")
print("2. Pembayaran")
print("3. Transfer")
transaksi = int(input("Masukkan pilihan transaksi : "))
# Setor tunai & Informasi Rekening
if transaksi == 1 :
print("Pilihan transaksi : ")
print("1. Setoran Tunai")
print("2. Informasi Saldo")
print("3. Transaksi Terakhir")
pilihan = int(input("Pilihan : "))
#Setoran tunai
if pilihan == 1 :
setor = int(input("Masukkan jumlah setoran : "))
print("Apakah Anda yakin akan menyetor sebanyak", setor, "?")
print("0. Tidak")
print("1. Ya")
yakin = int(input("Jawaban : "))
if yakin == 1 :
saldo = saldo + setor
jejaktransaksi(history,setor,'Setor Tunai')
print('Setor Tunai Berhasil')
#Informasi saldo
elif pilihan == 2 :
print("Saldo Anda tersisa", saldo)
#Transaksi terakhir
elif pilihan == 3 :
print('Tiga Transaksi Terakhir Anda: ')
print('| Nominal | Jenis Transaksi |')
for i in range (3):
print('|',end=' ')
for j in range (2):
print(history[indeksorang][i][j],end=' | ' )
print()
# Pembayaran listrik, air, pendidikan, pulsa(Allief Nuriman)
elif transaksi == 2 :
print("1. Listrik")
print("2. Air")
print("3. Pendidikan")
print("4. Pulsa")
tujuantrf = int(input("Masukkan kode tujuan transfer : "))
#Pembayaran Listrik
if tujuantrf == 1 :
norek = int(input("Silakan masukkan 12 digit kode pelanggan : "))
print("Pastikan Anda membawa gawai Anda")
notelpon = int(input("Masukkan nomor handphone Anda : "))
print ("Kami telah mengirim SMS jumlah tagihan, denda, nama pelanggan ke nomor Anda, silakan dicek")
print("Silakan masukkan jumlah uang yang harus Anda bayar")
jumlahtransfer = int(input())
print("Apakah Anda yakin akan transfer sebanyak",jumlahtransfer,"?")
print("0. Tidak")
print("1. Ya")
yakin = int(input())
if yakin == 1 :
if | if(int(data[indeksorang][1])==int(pin)):
pinadadidatabase=True | conditional_block |
service.go | any async callbacks
func (ac *asyncCallbacksHandler) push(f func()) {
ac.cbQueue <- f
}
func (ac *asyncCallbacksHandler) close() {
close(ac.cbQueue)
}
func (c *Config) valid() error {
if !nameRegexp.MatchString(c.Name) {
return fmt.Errorf("%w: service name: name should not be empty and should consist of alphanumerical charactest, dashes and underscores", ErrConfigValidation)
}
if !semVerRegexp.MatchString(c.Version) {
return fmt.Errorf("%w: version: version should not be empty should match the SemVer format", ErrConfigValidation)
}
return nil
}
func (s *service) wrapConnectionEventCallbacks() {
s.m.Lock()
defer s.m.Unlock()
s.natsHandlers.closed = s.nc.ClosedHandler()
if s.natsHandlers.closed != nil {
s.nc.SetClosedHandler(func(c *nats.Conn) {
s.Stop()
s.natsHandlers.closed(c)
})
} else {
s.nc.SetClosedHandler(func(c *nats.Conn) {
s.Stop()
})
}
s.natsHandlers.asyncErr = s.nc.ErrorHandler()
if s.natsHandlers.asyncErr != nil {
s.nc.SetErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) {
endpoint, match := s.matchSubscriptionSubject(sub.Subject)
if !match {
s.natsHandlers.asyncErr(c, sub, err)
return
}
if s.Config.ErrorHandler != nil {
s.Config.ErrorHandler(s, &NATSError{
Subject: sub.Subject,
Description: err.Error(),
})
}
s.m.Lock()
if endpoint != nil {
endpoint.stats.NumErrors++
endpoint.stats.LastError = err.Error()
}
s.m.Unlock()
s.Stop()
s.natsHandlers.asyncErr(c, sub, err)
})
} else {
s.nc.SetErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) {
endpoint, match := s.matchSubscriptionSubject(sub.Subject)
if !match {
return
}
if s.Config.ErrorHandler != nil {
s.Config.ErrorHandler(s, &NATSError{
Subject: sub.Subject,
Description: err.Error(),
})
}
s.m.Lock()
if endpoint != nil {
endpoint.stats.NumErrors++
endpoint.stats.LastError = err.Error()
}
s.m.Unlock()
s.Stop()
})
}
}
func unwrapConnectionEventCallbacks(nc *nats.Conn, handlers handlers) {
nc.SetClosedHandler(handlers.closed)
nc.SetErrorHandler(handlers.asyncErr)
}
func (s *service) matchSubscriptionSubject(subj string) (*Endpoint, bool) {
s.m.Lock()
defer s.m.Unlock()
for _, verbSub := range s.verbSubs {
if verbSub.Subject == subj {
return nil, true
}
}
for _, e := range s.endpoints {
if matchEndpointSubject(e.Subject, subj) {
return e, true
}
}
return nil, false
}
func matchEndpointSubject(endpointSubject, literalSubject string) bool {
subjectTokens := strings.Split(literalSubject, ".")
endpointTokens := strings.Split(endpointSubject, ".")
if len(endpointTokens) > len(subjectTokens) {
return false
}
for i, et := range endpointTokens {
if i == len(endpointTokens)-1 && et == ">" {
return true
}
if et != subjectTokens[i] && et != "*" {
return false
}
}
return true
}
// addVerbHandlers generates control handlers for a specific verb.
// Each request generates 3 subscriptions, one for the general verb
// affecting all services written with the framework, one that handles
// all services of a particular kind, and finally a specific service instance.
func (svc *service) addVerbHandlers(nc *nats.Conn, verb Verb, handler HandlerFunc) error {
name := fmt.Sprintf("%s-all", verb.String())
if err := svc.addInternalHandler(nc, verb, "", "", name, handler); err != nil {
return err
}
name = fmt.Sprintf("%s-kind", verb.String())
if err := svc.addInternalHandler(nc, verb, svc.Config.Name, "", name, handler); err != nil {
return err
}
return svc.addInternalHandler(nc, verb, svc.Config.Name, svc.id, verb.String(), handler)
}
// addInternalHandler registers a control subject handler.
func (s *service) addInternalHandler(nc *nats.Conn, verb Verb, kind, id, name string, handler HandlerFunc) error {
subj, err := ControlSubject(verb, kind, id)
if err != nil {
s.Stop()
return err
}
s.verbSubs[name], err = nc.Subscribe(subj, func(msg *nats.Msg) {
handler(&request{msg: msg})
})
if err != nil {
s.Stop()
return err
}
return nil
}
// reqHandler invokes the service request handler and modifies service stats
func (s *service) reqHandler(endpoint *Endpoint, req *request) {
start := time.Now()
endpoint.Handler.Handle(req)
s.m.Lock()
endpoint.stats.NumRequests++
endpoint.stats.ProcessingTime += time.Since(start)
avgProcessingTime := endpoint.stats.ProcessingTime.Nanoseconds() / int64(endpoint.stats.NumRequests)
endpoint.stats.AverageProcessingTime = time.Duration(avgProcessingTime)
if req.respondError != nil {
endpoint.stats.NumErrors++
endpoint.stats.LastError = req.respondError.Error()
}
s.m.Unlock()
}
// Stop drains the endpoint subscriptions and marks the service as stopped.
func (s *service) Stop() error {
s.m.Lock()
defer s.m.Unlock()
if s.stopped {
return nil
}
for _, e := range s.endpoints {
if err := e.stop(); err != nil {
return err
}
}
var keys []string
for key, sub := range s.verbSubs {
keys = append(keys, key)
if err := sub.Drain(); err != nil {
return fmt.Errorf("draining subscription for subject %q: %w", sub.Subject, err)
}
}
for _, key := range keys {
delete(s.verbSubs, key)
}
unwrapConnectionEventCallbacks(s.nc, s.natsHandlers)
s.stopped = true
if s.DoneHandler != nil {
s.asyncDispatcher.push(func() { s.DoneHandler(s) })
s.asyncDispatcher.close()
}
return nil
}
func (s *service) serviceIdentity() ServiceIdentity {
return ServiceIdentity{
Name: s.Config.Name,
ID: s.id,
Version: s.Config.Version,
Metadata: s.Config.Metadata,
}
}
// Info returns information about the service
func (s *service) Info() Info {
endpoints := make([]EndpointInfo, 0, len(s.endpoints))
for _, e := range s.endpoints {
endpoints = append(endpoints, EndpointInfo{
Name: e.Name,
Subject: e.Subject,
Metadata: e.Metadata,
})
}
return Info{
ServiceIdentity: s.serviceIdentity(),
Type: InfoResponseType,
Description: s.Config.Description,
Endpoints: endpoints,
}
}
// Stats returns statistics for the service endpoint and all monitoring endpoints.
func (s *service) Stats() Stats {
s.m.Lock()
defer s.m.Unlock()
stats := Stats{
ServiceIdentity: s.serviceIdentity(),
Endpoints: make([]*EndpointStats, 0),
Type: StatsResponseType,
Started: s.started,
}
for _, endpoint := range s.endpoints {
endpointStats := &EndpointStats{
Name: endpoint.stats.Name,
Subject: endpoint.stats.Subject,
NumRequests: endpoint.stats.NumRequests,
NumErrors: endpoint.stats.NumErrors,
LastError: endpoint.stats.LastError,
ProcessingTime: endpoint.stats.ProcessingTime,
AverageProcessingTime: endpoint.stats.AverageProcessingTime,
}
if s.StatsHandler != nil {
data, _ := json.Marshal(s.StatsHandler(endpoint))
endpointStats.Data = data
}
stats.Endpoints = append(stats.Endpoints, endpointStats)
}
return stats
}
// Reset resets all statistics on a service instance.
func (s *service) Reset() {
s.m.Lock()
for _, endpoint := range s.endpoints {
endpoint.reset()
}
s.started = time.Now().UTC()
s.m.Unlock()
}
// Stopped informs whether [Stop] was executed on the service.
func (s *service) Stopped() bool {
s.m.Lock()
defer s.m.Unlock()
return s.stopped
}
func (e *NATSError) Error() string {
return fmt.Sprintf("%q: %s", e.Subject, e.Description)
}
func (g *group) AddEndpoint(name string, handler Handler, opts ...EndpointOpt) error {
var options endpointOpts
for _, opt := range opts {
if err := opt(&options); err != nil {
return err
}
}
subject := name
if options.subject != "" {
subject = options.subject
}
endpointSubject := fmt.Sprintf("%s.%s", g.prefix, subject)
if g.prefix == "" | {
endpointSubject = subject
} | conditional_block | |
service.go | `json:"name"`
// Endpoint is an optional endpoint configuration.
// More complex, multi-endpoint services can be configured using
// Service.AddGroup and Service.AddEndpoint methods.
Endpoint *EndpointConfig `json:"endpoint"`
// Version is a SemVer compatible version string.
Version string `json:"version"`
// Description of the service.
Description string `json:"description"`
// Metadata annotates the service
Metadata map[string]string `json:"metadata,omitempty"`
// StatsHandler is a user-defined custom function.
// used to calculate additional service stats.
StatsHandler StatsHandler
// DoneHandler is invoked when all service subscription are stopped.
DoneHandler DoneHandler
// ErrorHandler is invoked on any nats-related service error.
ErrorHandler ErrHandler
}
EndpointConfig struct {
// Subject on which the endpoint is registered.
Subject string
// Handler used by the endpoint.
Handler Handler
// Metadata annotates the service
Metadata map[string]string `json:"metadata,omitempty"`
}
// NATSError represents an error returned by a NATS Subscription.
// It contains a subject on which the subscription failed, so that
// it can be linked with a specific service endpoint.
NATSError struct {
Subject string
Description string
}
// service represents a configured NATS service.
// It should be created using [Add] in order to configure the appropriate NATS subscriptions
// for request handler and monitoring.
service struct {
// Config contains a configuration of the service
Config
m sync.Mutex
id string
endpoints []*Endpoint
verbSubs map[string]*nats.Subscription
started time.Time
nc *nats.Conn
natsHandlers handlers
stopped bool
asyncDispatcher asyncCallbacksHandler
}
handlers struct {
closed nats.ConnHandler
asyncErr nats.ErrHandler
}
asyncCallbacksHandler struct {
cbQueue chan func()
}
)
const (
// Queue Group name used across all services
QG = "q"
// APIPrefix is the root of all control subjects
APIPrefix = "$SRV"
)
// Service Error headers
const (
ErrorHeader = "Nats-Service-Error"
ErrorCodeHeader = "Nats-Service-Error-Code"
)
// Verbs being used to set up a specific control subject.
const (
PingVerb Verb = iota
StatsVerb
InfoVerb
)
const (
InfoResponseType = "io.nats.micro.v1.info_response"
PingResponseType = "io.nats.micro.v1.ping_response"
StatsResponseType = "io.nats.micro.v1.stats_response"
)
var (
// this regular expression is suggested regexp for semver validation: https://semver.org/
semVerRegexp = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`)
nameRegexp = regexp.MustCompile(`^[A-Za-z0-9\-_]+$`)
subjectRegexp = regexp.MustCompile(`^[^ >]*[>]?$`)
)
// Common errors returned by the Service framework.
var (
// ErrConfigValidation is returned when service configuration is invalid
ErrConfigValidation = errors.New("validation")
// ErrVerbNotSupported is returned when invalid [Verb] is used (PING, INFO, STATS)
ErrVerbNotSupported = errors.New("unsupported verb")
// ErrServiceNameRequired is returned when attempting to generate control subject with ID but empty name
ErrServiceNameRequired = errors.New("service name is required to generate ID control subject")
)
func (s Verb) String() string {
switch s {
case PingVerb:
return "PING"
case StatsVerb:
return "STATS"
case InfoVerb:
return "INFO"
default:
return ""
}
}
// AddService adds a microservice.
// It will enable internal common services (PING, STATS and INFO).
// Request handlers have to be registered separately using Service.AddEndpoint.
// A service name, version and Endpoint configuration are required to add a service.
// AddService returns a [Service] interface, allowing service management.
// Each service is assigned a unique ID.
func AddService(nc *nats.Conn, config Config) (Service, error) {
if err := config.valid(); err != nil {
return nil, err
}
if config.Metadata == nil {
config.Metadata = map[string]string{}
}
id := nuid.Next()
svc := &service{
Config: config,
nc: nc,
id: id,
asyncDispatcher: asyncCallbacksHandler{
cbQueue: make(chan func(), 100),
},
verbSubs: make(map[string]*nats.Subscription),
endpoints: make([]*Endpoint, 0),
}
// Add connection event (closed, error) wrapper handlers. If the service has
// custom callbacks, the events are queued and invoked by the same
// goroutine, starting now.
go svc.asyncDispatcher.run()
svc.wrapConnectionEventCallbacks()
if config.Endpoint != nil {
opts := []EndpointOpt{WithEndpointSubject(config.Endpoint.Subject)}
if config.Endpoint.Metadata != nil {
opts = append(opts, WithEndpointMetadata(config.Endpoint.Metadata))
}
if err := svc.AddEndpoint("default", config.Endpoint.Handler, opts...); err != nil {
svc.asyncDispatcher.close()
return nil, err
}
}
// Setup internal subscriptions.
pingResponse := Ping{
ServiceIdentity: svc.serviceIdentity(),
Type: PingResponseType,
}
handleVerb := func(verb Verb, valuef func() any) func(req Request) {
return func(req Request) {
response, _ := json.Marshal(valuef())
if err := req.Respond(response); err != nil {
if err := req.Error("500", fmt.Sprintf("Error handling %s request: %s", verb, err), nil); err != nil && config.ErrorHandler != nil {
svc.asyncDispatcher.push(func() { config.ErrorHandler(svc, &NATSError{req.Subject(), err.Error()}) })
}
}
}
}
for verb, source := range map[Verb]func() any{
InfoVerb: func() any { return svc.Info() },
PingVerb: func() any { return pingResponse },
StatsVerb: func() any { return svc.Stats() },
} {
handler := handleVerb(verb, source)
if err := svc.addVerbHandlers(nc, verb, handler); err != nil {
svc.asyncDispatcher.close()
return nil, err
}
}
svc.started = time.Now().UTC()
return svc, nil
}
func (s *service) AddEndpoint(name string, handler Handler, opts ...EndpointOpt) error {
var options endpointOpts
for _, opt := range opts {
if err := opt(&options); err != nil {
return err
}
}
subject := name
if options.subject != "" {
subject = options.subject
}
return addEndpoint(s, name, subject, handler, options.metadata)
}
func addEndpoint(s *service, name, subject string, handler Handler, metadata map[string]string) error {
if !nameRegexp.MatchString(name) {
return fmt.Errorf("%w: invalid endpoint name", ErrConfigValidation)
}
if !subjectRegexp.MatchString(subject) {
return fmt.Errorf("%w: invalid endpoint subject", ErrConfigValidation)
}
endpoint := &Endpoint{
service: s,
EndpointConfig: EndpointConfig{
Subject: subject,
Handler: handler,
Metadata: metadata,
},
Name: name,
}
sub, err := s.nc.QueueSubscribe(
subject,
QG,
func(m *nats.Msg) {
s.reqHandler(endpoint, &request{msg: m})
},
)
if err != nil {
return err
}
endpoint.subscription = sub
s.endpoints = append(s.endpoints, endpoint)
endpoint.stats = EndpointStats{
Name: name,
Subject: subject,
}
return nil | service: s,
prefix: name,
}
}
// dispatch is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) run() {
for {
f := <-ac.cbQueue
if f == nil {
return
}
f()
}
}
// dispatch is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) push(f func()) {
ac.cbQueue <- f
}
func (ac *asyncCallbacksHandler) close() {
close(ac.cbQueue)
}
func (c *Config) valid() error {
if !nameRegexp.MatchString(c.Name) {
return fmt.Errorf("%w: service name: name should not be empty and | }
func (s *service) AddGroup(name string) Group {
return &group{ | random_line_split |
service.go | // this regular expression is suggested regexp for semver validation: https://semver.org/
semVerRegexp = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`)
nameRegexp = regexp.MustCompile(`^[A-Za-z0-9\-_]+$`)
subjectRegexp = regexp.MustCompile(`^[^ >]*[>]?$`)
)
// Common errors returned by the Service framework.
var (
// ErrConfigValidation is returned when service configuration is invalid
ErrConfigValidation = errors.New("validation")
// ErrVerbNotSupported is returned when invalid [Verb] is used (PING, INFO, STATS)
ErrVerbNotSupported = errors.New("unsupported verb")
// ErrServiceNameRequired is returned when attempting to generate control subject with ID but empty name
ErrServiceNameRequired = errors.New("service name is required to generate ID control subject")
)
func (s Verb) String() string {
switch s {
case PingVerb:
return "PING"
case StatsVerb:
return "STATS"
case InfoVerb:
return "INFO"
default:
return ""
}
}
// AddService adds a microservice.
// It will enable internal common services (PING, STATS and INFO).
// Request handlers have to be registered separately using Service.AddEndpoint.
// A service name, version and Endpoint configuration are required to add a service.
// AddService returns a [Service] interface, allowing service management.
// Each service is assigned a unique ID.
func AddService(nc *nats.Conn, config Config) (Service, error) {
if err := config.valid(); err != nil {
return nil, err
}
if config.Metadata == nil {
config.Metadata = map[string]string{}
}
id := nuid.Next()
svc := &service{
Config: config,
nc: nc,
id: id,
asyncDispatcher: asyncCallbacksHandler{
cbQueue: make(chan func(), 100),
},
verbSubs: make(map[string]*nats.Subscription),
endpoints: make([]*Endpoint, 0),
}
// Add connection event (closed, error) wrapper handlers. If the service has
// custom callbacks, the events are queued and invoked by the same
// goroutine, starting now.
go svc.asyncDispatcher.run()
svc.wrapConnectionEventCallbacks()
if config.Endpoint != nil {
opts := []EndpointOpt{WithEndpointSubject(config.Endpoint.Subject)}
if config.Endpoint.Metadata != nil {
opts = append(opts, WithEndpointMetadata(config.Endpoint.Metadata))
}
if err := svc.AddEndpoint("default", config.Endpoint.Handler, opts...); err != nil {
svc.asyncDispatcher.close()
return nil, err
}
}
// Setup internal subscriptions.
pingResponse := Ping{
ServiceIdentity: svc.serviceIdentity(),
Type: PingResponseType,
}
handleVerb := func(verb Verb, valuef func() any) func(req Request) {
return func(req Request) {
response, _ := json.Marshal(valuef())
if err := req.Respond(response); err != nil {
if err := req.Error("500", fmt.Sprintf("Error handling %s request: %s", verb, err), nil); err != nil && config.ErrorHandler != nil {
svc.asyncDispatcher.push(func() { config.ErrorHandler(svc, &NATSError{req.Subject(), err.Error()}) })
}
}
}
}
for verb, source := range map[Verb]func() any{
InfoVerb: func() any { return svc.Info() },
PingVerb: func() any { return pingResponse },
StatsVerb: func() any { return svc.Stats() },
} {
handler := handleVerb(verb, source)
if err := svc.addVerbHandlers(nc, verb, handler); err != nil {
svc.asyncDispatcher.close()
return nil, err
}
}
svc.started = time.Now().UTC()
return svc, nil
}
func (s *service) AddEndpoint(name string, handler Handler, opts ...EndpointOpt) error {
var options endpointOpts
for _, opt := range opts {
if err := opt(&options); err != nil {
return err
}
}
subject := name
if options.subject != "" {
subject = options.subject
}
return addEndpoint(s, name, subject, handler, options.metadata)
}
func addEndpoint(s *service, name, subject string, handler Handler, metadata map[string]string) error {
if !nameRegexp.MatchString(name) {
return fmt.Errorf("%w: invalid endpoint name", ErrConfigValidation)
}
if !subjectRegexp.MatchString(subject) {
return fmt.Errorf("%w: invalid endpoint subject", ErrConfigValidation)
}
endpoint := &Endpoint{
service: s,
EndpointConfig: EndpointConfig{
Subject: subject,
Handler: handler,
Metadata: metadata,
},
Name: name,
}
sub, err := s.nc.QueueSubscribe(
subject,
QG,
func(m *nats.Msg) {
s.reqHandler(endpoint, &request{msg: m})
},
)
if err != nil {
return err
}
endpoint.subscription = sub
s.endpoints = append(s.endpoints, endpoint)
endpoint.stats = EndpointStats{
Name: name,
Subject: subject,
}
return nil
}
func (s *service) AddGroup(name string) Group {
return &group{
service: s,
prefix: name,
}
}
// dispatch is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) run() {
for {
f := <-ac.cbQueue
if f == nil {
return
}
f()
}
}
// dispatch is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) push(f func()) {
ac.cbQueue <- f
}
func (ac *asyncCallbacksHandler) close() {
close(ac.cbQueue)
}
func (c *Config) valid() error {
if !nameRegexp.MatchString(c.Name) {
return fmt.Errorf("%w: service name: name should not be empty and should consist of alphanumerical charactest, dashes and underscores", ErrConfigValidation)
}
if !semVerRegexp.MatchString(c.Version) {
return fmt.Errorf("%w: version: version should not be empty should match the SemVer format", ErrConfigValidation)
}
return nil
}
func (s *service) wrapConnectionEventCallbacks() {
s.m.Lock()
defer s.m.Unlock()
s.natsHandlers.closed = s.nc.ClosedHandler()
if s.natsHandlers.closed != nil {
s.nc.SetClosedHandler(func(c *nats.Conn) {
s.Stop()
s.natsHandlers.closed(c)
})
} else {
s.nc.SetClosedHandler(func(c *nats.Conn) {
s.Stop()
})
}
s.natsHandlers.asyncErr = s.nc.ErrorHandler()
if s.natsHandlers.asyncErr != nil {
s.nc.SetErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) {
endpoint, match := s.matchSubscriptionSubject(sub.Subject)
if !match {
s.natsHandlers.asyncErr(c, sub, err)
return
}
if s.Config.ErrorHandler != nil {
s.Config.ErrorHandler(s, &NATSError{
Subject: sub.Subject,
Description: err.Error(),
})
}
s.m.Lock()
if endpoint != nil {
endpoint.stats.NumErrors++
endpoint.stats.LastError = err.Error()
}
s.m.Unlock()
s.Stop()
s.natsHandlers.asyncErr(c, sub, err)
})
} else {
s.nc.SetErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) {
endpoint, match := s.matchSubscriptionSubject(sub.Subject)
if !match {
return
}
if s.Config.ErrorHandler != nil {
s.Config.ErrorHandler(s, &NATSError{
Subject: sub.Subject,
Description: err.Error(),
})
}
s.m.Lock()
if endpoint != nil {
endpoint.stats.NumErrors++
endpoint.stats.LastError = err.Error()
}
s.m.Unlock()
s.Stop()
})
}
}
func unwrapConnectionEventCallbacks(nc *nats.Conn, handlers handlers) {
nc.SetClosedHandler(handlers.closed)
nc.SetErrorHandler(handlers.asyncErr)
}
func (s *service) matchSubscriptionSubject(subj string) (*Endpoint, bool) {
s.m.Lock()
defer s.m.Unlock()
for _, verbSub := range s.verbSubs {
if verbSub.Subject == subj {
return nil, true
}
}
for _, e := range s.endpoints {
if matchEndpointSubject(e.Subject, subj) {
return e, true
}
}
return nil, false
}
func | matchEndpointSubject | identifier_name | |
service.go | 0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`)
nameRegexp = regexp.MustCompile(`^[A-Za-z0-9\-_]+$`)
subjectRegexp = regexp.MustCompile(`^[^ >]*[>]?$`)
)
// Common errors returned by the Service framework.
var (
// ErrConfigValidation is returned when service configuration is invalid
ErrConfigValidation = errors.New("validation")
// ErrVerbNotSupported is returned when invalid [Verb] is used (PING, INFO, STATS)
ErrVerbNotSupported = errors.New("unsupported verb")
// ErrServiceNameRequired is returned when attempting to generate control subject with ID but empty name
ErrServiceNameRequired = errors.New("service name is required to generate ID control subject")
)
func (s Verb) String() string {
switch s {
case PingVerb:
return "PING"
case StatsVerb:
return "STATS"
case InfoVerb:
return "INFO"
default:
return ""
}
}
// AddService adds a microservice.
// It will enable internal common services (PING, STATS and INFO).
// Request handlers have to be registered separately using Service.AddEndpoint.
// A service name, version and Endpoint configuration are required to add a service.
// AddService returns a [Service] interface, allowing service management.
// Each service is assigned a unique ID.
func AddService(nc *nats.Conn, config Config) (Service, error) {
if err := config.valid(); err != nil {
return nil, err
}
if config.Metadata == nil {
config.Metadata = map[string]string{}
}
id := nuid.Next()
svc := &service{
Config: config,
nc: nc,
id: id,
asyncDispatcher: asyncCallbacksHandler{
cbQueue: make(chan func(), 100),
},
verbSubs: make(map[string]*nats.Subscription),
endpoints: make([]*Endpoint, 0),
}
// Add connection event (closed, error) wrapper handlers. If the service has
// custom callbacks, the events are queued and invoked by the same
// goroutine, starting now.
go svc.asyncDispatcher.run()
svc.wrapConnectionEventCallbacks()
if config.Endpoint != nil {
opts := []EndpointOpt{WithEndpointSubject(config.Endpoint.Subject)}
if config.Endpoint.Metadata != nil {
opts = append(opts, WithEndpointMetadata(config.Endpoint.Metadata))
}
if err := svc.AddEndpoint("default", config.Endpoint.Handler, opts...); err != nil {
svc.asyncDispatcher.close()
return nil, err
}
}
// Setup internal subscriptions.
pingResponse := Ping{
ServiceIdentity: svc.serviceIdentity(),
Type: PingResponseType,
}
handleVerb := func(verb Verb, valuef func() any) func(req Request) {
return func(req Request) {
response, _ := json.Marshal(valuef())
if err := req.Respond(response); err != nil {
if err := req.Error("500", fmt.Sprintf("Error handling %s request: %s", verb, err), nil); err != nil && config.ErrorHandler != nil {
svc.asyncDispatcher.push(func() { config.ErrorHandler(svc, &NATSError{req.Subject(), err.Error()}) })
}
}
}
}
for verb, source := range map[Verb]func() any{
InfoVerb: func() any { return svc.Info() },
PingVerb: func() any { return pingResponse },
StatsVerb: func() any { return svc.Stats() },
} {
handler := handleVerb(verb, source)
if err := svc.addVerbHandlers(nc, verb, handler); err != nil {
svc.asyncDispatcher.close()
return nil, err
}
}
svc.started = time.Now().UTC()
return svc, nil
}
func (s *service) AddEndpoint(name string, handler Handler, opts ...EndpointOpt) error {
var options endpointOpts
for _, opt := range opts {
if err := opt(&options); err != nil {
return err
}
}
subject := name
if options.subject != "" {
subject = options.subject
}
return addEndpoint(s, name, subject, handler, options.metadata)
}
func addEndpoint(s *service, name, subject string, handler Handler, metadata map[string]string) error {
if !nameRegexp.MatchString(name) {
return fmt.Errorf("%w: invalid endpoint name", ErrConfigValidation)
}
if !subjectRegexp.MatchString(subject) {
return fmt.Errorf("%w: invalid endpoint subject", ErrConfigValidation)
}
endpoint := &Endpoint{
service: s,
EndpointConfig: EndpointConfig{
Subject: subject,
Handler: handler,
Metadata: metadata,
},
Name: name,
}
sub, err := s.nc.QueueSubscribe(
subject,
QG,
func(m *nats.Msg) {
s.reqHandler(endpoint, &request{msg: m})
},
)
if err != nil {
return err
}
endpoint.subscription = sub
s.endpoints = append(s.endpoints, endpoint)
endpoint.stats = EndpointStats{
Name: name,
Subject: subject,
}
return nil
}
func (s *service) AddGroup(name string) Group {
return &group{
service: s,
prefix: name,
}
}
// dispatch is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) run() {
for {
f := <-ac.cbQueue
if f == nil {
return
}
f()
}
}
// dispatch is responsible for calling any async callbacks
func (ac *asyncCallbacksHandler) push(f func()) {
ac.cbQueue <- f
}
func (ac *asyncCallbacksHandler) close() {
close(ac.cbQueue)
}
func (c *Config) valid() error {
if !nameRegexp.MatchString(c.Name) {
return fmt.Errorf("%w: service name: name should not be empty and should consist of alphanumerical charactest, dashes and underscores", ErrConfigValidation)
}
if !semVerRegexp.MatchString(c.Version) {
return fmt.Errorf("%w: version: version should not be empty should match the SemVer format", ErrConfigValidation)
}
return nil
}
func (s *service) wrapConnectionEventCallbacks() {
s.m.Lock()
defer s.m.Unlock()
s.natsHandlers.closed = s.nc.ClosedHandler()
if s.natsHandlers.closed != nil {
s.nc.SetClosedHandler(func(c *nats.Conn) {
s.Stop()
s.natsHandlers.closed(c)
})
} else {
s.nc.SetClosedHandler(func(c *nats.Conn) {
s.Stop()
})
}
s.natsHandlers.asyncErr = s.nc.ErrorHandler()
if s.natsHandlers.asyncErr != nil {
s.nc.SetErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) {
endpoint, match := s.matchSubscriptionSubject(sub.Subject)
if !match {
s.natsHandlers.asyncErr(c, sub, err)
return
}
if s.Config.ErrorHandler != nil {
s.Config.ErrorHandler(s, &NATSError{
Subject: sub.Subject,
Description: err.Error(),
})
}
s.m.Lock()
if endpoint != nil {
endpoint.stats.NumErrors++
endpoint.stats.LastError = err.Error()
}
s.m.Unlock()
s.Stop()
s.natsHandlers.asyncErr(c, sub, err)
})
} else {
s.nc.SetErrorHandler(func(c *nats.Conn, sub *nats.Subscription, err error) {
endpoint, match := s.matchSubscriptionSubject(sub.Subject)
if !match {
return
}
if s.Config.ErrorHandler != nil {
s.Config.ErrorHandler(s, &NATSError{
Subject: sub.Subject,
Description: err.Error(),
})
}
s.m.Lock()
if endpoint != nil {
endpoint.stats.NumErrors++
endpoint.stats.LastError = err.Error()
}
s.m.Unlock()
s.Stop()
})
}
}
func unwrapConnectionEventCallbacks(nc *nats.Conn, handlers handlers) {
nc.SetClosedHandler(handlers.closed)
nc.SetErrorHandler(handlers.asyncErr)
}
func (s *service) matchSubscriptionSubject(subj string) (*Endpoint, bool) {
s.m.Lock()
defer s.m.Unlock()
for _, verbSub := range s.verbSubs {
if verbSub.Subject == subj {
return nil, true
}
}
for _, e := range s.endpoints {
if matchEndpointSubject(e.Subject, subj) {
return e, true
}
}
return nil, false
}
func matchEndpointSubject(endpointSubject, literalSubject string) bool | {
subjectTokens := strings.Split(literalSubject, ".")
endpointTokens := strings.Split(endpointSubject, ".")
if len(endpointTokens) > len(subjectTokens) {
return false
}
for i, et := range endpointTokens {
if i == len(endpointTokens)-1 && et == ">" {
return true
}
if et != subjectTokens[i] && et != "*" {
return false
}
}
return true
} | identifier_body | |
game_tree.rs | at that point in time.
/// Uses lazy evaluation to avoid storing the entire data structure
/// in memory. See the LazyGameTree struct for more info.
///
/// Note that there is no case when a player is stuck; we simply
/// skip their turn if they have no moves and move
/// to the next Turn state.
#[derive(Debug)]
pub enum GameTree {
Turn { state: GameState, valid_moves: HashMap<Move, LazyGameTree> },
End(GameState),
}
impl GameTree {
/// Initialize a GameTree from the given initial GameState.
/// The given state does not have to be the start of a game -
/// it is allowed to be any valid game state. It is referred to
/// as the initial state because the generated tree will start from
/// that state with links to each potential subsequent state, but
/// not any previous states.
pub fn new(initial_state: &GameState) -> GameTree {
assert!(initial_state.all_penguins_are_placed(), "{:?}", initial_state);
let valid_moves = initial_state.get_valid_moves();
if valid_moves.is_empty() {
GameTree::End(initial_state.clone())
} else {
let valid_moves = valid_moves.into_iter().map(|move_| {
let lazy_game = LazyGameTree::from_move(&move_, initial_state);
(move_, lazy_game)
}).collect();
GameTree::Turn {
state: initial_state.clone(),
valid_moves,
}
}
}
/// Returns a shared reference to the GameState of the current node of the GameTree
pub fn | (&self) -> &GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns a mutable reference to the GameState of the current node of the GameTree
pub fn get_state_mut(&mut self) -> &mut GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns the GameState of the current node of the GameTree
pub fn take_state(self) -> GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn get_game_after_move(&mut self, move_: Move) -> Option<&mut GameTree> {
match self {
GameTree::Turn { valid_moves, .. } => {
valid_moves.get_mut(&move_).map(|lazy_game| lazy_game.get_evaluated())
},
GameTree::End(_) => None,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn take_game_after_move(self, move_: Move) -> Option<GameTree> {
match self {
GameTree::Turn { mut valid_moves, .. } => {
valid_moves.remove(&move_).map(|lazy_game| lazy_game.evaluate())
},
GameTree::End(_) => None,
}
}
/// Applies a function to the GameTree for every valid move, returning
/// a HashMap of the same moves mapped to their new results
pub fn map<T, F>(&mut self, mut f: F) -> HashMap<Move, T>
where F: FnMut(&mut GameTree) -> T
{
match self {
GameTree::Turn { valid_moves, .. } => {
valid_moves.iter_mut().map(|(move_, lazy_game)| {
let game = lazy_game.get_evaluated();
(move_.clone(), f(game))
}).collect()
},
GameTree::End(_) => HashMap::new(),
}
}
pub fn is_game_over(&self) -> bool {
match self {
GameTree::Turn { .. } => false,
GameTree::End(_) => true,
}
}
}
/// A LazyGameTree is either an already evaluted GameTree or
/// is an Unevaluated thunk that can be evaluated to return a GameTree.
/// Since Games are stored as recursive trees in memory keeping
/// the branches of each GameTree::Turn as LazyGameTree::Unevaluated saves
/// us from allocating an exponential amount of memory for every
/// possible GameState.
pub enum LazyGameTree {
Evaluated(GameTree),
Unevaluated(Box<dyn FnMut() -> GameTree>),
}
impl LazyGameTree {
/// Retrieves the GameTree from this LazyGameTree,
/// evaluating this LazyGameTree if it hasn't already been
pub fn get_evaluated(&mut self) -> &mut GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(thunk) => {
let game = thunk();
*self = LazyGameTree::Evaluated(game);
self.get_evaluated()
},
}
}
pub fn evaluate(self) -> GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(mut thunk) => thunk(),
}
}
/// Create a Unevaluated LazyGameTree from the given state
/// and the move to take to advance that state. The passed in
/// move must be valid for the given game state.
fn from_move(move_: &Move, state: &GameState) -> LazyGameTree {
let mut state = state.clone();
let move_ = move_.clone();
LazyGameTree::Unevaluated(Box::new(move || {
state.move_avatar_for_current_player(move_)
.expect(&format!("Invalid move for the given GameState passed to LazyGameTree::from_move.\
\nMove: {:?}\nGameState: {:?}", move_, state));
GameTree::new(&state)
}))
}
}
impl std::fmt::Debug for LazyGameTree {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
LazyGameTree::Evaluated(game) => write!(f, "Evaluated({:?})", game),
LazyGameTree::Unevaluated(_) => write!(f, "Unevaluated(_)"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::strategy::tests::take_zigzag_placement;
// Starts a game with a 3 row, 5 column board and all penguins placed.
fn start_game() -> GameTree {
let mut state = GameState::with_default_board(5, 3, 2);
while !state.all_penguins_are_placed() {
take_zigzag_placement(&mut state);
}
GameTree::new(&state)
}
fn get_expected_valid_moves(game: &GameTree) -> Vec<Move> {
let mut expected_valid_moves = vec![];
let state = game.get_state();
let occupied_tiles = state.get_occupied_tiles();
for penguin in state.current_player().penguins.iter() {
let current_tile = state.get_tile(penguin.tile_id.unwrap()).unwrap();
for tile in current_tile.all_reachable_tiles(&state.board, &occupied_tiles) {
expected_valid_moves.push(Move::new(current_tile.tile_id, tile.tile_id))
}
}
expected_valid_moves
}
#[test]
fn test_new() {
// valid_moves generated correctly
// - have expected moves, check if same as generated
// starting gamestate is same as one passed to new
let game = start_game();
let mut valid_moves = game.get_state().get_valid_moves();
let mut expected_valid_moves = get_expected_valid_moves(&game);
expected_valid_moves.sort();
valid_moves.sort();
assert_eq!(expected_valid_moves, valid_moves);
}
#[test]
fn is_initially_unevaluated() {
let game = start_game();
match game {
GameTree::Turn { valid_moves, .. } => {
// Assert all the branches to the tree are initially Unevaluated
assert!(valid_moves.iter().all(|(_, lazy_game)| {
match lazy_game {
LazyGameTree::Evaluated(_) => false,
LazyGameTree::Unevaluated(_) => true,
}
}));
},
GameTree::End(_) => unreachable!("start_game should never return a finished game"),
}
}
#[test]
fn test_get_game_after_move() {
let mut initial_game = start_game();
// record initial moves and the identity of the player whose turn it is
let mut initial_valid_moves = initial_game.get_state().get_valid_moves();
let initial_turn = initial_game.get_state().current_turn;
let game_after_move = initial_game.get_game_after_move(initial_valid_moves[0]).unwrap(); // make a move
// record new moves and the identity of the player whose turn it now is
let mut valid_moves = game_after_move.get_state().get_valid_moves();
let current_turn = game_after_move.get_state().current_turn;
let mut expected_valid_moves = get_expected_valid_moves(&game_after_move);
initial_valid_moves.sort();
valid_moves.sort();
expected_valid_moves.sort();
assert_ne!( | get_state | identifier_name |
game_tree.rs | at that point in time.
/// Uses lazy evaluation to avoid storing the entire data structure
/// in memory. See the LazyGameTree struct for more info.
///
/// Note that there is no case when a player is stuck; we simply
/// skip their turn if they have no moves and move
/// to the next Turn state.
#[derive(Debug)]
pub enum GameTree {
Turn { state: GameState, valid_moves: HashMap<Move, LazyGameTree> },
End(GameState),
}
impl GameTree {
/// Initialize a GameTree from the given initial GameState.
/// The given state does not have to be the start of a game -
/// it is allowed to be any valid game state. It is referred to
/// as the initial state because the generated tree will start from
/// that state with links to each potential subsequent state, but
/// not any previous states.
pub fn new(initial_state: &GameState) -> GameTree {
assert!(initial_state.all_penguins_are_placed(), "{:?}", initial_state);
let valid_moves = initial_state.get_valid_moves();
if valid_moves.is_empty() {
GameTree::End(initial_state.clone())
} else {
let valid_moves = valid_moves.into_iter().map(|move_| {
let lazy_game = LazyGameTree::from_move(&move_, initial_state);
(move_, lazy_game)
}).collect();
GameTree::Turn {
state: initial_state.clone(),
valid_moves,
}
}
}
/// Returns a shared reference to the GameState of the current node of the GameTree
pub fn get_state(&self) -> &GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns a mutable reference to the GameState of the current node of the GameTree
pub fn get_state_mut(&mut self) -> &mut GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns the GameState of the current node of the GameTree
pub fn take_state(self) -> GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move. | GameTree::Turn { valid_moves, .. } => {
valid_moves.get_mut(&move_).map(|lazy_game| lazy_game.get_evaluated())
},
GameTree::End(_) => None,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn take_game_after_move(self, move_: Move) -> Option<GameTree> {
match self {
GameTree::Turn { mut valid_moves, .. } => {
valid_moves.remove(&move_).map(|lazy_game| lazy_game.evaluate())
},
GameTree::End(_) => None,
}
}
/// Applies a function to the GameTree for every valid move, returning
/// a HashMap of the same moves mapped to their new results
pub fn map<T, F>(&mut self, mut f: F) -> HashMap<Move, T>
where F: FnMut(&mut GameTree) -> T
{
match self {
GameTree::Turn { valid_moves, .. } => {
valid_moves.iter_mut().map(|(move_, lazy_game)| {
let game = lazy_game.get_evaluated();
(move_.clone(), f(game))
}).collect()
},
GameTree::End(_) => HashMap::new(),
}
}
pub fn is_game_over(&self) -> bool {
match self {
GameTree::Turn { .. } => false,
GameTree::End(_) => true,
}
}
}
/// A LazyGameTree is either an already evaluted GameTree or
/// is an Unevaluated thunk that can be evaluated to return a GameTree.
/// Since Games are stored as recursive trees in memory keeping
/// the branches of each GameTree::Turn as LazyGameTree::Unevaluated saves
/// us from allocating an exponential amount of memory for every
/// possible GameState.
pub enum LazyGameTree {
Evaluated(GameTree),
Unevaluated(Box<dyn FnMut() -> GameTree>),
}
impl LazyGameTree {
/// Retrieves the GameTree from this LazyGameTree,
/// evaluating this LazyGameTree if it hasn't already been
pub fn get_evaluated(&mut self) -> &mut GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(thunk) => {
let game = thunk();
*self = LazyGameTree::Evaluated(game);
self.get_evaluated()
},
}
}
pub fn evaluate(self) -> GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(mut thunk) => thunk(),
}
}
/// Create a Unevaluated LazyGameTree from the given state
/// and the move to take to advance that state. The passed in
/// move must be valid for the given game state.
fn from_move(move_: &Move, state: &GameState) -> LazyGameTree {
let mut state = state.clone();
let move_ = move_.clone();
LazyGameTree::Unevaluated(Box::new(move || {
state.move_avatar_for_current_player(move_)
.expect(&format!("Invalid move for the given GameState passed to LazyGameTree::from_move.\
\nMove: {:?}\nGameState: {:?}", move_, state));
GameTree::new(&state)
}))
}
}
impl std::fmt::Debug for LazyGameTree {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
LazyGameTree::Evaluated(game) => write!(f, "Evaluated({:?})", game),
LazyGameTree::Unevaluated(_) => write!(f, "Unevaluated(_)"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::strategy::tests::take_zigzag_placement;
// Starts a game with a 3 row, 5 column board and all penguins placed.
fn start_game() -> GameTree {
let mut state = GameState::with_default_board(5, 3, 2);
while !state.all_penguins_are_placed() {
take_zigzag_placement(&mut state);
}
GameTree::new(&state)
}
fn get_expected_valid_moves(game: &GameTree) -> Vec<Move> {
let mut expected_valid_moves = vec![];
let state = game.get_state();
let occupied_tiles = state.get_occupied_tiles();
for penguin in state.current_player().penguins.iter() {
let current_tile = state.get_tile(penguin.tile_id.unwrap()).unwrap();
for tile in current_tile.all_reachable_tiles(&state.board, &occupied_tiles) {
expected_valid_moves.push(Move::new(current_tile.tile_id, tile.tile_id))
}
}
expected_valid_moves
}
#[test]
fn test_new() {
// valid_moves generated correctly
// - have expected moves, check if same as generated
// starting gamestate is same as one passed to new
let game = start_game();
let mut valid_moves = game.get_state().get_valid_moves();
let mut expected_valid_moves = get_expected_valid_moves(&game);
expected_valid_moves.sort();
valid_moves.sort();
assert_eq!(expected_valid_moves, valid_moves);
}
#[test]
fn is_initially_unevaluated() {
let game = start_game();
match game {
GameTree::Turn { valid_moves, .. } => {
// Assert all the branches to the tree are initially Unevaluated
assert!(valid_moves.iter().all(|(_, lazy_game)| {
match lazy_game {
LazyGameTree::Evaluated(_) => false,
LazyGameTree::Unevaluated(_) => true,
}
}));
},
GameTree::End(_) => unreachable!("start_game should never return a finished game"),
}
}
#[test]
fn test_get_game_after_move() {
let mut initial_game = start_game();
// record initial moves and the identity of the player whose turn it is
let mut initial_valid_moves = initial_game.get_state().get_valid_moves();
let initial_turn = initial_game.get_state().current_turn;
let game_after_move = initial_game.get_game_after_move(initial_valid_moves[0]).unwrap(); // make a move
// record new moves and the identity of the player whose turn it now is
let mut valid_moves = game_after_move.get_state().get_valid_moves();
let current_turn = game_after_move.get_state().current_turn;
let mut expected_valid_moves = get_expected_valid_moves(&game_after_move);
initial_valid_moves.sort();
valid_moves.sort();
expected_valid_moves.sort();
assert_ne!(initial | /// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn get_game_after_move(&mut self, move_: Move) -> Option<&mut GameTree> {
match self { | random_line_split |
game_tree.rs | at that point in time.
/// Uses lazy evaluation to avoid storing the entire data structure
/// in memory. See the LazyGameTree struct for more info.
///
/// Note that there is no case when a player is stuck; we simply
/// skip their turn if they have no moves and move
/// to the next Turn state.
#[derive(Debug)]
pub enum GameTree {
Turn { state: GameState, valid_moves: HashMap<Move, LazyGameTree> },
End(GameState),
}
impl GameTree {
/// Initialize a GameTree from the given initial GameState.
/// The given state does not have to be the start of a game -
/// it is allowed to be any valid game state. It is referred to
/// as the initial state because the generated tree will start from
/// that state with links to each potential subsequent state, but
/// not any previous states.
pub fn new(initial_state: &GameState) -> GameTree {
assert!(initial_state.all_penguins_are_placed(), "{:?}", initial_state);
let valid_moves = initial_state.get_valid_moves();
if valid_moves.is_empty() {
GameTree::End(initial_state.clone())
} else {
let valid_moves = valid_moves.into_iter().map(|move_| {
let lazy_game = LazyGameTree::from_move(&move_, initial_state);
(move_, lazy_game)
}).collect();
GameTree::Turn {
state: initial_state.clone(),
valid_moves,
}
}
}
/// Returns a shared reference to the GameState of the current node of the GameTree
pub fn get_state(&self) -> &GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns a mutable reference to the GameState of the current node of the GameTree
pub fn get_state_mut(&mut self) -> &mut GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns the GameState of the current node of the GameTree
pub fn take_state(self) -> GameState {
match self {
GameTree::Turn { state, .. } => state,
GameTree::End(state) => state,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn get_game_after_move(&mut self, move_: Move) -> Option<&mut GameTree> {
match self {
GameTree::Turn { valid_moves, .. } => {
valid_moves.get_mut(&move_).map(|lazy_game| lazy_game.get_evaluated())
},
GameTree::End(_) => None,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn take_game_after_move(self, move_: Move) -> Option<GameTree> {
match self {
GameTree::Turn { mut valid_moves, .. } => {
valid_moves.remove(&move_).map(|lazy_game| lazy_game.evaluate())
},
GameTree::End(_) => None,
}
}
/// Applies a function to the GameTree for every valid move, returning
/// a HashMap of the same moves mapped to their new results
pub fn map<T, F>(&mut self, mut f: F) -> HashMap<Move, T>
where F: FnMut(&mut GameTree) -> T
{
match self {
GameTree::Turn { valid_moves, .. } => {
valid_moves.iter_mut().map(|(move_, lazy_game)| {
let game = lazy_game.get_evaluated();
(move_.clone(), f(game))
}).collect()
},
GameTree::End(_) => HashMap::new(),
}
}
pub fn is_game_over(&self) -> bool {
match self {
GameTree::Turn { .. } => false,
GameTree::End(_) => true,
}
}
}
/// A LazyGameTree is either an already evaluted GameTree or
/// is an Unevaluated thunk that can be evaluated to return a GameTree.
/// Since Games are stored as recursive trees in memory keeping
/// the branches of each GameTree::Turn as LazyGameTree::Unevaluated saves
/// us from allocating an exponential amount of memory for every
/// possible GameState.
pub enum LazyGameTree {
Evaluated(GameTree),
Unevaluated(Box<dyn FnMut() -> GameTree>),
}
impl LazyGameTree {
/// Retrieves the GameTree from this LazyGameTree,
/// evaluating this LazyGameTree if it hasn't already been
pub fn get_evaluated(&mut self) -> &mut GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(thunk) => {
let game = thunk();
*self = LazyGameTree::Evaluated(game);
self.get_evaluated()
},
}
}
pub fn evaluate(self) -> GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(mut thunk) => thunk(),
}
}
/// Create a Unevaluated LazyGameTree from the given state
/// and the move to take to advance that state. The passed in
/// move must be valid for the given game state.
fn from_move(move_: &Move, state: &GameState) -> LazyGameTree {
let mut state = state.clone();
let move_ = move_.clone();
LazyGameTree::Unevaluated(Box::new(move || {
state.move_avatar_for_current_player(move_)
.expect(&format!("Invalid move for the given GameState passed to LazyGameTree::from_move.\
\nMove: {:?}\nGameState: {:?}", move_, state));
GameTree::new(&state)
}))
}
}
impl std::fmt::Debug for LazyGameTree {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
LazyGameTree::Evaluated(game) => write!(f, "Evaluated({:?})", game),
LazyGameTree::Unevaluated(_) => write!(f, "Unevaluated(_)"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::strategy::tests::take_zigzag_placement;
// Starts a game with a 3 row, 5 column board and all penguins placed.
fn start_game() -> GameTree {
let mut state = GameState::with_default_board(5, 3, 2);
while !state.all_penguins_are_placed() {
take_zigzag_placement(&mut state);
}
GameTree::new(&state)
}
fn get_expected_valid_moves(game: &GameTree) -> Vec<Move> |
#[test]
fn test_new() {
// valid_moves generated correctly
// - have expected moves, check if same as generated
// starting gamestate is same as one passed to new
let game = start_game();
let mut valid_moves = game.get_state().get_valid_moves();
let mut expected_valid_moves = get_expected_valid_moves(&game);
expected_valid_moves.sort();
valid_moves.sort();
assert_eq!(expected_valid_moves, valid_moves);
}
#[test]
fn is_initially_unevaluated() {
let game = start_game();
match game {
GameTree::Turn { valid_moves, .. } => {
// Assert all the branches to the tree are initially Unevaluated
assert!(valid_moves.iter().all(|(_, lazy_game)| {
match lazy_game {
LazyGameTree::Evaluated(_) => false,
LazyGameTree::Unevaluated(_) => true,
}
}));
},
GameTree::End(_) => unreachable!("start_game should never return a finished game"),
}
}
#[test]
fn test_get_game_after_move() {
let mut initial_game = start_game();
// record initial moves and the identity of the player whose turn it is
let mut initial_valid_moves = initial_game.get_state().get_valid_moves();
let initial_turn = initial_game.get_state().current_turn;
let game_after_move = initial_game.get_game_after_move(initial_valid_moves[0]).unwrap(); // make a move
// record new moves and the identity of the player whose turn it now is
let mut valid_moves = game_after_move.get_state().get_valid_moves();
let current_turn = game_after_move.get_state().current_turn;
let mut expected_valid_moves = get_expected_valid_moves(&game_after_move);
initial_valid_moves.sort();
valid_moves.sort();
expected_valid_moves.sort();
assert_ne | {
let mut expected_valid_moves = vec![];
let state = game.get_state();
let occupied_tiles = state.get_occupied_tiles();
for penguin in state.current_player().penguins.iter() {
let current_tile = state.get_tile(penguin.tile_id.unwrap()).unwrap();
for tile in current_tile.all_reachable_tiles(&state.board, &occupied_tiles) {
expected_valid_moves.push(Move::new(current_tile.tile_id, tile.tile_id))
}
}
expected_valid_moves
} | identifier_body |
fundvalue.py | 'fid TEXT,'
'date TEXT,'
'asset INTEGER,' # Data_assetAllocation or Data_fluctuationScale
'share INTEGER,'
'holder_company REAL,' # %
'holder_individual REAL,' # %
'holder_internal REAL,' # %
'stock_ratio REAL,' # %
'bond_ratio REAL,' # %
'cash_ratio REAL,' # %
'buy INTEGER,'
'sell INTEGER,'
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_stockshare ('
'fid TEXT,'
'date TEXT,'
'share REAL,' # %
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_history ('
'fid TEXT,'
'date TEXT,'
'unitval INTEGER,' # * 10000, 货币基金为每万份收益
'totalval INTEGER,' # * 10000, 货币基金为7日年化
'incratepct REAL,'
'dividend TEXT,'
'divcash INTEGER,' # * 10000
'divval INTEGER,' # * 10000
'buystatus TEXT,'
'sellstatus TEXT,'
'PRIMARY KEY (fid, date)'
')')
self.db.commit()
def fund_list(self):
if self.db.execute("SELECT 1 FROM funds").fetchone():
return
req = self.session.get('http://fund.eastmoney.com/js/fundcode_search.js')
req.raise_for_status()
match = re_var.match(req.text)
if not match:
raise ValueError("can't parse fundcode_search.js: " + req.text)
d = json.loads(match.group(2))
cur = self.db.cursor()
cur.executemany("INSERT OR IGNORE INTO funds (fid, name, type)"
" VALUES (?,?,?)", map(operator.itemgetter(0, 2, 3), d))
self.db.commit()
def fund_info(self, fundid):
if self.db.execute("SELECT 1 FROM funds WHERE fid=? AND updated=?", (
fundid, time.strftime('%Y-%m-%d'))).fetchone():
return
pageurl = 'http://fund.eastmoney.com/%s.html' % fundid
req = self.session.get(pageurl)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.content, 'lxml')
infotds = soup.find('div', class_='infoOfFund').find_all('td')
dl02 = soup.find('dl', class_='dataItem02')
updatedtxt = dl02.dt.p.contents[-1].strip('()')
cfreturn = lambda s: float(s.rstrip('%')) if s != '--' else None
d_funds = {
'since': infotds[3].contents[-1][1:],
'company': infotds[4].a.string,
'company_id': infotds[4].a['href'].rsplit('/', 1)[1].split('.')[0],
'star': cint(infotds[5].div['class'][0][4:]),
'return_3y': cfreturn(dl02.contents[-1].find(
'span', class_='ui-num').string),
'return_all': cfreturn(soup.find('dl', class_='dataItem03'
).contents[-1].find('span', class_='ui-num').string),
}
if not infotds[0].contents[-1].name:
d_funds['risk'] = infotds[0].contents[-1][5:]
req = self.session.get(
'http://fund.eastmoney.com/pingzhongdata/%s.js?v=%s' % (
fundid, time.strftime('%Y%m%d%H%M%S')), headers={"Referer": pageurl})
req.raise_for_status()
js = parse_jsvars(req.text, 'ignore', parse_float=decimal.Decimal)
if js['ishb']:
d_funds['updated'] = date_year(updatedtxt)
d_funds['unitval'] = dec2int(
soup.find('dl', class_='dataItem01').dd.span.string)
d_funds['totalval'] = dec2int(dl02.dd.span.string.rstrip('%'))
else:
d_funds['updated'] = updatedtxt
d_funds['unitval'] = dec2int(dl02.dd.span.string)
d_funds['totalval'] = dec2int(
soup.find('dl', class_='dataItem03').dd.span.string)
d_funds['stockcodes'] = ','.join(js['stockCodes']) or None
d_funds['bondcodes'] = js['zqCodes'] or None
d_funds['name'] = js['fS_name']
d_funds['rate'] = dec2int100(js['fund_sourceRate'])
d_funds['rate_em'] = dec2int100(js['fund_Rate'])
d_funds['minval'] = dec2int100(js['fund_minsg'])
d_funds['return_1m'] = cfloat(js['syl_1y'])
d_funds['return_3m'] = cfloat(js['syl_3y'])
d_funds['return_6m'] = cfloat(js['syl_6y'])
d_funds['return_1y'] = cfloat(js['syl_1n'])
js_perf = js.get('Data_performanceEvaluation')
if js_perf:
if js_perf['avr'] != '暂无数据':
d_funds['perf_average'] = cfloat(js_perf['avr'])
if js_perf['data']:
d_funds['perf_choice'] = cfloat(js_perf['data'][0])
d_funds['perf_return'] = cfloat(js_perf['data'][1])
d_funds['perf_risk'] = cfloat(js_perf['data'][2])
d_funds['perf_stable'] = cfloat(js_perf['data'][3])
d_funds['perf_time'] = cfloat(js_perf['data'][4])
cur = self.db.cursor()
update_partial(cur, 'funds', {'fid': fundid}, d_funds)
cur.execute("DELETE FROM fund_managers WHERE fid=?", (fundid,))
for manager in js['Data_currentFundManager']:
managerid = int(manager['id'])
d_manager = {
'name': manager['name'],
'star': cint(manager['star']),
'updated': manager['power']['jzrq'],
'worktime': parse_worktime(manager['workTime']),
'pic': manager['pic']
}
if manager['power']['avr'] != '暂无数据':
d_manager['perf_average'] = cfloat(manager['power']['avr'])
if manager['power']['data']:
d_manager['perf_experience'] = cfloat(manager['power']['data'][0])
d_manager['perf_return'] = cfloat(manager['power']['data'][1])
d_manager['perf_risk'] = cfloat(manager['power']['data'][2])
d_manager['perf_stable'] = cfloat(manager['power']['data'][3])
d_manager['perf_time'] = cfloat(manager['power']['data'][4])
if manager.get('fundSize'):
d_manager['fund_num'] = int(
manager['fundSize'].split('(')[1].split('只')[0])
d_manager['fund_asset'] = int(decimal.Decimal(
manager['fundSize'].split('亿')[0]).scaleb(8))
with contextlib.suppress(KeyError, IndexError):
d_manager['profit'] = cfloat(manager['profit']['series'
][0]['data'][0]['y'])
with contextlib.suppress(KeyError, IndexError):
d_manager['profit_average'] = cfloat(manager['profit']['series'
][0]['data'][1]['y'])
with contextlib.suppress(KeyError, IndexError):
d_manager['profit_hs300'] = cfloat(manager['profit']['series'
][0]['data'][2]['y'])
update_partial(cur, 'managers', {'id': managerid}, d_manager)
cur.execute("REPLACE INTO fund_managers VALUES (?,?,?)", (
fundid, managerid, d_funds['updated']
))
for row in js['Data_rateInSimilarType']:
cur.execute("INSERT OR IGNORE INTO fund_simrank VALUES (?,?,?,?)", (
fundid, ms2date(row['x']), cint(row['y']), cint(row['sc'])
))
d_finfo = collections.defaultdict(dict)
jsgraph = js.get('Data_fundSharesPositions', [])
for row in jsgraph:
cur.execute("INSERT OR IGNORE INTO fund_stockshare VALUES (?,?,?)",
(fundid, ms2date(row[0]), cfloat(row[1])))
jsgraph = js['Data_fluctuationScale']
for k, row in zip(jsgraph['categories'], jsgraph['series']):
d_finfo[k]['asset'] = int(row['y'].scaleb(8))
jsgraph = js['Data_holderStructure']
for k, row in zip(jsgraph['categories'],
zip(*(r[ | 'data'] for r in jsgraph['series']))):
| conditional_block | |
fundvalue.py | (d):
keys, values = zip(*d.items())
return ' AND '.join(k + '=?' for k in keys), values
def update_partial(cursor, table, keys, values):
inskeys, qms, vals = make_insert(keys)
cursor.execute("INSERT OR IGNORE INTO %s (%s) VALUES (%s)" % (
table, inskeys, qms), vals)
setkeys, vals1 = make_update(values)
whrkeys, vals2 = make_where(keys)
cursor.execute("UPDATE %s SET %s WHERE %s" % (
table, setkeys, whrkeys), vals1 + vals2)
def parse_jsvars(js, errors='ignore', **kwargs):
i_jsvars = iter(filter(None, re_var.split(re_jscomment.sub('', js))))
result = {}
for name, value in zip(i_jsvars, i_jsvars):
try:
result[name] = json.loads(value, **kwargs)
except json.JSONDecodeError:
if errors == 'literal_eval':
result[name] = ast.literal_eval(value)
elif errors == 'ignore':
result[name] = None
else:
raise
return result
def parse_worktime(s):
if not s:
return None
yearspl = s.split('年')
if len(yearspl) == 1:
return int(s[:-1])
res = round(int(yearspl[0])*365.25)
if yearspl[1]:
res += int(yearspl[1][1:-1])
return res
def date_year(s):
month, day = [int(x.strip('0')) for x in s.split('-')]
date = datetime.date.today().replace(month=month, day=day)
if date > datetime.date.today():
date = date.replace(date.year-1)
return date.isoformat()
class EMFundClient:
def __init__(self, db='funds.db'):
self.db = sqlite3.connect(db)
self.init_db()
self.session = requests.Session()
self.session.headers.update(HEADERS)
self.executor = concurrent.futures.ThreadPoolExecutor(6)
def init_db(self):
cur = self.db.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS funds ('
'fid TEXT PRIMARY KEY,'
'name TEXT,'
'type TEXT,'
'risk TEXT,'
'company TEXT,'
'company_id INTEGER,'
'since TEXT,'
'updated TEXT,' # YYYY-MM-DD
'star INTEGER,'
'unitval INTEGER,' # * 10000, 货币基金为每万份收益
'totalval INTEGER,' # * 10000, 货币基金为7日年化
'rate INTEGER,' # * 10000 (100%)
'rate_em INTEGER,' # * 10000 (100%)
'minval INTEGER,' # * 100
'return_1m REAL,'
'return_3m REAL,'
'return_6m REAL,'
'return_1y REAL,' # %
'return_3y REAL,' # %
'return_all REAL,' # %
'stockcodes TEXT,'
'bondcodes TEXT,'
'perf_average REAL,'
'perf_choice REAL,' # 选证能力
'perf_return REAL,' # 收益率
'perf_risk REAL,' # 抗风险
'perf_stable REAL,' # 稳定性
'perf_time REAL' # 择时能力
')')
cur.execute('CREATE TABLE IF NOT EXISTS managers ('
'id INTEGER PRIMARY KEY,'
'name TEXT,'
'star INTEGER,'
'updated TEXT,'
'worktime INTEGER,' # round(365.25 * years + days)
'fund_num INTEGER,'
'fund_asset INTEGER,'
'profit REAL,'
'profit_average REAL,'
'profit_hs300 REAL,'
'perf_average REAL,'
'perf_experience REAL,' # 经验值
'perf_return REAL,' # 收益率
'perf_risk REAL,' # 抗风险
'perf_stable REAL,' # 稳定性
'perf_time REAL,' # 择时能力
'pic TEXT'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_managers ('
'fid TEXT,'
'manager INTEGER,'
'updated TEXT,'
'PRIMARY KEY (fid, manager)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_simrank ('
'fid TEXT,'
'date TEXT,'
'rank INTEGER,'
'total INTEGER,'
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_info ('
'fid TEXT,'
'date TEXT,'
'asset INTEGER,' # Data_assetAllocation or Data_fluctuationScale
'share INTEGER,'
'holder_company REAL,' # %
'holder_individual REAL,' # %
'holder_internal REAL,' # %
'stock_ratio REAL,' # %
'bond_ratio REAL,' # %
'cash_ratio REAL,' # %
'buy INTEGER,'
'sell INTEGER,'
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_stockshare ('
'fid TEXT,'
'date TEXT,'
'share REAL,' # %
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_history ('
'fid TEXT,'
'date TEXT,'
'unitval INTEGER,' # * 10000, 货币基金为每万份收益
'totalval INTEGER,' # * 10000, 货币基金为7日年化
'incratepct REAL,'
'dividend TEXT,'
'divcash INTEGER,' # * 10000
'divval INTEGER,' # * 10000
'buystatus TEXT,'
'sellstatus TEXT,'
'PRIMARY KEY (fid, date)'
')')
self.db.commit()
def fund_list(self):
if self.db.execute("SELECT 1 FROM funds").fetchone():
return
req = self.session.get('http://fund.eastmoney.com/js/fundcode_search.js')
req.raise_for_status()
match = re_var.match(req.text)
if not match:
raise ValueError("can't parse fundcode_search.js: " + req.text)
d = json.loads(match.group(2))
cur = self.db.cursor()
cur.executemany("INSERT OR IGNORE INTO funds (fid, name, type)"
" VALUES (?,?,?)", map(operator.itemgetter(0, 2, 3), d))
self.db.commit()
def fund_info(self, fundid):
if self.db.execute("SELECT 1 FROM funds WHERE fid=? AND updated=?", (
fundid, time.strftime('%Y-%m-%d'))).fetchone():
return
pageurl = 'http://fund.eastmoney.com/%s.html' % fundid
req = self.session.get(pageurl)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.content, 'lxml')
infotds = soup.find('div', class_='infoOfFund').find_all('td')
dl02 = soup.find('dl', class_='dataItem02')
updatedtxt = dl02.dt.p.contents[-1].strip('()')
cfreturn = lambda s: float(s.rstrip('%')) if s != '--' else None
d_funds = {
'since': infotds[3].contents[-1][1:],
'company': infotds[4].a.string,
'company_id': infotds[4].a['href'].rsplit('/', 1)[1].split('.')[0],
'star': cint(infotds[5].div['class'][0][4:]),
'return_3y': cfreturn(dl02.contents[-1].find(
'span', class_='ui-num').string),
'return_all': cfreturn(soup.find('dl', class_='dataItem03'
).contents[-1].find('span', class_='ui-num').string),
}
if not infotds[0].contents[-1].name:
d_funds['risk'] = infotds[0].contents[-1][5:]
req = self.session.get(
'http://fund.eastmoney.com/pingzhongdata/%s.js?v=%s' % (
fundid, time.strftime('%Y%m%d%H%M%S')), headers={"Referer": pageurl})
req.raise_for_status()
js = parse_jsvars(req.text, 'ignore', parse_float=decimal.Decimal)
if js['ishb']:
d_funds['updated'] = date_year(updatedtxt)
d_funds['unitval'] = dec2int(
soup.find('dl', class_='dataItem01').dd.span.string)
d_funds['totalval'] = dec2int(dl02.dd.span.string.rstrip('%'))
else:
d_funds['updated | make_where | identifier_name | |
fundvalue.py | :
if errors == 'literal_eval':
result[name] = ast.literal_eval(value)
elif errors == 'ignore':
result[name] = None
else:
raise
return result
def parse_worktime(s):
if not s:
return None
yearspl = s.split('年')
if len(yearspl) == 1:
return int(s[:-1])
res = round(int(yearspl[0])*365.25)
if yearspl[1]:
res += int(yearspl[1][1:-1])
return res
def date_year(s):
month, day = [int(x.strip('0')) for x in s.split('-')]
date = datetime.date.today().replace(month=month, day=day)
if date > datetime.date.today():
date = date.replace(date.year-1)
return date.isoformat()
class EMFundClient:
def __init__(self, db='funds.db'):
self.db = sqlite3.connect(db)
self.init_db()
self.session = requests.Session()
self.session.headers.update(HEADERS)
self.executor = concurrent.futures.ThreadPoolExecutor(6)
def init_db(self):
cur = self.db.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS funds ('
'fid TEXT PRIMARY KEY,'
'name TEXT,'
'type TEXT,'
'risk TEXT,'
'company TEXT,'
'company_id INTEGER,'
'since TEXT,'
'updated TEXT,' # YYYY-MM-DD
'star INTEGER,'
'unitval INTEGER,' # * 10000, 货币基金为每万份收益
'totalval INTEGER,' # * 10000, 货币基金为7日年化
'rate INTEGER,' # * 10000 (100%)
'rate_em INTEGER,' # * 10000 (100%)
'minval INTEGER,' # * 100
'return_1m REAL,'
'return_3m REAL,'
'return_6m REAL,'
'return_1y REAL,' # %
'return_3y REAL,' # %
'return_all REAL,' # %
'stockcodes TEXT,'
'bondcodes TEXT,'
'perf_average REAL,'
'perf_choice REAL,' # 选证能力
'perf_return REAL,' # 收益率
'perf_risk REAL,' # 抗风险
'perf_stable REAL,' # 稳定性
'perf_time REAL' # 择时能力
')')
cur.execute('CREATE TABLE IF NOT EXISTS managers ('
'id INTEGER PRIMARY KEY,'
'name TEXT,'
'star INTEGER,'
'updated TEXT,'
'worktime INTEGER,' # round(365.25 * years + days)
'fund_num INTEGER,'
'fund_asset INTEGER,'
'profit REAL,'
'profit_average REAL,'
'profit_hs300 REAL,'
'perf_average REAL,'
'perf_experience REAL,' # 经验值
'perf_return REAL,' # 收益率
'perf_risk REAL,' # 抗风险
'perf_stable REAL,' # 稳定性 | 'manager INTEGER,'
'updated TEXT,'
'PRIMARY KEY (fid, manager)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_simrank ('
'fid TEXT,'
'date TEXT,'
'rank INTEGER,'
'total INTEGER,'
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_info ('
'fid TEXT,'
'date TEXT,'
'asset INTEGER,' # Data_assetAllocation or Data_fluctuationScale
'share INTEGER,'
'holder_company REAL,' # %
'holder_individual REAL,' # %
'holder_internal REAL,' # %
'stock_ratio REAL,' # %
'bond_ratio REAL,' # %
'cash_ratio REAL,' # %
'buy INTEGER,'
'sell INTEGER,'
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_stockshare ('
'fid TEXT,'
'date TEXT,'
'share REAL,' # %
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_history ('
'fid TEXT,'
'date TEXT,'
'unitval INTEGER,' # * 10000, 货币基金为每万份收益
'totalval INTEGER,' # * 10000, 货币基金为7日年化
'incratepct REAL,'
'dividend TEXT,'
'divcash INTEGER,' # * 10000
'divval INTEGER,' # * 10000
'buystatus TEXT,'
'sellstatus TEXT,'
'PRIMARY KEY (fid, date)'
')')
self.db.commit()
def fund_list(self):
if self.db.execute("SELECT 1 FROM funds").fetchone():
return
req = self.session.get('http://fund.eastmoney.com/js/fundcode_search.js')
req.raise_for_status()
match = re_var.match(req.text)
if not match:
raise ValueError("can't parse fundcode_search.js: " + req.text)
d = json.loads(match.group(2))
cur = self.db.cursor()
cur.executemany("INSERT OR IGNORE INTO funds (fid, name, type)"
" VALUES (?,?,?)", map(operator.itemgetter(0, 2, 3), d))
self.db.commit()
def fund_info(self, fundid):
if self.db.execute("SELECT 1 FROM funds WHERE fid=? AND updated=?", (
fundid, time.strftime('%Y-%m-%d'))).fetchone():
return
pageurl = 'http://fund.eastmoney.com/%s.html' % fundid
req = self.session.get(pageurl)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.content, 'lxml')
infotds = soup.find('div', class_='infoOfFund').find_all('td')
dl02 = soup.find('dl', class_='dataItem02')
updatedtxt = dl02.dt.p.contents[-1].strip('()')
cfreturn = lambda s: float(s.rstrip('%')) if s != '--' else None
d_funds = {
'since': infotds[3].contents[-1][1:],
'company': infotds[4].a.string,
'company_id': infotds[4].a['href'].rsplit('/', 1)[1].split('.')[0],
'star': cint(infotds[5].div['class'][0][4:]),
'return_3y': cfreturn(dl02.contents[-1].find(
'span', class_='ui-num').string),
'return_all': cfreturn(soup.find('dl', class_='dataItem03'
).contents[-1].find('span', class_='ui-num').string),
}
if not infotds[0].contents[-1].name:
d_funds['risk'] = infotds[0].contents[-1][5:]
req = self.session.get(
'http://fund.eastmoney.com/pingzhongdata/%s.js?v=%s' % (
fundid, time.strftime('%Y%m%d%H%M%S')), headers={"Referer": pageurl})
req.raise_for_status()
js = parse_jsvars(req.text, 'ignore', parse_float=decimal.Decimal)
if js['ishb']:
d_funds['updated'] = date_year(updatedtxt)
d_funds['unitval'] = dec2int(
soup.find('dl', class_='dataItem01').dd.span.string)
d_funds['totalval'] = dec2int(dl02.dd.span.string.rstrip('%'))
else:
d_funds['updated'] = updatedtxt
d_funds['unitval'] = dec2int(dl02.dd.span.string)
d_funds['totalval'] = dec2int(
soup.find('dl', class_='dataItem03').dd.span.string)
d_funds['stockcodes'] = ','.join(js['stockCodes']) or None
d_funds['bondcodes'] = js['zqCodes'] or None
d_funds['name'] = js['fS_name']
d_funds['rate'] = dec2int100(js['fund_sourceRate'])
d_funds['rate_em'] = dec2int100(js['fund_Rate'])
d_funds['minval'] = dec2int100(js['fund_minsg'])
d_funds['return_1m'] = cfloat(js['syl_1y'])
d_funds['return_3m'] = cfloat(js['syl_3y'])
d_funds[' | 'perf_time REAL,' # 择时能力
'pic TEXT'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_managers ('
'fid TEXT,' | random_line_split |
fundvalue.py | if errors == 'literal_eval':
result[name] = ast.literal_eval(value)
elif errors == 'ignore':
result[name] = None
else:
raise
return result
def parse_worktime(s):
if not s:
return None
yearspl = s.split('年')
if len(yearspl) == 1:
return int(s[:-1])
res = round(int(yearspl[0])*365.25)
if yearspl[1]:
res += int(yearspl[1][1:-1])
return res
def date_year(s):
month, day = [int(x.strip('0')) for x in s.split('-')]
date = datetime.date.today().replace(month=month, day=day)
if date > datetime.date.today():
date = date.replace(date.year-1)
return date.isoformat()
class EMFundClient:
de | 'totalval INTEGER,' # * 10000, 货币基金为7日年化
'rate INTEGER,' # * 10000 (100%)
'rate_em INTEGER,' # * 10000 (100%)
'minval INTEGER,' # * 100
'return_1m REAL,'
'return_3m REAL,'
'return_6m REAL,'
'return_1y REAL,' # %
'return_3y REAL,' # %
'return_all REAL,' # %
'stockcodes TEXT,'
'bondcodes TEXT,'
'perf_average REAL,'
'perf_choice REAL,' # 选证能力
'perf_return REAL,' # 收益率
'perf_risk REAL,' # 抗风险
'perf_stable REAL,' # 稳定性
'perf_time REAL' # 择时能力
')')
cur.execute('CREATE TABLE IF NOT EXISTS managers ('
'id INTEGER PRIMARY KEY,'
'name TEXT,'
'star INTEGER,'
'updated TEXT,'
'worktime INTEGER,' # round(365.25 * years + days)
'fund_num INTEGER,'
'fund_asset INTEGER,'
'profit REAL,'
'profit_average REAL,'
'profit_hs300 REAL,'
'perf_average REAL,'
'perf_experience REAL,' # 经验值
'perf_return REAL,' # 收益率
'perf_risk REAL,' # 抗风险
'perf_stable REAL,' # 稳定性
'perf_time REAL,' # 择时能力
'pic TEXT'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_managers ('
'fid TEXT,'
'manager INTEGER,'
'updated TEXT,'
'PRIMARY KEY (fid, manager)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_simrank ('
'fid TEXT,'
'date TEXT,'
'rank INTEGER,'
'total INTEGER,'
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_info ('
'fid TEXT,'
'date TEXT,'
'asset INTEGER,' # Data_assetAllocation or Data_fluctuationScale
'share INTEGER,'
'holder_company REAL,' # %
'holder_individual REAL,' # %
'holder_internal REAL,' # %
'stock_ratio REAL,' # %
'bond_ratio REAL,' # %
'cash_ratio REAL,' # %
'buy INTEGER,'
'sell INTEGER,'
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_stockshare ('
'fid TEXT,'
'date TEXT,'
'share REAL,' # %
'PRIMARY KEY (fid, date)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS fund_history ('
'fid TEXT,'
'date TEXT,'
'unitval INTEGER,' # * 10000, 货币基金为每万份收益
'totalval INTEGER,' # * 10000, 货币基金为7日年化
'incratepct REAL,'
'dividend TEXT,'
'divcash INTEGER,' # * 10000
'divval INTEGER,' # * 10000
'buystatus TEXT,'
'sellstatus TEXT,'
'PRIMARY KEY (fid, date)'
')')
self.db.commit()
def fund_list(self):
if self.db.execute("SELECT 1 FROM funds").fetchone():
return
req = self.session.get('http://fund.eastmoney.com/js/fundcode_search.js')
req.raise_for_status()
match = re_var.match(req.text)
if not match:
raise ValueError("can't parse fundcode_search.js: " + req.text)
d = json.loads(match.group(2))
cur = self.db.cursor()
cur.executemany("INSERT OR IGNORE INTO funds (fid, name, type)"
" VALUES (?,?,?)", map(operator.itemgetter(0, 2, 3), d))
self.db.commit()
def fund_info(self, fundid):
if self.db.execute("SELECT 1 FROM funds WHERE fid=? AND updated=?", (
fundid, time.strftime('%Y-%m-%d'))).fetchone():
return
pageurl = 'http://fund.eastmoney.com/%s.html' % fundid
req = self.session.get(pageurl)
req.raise_for_status()
soup = bs4.BeautifulSoup(req.content, 'lxml')
infotds = soup.find('div', class_='infoOfFund').find_all('td')
dl02 = soup.find('dl', class_='dataItem02')
updatedtxt = dl02.dt.p.contents[-1].strip('()')
cfreturn = lambda s: float(s.rstrip('%')) if s != '--' else None
d_funds = {
'since': infotds[3].contents[-1][1:],
'company': infotds[4].a.string,
'company_id': infotds[4].a['href'].rsplit('/', 1)[1].split('.')[0],
'star': cint(infotds[5].div['class'][0][4:]),
'return_3y': cfreturn(dl02.contents[-1].find(
'span', class_='ui-num').string),
'return_all': cfreturn(soup.find('dl', class_='dataItem03'
).contents[-1].find('span', class_='ui-num').string),
}
if not infotds[0].contents[-1].name:
d_funds['risk'] = infotds[0].contents[-1][5:]
req = self.session.get(
'http://fund.eastmoney.com/pingzhongdata/%s.js?v=%s' % (
fundid, time.strftime('%Y%m%d%H%M%S')), headers={"Referer": pageurl})
req.raise_for_status()
js = parse_jsvars(req.text, 'ignore', parse_float=decimal.Decimal)
if js['ishb']:
d_funds['updated'] = date_year(updatedtxt)
d_funds['unitval'] = dec2int(
soup.find('dl', class_='dataItem01').dd.span.string)
d_funds['totalval'] = dec2int(dl02.dd.span.string.rstrip('%'))
else:
d_funds['updated'] = updatedtxt
d_funds['unitval'] = dec2int(dl02.dd.span.string)
d_funds['totalval'] = dec2int(
soup.find('dl', class_='dataItem03').dd.span.string)
d_funds['stockcodes'] = ','.join(js['stockCodes']) or None
d_funds['bondcodes'] = js['zqCodes'] or None
d_funds['name'] = js['fS_name']
d_funds['rate'] = dec2int100(js['fund_sourceRate'])
d_funds['rate_em'] = dec2int100(js['fund_Rate'])
d_funds['minval'] = dec2int100(js['fund_minsg'])
d_funds['return_1m'] = cfloat(js['syl_1y'])
d_funds['return_3m'] = cfloat(js['syl_3y'])
d_f | f __init__(self, db='funds.db'):
self.db = sqlite3.connect(db)
self.init_db()
self.session = requests.Session()
self.session.headers.update(HEADERS)
self.executor = concurrent.futures.ThreadPoolExecutor(6)
def init_db(self):
cur = self.db.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS funds ('
'fid TEXT PRIMARY KEY,'
'name TEXT,'
'type TEXT,'
'risk TEXT,'
'company TEXT,'
'company_id INTEGER,'
'since TEXT,'
'updated TEXT,' # YYYY-MM-DD
'star INTEGER,'
'unitval INTEGER,' # * 10000, 货币基金为每万份收益 | identifier_body |
ej4.py | =='max':
img += learning_rate * grads
elif max_or_min=='min':
img -= learning_rate * grads
else:
print('invalid selection of learning')
return
return loss, img
def initialize_image():
# We start from a gray image with some random noise
img = tf.random.uniform((1, img_width, img_height, 3))
# ResNet50V2 expects inputs in the range [-1, +1].
# Here we scale our random inputs to [-0.125, +0.125]
return (img - 0.5) * 0.25
def visualize_filter(index, it=30, lr=10, layer='filter', max_or_min='max', type_img='noise', filename=None):
# We run gradient ascent for 20 steps
iterations = it
learning_rate = lr
if type_img=='noise':
img = initialize_image()
elif type_img=='specific':
img = plt.imread(filename).astype(np.float32)
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2])
else:
print('invalida image type')
return
for iteration in range(iterations):
loss, img = gradient_ascent_step(img, index, learning_rate, layer, max_or_min)
# Decode the resulting input image
img = deprocess_image(img[0].numpy())
return loss, img
def | (img):
# Normalize array: center on 0., ensure variance is 0.15
img -= img.mean()
img /= img.std() + 1e-5
img *= 0.15
# Center crop
img = img[25:-25, 25:-25, :]
# Clip to [0, 1]
img += 0.5
img = np.clip(img, 0, 1)
# Convert to RGB array
img *= 255
img = np.clip(img, 0, 255).astype("uint8")
return img
img_width = 400
img_height = 400
layer_name = "conv5_block3_out"
model = keras.applications.ResNet50V2(weights="imagenet", include_top=True)
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
#### ploteo de los 64 filtros
all_imgs = []
for filter_index in range(64):
print("Processing filter %d" % (filter_index,))
loss, img = visualize_filter(filter_index, 30, 10, 'filter')
all_imgs.append(img)
margin = 5
n = 8
cropped_width = img_width - 25 * 2
cropped_height = img_height - 25 * 2
width = n * cropped_width + (n - 1) * margin
height = n * cropped_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
for i in range(n):
for j in range(n):
img = all_imgs[i * n + j]
stitched_filters[
(cropped_width + margin) * i : (cropped_width + margin) * i + cropped_width,
(cropped_height + margin) * j : (cropped_height + margin) * j
+ cropped_height,
:,
] = img
keras.preprocessing.image.save_img("ej4_64filtros_conv5.pdf", stitched_filters)
### termina el ploteo de los 64 filtros
### ploteo al patón bauza
loss, img = visualize_filter(0, 1000, 10, 'filter', 'max', 'specific', 'paton_bauza.jpeg')
plt.imshow(img)
plt.title('El super hombre del cual se refería Nietszche a.k.a Patón Bauza luego de maximizar la activación', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_paton_conv5.pdf', format='pdf')
plt.show()
### ploteo un filtro especifico maximizando la activacion
loss, img = visualize_filter(0, 30, 10, 'filter', 'max')
plt.imshow(img)
plt.title('Filtro máxima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_filter_max_conv5.pdf', format='pdf')
plt.show()
### ploteo un filtro especifico minimizando la activacion
loss, img = visualize_filter(0, 30, 10, 'filter', 'min')
plt.imshow(img)
plt.title('Filtro con activación mínima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_filter_min_conv5.pdf', format='pdf')
plt.show()
### ploteo como se ve una clase para la red neuronal
img_width = 1080
img_height = 1080
layer_name = "probs"
model = keras.applications.ResNet50V2(weights="imagenet", include_top=True)
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
loss, img = visualize_filter(283, 30, 100, 'class', 'max')
plt.imshow(img)
plt.title('Clase 283 (gato siames) con activación máxima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('class.pdf', format='pdf')
plt.show()
########################################
#
#
#######
# #
#######
########################################
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from textwrap import wrap
import seaborn as sns
from tensorflow.keras.utils import to_categorical
sns.set(style='whitegrid')
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train=np.reshape(x_train, (x_train.shape[0], np.prod(x_train.shape[1:])))
x_test=np.reshape(x_test, (x_test.shape[0], np.prod(x_test.shape[1:])))
x_train, x_val, y_train, y_val = train_test_split(x_train,y_train,test_size=30000, stratify=y_train)
x_train = x_train/255
x_val = x_val/255
x_test = x_test/255
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
y_val=to_categorical(y_val)
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
x_val = np.reshape(x_val, (len(x_val), 28, 28, 1))
''' Entreno la red 1 vez, desp la cargo
lr=5e-4
rf=1e-2
batch_size=100
epochs=50
model_mnist = keras.models.Sequential(name='ej4_mnist')
model_mnist.add(keras.layers.Input(shape=x_train[0].shape))
model_mnist.add(keras.layers.Conv2D(10,
(24, 24),
activation='relu',
kernel_regularizer=keras.regularizers.l2(rf),
padding='same',
name='filtro1'))
model_mnist.add(keras.layers.MaxPooling2D((2, 2),
padding='same'))
model_mnist.add(keras.layers.Conv2D(10,
(12, 12),
activation='relu',
kernel_regularizer=keras.regularizers.l2(rf),
padding='same',
name='filtro2'))
model_mnist.add(keras.layers.Flatten())
model_mnist.add(keras.layers.Dense(10,
activation='linear',
kernel_regularizer=keras.regularizers.l2(rf),
name='output'))
model_mnist.compile(keras.optimizers.Adam(learning_rate=lr),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.CategoricalAccuracy(name='acc')])
model_mnist.summary()
history = model_mnist.fit(x_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_val, y_val),
verbose=2)
model_mnist.save_weights('weights_ej5_b')
model_mnist.save('model_mnist.h5')
'''
model_mnist = keras.models.load_model('model_mnist.h5')
img_width = 28
img_height = 28
layer = model_mnist.get_layer(name='output')
feature_extractor_mnist = keras.Model(inputs=model_mnist.inputs, outputs=layer.output)
def compute_loss(input_image, index, layer):
activation = feature_extractor_mnist(input_image)
if layer=='filter':
filter_activation = activation[:, 2:-2, 2:-2, index]
elif layer=='class':
filter_activation = activation[:, index]
else:
print('invalid class')
return
return tf.reduce_mean(filter_activation)
@tf.function
| deprocess_image | identifier_name |
ej4.py | random inputs to [-0.125, +0.125]
return (img - 0.5) * 0.25
def visualize_filter(index, it=30, lr=10, layer='filter', max_or_min='max', type_img='noise', filename=None):
# We run gradient ascent for 20 steps
iterations = it
learning_rate = lr
if type_img=='noise':
img = initialize_image()
elif type_img=='specific':
img = plt.imread(filename).astype(np.float32)
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2])
else:
print('invalida image type')
return
for iteration in range(iterations):
loss, img = gradient_ascent_step(img, index, learning_rate, layer, max_or_min)
# Decode the resulting input image
img = deprocess_image(img[0].numpy())
return loss, img
def deprocess_image(img):
# Normalize array: center on 0., ensure variance is 0.15
img -= img.mean()
img /= img.std() + 1e-5
img *= 0.15
# Center crop
img = img[25:-25, 25:-25, :]
# Clip to [0, 1]
img += 0.5
img = np.clip(img, 0, 1)
# Convert to RGB array
img *= 255
img = np.clip(img, 0, 255).astype("uint8")
return img
img_width = 400
img_height = 400
layer_name = "conv5_block3_out"
model = keras.applications.ResNet50V2(weights="imagenet", include_top=True)
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
#### ploteo de los 64 filtros
all_imgs = []
for filter_index in range(64):
print("Processing filter %d" % (filter_index,))
loss, img = visualize_filter(filter_index, 30, 10, 'filter')
all_imgs.append(img)
margin = 5
n = 8
cropped_width = img_width - 25 * 2
cropped_height = img_height - 25 * 2
width = n * cropped_width + (n - 1) * margin
height = n * cropped_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
for i in range(n):
for j in range(n):
img = all_imgs[i * n + j]
stitched_filters[
(cropped_width + margin) * i : (cropped_width + margin) * i + cropped_width,
(cropped_height + margin) * j : (cropped_height + margin) * j
+ cropped_height,
:,
] = img
keras.preprocessing.image.save_img("ej4_64filtros_conv5.pdf", stitched_filters)
### termina el ploteo de los 64 filtros
### ploteo al patón bauza
loss, img = visualize_filter(0, 1000, 10, 'filter', 'max', 'specific', 'paton_bauza.jpeg')
plt.imshow(img)
plt.title('El super hombre del cual se refería Nietszche a.k.a Patón Bauza luego de maximizar la activación', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_paton_conv5.pdf', format='pdf')
plt.show()
### ploteo un filtro especifico maximizando la activacion
loss, img = visualize_filter(0, 30, 10, 'filter', 'max')
plt.imshow(img)
plt.title('Filtro máxima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_filter_max_conv5.pdf', format='pdf')
plt.show()
### ploteo un filtro especifico minimizando la activacion
loss, img = visualize_filter(0, 30, 10, 'filter', 'min')
plt.imshow(img)
plt.title('Filtro con activación mínima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_filter_min_conv5.pdf', format='pdf')
plt.show()
### ploteo como se ve una clase para la red neuronal
img_width = 1080
img_height = 1080
layer_name = "probs"
model = keras.applications.ResNet50V2(weights="imagenet", include_top=True)
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
loss, img = visualize_filter(283, 30, 100, 'class', 'max')
plt.imshow(img)
plt.title('Clase 283 (gato siames) con activación máxima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('class.pdf', format='pdf')
plt.show()
########################################
#
#
#######
# #
#######
########################################
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from textwrap import wrap
import seaborn as sns
from tensorflow.keras.utils import to_categorical
sns.set(style='whitegrid')
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train=np.reshape(x_train, (x_train.shape[0], np.prod(x_train.shape[1:])))
x_test=np.reshape(x_test, (x_test.shape[0], np.prod(x_test.shape[1:])))
x_train, x_val, y_train, y_val = train_test_split(x_train,y_train,test_size=30000, stratify=y_train)
x_train = x_train/255
x_val = x_val/255
x_test = x_test/255
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
y_val=to_categorical(y_val)
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
x_val = np.reshape(x_val, (len(x_val), 28, 28, 1))
''' Entreno la red 1 vez, desp la cargo
lr=5e-4
rf=1e-2
batch_size=100
epochs=50
model_mnist = keras.models.Sequential(name='ej4_mnist')
model_mnist.add(keras.layers.Input(shape=x_train[0].shape))
model_mnist.add(keras.layers.Conv2D(10,
(24, 24),
activation='relu',
kernel_regularizer=keras.regularizers.l2(rf),
padding='same',
name='filtro1'))
model_mnist.add(keras.layers.MaxPooling2D((2, 2),
padding='same'))
model_mnist.add(keras.layers.Conv2D(10,
(12, 12),
activation='relu',
kernel_regularizer=keras.regularizers.l2(rf),
padding='same',
name='filtro2'))
model_mnist.add(keras.layers.Flatten())
model_mnist.add(keras.layers.Dense(10,
activation='linear',
kernel_regularizer=keras.regularizers.l2(rf),
name='output'))
model_mnist.compile(keras.optimizers.Adam(learning_rate=lr),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.CategoricalAccuracy(name='acc')])
model_mnist.summary()
history = model_mnist.fit(x_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_val, y_val),
verbose=2)
model_mnist.save_weights('weights_ej5_b')
model_mnist.save('model_mnist.h5')
'''
model_mnist = keras.models.load_model('model_mnist.h5')
img_width = 28
img_height = 28
layer = model_mnist.get_layer(name='output')
feature_extractor_mnist = keras.Model(inputs=model_mnist.inputs, outputs=layer.output)
def compute_loss(input_image, index, layer):
activation = feature_extractor_mnist(input_image)
if layer=='filter':
filter_activation = activation[:, 2:-2, 2:-2, index]
elif layer=='class':
filter_activation = activation[:, index]
else:
print('invalid class')
return
return tf.reduce_mean(filter_activation)
@tf.function
def gradient_ascent_step(img, index, learning_rate, layer, max_or_min):
with tf.GradientTape() as tape:
tape.watch(img)
loss = compute_loss(img, index, layer)
# Compute gradients.
grads = tape.gradient(loss, img)
# Normalize gradients.
grads = tf.math.l2_normalize(grads)
if max_or_min=='max':
img += learning_rate * grads
elif max_or_min=='min':
img -= le | arning_rate * grads
els | conditional_block | |
ej4.py |
@tf.function
def gradient_ascent_step(img, index, learning_rate, layer, max_or_min):
with tf.GradientTape() as tape:
tape.watch(img)
loss = compute_loss(img, index, layer)
# Compute gradients.
grads = tape.gradient(loss, img)
# Normalize gradients.
grads = tf.math.l2_normalize(grads)
if max_or_min=='max':
img += learning_rate * grads
elif max_or_min=='min':
img -= learning_rate * grads
else:
print('invalid selection of learning')
return
return loss, img
def initialize_image():
# We start from a gray image with some random noise
img = tf.random.uniform((1, img_width, img_height, 3))
# ResNet50V2 expects inputs in the range [-1, +1].
# Here we scale our random inputs to [-0.125, +0.125]
return (img - 0.5) * 0.25
def visualize_filter(index, it=30, lr=10, layer='filter', max_or_min='max', type_img='noise', filename=None):
# We run gradient ascent for 20 steps
iterations = it
learning_rate = lr
if type_img=='noise':
img = initialize_image()
elif type_img=='specific':
img = plt.imread(filename).astype(np.float32)
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2])
else:
print('invalida image type')
return
for iteration in range(iterations):
loss, img = gradient_ascent_step(img, index, learning_rate, layer, max_or_min)
# Decode the resulting input image
img = deprocess_image(img[0].numpy())
return loss, img
def deprocess_image(img):
# Normalize array: center on 0., ensure variance is 0.15
img -= img.mean()
img /= img.std() + 1e-5
img *= 0.15
# Center crop
img = img[25:-25, 25:-25, :]
# Clip to [0, 1]
img += 0.5
img = np.clip(img, 0, 1)
# Convert to RGB array
img *= 255
img = np.clip(img, 0, 255).astype("uint8")
return img
img_width = 400
img_height = 400
layer_name = "conv5_block3_out"
model = keras.applications.ResNet50V2(weights="imagenet", include_top=True)
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
#### ploteo de los 64 filtros
all_imgs = []
for filter_index in range(64):
print("Processing filter %d" % (filter_index,))
loss, img = visualize_filter(filter_index, 30, 10, 'filter')
all_imgs.append(img)
margin = 5
n = 8
cropped_width = img_width - 25 * 2
cropped_height = img_height - 25 * 2
width = n * cropped_width + (n - 1) * margin
height = n * cropped_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
for i in range(n):
for j in range(n):
img = all_imgs[i * n + j]
stitched_filters[
(cropped_width + margin) * i : (cropped_width + margin) * i + cropped_width,
(cropped_height + margin) * j : (cropped_height + margin) * j
+ cropped_height,
:,
] = img
keras.preprocessing.image.save_img("ej4_64filtros_conv5.pdf", stitched_filters)
### termina el ploteo de los 64 filtros
### ploteo al patón bauza
loss, img = visualize_filter(0, 1000, 10, 'filter', 'max', 'specific', 'paton_bauza.jpeg')
plt.imshow(img)
plt.title('El super hombre del cual se refería Nietszche a.k.a Patón Bauza luego de maximizar la activación', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_paton_conv5.pdf', format='pdf')
plt.show()
### ploteo un filtro especifico maximizando la activacion
loss, img = visualize_filter(0, 30, 10, 'filter', 'max')
plt.imshow(img)
plt.title('Filtro máxima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_filter_max_conv5.pdf', format='pdf')
plt.show()
### ploteo un filtro especifico minimizando la activacion
loss, img = visualize_filter(0, 30, 10, 'filter', 'min')
plt.imshow(img)
plt.title('Filtro con activación mínima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_filter_min_conv5.pdf', format='pdf')
plt.show()
### ploteo como se ve una clase para la red neuronal
img_width = 1080
img_height = 1080
layer_name = "probs"
model = keras.applications.ResNet50V2(weights="imagenet", include_top=True)
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
loss, img = visualize_filter(283, 30, 100, 'class', 'max')
plt.imshow(img)
plt.title('Clase 283 (gato siames) con activación máxima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('class.pdf', format='pdf')
plt.show()
########################################
#
#
#######
# #
#######
########################################
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from textwrap import wrap
import seaborn as sns
from tensorflow.keras.utils import to_categorical
sns.set(style='whitegrid')
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train=np.reshape(x_train, (x_train.shape[0], np.prod(x_train.shape[1:])))
x_test=np.reshape(x_test, (x_test.shape[0], np.prod(x_test.shape[1:])))
x_train, x_val, y_train, y_val = train_test_split(x_train,y_train,test_size=30000, stratify=y_train)
x_train = x_train/255
x_val = x_val/255
x_test = x_test/255
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
y_val=to_categorical(y_val)
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
x_val = np.reshape(x_val, (len(x_val), 28, 28, 1))
''' Entreno la red 1 vez, desp la cargo
lr=5e-4
rf=1e-2
batch_size=100
epochs=50
model_mnist = keras.models.Sequential(name='ej4_mnist')
model_mnist.add(keras.layers.Input(shape=x_train[0].shape))
model_mnist.add(keras.layers.Conv2D(10,
(24, 24),
activation='relu',
kernel_regularizer=keras.regularizers.l2(rf),
padding='same',
name='filtro1'))
model_mnist.add(keras.layers.MaxPooling2D((2, 2),
padding='same'))
model_mnist.add(keras.layers.Conv2D(10,
(12, 12),
activation='relu',
kernel_regularizer=keras.regularizers.l2(rf),
padding='same',
name='filtro2'))
model_mnist.add(keras.layers.Flatten())
model_mnist.add(keras.layers.Dense(10,
activation='linear',
kernel_regularizer=keras.regularizers.l2(rf),
name='output'))
model_mnist.compile(keras.optimizers.Adam(learning_rate=lr),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.CategoricalAccuracy(name='acc')])
model_mnist.summary()
history = model_mnist.fit(x_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_val, y_val),
verbose=2)
model_mnist.save | activation = feature_extractor(input_image)
if layer=='filter':
filter_activation = activation[:, 2:-2, 2:-2, index]
elif layer=='class':
filter_activation = activation[:, index]
print(filter_activation)
else:
print('invalid class')
return
print(tf.reduce_mean(filter_activation))
return tf.reduce_mean(filter_activation) | identifier_body | |
ej4.py | 0, 1)
# Convert to RGB array
img *= 255
img = np.clip(img, 0, 255).astype("uint8")
return img
img_width = 400
img_height = 400
layer_name = "conv5_block3_out"
model = keras.applications.ResNet50V2(weights="imagenet", include_top=True)
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
#### ploteo de los 64 filtros
all_imgs = []
for filter_index in range(64):
print("Processing filter %d" % (filter_index,))
loss, img = visualize_filter(filter_index, 30, 10, 'filter')
all_imgs.append(img)
margin = 5
n = 8
cropped_width = img_width - 25 * 2
cropped_height = img_height - 25 * 2
width = n * cropped_width + (n - 1) * margin
height = n * cropped_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
for i in range(n):
for j in range(n):
img = all_imgs[i * n + j]
stitched_filters[
(cropped_width + margin) * i : (cropped_width + margin) * i + cropped_width,
(cropped_height + margin) * j : (cropped_height + margin) * j
+ cropped_height,
:,
] = img
keras.preprocessing.image.save_img("ej4_64filtros_conv5.pdf", stitched_filters)
### termina el ploteo de los 64 filtros
### ploteo al patón bauza
loss, img = visualize_filter(0, 1000, 10, 'filter', 'max', 'specific', 'paton_bauza.jpeg')
plt.imshow(img)
plt.title('El super hombre del cual se refería Nietszche a.k.a Patón Bauza luego de maximizar la activación', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_paton_conv5.pdf', format='pdf')
plt.show()
### ploteo un filtro especifico maximizando la activacion
loss, img = visualize_filter(0, 30, 10, 'filter', 'max')
plt.imshow(img)
plt.title('Filtro máxima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_filter_max_conv5.pdf', format='pdf')
plt.show()
### ploteo un filtro especifico minimizando la activacion
loss, img = visualize_filter(0, 30, 10, 'filter', 'min')
plt.imshow(img)
plt.title('Filtro con activación mínima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('ej4_filter_min_conv5.pdf', format='pdf')
plt.show()
### ploteo como se ve una clase para la red neuronal
img_width = 1080
img_height = 1080
layer_name = "probs"
model = keras.applications.ResNet50V2(weights="imagenet", include_top=True)
layer = model.get_layer(name=layer_name)
feature_extractor = keras.Model(inputs=model.inputs, outputs=layer.output)
loss, img = visualize_filter(283, 30, 100, 'class', 'max')
plt.imshow(img)
plt.title('Clase 283 (gato siames) con activación máxima para ruido como entrada', fontsize=14, wrap=True)
plt.axis('off')
plt.savefig('class.pdf', format='pdf')
plt.show()
########################################
#
#
#######
# #
#######
########################################
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from textwrap import wrap
import seaborn as sns
from tensorflow.keras.utils import to_categorical
sns.set(style='whitegrid')
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train=np.reshape(x_train, (x_train.shape[0], np.prod(x_train.shape[1:])))
x_test=np.reshape(x_test, (x_test.shape[0], np.prod(x_test.shape[1:])))
x_train, x_val, y_train, y_val = train_test_split(x_train,y_train,test_size=30000, stratify=y_train)
x_train = x_train/255
x_val = x_val/255
x_test = x_test/255
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
y_val=to_categorical(y_val)
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
x_val = np.reshape(x_val, (len(x_val), 28, 28, 1))
''' Entreno la red 1 vez, desp la cargo
lr=5e-4
rf=1e-2
batch_size=100
epochs=50
model_mnist = keras.models.Sequential(name='ej4_mnist')
model_mnist.add(keras.layers.Input(shape=x_train[0].shape))
model_mnist.add(keras.layers.Conv2D(10,
(24, 24),
activation='relu',
kernel_regularizer=keras.regularizers.l2(rf),
padding='same',
name='filtro1'))
model_mnist.add(keras.layers.MaxPooling2D((2, 2),
padding='same'))
model_mnist.add(keras.layers.Conv2D(10,
(12, 12),
activation='relu',
kernel_regularizer=keras.regularizers.l2(rf),
padding='same',
name='filtro2'))
model_mnist.add(keras.layers.Flatten())
model_mnist.add(keras.layers.Dense(10,
activation='linear',
kernel_regularizer=keras.regularizers.l2(rf),
name='output'))
model_mnist.compile(keras.optimizers.Adam(learning_rate=lr),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.CategoricalAccuracy(name='acc')])
model_mnist.summary()
history = model_mnist.fit(x_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_val, y_val),
verbose=2)
model_mnist.save_weights('weights_ej5_b')
model_mnist.save('model_mnist.h5')
'''
model_mnist = keras.models.load_model('model_mnist.h5')
img_width = 28
img_height = 28
layer = model_mnist.get_layer(name='output')
feature_extractor_mnist = keras.Model(inputs=model_mnist.inputs, outputs=layer.output)
def compute_loss(input_image, index, layer):
activation = feature_extractor_mnist(input_image)
if layer=='filter':
filter_activation = activation[:, 2:-2, 2:-2, index]
elif layer=='class':
filter_activation = activation[:, index]
else:
print('invalid class')
return
return tf.reduce_mean(filter_activation)
@tf.function
def gradient_ascent_step(img, index, learning_rate, layer, max_or_min):
with tf.GradientTape() as tape:
tape.watch(img)
loss = compute_loss(img, index, layer)
# Compute gradients.
grads = tape.gradient(loss, img)
# Normalize gradients.
grads = tf.math.l2_normalize(grads)
if max_or_min=='max':
img += learning_rate * grads
elif max_or_min=='min':
img -= learning_rate * grads
else:
print('invalid selection of learning')
return
return loss, img
def initialize_image():
# We start from a gray image with some random noise
img = tf.random.uniform((1, img_width, img_height, 1))
# ResNet50V2 expects inputs in the range [-1, +1].
# Here we scale our random inputs to [-0.125, +0.125]
return (img - 0.5) * 0.25
def visualize_filter(index, it=30, lr=10, layer='filter', max_or_min='max', type_img='noise', filename=None):
# We run gradient ascent for 20 steps
iterations = it
learning_rate = lr
if type_img=='noise':
img = initialize_image()
elif type_img=='specific':
img = plt.imread(filename).astype(np.float32)
img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2])
else:
print('invalida image type')
return
for iteration in range(iterations):
loss, img = gradient_ascent_step(img, index, learning_rate, layer, max_or_min)
# Decode the resulting input image
img = deprocess_image(img[0].numpy())
| return loss, img
def deprocess_image(img):
# Normalize array: center on 0., ensure variance is 0.15
| random_line_split | |
admission.go | 10,000 routes, need to wait up to 30 seconds for caches to sync
timeToWaitForCacheSync = 20 * time.Second
hstsAnnotation = "haproxy.router.openshift.io/hsts_header"
)
func Register(plugins *admission.Plugins) {
plugins.Register(pluginName,
func(_ io.Reader) (admission.Interface, error) {
return NewRequiredRouteAnnotations(), nil
})
}
// cacheSync guards the isSynced variable
// Once isSynced is true, we don't care about setting it anymore
type cacheSync struct {
isSyncedLock sync.RWMutex
isSynced bool
}
func (cs *cacheSync) hasSynced() bool {
cs.isSyncedLock.RLock()
defer cs.isSyncedLock.RUnlock()
return cs.isSynced
}
func (cs *cacheSync) setSynced() {
cs.isSyncedLock.Lock()
defer cs.isSyncedLock.Unlock()
cs.isSynced = true
}
type requiredRouteAnnotations struct {
*admission.Handler
routeLister routev1listers.RouteLister
nsLister corev1listers.NamespaceLister
ingressLister configv1listers.IngressLister
cachesToSync []cache.InformerSynced
cacheSyncLock cacheSync
}
// Ensure that the required OpenShift admission interfaces are implemented.
var _ = initializer.WantsExternalKubeInformerFactory(&requiredRouteAnnotations{})
var _ = admission.ValidationInterface(&requiredRouteAnnotations{})
var _ = openshiftapiserveradmission.WantsOpenShiftConfigInformers(&requiredRouteAnnotations{})
var _ = openshiftapiserveradmission.WantsOpenShiftRouteInformers(&requiredRouteAnnotations{})
var maxAgeRegExp = regexp.MustCompile(`max-age=(\d+)`)
// Validate ensures that routes specify required annotations, and returns nil if valid.
// The admission handler ensures this is only called for Create/Update operations.
func (o *requiredRouteAnnotations) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) {
if a.GetResource().GroupResource() != grouproute.Resource("routes") {
return nil
}
newRoute, isRoute := a.GetObject().(*routeapi.Route)
if !isRoute {
return nil
}
// Determine if there are HSTS changes in this update
if a.GetOperation() == admission.Update {
wants, has := false, false
var oldHSTS, newHSTS string
newHSTS, wants = newRoute.Annotations[hstsAnnotation]
oldObject := a.GetOldObject().(*routeapi.Route)
oldHSTS, has = oldObject.Annotations[hstsAnnotation]
// Skip the validation if we're not making a change to HSTS at this time
if wants == has && newHSTS == oldHSTS {
return nil
}
}
// Cannot apply HSTS if route is not TLS. Ignore silently to keep backward compatibility.
tls := newRoute.Spec.TLS
if tls == nil || (tls.Termination != routeapi.TLSTerminationEdge && tls.Termination != routeapi.TLSTerminationReencrypt) {
// TODO - will address missing annotations on routes as route status in https://issues.redhat.com/browse/NE-678
return nil
}
// Wait just once up to 20 seconds for all caches to sync
if !o.waitForSyncedStore(ctx) {
return admission.NewForbidden(a, errors.New(pluginName+": caches not synchronized"))
}
ingress, err := o.ingressLister.Get("cluster")
if err != nil {
return admission.NewForbidden(a, err)
}
namespace, err := o.nsLister.Get(newRoute.Namespace)
if err != nil {
return admission.NewForbidden(a, err)
}
if err = isRouteHSTSAllowed(ingress, newRoute, namespace); err != nil {
return admission.NewForbidden(a, err)
}
return nil
}
func (o *requiredRouteAnnotations) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) {
o.nsLister = kubeInformers.Core().V1().Namespaces().Lister()
o.cachesToSync = append(o.cachesToSync, kubeInformers.Core().V1().Namespaces().Informer().HasSynced)
}
// waitForSyncedStore calls cache.WaitForCacheSync, which will wait up to timeToWaitForCacheSync
// for the cachesToSync to synchronize.
func (o *requiredRouteAnnotations) waitForSyncedStore(ctx context.Context) bool {
syncCtx, cancelFn := context.WithTimeout(ctx, timeToWaitForCacheSync)
defer cancelFn()
if !o.cacheSyncLock.hasSynced() {
if !cache.WaitForCacheSync(syncCtx.Done(), o.cachesToSync...) {
return false
}
o.cacheSyncLock.setSynced()
}
return true
}
func (o *requiredRouteAnnotations) ValidateInitialization() error |
func NewRequiredRouteAnnotations() *requiredRouteAnnotations {
return &requiredRouteAnnotations{
Handler: admission.NewHandler(admission.Create, admission.Update),
}
}
func (o *requiredRouteAnnotations) SetOpenShiftRouteInformers(informers routeinformers.SharedInformerFactory) {
o.cachesToSync = append(o.cachesToSync, informers.Route().V1().Routes().Informer().HasSynced)
o.routeLister = informers.Route().V1().Routes().Lister()
}
func (o *requiredRouteAnnotations) SetOpenShiftConfigInformers(informers configinformers.SharedInformerFactory) {
o.cachesToSync = append(o.cachesToSync, informers.Config().V1().Ingresses().Informer().HasSynced)
o.ingressLister = informers.Config().V1().Ingresses().Lister()
}
// isRouteHSTSAllowed returns nil if the route is allowed. Otherwise, returns details and a suggestion in the error
func isRouteHSTSAllowed(ingress *configv1.Ingress, newRoute *routeapi.Route, namespace *corev1.Namespace) error {
requirements := ingress.Spec.RequiredHSTSPolicies
for _, requirement := range requirements {
// Check if the required namespaceSelector (if any) and the domainPattern match
if matches, err := requiredNamespaceDomainMatchesRoute(requirement, newRoute, namespace); err != nil {
return err
} else if !matches {
// If one of either the namespaceSelector or domain didn't match, we will continue to look
continue
}
routeHSTS, err := hstsConfigFromRoute(newRoute)
if err != nil {
return err
}
// If there is no annotation but there needs to be one, return error
if routeHSTS != nil {
if err = routeHSTS.meetsRequirements(requirement); err != nil {
return err
}
}
// Validation only checks the first matching required HSTS rule.
return nil
}
// None of the requirements matched this route's domain/namespace, it is automatically allowed
return nil
}
type hstsConfig struct {
maxAge int32
preload bool
includeSubDomains bool
}
const (
HSTSMaxAgeMissingOrWrongError = "HSTS max-age must be set correctly in HSTS annotation"
HSTSMaxAgeGreaterError = "HSTS max-age is greater than maximum age %ds"
HSTSMaxAgeLessThanError = "HSTS max-age is less than minimum age %ds"
HSTSPreloadMustError = "HSTS preload must be specified"
HSTSPreloadMustNotError = "HSTS preload must not be specified"
HSTSIncludeSubDomainsMustError = "HSTS includeSubDomains must be specified"
HSTSIncludeSubDomainsMustNotError = "HSTS includeSubDomains must not be specified"
)
// Parse out the hstsConfig fields from the annotation
// Unrecognized fields are ignored
func hstsConfigFromRoute(route *routeapi.Route) (*hstsConfig, error) {
var ret hstsConfig
trimmed := strings.ToLower(strings.ReplaceAll(route.Annotations[hstsAnnotation], " ", ""))
tokens := strings.Split(trimmed, ";")
for _, token := range tokens {
if strings.EqualFold(token, "includeSubDomains") {
ret.includeSubDomains = true
}
if strings.EqualFold(token, "preload") {
ret.preload = true
}
// unrecognized tokens are ignored
}
if match := maxAgeRegExp.FindStringSubmatch(trimmed); match != nil && len(match) > 1 {
age, err := strconv.ParseInt(match[1], 10, 32)
if err != nil {
return nil, err
}
ret.maxAge = int32(age)
} else {
return nil, fmt.Errorf(HSTSMaxAgeMissingOr | {
if o.ingressLister == nil {
return fmt.Errorf(pluginName + " plugin needs an ingress lister")
}
if o.routeLister == nil {
return fmt.Errorf(pluginName + " plugin needs a route lister")
}
if o.nsLister == nil {
return fmt.Errorf(pluginName + " plugin needs a namespace lister")
}
if len(o.cachesToSync) < 3 {
return fmt.Errorf(pluginName + " plugin missing informer synced functions")
}
return nil
} | identifier_body |
admission.go | RouteAnnotations{})
var _ = openshiftapiserveradmission.WantsOpenShiftRouteInformers(&requiredRouteAnnotations{})
var maxAgeRegExp = regexp.MustCompile(`max-age=(\d+)`)
// Validate ensures that routes specify required annotations, and returns nil if valid.
// The admission handler ensures this is only called for Create/Update operations.
func (o *requiredRouteAnnotations) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) {
if a.GetResource().GroupResource() != grouproute.Resource("routes") {
return nil
}
newRoute, isRoute := a.GetObject().(*routeapi.Route)
if !isRoute {
return nil
}
// Determine if there are HSTS changes in this update
if a.GetOperation() == admission.Update {
wants, has := false, false
var oldHSTS, newHSTS string
newHSTS, wants = newRoute.Annotations[hstsAnnotation]
oldObject := a.GetOldObject().(*routeapi.Route)
oldHSTS, has = oldObject.Annotations[hstsAnnotation]
// Skip the validation if we're not making a change to HSTS at this time
if wants == has && newHSTS == oldHSTS {
return nil
}
}
// Cannot apply HSTS if route is not TLS. Ignore silently to keep backward compatibility.
tls := newRoute.Spec.TLS
if tls == nil || (tls.Termination != routeapi.TLSTerminationEdge && tls.Termination != routeapi.TLSTerminationReencrypt) {
// TODO - will address missing annotations on routes as route status in https://issues.redhat.com/browse/NE-678
return nil
}
// Wait just once up to 20 seconds for all caches to sync
if !o.waitForSyncedStore(ctx) {
return admission.NewForbidden(a, errors.New(pluginName+": caches not synchronized"))
}
ingress, err := o.ingressLister.Get("cluster")
if err != nil {
return admission.NewForbidden(a, err)
}
namespace, err := o.nsLister.Get(newRoute.Namespace)
if err != nil {
return admission.NewForbidden(a, err)
}
if err = isRouteHSTSAllowed(ingress, newRoute, namespace); err != nil {
return admission.NewForbidden(a, err)
}
return nil
}
func (o *requiredRouteAnnotations) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) {
o.nsLister = kubeInformers.Core().V1().Namespaces().Lister()
o.cachesToSync = append(o.cachesToSync, kubeInformers.Core().V1().Namespaces().Informer().HasSynced)
}
// waitForSyncedStore calls cache.WaitForCacheSync, which will wait up to timeToWaitForCacheSync
// for the cachesToSync to synchronize.
func (o *requiredRouteAnnotations) waitForSyncedStore(ctx context.Context) bool {
syncCtx, cancelFn := context.WithTimeout(ctx, timeToWaitForCacheSync)
defer cancelFn()
if !o.cacheSyncLock.hasSynced() {
if !cache.WaitForCacheSync(syncCtx.Done(), o.cachesToSync...) {
return false
}
o.cacheSyncLock.setSynced()
}
return true
}
func (o *requiredRouteAnnotations) ValidateInitialization() error {
if o.ingressLister == nil {
return fmt.Errorf(pluginName + " plugin needs an ingress lister")
}
if o.routeLister == nil {
return fmt.Errorf(pluginName + " plugin needs a route lister")
}
if o.nsLister == nil {
return fmt.Errorf(pluginName + " plugin needs a namespace lister")
}
if len(o.cachesToSync) < 3 {
return fmt.Errorf(pluginName + " plugin missing informer synced functions")
}
return nil
}
func NewRequiredRouteAnnotations() *requiredRouteAnnotations {
return &requiredRouteAnnotations{
Handler: admission.NewHandler(admission.Create, admission.Update),
}
}
func (o *requiredRouteAnnotations) SetOpenShiftRouteInformers(informers routeinformers.SharedInformerFactory) {
o.cachesToSync = append(o.cachesToSync, informers.Route().V1().Routes().Informer().HasSynced)
o.routeLister = informers.Route().V1().Routes().Lister()
}
func (o *requiredRouteAnnotations) SetOpenShiftConfigInformers(informers configinformers.SharedInformerFactory) {
o.cachesToSync = append(o.cachesToSync, informers.Config().V1().Ingresses().Informer().HasSynced)
o.ingressLister = informers.Config().V1().Ingresses().Lister()
}
// isRouteHSTSAllowed returns nil if the route is allowed. Otherwise, returns details and a suggestion in the error
func isRouteHSTSAllowed(ingress *configv1.Ingress, newRoute *routeapi.Route, namespace *corev1.Namespace) error {
requirements := ingress.Spec.RequiredHSTSPolicies
for _, requirement := range requirements {
// Check if the required namespaceSelector (if any) and the domainPattern match
if matches, err := requiredNamespaceDomainMatchesRoute(requirement, newRoute, namespace); err != nil {
return err
} else if !matches {
// If one of either the namespaceSelector or domain didn't match, we will continue to look
continue
}
routeHSTS, err := hstsConfigFromRoute(newRoute)
if err != nil {
return err
}
// If there is no annotation but there needs to be one, return error
if routeHSTS != nil {
if err = routeHSTS.meetsRequirements(requirement); err != nil {
return err
}
}
// Validation only checks the first matching required HSTS rule.
return nil
}
// None of the requirements matched this route's domain/namespace, it is automatically allowed
return nil
}
type hstsConfig struct {
maxAge int32
preload bool
includeSubDomains bool
}
const (
HSTSMaxAgeMissingOrWrongError = "HSTS max-age must be set correctly in HSTS annotation"
HSTSMaxAgeGreaterError = "HSTS max-age is greater than maximum age %ds"
HSTSMaxAgeLessThanError = "HSTS max-age is less than minimum age %ds"
HSTSPreloadMustError = "HSTS preload must be specified"
HSTSPreloadMustNotError = "HSTS preload must not be specified"
HSTSIncludeSubDomainsMustError = "HSTS includeSubDomains must be specified"
HSTSIncludeSubDomainsMustNotError = "HSTS includeSubDomains must not be specified"
)
// Parse out the hstsConfig fields from the annotation
// Unrecognized fields are ignored
func hstsConfigFromRoute(route *routeapi.Route) (*hstsConfig, error) {
var ret hstsConfig
trimmed := strings.ToLower(strings.ReplaceAll(route.Annotations[hstsAnnotation], " ", ""))
tokens := strings.Split(trimmed, ";")
for _, token := range tokens {
if strings.EqualFold(token, "includeSubDomains") {
ret.includeSubDomains = true
}
if strings.EqualFold(token, "preload") {
ret.preload = true
}
// unrecognized tokens are ignored
}
if match := maxAgeRegExp.FindStringSubmatch(trimmed); match != nil && len(match) > 1 {
age, err := strconv.ParseInt(match[1], 10, 32)
if err != nil {
return nil, err
}
ret.maxAge = int32(age)
} else {
return nil, fmt.Errorf(HSTSMaxAgeMissingOrWrongError)
}
return &ret, nil
}
// Make sure the given requirement meets the configured HSTS policy, validating:
// - range for maxAge (existence already established)
// - preloadPolicy
// - includeSubDomainsPolicy
func (c *hstsConfig) meetsRequirements(requirement configv1.RequiredHSTSPolicy) error {
if requirement.MaxAge.LargestMaxAge != nil && c.maxAge > *requirement.MaxAge.LargestMaxAge {
return fmt.Errorf(HSTSMaxAgeGreaterError, *requirement.MaxAge.LargestMaxAge)
}
if requirement.MaxAge.SmallestMaxAge != nil && c.maxAge < *requirement.MaxAge.SmallestMaxAge {
return fmt.Errorf(HSTSMaxAgeLessThanError, *requirement.MaxAge.SmallestMaxAge)
}
switch requirement.PreloadPolicy {
case configv1.NoOpinionPreloadPolicy:
// anything is allowed, do nothing
case configv1.RequirePreloadPolicy:
if !c.preload {
return fmt.Errorf(HSTSPreloadMustError)
}
case configv1.RequireNoPreloadPolicy:
if c.preload {
return fmt.Errorf(HSTSPreloadMustNotError)
}
}
switch requirement.IncludeSubDomainsPolicy {
case configv1.NoOpinionIncludeSubDomains:
// anything is allowed, do nothing
case configv1.RequireIncludeSubDomains:
if !c.includeSubDomains {
return fmt.Errorf(HSTSIncludeSubDomainsMustError)
}
case configv1.RequireNoIncludeSubDomains:
if c.includeSubDomains | {
return fmt.Errorf(HSTSIncludeSubDomainsMustNotError)
} | conditional_block | |
admission.go | 10,000 routes, need to wait up to 30 seconds for caches to sync
timeToWaitForCacheSync = 20 * time.Second
hstsAnnotation = "haproxy.router.openshift.io/hsts_header"
)
func Register(plugins *admission.Plugins) {
plugins.Register(pluginName,
func(_ io.Reader) (admission.Interface, error) {
return NewRequiredRouteAnnotations(), nil
})
}
// cacheSync guards the isSynced variable
// Once isSynced is true, we don't care about setting it anymore
type cacheSync struct {
isSyncedLock sync.RWMutex
isSynced bool
}
func (cs *cacheSync) hasSynced() bool {
cs.isSyncedLock.RLock()
defer cs.isSyncedLock.RUnlock()
return cs.isSynced
}
func (cs *cacheSync) setSynced() {
cs.isSyncedLock.Lock()
defer cs.isSyncedLock.Unlock()
cs.isSynced = true
}
type requiredRouteAnnotations struct {
*admission.Handler
routeLister routev1listers.RouteLister
nsLister corev1listers.NamespaceLister
ingressLister configv1listers.IngressLister
cachesToSync []cache.InformerSynced
cacheSyncLock cacheSync
}
// Ensure that the required OpenShift admission interfaces are implemented.
var _ = initializer.WantsExternalKubeInformerFactory(&requiredRouteAnnotations{})
var _ = admission.ValidationInterface(&requiredRouteAnnotations{})
var _ = openshiftapiserveradmission.WantsOpenShiftConfigInformers(&requiredRouteAnnotations{})
var _ = openshiftapiserveradmission.WantsOpenShiftRouteInformers(&requiredRouteAnnotations{})
var maxAgeRegExp = regexp.MustCompile(`max-age=(\d+)`)
// Validate ensures that routes specify required annotations, and returns nil if valid.
// The admission handler ensures this is only called for Create/Update operations.
func (o *requiredRouteAnnotations) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) {
if a.GetResource().GroupResource() != grouproute.Resource("routes") {
return nil
}
newRoute, isRoute := a.GetObject().(*routeapi.Route)
if !isRoute {
return nil
}
// Determine if there are HSTS changes in this update
if a.GetOperation() == admission.Update {
wants, has := false, false
var oldHSTS, newHSTS string
newHSTS, wants = newRoute.Annotations[hstsAnnotation]
oldObject := a.GetOldObject().(*routeapi.Route)
oldHSTS, has = oldObject.Annotations[hstsAnnotation]
// Skip the validation if we're not making a change to HSTS at this time
if wants == has && newHSTS == oldHSTS {
return nil
}
}
// Cannot apply HSTS if route is not TLS. Ignore silently to keep backward compatibility.
tls := newRoute.Spec.TLS
if tls == nil || (tls.Termination != routeapi.TLSTerminationEdge && tls.Termination != routeapi.TLSTerminationReencrypt) {
// TODO - will address missing annotations on routes as route status in https://issues.redhat.com/browse/NE-678
return nil
}
// Wait just once up to 20 seconds for all caches to sync
if !o.waitForSyncedStore(ctx) {
return admission.NewForbidden(a, errors.New(pluginName+": caches not synchronized"))
}
ingress, err := o.ingressLister.Get("cluster")
if err != nil {
return admission.NewForbidden(a, err)
}
namespace, err := o.nsLister.Get(newRoute.Namespace)
if err != nil {
return admission.NewForbidden(a, err)
}
if err = isRouteHSTSAllowed(ingress, newRoute, namespace); err != nil {
return admission.NewForbidden(a, err)
}
return nil
}
func (o *requiredRouteAnnotations) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) {
o.nsLister = kubeInformers.Core().V1().Namespaces().Lister()
o.cachesToSync = append(o.cachesToSync, kubeInformers.Core().V1().Namespaces().Informer().HasSynced)
}
// waitForSyncedStore calls cache.WaitForCacheSync, which will wait up to timeToWaitForCacheSync
// for the cachesToSync to synchronize.
func (o *requiredRouteAnnotations) | (ctx context.Context) bool {
syncCtx, cancelFn := context.WithTimeout(ctx, timeToWaitForCacheSync)
defer cancelFn()
if !o.cacheSyncLock.hasSynced() {
if !cache.WaitForCacheSync(syncCtx.Done(), o.cachesToSync...) {
return false
}
o.cacheSyncLock.setSynced()
}
return true
}
func (o *requiredRouteAnnotations) ValidateInitialization() error {
if o.ingressLister == nil {
return fmt.Errorf(pluginName + " plugin needs an ingress lister")
}
if o.routeLister == nil {
return fmt.Errorf(pluginName + " plugin needs a route lister")
}
if o.nsLister == nil {
return fmt.Errorf(pluginName + " plugin needs a namespace lister")
}
if len(o.cachesToSync) < 3 {
return fmt.Errorf(pluginName + " plugin missing informer synced functions")
}
return nil
}
func NewRequiredRouteAnnotations() *requiredRouteAnnotations {
return &requiredRouteAnnotations{
Handler: admission.NewHandler(admission.Create, admission.Update),
}
}
func (o *requiredRouteAnnotations) SetOpenShiftRouteInformers(informers routeinformers.SharedInformerFactory) {
o.cachesToSync = append(o.cachesToSync, informers.Route().V1().Routes().Informer().HasSynced)
o.routeLister = informers.Route().V1().Routes().Lister()
}
func (o *requiredRouteAnnotations) SetOpenShiftConfigInformers(informers configinformers.SharedInformerFactory) {
o.cachesToSync = append(o.cachesToSync, informers.Config().V1().Ingresses().Informer().HasSynced)
o.ingressLister = informers.Config().V1().Ingresses().Lister()
}
// isRouteHSTSAllowed returns nil if the route is allowed. Otherwise, returns details and a suggestion in the error
func isRouteHSTSAllowed(ingress *configv1.Ingress, newRoute *routeapi.Route, namespace *corev1.Namespace) error {
requirements := ingress.Spec.RequiredHSTSPolicies
for _, requirement := range requirements {
// Check if the required namespaceSelector (if any) and the domainPattern match
if matches, err := requiredNamespaceDomainMatchesRoute(requirement, newRoute, namespace); err != nil {
return err
} else if !matches {
// If one of either the namespaceSelector or domain didn't match, we will continue to look
continue
}
routeHSTS, err := hstsConfigFromRoute(newRoute)
if err != nil {
return err
}
// If there is no annotation but there needs to be one, return error
if routeHSTS != nil {
if err = routeHSTS.meetsRequirements(requirement); err != nil {
return err
}
}
// Validation only checks the first matching required HSTS rule.
return nil
}
// None of the requirements matched this route's domain/namespace, it is automatically allowed
return nil
}
type hstsConfig struct {
maxAge int32
preload bool
includeSubDomains bool
}
const (
HSTSMaxAgeMissingOrWrongError = "HSTS max-age must be set correctly in HSTS annotation"
HSTSMaxAgeGreaterError = "HSTS max-age is greater than maximum age %ds"
HSTSMaxAgeLessThanError = "HSTS max-age is less than minimum age %ds"
HSTSPreloadMustError = "HSTS preload must be specified"
HSTSPreloadMustNotError = "HSTS preload must not be specified"
HSTSIncludeSubDomainsMustError = "HSTS includeSubDomains must be specified"
HSTSIncludeSubDomainsMustNotError = "HSTS includeSubDomains must not be specified"
)
// Parse out the hstsConfig fields from the annotation
// Unrecognized fields are ignored
func hstsConfigFromRoute(route *routeapi.Route) (*hstsConfig, error) {
var ret hstsConfig
trimmed := strings.ToLower(strings.ReplaceAll(route.Annotations[hstsAnnotation], " ", ""))
tokens := strings.Split(trimmed, ";")
for _, token := range tokens {
if strings.EqualFold(token, "includeSubDomains") {
ret.includeSubDomains = true
}
if strings.EqualFold(token, "preload") {
ret.preload = true
}
// unrecognized tokens are ignored
}
if match := maxAgeRegExp.FindStringSubmatch(trimmed); match != nil && len(match) > 1 {
age, err := strconv.ParseInt(match[1], 10, 32)
if err != nil {
return nil, err
}
ret.maxAge = int32(age)
} else {
return nil, fmt.Errorf(HSTSMaxAgeMissingOrWrong | waitForSyncedStore | identifier_name |
admission.go | with 10,000 routes, need to wait up to 30 seconds for caches to sync
timeToWaitForCacheSync = 20 * time.Second
hstsAnnotation = "haproxy.router.openshift.io/hsts_header"
)
func Register(plugins *admission.Plugins) {
plugins.Register(pluginName,
func(_ io.Reader) (admission.Interface, error) {
return NewRequiredRouteAnnotations(), nil
})
}
// cacheSync guards the isSynced variable
// Once isSynced is true, we don't care about setting it anymore
type cacheSync struct {
isSyncedLock sync.RWMutex
isSynced bool
}
func (cs *cacheSync) hasSynced() bool {
cs.isSyncedLock.RLock()
defer cs.isSyncedLock.RUnlock()
return cs.isSynced
}
func (cs *cacheSync) setSynced() {
cs.isSyncedLock.Lock()
defer cs.isSyncedLock.Unlock()
cs.isSynced = true
}
type requiredRouteAnnotations struct {
*admission.Handler
routeLister routev1listers.RouteLister
nsLister corev1listers.NamespaceLister
ingressLister configv1listers.IngressLister
cachesToSync []cache.InformerSynced
cacheSyncLock cacheSync
}
// Ensure that the required OpenShift admission interfaces are implemented.
var _ = initializer.WantsExternalKubeInformerFactory(&requiredRouteAnnotations{})
var _ = admission.ValidationInterface(&requiredRouteAnnotations{})
var _ = openshiftapiserveradmission.WantsOpenShiftConfigInformers(&requiredRouteAnnotations{})
var _ = openshiftapiserveradmission.WantsOpenShiftRouteInformers(&requiredRouteAnnotations{})
var maxAgeRegExp = regexp.MustCompile(`max-age=(\d+)`)
// Validate ensures that routes specify required annotations, and returns nil if valid.
// The admission handler ensures this is only called for Create/Update operations.
func (o *requiredRouteAnnotations) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) {
if a.GetResource().GroupResource() != grouproute.Resource("routes") {
return nil
}
newRoute, isRoute := a.GetObject().(*routeapi.Route)
if !isRoute {
return nil
}
// Determine if there are HSTS changes in this update
if a.GetOperation() == admission.Update {
wants, has := false, false
var oldHSTS, newHSTS string
newHSTS, wants = newRoute.Annotations[hstsAnnotation]
oldObject := a.GetOldObject().(*routeapi.Route)
oldHSTS, has = oldObject.Annotations[hstsAnnotation]
// Skip the validation if we're not making a change to HSTS at this time
if wants == has && newHSTS == oldHSTS {
return nil
}
}
// Cannot apply HSTS if route is not TLS. Ignore silently to keep backward compatibility.
tls := newRoute.Spec.TLS
if tls == nil || (tls.Termination != routeapi.TLSTerminationEdge && tls.Termination != routeapi.TLSTerminationReencrypt) {
// TODO - will address missing annotations on routes as route status in https://issues.redhat.com/browse/NE-678
return nil
}
// Wait just once up to 20 seconds for all caches to sync
if !o.waitForSyncedStore(ctx) {
return admission.NewForbidden(a, errors.New(pluginName+": caches not synchronized"))
}
ingress, err := o.ingressLister.Get("cluster")
if err != nil {
return admission.NewForbidden(a, err)
}
namespace, err := o.nsLister.Get(newRoute.Namespace)
if err != nil {
return admission.NewForbidden(a, err)
}
if err = isRouteHSTSAllowed(ingress, newRoute, namespace); err != nil {
return admission.NewForbidden(a, err)
}
return nil
}
func (o *requiredRouteAnnotations) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) {
o.nsLister = kubeInformers.Core().V1().Namespaces().Lister()
o.cachesToSync = append(o.cachesToSync, kubeInformers.Core().V1().Namespaces().Informer().HasSynced)
}
// waitForSyncedStore calls cache.WaitForCacheSync, which will wait up to timeToWaitForCacheSync
// for the cachesToSync to synchronize.
func (o *requiredRouteAnnotations) waitForSyncedStore(ctx context.Context) bool {
syncCtx, cancelFn := context.WithTimeout(ctx, timeToWaitForCacheSync)
defer cancelFn()
if !o.cacheSyncLock.hasSynced() {
if !cache.WaitForCacheSync(syncCtx.Done(), o.cachesToSync...) {
return false
}
o.cacheSyncLock.setSynced()
}
return true
}
func (o *requiredRouteAnnotations) ValidateInitialization() error {
if o.ingressLister == nil {
return fmt.Errorf(pluginName + " plugin needs an ingress lister")
}
if o.routeLister == nil {
return fmt.Errorf(pluginName + " plugin needs a route lister")
}
if o.nsLister == nil {
return fmt.Errorf(pluginName + " plugin needs a namespace lister")
}
if len(o.cachesToSync) < 3 {
return fmt.Errorf(pluginName + " plugin missing informer synced functions")
}
return nil
}
func NewRequiredRouteAnnotations() *requiredRouteAnnotations {
return &requiredRouteAnnotations{
Handler: admission.NewHandler(admission.Create, admission.Update),
}
}
func (o *requiredRouteAnnotations) SetOpenShiftRouteInformers(informers routeinformers.SharedInformerFactory) {
o.cachesToSync = append(o.cachesToSync, informers.Route().V1().Routes().Informer().HasSynced)
o.routeLister = informers.Route().V1().Routes().Lister()
}
func (o *requiredRouteAnnotations) SetOpenShiftConfigInformers(informers configinformers.SharedInformerFactory) {
o.cachesToSync = append(o.cachesToSync, informers.Config().V1().Ingresses().Informer().HasSynced)
o.ingressLister = informers.Config().V1().Ingresses().Lister()
}
// isRouteHSTSAllowed returns nil if the route is allowed. Otherwise, returns details and a suggestion in the error
func isRouteHSTSAllowed(ingress *configv1.Ingress, newRoute *routeapi.Route, namespace *corev1.Namespace) error {
requirements := ingress.Spec.RequiredHSTSPolicies
for _, requirement := range requirements {
// Check if the required namespaceSelector (if any) and the domainPattern match
if matches, err := requiredNamespaceDomainMatchesRoute(requirement, newRoute, namespace); err != nil {
return err
} else if !matches {
// If one of either the namespaceSelector or domain didn't match, we will continue to look
continue
}
routeHSTS, err := hstsConfigFromRoute(newRoute)
if err != nil {
return err
}
// If there is no annotation but there needs to be one, return error
if routeHSTS != nil {
if err = routeHSTS.meetsRequirements(requirement); err != nil {
return err
}
}
// Validation only checks the first matching required HSTS rule.
return nil | return nil
}
type hstsConfig struct {
maxAge int32
preload bool
includeSubDomains bool
}
const (
HSTSMaxAgeMissingOrWrongError = "HSTS max-age must be set correctly in HSTS annotation"
HSTSMaxAgeGreaterError = "HSTS max-age is greater than maximum age %ds"
HSTSMaxAgeLessThanError = "HSTS max-age is less than minimum age %ds"
HSTSPreloadMustError = "HSTS preload must be specified"
HSTSPreloadMustNotError = "HSTS preload must not be specified"
HSTSIncludeSubDomainsMustError = "HSTS includeSubDomains must be specified"
HSTSIncludeSubDomainsMustNotError = "HSTS includeSubDomains must not be specified"
)
// Parse out the hstsConfig fields from the annotation
// Unrecognized fields are ignored
func hstsConfigFromRoute(route *routeapi.Route) (*hstsConfig, error) {
var ret hstsConfig
trimmed := strings.ToLower(strings.ReplaceAll(route.Annotations[hstsAnnotation], " ", ""))
tokens := strings.Split(trimmed, ";")
for _, token := range tokens {
if strings.EqualFold(token, "includeSubDomains") {
ret.includeSubDomains = true
}
if strings.EqualFold(token, "preload") {
ret.preload = true
}
// unrecognized tokens are ignored
}
if match := maxAgeRegExp.FindStringSubmatch(trimmed); match != nil && len(match) > 1 {
age, err := strconv.ParseInt(match[1], 10, 32)
if err != nil {
return nil, err
}
ret.maxAge = int32(age)
} else {
return nil, fmt.Errorf(HSTSMaxAgeMissingOrWrongError)
| }
// None of the requirements matched this route's domain/namespace, it is automatically allowed | random_line_split |
netlink_linux.go |
func (f *ipvsFlags) Serialize() []byte {
return (*(*[unsafe.Sizeof(*f)]byte)(unsafe.Pointer(f)))[:]
}
func (f *ipvsFlags) Len() int {
return int(unsafe.Sizeof(*f))
}
func setup() {
ipvsOnce.Do(func() {
var err error
if out, err := exec.Command("modprobe", "-va", "ip_vs").CombinedOutput(); err != nil {
logrus.Warnf("Running modprobe ip_vs failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err)
}
ipvsFamily, err = getIPVSFamily()
if err != nil {
logrus.Error("Could not get ipvs family information from the kernel. It is possible that ipvs is not enabled in your kernel. Native loadbalancing will not work until this is fixed.")
}
})
}
func fillService(s *Service) nl.NetlinkRequestData {
cmdAttr := nl.NewRtAttr(ipvsCmdAttrService, nil)
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrAddressFamily, nl.Uint16Attr(s.AddressFamily))
if s.FWMark != 0 {
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrFWMark, nl.Uint32Attr(s.FWMark))
} else {
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrProtocol, nl.Uint16Attr(s.Protocol))
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrAddress, rawIPData(s.Address))
// Port needs to be in network byte order.
portBuf := new(bytes.Buffer)
binary.Write(portBuf, binary.BigEndian, s.Port)
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrPort, portBuf.Bytes())
}
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrSchedName, nl.ZeroTerminated(s.SchedName))
if s.PEName != "" {
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrPEName, nl.ZeroTerminated(s.PEName))
}
f := &ipvsFlags{
flags: s.Flags,
mask: 0xFFFFFFFF,
}
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrFlags, f.Serialize())
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrTimeout, nl.Uint32Attr(s.Timeout))
nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrNetmask, nl.Uint32Attr(s.Netmask))
return cmdAttr
}
func fillDestination(d *Destination) nl.NetlinkRequestData {
cmdAttr := nl.NewRtAttr(ipvsCmdAttrDest, nil)
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrAddress, rawIPData(d.Address))
// Port needs to be in network byte order.
portBuf := new(bytes.Buffer)
binary.Write(portBuf, binary.BigEndian, d.Port)
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrPort, portBuf.Bytes())
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrForwardingMethod, nl.Uint32Attr(d.ConnectionFlags&ConnectionFlagFwdMask))
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrWeight, nl.Uint32Attr(uint32(d.Weight)))
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrUpperThreshold, nl.Uint32Attr(d.UpperThreshold))
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrLowerThreshold, nl.Uint32Attr(d.LowerThreshold))
return cmdAttr
}
func (i *Handle) doCmdwithResponse(s *Service, d *Destination, cmd uint8) ([][]byte, error) {
req := newIPVSRequest(cmd)
req.Seq = atomic.AddUint32(&i.seq, 1)
if s == nil {
req.Flags |= syscall.NLM_F_DUMP // Flag to dump all messages
req.AddData(nl.NewRtAttr(ipvsCmdAttrService, nil)) // Add a dummy attribute
} else {
req.AddData(fillService(s))
}
if d == nil {
if cmd == ipvsCmdGetDest {
req.Flags |= syscall.NLM_F_DUMP
}
} else {
req.AddData(fillDestination(d))
}
res, err := execute(i.sock, req, 0)
if err != nil {
return [][]byte{}, err
}
return res, nil
}
func (i *Handle) doCmd(s *Service, d *Destination, cmd uint8) error {
_, err := i.doCmdwithResponse(s, d, cmd)
return err
}
func getIPVSFamily() (int, error) {
sock, err := nl.GetNetlinkSocketAt(netns.None(), netns.None(), syscall.NETLINK_GENERIC)
if err != nil {
return 0, err
}
defer sock.Close()
req := newGenlRequest(genlCtrlID, genlCtrlCmdGetFamily)
req.AddData(nl.NewRtAttr(genlCtrlAttrFamilyName, nl.ZeroTerminated("IPVS")))
msgs, err := execute(sock, req, 0)
if err != nil {
return 0, err
}
for _, m := range msgs {
hdr := deserializeGenlMsg(m)
attrs, err := nl.ParseRouteAttr(m[hdr.Len():])
if err != nil {
return 0, err
}
for _, attr := range attrs {
switch int(attr.Attr.Type) {
case genlCtrlAttrFamilyID:
return int(native.Uint16(attr.Value[0:2])), nil
}
}
}
return 0, fmt.Errorf("no family id in the netlink response")
}
func rawIPData(ip net.IP) []byte {
family := nl.GetIPFamily(ip)
if family == nl.FAMILY_V4 {
return ip.To4()
}
return ip
}
func newIPVSRequest(cmd uint8) *nl.NetlinkRequest {
return newGenlRequest(ipvsFamily, cmd)
}
func newGenlRequest(familyID int, cmd uint8) *nl.NetlinkRequest {
req := nl.NewNetlinkRequest(familyID, syscall.NLM_F_ACK)
req.AddData(&genlMsgHdr{cmd: cmd, version: 1})
return req
}
func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) {
if err := s.Send(req); err != nil {
return nil, err
}
pid, err := s.GetPid()
if err != nil {
return nil, err
}
var res [][]byte
done:
for {
msgs, _, err := s.Receive()
if err != nil {
if s.GetFd() == -1 {
return nil, fmt.Errorf("Socket got closed on receive")
}
if err == syscall.EAGAIN {
// timeout fired
continue
}
return nil, err
}
for _, m := range msgs {
if m.Header.Seq != req.Seq {
continue
}
if m.Header.Pid != pid {
return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
}
if m.Header.Type == syscall.NLMSG_DONE {
break done
}
if m.Header.Type == syscall.NLMSG_ERROR {
error := int32(native.Uint32(m.Data[0:4]))
if error == 0 {
break done
}
return nil, syscall.Errno(-error)
}
if resType != 0 && m.Header.Type != resType {
continue
}
res = append(res, m.Data)
if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
break done
}
}
}
return res, nil
}
func parseIP(ip []byte, family uint16) (net.IP, error) {
var resIP net.IP
switch family {
case syscall.AF_INET:
resIP = (net.IP)(ip[:4])
case syscall.AF_INET6:
resIP = (net.IP)(ip[:16])
default:
return nil, fmt.Errorf("parseIP Error ip=%v", ip)
}
return resIP, nil
}
// parseStats
func assembleStats(msg []byte) (SvcStats, error) {
var s SvcStats
attrs, err := nl.ParseRouteAttr(msg)
if err != nil {
return s, err
}
for _, attr := range attrs {
attrType := int(attr.Attr.Type)
switch attrType {
case ipvsStatsConns:
s.Connections = native.Uint32(attr.Value)
case ipvsStatsPktsIn:
s.PacketsIn = native.Uint32(attr.Value)
case ipvsStatsPktsOut:
s.PacketsOut = native.Uint32(attr.Value)
case ipvsStatsBytesIn:
s.BytesIn = native.Uint64(attr.Value)
case ipvsStatsBytesOut:
s.BytesOut = native.Uint64(attr.Value)
case ipvsStatsCPS:
s.CPS = native.Uint32(attr.Value)
case ipvsStatsPPSIn:
s.PPSIn = native.Uint32(attr.Value)
case ipvsStatsPPSOut:
s | {
return int(unsafe.Sizeof(*hdr))
} | identifier_body | |
netlink_linux.go | tAttrChild(cmdAttr, ipvsDestAttrWeight, nl.Uint32Attr(uint32(d.Weight)))
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrUpperThreshold, nl.Uint32Attr(d.UpperThreshold))
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrLowerThreshold, nl.Uint32Attr(d.LowerThreshold))
return cmdAttr
}
func (i *Handle) doCmdwithResponse(s *Service, d *Destination, cmd uint8) ([][]byte, error) {
req := newIPVSRequest(cmd)
req.Seq = atomic.AddUint32(&i.seq, 1)
if s == nil {
req.Flags |= syscall.NLM_F_DUMP // Flag to dump all messages
req.AddData(nl.NewRtAttr(ipvsCmdAttrService, nil)) // Add a dummy attribute
} else {
req.AddData(fillService(s))
}
if d == nil {
if cmd == ipvsCmdGetDest {
req.Flags |= syscall.NLM_F_DUMP
}
} else {
req.AddData(fillDestination(d))
}
res, err := execute(i.sock, req, 0)
if err != nil {
return [][]byte{}, err
}
return res, nil
}
func (i *Handle) doCmd(s *Service, d *Destination, cmd uint8) error {
_, err := i.doCmdwithResponse(s, d, cmd)
return err
}
func getIPVSFamily() (int, error) {
sock, err := nl.GetNetlinkSocketAt(netns.None(), netns.None(), syscall.NETLINK_GENERIC)
if err != nil {
return 0, err
}
defer sock.Close()
req := newGenlRequest(genlCtrlID, genlCtrlCmdGetFamily)
req.AddData(nl.NewRtAttr(genlCtrlAttrFamilyName, nl.ZeroTerminated("IPVS")))
msgs, err := execute(sock, req, 0)
if err != nil {
return 0, err
}
for _, m := range msgs {
hdr := deserializeGenlMsg(m)
attrs, err := nl.ParseRouteAttr(m[hdr.Len():])
if err != nil {
return 0, err
}
for _, attr := range attrs |
}
return 0, fmt.Errorf("no family id in the netlink response")
}
func rawIPData(ip net.IP) []byte {
family := nl.GetIPFamily(ip)
if family == nl.FAMILY_V4 {
return ip.To4()
}
return ip
}
func newIPVSRequest(cmd uint8) *nl.NetlinkRequest {
return newGenlRequest(ipvsFamily, cmd)
}
func newGenlRequest(familyID int, cmd uint8) *nl.NetlinkRequest {
req := nl.NewNetlinkRequest(familyID, syscall.NLM_F_ACK)
req.AddData(&genlMsgHdr{cmd: cmd, version: 1})
return req
}
func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) {
if err := s.Send(req); err != nil {
return nil, err
}
pid, err := s.GetPid()
if err != nil {
return nil, err
}
var res [][]byte
done:
for {
msgs, _, err := s.Receive()
if err != nil {
if s.GetFd() == -1 {
return nil, fmt.Errorf("Socket got closed on receive")
}
if err == syscall.EAGAIN {
// timeout fired
continue
}
return nil, err
}
for _, m := range msgs {
if m.Header.Seq != req.Seq {
continue
}
if m.Header.Pid != pid {
return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
}
if m.Header.Type == syscall.NLMSG_DONE {
break done
}
if m.Header.Type == syscall.NLMSG_ERROR {
error := int32(native.Uint32(m.Data[0:4]))
if error == 0 {
break done
}
return nil, syscall.Errno(-error)
}
if resType != 0 && m.Header.Type != resType {
continue
}
res = append(res, m.Data)
if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
break done
}
}
}
return res, nil
}
func parseIP(ip []byte, family uint16) (net.IP, error) {
var resIP net.IP
switch family {
case syscall.AF_INET:
resIP = (net.IP)(ip[:4])
case syscall.AF_INET6:
resIP = (net.IP)(ip[:16])
default:
return nil, fmt.Errorf("parseIP Error ip=%v", ip)
}
return resIP, nil
}
// parseStats
func assembleStats(msg []byte) (SvcStats, error) {
var s SvcStats
attrs, err := nl.ParseRouteAttr(msg)
if err != nil {
return s, err
}
for _, attr := range attrs {
attrType := int(attr.Attr.Type)
switch attrType {
case ipvsStatsConns:
s.Connections = native.Uint32(attr.Value)
case ipvsStatsPktsIn:
s.PacketsIn = native.Uint32(attr.Value)
case ipvsStatsPktsOut:
s.PacketsOut = native.Uint32(attr.Value)
case ipvsStatsBytesIn:
s.BytesIn = native.Uint64(attr.Value)
case ipvsStatsBytesOut:
s.BytesOut = native.Uint64(attr.Value)
case ipvsStatsCPS:
s.CPS = native.Uint32(attr.Value)
case ipvsStatsPPSIn:
s.PPSIn = native.Uint32(attr.Value)
case ipvsStatsPPSOut:
s.PPSOut = native.Uint32(attr.Value)
case ipvsStatsBPSIn:
s.BPSIn = native.Uint32(attr.Value)
case ipvsStatsBPSOut:
s.BPSOut = native.Uint32(attr.Value)
}
}
return s, nil
}
// assembleService assembles a services back from a hain of netlink attributes
func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {
var s Service
var addressBytes []byte
for _, attr := range attrs {
attrType := int(attr.Attr.Type)
switch attrType {
case ipvsSvcAttrAddressFamily:
s.AddressFamily = native.Uint16(attr.Value)
case ipvsSvcAttrProtocol:
s.Protocol = native.Uint16(attr.Value)
case ipvsSvcAttrAddress:
addressBytes = attr.Value
case ipvsSvcAttrPort:
s.Port = binary.BigEndian.Uint16(attr.Value)
case ipvsSvcAttrFWMark:
s.FWMark = native.Uint32(attr.Value)
case ipvsSvcAttrSchedName:
s.SchedName = nl.BytesToString(attr.Value)
case ipvsSvcAttrFlags:
s.Flags = native.Uint32(attr.Value)
case ipvsSvcAttrTimeout:
s.Timeout = native.Uint32(attr.Value)
case ipvsSvcAttrNetmask:
s.Netmask = native.Uint32(attr.Value)
case ipvsSvcAttrStats:
stats, err := assembleStats(attr.Value)
if err != nil {
return nil, err
}
s.Stats = stats
}
}
// parse Address after parse AddressFamily incase of parseIP error
if addressBytes != nil {
ip, err := parseIP(addressBytes, s.AddressFamily)
if err != nil {
return nil, err
}
s.Address = ip
}
return &s, nil
}
// parseService given a ipvs netlink response this function will respond with a valid service entry, an error otherwise
func (i *Handle) parseService(msg []byte) (*Service, error) {
var s *Service
// Remove General header for this message and parse the NetLink message
hdr := deserializeGenlMsg(msg)
NetLinkAttrs, err := nl.ParseRouteAttr(msg[hdr.Len():])
if err != nil {
return nil, err
}
if len(NetLinkAttrs) == 0 {
return nil, fmt.Errorf("error no valid netlink message found while parsing service record")
}
// Now Parse and get IPVS related attributes messages packed in this message.
ipvsAttrs, err := nl.ParseRouteAttr(NetLinkAttrs[0].Value)
if err != nil {
return nil, err
}
// Assemble all the IPVS related attribute messages and create a service record
s, err = assembleService(ipvsAttrs)
if err != nil {
return nil, err
}
return s, nil
}
// doGetServicesCmd a wrapper which could be used commonly for both GetServices() and GetService(*Service)
func (i *Handle) doGetServicesCmd(svc *Service) ([]*Service, error) {
var res []*Service
msgs, err := i | {
switch int(attr.Attr.Type) {
case genlCtrlAttrFamilyID:
return int(native.Uint16(attr.Value[0:2])), nil
}
} | conditional_block |
netlink_linux.go | uint8) ([][]byte, error) {
req := newIPVSRequest(cmd)
req.Seq = atomic.AddUint32(&i.seq, 1)
if s == nil {
req.Flags |= syscall.NLM_F_DUMP // Flag to dump all messages
req.AddData(nl.NewRtAttr(ipvsCmdAttrService, nil)) // Add a dummy attribute
} else {
req.AddData(fillService(s))
}
if d == nil {
if cmd == ipvsCmdGetDest {
req.Flags |= syscall.NLM_F_DUMP
}
} else {
req.AddData(fillDestination(d))
}
res, err := execute(i.sock, req, 0)
if err != nil {
return [][]byte{}, err
}
return res, nil
}
func (i *Handle) doCmd(s *Service, d *Destination, cmd uint8) error {
_, err := i.doCmdwithResponse(s, d, cmd)
return err
}
func getIPVSFamily() (int, error) {
sock, err := nl.GetNetlinkSocketAt(netns.None(), netns.None(), syscall.NETLINK_GENERIC)
if err != nil {
return 0, err
}
defer sock.Close()
req := newGenlRequest(genlCtrlID, genlCtrlCmdGetFamily)
req.AddData(nl.NewRtAttr(genlCtrlAttrFamilyName, nl.ZeroTerminated("IPVS")))
msgs, err := execute(sock, req, 0)
if err != nil {
return 0, err
}
for _, m := range msgs {
hdr := deserializeGenlMsg(m)
attrs, err := nl.ParseRouteAttr(m[hdr.Len():])
if err != nil {
return 0, err
}
for _, attr := range attrs {
switch int(attr.Attr.Type) {
case genlCtrlAttrFamilyID:
return int(native.Uint16(attr.Value[0:2])), nil
}
}
}
return 0, fmt.Errorf("no family id in the netlink response")
}
func rawIPData(ip net.IP) []byte {
family := nl.GetIPFamily(ip)
if family == nl.FAMILY_V4 {
return ip.To4()
}
return ip
}
func newIPVSRequest(cmd uint8) *nl.NetlinkRequest {
return newGenlRequest(ipvsFamily, cmd)
}
func newGenlRequest(familyID int, cmd uint8) *nl.NetlinkRequest {
req := nl.NewNetlinkRequest(familyID, syscall.NLM_F_ACK)
req.AddData(&genlMsgHdr{cmd: cmd, version: 1})
return req
}
func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) {
if err := s.Send(req); err != nil {
return nil, err
}
pid, err := s.GetPid()
if err != nil {
return nil, err
}
var res [][]byte
done:
for {
msgs, _, err := s.Receive()
if err != nil {
if s.GetFd() == -1 {
return nil, fmt.Errorf("Socket got closed on receive")
}
if err == syscall.EAGAIN {
// timeout fired
continue
}
return nil, err
}
for _, m := range msgs {
if m.Header.Seq != req.Seq {
continue
}
if m.Header.Pid != pid {
return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
}
if m.Header.Type == syscall.NLMSG_DONE {
break done
}
if m.Header.Type == syscall.NLMSG_ERROR {
error := int32(native.Uint32(m.Data[0:4]))
if error == 0 {
break done
}
return nil, syscall.Errno(-error)
}
if resType != 0 && m.Header.Type != resType {
continue
}
res = append(res, m.Data)
if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
break done
}
}
}
return res, nil
}
func parseIP(ip []byte, family uint16) (net.IP, error) {
var resIP net.IP
switch family {
case syscall.AF_INET:
resIP = (net.IP)(ip[:4])
case syscall.AF_INET6:
resIP = (net.IP)(ip[:16])
default:
return nil, fmt.Errorf("parseIP Error ip=%v", ip)
}
return resIP, nil
}
// parseStats
func assembleStats(msg []byte) (SvcStats, error) {
var s SvcStats
attrs, err := nl.ParseRouteAttr(msg)
if err != nil {
return s, err
}
for _, attr := range attrs {
attrType := int(attr.Attr.Type)
switch attrType {
case ipvsStatsConns:
s.Connections = native.Uint32(attr.Value)
case ipvsStatsPktsIn:
s.PacketsIn = native.Uint32(attr.Value)
case ipvsStatsPktsOut:
s.PacketsOut = native.Uint32(attr.Value)
case ipvsStatsBytesIn:
s.BytesIn = native.Uint64(attr.Value)
case ipvsStatsBytesOut:
s.BytesOut = native.Uint64(attr.Value)
case ipvsStatsCPS:
s.CPS = native.Uint32(attr.Value)
case ipvsStatsPPSIn:
s.PPSIn = native.Uint32(attr.Value)
case ipvsStatsPPSOut:
s.PPSOut = native.Uint32(attr.Value)
case ipvsStatsBPSIn:
s.BPSIn = native.Uint32(attr.Value)
case ipvsStatsBPSOut:
s.BPSOut = native.Uint32(attr.Value)
}
}
return s, nil
}
// assembleService assembles a service back from a chain of netlink attributes.
func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {
	var s Service
	// The raw address bytes are remembered and decoded only after the whole
	// attribute list has been walked: parseIP needs AddressFamily, which may
	// arrive after the address attribute.
	var addressBytes []byte
	for _, attr := range attrs {
		attrType := int(attr.Attr.Type)
		switch attrType {
		case ipvsSvcAttrAddressFamily:
			s.AddressFamily = native.Uint16(attr.Value)
		case ipvsSvcAttrProtocol:
			s.Protocol = native.Uint16(attr.Value)
		case ipvsSvcAttrAddress:
			addressBytes = attr.Value
		case ipvsSvcAttrPort:
			// The port travels in network byte order, unlike the other fields
			// which use host ("native") byte order.
			s.Port = binary.BigEndian.Uint16(attr.Value)
		case ipvsSvcAttrFWMark:
			s.FWMark = native.Uint32(attr.Value)
		case ipvsSvcAttrSchedName:
			s.SchedName = nl.BytesToString(attr.Value)
		case ipvsSvcAttrFlags:
			s.Flags = native.Uint32(attr.Value)
		case ipvsSvcAttrTimeout:
			s.Timeout = native.Uint32(attr.Value)
		case ipvsSvcAttrNetmask:
			s.Netmask = native.Uint32(attr.Value)
		case ipvsSvcAttrStats:
			stats, err := assembleStats(attr.Value)
			if err != nil {
				return nil, err
			}
			s.Stats = stats
		}
	}
	// parse Address after parsing AddressFamily, in case of parseIP error
	if addressBytes != nil {
		ip, err := parseIP(addressBytes, s.AddressFamily)
		if err != nil {
			return nil, err
		}
		s.Address = ip
	}
	return &s, nil
}
// parseService decodes a single IPVS netlink response message into a Service
// entry, or reports an error when the message cannot be decoded.
func (i *Handle) parseService(msg []byte) (*Service, error) {
	// Strip the generic netlink header, then decode the top-level attributes.
	hdr := deserializeGenlMsg(msg)
	topAttrs, err := nl.ParseRouteAttr(msg[hdr.Len():])
	if err != nil {
		return nil, err
	}
	if len(topAttrs) == 0 {
		return nil, fmt.Errorf("error no valid netlink message found while parsing service record")
	}
	// The IPVS-specific attributes are nested inside the first top-level
	// attribute; unpack and reassemble them into a service record.
	ipvsAttrs, err := nl.ParseRouteAttr(topAttrs[0].Value)
	if err != nil {
		return nil, err
	}
	return assembleService(ipvsAttrs)
}
// doGetServicesCmd is a wrapper which could be used commonly for both
// GetServices() and GetService(*Service): it issues an ipvsCmdGetService
// request (a dump of all services when svc is nil, a single lookup
// otherwise) and parses every returned message into a Service.
func (i *Handle) doGetServicesCmd(svc *Service) ([]*Service, error) {
	var res []*Service
	msgs, err := i.doCmdwithResponse(svc, nil, ipvsCmdGetService)
	if err != nil {
		return nil, err
	}
	for _, msg := range msgs {
		srv, err := i.parseService(msg)
		if err != nil {
			return nil, err
		}
		res = append(res, srv)
	}
	return res, nil
}
// doCmdWithoutAttr a simple wrapper of netlink socket execute command
func (i *Handle) | doCmdWithoutAttr | identifier_name | |
netlink_linux.go | tAttrChild(cmdAttr, ipvsDestAttrWeight, nl.Uint32Attr(uint32(d.Weight)))
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrUpperThreshold, nl.Uint32Attr(d.UpperThreshold))
nl.NewRtAttrChild(cmdAttr, ipvsDestAttrLowerThreshold, nl.Uint32Attr(d.LowerThreshold))
return cmdAttr
}
// doCmdwithResponse builds and executes an IPVS generic-netlink request and
// returns the raw response payloads.
//
// A nil Service turns the request into a dump (NLM_F_DUMP) of all services;
// the kernel still requires a (dummy) service attribute in that case. A nil
// Destination with ipvsCmdGetDest likewise dumps all destinations.
func (i *Handle) doCmdwithResponse(s *Service, d *Destination, cmd uint8) ([][]byte, error) {
	req := newIPVSRequest(cmd)
	req.Seq = atomic.AddUint32(&i.seq, 1)
	if s == nil {
		req.Flags |= syscall.NLM_F_DUMP                    // Flag to dump all messages
		req.AddData(nl.NewRtAttr(ipvsCmdAttrService, nil)) // Add a dummy attribute
	} else {
		req.AddData(fillService(s))
	}
	if d == nil {
		if cmd == ipvsCmdGetDest {
			req.Flags |= syscall.NLM_F_DUMP
		}
	} else {
		req.AddData(fillDestination(d))
	}
	res, err := execute(i.sock, req, 0)
	if err != nil {
		// Return nil on failure for consistency with the other helpers in
		// this file (previously an empty, non-nil slice was returned here).
		return nil, err
	}
	return res, nil
}
// doCmd executes a netlink command and discards any response payload,
// propagating only the error.
func (i *Handle) doCmd(s *Service, d *Destination, cmd uint8) error {
	if _, err := i.doCmdwithResponse(s, d, cmd); err != nil {
		return err
	}
	return nil
}
// getIPVSFamily asks the generic-netlink controller for the dynamically
// assigned family id of the "IPVS" subsystem. That id is needed to address
// all subsequent IPVS requests.
func getIPVSFamily() (int, error) {
	sock, err := nl.GetNetlinkSocketAt(netns.None(), netns.None(), syscall.NETLINK_GENERIC)
	if err != nil {
		return 0, err
	}
	defer sock.Close()
	req := newGenlRequest(genlCtrlID, genlCtrlCmdGetFamily)
	req.AddData(nl.NewRtAttr(genlCtrlAttrFamilyName, nl.ZeroTerminated("IPVS")))
	msgs, err := execute(sock, req, 0)
	if err != nil {
		return 0, err
	}
	// Scan the controller's reply for the family-id attribute.
	for _, m := range msgs {
		hdr := deserializeGenlMsg(m)
		attrs, err := nl.ParseRouteAttr(m[hdr.Len():])
		if err != nil {
			return 0, err
		}
		for _, attr := range attrs {
			switch int(attr.Attr.Type) {
			case genlCtrlAttrFamilyID:
				// The family id is a 16-bit value in host byte order.
				return int(native.Uint16(attr.Value[0:2])), nil
			}
		}
	}
	return 0, fmt.Errorf("no family id in the netlink response")
}
// rawIPData returns the wire representation of ip: IPv4 addresses are shrunk
// to their 4-byte form, everything else is passed through unchanged.
func rawIPData(ip net.IP) []byte {
	if nl.GetIPFamily(ip) == nl.FAMILY_V4 {
		return ip.To4()
	}
	return ip
}
// newIPVSRequest builds a generic-netlink request addressed to the IPVS
// family (resolved elsewhere into ipvsFamily) carrying the given IPVS command.
func newIPVSRequest(cmd uint8) *nl.NetlinkRequest {
	return newGenlRequest(ipvsFamily, cmd)
}
// newGenlRequest builds a generic-netlink request for the given family id,
// asking for an acknowledgement (NLM_F_ACK) and prepending a genl header
// with the requested command (protocol version 1).
func newGenlRequest(familyID int, cmd uint8) *nl.NetlinkRequest {
	req := nl.NewNetlinkRequest(familyID, syscall.NLM_F_ACK)
	req.AddData(&genlMsgHdr{cmd: cmd, version: 1})
	return req
}
// execute sends req on the given netlink socket and collects the response
// payloads. resType, when non-zero, filters the returned messages to that
// netlink message type. Reading continues until NLMSG_DONE, an
// acknowledging NLMSG_ERROR with code 0, or a non-multipart message
// terminates the exchange; messages for other sequence numbers are skipped
// and a mismatched sender pid aborts the call.
func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) {
	if err := s.Send(req); err != nil {
		return nil, err
	}
	pid, err := s.GetPid()
	if err != nil {
		return nil, err
	}
	var res [][]byte
done:
	for {
		msgs, _, err := s.Receive()
		if err != nil {
			if s.GetFd() == -1 {
				return nil, fmt.Errorf("Socket got closed on receive")
			}
			if err == syscall.EAGAIN {
				// timeout fired
				continue
			}
			return nil, err
		}
		for _, m := range msgs {
			if m.Header.Seq != req.Seq {
				continue
			}
			if m.Header.Pid != pid {
				return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
			}
			if m.Header.Type == syscall.NLMSG_DONE {
				break done
			}
			if m.Header.Type == syscall.NLMSG_ERROR {
				// Renamed from "error": the original name shadowed the
				// predeclared error identifier in this scope.
				errno := int32(native.Uint32(m.Data[0:4]))
				if errno == 0 {
					// An NLMSG_ERROR with code 0 is an ACK.
					break done
				}
				return nil, syscall.Errno(-errno)
			}
			if resType != 0 && m.Header.Type != resType {
				continue
			}
			res = append(res, m.Data)
			if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
				break done
			}
		}
	}
	return res, nil
}
func parseIP(ip []byte, family uint16) (net.IP, error) {
var resIP net.IP
switch family {
case syscall.AF_INET:
resIP = (net.IP)(ip[:4])
case syscall.AF_INET6:
resIP = (net.IP)(ip[:16])
default:
return nil, fmt.Errorf("parseIP Error ip=%v", ip)
}
return resIP, nil
}
// assembleStats unpacks a chain of netlink route attributes carrying IPVS
// traffic statistics (connection count, packet/byte totals and per-second
// rates) into a SvcStats value. Attribute types it does not recognize are
// silently skipped.
func assembleStats(msg []byte) (SvcStats, error) {
	var s SvcStats
	attrs, err := nl.ParseRouteAttr(msg)
	if err != nil {
		return s, err
	}
	for _, attr := range attrs {
		attrType := int(attr.Attr.Type)
		switch attrType {
		case ipvsStatsConns:
			s.Connections = native.Uint32(attr.Value)
		case ipvsStatsPktsIn:
			s.PacketsIn = native.Uint32(attr.Value)
		case ipvsStatsPktsOut:
			s.PacketsOut = native.Uint32(attr.Value)
		case ipvsStatsBytesIn:
			s.BytesIn = native.Uint64(attr.Value)
		case ipvsStatsBytesOut:
			s.BytesOut = native.Uint64(attr.Value)
		case ipvsStatsCPS:
			s.CPS = native.Uint32(attr.Value)
		case ipvsStatsPPSIn:
			s.PPSIn = native.Uint32(attr.Value)
		case ipvsStatsPPSOut:
			s.PPSOut = native.Uint32(attr.Value)
		case ipvsStatsBPSIn:
			s.BPSIn = native.Uint32(attr.Value)
		case ipvsStatsBPSOut:
			s.BPSOut = native.Uint32(attr.Value)
		}
	}
	return s, nil
}
// assembleService assembles a service back from a chain of netlink attributes.
func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) {
	var s Service
	// The raw address bytes are remembered and decoded only after the whole
	// attribute list has been walked: parseIP needs AddressFamily, which may
	// arrive after the address attribute.
	var addressBytes []byte
	for _, attr := range attrs {
		attrType := int(attr.Attr.Type)
		switch attrType {
		case ipvsSvcAttrAddressFamily:
			s.AddressFamily = native.Uint16(attr.Value)
		case ipvsSvcAttrProtocol:
			s.Protocol = native.Uint16(attr.Value)
		case ipvsSvcAttrAddress:
			addressBytes = attr.Value
		case ipvsSvcAttrPort:
			// The port travels in network byte order, unlike the other fields
			// which use host ("native") byte order.
			s.Port = binary.BigEndian.Uint16(attr.Value)
		case ipvsSvcAttrFWMark:
			s.FWMark = native.Uint32(attr.Value)
		case ipvsSvcAttrSchedName:
			s.SchedName = nl.BytesToString(attr.Value)
		case ipvsSvcAttrFlags:
			s.Flags = native.Uint32(attr.Value)
		case ipvsSvcAttrTimeout:
			s.Timeout = native.Uint32(attr.Value)
		case ipvsSvcAttrNetmask:
			s.Netmask = native.Uint32(attr.Value)
		case ipvsSvcAttrStats:
			stats, err := assembleStats(attr.Value)
			if err != nil {
				return nil, err
			}
			s.Stats = stats
		}
	}
	// parse Address after parsing AddressFamily, in case of parseIP error
	if addressBytes != nil {
		ip, err := parseIP(addressBytes, s.AddressFamily)
		if err != nil {
			return nil, err
		}
		s.Address = ip
	}
	return &s, nil
}
// parseService: given an ipvs netlink response, this function will respond
// with a valid service entry, an error otherwise.
func (i *Handle) parseService(msg []byte) (*Service, error) {
	var s *Service
	// Remove the generic netlink header for this message and parse the
	// top-level netlink attributes.
	hdr := deserializeGenlMsg(msg)
	NetLinkAttrs, err := nl.ParseRouteAttr(msg[hdr.Len():])
	if err != nil {
		return nil, err
	}
	if len(NetLinkAttrs) == 0 {
		return nil, fmt.Errorf("error no valid netlink message found while parsing service record")
	}
	// Now parse the IPVS-related attribute messages nested inside the first
	// top-level attribute.
	ipvsAttrs, err := nl.ParseRouteAttr(NetLinkAttrs[0].Value)
	if err != nil {
		return nil, err
	}
	// Assemble all the IPVS-related attribute messages into a service record.
	s, err = assembleService(ipvsAttrs)
	if err != nil {
		return nil, err
	}
	return s, nil
}
// doGetServicesCmd a wrapper which could be used commonly for both GetServices() and GetService(*Service)
func (i *Handle) doGetServicesCmd(svc *Service) ([]*Service, error) {
var res []*Service
msgs, err := i.do | random_line_split | |
sgr.go | RCube213
SGRCube214
SGRCube215
SGRCube216
SGRCube217
SGRCube218
SGRCube219
SGRCube220
SGRCube221
SGRCube222
SGRCube223
SGRCube224
SGRCube225
SGRCube226
SGRCube227
SGRCube228
SGRCube229
SGRCube230
SGRCube231
// 8-bit color space: 24 shades of gray; see colors.go.
SGRGray1
SGRGray2
SGRGray3
SGRGray4
SGRGray5
SGRGray6
SGRGray7
SGRGray8
SGRGray9
SGRGray10
SGRGray11
SGRGray12
SGRGray13
SGRGray14
SGRGray15
SGRGray16
SGRGray17
SGRGray18
SGRGray19
SGRGray20
SGRGray21
SGRGray22
SGRGray23
SGRGray24
)
// RGB constructs a 24-bit SGR color from component values. Components are
// packed with r in the low byte, g and b in the next two bytes, and the
// sgrColor24 flag set to mark the value as a true-color entry.
func RGB(r, g, b uint8) SGRColor {
	return sgrColor24 | SGRColor(r) | SGRColor(g)<<8 | SGRColor(b)<<16
}
// RGBA creates an SGRColor from color.Color() alpha-premultiplied values,
// ignoring the alpha value. Each component is clipped to 0xffff and then
// reduced to its high byte, yielding a 24-bit color (8 bits per channel).
func RGBA(r, g, b, _ uint32) SGRColor {
	clip := func(v uint32) uint8 {
		if v > 0xffff {
			v = 0xffff
		}
		return uint8(v >> 8)
	}
	return RGB(clip(r), clip(g), clip(b))
}
// String renders the color for human consumption: "rgb(r,g,b)" for 24-bit
// colors, a well-known name for the 16 base colors, and "colorN" for the
// remaining 8-bit palette entries.
func (c SGRColor) String() string {
	switch {
	case c&sgrColor24 != 0:
		var tmp [12]byte
		// appendRGB emits ";R;G;B"; drop the leading ';' and swap the
		// remaining separators for commas.
		p := c.appendRGB(tmp[:0])[1:]
		for i := range p {
			if p[i] == ';' {
				p[i] = ','
			}
		}
		return fmt.Sprintf("rgb(%s)", p)
	case int(c) < len(colorNames):
		return colorNames[c]
	default:
		return fmt.Sprintf("color%d", c)
	}
}
// FG constructs an SGR attribute value with the color as foreground.
func (c SGRColor) FG() SGRAttr {
return sgrAttrFGSet | SGRAttr(c&sgrColorMask)<<sgrFGShift
}
// BG constructs an SGR attribute value with the color as background.
func (c SGRColor) BG() SGRAttr {
return sgrAttrBGSet | SGRAttr((c&sgrColorMask))<<sgrBGShift
}
// RGBA implements the color.Color interface: each 8-bit channel is widened
// to the 16-bit range by replicating its byte, with full (opaque) alpha.
func (c SGRColor) RGBA() (r, g, b, a uint32) {
	r8, g8, b8 := c.RGB()
	widen := func(v uint8) uint32 {
		w := uint32(v)
		return w | w<<8
	}
	return widen(r8), widen(g8), widen(b8), 0xffff
}
// RGB returns the equivalent RGB components. Palette (non-24-bit) colors are
// first resolved to a 24-bit value through Palette8Colors before the three
// bytes are extracted.
func (c SGRColor) RGB() (r, g, b uint8) {
	if c&sgrColor24 == 0 {
		c = Palette8Colors[c&0xff]
	}
	return uint8(c), uint8(c >> 8), uint8(c >> 16)
}
// To24Bit converts the color to 24-bit mode, so that it won't encode as a
// legacy 3, 4, or 8-bit color. A color that is already 24-bit is returned
// unchanged.
func (c SGRColor) To24Bit() SGRColor {
	if c&sgrColor24 == 0 {
		return RGB(c.RGB())
	}
	return c
}
// appendFGTo appends the SGR parameter bytes that select c as the foreground
// color: "30"-"37" for base colors, "90"-"97" for bright colors, "38;5;N"
// for the 8-bit palette and "38;2;R;G;B" for 24-bit colors. Values outside
// all ranges append nothing.
func (c SGRColor) appendFGTo(p []byte) []byte {
	switch {
	case c&sgrColor24 != 0:
		return c.appendRGB(append(p, "38;2"...)) // TODO support color space identifier?
	case c <= SGRWhite:
		return append(p, '3', '0'+uint8(c))
	case c <= SGRBrightWhite:
		return append(p, '9', '0'+uint8(c)-8)
	case c <= SGRGray24:
		return append(append(p, "38;5"...), colorStrings[uint8(c)]...)
	}
	return p
}
// fgSize returns the number of bytes appendFGTo would append for c; the
// cases must mirror appendFGTo exactly.
func (c SGRColor) fgSize() int {
	switch {
	case c&sgrColor24 != 0:
		return 4 + c.rgbSize()
	case c <= SGRWhite:
		return 2
	case c <= SGRBrightWhite:
		return 2
	case c <= SGRGray24:
		return 4 + len(colorStrings[uint8(c)])
	}
	return 0
}
// appendBGTo appends the SGR parameter bytes that select c as the background
// color: "40"-"47" for base colors, "100"-"107" for bright colors, "48;5;N"
// for the 8-bit palette and "48;2;R;G;B" for 24-bit colors. Values outside
// all ranges append nothing.
func (c SGRColor) appendBGTo(p []byte) []byte {
	switch {
	case c&sgrColor24 != 0:
		return c.appendRGB(append(p, "48;2"...)) // TODO support color space identifier?
	case c <= SGRWhite:
		return append(p, '4', '0'+uint8(c))
	case c <= SGRBrightWhite:
		return append(p, '1', '0', '0'+uint8(c)-8)
	case c <= SGRGray24:
		return append(append(p, "48;5"...), colorStrings[uint8(c)]...)
	}
	return p
}
// bgSize returns the number of bytes appendBGTo would append for c; the
// cases must mirror appendBGTo exactly (bright backgrounds are 3 bytes,
// e.g. "100").
func (c SGRColor) bgSize() int {
	switch {
	case c&sgrColor24 != 0:
		return 4 + c.rgbSize()
	case c <= SGRWhite:
		return 2
	case c <= SGRBrightWhite:
		return 3
	case c <= SGRGray24:
		return 4 + len(colorStrings[uint8(c)])
	}
	return 0
}
// appendRGB appends the ";R;G;B" component suffix for a 24-bit color, using
// the precomputed ";N" strings in colorStrings.
func (c SGRColor) appendRGB(p []byte) []byte {
	for _, comp := range [...]uint8{uint8(c), uint8(c >> 8), uint8(c >> 16)} {
		p = append(p, colorStrings[comp]...)
	}
	return p
}
// rgbSize returns the number of bytes appendRGB would append for c.
func (c SGRColor) rgbSize() int {
	return 0 +
		len(colorStrings[uint8(c)]) +
		len(colorStrings[uint8(c>>8)]) +
		len(colorStrings[uint8(c>>16)])
}
// colorNames maps the 16 base palette values (indexed by SGRColor) to their
// conventional names; used by SGRColor.String.
var colorNames = [16]string{
	"black",
	"red",
	"green",
	"yellow",
	"blue",
	"magenta",
	"cyan",
	"white",
	"bright-black",
	"bright-red",
	"bright-green",
	"bright-yellow",
	"bright-blue",
	"bright-magenta",
	"bright-cyan",
	"bright-white",
}
// colorStrings caches the ";N" suffix for every byte value so color encoding
// can append precomputed bytes instead of formatting integers on every call.
var colorStrings [256]string
func init() {
	for i := 0; i < 256; i++ {
		colorStrings[i] = ";" + strconv.Itoa(i)
	}
}
// FG returns any set foreground color, and a bool indicating if it was
// actually set (to distinguish from 0=black).
func (attr SGRAttr) FG() (c SGRColor, set bool) {
if set = attr&sgrAttrFGSet != 0; set {
c = SGRColor(attr>>sgrFGShift) & sgrColorMask
}
return c, set
}
// BG returns any set background color, and a bool indicating if it was
// actually set (to distinguish from 0=black).
func (attr SGRAttr) BG() (c SGRColor, set bool) {
if set = attr&sgrAttrBGSet != 0; set {
c = SGRColor(attr>>sgrBGShift) & sgrColorMask
}
return c, set
}
// SansFG returns a copy of the attribute with any FG color unset.
func (attr SGRAttr) SansFG() SGRAttr { return attr & ^SGRAttrFGMask }
// SansBG returns a copy of the attribute with any BG color unset.
func (attr SGRAttr) SansBG() SGRAttr { return attr & ^SGRAttrBGMask }
// Merge an other attr value into a copy of the receiver, returning it. A
// clear bit in other resets the receiver first; other's flag bits are then
// OR-ed in, and other's FG/BG colors — when set — replace the receiver's.
func (attr SGRAttr) Merge(other SGRAttr) SGRAttr {
	if other&SGRAttrClear != 0 {
		attr = SGRClear
	}
	attr |= other & SGRAttrMask
	if c, set := other.FG(); set {
		attr = attr.SansFG() | c.FG()
	}
	if c, set := other.BG(); set {
		attr = attr.SansBG() | c.BG()
	}
	return attr
}
// Diff returns the attr value which must be merged with the receiver to result
// in the given value.
func (attr SGRAttr) | Diff | identifier_name | |
sgr.go | 6
SGRCube17
SGRCube18
SGRCube19
SGRCube20
SGRCube21 | SGRCube23
SGRCube24
SGRCube25
SGRCube26
SGRCube27
SGRCube28
SGRCube29
SGRCube30
SGRCube31
SGRCube32
SGRCube33
SGRCube34
SGRCube35
SGRCube36
SGRCube37
SGRCube38
SGRCube39
SGRCube40
SGRCube41
SGRCube42
SGRCube43
SGRCube44
SGRCube45
SGRCube46
SGRCube47
SGRCube48
SGRCube49
SGRCube50
SGRCube51
SGRCube52
SGRCube53
SGRCube54
SGRCube55
SGRCube56
SGRCube57
SGRCube58
SGRCube59
SGRCube60
SGRCube61
SGRCube62
SGRCube63
SGRCube64
SGRCube65
SGRCube66
SGRCube67
SGRCube68
SGRCube69
SGRCube70
SGRCube71
SGRCube72
SGRCube73
SGRCube74
SGRCube75
SGRCube76
SGRCube77
SGRCube78
SGRCube79
SGRCube80
SGRCube81
SGRCube82
SGRCube83
SGRCube84
SGRCube85
SGRCube86
SGRCube87
SGRCube88
SGRCube89
SGRCube90
SGRCube91
SGRCube92
SGRCube93
SGRCube94
SGRCube95
SGRCube96
SGRCube97
SGRCube98
SGRCube99
SGRCube100
SGRCube101
SGRCube102
SGRCube103
SGRCube104
SGRCube105
SGRCube106
SGRCube107
SGRCube108
SGRCube109
SGRCube110
SGRCube111
SGRCube112
SGRCube113
SGRCube114
SGRCube115
SGRCube116
SGRCube117
SGRCube118
SGRCube119
SGRCube120
SGRCube121
SGRCube122
SGRCube123
SGRCube124
SGRCube125
SGRCube126
SGRCube127
SGRCube128
SGRCube129
SGRCube130
SGRCube131
SGRCube132
SGRCube133
SGRCube134
SGRCube135
SGRCube136
SGRCube137
SGRCube138
SGRCube139
SGRCube140
SGRCube141
SGRCube142
SGRCube143
SGRCube144
SGRCube145
SGRCube146
SGRCube147
SGRCube148
SGRCube149
SGRCube150
SGRCube151
SGRCube152
SGRCube153
SGRCube154
SGRCube155
SGRCube156
SGRCube157
SGRCube158
SGRCube159
SGRCube160
SGRCube161
SGRCube162
SGRCube163
SGRCube164
SGRCube165
SGRCube166
SGRCube167
SGRCube168
SGRCube169
SGRCube170
SGRCube171
SGRCube172
SGRCube173
SGRCube174
SGRCube175
SGRCube176
SGRCube177
SGRCube178
SGRCube179
SGRCube180
SGRCube181
SGRCube182
SGRCube183
SGRCube184
SGRCube185
SGRCube186
SGRCube187
SGRCube188
SGRCube189
SGRCube190
SGRCube191
SGRCube192
SGRCube193
SGRCube194
SGRCube195
SGRCube196
SGRCube197
SGRCube198
SGRCube199
SGRCube200
SGRCube201
SGRCube202
SGRCube203
SGRCube204
SGRCube205
SGRCube206
SGRCube207
SGRCube208
SGRCube209
SGRCube210
SGRCube211
SGRCube212
SGRCube213
SGRCube214
SGRCube215
SGRCube216
SGRCube217
SGRCube218
SGRCube219
SGRCube220
SGRCube221
SGRCube222
SGRCube223
SGRCube224
SGRCube225
SGRCube226
SGRCube227
SGRCube228
SGRCube229
SGRCube230
SGRCube231
// 8-bit color space: 24 shades of gray; see colors.go.
SGRGray1
SGRGray2
SGRGray3
SGRGray4
SGRGray5
SGRGray6
SGRGray7
SGRGray8
SGRGray9
SGRGray10
SGRGray11
SGRGray12
SGRGray13
SGRGray14
SGRGray15
SGRGray16
SGRGray17
SGRGray18
SGRGray19
SGRGray20
SGRGray21
SGRGray22
SGRGray23
SGRGray24
)
// RGB constructs a 24-bit SGR color from component values.
func RGB(r, g, b uint8) SGRColor {
return sgrColor24 | SGRColor(r) | SGRColor(g)<<8 | SGRColor(b)<<16
}
// RGBA creates an SGRColor from color.Color() alpha-premultiplied values,
// ignoring the alpha value. Clips components to 0xffff before converting to
// 24-bit color (8-bit per channel).
func RGBA(r, g, b, _ uint32) SGRColor {
if r > 0xffff {
r = 0xffff
}
if g > 0xffff {
g = 0xffff
}
if b > 0xffff {
b = 0xffff
}
return RGB(uint8(r>>8), uint8(g>>8), uint8(b>>8))
}
func (c SGRColor) String() string {
switch {
case c&sgrColor24 != 0:
var tmp [12]byte
p := c.appendRGB(tmp[:0])[1:]
for i := range p {
if p[i] == ';' {
p[i] = ','
| SGRCube22 | random_line_split |
sgr.go | RCube71
SGRCube72
SGRCube73
SGRCube74
SGRCube75
SGRCube76
SGRCube77
SGRCube78
SGRCube79
SGRCube80
SGRCube81
SGRCube82
SGRCube83
SGRCube84
SGRCube85
SGRCube86
SGRCube87
SGRCube88
SGRCube89
SGRCube90
SGRCube91
SGRCube92
SGRCube93
SGRCube94
SGRCube95
SGRCube96
SGRCube97
SGRCube98
SGRCube99
SGRCube100
SGRCube101
SGRCube102
SGRCube103
SGRCube104
SGRCube105
SGRCube106
SGRCube107
SGRCube108
SGRCube109
SGRCube110
SGRCube111
SGRCube112
SGRCube113
SGRCube114
SGRCube115
SGRCube116
SGRCube117
SGRCube118
SGRCube119
SGRCube120
SGRCube121
SGRCube122
SGRCube123
SGRCube124
SGRCube125
SGRCube126
SGRCube127
SGRCube128
SGRCube129
SGRCube130
SGRCube131
SGRCube132
SGRCube133
SGRCube134
SGRCube135
SGRCube136
SGRCube137
SGRCube138
SGRCube139
SGRCube140
SGRCube141
SGRCube142
SGRCube143
SGRCube144
SGRCube145
SGRCube146
SGRCube147
SGRCube148
SGRCube149
SGRCube150
SGRCube151
SGRCube152
SGRCube153
SGRCube154
SGRCube155
SGRCube156
SGRCube157
SGRCube158
SGRCube159
SGRCube160
SGRCube161
SGRCube162
SGRCube163
SGRCube164
SGRCube165
SGRCube166
SGRCube167
SGRCube168
SGRCube169
SGRCube170
SGRCube171
SGRCube172
SGRCube173
SGRCube174
SGRCube175
SGRCube176
SGRCube177
SGRCube178
SGRCube179
SGRCube180
SGRCube181
SGRCube182
SGRCube183
SGRCube184
SGRCube185
SGRCube186
SGRCube187
SGRCube188
SGRCube189
SGRCube190
SGRCube191
SGRCube192
SGRCube193
SGRCube194
SGRCube195
SGRCube196
SGRCube197
SGRCube198
SGRCube199
SGRCube200
SGRCube201
SGRCube202
SGRCube203
SGRCube204
SGRCube205
SGRCube206
SGRCube207
SGRCube208
SGRCube209
SGRCube210
SGRCube211
SGRCube212
SGRCube213
SGRCube214
SGRCube215
SGRCube216
SGRCube217
SGRCube218
SGRCube219
SGRCube220
SGRCube221
SGRCube222
SGRCube223
SGRCube224
SGRCube225
SGRCube226
SGRCube227
SGRCube228
SGRCube229
SGRCube230
SGRCube231
// 8-bit color space: 24 shades of gray; see colors.go.
SGRGray1
SGRGray2
SGRGray3
SGRGray4
SGRGray5
SGRGray6
SGRGray7
SGRGray8
SGRGray9
SGRGray10
SGRGray11
SGRGray12
SGRGray13
SGRGray14
SGRGray15
SGRGray16
SGRGray17
SGRGray18
SGRGray19
SGRGray20
SGRGray21
SGRGray22
SGRGray23
SGRGray24
)
// RGB constructs a 24-bit SGR color from component values.
func RGB(r, g, b uint8) SGRColor {
return sgrColor24 | SGRColor(r) | SGRColor(g)<<8 | SGRColor(b)<<16
}
// RGBA creates an SGRColor from color.Color() alpha-premultiplied values,
// ignoring the alpha value. Clips components to 0xffff before converting to
// 24-bit color (8-bit per channel).
func RGBA(r, g, b, _ uint32) SGRColor {
if r > 0xffff {
r = 0xffff
}
if g > 0xffff {
g = 0xffff
}
if b > 0xffff {
b = 0xffff
}
return RGB(uint8(r>>8), uint8(g>>8), uint8(b>>8))
}
func (c SGRColor) String() string {
switch {
case c&sgrColor24 != 0:
var tmp [12]byte
p := c.appendRGB(tmp[:0])[1:]
for i := range p {
if p[i] == ';' {
p[i] = ','
}
}
return fmt.Sprintf("rgb(%s)", p)
case int(c) < len(colorNames):
return colorNames[c]
default:
return fmt.Sprintf("color%d", c)
}
}
// FG constructs an SGR attribute value with the color as foreground.
func (c SGRColor) FG() SGRAttr {
return sgrAttrFGSet | SGRAttr(c&sgrColorMask)<<sgrFGShift
}
// BG constructs an SGR attribute value with the color as background.
func (c SGRColor) BG() SGRAttr {
return sgrAttrBGSet | SGRAttr((c&sgrColorMask))<<sgrBGShift
}
// RGBA implements the color.Color interface.
func (c SGRColor) RGBA() (r, g, b, a uint32) {
r8, g8, b8 := c.RGB()
r = uint32(r8)
g = uint32(g8)
b = uint32(b8)
return r | r<<8, g | g<<8, b | b<<8, 0xffff
}
// RGB returns the equivalent RGB components.
func (c SGRColor) RGB() (r, g, b uint8) {
if c&sgrColor24 == 0 {
c = Palette8Colors[c&0xff]
}
return uint8(c), uint8(c >> 8), uint8(c >> 16)
}
// To24Bit converts the color to 24-bit mode, so that it won't encode as a
// legacy 3, 4, or 8-bit color.
func (c SGRColor) To24Bit() SGRColor {
if c&sgrColor24 != 0 | {
return c
} | conditional_block | |
sgr.go | GRCube61
SGRCube62
SGRCube63
SGRCube64
SGRCube65
SGRCube66
SGRCube67
SGRCube68
SGRCube69
SGRCube70
SGRCube71
SGRCube72
SGRCube73
SGRCube74
SGRCube75
SGRCube76
SGRCube77
SGRCube78
SGRCube79
SGRCube80
SGRCube81
SGRCube82
SGRCube83
SGRCube84
SGRCube85
SGRCube86
SGRCube87
SGRCube88
SGRCube89
SGRCube90
SGRCube91
SGRCube92
SGRCube93
SGRCube94
SGRCube95
SGRCube96
SGRCube97
SGRCube98
SGRCube99
SGRCube100
SGRCube101
SGRCube102
SGRCube103
SGRCube104
SGRCube105
SGRCube106
SGRCube107
SGRCube108
SGRCube109
SGRCube110
SGRCube111
SGRCube112
SGRCube113
SGRCube114
SGRCube115
SGRCube116
SGRCube117
SGRCube118
SGRCube119
SGRCube120
SGRCube121
SGRCube122
SGRCube123
SGRCube124
SGRCube125
SGRCube126
SGRCube127
SGRCube128
SGRCube129
SGRCube130
SGRCube131
SGRCube132
SGRCube133
SGRCube134
SGRCube135
SGRCube136
SGRCube137
SGRCube138
SGRCube139
SGRCube140
SGRCube141
SGRCube142
SGRCube143
SGRCube144
SGRCube145
SGRCube146
SGRCube147
SGRCube148
SGRCube149
SGRCube150
SGRCube151
SGRCube152
SGRCube153
SGRCube154
SGRCube155
SGRCube156
SGRCube157
SGRCube158
SGRCube159
SGRCube160
SGRCube161
SGRCube162
SGRCube163
SGRCube164
SGRCube165
SGRCube166
SGRCube167
SGRCube168
SGRCube169
SGRCube170
SGRCube171
SGRCube172
SGRCube173
SGRCube174
SGRCube175
SGRCube176
SGRCube177
SGRCube178
SGRCube179
SGRCube180
SGRCube181
SGRCube182
SGRCube183
SGRCube184
SGRCube185
SGRCube186
SGRCube187
SGRCube188
SGRCube189
SGRCube190
SGRCube191
SGRCube192
SGRCube193
SGRCube194
SGRCube195
SGRCube196
SGRCube197
SGRCube198
SGRCube199
SGRCube200
SGRCube201
SGRCube202
SGRCube203
SGRCube204
SGRCube205
SGRCube206
SGRCube207
SGRCube208
SGRCube209
SGRCube210
SGRCube211
SGRCube212
SGRCube213
SGRCube214
SGRCube215
SGRCube216
SGRCube217
SGRCube218
SGRCube219
SGRCube220
SGRCube221
SGRCube222
SGRCube223
SGRCube224
SGRCube225
SGRCube226
SGRCube227
SGRCube228
SGRCube229
SGRCube230
SGRCube231
// 8-bit color space: 24 shades of gray; see colors.go.
SGRGray1
SGRGray2
SGRGray3
SGRGray4
SGRGray5
SGRGray6
SGRGray7
SGRGray8
SGRGray9
SGRGray10
SGRGray11
SGRGray12
SGRGray13
SGRGray14
SGRGray15
SGRGray16
SGRGray17
SGRGray18
SGRGray19
SGRGray20
SGRGray21
SGRGray22
SGRGray23
SGRGray24
)
// RGB constructs a 24-bit SGR color from component values.
func RGB(r, g, b uint8) SGRColor {
return sgrColor24 | SGRColor(r) | SGRColor(g)<<8 | SGRColor(b)<<16
}
// RGBA creates an SGRColor from color.Color() alpha-premultiplied values,
// ignoring the alpha value. Clips components to 0xffff before converting to
// 24-bit color (8-bit per channel).
func RGBA(r, g, b, _ uint32) SGRColor {
if r > 0xffff {
r = 0xffff
}
if g > 0xffff {
g = 0xffff
}
if b > 0xffff {
b = 0xffff
}
return RGB(uint8(r>>8), uint8(g>>8), uint8(b>>8))
}
func (c SGRColor) String() string {
switch {
case c&sgrColor24 != 0:
var tmp [12]byte
p := c.appendRGB(tmp[:0])[1:]
for i := range p {
if p[i] == ';' {
p[i] = ','
}
}
return fmt.Sprintf("rgb(%s)", p)
case int(c) < len(colorNames):
return colorNames[c]
default:
return fmt.Sprintf("color%d", c)
}
}
// FG constructs an SGR attribute value with the color as foreground.
func (c SGRColor) FG() SGRAttr {
return sgrAttrFGSet | SGRAttr(c&sgrColorMask)<<sgrFGShift
}
// BG constructs an SGR attribute value with the color as background.
func (c SGRColor) BG() SGRAttr {
return sgrAttrBGSet | SGRAttr((c&sgrColorMask))<<sgrBGShift
}
// RGBA implements the color.Color interface.
func (c SGRColor) RGBA() (r, g, b, a uint32) {
r8, g8, b8 := c.RGB()
r = uint32(r8)
g = uint32(g8)
b = uint32(b8)
return r | r<<8, g | g<<8, b | b<<8, 0xffff
}
// RGB returns the equivalent RGB components.
func (c SGRColor) RGB() (r, g, b uint8) | {
if c&sgrColor24 == 0 {
c = Palette8Colors[c&0xff]
}
return uint8(c), uint8(c >> 8), uint8(c >> 16)
} | identifier_body | |
async_queue.js | log from './log';
import { Deferred } from './promise';
import { Code, FirestoreError } from './error';
/**
 * Wellknown "timer" IDs used when scheduling delayed operations on the
 * AsyncQueue. These IDs can then be used from tests to check for the presence
 * of operations or to run them early.
 *
 * The string values are used when encoding these timer IDs in JSON spec tests.
 */
export var TimerId;
// Compiled-TypeScript string-enum pattern: populates the TimerId lookup
// object in place.
(function (TimerId) {
    /** All can be used with runDelayedOperationsEarly() to run all timers. */
    TimerId["All"] = "all";
    /**
     * The following 4 timers are used in persistent_stream.ts for the listen and
     * write streams. The "Idle" timer is used to close the stream due to
     * inactivity. The "ConnectionBackoff" timer is used to restart a stream once
     * the appropriate backoff delay has elapsed.
     */
    TimerId["ListenStreamIdle"] = "listen_stream_idle";
    TimerId["ListenStreamConnectionBackoff"] = "listen_stream_connection_backoff";
    TimerId["WriteStreamIdle"] = "write_stream_idle";
    TimerId["WriteStreamConnectionBackoff"] = "write_stream_connection_backoff";
    /**
     * A timer used in online_state_tracker.ts to transition from
     * OnlineState.Unknown to Offline after a set timeout, rather than waiting
     * indefinitely for success or failure.
     */
    TimerId["OnlineStateTimeout"] = "online_state_timeout";
})(TimerId || (TimerId = {}));
/**
 * Represents an operation scheduled to be run in the future on an AsyncQueue.
 *
 * It is created via DelayedOperation.createAndSchedule().
 *
 * Supports cancellation (via cancel()) and early execution (via skipDelay()).
 */
var DelayedOperation = /** @class */ (function () {
    function DelayedOperation(asyncQueue, timerId, targetTimeMs, op, removalCallback) {
        this.asyncQueue = asyncQueue;
        this.timerId = timerId;
        this.targetTimeMs = targetTimeMs;
        this.op = op;
        this.removalCallback = removalCallback;
        // deferred settles with op's result once the delay elapses (or rejects
        // on cancel); then/catch are bound so callers can treat this object as
        // a promise.
        this.deferred = new Deferred();
        this.then = this.deferred.promise.then.bind(this.deferred.promise);
        this.catch = this.deferred.promise.catch.bind(this.deferred.promise);
        // It's normal for the deferred promise to be canceled (due to cancellation)
        // and so we attach a dummy catch callback to avoid
        // 'UnhandledPromiseRejectionWarning' log spam.
        this.deferred.promise.catch(function (err) { });
    }
    /**
     * Creates and returns a DelayedOperation that has been scheduled to be
     * executed on the provided asyncQueue after the provided delayMs.
     *
     * @param asyncQueue The queue to schedule the operation on.
     * @param timerId A Timer ID identifying the type of operation this is.
     * @param delayMs The delay (ms) before the operation should be scheduled.
     * @param op The operation to run.
     * @param removalCallback A callback to be called synchronously once the
     *   operation is executed or canceled, notifying the AsyncQueue to remove it
     *   from its delayedOperations list.
     *   PORTING NOTE: This exists to prevent making removeDelayedOperation() and
     *   the DelayedOperation class public.
     */
    DelayedOperation.createAndSchedule = function (asyncQueue, timerId, delayMs, op, removalCallback) {
        var targetTime = Date.now() + delayMs;
        var delayedOp = new DelayedOperation(asyncQueue, timerId, targetTime, op, removalCallback);
        delayedOp.start(delayMs);
        return delayedOp;
    };
    /**
     * Starts the timer. This is called immediately after construction by
     * createAndSchedule().
     */
    DelayedOperation.prototype.start = function (delayMs) {
        var _this = this;
        this.timerHandle = setTimeout(function () { return _this.handleDelayElapsed(); }, delayMs);
    };
    /**
     * Queues the operation to run immediately (if it hasn't already been run or
     * canceled).
     */
    DelayedOperation.prototype.skipDelay = function () {
        return this.handleDelayElapsed();
    };
    /**
     * Cancels the operation if it hasn't already been executed or canceled. The
     * promise will be rejected.
     *
     * As long as the operation has not yet been run, calling cancel() provides a
     * guarantee that the operation will not be run.
     */
    DelayedOperation.prototype.cancel = function (reason) {
        if (this.timerHandle !== null) {
            this.clearTimeout();
            this.deferred.reject(new FirestoreError(Code.CANCELLED, 'Operation cancelled' + (reason ? ': ' + reason : '')));
        }
    };
    // Runs once the timeout fires (or skipDelay() is called): enqueues the
    // operation on the AsyncQueue. The timerHandle re-check inside the queued
    // closure guards against a cancel() that lands between the timeout firing
    // and the closure actually running.
    DelayedOperation.prototype.handleDelayElapsed = function () {
        var _this = this;
        this.asyncQueue.enqueue(function () {
            if (_this.timerHandle !== null) {
                _this.clearTimeout();
                return _this.op().then(function (result) {
                    return _this.deferred.resolve(result);
                });
            }
            else {
                return Promise.resolve();
            }
        });
    };
    // Clears any pending timeout, notifies the queue via removalCallback, and
    // nulls timerHandle so later cancel()/run attempts become no-ops.
    DelayedOperation.prototype.clearTimeout = function () {
        if (this.timerHandle !== null) {
            this.removalCallback(this);
            clearTimeout(this.timerHandle);
            this.timerHandle = null;
        }
    };
    return DelayedOperation;
}());
var AsyncQueue = /** @class */ (function () {
function AsyncQueue() |
/**
 * Adds a new operation to the queue. Returns a promise that will be resolved
 * when the promise returned by the new operation is (with its value).
 *
 * Operations run strictly one at a time: each is chained onto `this.tail`.
 * Once any operation rejects, the queue is permanently failed and every later
 * enqueue() call throws via verifyNotFailed().
 */
AsyncQueue.prototype.enqueue = function (op) {
    var _this = this;
    this.verifyNotFailed();
    var newTail = this.tail.then(function () {
        _this.operationInProgress = true;
        return op()
            .catch(function (error) {
            _this.failure = error;
            _this.operationInProgress = false;
            var message = error.stack || error.message || '';
            log.error('INTERNAL UNHANDLED ERROR: ', message);
            // Escape the promise chain and throw the error globally so that
            // e.g. any global crash reporting library detects and reports it.
            // (but not for simulated errors in our tests since this breaks mocha)
            if (message.indexOf('Firestore Test Simulated Error') < 0) {
                setTimeout(function () {
                    throw error;
                }, 0);
            }
            // Re-throw the error so that this.tail becomes a rejected Promise and
            // all further attempts to chain (via .then) will just short-circuit
            // and return the rejected Promise.
            throw error;
        })
            .then(function (result) {
            _this.operationInProgress = false;
            return result;
        });
    });
    this.tail = newTail;
    return newTail;
};
/**
 * Schedules an operation to be queued on the AsyncQueue once the specified
 * `delayMs` has elapsed. The returned CancelablePromise can be used to cancel
 * the operation prior to its running.
 */
AsyncQueue.prototype.enqueueAfterDelay = function (timerId, delayMs, op) {
    var _this = this;
    this.verifyNotFailed();
    // While not necessarily harmful, we currently don't expect to have multiple
    // ops with the same timer id in the queue, so defensively reject them.
    assert(!this.containsDelayedOperation(timerId), "Attempted to schedule multiple operations with timer id " + timerId + ".");
    // The removal callback keeps delayedOperations in sync once the op runs or
    // is canceled.
    var delayedOp = DelayedOperation.createAndSchedule(this, timerId, delayMs, op, function (op) { return _this.removeDelayedOperation(op); });
    this.delayedOperations.push(delayedOp);
    return delayedOp;
};
AsyncQueue.prototype.verifyNotFailed = function () {
if (this.failure) {
fail('AsyncQueue is already failed: ' +
(this.failure.stack || this.failure.message));
}
};
/**
* Verifies there's an operation currently in-progress on the AsyncQueue.
* Unfortunately we can't verify that the running code is in the promise chain
* of that operation, so this isn't a foolproof check, but it should be enough
* to catch some bugs.
*/
AsyncQueue.prototype.verifyOperationInProgress = function () {
assert(this.operationInProgress, 'verifyOpInProgress() called when no op in progress on this queue.');
};
/**
* Waits until all currently queued tasks are finished executing. Delayed
* operations are not run.
*/
AsyncQueue.prototype.drain = function () {
return this.enqueue(function () { return Promise.resolve(); });
};
/**
 * For Tests: Determine if a delayed operation with a particular TimerId
 * exists.
 */
AsyncQueue.prototype.containsDelayedOperation = function (timerId) {
    return this.delayedOperations.some(function (op) { return op.timerId === timerId; });
};
/**
* For Tests: Runs some or all delayed operations early.
*
* @param lastTimerId Delayed operations up to and including this TimerId will
* be drained. Throws if no such operation exists. Pass Timer | {
// The last promise in the queue.
this.tail = Promise.resolve();
// Operations scheduled to be queued in the future. Operations are
// automatically removed after they are run or canceled.
this.delayedOperations = [];
// Flag set while there's an outstanding AsyncQueue operation, used for
// assertion sanity-checks.
this.operationInProgress = false;
} | identifier_body |
async_queue.js | log from './log';
import { Deferred } from './promise';
import { Code, FirestoreError } from './error';
/**
* Wellknown "timer" IDs used when scheduling delayed operations on the
* AsyncQueue. These IDs can then be used from tests to check for the presence
* of operations or to run them early.
*
* The string values are used when encoding these timer IDs in JSON spec tests.
*/
export var TimerId;
(function (TimerId) {
/** All can be used with runDelayedOperationsEarly() to run all timers. */
TimerId["All"] = "all";
/**
* The following 4 timers are used in persistent_stream.ts for the listen and
* write streams. The "Idle" timer is used to close the stream due to
* inactivity. The "ConnectionBackoff" timer is used to restart a stream once
* the appropriate backoff delay has elapsed.
*/
TimerId["ListenStreamIdle"] = "listen_stream_idle";
TimerId["ListenStreamConnectionBackoff"] = "listen_stream_connection_backoff";
TimerId["WriteStreamIdle"] = "write_stream_idle";
TimerId["WriteStreamConnectionBackoff"] = "write_stream_connection_backoff";
/**
* A timer used in online_state_tracker.ts to transition from
* OnlineState.Unknown to Offline after a set timeout, rather than waiting
* indefinitely for success or failure.
*/
TimerId["OnlineStateTimeout"] = "online_state_timeout";
})(TimerId || (TimerId = {}));
/**
* Represents an operation scheduled to be run in the future on an AsyncQueue.
*
* It is created via DelayedOperation.createAndSchedule().
*
* Supports cancellation (via cancel()) and early execution (via skipDelay()).
*/
var DelayedOperation = /** @class */ (function () {
function DelayedOperation(asyncQueue, timerId, targetTimeMs, op, removalCallback) {
this.asyncQueue = asyncQueue;
this.timerId = timerId;
this.targetTimeMs = targetTimeMs;
this.op = op;
this.removalCallback = removalCallback;
this.deferred = new Deferred();
this.then = this.deferred.promise.then.bind(this.deferred.promise);
this.catch = this.deferred.promise.catch.bind(this.deferred.promise);
// It's normal for the deferred promise to be canceled (due to cancellation)
// and so we attach a dummy catch callback to avoid
// 'UnhandledPromiseRejectionWarning' log spam.
this.deferred.promise.catch(function (err) { });
}
/**
* Creates and returns a DelayedOperation that has been scheduled to be
* executed on the provided asyncQueue after the provided delayMs.
*
* @param asyncQueue The queue to schedule the operation on.
* @param id A Timer ID identifying the type of operation this is.
* @param delayMs The delay (ms) before the operation should be scheduled.
* @param op The operation to run.
* @param removalCallback A callback to be called synchronously once the
* operation is executed or canceled, notifying the AsyncQueue to remove it
* from its delayedOperations list.
* PORTING NOTE: This exists to prevent making removeDelayedOperation() and
* the DelayedOperation class public.
*/
DelayedOperation.createAndSchedule = function (asyncQueue, timerId, delayMs, op, removalCallback) {
var targetTime = Date.now() + delayMs;
var delayedOp = new DelayedOperation(asyncQueue, timerId, targetTime, op, removalCallback);
delayedOp.start(delayMs);
return delayedOp;
};
/**
* Starts the timer. This is called immediately after construction by
* createAndSchedule().
*/
DelayedOperation.prototype.start = function (delayMs) {
var _this = this;
this.timerHandle = setTimeout(function () { return _this.handleDelayElapsed(); }, delayMs);
};
/**
* Queues the operation to run immediately (if it hasn't already been run or
* canceled).
*/
DelayedOperation.prototype.skipDelay = function () {
return this.handleDelayElapsed();
};
/**
* Cancels the operation if it hasn't already been executed or canceled. The
* promise will be rejected.
*
* As long as the operation has not yet been run, calling cancel() provides a
* guarantee that the operation will not be run.
*/
DelayedOperation.prototype.cancel = function (reason) {
if (this.timerHandle !== null) {
this.clearTimeout();
this.deferred.reject(new FirestoreError(Code.CANCELLED, 'Operation cancelled' + (reason ? ': ' + reason : '')));
}
};
DelayedOperation.prototype.handleDelayElapsed = function () {
var _this = this;
this.asyncQueue.enqueue(function () {
if (_this.timerHandle !== null) {
_this.clearTimeout();
return _this.op().then(function (result) {
return _this.deferred.resolve(result);
});
}
else {
return Promise.resolve();
}
});
};
DelayedOperation.prototype.clearTimeout = function () {
if (this.timerHandle !== null) {
this.removalCallback(this);
clearTimeout(this.timerHandle);
this.timerHandle = null;
}
};
return DelayedOperation;
}());
var AsyncQueue = /** @class */ (function () {
function AsyncQueue() {
// The last promise in the queue.
this.tail = Promise.resolve();
// Operations scheduled to be queued in the future. Operations are
// automatically removed after they are run or canceled.
this.delayedOperations = [];
// Flag set while there's an outstanding AsyncQueue operation, used for
// assertion sanity-checks.
this.operationInProgress = false;
}
/**
* Adds a new operation to the queue. Returns a promise that will be resolved
* when the promise returned by the new operation is (with its value).
*/
AsyncQueue.prototype.enqueue = function (op) {
var _this = this;
this.verifyNotFailed();
var newTail = this.tail.then(function () {
_this.operationInProgress = true;
return op()
.catch(function (error) {
_this.failure = error;
_this.operationInProgress = false;
var message = error.stack || error.message || '';
log.error('INTERNAL UNHANDLED ERROR: ', message);
// Escape the promise chain and throw the error globally so that
// e.g. any global crash reporting library detects and reports it.
// (but not for simulated errors in our tests since this breaks mocha)
if (message.indexOf('Firestore Test Simulated Error') < 0) {
setTimeout(function () {
throw error;
}, 0);
}
// Re-throw the error so that this.tail becomes a rejected Promise and
// all further attempts to chain (via .then) will just short-circuit
// and return the rejected Promise.
throw error;
})
.then(function (result) {
_this.operationInProgress = false;
return result;
});
});
this.tail = newTail;
return newTail;
};
/**
* Schedules an operation to be queued on the AsyncQueue once the specified
* `delayMs` has elapsed. The returned CancelablePromise can be used to cancel
* the operation prior to its running.
*/
AsyncQueue.prototype.enqueueAfterDelay = function (timerId, delayMs, op) {
var _this = this;
this.verifyNotFailed();
// While not necessarily harmful, we currently don't expect to have multiple
// ops with the same timer id in the queue, so defensively reject them.
assert(!this.containsDelayedOperation(timerId), "Attempted to schedule multiple operations with timer id " + timerId + ".");
var delayedOp = DelayedOperation.createAndSchedule(this, timerId, delayMs, op, function (op) { return _this.removeDelayedOperation(op); });
this.delayedOperations.push(delayedOp);
return delayedOp;
};
AsyncQueue.prototype.verifyNotFailed = function () {
if (this.failure) |
};
/**
* Verifies there's an operation currently in-progress on the AsyncQueue.
* Unfortunately we can't verify that the running code is in the promise chain
* of that operation, so this isn't a foolproof check, but it should be enough
* to catch some bugs.
*/
AsyncQueue.prototype.verifyOperationInProgress = function () {
assert(this.operationInProgress, 'verifyOpInProgress() called when no op in progress on this queue.');
};
/**
* Waits until all currently queued tasks are finished executing. Delayed
* operations are not run.
*/
AsyncQueue.prototype.drain = function () {
return this.enqueue(function () { return Promise.resolve(); });
};
/**
* For Tests: Determine if a delayed operation with a particular TimerId
* exists.
*/
AsyncQueue.prototype.containsDelayedOperation = function (timerId) {
return this.delayedOperations.findIndex(function (op) { return op.timerId === timerId; }) >= 0;
};
/**
* For Tests: Runs some or all delayed operations early.
*
* @param lastTimerId Delayed operations up to and including this TimerId will
* be drained. Throws if no such operation exists. Pass Timer | {
fail('AsyncQueue is already failed: ' +
(this.failure.stack || this.failure.message));
} | conditional_block |
async_queue.js | as log from './log';
import { Deferred } from './promise';
import { Code, FirestoreError } from './error';
/**
* Wellknown "timer" IDs used when scheduling delayed operations on the
* AsyncQueue. These IDs can then be used from tests to check for the presence
* of operations or to run them early.
*
* The string values are used when encoding these timer IDs in JSON spec tests.
*/
export var TimerId;
(function (TimerId) {
/** All can be used with runDelayedOperationsEarly() to run all timers. */
TimerId["All"] = "all";
/**
* The following 4 timers are used in persistent_stream.ts for the listen and
* write streams. The "Idle" timer is used to close the stream due to
* inactivity. The "ConnectionBackoff" timer is used to restart a stream once
* the appropriate backoff delay has elapsed.
*/
TimerId["ListenStreamIdle"] = "listen_stream_idle";
TimerId["ListenStreamConnectionBackoff"] = "listen_stream_connection_backoff";
TimerId["WriteStreamIdle"] = "write_stream_idle";
TimerId["WriteStreamConnectionBackoff"] = "write_stream_connection_backoff";
/**
* A timer used in online_state_tracker.ts to transition from
* OnlineState.Unknown to Offline after a set timeout, rather than waiting
* indefinitely for success or failure.
*/
TimerId["OnlineStateTimeout"] = "online_state_timeout";
})(TimerId || (TimerId = {}));
/**
* Represents an operation scheduled to be run in the future on an AsyncQueue.
*
* It is created via DelayedOperation.createAndSchedule().
*
* Supports cancellation (via cancel()) and early execution (via skipDelay()).
*/
var DelayedOperation = /** @class */ (function () {
function DelayedOperation(asyncQueue, timerId, targetTimeMs, op, removalCallback) {
this.asyncQueue = asyncQueue;
this.timerId = timerId;
this.targetTimeMs = targetTimeMs;
this.op = op;
this.removalCallback = removalCallback;
this.deferred = new Deferred();
this.then = this.deferred.promise.then.bind(this.deferred.promise);
this.catch = this.deferred.promise.catch.bind(this.deferred.promise);
// It's normal for the deferred promise to be canceled (due to cancellation)
// and so we attach a dummy catch callback to avoid
// 'UnhandledPromiseRejectionWarning' log spam.
this.deferred.promise.catch(function (err) { });
}
/**
* Creates and returns a DelayedOperation that has been scheduled to be
* executed on the provided asyncQueue after the provided delayMs.
*
* @param asyncQueue The queue to schedule the operation on.
* @param id A Timer ID identifying the type of operation this is.
* @param delayMs The delay (ms) before the operation should be scheduled.
* @param op The operation to run.
* @param removalCallback A callback to be called synchronously once the
* operation is executed or canceled, notifying the AsyncQueue to remove it
* from its delayedOperations list.
* PORTING NOTE: This exists to prevent making removeDelayedOperation() and
* the DelayedOperation class public.
*/
DelayedOperation.createAndSchedule = function (asyncQueue, timerId, delayMs, op, removalCallback) {
var targetTime = Date.now() + delayMs;
var delayedOp = new DelayedOperation(asyncQueue, timerId, targetTime, op, removalCallback);
delayedOp.start(delayMs);
return delayedOp;
};
/**
* Starts the timer. This is called immediately after construction by
* createAndSchedule().
*/
DelayedOperation.prototype.start = function (delayMs) {
var _this = this;
this.timerHandle = setTimeout(function () { return _this.handleDelayElapsed(); }, delayMs);
};
/**
* Queues the operation to run immediately (if it hasn't already been run or
* canceled).
*/
DelayedOperation.prototype.skipDelay = function () {
return this.handleDelayElapsed();
};
/**
* Cancels the operation if it hasn't already been executed or canceled. The
* promise will be rejected.
*
* As long as the operation has not yet been run, calling cancel() provides a
* guarantee that the operation will not be run.
*/
DelayedOperation.prototype.cancel = function (reason) {
if (this.timerHandle !== null) {
this.clearTimeout();
this.deferred.reject(new FirestoreError(Code.CANCELLED, 'Operation cancelled' + (reason ? ': ' + reason : '')));
}
};
DelayedOperation.prototype.handleDelayElapsed = function () {
var _this = this;
this.asyncQueue.enqueue(function () {
if (_this.timerHandle !== null) {
_this.clearTimeout();
return _this.op().then(function (result) {
return _this.deferred.resolve(result);
});
}
else {
return Promise.resolve();
}
});
};
DelayedOperation.prototype.clearTimeout = function () {
if (this.timerHandle !== null) {
this.removalCallback(this);
clearTimeout(this.timerHandle);
this.timerHandle = null;
}
};
return DelayedOperation;
}());
var AsyncQueue = /** @class */ (function () { | // Operations scheduled to be queued in the future. Operations are
// automatically removed after they are run or canceled.
this.delayedOperations = [];
// Flag set while there's an outstanding AsyncQueue operation, used for
// assertion sanity-checks.
this.operationInProgress = false;
}
/**
* Adds a new operation to the queue. Returns a promise that will be resolved
* when the promise returned by the new operation is (with its value).
*/
AsyncQueue.prototype.enqueue = function (op) {
var _this = this;
this.verifyNotFailed();
var newTail = this.tail.then(function () {
_this.operationInProgress = true;
return op()
.catch(function (error) {
_this.failure = error;
_this.operationInProgress = false;
var message = error.stack || error.message || '';
log.error('INTERNAL UNHANDLED ERROR: ', message);
// Escape the promise chain and throw the error globally so that
// e.g. any global crash reporting library detects and reports it.
// (but not for simulated errors in our tests since this breaks mocha)
if (message.indexOf('Firestore Test Simulated Error') < 0) {
setTimeout(function () {
throw error;
}, 0);
}
// Re-throw the error so that this.tail becomes a rejected Promise and
// all further attempts to chain (via .then) will just short-circuit
// and return the rejected Promise.
throw error;
})
.then(function (result) {
_this.operationInProgress = false;
return result;
});
});
this.tail = newTail;
return newTail;
};
/**
* Schedules an operation to be queued on the AsyncQueue once the specified
* `delayMs` has elapsed. The returned CancelablePromise can be used to cancel
* the operation prior to its running.
*/
AsyncQueue.prototype.enqueueAfterDelay = function (timerId, delayMs, op) {
var _this = this;
this.verifyNotFailed();
// While not necessarily harmful, we currently don't expect to have multiple
// ops with the same timer id in the queue, so defensively reject them.
assert(!this.containsDelayedOperation(timerId), "Attempted to schedule multiple operations with timer id " + timerId + ".");
var delayedOp = DelayedOperation.createAndSchedule(this, timerId, delayMs, op, function (op) { return _this.removeDelayedOperation(op); });
this.delayedOperations.push(delayedOp);
return delayedOp;
};
AsyncQueue.prototype.verifyNotFailed = function () {
if (this.failure) {
fail('AsyncQueue is already failed: ' +
(this.failure.stack || this.failure.message));
}
};
/**
* Verifies there's an operation currently in-progress on the AsyncQueue.
* Unfortunately we can't verify that the running code is in the promise chain
* of that operation, so this isn't a foolproof check, but it should be enough
* to catch some bugs.
*/
AsyncQueue.prototype.verifyOperationInProgress = function () {
assert(this.operationInProgress, 'verifyOpInProgress() called when no op in progress on this queue.');
};
/**
* Waits until all currently queued tasks are finished executing. Delayed
* operations are not run.
*/
AsyncQueue.prototype.drain = function () {
return this.enqueue(function () { return Promise.resolve(); });
};
/**
* For Tests: Determine if a delayed operation with a particular TimerId
* exists.
*/
AsyncQueue.prototype.containsDelayedOperation = function (timerId) {
return this.delayedOperations.findIndex(function (op) { return op.timerId === timerId; }) >= 0;
};
/**
* For Tests: Runs some or all delayed operations early.
*
* @param lastTimerId Delayed operations up to and including this TimerId will
* be drained. Throws if no such operation exists. Pass TimerId | function AsyncQueue() {
// The last promise in the queue.
this.tail = Promise.resolve(); | random_line_split |
async_queue.js | log from './log';
import { Deferred } from './promise';
import { Code, FirestoreError } from './error';
/**
* Wellknown "timer" IDs used when scheduling delayed operations on the
* AsyncQueue. These IDs can then be used from tests to check for the presence
* of operations or to run them early.
*
* The string values are used when encoding these timer IDs in JSON spec tests.
*/
export var TimerId;
(function (TimerId) {
/** All can be used with runDelayedOperationsEarly() to run all timers. */
TimerId["All"] = "all";
/**
* The following 4 timers are used in persistent_stream.ts for the listen and
* write streams. The "Idle" timer is used to close the stream due to
* inactivity. The "ConnectionBackoff" timer is used to restart a stream once
* the appropriate backoff delay has elapsed.
*/
TimerId["ListenStreamIdle"] = "listen_stream_idle";
TimerId["ListenStreamConnectionBackoff"] = "listen_stream_connection_backoff";
TimerId["WriteStreamIdle"] = "write_stream_idle";
TimerId["WriteStreamConnectionBackoff"] = "write_stream_connection_backoff";
/**
* A timer used in online_state_tracker.ts to transition from
* OnlineState.Unknown to Offline after a set timeout, rather than waiting
* indefinitely for success or failure.
*/
TimerId["OnlineStateTimeout"] = "online_state_timeout";
})(TimerId || (TimerId = {}));
/**
* Represents an operation scheduled to be run in the future on an AsyncQueue.
*
* It is created via DelayedOperation.createAndSchedule().
*
* Supports cancellation (via cancel()) and early execution (via skipDelay()).
*/
var DelayedOperation = /** @class */ (function () {
function DelayedOperation(asyncQueue, timerId, targetTimeMs, op, removalCallback) {
this.asyncQueue = asyncQueue;
this.timerId = timerId;
this.targetTimeMs = targetTimeMs;
this.op = op;
this.removalCallback = removalCallback;
this.deferred = new Deferred();
this.then = this.deferred.promise.then.bind(this.deferred.promise);
this.catch = this.deferred.promise.catch.bind(this.deferred.promise);
// It's normal for the deferred promise to be canceled (due to cancellation)
// and so we attach a dummy catch callback to avoid
// 'UnhandledPromiseRejectionWarning' log spam.
this.deferred.promise.catch(function (err) { });
}
/**
* Creates and returns a DelayedOperation that has been scheduled to be
* executed on the provided asyncQueue after the provided delayMs.
*
* @param asyncQueue The queue to schedule the operation on.
* @param id A Timer ID identifying the type of operation this is.
* @param delayMs The delay (ms) before the operation should be scheduled.
* @param op The operation to run.
* @param removalCallback A callback to be called synchronously once the
* operation is executed or canceled, notifying the AsyncQueue to remove it
* from its delayedOperations list.
* PORTING NOTE: This exists to prevent making removeDelayedOperation() and
* the DelayedOperation class public.
*/
DelayedOperation.createAndSchedule = function (asyncQueue, timerId, delayMs, op, removalCallback) {
var targetTime = Date.now() + delayMs;
var delayedOp = new DelayedOperation(asyncQueue, timerId, targetTime, op, removalCallback);
delayedOp.start(delayMs);
return delayedOp;
};
/**
* Starts the timer. This is called immediately after construction by
* createAndSchedule().
*/
DelayedOperation.prototype.start = function (delayMs) {
var _this = this;
this.timerHandle = setTimeout(function () { return _this.handleDelayElapsed(); }, delayMs);
};
/**
* Queues the operation to run immediately (if it hasn't already been run or
* canceled).
*/
DelayedOperation.prototype.skipDelay = function () {
return this.handleDelayElapsed();
};
/**
* Cancels the operation if it hasn't already been executed or canceled. The
* promise will be rejected.
*
* As long as the operation has not yet been run, calling cancel() provides a
* guarantee that the operation will not be run.
*/
DelayedOperation.prototype.cancel = function (reason) {
if (this.timerHandle !== null) {
this.clearTimeout();
this.deferred.reject(new FirestoreError(Code.CANCELLED, 'Operation cancelled' + (reason ? ': ' + reason : '')));
}
};
DelayedOperation.prototype.handleDelayElapsed = function () {
var _this = this;
this.asyncQueue.enqueue(function () {
if (_this.timerHandle !== null) {
_this.clearTimeout();
return _this.op().then(function (result) {
return _this.deferred.resolve(result);
});
}
else {
return Promise.resolve();
}
});
};
DelayedOperation.prototype.clearTimeout = function () {
if (this.timerHandle !== null) {
this.removalCallback(this);
clearTimeout(this.timerHandle);
this.timerHandle = null;
}
};
return DelayedOperation;
}());
var AsyncQueue = /** @class */ (function () {
function | () {
// The last promise in the queue.
this.tail = Promise.resolve();
// Operations scheduled to be queued in the future. Operations are
// automatically removed after they are run or canceled.
this.delayedOperations = [];
// Flag set while there's an outstanding AsyncQueue operation, used for
// assertion sanity-checks.
this.operationInProgress = false;
}
/**
* Adds a new operation to the queue. Returns a promise that will be resolved
* when the promise returned by the new operation is (with its value).
*/
AsyncQueue.prototype.enqueue = function (op) {
var _this = this;
this.verifyNotFailed();
var newTail = this.tail.then(function () {
_this.operationInProgress = true;
return op()
.catch(function (error) {
_this.failure = error;
_this.operationInProgress = false;
var message = error.stack || error.message || '';
log.error('INTERNAL UNHANDLED ERROR: ', message);
// Escape the promise chain and throw the error globally so that
// e.g. any global crash reporting library detects and reports it.
// (but not for simulated errors in our tests since this breaks mocha)
if (message.indexOf('Firestore Test Simulated Error') < 0) {
setTimeout(function () {
throw error;
}, 0);
}
// Re-throw the error so that this.tail becomes a rejected Promise and
// all further attempts to chain (via .then) will just short-circuit
// and return the rejected Promise.
throw error;
})
.then(function (result) {
_this.operationInProgress = false;
return result;
});
});
this.tail = newTail;
return newTail;
};
/**
* Schedules an operation to be queued on the AsyncQueue once the specified
* `delayMs` has elapsed. The returned CancelablePromise can be used to cancel
* the operation prior to its running.
*/
AsyncQueue.prototype.enqueueAfterDelay = function (timerId, delayMs, op) {
var _this = this;
this.verifyNotFailed();
// While not necessarily harmful, we currently don't expect to have multiple
// ops with the same timer id in the queue, so defensively reject them.
assert(!this.containsDelayedOperation(timerId), "Attempted to schedule multiple operations with timer id " + timerId + ".");
var delayedOp = DelayedOperation.createAndSchedule(this, timerId, delayMs, op, function (op) { return _this.removeDelayedOperation(op); });
this.delayedOperations.push(delayedOp);
return delayedOp;
};
AsyncQueue.prototype.verifyNotFailed = function () {
if (this.failure) {
fail('AsyncQueue is already failed: ' +
(this.failure.stack || this.failure.message));
}
};
/**
* Verifies there's an operation currently in-progress on the AsyncQueue.
* Unfortunately we can't verify that the running code is in the promise chain
* of that operation, so this isn't a foolproof check, but it should be enough
* to catch some bugs.
*/
AsyncQueue.prototype.verifyOperationInProgress = function () {
assert(this.operationInProgress, 'verifyOpInProgress() called when no op in progress on this queue.');
};
/**
* Waits until all currently queued tasks are finished executing. Delayed
* operations are not run.
*/
AsyncQueue.prototype.drain = function () {
return this.enqueue(function () { return Promise.resolve(); });
};
/**
* For Tests: Determine if a delayed operation with a particular TimerId
* exists.
*/
AsyncQueue.prototype.containsDelayedOperation = function (timerId) {
return this.delayedOperations.findIndex(function (op) { return op.timerId === timerId; }) >= 0;
};
/**
* For Tests: Runs some or all delayed operations early.
*
* @param lastTimerId Delayed operations up to and including this TimerId will
* be drained. Throws if no such operation exists. Pass TimerId | AsyncQueue | identifier_name |
daemon.go | , err
}
// NB we could use the messages too if we decide to change the
// signature of the API to include it.
revs := make([]string, len(commits))
for i, commit := range commits {
revs[i] = commit.Revision
}
return revs, nil
}
func (d *Daemon) GitRepoConfig(ctx context.Context, regenerate bool) (v6.GitConfig, error) {
publicSSHKey, err := d.Cluster.PublicSSHKey(regenerate)
if err != nil {
return v6.GitConfig{}, err
}
origin := d.Repo.Origin()
// Sanitize the URL before sharing it
origin.URL = origin.SafeURL()
status, err := d.Repo.Status()
gitConfigError := ""
if err != nil {
gitConfigError = err.Error()
}
path := ""
if len(d.GitConfig.Paths) > 0 {
path = strings.Join(d.GitConfig.Paths, ",")
}
return v6.GitConfig{
Remote: v6.GitRemoteConfig{
Remote: origin,
Branch: d.GitConfig.Branch,
Path: path,
},
PublicSSHKey: publicSSHKey,
Status: status,
Error: gitConfigError,
}, nil
}
// Non-api.Server methods
// WithWorkingClone applies the given func to a fresh, writable clone
// of the git repo, and cleans it up afterwards. This may return an
// error in the case that the repo is read-only; use
// `WithReadonlyClone` if you only need to read the files in the git
// repo.
func (d *Daemon) WithWorkingClone(ctx context.Context, fn func(*git.Checkout) error) error {
co, err := d.Repo.Clone(ctx, d.GitConfig)
if err != nil {
return err
}
defer func() {
if err := co.Clean(); err != nil {
d.Logger.Log("error", fmt.Sprintf("cannot clean working clone: %s", err))
}
}()
if d.GitSecretEnabled {
if err := co.SecretUnseal(ctx); err != nil {
return err
}
}
return fn(co)
}
// WithReadonlyClone applies the given func to an export of the
// current revision of the git repo. Use this if you just need to
// consult the files.
func (d *Daemon) WithReadonlyClone(ctx context.Context, fn func(*git.Export) error) error {
head, err := d.Repo.BranchHead(ctx)
if err != nil {
return err
}
co, err := d.Repo.Export(ctx, head)
if err != nil {
return err
}
defer func() {
if err := co.Clean(); err != nil {
d.Logger.Log("error", fmt.Sprintf("cannot read-only clone: %s", err))
}
}()
if d.GitSecretEnabled {
if err := co.SecretUnseal(ctx); err != nil {
return err
}
}
return fn(co)
}
func (d *Daemon) LogEvent(ev event.Event) error {
if d.EventWriter == nil {
d.Logger.Log("event", ev, "logupstream", "false")
return nil
}
d.Logger.Log("event", ev, "logupstream", "true")
return d.EventWriter.LogEvent(ev)
}
// vvv helpers vvv
func containers2containers(cs []resource.Container) []v6.Container {
res := make([]v6.Container, len(cs))
for i, c := range cs {
res[i] = v6.Container{
Name: c.Name,
Current: image.Info{
ID: c.Image,
},
}
}
return res
}
// Much of the time, images will be sorted by timestamp. At marginal
// cost, we cache the result of sorting, so that other uses of the
// image can reuse it (if they are also sorted by timestamp).
type sortedImageRepo struct {
images []image.Info
imagesByTag map[string]image.Info
imagesSortedByCreated update.SortedImageInfos
}
func (r *sortedImageRepo) SortedImages(p policy.Pattern) update.SortedImageInfos {
// RequiresTimestamp means "ordered by timestamp" (it's required
// because no comparison to see which image is newer can be made
// if a timestamp is missing)
if p.RequiresTimestamp() {
if r.imagesSortedByCreated == nil {
r.imagesSortedByCreated = update.SortImages(r.images, p)
}
return r.imagesSortedByCreated
}
return update.SortImages(r.images, p)
}
func (r *sortedImageRepo) Images() []image.Info {
return r.images
}
func (r *sortedImageRepo) ImageByTag(tag string) image.Info {
return r.imagesByTag[tag]
}
func getWorkloadContainers(workload cluster.Workload, imageRepos update.ImageRepos, resource resource.Resource, fields []string) (res []v6.Container, err error) {
repos := map[image.Name]*sortedImageRepo{}
for _, c := range workload.ContainersOrNil() {
imageName := c.Image.Name
var policies policy.Set
if resource != nil {
policies = resource.Policies()
}
tagPattern := policy.GetTagPattern(policies, c.Name)
imageRepo, ok := repos[imageName]
if !ok {
repoMetadata := imageRepos.GetRepositoryMetadata(imageName)
var images []image.Info
// Build images, tolerating tags with missing metadata
for _, tag := range repoMetadata.Tags {
info, ok := repoMetadata.Images[tag]
if !ok {
info = image.Info{
ID: image.Ref{Tag: tag},
}
}
images = append(images, info)
}
imageRepo = &sortedImageRepo{images: images, imagesByTag: repoMetadata.Images}
repos[imageName] = imageRepo
}
currentImage := imageRepo.ImageByTag(c.Image.Tag)
container, err := v6.NewContainer(c.Name, imageRepo, currentImage, tagPattern, fields)
if err != nil {
return res, err
}
res = append(res, container)
}
return res, nil
}
func policyCommitMessage(us resource.PolicyUpdates, cause update.Cause) string {
// shortcut, since we want roughly the same information
events := policyEvents(us, time.Now())
commitMsg := &bytes.Buffer{}
prefix := "- "
switch {
case cause.Message != "":
fmt.Fprintf(commitMsg, "%s\n\n", cause.Message)
case len(events) > 1:
fmt.Fprintf(commitMsg, "Updated workload policies\n\n")
default:
prefix = ""
}
for _, event := range events {
fmt.Fprintf(commitMsg, "%s%v\n", prefix, event)
}
return commitMsg.String()
}
// policyEvents builds a map of events (by type), for all the events in this set of
// updates. There will be one event per type, containing all workload ids
// affected by that event. e.g. all automated workload will share an event.
func policyEvents(us resource.PolicyUpdates, now time.Time) map[string]event.Event {
eventsByType := map[string]event.Event{}
for workloadID, update := range us {
for _, eventType := range policyEventTypes(update) {
e, ok := eventsByType[eventType]
if !ok {
e = event.Event{
ServiceIDs: []resource.ID{},
Type: eventType,
StartedAt: now,
EndedAt: now,
LogLevel: event.LogLevelInfo,
}
}
e.ServiceIDs = append(e.ServiceIDs, workloadID)
eventsByType[eventType] = e
}
}
return eventsByType
}
// policyEventTypes is a deduped list of all event types this update contains
func policyEventTypes(u resource.PolicyUpdate) []string {
types := map[string]struct{}{}
for p := range u.Add {
switch {
case p == policy.Automated:
types[event.EventAutomate] = struct{}{}
case p == policy.Locked:
types[event.EventLock] = struct{}{}
default:
types[event.EventUpdatePolicy] = struct{}{}
}
}
for p := range u.Remove {
switch {
case p == policy.Automated:
types[event.EventDeautomate] = struct{}{}
case p == policy.Locked:
types[event.EventUnlock] = struct{}{}
default:
types[event.EventUpdatePolicy] = struct{}{}
}
}
var result []string
for t := range types {
result = append(result, t)
}
sort.Strings(result)
return result
}
// latestValidRevision returns the HEAD of the configured branch if it
// has a valid signature, or the SHA of the latest valid commit it
// could find plus the invalid commit thereafter.
//
// Signature validation happens for commits between the revision of the
// sync tag and the HEAD, after the signature of the sync tag itself
// has been validated, as the branch can not be trusted when the tag
// originates from an unknown source.
//
// In case the signature of the tag can not be verified, or it points
// towards a revision we can not get a commit range for, it returns an
// error.
func | latestValidRevision | identifier_name | |
daemon.go |
var res []v6.ControllerStatus
for _, workload := range clusterWorkloads {
readOnly := v6.ReadOnlyOK
repoIsReadonly := d.Repo.Readonly()
var policies policy.Set
if resource, ok := resources[workload.ID.String()]; ok {
policies = resource.Policies()
}
switch {
case policies == nil:
readOnly = missingReason
case repoIsReadonly:
readOnly = v6.ReadOnlyROMode
case workload.IsSystem:
readOnly = v6.ReadOnlySystem
}
var syncError string
if workload.SyncError != nil {
syncError = workload.SyncError.Error()
}
res = append(res, v6.ControllerStatus{
ID: workload.ID,
Containers: containers2containers(workload.ContainersOrNil()),
ReadOnly: readOnly,
Status: workload.Status,
Rollout: workload.Rollout,
SyncError: syncError,
Antecedent: workload.Antecedent,
Labels: workload.Labels,
Automated: policies.Has(policy.Automated),
Locked: policies.Has(policy.Locked),
Ignore: policies.Has(policy.Ignore),
Policies: policies.ToStringMap(),
})
}
return res, nil
}
type clusterContainers []cluster.Workload
func (cs clusterContainers) Len() int {
return len(cs)
}
func (cs clusterContainers) Containers(i int) []resource.Container {
return cs[i].ContainersOrNil()
}
// ListImages - deprecated from v10, lists the images available for set of workloads
func (d *Daemon) ListImages(ctx context.Context, spec update.ResourceSpec) ([]v6.ImageStatus, error) {
return d.ListImagesWithOptions(ctx, v10.ListImagesOptions{Spec: spec})
}
// ListImagesWithOptions lists the images available for set of workloads
func (d *Daemon) ListImagesWithOptions(ctx context.Context, opts v10.ListImagesOptions) ([]v6.ImageStatus, error) {
if opts.Namespace != "" && opts.Spec != update.ResourceSpecAll {
return nil, errors.New("cannot filter by 'namespace' and 'workload' at the same time")
}
var workloads []cluster.Workload
var err error
if opts.Spec != update.ResourceSpecAll {
id, err := opts.Spec.AsID()
if err != nil {
return nil, errors.Wrap(err, "treating workload spec as ID")
}
workloads, err = d.Cluster.SomeWorkloads(ctx, []resource.ID{id})
if err != nil {
return nil, errors.Wrap(err, "getting some workloads")
}
} else {
workloads, err = d.Cluster.AllWorkloads(ctx, opts.Namespace)
if err != nil {
return nil, errors.Wrap(err, "getting all workloads")
}
}
resources, _, err := d.getResources(ctx)
if err != nil {
return nil, err
}
imageRepos, err := update.FetchImageRepos(d.Registry, clusterContainers(workloads), d.Logger)
if err != nil {
return nil, errors.Wrap(err, "getting images for workloads")
}
var res []v6.ImageStatus
for _, workload := range workloads {
workloadContainers, err := getWorkloadContainers(workload, imageRepos, resources[workload.ID.String()], opts.OverrideContainerFields)
if err != nil {
return nil, err
}
res = append(res, v6.ImageStatus{
ID: workload.ID,
Containers: workloadContainers,
})
}
return res, nil
}
// jobFunc is a type for procedures that the daemon will execute in a job
type jobFunc func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error)
// updateFunc is a type for procedures that operate on a git checkout, to be run in a job
type updateFunc func(ctx context.Context, jobID job.ID, working *git.Checkout, logger log.Logger) (job.Result, error)
// makeJobFromUpdate turns an updateFunc into a jobFunc that will run
// the update with a fresh clone, and log the result as an event.
func (d *Daemon) makeJobFromUpdate(update updateFunc) jobFunc {
return func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error) {
var result job.Result
err := d.WithWorkingClone(ctx, func(working *git.Checkout) error {
var err error
if err = verifyWorkingRepo(ctx, d.Repo, working, d.SyncState, d.GitVerifySignaturesMode); d.GitVerifySignaturesMode != sync.VerifySignaturesModeNone && err != nil {
return err
}
result, err = update(ctx, jobID, working, logger)
if err != nil {
return err
}
return nil
})
if err != nil {
return result, err
}
return result, nil
}
}
// executeJob runs a job func and keeps track of its status, so the
// daemon can report it when asked.
func (d *Daemon) executeJob(id job.ID, do jobFunc, logger log.Logger) (job.Result, error) {
ctx, cancel := context.WithTimeout(context.Background(), d.SyncTimeout)
defer cancel()
d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusRunning})
result, err := do(ctx, id, logger)
if err != nil {
d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusFailed, Err: err.Error(), Result: result})
return result, err
}
d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusSucceeded, Result: result})
return result, nil
}
// makeLoggingFunc takes a jobFunc and returns a jobFunc that will log
// a commit event with the result.
func (d *Daemon) makeLoggingJobFunc(f jobFunc) jobFunc {
return func(ctx context.Context, id job.ID, logger log.Logger) (job.Result, error) {
started := time.Now().UTC()
result, err := f(ctx, id, logger)
if err != nil {
return result, err
}
logger.Log("revision", result.Revision)
if result.Revision != "" {
var workloadIDs []resource.ID
for id, result := range result.Result {
if result.Status == update.ReleaseStatusSuccess {
workloadIDs = append(workloadIDs, id)
}
}
metadata := &event.CommitEventMetadata{
Revision: result.Revision,
Spec: result.Spec,
Result: result.Result,
}
return result, d.LogEvent(event.Event{
ServiceIDs: workloadIDs,
Type: event.EventCommit,
StartedAt: started,
EndedAt: started,
LogLevel: event.LogLevelInfo,
Metadata: metadata,
})
}
return result, nil
}
}
// queueJob queues a job func to be executed.
func (d *Daemon) queueJob(do jobFunc) job.ID {
id := job.ID(guid.New())
enqueuedAt := time.Now()
d.Jobs.Enqueue(&job.Job{
ID: id,
Do: func(logger log.Logger) error {
queueDuration.Observe(time.Since(enqueuedAt).Seconds())
_, err := d.executeJob(id, do, logger)
if err != nil {
return err
}
return nil
},
})
queueLength.Set(float64(d.Jobs.Len()))
d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusQueued})
return id
}
// Apply the desired changes to the config files
func (d *Daemon) UpdateManifests(ctx context.Context, spec update.Spec) (job.ID, error) {
var id job.ID
if spec.Type == "" {
return id, errors.New("no type in update spec")
}
switch s := spec.Spec.(type) {
case release.Changes:
if s.ReleaseKind() == update.ReleaseKindPlan {
id := job.ID(guid.New())
_, err := d.executeJob(id, d.makeJobFromUpdate(d.release(spec, s)), d.Logger)
return id, err
}
return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.release(spec, s)))), nil
case resource.PolicyUpdates:
return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.updatePolicies(spec, s)))), nil
case update.ManualSync:
return d.queueJob(d.sync()), nil
default:
return id, fmt.Errorf(`unknown update type "%s"`, spec.Type)
}
}
func (d *Daemon) sync() jobFunc {
return func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error) {
var result job.Result
ctx, cancel := context.WithTimeout(ctx, d.SyncTimeout)
defer cancel()
err := d.Repo.Refresh(ctx)
if err != nil {
return result, err
}
head, err := d.Repo.BranchHead(ctx)
if err != nil {
return result, err
}
if d.GitVerifySignaturesMode != sync.VerifySignaturesModeNone {
var latestValidRev string
if latestValidRev, | {
return nil, err
} | conditional_block | |
daemon.go | Ask the daemon how far it's got applying things; in particular, is it
// past the given commit? Return the list of commits between where
// we have applied (the sync tag) and the ref given, inclusive. E.g., if you send HEAD,
// you'll get all the commits yet to be applied. If you send a hash
// and it's applied at or _past_ it, you'll get an empty list.
func (d *Daemon) SyncStatus(ctx context.Context, commitRef string) ([]string, error) {
syncMarkerRevision, err := d.SyncState.GetRevision(ctx)
if err != nil {
return nil, err
}
commits, err := d.Repo.CommitsBetween(ctx, syncMarkerRevision, commitRef, false, d.GitConfig.Paths...)
if err != nil {
return nil, err
}
// NB we could use the messages too if we decide to change the
// signature of the API to include it.
revs := make([]string, len(commits))
for i, commit := range commits {
revs[i] = commit.Revision
}
return revs, nil
}
func (d *Daemon) GitRepoConfig(ctx context.Context, regenerate bool) (v6.GitConfig, error) {
publicSSHKey, err := d.Cluster.PublicSSHKey(regenerate)
if err != nil {
return v6.GitConfig{}, err
}
origin := d.Repo.Origin()
// Sanitize the URL before sharing it
origin.URL = origin.SafeURL()
status, err := d.Repo.Status()
gitConfigError := ""
if err != nil {
gitConfigError = err.Error()
}
path := ""
if len(d.GitConfig.Paths) > 0 {
path = strings.Join(d.GitConfig.Paths, ",")
}
return v6.GitConfig{
Remote: v6.GitRemoteConfig{
Remote: origin,
Branch: d.GitConfig.Branch,
Path: path,
},
PublicSSHKey: publicSSHKey,
Status: status,
Error: gitConfigError,
}, nil
}
// Non-api.Server methods
// WithWorkingClone applies the given func to a fresh, writable clone
// of the git repo, and cleans it up afterwards. This may return an
// error in the case that the repo is read-only; use
// `WithReadonlyClone` if you only need to read the files in the git
// repo.
func (d *Daemon) WithWorkingClone(ctx context.Context, fn func(*git.Checkout) error) error {
co, err := d.Repo.Clone(ctx, d.GitConfig)
if err != nil {
return err
}
defer func() {
if err := co.Clean(); err != nil {
d.Logger.Log("error", fmt.Sprintf("cannot clean working clone: %s", err))
}
}()
if d.GitSecretEnabled {
if err := co.SecretUnseal(ctx); err != nil {
return err
}
}
return fn(co)
}
// WithReadonlyClone applies the given func to an export of the
// current revision of the git repo. Use this if you just need to
// consult the files.
func (d *Daemon) WithReadonlyClone(ctx context.Context, fn func(*git.Export) error) error {
head, err := d.Repo.BranchHead(ctx)
if err != nil {
return err
}
co, err := d.Repo.Export(ctx, head)
if err != nil {
return err
}
defer func() {
if err := co.Clean(); err != nil {
d.Logger.Log("error", fmt.Sprintf("cannot read-only clone: %s", err))
}
}()
if d.GitSecretEnabled {
if err := co.SecretUnseal(ctx); err != nil {
return err
}
}
return fn(co)
}
func (d *Daemon) LogEvent(ev event.Event) error {
if d.EventWriter == nil {
d.Logger.Log("event", ev, "logupstream", "false")
return nil
}
d.Logger.Log("event", ev, "logupstream", "true")
return d.EventWriter.LogEvent(ev)
}
// vvv helpers vvv
func containers2containers(cs []resource.Container) []v6.Container {
res := make([]v6.Container, len(cs))
for i, c := range cs {
res[i] = v6.Container{
Name: c.Name,
Current: image.Info{
ID: c.Image,
},
}
}
return res
}
// Much of the time, images will be sorted by timestamp. At marginal
// cost, we cache the result of sorting, so that other uses of the
// image can reuse it (if they are also sorted by timestamp).
type sortedImageRepo struct {
images []image.Info
imagesByTag map[string]image.Info
imagesSortedByCreated update.SortedImageInfos
}
func (r *sortedImageRepo) SortedImages(p policy.Pattern) update.SortedImageInfos {
// RequiresTimestamp means "ordered by timestamp" (it's required
// because no comparison to see which image is newer can be made
// if a timestamp is missing)
if p.RequiresTimestamp() {
if r.imagesSortedByCreated == nil {
r.imagesSortedByCreated = update.SortImages(r.images, p)
}
return r.imagesSortedByCreated
}
return update.SortImages(r.images, p)
}
func (r *sortedImageRepo) Images() []image.Info {
return r.images
}
func (r *sortedImageRepo) ImageByTag(tag string) image.Info {
return r.imagesByTag[tag]
}
func getWorkloadContainers(workload cluster.Workload, imageRepos update.ImageRepos, resource resource.Resource, fields []string) (res []v6.Container, err error) {
repos := map[image.Name]*sortedImageRepo{}
for _, c := range workload.ContainersOrNil() {
imageName := c.Image.Name
var policies policy.Set
if resource != nil {
policies = resource.Policies()
}
tagPattern := policy.GetTagPattern(policies, c.Name)
imageRepo, ok := repos[imageName]
if !ok {
repoMetadata := imageRepos.GetRepositoryMetadata(imageName)
var images []image.Info
// Build images, tolerating tags with missing metadata
for _, tag := range repoMetadata.Tags {
info, ok := repoMetadata.Images[tag]
if !ok {
info = image.Info{
ID: image.Ref{Tag: tag},
}
}
images = append(images, info)
}
imageRepo = &sortedImageRepo{images: images, imagesByTag: repoMetadata.Images}
repos[imageName] = imageRepo
}
currentImage := imageRepo.ImageByTag(c.Image.Tag)
container, err := v6.NewContainer(c.Name, imageRepo, currentImage, tagPattern, fields)
if err != nil {
return res, err
}
res = append(res, container)
}
return res, nil
}
func policyCommitMessage(us resource.PolicyUpdates, cause update.Cause) string {
// shortcut, since we want roughly the same information
events := policyEvents(us, time.Now())
commitMsg := &bytes.Buffer{}
prefix := "- "
switch {
case cause.Message != "":
fmt.Fprintf(commitMsg, "%s\n\n", cause.Message)
case len(events) > 1:
fmt.Fprintf(commitMsg, "Updated workload policies\n\n")
default:
prefix = ""
}
for _, event := range events {
fmt.Fprintf(commitMsg, "%s%v\n", prefix, event)
}
return commitMsg.String()
}
// policyEvents builds a map of events (by type), for all the events in this set of
// updates. There will be one event per type, containing all workload ids
// affected by that event. e.g. all automated workload will share an event.
func policyEvents(us resource.PolicyUpdates, now time.Time) map[string]event.Event {
eventsByType := map[string]event.Event{}
for workloadID, update := range us {
for _, eventType := range policyEventTypes(update) {
e, ok := eventsByType[eventType]
if !ok {
e = event.Event{
ServiceIDs: []resource.ID{},
Type: eventType,
StartedAt: now,
EndedAt: now,
LogLevel: event.LogLevelInfo,
}
}
e.ServiceIDs = append(e.ServiceIDs, workloadID)
eventsByType[eventType] = e
}
}
return eventsByType
}
// policyEventTypes is a deduped list of all event types this update contains
func policyEventTypes(u resource.PolicyUpdate) []string | {
types := map[string]struct{}{}
for p := range u.Add {
switch {
case p == policy.Automated:
types[event.EventAutomate] = struct{}{}
case p == policy.Locked:
types[event.EventLock] = struct{}{}
default:
types[event.EventUpdatePolicy] = struct{}{}
}
}
for p := range u.Remove {
switch {
case p == policy.Automated:
types[event.EventDeautomate] = struct{}{}
case p == policy.Locked:
types[event.EventUnlock] = struct{}{}
default: | identifier_body | |
daemon.go | nil, errors.Wrap(err, "getting images for workloads")
}
var res []v6.ImageStatus
for _, workload := range workloads {
workloadContainers, err := getWorkloadContainers(workload, imageRepos, resources[workload.ID.String()], opts.OverrideContainerFields)
if err != nil {
return nil, err
}
res = append(res, v6.ImageStatus{
ID: workload.ID,
Containers: workloadContainers,
})
}
return res, nil
}
// jobFunc is a type for procedures that the daemon will execute in a job
type jobFunc func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error)
// updateFunc is a type for procedures that operate on a git checkout, to be run in a job
type updateFunc func(ctx context.Context, jobID job.ID, working *git.Checkout, logger log.Logger) (job.Result, error)
// makeJobFromUpdate turns an updateFunc into a jobFunc that will run
// the update with a fresh clone, and log the result as an event.
func (d *Daemon) makeJobFromUpdate(update updateFunc) jobFunc {
return func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error) {
var result job.Result
err := d.WithWorkingClone(ctx, func(working *git.Checkout) error {
var err error
if err = verifyWorkingRepo(ctx, d.Repo, working, d.SyncState, d.GitVerifySignaturesMode); d.GitVerifySignaturesMode != sync.VerifySignaturesModeNone && err != nil {
return err
}
result, err = update(ctx, jobID, working, logger)
if err != nil {
return err
}
return nil
})
if err != nil {
return result, err
}
return result, nil
}
}
// executeJob runs a job func and keeps track of its status, so the
// daemon can report it when asked.
func (d *Daemon) executeJob(id job.ID, do jobFunc, logger log.Logger) (job.Result, error) {
ctx, cancel := context.WithTimeout(context.Background(), d.SyncTimeout)
defer cancel()
d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusRunning})
result, err := do(ctx, id, logger)
if err != nil {
d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusFailed, Err: err.Error(), Result: result})
return result, err
}
d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusSucceeded, Result: result})
return result, nil
}
// makeLoggingFunc takes a jobFunc and returns a jobFunc that will log
// a commit event with the result.
func (d *Daemon) makeLoggingJobFunc(f jobFunc) jobFunc {
return func(ctx context.Context, id job.ID, logger log.Logger) (job.Result, error) {
started := time.Now().UTC()
result, err := f(ctx, id, logger)
if err != nil {
return result, err
}
logger.Log("revision", result.Revision)
if result.Revision != "" {
var workloadIDs []resource.ID
for id, result := range result.Result {
if result.Status == update.ReleaseStatusSuccess {
workloadIDs = append(workloadIDs, id)
}
}
metadata := &event.CommitEventMetadata{
Revision: result.Revision,
Spec: result.Spec,
Result: result.Result,
}
return result, d.LogEvent(event.Event{
ServiceIDs: workloadIDs,
Type: event.EventCommit,
StartedAt: started,
EndedAt: started,
LogLevel: event.LogLevelInfo,
Metadata: metadata,
})
}
return result, nil
}
}
// queueJob queues a job func to be executed.
func (d *Daemon) queueJob(do jobFunc) job.ID {
id := job.ID(guid.New())
enqueuedAt := time.Now()
d.Jobs.Enqueue(&job.Job{
ID: id,
Do: func(logger log.Logger) error {
queueDuration.Observe(time.Since(enqueuedAt).Seconds())
_, err := d.executeJob(id, do, logger)
if err != nil {
return err
}
return nil
},
})
queueLength.Set(float64(d.Jobs.Len()))
d.JobStatusCache.SetStatus(id, job.Status{StatusString: job.StatusQueued})
return id
}
// Apply the desired changes to the config files
func (d *Daemon) UpdateManifests(ctx context.Context, spec update.Spec) (job.ID, error) {
var id job.ID
if spec.Type == "" {
return id, errors.New("no type in update spec")
}
switch s := spec.Spec.(type) {
case release.Changes:
if s.ReleaseKind() == update.ReleaseKindPlan {
id := job.ID(guid.New())
_, err := d.executeJob(id, d.makeJobFromUpdate(d.release(spec, s)), d.Logger)
return id, err
}
return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.release(spec, s)))), nil
case resource.PolicyUpdates:
return d.queueJob(d.makeLoggingJobFunc(d.makeJobFromUpdate(d.updatePolicies(spec, s)))), nil
case update.ManualSync:
return d.queueJob(d.sync()), nil
default:
return id, fmt.Errorf(`unknown update type "%s"`, spec.Type)
}
}
func (d *Daemon) sync() jobFunc {
return func(ctx context.Context, jobID job.ID, logger log.Logger) (job.Result, error) {
var result job.Result
ctx, cancel := context.WithTimeout(ctx, d.SyncTimeout)
defer cancel() | if err != nil {
return result, err
}
head, err := d.Repo.BranchHead(ctx)
if err != nil {
return result, err
}
if d.GitVerifySignaturesMode != sync.VerifySignaturesModeNone {
var latestValidRev string
if latestValidRev, _, err = latestValidRevision(ctx, d.Repo, d.SyncState, d.GitVerifySignaturesMode); err != nil {
return result, err
} else if head != latestValidRev {
result.Revision = latestValidRev
return result, fmt.Errorf(
"The branch HEAD in the git repo is not verified, and fluxd is unable to sync to it. The last verified commit was %.8s. HEAD is %.8s.",
latestValidRev,
head,
)
}
}
result.Revision = head
return result, err
}
}
func (d *Daemon) updatePolicies(spec update.Spec, updates resource.PolicyUpdates) updateFunc {
return func(ctx context.Context, jobID job.ID, working *git.Checkout, logger log.Logger) (job.Result, error) {
// For each update
var workloadIDs []resource.ID
result := job.Result{
Spec: &spec,
Result: update.Result{},
}
// A shortcut to make things more responsive: if anything
// was (probably) set to automated, we will ask for an
// automation run straight ASAP.
var anythingAutomated bool
for workloadID, u := range updates {
if d.Cluster.IsAllowedResource(workloadID) {
result.Result[workloadID] = update.WorkloadResult{
Status: update.ReleaseStatusSkipped,
}
}
if u.Add.Has(policy.Automated) {
anythingAutomated = true
}
cm, err := d.getManifestStore(working)
if err != nil {
return result, err
}
updated, err := cm.UpdateWorkloadPolicies(ctx, workloadID, u)
if err != nil {
result.Result[workloadID] = update.WorkloadResult{
Status: update.ReleaseStatusFailed,
Error: err.Error(),
}
switch err := err.(type) {
case manifests.StoreError:
result.Result[workloadID] = update.WorkloadResult{
Status: update.ReleaseStatusFailed,
Error: err.Error(),
}
default:
return result, err
}
}
if !updated {
result.Result[workloadID] = update.WorkloadResult{
Status: update.ReleaseStatusSkipped,
}
} else {
workloadIDs = append(workloadIDs, workloadID)
result.Result[workloadID] = update.WorkloadResult{
Status: update.ReleaseStatusSuccess,
}
}
}
if len(workloadIDs) == 0 {
return result, nil
}
commitAuthor := ""
if d.GitConfig.SetAuthor {
commitAuthor = spec.Cause.User
}
commitAction := git.CommitAction{
Author: commitAuthor,
Message: policyCommitMessage(updates, spec.Cause),
}
if err := working.CommitAndPush(ctx, commitAction, ¬e{JobID: jobID, Spec: spec}, d.ManifestGenerationEnabled); err != nil {
// On | err := d.Repo.Refresh(ctx) | random_line_split |
WSU-PDF_process_incoming_to_Fedora_152.py | return item_ID_list
# change permissions en bulk
def permissionsChange(item_ID_list):
for item_ID in item_ID_list:
os.system(Template("chmod -R 755 /processing/$item_ID").substitute(item_ID=item_ID))
#try to get MODS file
# remove PDF artifacts (PNGs), renames images to match item_ID
def fileRename(item_ID):
#remove PNG files
os.system(Template("rm /processing/$item_ID/*.png").substitute(item_ID=item_ID))
images = os.listdir("/processing/"+item_ID)
images.sort() #sorts images
image_num = 1 #this method only works with htm, pdf, and image / if
count = 0
for image in images:
# get image type
image_type = image.split(".")
image_type = image_type[len(image_type)-1]
# contruct image name
new_image_name = item_ID + str(image_num).zfill(5) + "." + image_type
# rename file
os.system(Template("mv /processing/$item_ID/$image /processing/$item_ID/$new_image_name").substitute(item_ID=item_ID, image=image, new_image_name=new_image_name))
#bumps counters
if count == 3:
count = 0
image_num = image_num + 1
else:
count = count + 1
def imageResize (item_ID):
images = os.listdir("/processing/"+item_ID)
for image in images:
# accepted_image_types = ['jpg','jpeg','tif','tiff','gif','png']
accepted_image_types = ['jpg'] #expecting only jpg images from WSU PDF route, because essentially the PDF is the definitive copy, not archival TIFFs
image_basename, image_type = image.split('.')[0], image.split('.')[-1]
if image_type.lower() in accepted_image_types: #looking for jpg or tif
# max dimensions, change this for finding "sweet spot" in bookreader
max_width = 1700
max_height = 1700
# get dimensions
im = Image.open(Template('/processing/$item_ID/$image').substitute(item_ID=item_ID, image=image))
width, height = im.size #returns tuple
if height > width:
print Template("Portait - resizing from $orig_height to 1700").substitute(orig_height=height)
im.thumbnail((max_width, max_height), Image.ANTIALIAS)
# converts to RGB if necessary...
if im.mode != "RGB":
im = im.convert("RGB")
im.save(Template('/processing/$item_ID/$image_basename.jpg').substitute(item_ID=item_ID, image_basename=image_basename))
print image,"resized to: ",im.size,", converted to JPEG."
else:
print Template("Portait - resizing from $orig_width to 1700").substitute(orig_width=width)
im.thumbnail((max_width, max_height), Image.ANTIALIAS)
# converts to RGB if necessary...
if im.mode != "RGB":
im = im.convert("RGB")
im.save(Template('/processing/$item_ID/$image_basename.jpg').substitute(item_ID=item_ID, image_basename=image_basename))
print image,"resized to: ",im.size,", converted to JPEG."
else:
continue
def createThumbs (item_ID):
os.system(Template("mkdir /processing/$item_ID/thumbs").substitute(item_ID=item_ID))
images = os.listdir("/processing/"+item_ID)
for image in images:
if image.endswith('jpg'): #derivative copy from tiffs
image_basename = image.split('.')[0]
thumb_width = 200
thumb_height = 200
# get dimensions
im = Image.open(Template('/processing/$item_ID/$image').substitute(item_ID=item_ID, image=image))
width, height = im.size #returns tuple
if height > width:
print "Portait - creating thumbnail"
im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
thumbname = image_basename
im.save(Template('/processing/$item_ID/thumbs/$thumbname.jpg').substitute(item_ID=item_ID, thumbname=thumbname))
print image,"thumbnail created."
else:
print Template("Portait - creating thumbnail").substitute(orig_width=width)
im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
thumbname = image_basename
im.save(Template('/processing/$item_ID/thumbs/$thumbname.jpg').substitute(item_ID=item_ID, thumbname=thumbname))
print image,"thumbnail created."
else:
continue
# function to move and create file structure
def createStructure(item_ID):
#images
os.system(Template("mkdir /processing/$item_ID/images").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.jpg /processing/$item_ID/images").substitute(item_ID=item_ID))
except:
print "no images to move."
########################################################################################################################
#original tiffs
os.system(Template("mkdir /processing/$item_ID/tiffs").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.tif /processing/$item_ID/tiffs").substitute(item_ID=item_ID))
except:
print "no original images to move."
########################################################################################################################
#images/thumbs
os.system(Template("mkdir /processing/$item_ID/images/thumbs").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/thumbs/* /processing/$item_ID/images/thumbs/").substitute(item_ID=item_ID))
os.system(Template("rm -R /processing/$item_ID/thumbs/").substitute(item_ID=item_ID))
except:
print "no images to move."
#OCR
os.system(Template("mkdir /processing/$item_ID/OCR").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.htm /processing/$item_ID/OCR").substitute(item_ID=item_ID))
except:
print "no OCR / html to move."
#altoXML
os.system(Template("mkdir /processing/$item_ID/altoXML").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.xml /processing/$item_ID/altoXML").substitute(item_ID=item_ID))
except:
print "no altoXML to move."
#pdf
os.system(Template("mkdir /processing/$item_ID/pdf").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.pdf /processing/$item_ID/pdf").substitute(item_ID=item_ID))
except:
print "no pdf's to move."
#fullbook
os.system(Template("mkdir /processing/$item_ID/fullbook").substitute(item_ID=item_ID))
print Template("file structure created for $item_ID").substitute(item_ID=item_ID)
######################################################################################################################
# WORKING FROM /processing now
######################################################################################################################
# function to create metadata file for each item_ID from "/processing" dir
def createMetadata(item_ID):
# get number of files
images = os.listdir("/processing/"+item_ID+"/images")
leaf_count = len(images) - 1 #accounts for /thumbs directory in there
# get dimensions of cover and create cover image
cover_path = Template('/processing/$item_ID/images/$item_ID').substitute(item_ID=item_ID) + "00001.jpg"
im = Image.open(cover_path)
width, height = im.size #returns tuple
# generate cover thumbnail
max_cover_width = 200
max_cover_height = 200
im.thumbnail((max_cover_width, max_cover_height), Image.ANTIALIAS)
im.save(Template('/processing/$item_ID/$item_ID').substitute(item_ID=item_ID) + "_cover.jpg")
print "Cover created for",item_ID
# write to xml
fhand = Template("/processing/$item_ID/$item_ID$suffix.xml").substitute(item_ID=item_ID, suffix="_metadata")
w = XMLWriter(fhand, "utf-8")
metadata = w.start("add")
w.start("doc")
w.element("field", Template("meta:$item_ID").substitute | pre_item_ID_list = os.listdir('/processing')
if len(pre_item_ID_list) == 0:
print "Nothing to do!"
else:
print "Processing these items: ",pre_item_ID_list
#prepare normalized list
item_ID_list = []
for item_ID in pre_item_ID_list:
#apostrophes
item_ID_handle = re.sub("'","\\'", item_ID)
n_item_ID = re.sub(" ","_", item_ID)
#spaces
item_ID_handle = re.sub(" ","\ ", item_ID_handle)
n_item_ID = re.sub("'","", n_item_ID)
# item_ID = unidecode(item_ID) #uncomment to use - works, but ugly
print item_ID,"-->",item_ID_handle
os.system(Template("mv /processing/$item_ID_handle /processing/$n_item_ID").substitute(item_ID_handle=item_ID_handle,n_item_ID=n_item_ID))
item_ID_list = os.listdir('/processing') | identifier_body | |
WSU-PDF_process_incoming_to_Fedora_152.py | image in images:
# get image type
image_type = image.split(".")
image_type = image_type[len(image_type)-1]
# contruct image name
new_image_name = item_ID + str(image_num).zfill(5) + "." + image_type
# rename file
os.system(Template("mv /processing/$item_ID/$image /processing/$item_ID/$new_image_name").substitute(item_ID=item_ID, image=image, new_image_name=new_image_name))
#bumps counters
if count == 3:
count = 0
image_num = image_num + 1
else:
count = count + 1
def imageResize (item_ID):
images = os.listdir("/processing/"+item_ID)
for image in images:
# accepted_image_types = ['jpg','jpeg','tif','tiff','gif','png']
accepted_image_types = ['jpg'] #expecting only jpg images from WSU PDF route, because essentially the PDF is the definitive copy, not archival TIFFs
image_basename, image_type = image.split('.')[0], image.split('.')[-1]
if image_type.lower() in accepted_image_types: #looking for jpg or tif
# max dimensions, change this for finding "sweet spot" in bookreader
max_width = 1700
max_height = 1700
# get dimensions
im = Image.open(Template('/processing/$item_ID/$image').substitute(item_ID=item_ID, image=image))
width, height = im.size #returns tuple
if height > width:
print Template("Portait - resizing from $orig_height to 1700").substitute(orig_height=height)
im.thumbnail((max_width, max_height), Image.ANTIALIAS)
# converts to RGB if necessary...
if im.mode != "RGB":
im = im.convert("RGB")
im.save(Template('/processing/$item_ID/$image_basename.jpg').substitute(item_ID=item_ID, image_basename=image_basename))
print image,"resized to: ",im.size,", converted to JPEG."
| im.thumbnail((max_width, max_height), Image.ANTIALIAS)
# converts to RGB if necessary...
if im.mode != "RGB":
im = im.convert("RGB")
im.save(Template('/processing/$item_ID/$image_basename.jpg').substitute(item_ID=item_ID, image_basename=image_basename))
print image,"resized to: ",im.size,", converted to JPEG."
else:
continue
def createThumbs (item_ID):
os.system(Template("mkdir /processing/$item_ID/thumbs").substitute(item_ID=item_ID))
images = os.listdir("/processing/"+item_ID)
for image in images:
if image.endswith('jpg'): #derivative copy from tiffs
image_basename = image.split('.')[0]
thumb_width = 200
thumb_height = 200
# get dimensions
im = Image.open(Template('/processing/$item_ID/$image').substitute(item_ID=item_ID, image=image))
width, height = im.size #returns tuple
if height > width:
print "Portait - creating thumbnail"
im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
thumbname = image_basename
im.save(Template('/processing/$item_ID/thumbs/$thumbname.jpg').substitute(item_ID=item_ID, thumbname=thumbname))
print image,"thumbnail created."
else:
print Template("Portait - creating thumbnail").substitute(orig_width=width)
im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
thumbname = image_basename
im.save(Template('/processing/$item_ID/thumbs/$thumbname.jpg').substitute(item_ID=item_ID, thumbname=thumbname))
print image,"thumbnail created."
else:
continue
# function to move and create file structure
def createStructure(item_ID):
#images
os.system(Template("mkdir /processing/$item_ID/images").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.jpg /processing/$item_ID/images").substitute(item_ID=item_ID))
except:
print "no images to move."
########################################################################################################################
#original tiffs
os.system(Template("mkdir /processing/$item_ID/tiffs").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.tif /processing/$item_ID/tiffs").substitute(item_ID=item_ID))
except:
print "no original images to move."
########################################################################################################################
#images/thumbs
os.system(Template("mkdir /processing/$item_ID/images/thumbs").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/thumbs/* /processing/$item_ID/images/thumbs/").substitute(item_ID=item_ID))
os.system(Template("rm -R /processing/$item_ID/thumbs/").substitute(item_ID=item_ID))
except:
print "no images to move."
#OCR
os.system(Template("mkdir /processing/$item_ID/OCR").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.htm /processing/$item_ID/OCR").substitute(item_ID=item_ID))
except:
print "no OCR / html to move."
#altoXML
os.system(Template("mkdir /processing/$item_ID/altoXML").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.xml /processing/$item_ID/altoXML").substitute(item_ID=item_ID))
except:
print "no altoXML to move."
#pdf
os.system(Template("mkdir /processing/$item_ID/pdf").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.pdf /processing/$item_ID/pdf").substitute(item_ID=item_ID))
except:
print "no pdf's to move."
#fullbook
os.system(Template("mkdir /processing/$item_ID/fullbook").substitute(item_ID=item_ID))
print Template("file structure created for $item_ID").substitute(item_ID=item_ID)
######################################################################################################################
# WORKING FROM /processing now
######################################################################################################################
# function to create metadata file for each item_ID from "/processing" dir
def createMetadata(item_ID):
# get number of files
images = os.listdir("/processing/"+item_ID+"/images")
leaf_count = len(images) - 1 #accounts for /thumbs directory in there
# get dimensions of cover and create cover image
cover_path = Template('/processing/$item_ID/images/$item_ID').substitute(item_ID=item_ID) + "00001.jpg"
im = Image.open(cover_path)
width, height = im.size #returns tuple
# generate cover thumbnail
max_cover_width = 200
max_cover_height = 200
im.thumbnail((max_cover_width, max_cover_height), Image.ANTIALIAS)
im.save(Template('/processing/$item_ID/$item_ID').substitute(item_ID=item_ID) + "_cover.jpg")
print "Cover created for",item_ID
# write to xml
fhand = Template("/processing/$item_ID/$item_ID$suffix.xml").substitute(item_ID=item_ID, suffix="_metadata")
w = XMLWriter(fhand, "utf-8")
metadata = w.start("add")
w.start("doc")
w.element("field", Template("meta:$item_ID").substitute(item_ID=item_ID), name="id")
w.element("field", Template("$item_ID").substitute(item_ID=item_ID), name="ItemID") #no underscore for solr index in "ItemID"
#creats overall ratio - height / width
w.element("field", str(height), name="pheight")
w.element("field", str(width), name="pwidth")
w.element("field", str(leaf_count), name="leafs")
w.element("field", Template("$item_ID").substitute(item_ID=item_ID), name="item_title") #how will we generate this? ItemID for now...
w.end() #closes <doc>
w.close(metadata)
def createSinglePDF(item_ID):
os.system(Template("pdftk /processing/$item_ID/pdf/*.pdf cat output /processing/$item_ID/pdf/$item_ID.pdf").substitute(item_ID=item_ID))
print "merged pdf created."
#move concatenated file to "fullbook"
try:
os.system(Template("mv /processing/$item_ID/pdf/$item_ID.pdf /processing/$item_ID/fullbook").substitute(item_ID=item_ID))
except:
print "could not move concatenated PDF."
def createSingleHTML(item_ID):
#create meta-HTML file
html_concat = ''
#get list of HTML docs
html_docs = os.listdir("/processing/"+item_ID+"/OCR")
html_docs = sorted(html_docs) #sorts by ascending filename
#iterate through them
html_count = 1
for html_doc in html_docs:
fhand = open(Template('/processing/$item_ID/OCR/$html_doc').substitute(item_ID=item_ID, html_doc=html_doc),'r')
html_parsed = BeautifulSoup(fhand)
print "HTML document parsed..."
| else:
print Template("Portait - resizing from $orig_width to 1700").substitute(orig_width=width) | random_line_split |
WSU-PDF_process_incoming_to_Fedora_152.py | in images:
# get image type
image_type = image.split(".")
image_type = image_type[len(image_type)-1]
# contruct image name
new_image_name = item_ID + str(image_num).zfill(5) + "." + image_type
# rename file
os.system(Template("mv /processing/$item_ID/$image /processing/$item_ID/$new_image_name").substitute(item_ID=item_ID, image=image, new_image_name=new_image_name))
#bumps counters
if count == 3:
count = 0
image_num = image_num + 1
else:
count = count + 1
def imageResize (item_ID):
images = os.listdir("/processing/"+item_ID)
for image in images:
# accepted_image_types = ['jpg','jpeg','tif','tiff','gif','png']
accepted_image_types = ['jpg'] #expecting only jpg images from WSU PDF route, because essentially the PDF is the definitive copy, not archival TIFFs
image_basename, image_type = image.split('.')[0], image.split('.')[-1]
if image_type.lower() in accepted_image_types: #looking for jpg or tif
# max dimensions, change this for finding "sweet spot" in bookreader
max_width = 1700
max_height = 1700
# get dimensions
im = Image.open(Template('/processing/$item_ID/$image').substitute(item_ID=item_ID, image=image))
width, height = im.size #returns tuple
if height > width:
print Template("Portait - resizing from $orig_height to 1700").substitute(orig_height=height)
im.thumbnail((max_width, max_height), Image.ANTIALIAS)
# converts to RGB if necessary...
if im.mode != "RGB":
im = im.convert("RGB")
im.save(Template('/processing/$item_ID/$image_basename.jpg').substitute(item_ID=item_ID, image_basename=image_basename))
print image,"resized to: ",im.size,", converted to JPEG."
else:
print Template("Portait - resizing from $orig_width to 1700").substitute(orig_width=width)
im.thumbnail((max_width, max_height), Image.ANTIALIAS)
# converts to RGB if necessary...
if im.mode != "RGB":
im = im.convert("RGB")
im.save(Template('/processing/$item_ID/$image_basename.jpg').substitute(item_ID=item_ID, image_basename=image_basename))
print image,"resized to: ",im.size,", converted to JPEG."
else:
continue
def createThumbs (item_ID):
os.system(Template("mkdir /processing/$item_ID/thumbs").substitute(item_ID=item_ID))
images = os.listdir("/processing/"+item_ID)
for image in images:
if image.endswith('jpg'): #derivative copy from tiffs
image_basename = image.split('.')[0]
thumb_width = 200
thumb_height = 200
# get dimensions
im = Image.open(Template('/processing/$item_ID/$image').substitute(item_ID=item_ID, image=image))
width, height = im.size #returns tuple
if height > width:
print "Portait - creating thumbnail"
im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
thumbname = image_basename
im.save(Template('/processing/$item_ID/thumbs/$thumbname.jpg').substitute(item_ID=item_ID, thumbname=thumbname))
print image,"thumbnail created."
else:
print Template("Portait - creating thumbnail").substitute(orig_width=width)
im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
thumbname = image_basename
im.save(Template('/processing/$item_ID/thumbs/$thumbname.jpg').substitute(item_ID=item_ID, thumbname=thumbname))
print image,"thumbnail created."
else:
continue
# function to move and create file structure
def createStructure(item_ID):
#images
os.system(Template("mkdir /processing/$item_ID/images").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.jpg /processing/$item_ID/images").substitute(item_ID=item_ID))
except:
print "no images to move."
########################################################################################################################
#original tiffs
os.system(Template("mkdir /processing/$item_ID/tiffs").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.tif /processing/$item_ID/tiffs").substitute(item_ID=item_ID))
except:
print "no original images to move."
########################################################################################################################
#images/thumbs
os.system(Template("mkdir /processing/$item_ID/images/thumbs").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/thumbs/* /processing/$item_ID/images/thumbs/").substitute(item_ID=item_ID))
os.system(Template("rm -R /processing/$item_ID/thumbs/").substitute(item_ID=item_ID))
except:
print "no images to move."
#OCR
os.system(Template("mkdir /processing/$item_ID/OCR").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.htm /processing/$item_ID/OCR").substitute(item_ID=item_ID))
except:
print "no OCR / html to move."
#altoXML
os.system(Template("mkdir /processing/$item_ID/altoXML").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.xml /processing/$item_ID/altoXML").substitute(item_ID=item_ID))
except:
print "no altoXML to move."
#pdf
os.system(Template("mkdir /processing/$item_ID/pdf").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.pdf /processing/$item_ID/pdf").substitute(item_ID=item_ID))
except:
print "no pdf's to move."
#fullbook
os.system(Template("mkdir /processing/$item_ID/fullbook").substitute(item_ID=item_ID))
print Template("file structure created for $item_ID").substitute(item_ID=item_ID)
######################################################################################################################
# WORKING FROM /processing now
######################################################################################################################
# function to create metadata file for each item_ID from "/processing" dir
def | (item_ID):
# get number of files
images = os.listdir("/processing/"+item_ID+"/images")
leaf_count = len(images) - 1 #accounts for /thumbs directory in there
# get dimensions of cover and create cover image
cover_path = Template('/processing/$item_ID/images/$item_ID').substitute(item_ID=item_ID) + "00001.jpg"
im = Image.open(cover_path)
width, height = im.size #returns tuple
# generate cover thumbnail
max_cover_width = 200
max_cover_height = 200
im.thumbnail((max_cover_width, max_cover_height), Image.ANTIALIAS)
im.save(Template('/processing/$item_ID/$item_ID').substitute(item_ID=item_ID) + "_cover.jpg")
print "Cover created for",item_ID
# write to xml
fhand = Template("/processing/$item_ID/$item_ID$suffix.xml").substitute(item_ID=item_ID, suffix="_metadata")
w = XMLWriter(fhand, "utf-8")
metadata = w.start("add")
w.start("doc")
w.element("field", Template("meta:$item_ID").substitute(item_ID=item_ID), name="id")
w.element("field", Template("$item_ID").substitute(item_ID=item_ID), name="ItemID") #no underscore for solr index in "ItemID"
#creats overall ratio - height / width
w.element("field", str(height), name="pheight")
w.element("field", str(width), name="pwidth")
w.element("field", str(leaf_count), name="leafs")
w.element("field", Template("$item_ID").substitute(item_ID=item_ID), name="item_title") #how will we generate this? ItemID for now...
w.end() #closes <doc>
w.close(metadata)
def createSinglePDF(item_ID):
os.system(Template("pdftk /processing/$item_ID/pdf/*.pdf cat output /processing/$item_ID/pdf/$item_ID.pdf").substitute(item_ID=item_ID))
print "merged pdf created."
#move concatenated file to "fullbook"
try:
os.system(Template("mv /processing/$item_ID/pdf/$item_ID.pdf /processing/$item_ID/fullbook").substitute(item_ID=item_ID))
except:
print "could not move concatenated PDF."
def createSingleHTML(item_ID):
#create meta-HTML file
html_concat = ''
#get list of HTML docs
html_docs = os.listdir("/processing/"+item_ID+"/OCR")
html_docs = sorted(html_docs) #sorts by ascending filename
#iterate through them
html_count = 1
for html_doc in html_docs:
fhand = open(Template('/processing/$item_ID/OCR/$html_doc').substitute(item_ID=item_ID, html_doc=html_doc),'r')
html_parsed = BeautifulSoup(fhand)
print "HTML document parsed | createMetadata | identifier_name |
WSU-PDF_process_incoming_to_Fedora_152.py | /$new_image_name").substitute(item_ID=item_ID, image=image, new_image_name=new_image_name))
#bumps counters
if count == 3:
count = 0
image_num = image_num + 1
else:
count = count + 1
def imageResize (item_ID):
images = os.listdir("/processing/"+item_ID)
for image in images:
# accepted_image_types = ['jpg','jpeg','tif','tiff','gif','png']
accepted_image_types = ['jpg'] #expecting only jpg images from WSU PDF route, because essentially the PDF is the definitive copy, not archival TIFFs
image_basename, image_type = image.split('.')[0], image.split('.')[-1]
if image_type.lower() in accepted_image_types: #looking for jpg or tif
# max dimensions, change this for finding "sweet spot" in bookreader
max_width = 1700
max_height = 1700
# get dimensions
im = Image.open(Template('/processing/$item_ID/$image').substitute(item_ID=item_ID, image=image))
width, height = im.size #returns tuple
if height > width:
print Template("Portait - resizing from $orig_height to 1700").substitute(orig_height=height)
im.thumbnail((max_width, max_height), Image.ANTIALIAS)
# converts to RGB if necessary...
if im.mode != "RGB":
im = im.convert("RGB")
im.save(Template('/processing/$item_ID/$image_basename.jpg').substitute(item_ID=item_ID, image_basename=image_basename))
print image,"resized to: ",im.size,", converted to JPEG."
else:
print Template("Portait - resizing from $orig_width to 1700").substitute(orig_width=width)
im.thumbnail((max_width, max_height), Image.ANTIALIAS)
# converts to RGB if necessary...
if im.mode != "RGB":
im = im.convert("RGB")
im.save(Template('/processing/$item_ID/$image_basename.jpg').substitute(item_ID=item_ID, image_basename=image_basename))
print image,"resized to: ",im.size,", converted to JPEG."
else:
continue
def createThumbs (item_ID):
os.system(Template("mkdir /processing/$item_ID/thumbs").substitute(item_ID=item_ID))
images = os.listdir("/processing/"+item_ID)
for image in images:
if image.endswith('jpg'): #derivative copy from tiffs
image_basename = image.split('.')[0]
thumb_width = 200
thumb_height = 200
# get dimensions
im = Image.open(Template('/processing/$item_ID/$image').substitute(item_ID=item_ID, image=image))
width, height = im.size #returns tuple
if height > width:
print "Portait - creating thumbnail"
im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
thumbname = image_basename
im.save(Template('/processing/$item_ID/thumbs/$thumbname.jpg').substitute(item_ID=item_ID, thumbname=thumbname))
print image,"thumbnail created."
else:
print Template("Portait - creating thumbnail").substitute(orig_width=width)
im.thumbnail((thumb_width, thumb_height), Image.ANTIALIAS)
thumbname = image_basename
im.save(Template('/processing/$item_ID/thumbs/$thumbname.jpg').substitute(item_ID=item_ID, thumbname=thumbname))
print image,"thumbnail created."
else:
continue
# function to move and create file structure
def createStructure(item_ID):
#images
os.system(Template("mkdir /processing/$item_ID/images").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.jpg /processing/$item_ID/images").substitute(item_ID=item_ID))
except:
print "no images to move."
########################################################################################################################
#original tiffs
os.system(Template("mkdir /processing/$item_ID/tiffs").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.tif /processing/$item_ID/tiffs").substitute(item_ID=item_ID))
except:
print "no original images to move."
########################################################################################################################
#images/thumbs
os.system(Template("mkdir /processing/$item_ID/images/thumbs").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/thumbs/* /processing/$item_ID/images/thumbs/").substitute(item_ID=item_ID))
os.system(Template("rm -R /processing/$item_ID/thumbs/").substitute(item_ID=item_ID))
except:
print "no images to move."
#OCR
os.system(Template("mkdir /processing/$item_ID/OCR").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.htm /processing/$item_ID/OCR").substitute(item_ID=item_ID))
except:
print "no OCR / html to move."
#altoXML
os.system(Template("mkdir /processing/$item_ID/altoXML").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.xml /processing/$item_ID/altoXML").substitute(item_ID=item_ID))
except:
print "no altoXML to move."
#pdf
os.system(Template("mkdir /processing/$item_ID/pdf").substitute(item_ID=item_ID))
try:
os.system(Template("mv /processing/$item_ID/*.pdf /processing/$item_ID/pdf").substitute(item_ID=item_ID))
except:
print "no pdf's to move."
#fullbook
os.system(Template("mkdir /processing/$item_ID/fullbook").substitute(item_ID=item_ID))
print Template("file structure created for $item_ID").substitute(item_ID=item_ID)
######################################################################################################################
# WORKING FROM /processing now
######################################################################################################################
# function to create metadata file for each item_ID from "/processing" dir
def createMetadata(item_ID):
# get number of files
images = os.listdir("/processing/"+item_ID+"/images")
leaf_count = len(images) - 1 #accounts for /thumbs directory in there
# get dimensions of cover and create cover image
cover_path = Template('/processing/$item_ID/images/$item_ID').substitute(item_ID=item_ID) + "00001.jpg"
im = Image.open(cover_path)
width, height = im.size #returns tuple
# generate cover thumbnail
max_cover_width = 200
max_cover_height = 200
im.thumbnail((max_cover_width, max_cover_height), Image.ANTIALIAS)
im.save(Template('/processing/$item_ID/$item_ID').substitute(item_ID=item_ID) + "_cover.jpg")
print "Cover created for",item_ID
# write to xml
fhand = Template("/processing/$item_ID/$item_ID$suffix.xml").substitute(item_ID=item_ID, suffix="_metadata")
w = XMLWriter(fhand, "utf-8")
metadata = w.start("add")
w.start("doc")
w.element("field", Template("meta:$item_ID").substitute(item_ID=item_ID), name="id")
w.element("field", Template("$item_ID").substitute(item_ID=item_ID), name="ItemID") #no underscore for solr index in "ItemID"
#creats overall ratio - height / width
w.element("field", str(height), name="pheight")
w.element("field", str(width), name="pwidth")
w.element("field", str(leaf_count), name="leafs")
w.element("field", Template("$item_ID").substitute(item_ID=item_ID), name="item_title") #how will we generate this? ItemID for now...
w.end() #closes <doc>
w.close(metadata)
def createSinglePDF(item_ID):
os.system(Template("pdftk /processing/$item_ID/pdf/*.pdf cat output /processing/$item_ID/pdf/$item_ID.pdf").substitute(item_ID=item_ID))
print "merged pdf created."
#move concatenated file to "fullbook"
try:
os.system(Template("mv /processing/$item_ID/pdf/$item_ID.pdf /processing/$item_ID/fullbook").substitute(item_ID=item_ID))
except:
print "could not move concatenated PDF."
def createSingleHTML(item_ID):
#create meta-HTML file
html_concat = ''
#get list of HTML docs
html_docs = os.listdir("/processing/"+item_ID+"/OCR")
html_docs = sorted(html_docs) #sorts by ascending filename
#iterate through them
html_count = 1
for html_doc in html_docs:
fhand = open(Template('/processing/$item_ID/OCR/$html_doc').substitute(item_ID=item_ID, html_doc=html_doc),'r')
html_parsed = BeautifulSoup(fhand)
print "HTML document parsed..."
#sets div with page_ID
html_concat = html_concat + Template('<div id="page_ID_$html_count" class="html_page">').substitute(html_count=html_count)
#Set in try / except block, as some HTML documents contain no elements within <body> tag
try:
for block in html_parsed.body:
| html_concat = html_concat + unicode(block) | conditional_block | |
render.py | super(ZoomGroup, self).__init__(parent)
self.zoom = zoom
def set_state(self):
pyglet.gl.glPushMatrix()
pyglet.gl.glScalef(self.zoom, self.zoom, 1)
def unset_state(self):
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.zoom == other.zoom and
self.parent == other.parent
)
def __hash__(self):
return hash((self.zoom, self.parent))
def __repr__(self):
return '%s(zoom=%d)' % (self.__class__.__name__, self.zoom)
class CameraGroup(pyglet.graphics.Group):
def __init__(self, window, zoom_factor, focus=None, parent=None):
super(CameraGroup, self).__init__(parent)
self.window = window
self.zoom_factor = zoom_factor
self.focus = focus
def set_state(self):
if self.focus is not None:
cam_x = self.window.width / 2 - self.focus.x * self.zoom_factor
cam_y = self.window.height / 2 - self.focus.y * self.zoom_factor
pyglet.gl.gl.glPushMatrix()
pyglet.gl.gl.glTranslatef(cam_x, cam_y, 0)
def unset_state(self):
if self.focus is not None:
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.window is other.window and
self.zoom_factor == other.zoom_factor and
self.parent == other.parent
)
def __hash__(self):
return hash((self.window, self.zoom_factor, self.parent))
class Animation(pyglet.event.EventDispatcher):
def __init__(self, duration):
self.elapsed = 0.0
self.duration = duration
pyglet.clock.schedule_interval(self._animate, 0.001)
def cancel(self):
pyglet.clock.unschedule(self._animate)
self.dispatch_event('on_finish', self)
def get_elapsed_ratio(self):
return self.elapsed / self.duration
def _animate(self, dt):
self.elapsed += dt
if self.elapsed > self.duration:
self.cancel()
else:
self.dispatch_event('on_update', self, dt)
Animation.register_event_type('on_update')
Animation.register_event_type('on_finish')
class Renderable(Component):
COMPONENT_NAME = 'renderable'
def __init__(self, image, memorable=False):
self._image = image
self.memorable = memorable
image = event_property('_image', 'image_change')
class LayoutRenderable(Component):
COMPONENT_NAME = 'layout_renderable'
def __init__(self, tile):
self.tile = tile
class RenderSystem(object):
zoom = 3
GROUP_LEVEL = pyglet.graphics.OrderedGroup(0)
GROUP_DIGITS = pyglet.graphics.OrderedGroup(1)
GROUP_HUD = pyglet.graphics.OrderedGroup(2)
def __init__(self, level):
self._level = level
self._window = level.game.game.window
self._batch = pyglet.graphics.Batch()
self._animations = set()
self._sprites = {}
self._level_vlist = None
self._light_overlay = None
self._last_messages_view = LastMessagesView(level.game.message_log, self._window.width, self._window.height, batch=self._batch, group=self.GROUP_HUD)
self._hud = HUD(batch=self._batch, group=self.GROUP_HUD)
self._level_group = ZoomGroup(self.zoom, CameraGroup(self._window, self.zoom, self.GROUP_LEVEL))
self._digits_group = CameraGroup(self._window, self.zoom, self.GROUP_DIGITS)
self._memory = collections.defaultdict(list)
def update_player(self):
player_sprite = self._sprites[self._level.player]
self._digits_group.focus = player_sprite
self._level_group.parent.focus = player_sprite
self._hud.player = self._level.player
def render_level(self):
vertices = []
tex_coords = []
for x in xrange(self._level.size_x):
for y in xrange(self._level.size_y):
x1 = x * 8
x2 = x1 + 8
y1 = y * 8
y2 = y1 + 8
| tile = renderable.tile
break
else:
continue
# always add floor, because we wanna draw walls above floor
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(floor_tex.tex_coords)
if tile == LayoutGenerator.TILE_WALL:
# if we got wall, draw it above floor
tex = get_wall_tex(self._level.get_wall_transition(x, y))
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(tex.tex_coords)
group = TextureGroup(dungeon_tex, pyglet.graphics.OrderedGroup(Position.ORDER_FLOOR, self._level_group))
self._level_vlist = self._batch.add(len(vertices) / 2, pyglet.gl.GL_QUADS, group,
('v2i/static', vertices),
('t3f/statc', tex_coords),
)
group = pyglet.graphics.OrderedGroup(Position.ORDER_PLAYER + 1, self._level_group)
self._light_overlay = LightOverlay(self._level.size_x, self._level.size_y, self._batch, group)
def update_light(self, old_lightmap, new_lightmap):
# for all changed cells
for key in set(old_lightmap).union(new_lightmap):
lit = key in new_lightmap
memory = self._memory[key]
# if cell is lit, add it to memory and clear all memory sprites, if there are any
if lit:
for sprite in memory:
sprite.delete()
memory[:] = []
# for every entity in cell
for entity in self._level.position_system.get_entities_at(*key):
# set in_fov flag
# TODO: this doesnt belong to rendering, but i don't want to loop twice
infov = entity.get(InFOV)
if infov:
infov.in_fov = key in new_lightmap
# if renderable, manage sprites/memory
renderable = entity.get(Renderable)
if not renderable:
continue
# if object is lit, show its sprite
sprite = self._sprites[entity]
if lit:
sprite.visible = True
else:
sprite.visible = False
# if it's memorable, add its current image to the memory
if renderable.memorable:
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(renderable.image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
memory.append(sprite)
# update light overlay
self._light_overlay.update_light(new_lightmap, self._memory)
def add_entity(self, entity):
image = entity.get(Renderable).image
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
self._sprites[entity] = sprite
entity.listen('image_change', self._on_image_change)
entity.listen('move', self._on_move)
def remove_entity(self, entity):
sprite = self._sprites.pop(entity)
sprite.delete()
entity.unlisten('image_change', self._on_image_change)
entity.unlisten('move', self._on_move)
def _on_image_change(self, entity):
self._sprites[entity].image = entity.get(Renderable).image
def _on_move(self, entity, old_x, old_y, new_x, new_y):
sprite = self._sprites[entity]
target_x = new_x * 8
target_y = new_y * 8
if not sprite.visible:
# don't animate invisible sprites
sprite.set_position(target_x, target_y)
else:
start_x = sprite.x
start_y = sprite.y
anim = Animation(0.25)
@anim.event
def on_update(animation, dt, sprite=sprite, dx=(target_x - start_x), dy=(target_y - start_y)):
ratio = animation.get_elapsed_ratio()
x = round(start_x + dx * ratio)
y = round(start_y + dy * ratio)
sprite.set_position(x, y)
@anim.event
def on_finish(animation, sprite=sprite):
sprite.set_position(target_x, target_y)
self.add_animation(anim)
def draw(self):
self._window.clear()
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
| for entity in self._level.position_system.get_entities_at(x, y):
renderable = entity.get(LayoutRenderable)
if renderable:
| random_line_split |
render.py | super(ZoomGroup, self).__init__(parent)
self.zoom = zoom
def set_state(self):
pyglet.gl.glPushMatrix()
pyglet.gl.glScalef(self.zoom, self.zoom, 1)
def unset_state(self):
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.zoom == other.zoom and
self.parent == other.parent
)
def __hash__(self):
return hash((self.zoom, self.parent))
def __repr__(self):
return '%s(zoom=%d)' % (self.__class__.__name__, self.zoom)
class CameraGroup(pyglet.graphics.Group):
def __init__(self, window, zoom_factor, focus=None, parent=None):
super(CameraGroup, self).__init__(parent)
self.window = window
self.zoom_factor = zoom_factor
self.focus = focus
def set_state(self):
if self.focus is not None:
cam_x = self.window.width / 2 - self.focus.x * self.zoom_factor
cam_y = self.window.height / 2 - self.focus.y * self.zoom_factor
pyglet.gl.gl.glPushMatrix()
pyglet.gl.gl.glTranslatef(cam_x, cam_y, 0)
def unset_state(self):
if self.focus is not None:
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.window is other.window and
self.zoom_factor == other.zoom_factor and
self.parent == other.parent
)
def __hash__(self):
return hash((self.window, self.zoom_factor, self.parent))
class Animation(pyglet.event.EventDispatcher):
def __init__(self, duration):
self.elapsed = 0.0
self.duration = duration
pyglet.clock.schedule_interval(self._animate, 0.001)
def cancel(self):
pyglet.clock.unschedule(self._animate)
self.dispatch_event('on_finish', self)
def get_elapsed_ratio(self):
return self.elapsed / self.duration
def _animate(self, dt):
self.elapsed += dt
if self.elapsed > self.duration:
self.cancel()
else:
self.dispatch_event('on_update', self, dt)
Animation.register_event_type('on_update')
Animation.register_event_type('on_finish')
class Renderable(Component):
COMPONENT_NAME = 'renderable'
def __init__(self, image, memorable=False):
self._image = image
self.memorable = memorable
image = event_property('_image', 'image_change')
class LayoutRenderable(Component):
COMPONENT_NAME = 'layout_renderable'
def __init__(self, tile):
self.tile = tile
class RenderSystem(object):
zoom = 3
GROUP_LEVEL = pyglet.graphics.OrderedGroup(0)
GROUP_DIGITS = pyglet.graphics.OrderedGroup(1)
GROUP_HUD = pyglet.graphics.OrderedGroup(2)
def __init__(self, level):
self._level = level
self._window = level.game.game.window
self._batch = pyglet.graphics.Batch()
self._animations = set()
self._sprites = {}
self._level_vlist = None
self._light_overlay = None
self._last_messages_view = LastMessagesView(level.game.message_log, self._window.width, self._window.height, batch=self._batch, group=self.GROUP_HUD)
self._hud = HUD(batch=self._batch, group=self.GROUP_HUD)
self._level_group = ZoomGroup(self.zoom, CameraGroup(self._window, self.zoom, self.GROUP_LEVEL))
self._digits_group = CameraGroup(self._window, self.zoom, self.GROUP_DIGITS)
self._memory = collections.defaultdict(list)
def update_player(self):
player_sprite = self._sprites[self._level.player]
self._digits_group.focus = player_sprite
self._level_group.parent.focus = player_sprite
self._hud.player = self._level.player
def render_level(self):
vertices = []
tex_coords = []
for x in xrange(self._level.size_x):
for y in xrange(self._level.size_y):
x1 = x * 8
x2 = x1 + 8
y1 = y * 8
y2 = y1 + 8
for entity in self._level.position_system.get_entities_at(x, y):
renderable = entity.get(LayoutRenderable)
if renderable:
tile = renderable.tile
break
else:
continue
# always add floor, because we wanna draw walls above floor
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(floor_tex.tex_coords)
if tile == LayoutGenerator.TILE_WALL:
# if we got wall, draw it above floor
tex = get_wall_tex(self._level.get_wall_transition(x, y))
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(tex.tex_coords)
group = TextureGroup(dungeon_tex, pyglet.graphics.OrderedGroup(Position.ORDER_FLOOR, self._level_group))
self._level_vlist = self._batch.add(len(vertices) / 2, pyglet.gl.GL_QUADS, group,
('v2i/static', vertices),
('t3f/statc', tex_coords),
)
group = pyglet.graphics.OrderedGroup(Position.ORDER_PLAYER + 1, self._level_group)
self._light_overlay = LightOverlay(self._level.size_x, self._level.size_y, self._batch, group)
def update_light(self, old_lightmap, new_lightmap):
# for all changed cells
for key in set(old_lightmap).union(new_lightmap):
lit = key in new_lightmap
memory = self._memory[key]
# if cell is lit, add it to memory and clear all memory sprites, if there are any
if lit:
for sprite in memory:
sprite.delete()
memory[:] = []
# for every entity in cell
for entity in self._level.position_system.get_entities_at(*key):
# set in_fov flag
# TODO: this doesnt belong to rendering, but i don't want to loop twice
infov = entity.get(InFOV)
if infov:
infov.in_fov = key in new_lightmap
# if renderable, manage sprites/memory
renderable = entity.get(Renderable)
if not renderable:
continue
# if object is lit, show its sprite
sprite = self._sprites[entity]
if lit:
sprite.visible = True
else:
sprite.visible = False
# if it's memorable, add its current image to the memory
if renderable.memorable:
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(renderable.image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
memory.append(sprite)
# update light overlay
self._light_overlay.update_light(new_lightmap, self._memory)
def add_entity(self, entity):
|
def remove_entity(self, entity):
sprite = self._sprites.pop(entity)
sprite.delete()
entity.unlisten('image_change', self._on_image_change)
entity.unlisten('move', self._on_move)
def _on_image_change(self, entity):
self._sprites[entity].image = entity.get(Renderable).image
def _on_move(self, entity, old_x, old_y, new_x, new_y):
sprite = self._sprites[entity]
target_x = new_x * 8
target_y = new_y * 8
if not sprite.visible:
# don't animate invisible sprites
sprite.set_position(target_x, target_y)
else:
start_x = sprite.x
start_y = sprite.y
anim = Animation(0.25)
@anim.event
def on_update(animation, dt, sprite=sprite, dx=(target_x - start_x), dy=(target_y - start_y)):
ratio = animation.get_elapsed_ratio()
x = round(start_x + dx * ratio)
y = round(start_y + dy * ratio)
sprite.set_position(x, y)
@anim.event
def on_finish(animation, sprite=sprite):
sprite.set_position(target_x, target_y)
self.add_animation(anim)
def draw(self):
self._window.clear()
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
self | image = entity.get(Renderable).image
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
self._sprites[entity] = sprite
entity.listen('image_change', self._on_image_change)
entity.listen('move', self._on_move) | identifier_body |
render.py | super(ZoomGroup, self).__init__(parent)
self.zoom = zoom
def set_state(self):
pyglet.gl.glPushMatrix()
pyglet.gl.glScalef(self.zoom, self.zoom, 1)
def unset_state(self):
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.zoom == other.zoom and
self.parent == other.parent
)
def __hash__(self):
return hash((self.zoom, self.parent))
def __repr__(self):
return '%s(zoom=%d)' % (self.__class__.__name__, self.zoom)
class CameraGroup(pyglet.graphics.Group):
def __init__(self, window, zoom_factor, focus=None, parent=None):
super(CameraGroup, self).__init__(parent)
self.window = window
self.zoom_factor = zoom_factor
self.focus = focus
def set_state(self):
if self.focus is not None:
cam_x = self.window.width / 2 - self.focus.x * self.zoom_factor
cam_y = self.window.height / 2 - self.focus.y * self.zoom_factor
pyglet.gl.gl.glPushMatrix()
pyglet.gl.gl.glTranslatef(cam_x, cam_y, 0)
def unset_state(self):
if self.focus is not None:
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.window is other.window and
self.zoom_factor == other.zoom_factor and
self.parent == other.parent
)
def __hash__(self):
return hash((self.window, self.zoom_factor, self.parent))
class Animation(pyglet.event.EventDispatcher):
def __init__(self, duration):
self.elapsed = 0.0
self.duration = duration
pyglet.clock.schedule_interval(self._animate, 0.001)
def cancel(self):
pyglet.clock.unschedule(self._animate)
self.dispatch_event('on_finish', self)
def get_elapsed_ratio(self):
return self.elapsed / self.duration
def _animate(self, dt):
self.elapsed += dt
if self.elapsed > self.duration:
self.cancel()
else:
self.dispatch_event('on_update', self, dt)
Animation.register_event_type('on_update')
Animation.register_event_type('on_finish')
class Renderable(Component):
COMPONENT_NAME = 'renderable'
def __init__(self, image, memorable=False):
self._image = image
self.memorable = memorable
image = event_property('_image', 'image_change')
class LayoutRenderable(Component):
COMPONENT_NAME = 'layout_renderable'
def __init__(self, tile):
self.tile = tile
class RenderSystem(object):
zoom = 3
GROUP_LEVEL = pyglet.graphics.OrderedGroup(0)
GROUP_DIGITS = pyglet.graphics.OrderedGroup(1)
GROUP_HUD = pyglet.graphics.OrderedGroup(2)
def __init__(self, level):
self._level = level
self._window = level.game.game.window
self._batch = pyglet.graphics.Batch()
self._animations = set()
self._sprites = {}
self._level_vlist = None
self._light_overlay = None
self._last_messages_view = LastMessagesView(level.game.message_log, self._window.width, self._window.height, batch=self._batch, group=self.GROUP_HUD)
self._hud = HUD(batch=self._batch, group=self.GROUP_HUD)
self._level_group = ZoomGroup(self.zoom, CameraGroup(self._window, self.zoom, self.GROUP_LEVEL))
self._digits_group = CameraGroup(self._window, self.zoom, self.GROUP_DIGITS)
self._memory = collections.defaultdict(list)
def update_player(self):
player_sprite = self._sprites[self._level.player]
self._digits_group.focus = player_sprite
self._level_group.parent.focus = player_sprite
self._hud.player = self._level.player
def render_level(self):
vertices = []
tex_coords = []
for x in xrange(self._level.size_x):
for y in xrange(self._level.size_y):
x1 = x * 8
x2 = x1 + 8
y1 = y * 8
y2 = y1 + 8
for entity in self._level.position_system.get_entities_at(x, y):
|
else:
continue
# always add floor, because we wanna draw walls above floor
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(floor_tex.tex_coords)
if tile == LayoutGenerator.TILE_WALL:
# if we got wall, draw it above floor
tex = get_wall_tex(self._level.get_wall_transition(x, y))
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(tex.tex_coords)
group = TextureGroup(dungeon_tex, pyglet.graphics.OrderedGroup(Position.ORDER_FLOOR, self._level_group))
self._level_vlist = self._batch.add(len(vertices) / 2, pyglet.gl.GL_QUADS, group,
('v2i/static', vertices),
('t3f/statc', tex_coords),
)
group = pyglet.graphics.OrderedGroup(Position.ORDER_PLAYER + 1, self._level_group)
self._light_overlay = LightOverlay(self._level.size_x, self._level.size_y, self._batch, group)
def update_light(self, old_lightmap, new_lightmap):
# for all changed cells
for key in set(old_lightmap).union(new_lightmap):
lit = key in new_lightmap
memory = self._memory[key]
# if cell is lit, add it to memory and clear all memory sprites, if there are any
if lit:
for sprite in memory:
sprite.delete()
memory[:] = []
# for every entity in cell
for entity in self._level.position_system.get_entities_at(*key):
# set in_fov flag
# TODO: this doesnt belong to rendering, but i don't want to loop twice
infov = entity.get(InFOV)
if infov:
infov.in_fov = key in new_lightmap
# if renderable, manage sprites/memory
renderable = entity.get(Renderable)
if not renderable:
continue
# if object is lit, show its sprite
sprite = self._sprites[entity]
if lit:
sprite.visible = True
else:
sprite.visible = False
# if it's memorable, add its current image to the memory
if renderable.memorable:
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(renderable.image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
memory.append(sprite)
# update light overlay
self._light_overlay.update_light(new_lightmap, self._memory)
def add_entity(self, entity):
image = entity.get(Renderable).image
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
self._sprites[entity] = sprite
entity.listen('image_change', self._on_image_change)
entity.listen('move', self._on_move)
def remove_entity(self, entity):
sprite = self._sprites.pop(entity)
sprite.delete()
entity.unlisten('image_change', self._on_image_change)
entity.unlisten('move', self._on_move)
def _on_image_change(self, entity):
self._sprites[entity].image = entity.get(Renderable).image
def _on_move(self, entity, old_x, old_y, new_x, new_y):
sprite = self._sprites[entity]
target_x = new_x * 8
target_y = new_y * 8
if not sprite.visible:
# don't animate invisible sprites
sprite.set_position(target_x, target_y)
else:
start_x = sprite.x
start_y = sprite.y
anim = Animation(0.25)
@anim.event
def on_update(animation, dt, sprite=sprite, dx=(target_x - start_x), dy=(target_y - start_y)):
ratio = animation.get_elapsed_ratio()
x = round(start_x + dx * ratio)
y = round(start_y + dy * ratio)
sprite.set_position(x, y)
@anim.event
def on_finish(animation, sprite=sprite):
sprite.set_position(target_x, target_y)
self.add_animation(anim)
def draw(self):
self._window.clear()
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
self._ | renderable = entity.get(LayoutRenderable)
if renderable:
tile = renderable.tile
break | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.