file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
parser.js
var xhrgoform = require('xhrgoform'); var querystring = require('querystring'); var http = require('http'); var request = require('request'); var jsdom = require('jsdom'); var fs = require('fs'); var baseLink2013 = "http://registration.baa.org/cfm_Archive/iframe_ArchiveSearch.cfm?mode=results&criteria=&StoredProcParamsOn=yes&VarAgeLowID=0&VarAgeHighID=0&VarGenderID=0&VarBibNumber=&VarLastName=&VarFirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=2013&VarRaceYearHighID=0"; function run(){ function callback (arg) { console.log("Yay"); console.log(arg); }; function error (err) { console.log("Oops"); console.log(err); } //runAllYearsSafe(error, callback); // Not recommended - eats your brains (memory) //runYearsSafe(error, callback, ['2001']); // One at a time - spits out a file every 1000 runners //stitchYearTogether('2001'); // Which must be stitched together //cleanYear('2001'); // And then cleaned, because I messed up //cleanAllYears(); // Or all cleaned at once stitchAllYearsTogether(pretty=true); // Stick 'em togther into one big file } run(); var YEARS = ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013']; function fetchRunnersFromPage(error, callback, url, start, year) { var options = { 'url': url, 'form': { 'start':start, 
'next':'Next 25 Records' }, 'headers': { 'User-Agent':"Rested/2009 CFNetwork/673.2.1 Darwin/13.1.0 (x86_64) (MacBookPro11%2C2)" } } request.post( options, function (err, httpResponse, body) { if (body == null) { callback([]); } jsdom.env( { html: body, scripts: [ 'http://code.jquery.com/jquery-1.5.min.js' ], done: function (err, window) { var $ = window.jQuery; var runners = []; var lastRunnerWithHeader; $($($('.tablegrid_table')[0]).find('tbody')[0]).find('tr').each(function(trIndex, row) { var c = $(row).attr('class'); if (c === 'tr_header') { var runner = parseRunnerHeader($, row); lastRunnerWithHeader = runner; //console.log(runner); } else { var runner = parseRunnerBody($, row, lastRunnerWithHeader); if (runner) { runner.year = year; runners.push(runner); } } }); callback(runners); } } ); } ); } function runYear(error, callback, year) { var url = "http://registration.baa.org/cfm_Archive/iframe_ArchiveSearch.cfm?mode=results&criteria=&StoredProcParamsOn=yes&VarAgeLowID=0&VarAgeHighID=0&VarGenderID=0&VarBibNumber=&VarLastName=&VarFirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=" + year + "&VarRaceYearHighID=0"; var start = 1; var yearsRunners = []; runYearRecursive( error, callback, url, start, year ); } function runYearRecursive(error, callback, url, start, year) { 
fetchRunnersFromPage( function (err) { console.log(err); error(err); }, function (pagesRunners) { if (pagesRunners.length < 25) { // We're on the last page callback(pagesRunners); } else { // Go get the next page console.log(pagesRunners.length + " runners from start: " + start); runYearRecursive( error, function (recursiveRunners) { // add the next page's list onto the end of ours var runners = pagesRunners.concat(recursiveRunners); callback(runners); }, url, start + 25, year ); } }, url, start, year ); } function runYearSafe(error, callback, year) { var url = "http://registration.baa.org/cfm_Archive/iframe_ArchiveSearch.cfm?mode=results&criteria=&StoredProcParamsOn=yes&VarAgeLowID=0&VarAgeHighID=0&VarGenderID=0&VarBibNumber=&VarLastName=&VarFirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=" + year + "&VarRaceYearHighID=0"; var start = 1; //var start = 21001; var yearsRunners = []; var outputFileNumber = 1; //var outputFileNumber = 22; function save () { saveRunners(yearsRunners, 'marathonResults' + year + '-' + outputFileNumber + '.json'); outputFileNumber += 1; yearsRunners = []; } function doNext (runners) { yearsRunners = yearsRunners.concat(runners); if (runners.length < 25) { // We're on the last page save(); callback(); } else { // See if we should save if (yearsRunners.length 
== 1000) { save(); } // Go get the next page start += 25; runYearSubproblem ( error, doNext, url, start, year ); } } runYearSubproblem ( error, doNext, url, start, year ); } function runYearSubproblem(error, callback, url, start, year) { fetchRunnersFromPage( function (err) { console.log(err); error(err); }, function (pagesRunners) { console.log(pagesRunners.length + " runners from subproblem start: " + start); callback(pagesRunners); }, url, start, year ); } function runAllYears(error, callback) { var currentYearIndex = 0; var years = ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013'] var runners = []; function handleError (err) { currentYearIndex += 1; } function doNext() { if (currentYearIndex >= years.length) { console.log("Done"); callback(runners); return; } runYear( handleError, function (runners) { var thatYear = years[currentYearIndex]; console.log("### " + runners.length + " runners in year " + thatYear); currentYearIndex += 1; saveRunners(runners, 'marathonResults' + thatYear + '.json') doNext(); }, years[currentYearIndex] ); } doNext(); } function runYearsSafe(error, callback, years) { var currentYearIndex = 0; var runners = []; function handleError (err) { currentYearIndex += 1; } function doNext() { if (currentYearIndex >= years.length) { console.log("Done"); callback(runners); return; } runYearSafe( handleError, function () { currentYearIndex += 1; doNext(); }, years[currentYearIndex] ); } doNext(); } function runAllYearsSafe(error, callback) { runYearsSafe(error, callback, YEARS); } function saveRunners(runners, outputFilename, indent) { if (indent == undefined) { indent = 4; } console.log("Saving to " + outputFilename + "..."); try { fs.writeFile( outputFilename, JSON.stringify(runners, null, indent), function(err) { if(err) { console.log(err); } else { console.log("JSON saved to " + outputFilename + " with " + runners.length + " runners"); } } ); } catch (e) { console.log("ooops"); 
console.log("got in catch loop for " + outputFilename); console.log(e); } } function loadRunnersFromFile (fileName) { console.log("loading from " + fileName + "..."); var runners = JSON.parse(fs.readFileSync(fileName, 'utf8')); console.log("Loaded " + runners.length + " runners from " + fileName); return runners; } function loadRunnersFromYear (year) { var fileName = 'marathonResults' + year + '.json'; var runners = loadRunnersFromFile(fileName); return runners; } function stitchYearTogether (year) { fileNamesArray = fs.readdirSync('./'); fileNames = {}; for (var i = fileNamesArray.length - 1; i >= 0; i--) { fileNames[fileNamesArray[i]] = true; }; var runners = []; var outputFileNumber = 1; while (true) { var nextName = 'marathonResults' + year + '-' + outputFileNumber + '.json'; console.log(nextName); if (nextName in fileNames) { var obj = loadRunnersFromFile(nextName); runners = runners.concat(obj); outputFileNumber += 1; } else { break; } } var outputFilename = 'marathonResults' + year + '.json'; saveRunners(runners, outputFilename); } function
(pretty) { var years = ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013']; var runners = []; for (var i = 0; i < years.length; i++) { var year = years[i]; var nextName = 'marathonResults' + year + "+clean.json"; var theseRunners = loadRunnersFromFile(nextName); runners = runners.concat(theseRunners); }; var outputFilename = 'marathonResults.json'; var indent; if (pretty) { indent = 4; outputFilename = 'marathonResults+pretty.json'; } else { indent = 0; } saveRunners(runners, outputFilename, indent); } function cleanAllYears() { var years = ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013']; for (var i = 0; i < years.length; i++) { var year = years[i]; cleanYear(year); }; } function cleanYear (year) { var runners = loadRunnersFromYear(year); cleanRunners(runners); saveRunners(runners, 'marathonResults' + year + "+clean.json"); } function cleanRunners (runners) { for (var i = runners.length - 1; i >= 0; i--) { cleanRunner(runners[i]); }; } // "overallPlace": "8566 / 22672", // "genderPlace": "6661 / 13120", // "divisionPlace": "237 / 1118", function cleanRunner (runner) { splitOnSlashMakeIntsAndSaveIntoObjectAtKeys( runner.overallPlace, runner, "overallPlace", "overallTotal" ); splitOnSlashMakeIntsAndSaveIntoObjectAtKeys( runner.genderPlace, runner, "genderPlace", "genderTotal" ); splitOnSlashMakeIntsAndSaveIntoObjectAtKeys( runner.divisionPlace, runner, "divisionPlace", "divisionTotal" ); } function splitOnSlashMakeIntsAndSaveIntoObjectAtKeys (value, object, numeratorKey, denominatorKey) { parts = value.replace(/ /g,'').split('/'); var numerator, denominator; if (parts.length != 2) { console.log("oh crap: " + value + " of " + numeratorKey + " and " + denominatorKey + " on " + JSON.stringify(object)); numerator = null; denominator = null; } else { numerator = parseInt(parts[0]); denominator = parseInt(parts[1]); } object[numeratorKey] = numerator; 
object[denominatorKey] = denominator; return object; } var headerColumns = ["year","bib","name","age","gender","city","state","country"]; function parseRunnerHeader($, headerRow) { var runner = {}; $(headerRow).find('td').each( function (index, cell) { if (index >= headerColumns.length) return; var property = headerColumns[index]; var value = trim(cell.innerHTML); runner[property] = value; } ); return runner; } var bodyColumns = ["overallPlace","genderPlace","divisionPlace","officialTime","netTime"]; function parseRunnerBody($, bodyRow, runner) { var isActualData = false; $($(bodyRow).find('tr')[1]).find('td').each( function (index, cell) { if (index >= bodyColumns.length) return; var property = bodyColumns[index]; var value = trim(cell.innerHTML); runner[property] = value; isActualData = true; } ); if (!isActualData) { return null; } else { return runner; } } function trim(string) { result = string.trim(); if (result === "&nbsp;") return null; return result; }
stitchAllYearsTogether
identifier_name
klogd.rs
use crate::libbb::ptr_to_globals::bb_errno; use libc; use libc::openlog; use libc::syslog; extern "C" { #[no_mangle] fn strtoul( __nptr: *const libc::c_char, __endptr: *mut *mut libc::c_char, __base: libc::c_int, ) -> libc::c_ulong; #[no_mangle] fn signal(__sig: libc::c_int, __handler: __sighandler_t) -> __sighandler_t; #[no_mangle] fn strchrnul(__s: *const libc::c_char, __c: libc::c_int) -> *mut libc::c_char; #[no_mangle] fn klogctl(type_0: libc::c_int, b: *mut libc::c_char, len: libc::c_int) -> libc::c_int; #[no_mangle] fn bb_signals_recursive_norestart( sigs: libc::c_int, f: Option<unsafe extern "C" fn(_: libc::c_int) -> ()>, ); #[no_mangle] fn kill_myself_with_sig(sig: libc::c_int) -> !; #[no_mangle] static mut bb_got_signal: smallint; #[no_mangle] fn record_signo(signo: libc::c_int); #[no_mangle] fn overlapping_strcpy(dst: *mut libc::c_char, src: *const libc::c_char); #[no_mangle] fn xatou_range(str: *const libc::c_char, l: libc::c_uint, u: libc::c_uint) -> libc::c_uint; #[no_mangle] fn bb_daemonize_or_rexec(flags: libc::c_int); #[no_mangle] fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _: ...) 
-> u32; #[no_mangle] fn write_pidfile_std_path_and_ext(path: *const libc::c_char); #[no_mangle] fn remove_pidfile_std_path_and_ext(path: *const libc::c_char); #[no_mangle] static mut logmode: smallint; #[no_mangle] fn bb_simple_perror_msg(s: *const libc::c_char); #[no_mangle] static bb_banner: [libc::c_char; 0]; #[no_mangle] static mut bb_common_bufsiz1: [libc::c_char; 0]; } use crate::librb::signal::__sighandler_t; use crate::librb::smallint; pub type C2RustUnnamed = libc::c_uint; pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054; pub type C2RustUnnamed_0 = libc::c_uint; pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8; pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4; pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2; pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1; pub type C2RustUnnamed_1 = libc::c_uint; pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3; pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2; pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1; pub const LOGMODE_NONE: C2RustUnnamed_1 = 0; pub type C2RustUnnamed_2 = libc::c_uint; pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2; pub const OPT_LEVEL: C2RustUnnamed_2 = 1; pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024; /* * Mini klogd implementation for busybox * * Copyright (C) 2001 by Gennady Feldman <gfeldman@gena01.com>. * Changes: Made this a standalone busybox module which uses standalone * syslog() client interface. * * Copyright (C) 1999-2004 by Erik Andersen <andersen@codepoet.org> * * Copyright (C) 2000 by Karl M. Hegbloom <karlheg@debian.org> * * "circular buffer" Copyright (C) 2000 by Gennady Feldman <gfeldman@gena01.com> * * Maintainer: Gennady Feldman <gfeldman@gena01.com> as of Mar 12, 2001 * * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
*/ //config:config KLOGD //config: bool "klogd (5.7 kb)" //config: default y //config: help //config: klogd is a utility which intercepts and logs all //config: messages from the Linux kernel and sends the messages //config: out to the 'syslogd' utility so they can be logged. If //config: you wish to record the messages produced by the kernel, //config: you should enable this option. //config: //config:comment "klogd should not be used together with syslog to kernel printk buffer" //config: depends on KLOGD && FEATURE_KMSG_SYSLOG //config: //config:config FEATURE_KLOGD_KLOGCTL //config: bool "Use the klogctl() interface" //config: default y //config: depends on KLOGD //config: select PLATFORM_LINUX //config: help //config: The klogd applet supports two interfaces for reading //config: kernel messages. Linux provides the klogctl() interface //config: which allows reading messages from the kernel ring buffer //config: independently from the file system. //config: //config: If you answer 'N' here, klogd will use the more portable //config: approach of reading them from /proc or a device node. //config: However, this method requires the file to be available. //config: //config: If in doubt, say 'Y'. //applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP)) //kbuild:lib-$(CONFIG_KLOGD) += klogd.o //usage:#define klogd_trivial_usage //usage: "[-c N] [-n]" //usage:#define klogd_full_usage "\n\n" //usage: "Log kernel messages to syslog\n" //usage: "\n -c N Print to console messages more urgent than prio N (1-8)" //usage: "\n -n Run in foreground" /* The Linux-specific klogctl(3) interface does not rely on the filesystem and * allows us to change the console loglevel. Alternatively, we read the * messages from _PATH_KLOG. */ unsafe extern "C" fn klogd_open() { /* "Open the log. 
Currently a NOP" */ klogctl(1i32, 0 as *mut libc::c_char, 0i32); } unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) { /* "printk() prints a message on the console only if it has a loglevel * less than console_loglevel". Here we set console_loglevel = lvl. */ klogctl(8i32, 0 as *mut libc::c_char, lvl); } unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int { /* "2 -- Read from the log." */ return klogctl(2i32, bufp, len); } unsafe extern "C" fn klogd_close() { /* FYI: cmd 7 is equivalent to setting console_loglevel to 7 * via klogctl(8, NULL, 7). */ klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */ klogctl(0i32, 0 as *mut libc::c_char, 0i32); /* "0 -- Close the log. Currently a NOP" */ } /* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead, * because that's how they interpret word "default" * in the openlog() manpage: * LOG_USER (default) * generic user-level messages * and the fact that LOG_KERN is a constant 0. * glibc interprets it as "0 in openlog() call means 'use default'". * I think it means "if openlog wasn't called before syslog() is called, * use default". * Convincing glibc maintainers otherwise is, as usual, nearly impossible. * Should we open-code syslog() here to use correct facility? */ #[no_mangle] pub unsafe extern "C" fn
( mut _argc: libc::c_int, mut argv: *mut *mut libc::c_char, ) -> libc::c_int { let mut i: libc::c_int = 0i32; let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char; let mut opt: libc::c_int = 0; let mut used: libc::c_int = 0; opt = getopt32( argv, b"c:n\x00" as *const u8 as *const libc::c_char, &mut opt_c as *mut *mut libc::c_char, ) as libc::c_int; if opt & OPT_LEVEL as libc::c_int != 0 { /* Valid levels are between 1 and 8 */ i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int } if opt & OPT_FOREGROUND as libc::c_int == 0 { bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int); } logmode = LOGMODE_SYSLOG as libc::c_int as smallint; /* klogd_open() before openlog(), since it might use fixed fd 3, * and openlog() also may use the same fd 3 if we swap them: */ klogd_open(); openlog( b"kernel\x00" as *const u8 as *const libc::c_char, 0i32, 0i32 << 3i32, ); /* * glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER * above. The logic behind this is that standard * http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html * says the following about openlog and syslog: * "LOG_USER * Messages generated by arbitrary processes. * This is the default facility identifier if none is specified." * * I believe glibc misinterpreted this text as "if openlog's * third parameter is 0 (=LOG_KERN), treat it as LOG_USER". * Whereas it was meant to say "if *syslog* is called with facility * 0 in its 1st parameter without prior call to openlog, then perform * implicit openlog(LOG_USER)". * * As a result of this, eh, feature, standard klogd was forced * to open-code its own openlog and syslog implementation (!). * * Note that prohibiting openlog(LOG_KERN) on libc level does not * add any security: any process can open a socket to "/dev/log" * and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message" * * Google code search tells me there is no widespread use of * openlog("foo", 0, 0), thus fixing glibc won't break userspace. 
* * The bug against glibc was filed: * bugzilla.redhat.com/show_bug.cgi?id=547000 */ if i != 0 { klogd_setloglevel(i); } signal( 1i32, ::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t), ); /* We want klogd_read to not be restarted, thus _norestart: */ bb_signals_recursive_norestart( BB_FATAL_SIGS as libc::c_int, Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()), ); syslog( 5i32, b"klogd started: %s\x00" as *const u8 as *const libc::c_char, bb_banner.as_ptr(), ); write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char); used = 0i32; while bb_got_signal == 0 { let mut n: libc::c_int = 0; let mut priority: libc::c_int = 0; let mut start: *mut libc::c_char = 0 as *mut libc::c_char; start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize); n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used); if n < 0i32 { if *bb_errno == 4i32 { continue; } bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char); break; } else { *start.offset(n as isize) = '\u{0}' as i32 as libc::c_char; /* Process each newline-terminated line in the buffer */ start = bb_common_bufsiz1.as_mut_ptr(); loop { let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32); if *newline as libc::c_int == '\u{0}' as i32 { /* This line is incomplete */ /* move it to the front of the buffer */ overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start); used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int; if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 { break; } /* buffer is full, log it anyway */ used = 0i32; newline = 0 as *mut libc::c_char } else { let fresh0 = newline; newline = newline.offset(1); *fresh0 = '\u{0}' as i32 as libc::c_char } /* Extract the priority */ priority = 6i32; if *start as libc::c_int == '<' as i32 { start = start.offset(1); if *start != 0 { let mut end: *mut libc::c_char = 0 as *mut libc::c_char; priority = strtoul(start, &mut end, 10i32) as 
libc::c_int; if *end as libc::c_int == '>' as i32 { end = end.offset(1) } start = end } } /* Log (only non-empty lines) */ if *start != 0 { syslog( priority, b"%s\x00" as *const u8 as *const libc::c_char, start, ); } if newline.is_null() { break; } start = newline } } } klogd_close(); syslog( 5i32, b"klogd: exiting\x00" as *const u8 as *const libc::c_char, ); remove_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char); if bb_got_signal != 0 { kill_myself_with_sig(bb_got_signal as libc::c_int); } return 1i32; }
klogd_main
identifier_name
klogd.rs
use crate::libbb::ptr_to_globals::bb_errno; use libc; use libc::openlog; use libc::syslog; extern "C" { #[no_mangle] fn strtoul( __nptr: *const libc::c_char, __endptr: *mut *mut libc::c_char, __base: libc::c_int, ) -> libc::c_ulong; #[no_mangle] fn signal(__sig: libc::c_int, __handler: __sighandler_t) -> __sighandler_t; #[no_mangle] fn strchrnul(__s: *const libc::c_char, __c: libc::c_int) -> *mut libc::c_char; #[no_mangle] fn klogctl(type_0: libc::c_int, b: *mut libc::c_char, len: libc::c_int) -> libc::c_int; #[no_mangle] fn bb_signals_recursive_norestart( sigs: libc::c_int, f: Option<unsafe extern "C" fn(_: libc::c_int) -> ()>, ); #[no_mangle] fn kill_myself_with_sig(sig: libc::c_int) -> !; #[no_mangle] static mut bb_got_signal: smallint; #[no_mangle] fn record_signo(signo: libc::c_int); #[no_mangle] fn overlapping_strcpy(dst: *mut libc::c_char, src: *const libc::c_char); #[no_mangle] fn xatou_range(str: *const libc::c_char, l: libc::c_uint, u: libc::c_uint) -> libc::c_uint; #[no_mangle] fn bb_daemonize_or_rexec(flags: libc::c_int); #[no_mangle]
fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _: ...) -> u32; #[no_mangle] fn write_pidfile_std_path_and_ext(path: *const libc::c_char); #[no_mangle] fn remove_pidfile_std_path_and_ext(path: *const libc::c_char); #[no_mangle] static mut logmode: smallint; #[no_mangle] fn bb_simple_perror_msg(s: *const libc::c_char); #[no_mangle] static bb_banner: [libc::c_char; 0]; #[no_mangle] static mut bb_common_bufsiz1: [libc::c_char; 0]; } use crate::librb::signal::__sighandler_t; use crate::librb::smallint; pub type C2RustUnnamed = libc::c_uint; pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054; pub type C2RustUnnamed_0 = libc::c_uint; pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8; pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4; pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2; pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1; pub type C2RustUnnamed_1 = libc::c_uint; pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3; pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2; pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1; pub const LOGMODE_NONE: C2RustUnnamed_1 = 0; pub type C2RustUnnamed_2 = libc::c_uint; pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2; pub const OPT_LEVEL: C2RustUnnamed_2 = 1; pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024; /* * Mini klogd implementation for busybox * * Copyright (C) 2001 by Gennady Feldman <gfeldman@gena01.com>. * Changes: Made this a standalone busybox module which uses standalone * syslog() client interface. * * Copyright (C) 1999-2004 by Erik Andersen <andersen@codepoet.org> * * Copyright (C) 2000 by Karl M. Hegbloom <karlheg@debian.org> * * "circular buffer" Copyright (C) 2000 by Gennady Feldman <gfeldman@gena01.com> * * Maintainer: Gennady Feldman <gfeldman@gena01.com> as of Mar 12, 2001 * * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
*/ //config:config KLOGD //config: bool "klogd (5.7 kb)" //config: default y //config: help //config: klogd is a utility which intercepts and logs all //config: messages from the Linux kernel and sends the messages //config: out to the 'syslogd' utility so they can be logged. If //config: you wish to record the messages produced by the kernel, //config: you should enable this option. //config: //config:comment "klogd should not be used together with syslog to kernel printk buffer" //config: depends on KLOGD && FEATURE_KMSG_SYSLOG //config: //config:config FEATURE_KLOGD_KLOGCTL //config: bool "Use the klogctl() interface" //config: default y //config: depends on KLOGD //config: select PLATFORM_LINUX //config: help //config: The klogd applet supports two interfaces for reading //config: kernel messages. Linux provides the klogctl() interface //config: which allows reading messages from the kernel ring buffer //config: independently from the file system. //config: //config: If you answer 'N' here, klogd will use the more portable //config: approach of reading them from /proc or a device node. //config: However, this method requires the file to be available. //config: //config: If in doubt, say 'Y'. //applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP)) //kbuild:lib-$(CONFIG_KLOGD) += klogd.o //usage:#define klogd_trivial_usage //usage: "[-c N] [-n]" //usage:#define klogd_full_usage "\n\n" //usage: "Log kernel messages to syslog\n" //usage: "\n -c N Print to console messages more urgent than prio N (1-8)" //usage: "\n -n Run in foreground" /* The Linux-specific klogctl(3) interface does not rely on the filesystem and * allows us to change the console loglevel. Alternatively, we read the * messages from _PATH_KLOG. */ unsafe extern "C" fn klogd_open() { /* "Open the log. 
Currently a NOP" */ klogctl(1i32, 0 as *mut libc::c_char, 0i32); } unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) { /* "printk() prints a message on the console only if it has a loglevel * less than console_loglevel". Here we set console_loglevel = lvl. */ klogctl(8i32, 0 as *mut libc::c_char, lvl); } unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int { /* "2 -- Read from the log." */ return klogctl(2i32, bufp, len); } unsafe extern "C" fn klogd_close() { /* FYI: cmd 7 is equivalent to setting console_loglevel to 7 * via klogctl(8, NULL, 7). */ klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */ klogctl(0i32, 0 as *mut libc::c_char, 0i32); /* "0 -- Close the log. Currently a NOP" */ } /* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead, * because that's how they interpret word "default" * in the openlog() manpage: * LOG_USER (default) * generic user-level messages * and the fact that LOG_KERN is a constant 0. * glibc interprets it as "0 in openlog() call means 'use default'". * I think it means "if openlog wasn't called before syslog() is called, * use default". * Convincing glibc maintainers otherwise is, as usual, nearly impossible. * Should we open-code syslog() here to use correct facility? 
*/ #[no_mangle] pub unsafe extern "C" fn klogd_main( mut _argc: libc::c_int, mut argv: *mut *mut libc::c_char, ) -> libc::c_int { let mut i: libc::c_int = 0i32; let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char; let mut opt: libc::c_int = 0; let mut used: libc::c_int = 0; opt = getopt32( argv, b"c:n\x00" as *const u8 as *const libc::c_char, &mut opt_c as *mut *mut libc::c_char, ) as libc::c_int; if opt & OPT_LEVEL as libc::c_int != 0 { /* Valid levels are between 1 and 8 */ i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int } if opt & OPT_FOREGROUND as libc::c_int == 0 { bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int); } logmode = LOGMODE_SYSLOG as libc::c_int as smallint; /* klogd_open() before openlog(), since it might use fixed fd 3, * and openlog() also may use the same fd 3 if we swap them: */ klogd_open(); openlog( b"kernel\x00" as *const u8 as *const libc::c_char, 0i32, 0i32 << 3i32, ); /* * glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER * above. The logic behind this is that standard * http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html * says the following about openlog and syslog: * "LOG_USER * Messages generated by arbitrary processes. * This is the default facility identifier if none is specified." * * I believe glibc misinterpreted this text as "if openlog's * third parameter is 0 (=LOG_KERN), treat it as LOG_USER". * Whereas it was meant to say "if *syslog* is called with facility * 0 in its 1st parameter without prior call to openlog, then perform * implicit openlog(LOG_USER)". * * As a result of this, eh, feature, standard klogd was forced * to open-code its own openlog and syslog implementation (!). 
* * Note that prohibiting openlog(LOG_KERN) on libc level does not * add any security: any process can open a socket to "/dev/log" * and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message" * * Google code search tells me there is no widespread use of * openlog("foo", 0, 0), thus fixing glibc won't break userspace. * * The bug against glibc was filed: * bugzilla.redhat.com/show_bug.cgi?id=547000 */ if i != 0 { klogd_setloglevel(i); } signal( 1i32, ::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t), ); /* We want klogd_read to not be restarted, thus _norestart: */ bb_signals_recursive_norestart( BB_FATAL_SIGS as libc::c_int, Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()), ); syslog( 5i32, b"klogd started: %s\x00" as *const u8 as *const libc::c_char, bb_banner.as_ptr(), ); write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char); used = 0i32; while bb_got_signal == 0 { let mut n: libc::c_int = 0; let mut priority: libc::c_int = 0; let mut start: *mut libc::c_char = 0 as *mut libc::c_char; start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize); n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used); if n < 0i32 { if *bb_errno == 4i32 { continue; } bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char); break; } else { *start.offset(n as isize) = '\u{0}' as i32 as libc::c_char; /* Process each newline-terminated line in the buffer */ start = bb_common_bufsiz1.as_mut_ptr(); loop { let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32); if *newline as libc::c_int == '\u{0}' as i32 { /* This line is incomplete */ /* move it to the front of the buffer */ overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start); used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int; if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 { break; } /* buffer is full, log it anyway */ used = 0i32; newline = 0 as *mut libc::c_char } else { let 
fresh0 = newline; newline = newline.offset(1); *fresh0 = '\u{0}' as i32 as libc::c_char } /* Extract the priority */ priority = 6i32; if *start as libc::c_int == '<' as i32 { start = start.offset(1); if *start != 0 { let mut end: *mut libc::c_char = 0 as *mut libc::c_char; priority = strtoul(start, &mut end, 10i32) as libc::c_int; if *end as libc::c_int == '>' as i32 { end = end.offset(1) } start = end } } /* Log (only non-empty lines) */ if *start != 0 { syslog( priority, b"%s\x00" as *const u8 as *const libc::c_char, start, ); } if newline.is_null() { break; } start = newline } } } klogd_close(); syslog( 5i32, b"klogd: exiting\x00" as *const u8 as *const libc::c_char, ); remove_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char); if bb_got_signal != 0 { kill_myself_with_sig(bb_got_signal as libc::c_int); } return 1i32; }
random_line_split
klogd.rs
use crate::libbb::ptr_to_globals::bb_errno; use libc; use libc::openlog; use libc::syslog; extern "C" { #[no_mangle] fn strtoul( __nptr: *const libc::c_char, __endptr: *mut *mut libc::c_char, __base: libc::c_int, ) -> libc::c_ulong; #[no_mangle] fn signal(__sig: libc::c_int, __handler: __sighandler_t) -> __sighandler_t; #[no_mangle] fn strchrnul(__s: *const libc::c_char, __c: libc::c_int) -> *mut libc::c_char; #[no_mangle] fn klogctl(type_0: libc::c_int, b: *mut libc::c_char, len: libc::c_int) -> libc::c_int; #[no_mangle] fn bb_signals_recursive_norestart( sigs: libc::c_int, f: Option<unsafe extern "C" fn(_: libc::c_int) -> ()>, ); #[no_mangle] fn kill_myself_with_sig(sig: libc::c_int) -> !; #[no_mangle] static mut bb_got_signal: smallint; #[no_mangle] fn record_signo(signo: libc::c_int); #[no_mangle] fn overlapping_strcpy(dst: *mut libc::c_char, src: *const libc::c_char); #[no_mangle] fn xatou_range(str: *const libc::c_char, l: libc::c_uint, u: libc::c_uint) -> libc::c_uint; #[no_mangle] fn bb_daemonize_or_rexec(flags: libc::c_int); #[no_mangle] fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _: ...) 
-> u32; #[no_mangle] fn write_pidfile_std_path_and_ext(path: *const libc::c_char); #[no_mangle] fn remove_pidfile_std_path_and_ext(path: *const libc::c_char); #[no_mangle] static mut logmode: smallint; #[no_mangle] fn bb_simple_perror_msg(s: *const libc::c_char); #[no_mangle] static bb_banner: [libc::c_char; 0]; #[no_mangle] static mut bb_common_bufsiz1: [libc::c_char; 0]; } use crate::librb::signal::__sighandler_t; use crate::librb::smallint; pub type C2RustUnnamed = libc::c_uint; pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054; pub type C2RustUnnamed_0 = libc::c_uint; pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8; pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4; pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2; pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1; pub type C2RustUnnamed_1 = libc::c_uint; pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3; pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2; pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1; pub const LOGMODE_NONE: C2RustUnnamed_1 = 0; pub type C2RustUnnamed_2 = libc::c_uint; pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2; pub const OPT_LEVEL: C2RustUnnamed_2 = 1; pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024; /* * Mini klogd implementation for busybox * * Copyright (C) 2001 by Gennady Feldman <gfeldman@gena01.com>. * Changes: Made this a standalone busybox module which uses standalone * syslog() client interface. * * Copyright (C) 1999-2004 by Erik Andersen <andersen@codepoet.org> * * Copyright (C) 2000 by Karl M. Hegbloom <karlheg@debian.org> * * "circular buffer" Copyright (C) 2000 by Gennady Feldman <gfeldman@gena01.com> * * Maintainer: Gennady Feldman <gfeldman@gena01.com> as of Mar 12, 2001 * * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
*/ //config:config KLOGD //config: bool "klogd (5.7 kb)" //config: default y //config: help //config: klogd is a utility which intercepts and logs all //config: messages from the Linux kernel and sends the messages //config: out to the 'syslogd' utility so they can be logged. If //config: you wish to record the messages produced by the kernel, //config: you should enable this option. //config: //config:comment "klogd should not be used together with syslog to kernel printk buffer" //config: depends on KLOGD && FEATURE_KMSG_SYSLOG //config: //config:config FEATURE_KLOGD_KLOGCTL //config: bool "Use the klogctl() interface" //config: default y //config: depends on KLOGD //config: select PLATFORM_LINUX //config: help //config: The klogd applet supports two interfaces for reading //config: kernel messages. Linux provides the klogctl() interface //config: which allows reading messages from the kernel ring buffer //config: independently from the file system. //config: //config: If you answer 'N' here, klogd will use the more portable //config: approach of reading them from /proc or a device node. //config: However, this method requires the file to be available. //config: //config: If in doubt, say 'Y'. //applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP)) //kbuild:lib-$(CONFIG_KLOGD) += klogd.o //usage:#define klogd_trivial_usage //usage: "[-c N] [-n]" //usage:#define klogd_full_usage "\n\n" //usage: "Log kernel messages to syslog\n" //usage: "\n -c N Print to console messages more urgent than prio N (1-8)" //usage: "\n -n Run in foreground" /* The Linux-specific klogctl(3) interface does not rely on the filesystem and * allows us to change the console loglevel. Alternatively, we read the * messages from _PATH_KLOG. */ unsafe extern "C" fn klogd_open() { /* "Open the log. 
Currently a NOP" */ klogctl(1i32, 0 as *mut libc::c_char, 0i32); } unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) { /* "printk() prints a message on the console only if it has a loglevel * less than console_loglevel". Here we set console_loglevel = lvl. */ klogctl(8i32, 0 as *mut libc::c_char, lvl); } unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int
unsafe extern "C" fn klogd_close() { /* FYI: cmd 7 is equivalent to setting console_loglevel to 7 * via klogctl(8, NULL, 7). */ klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */ klogctl(0i32, 0 as *mut libc::c_char, 0i32); /* "0 -- Close the log. Currently a NOP" */ } /* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead, * because that's how they interpret word "default" * in the openlog() manpage: * LOG_USER (default) * generic user-level messages * and the fact that LOG_KERN is a constant 0. * glibc interprets it as "0 in openlog() call means 'use default'". * I think it means "if openlog wasn't called before syslog() is called, * use default". * Convincing glibc maintainers otherwise is, as usual, nearly impossible. * Should we open-code syslog() here to use correct facility? */ #[no_mangle] pub unsafe extern "C" fn klogd_main( mut _argc: libc::c_int, mut argv: *mut *mut libc::c_char, ) -> libc::c_int { let mut i: libc::c_int = 0i32; let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char; let mut opt: libc::c_int = 0; let mut used: libc::c_int = 0; opt = getopt32( argv, b"c:n\x00" as *const u8 as *const libc::c_char, &mut opt_c as *mut *mut libc::c_char, ) as libc::c_int; if opt & OPT_LEVEL as libc::c_int != 0 { /* Valid levels are between 1 and 8 */ i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int } if opt & OPT_FOREGROUND as libc::c_int == 0 { bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int); } logmode = LOGMODE_SYSLOG as libc::c_int as smallint; /* klogd_open() before openlog(), since it might use fixed fd 3, * and openlog() also may use the same fd 3 if we swap them: */ klogd_open(); openlog( b"kernel\x00" as *const u8 as *const libc::c_char, 0i32, 0i32 << 3i32, ); /* * glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER * above. 
The logic behind this is that standard * http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html * says the following about openlog and syslog: * "LOG_USER * Messages generated by arbitrary processes. * This is the default facility identifier if none is specified." * * I believe glibc misinterpreted this text as "if openlog's * third parameter is 0 (=LOG_KERN), treat it as LOG_USER". * Whereas it was meant to say "if *syslog* is called with facility * 0 in its 1st parameter without prior call to openlog, then perform * implicit openlog(LOG_USER)". * * As a result of this, eh, feature, standard klogd was forced * to open-code its own openlog and syslog implementation (!). * * Note that prohibiting openlog(LOG_KERN) on libc level does not * add any security: any process can open a socket to "/dev/log" * and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message" * * Google code search tells me there is no widespread use of * openlog("foo", 0, 0), thus fixing glibc won't break userspace. 
* * The bug against glibc was filed: * bugzilla.redhat.com/show_bug.cgi?id=547000 */ if i != 0 { klogd_setloglevel(i); } signal( 1i32, ::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t), ); /* We want klogd_read to not be restarted, thus _norestart: */ bb_signals_recursive_norestart( BB_FATAL_SIGS as libc::c_int, Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()), ); syslog( 5i32, b"klogd started: %s\x00" as *const u8 as *const libc::c_char, bb_banner.as_ptr(), ); write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char); used = 0i32; while bb_got_signal == 0 { let mut n: libc::c_int = 0; let mut priority: libc::c_int = 0; let mut start: *mut libc::c_char = 0 as *mut libc::c_char; start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize); n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used); if n < 0i32 { if *bb_errno == 4i32 { continue; } bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char); break; } else { *start.offset(n as isize) = '\u{0}' as i32 as libc::c_char; /* Process each newline-terminated line in the buffer */ start = bb_common_bufsiz1.as_mut_ptr(); loop { let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32); if *newline as libc::c_int == '\u{0}' as i32 { /* This line is incomplete */ /* move it to the front of the buffer */ overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start); used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int; if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 { break; } /* buffer is full, log it anyway */ used = 0i32; newline = 0 as *mut libc::c_char } else { let fresh0 = newline; newline = newline.offset(1); *fresh0 = '\u{0}' as i32 as libc::c_char } /* Extract the priority */ priority = 6i32; if *start as libc::c_int == '<' as i32 { start = start.offset(1); if *start != 0 { let mut end: *mut libc::c_char = 0 as *mut libc::c_char; priority = strtoul(start, &mut end, 10i32) as 
libc::c_int; if *end as libc::c_int == '>' as i32 { end = end.offset(1) } start = end } } /* Log (only non-empty lines) */ if *start != 0 { syslog( priority, b"%s\x00" as *const u8 as *const libc::c_char, start, ); } if newline.is_null() { break; } start = newline } } } klogd_close(); syslog( 5i32, b"klogd: exiting\x00" as *const u8 as *const libc::c_char, ); remove_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char); if bb_got_signal != 0 { kill_myself_with_sig(bb_got_signal as libc::c_int); } return 1i32; }
{ /* "2 -- Read from the log." */ return klogctl(2i32, bufp, len); }
identifier_body
klogd.rs
use crate::libbb::ptr_to_globals::bb_errno; use libc; use libc::openlog; use libc::syslog; extern "C" { #[no_mangle] fn strtoul( __nptr: *const libc::c_char, __endptr: *mut *mut libc::c_char, __base: libc::c_int, ) -> libc::c_ulong; #[no_mangle] fn signal(__sig: libc::c_int, __handler: __sighandler_t) -> __sighandler_t; #[no_mangle] fn strchrnul(__s: *const libc::c_char, __c: libc::c_int) -> *mut libc::c_char; #[no_mangle] fn klogctl(type_0: libc::c_int, b: *mut libc::c_char, len: libc::c_int) -> libc::c_int; #[no_mangle] fn bb_signals_recursive_norestart( sigs: libc::c_int, f: Option<unsafe extern "C" fn(_: libc::c_int) -> ()>, ); #[no_mangle] fn kill_myself_with_sig(sig: libc::c_int) -> !; #[no_mangle] static mut bb_got_signal: smallint; #[no_mangle] fn record_signo(signo: libc::c_int); #[no_mangle] fn overlapping_strcpy(dst: *mut libc::c_char, src: *const libc::c_char); #[no_mangle] fn xatou_range(str: *const libc::c_char, l: libc::c_uint, u: libc::c_uint) -> libc::c_uint; #[no_mangle] fn bb_daemonize_or_rexec(flags: libc::c_int); #[no_mangle] fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _: ...) 
-> u32; #[no_mangle] fn write_pidfile_std_path_and_ext(path: *const libc::c_char); #[no_mangle] fn remove_pidfile_std_path_and_ext(path: *const libc::c_char); #[no_mangle] static mut logmode: smallint; #[no_mangle] fn bb_simple_perror_msg(s: *const libc::c_char); #[no_mangle] static bb_banner: [libc::c_char; 0]; #[no_mangle] static mut bb_common_bufsiz1: [libc::c_char; 0]; } use crate::librb::signal::__sighandler_t; use crate::librb::smallint; pub type C2RustUnnamed = libc::c_uint; pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054; pub type C2RustUnnamed_0 = libc::c_uint; pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8; pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4; pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2; pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1; pub type C2RustUnnamed_1 = libc::c_uint; pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3; pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2; pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1; pub const LOGMODE_NONE: C2RustUnnamed_1 = 0; pub type C2RustUnnamed_2 = libc::c_uint; pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2; pub const OPT_LEVEL: C2RustUnnamed_2 = 1; pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024; /* * Mini klogd implementation for busybox * * Copyright (C) 2001 by Gennady Feldman <gfeldman@gena01.com>. * Changes: Made this a standalone busybox module which uses standalone * syslog() client interface. * * Copyright (C) 1999-2004 by Erik Andersen <andersen@codepoet.org> * * Copyright (C) 2000 by Karl M. Hegbloom <karlheg@debian.org> * * "circular buffer" Copyright (C) 2000 by Gennady Feldman <gfeldman@gena01.com> * * Maintainer: Gennady Feldman <gfeldman@gena01.com> as of Mar 12, 2001 * * Licensed under GPLv2 or later, see file LICENSE in this source tree. 
*/ //config:config KLOGD //config: bool "klogd (5.7 kb)" //config: default y //config: help //config: klogd is a utility which intercepts and logs all //config: messages from the Linux kernel and sends the messages //config: out to the 'syslogd' utility so they can be logged. If //config: you wish to record the messages produced by the kernel, //config: you should enable this option. //config: //config:comment "klogd should not be used together with syslog to kernel printk buffer" //config: depends on KLOGD && FEATURE_KMSG_SYSLOG //config: //config:config FEATURE_KLOGD_KLOGCTL //config: bool "Use the klogctl() interface" //config: default y //config: depends on KLOGD //config: select PLATFORM_LINUX //config: help //config: The klogd applet supports two interfaces for reading //config: kernel messages. Linux provides the klogctl() interface //config: which allows reading messages from the kernel ring buffer //config: independently from the file system. //config: //config: If you answer 'N' here, klogd will use the more portable //config: approach of reading them from /proc or a device node. //config: However, this method requires the file to be available. //config: //config: If in doubt, say 'Y'. //applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP)) //kbuild:lib-$(CONFIG_KLOGD) += klogd.o //usage:#define klogd_trivial_usage //usage: "[-c N] [-n]" //usage:#define klogd_full_usage "\n\n" //usage: "Log kernel messages to syslog\n" //usage: "\n -c N Print to console messages more urgent than prio N (1-8)" //usage: "\n -n Run in foreground" /* The Linux-specific klogctl(3) interface does not rely on the filesystem and * allows us to change the console loglevel. Alternatively, we read the * messages from _PATH_KLOG. */ unsafe extern "C" fn klogd_open() { /* "Open the log. 
Currently a NOP" */ klogctl(1i32, 0 as *mut libc::c_char, 0i32); } unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) { /* "printk() prints a message on the console only if it has a loglevel * less than console_loglevel". Here we set console_loglevel = lvl. */ klogctl(8i32, 0 as *mut libc::c_char, lvl); } unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int { /* "2 -- Read from the log." */ return klogctl(2i32, bufp, len); } unsafe extern "C" fn klogd_close() { /* FYI: cmd 7 is equivalent to setting console_loglevel to 7 * via klogctl(8, NULL, 7). */ klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */ klogctl(0i32, 0 as *mut libc::c_char, 0i32); /* "0 -- Close the log. Currently a NOP" */ } /* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead, * because that's how they interpret word "default" * in the openlog() manpage: * LOG_USER (default) * generic user-level messages * and the fact that LOG_KERN is a constant 0. * glibc interprets it as "0 in openlog() call means 'use default'". * I think it means "if openlog wasn't called before syslog() is called, * use default". * Convincing glibc maintainers otherwise is, as usual, nearly impossible. * Should we open-code syslog() here to use correct facility? 
*/ #[no_mangle] pub unsafe extern "C" fn klogd_main( mut _argc: libc::c_int, mut argv: *mut *mut libc::c_char, ) -> libc::c_int { let mut i: libc::c_int = 0i32; let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char; let mut opt: libc::c_int = 0; let mut used: libc::c_int = 0; opt = getopt32( argv, b"c:n\x00" as *const u8 as *const libc::c_char, &mut opt_c as *mut *mut libc::c_char, ) as libc::c_int; if opt & OPT_LEVEL as libc::c_int != 0 { /* Valid levels are between 1 and 8 */ i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int } if opt & OPT_FOREGROUND as libc::c_int == 0 { bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int); } logmode = LOGMODE_SYSLOG as libc::c_int as smallint; /* klogd_open() before openlog(), since it might use fixed fd 3, * and openlog() also may use the same fd 3 if we swap them: */ klogd_open(); openlog( b"kernel\x00" as *const u8 as *const libc::c_char, 0i32, 0i32 << 3i32, ); /* * glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER * above. The logic behind this is that standard * http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html * says the following about openlog and syslog: * "LOG_USER * Messages generated by arbitrary processes. * This is the default facility identifier if none is specified." * * I believe glibc misinterpreted this text as "if openlog's * third parameter is 0 (=LOG_KERN), treat it as LOG_USER". * Whereas it was meant to say "if *syslog* is called with facility * 0 in its 1st parameter without prior call to openlog, then perform * implicit openlog(LOG_USER)". * * As a result of this, eh, feature, standard klogd was forced * to open-code its own openlog and syslog implementation (!). 
* * Note that prohibiting openlog(LOG_KERN) on libc level does not * add any security: any process can open a socket to "/dev/log" * and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message" * * Google code search tells me there is no widespread use of * openlog("foo", 0, 0), thus fixing glibc won't break userspace. * * The bug against glibc was filed: * bugzilla.redhat.com/show_bug.cgi?id=547000 */ if i != 0 { klogd_setloglevel(i); } signal( 1i32, ::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t), ); /* We want klogd_read to not be restarted, thus _norestart: */ bb_signals_recursive_norestart( BB_FATAL_SIGS as libc::c_int, Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()), ); syslog( 5i32, b"klogd started: %s\x00" as *const u8 as *const libc::c_char, bb_banner.as_ptr(), ); write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char); used = 0i32; while bb_got_signal == 0 { let mut n: libc::c_int = 0; let mut priority: libc::c_int = 0; let mut start: *mut libc::c_char = 0 as *mut libc::c_char; start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize); n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used); if n < 0i32 { if *bb_errno == 4i32 { continue; } bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char); break; } else
} klogd_close(); syslog( 5i32, b"klogd: exiting\x00" as *const u8 as *const libc::c_char, ); remove_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char); if bb_got_signal != 0 { kill_myself_with_sig(bb_got_signal as libc::c_int); } return 1i32; }
{ *start.offset(n as isize) = '\u{0}' as i32 as libc::c_char; /* Process each newline-terminated line in the buffer */ start = bb_common_bufsiz1.as_mut_ptr(); loop { let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32); if *newline as libc::c_int == '\u{0}' as i32 { /* This line is incomplete */ /* move it to the front of the buffer */ overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start); used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int; if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 { break; } /* buffer is full, log it anyway */ used = 0i32; newline = 0 as *mut libc::c_char } else { let fresh0 = newline; newline = newline.offset(1); *fresh0 = '\u{0}' as i32 as libc::c_char } /* Extract the priority */ priority = 6i32; if *start as libc::c_int == '<' as i32 { start = start.offset(1); if *start != 0 { let mut end: *mut libc::c_char = 0 as *mut libc::c_char; priority = strtoul(start, &mut end, 10i32) as libc::c_int; if *end as libc::c_int == '>' as i32 { end = end.offset(1) } start = end } } /* Log (only non-empty lines) */ if *start != 0 { syslog( priority, b"%s\x00" as *const u8 as *const libc::c_char, start, ); } if newline.is_null() { break; } start = newline } }
conditional_block
sandbox.go
package main import ( "fmt" "golang.org/x/tour/pic" "golang.org/x/tour/wc" "math" "math/cmplx" "math/rand" "os" "runtime" "strings" "time" ) func add1(x int, y int) int { return x + y } func add2(x, y int) int { return x + y } func swap(x, y string) (string, string) { return y, x } func split(sum int) (x, y int) { x = sum * 4 / 9 y = sum - x return //naked return } //1 var c1, python1, java1 bool //2 var i2, j2 int = 1, 2 var ( ToBe bool = false MaxInt uint64 = 1<<64 - 1 z4 complex128 = cmplx.Sqrt(-5 + 12i) ) const Pi = 3.14 const ( Big = 1 << 100 Small = Big >> 99 ) func needInt(x int) int { return x*10 + 1 } func needFloat(x float64) float64 { return x * 0.1 } func sqrt(x float64) string { if x < 0 { return sqrt(-x) + "i" } return fmt.Sprint(math.Sqrt(x)) } func pow1(x, n, lim float64) float64 { if v := math.Pow(x, n); v < lim { return v } else { fmt.Printf("%g >= %g\n", v, lim) } // can't use v here, though return lim } func DeferFunc() { // defer defer fmt.Println("world") fmt.Println("hello") // stacking defer fmt.Println("counting") for i := 0; i < 10; i++ { defer fmt.Println(i) } fmt.Println("done") } type Vertex struct { X int Y int } var ( v3 = Vertex{1, 2} // has type Vertex v4 = Vertex{X: 1} // Y:0 is implicit v5 = Vertex{} // X:0 and Y:0 p2 = &Vertex{1, 2} // has type *Vertex ) // making slices func printSlice(s string, x []int) { fmt.Printf("%s len=%d cap=%d %v\n", s, len(x), cap(x), x) } // range var pow2 = []int{1, 2, 4, 8, 16, 32, 64, 128} //slicing exercise func Pic(dx, dy int) [][]uint8 { outer := make([][]uint8, dy) for i := range outer { outer[i] = make([]uint8, dx) for j := range outer[i] { outer[i][j] = uint8(j) // also try (x+y)/2, x*y, and x^y. !Wow! 
:-) } } return outer } //maps type LocationCoordinate struct { Lat, Long float64 } var map1 map[string]LocationCoordinate var map2 = map[string]LocationCoordinate{ "Bell Labs": LocationCoordinate{ 40.68433, -74.39967, }, "Google": LocationCoordinate{ 37.42202, -122.08408, }, "Apple": {37.42202, -122.08408}, } //map exercise func WordCount(s string) map[string]int { var map4 = make(map[string]int) for i, v := range strings.Fields(s) { if map4[v] != 0 { continue } fmt.Println("Looping", i) var count int = 0 for _, word := range strings.Fields(s) { if word == v { count++ } } map4[v] = count } return map4 } //closure func
() func(int) int { sum := 0 return func(x int) int { sum += x return sum } } var ( previous, current int ) func fibonacci() func() int { return func() int { sum := previous + current if sum == 0 { previous = 0 current = 1 return previous + current } else { previous = current current = sum return current } } } type FloatVertex struct { X, Y float64 } func (v *FloatVertex) Abs() float64 { return math.Sqrt(v.X*v.X + v.Y*v.Y) } type MyFloat float64 func (f MyFloat) Abs() float64 { if f < 0 { return float64(-f) } return float64(f) } func (v *FloatVertex) Scale(f float64) { v.X = v.X * f v.Y = v.Y * f } //interface type Abser interface { Abs() float64 } func runInterface() { var abser Abser f2 := MyFloat(-math.Sqrt2) v9 := FloatVertex{3, 4} abser = f2 // a MyFloat implements Abser abser = &v9 // a *Vertex implements Abser // In the following line, v is a Vertex (not *Vertex) // and does NOT implement Abser. // abser = v9 fmt.Println(abser.Abs()) } // implicit interface type Reader interface { Read(b []byte) (n int, err error) } type Writer interface { Write(b []byte) (n int, err error) } type ReadWriter interface { Reader Writer } type Person struct { Name string Age int } func (p Person) String() string { return fmt.Sprintf("%v (%v years)", p.Name, p.Age) } func runImplicitInterface() { fmt.Println("Implicit interface") var w Writer // os.Stdout implements Writer w = os.Stdout fmt.Fprintf(w, "hello, writer\n") person := Person{"Arthur Dent", 42} anotherPerson := Person{"Zaphod Beeblebrox", 9001} fmt.Println(person, anotherPerson) } //stringer type IPAddr [4]byte func (ip IPAddr) String() string { return fmt.Sprintf("%v.%v.%v.%v", ip[0], ip[1], ip[2], ip[3]) } func runStringer() { fmt.Println("stringer---") addrs := map[string]IPAddr{ "loopback": {127, 0, 0, 1}, "googleDNS": {8, 8, 8, 8}, } for n, a := range addrs { fmt.Printf("%v: %v\n", n, a) } } // errors type MyError struct { When time.Time What string } func (e *MyError) Error() string { return fmt.Sprintf("at %v, 
%s", e.When, e.What) } func run() error { return &MyError{ time.Now(), "it didn't work", } } func runErrors() { fmt.Println("errors") if err := run(); err != nil { fmt.Println(err) } } // go routine func say(s string) { for i := 0; i < 5; i++ { time.Sleep(100 * time.Millisecond) fmt.Println(s) } } func sum(a []int, c chan int) { sum := 0 for _, v := range a { sum += v } c <- sum // send sum to channel c } func runGoRoutine() { a := []int{7, 2, 8, -9, 4, 0} c := make(chan int) go sum(a[:len(a)/2], c) go sum(a[len(a)/2:], c) x, y := <-c, <-c // receive from channel c and assign value to x and y fmt.Println(x, y, x+y) } func runBufferedChannel() { c := make(chan int, 2) c <- 1 c <- 2 fmt.Println(<-c) fmt.Println(<-c) } func fibonacci2(n int, c chan int) { x, y := 0, 1 for i := 0; i < n; i++ { c <- x x, y = y, x+y } close(c) } func runRangeAndClose() { fmt.Println("run fibonacci") c := make(chan int, 10) go fibonacci2(cap(c), c) for i := range c { fmt.Println(i) } } var ( startTime time.Time ) func fibonacci3(c, quit chan int) { x, y := 0, 1 for { select { case c <- x: fmt.Println("Before write", time.Since(startTime)) fmt.Println("Send on chan", time.Since(startTime)) x, y = y, x+y case <-quit: fmt.Println("quit") return } } } func runFibonacci3() { c := make(chan int) quit := make(chan int) go func() { for i := 0; i < 10; i++ { fmt.Println("receivedChan", <-c, time.Since(startTime)) } quit <- 0 }() startTime = time.Now() fibonacci3(c, quit) } func runDefaultSelection() { tick := time.Tick(100 * time.Millisecond) boom := time.After(500 * time.Millisecond) for { select { case <-tick: fmt.Println("tick.") case <-boom: fmt.Println("BOOM!") return default: fmt.Println(" .") time.Sleep(50 * time.Millisecond) } } } func main() { fmt.Println("Welcome to the playground!") fmt.Println("The time is", time.Now()) fmt.Println("My favorite number is", rand.Intn(10)) fmt.Printf("Now you have %g problems.", math.Nextafter(2, 3)) fmt.Println(math.Pi) fmt.Println(add1(42, 13)) 
fmt.Println(add2(42, 13)) a, b := swap("hello", "world") fmt.Println(a, b) fmt.Println(split(17)) //1 var i1 int fmt.Println(i1, c1, python1, java1) //2 var c2, python2, java2 = true, false, "no!" fmt.Println(i2, j2, c2, python2, java2) //3 var i3, j3 int = 1, 2 k3 := 3 c3, python3, java3 := true, false, "no!" fmt.Println(i3, j3, k3, c3, python3, java3) const f4 = "%T(%v)\n" fmt.Printf(f4, ToBe, ToBe) fmt.Printf(f4, MaxInt, MaxInt) fmt.Printf(f4, z4, z4) // data types // bool // string // int int8 int16 int32 int64 // uint uint8 uint16 uint32 uint64 uintptr // byte // alias for uint8 // rune // alias for int32 // // represents a Unicode code point // float32 float64 // complex64 complex128 // default value var i5 int var f5 float64 var b5 bool var s5 string fmt.Printf("%v %v %v %q\n", i5, f5, b5, s5) var x6, y6 int = 3, 4 var f6 float64 = math.Sqrt(float64(x6*x6 + y6*y6)) var z6 int = int(f6) fmt.Println(x6, y6, z6) v2 := 1.3 // change me! fmt.Printf("v is of type %T\n", v2) const World = "世界" fmt.Println("Hello", World) fmt.Println("Happy", Pi, "Day") const Truth = true fmt.Println("Go rules?", Truth) fmt.Println(needInt(Small)) fmt.Println(needFloat(Small)) fmt.Println(needFloat(Big)) sum1 := 0 for i := 0; i < 10; i++ { sum1 += i } fmt.Println(sum1) sum2 := 1 for sum2 < 1000 { sum2 += sum2 } fmt.Println(sum2) // For is Go's "while" sum3 := 1 for sum3 < 1000 { sum3 += sum3 } fmt.Println(sum3) fmt.Println(sqrt(2), sqrt(-4)) fmt.Println( pow1(3, 2, 10), pow1(3, 3, 20), ) //switch switch os := runtime.GOOS; os { case "darwin": fmt.Println("OS X.") case "linux": fmt.Println("Linux.") default: // freebsd, openbsd, // plan9, windows... 
fmt.Printf("%s.", os) } fmt.Println("When's Saturday?") today := time.Now().Weekday() fmt.Println(today, time.Saturday) switch time.Saturday { case today + 0: fmt.Println("Today.") case today + 1: fmt.Println("Tomorrow.") case today + 2: fmt.Println("In two days.") default: fmt.Println("Too far away.") } t := time.Now() switch { case t.Hour() < 12: fmt.Println("Good morning!") case t.Hour() < 17: fmt.Println("Good afternoon.") default: fmt.Println("Good evening.") } DeferFunc() // end of flow control statements // pointers fmt.Println("Pointers ---") i, j := 42, 2701 var p *int // declaring a pointer to an int. p = &i // point to i fmt.Println(*p) // read i through the pointer. This is known as "dereferencing" or "indirecting". *p = 21 // set i through the pointer. This is known as "dereferencing" or "indirecting". fmt.Println(i) // see the new value of i p = &j // point to j. *p = *p / 37 // divide j through the pointer. This is known as "dereferencing" or "indirecting". fmt.Println(j) // see the new value of j fmt.Println("Pointers ---") // Structs fmt.Println(Vertex{1, 2}) v1 := Vertex{1, 2} v1.X = 4 fmt.Println(v1.X) p1 := &v1 p1.X = 1e9 fmt.Println(v1) fmt.Println(v3, p2, v4, v5) //arrays var array [2]string array[0] = "Hello" array[1] = "World" fmt.Println(array[0], array[1]) fmt.Println(array) s := []int{2, 3, 5, 7, 11, 13} fmt.Println("s ==", s) for i := 0; i < len(s); i++ { fmt.Printf("s[%d] == %d\n", i, s[i]) } // slicing fmt.Println("s[1:4] ==", s[1:4]) // missing low index implies 0 fmt.Println("s[:3] ==", s[:3]) // missing high index implies len(s) fmt.Println("s[4:] ==", s[4:]) a1 := make([]int, 5) printSlice("a1", a1) b1 := make([]int, 0, 5) printSlice("b1", b1) c1 := b1[:2] printSlice("c1", c1) d1 := c1[2:5] printSlice("d1", d1) //Nil slices var zNil []int fmt.Println(zNil, len(zNil), cap(zNil)) if zNil == nil { fmt.Println("nil!") } var a2 []int printSlice("a2", a2) // append works on nil slices. 
a2 = append(a2, 0) printSlice("a2", a2) // the slice grows as needed. a2 = append(a2, 1) printSlice("a2", a2) // we can add more than one element at a time. a2 = append(a2, 2, 3, 4) printSlice("a2", a2) // range for i, v := range pow2 { fmt.Printf("2**%d = %d\n", i, v) } pow3 := make([]int, 10) for i := range pow3 { pow3[i] = 1 << uint(i) } for _, value := range pow3 { fmt.Printf("%d\n", value) } pic.Show(Pic) // Maps map1 = make(map[string]LocationCoordinate) map1["Bell Labs"] = LocationCoordinate{ 40.68433, -74.39967, } fmt.Println(map1["Bell Labs"]) fmt.Println(map1) fmt.Println(map2) //Mutating Maps map3 := make(map[string]int) map3["Answer"] = 42 fmt.Println("The value:", map3["Answer"]) v6, ok1 := map3["Answer"] fmt.Println("The value:", v6, "Present?", ok1) map3["Answer"] = 48 fmt.Println("The value:", map3["Answer"]) delete(map3, "Answer") fmt.Println("The value:", map3["Answer"]) v6, ok2 := map3["Answer"] fmt.Println("The value:", v6, "Present?", ok2) // map exercise wc.Test(WordCount) //functions arevalues too hypot := func(x, y float64) float64 { return math.Sqrt(x*x + y*y) } fmt.Println(hypot(3, 4)) pos, neg := adder(), adder() for i := 0; i < 10; i++ { fmt.Println( pos(i), neg(-2*i), ) } fib := fibonacci() for i := 0; i < 10; i++ { fmt.Println(fib()) } v7 := &FloatVertex{3, 4} fmt.Println("FloatVertex", v7.Abs()) f1 := MyFloat(-math.Sqrt2) fmt.Println(f1.Abs()) v8 := &FloatVertex{3, 4} v8.Scale(5) fmt.Println(v8, v8.Abs()) runInterface() runImplicitInterface() runStringer() runErrors() go say("world") say("hello") runGoRoutine() runBufferedChannel() runRangeAndClose() runFibonacci3() runDefaultSelection() }
adder
identifier_name
sandbox.go
package main import ( "fmt" "golang.org/x/tour/pic" "golang.org/x/tour/wc" "math" "math/cmplx" "math/rand" "os" "runtime" "strings" "time" ) func add1(x int, y int) int { return x + y } func add2(x, y int) int { return x + y } func swap(x, y string) (string, string) { return y, x } func split(sum int) (x, y int) { x = sum * 4 / 9 y = sum - x return //naked return } //1 var c1, python1, java1 bool //2 var i2, j2 int = 1, 2 var ( ToBe bool = false MaxInt uint64 = 1<<64 - 1 z4 complex128 = cmplx.Sqrt(-5 + 12i) ) const Pi = 3.14 const ( Big = 1 << 100 Small = Big >> 99 ) func needInt(x int) int { return x*10 + 1 } func needFloat(x float64) float64 { return x * 0.1 } func sqrt(x float64) string { if x < 0 { return sqrt(-x) + "i" } return fmt.Sprint(math.Sqrt(x)) } func pow1(x, n, lim float64) float64 { if v := math.Pow(x, n); v < lim { return v } else { fmt.Printf("%g >= %g\n", v, lim) } // can't use v here, though return lim } func DeferFunc() { // defer defer fmt.Println("world") fmt.Println("hello") // stacking defer fmt.Println("counting") for i := 0; i < 10; i++ { defer fmt.Println(i) } fmt.Println("done") } type Vertex struct { X int Y int } var ( v3 = Vertex{1, 2} // has type Vertex v4 = Vertex{X: 1} // Y:0 is implicit v5 = Vertex{} // X:0 and Y:0 p2 = &Vertex{1, 2} // has type *Vertex ) // making slices func printSlice(s string, x []int) { fmt.Printf("%s len=%d cap=%d %v\n", s, len(x), cap(x), x) } // range var pow2 = []int{1, 2, 4, 8, 16, 32, 64, 128} //slicing exercise func Pic(dx, dy int) [][]uint8 { outer := make([][]uint8, dy) for i := range outer { outer[i] = make([]uint8, dx) for j := range outer[i] { outer[i][j] = uint8(j) // also try (x+y)/2, x*y, and x^y. !Wow! 
:-) } } return outer } //maps type LocationCoordinate struct { Lat, Long float64 } var map1 map[string]LocationCoordinate var map2 = map[string]LocationCoordinate{ "Bell Labs": LocationCoordinate{ 40.68433, -74.39967, }, "Google": LocationCoordinate{ 37.42202, -122.08408, }, "Apple": {37.42202, -122.08408}, } //map exercise func WordCount(s string) map[string]int { var map4 = make(map[string]int) for i, v := range strings.Fields(s) { if map4[v] != 0 { continue } fmt.Println("Looping", i) var count int = 0 for _, word := range strings.Fields(s) { if word == v
} map4[v] = count } return map4 } //closure func adder() func(int) int { sum := 0 return func(x int) int { sum += x return sum } } var ( previous, current int ) func fibonacci() func() int { return func() int { sum := previous + current if sum == 0 { previous = 0 current = 1 return previous + current } else { previous = current current = sum return current } } } type FloatVertex struct { X, Y float64 } func (v *FloatVertex) Abs() float64 { return math.Sqrt(v.X*v.X + v.Y*v.Y) } type MyFloat float64 func (f MyFloat) Abs() float64 { if f < 0 { return float64(-f) } return float64(f) } func (v *FloatVertex) Scale(f float64) { v.X = v.X * f v.Y = v.Y * f } //interface type Abser interface { Abs() float64 } func runInterface() { var abser Abser f2 := MyFloat(-math.Sqrt2) v9 := FloatVertex{3, 4} abser = f2 // a MyFloat implements Abser abser = &v9 // a *Vertex implements Abser // In the following line, v is a Vertex (not *Vertex) // and does NOT implement Abser. // abser = v9 fmt.Println(abser.Abs()) } // implicit interface type Reader interface { Read(b []byte) (n int, err error) } type Writer interface { Write(b []byte) (n int, err error) } type ReadWriter interface { Reader Writer } type Person struct { Name string Age int } func (p Person) String() string { return fmt.Sprintf("%v (%v years)", p.Name, p.Age) } func runImplicitInterface() { fmt.Println("Implicit interface") var w Writer // os.Stdout implements Writer w = os.Stdout fmt.Fprintf(w, "hello, writer\n") person := Person{"Arthur Dent", 42} anotherPerson := Person{"Zaphod Beeblebrox", 9001} fmt.Println(person, anotherPerson) } //stringer type IPAddr [4]byte func (ip IPAddr) String() string { return fmt.Sprintf("%v.%v.%v.%v", ip[0], ip[1], ip[2], ip[3]) } func runStringer() { fmt.Println("stringer---") addrs := map[string]IPAddr{ "loopback": {127, 0, 0, 1}, "googleDNS": {8, 8, 8, 8}, } for n, a := range addrs { fmt.Printf("%v: %v\n", n, a) } } // errors type MyError struct { When time.Time What string } func (e 
*MyError) Error() string { return fmt.Sprintf("at %v, %s", e.When, e.What) } func run() error { return &MyError{ time.Now(), "it didn't work", } } func runErrors() { fmt.Println("errors") if err := run(); err != nil { fmt.Println(err) } } // go routine func say(s string) { for i := 0; i < 5; i++ { time.Sleep(100 * time.Millisecond) fmt.Println(s) } } func sum(a []int, c chan int) { sum := 0 for _, v := range a { sum += v } c <- sum // send sum to channel c } func runGoRoutine() { a := []int{7, 2, 8, -9, 4, 0} c := make(chan int) go sum(a[:len(a)/2], c) go sum(a[len(a)/2:], c) x, y := <-c, <-c // receive from channel c and assign value to x and y fmt.Println(x, y, x+y) } func runBufferedChannel() { c := make(chan int, 2) c <- 1 c <- 2 fmt.Println(<-c) fmt.Println(<-c) } func fibonacci2(n int, c chan int) { x, y := 0, 1 for i := 0; i < n; i++ { c <- x x, y = y, x+y } close(c) } func runRangeAndClose() { fmt.Println("run fibonacci") c := make(chan int, 10) go fibonacci2(cap(c), c) for i := range c { fmt.Println(i) } } var ( startTime time.Time ) func fibonacci3(c, quit chan int) { x, y := 0, 1 for { select { case c <- x: fmt.Println("Before write", time.Since(startTime)) fmt.Println("Send on chan", time.Since(startTime)) x, y = y, x+y case <-quit: fmt.Println("quit") return } } } func runFibonacci3() { c := make(chan int) quit := make(chan int) go func() { for i := 0; i < 10; i++ { fmt.Println("receivedChan", <-c, time.Since(startTime)) } quit <- 0 }() startTime = time.Now() fibonacci3(c, quit) } func runDefaultSelection() { tick := time.Tick(100 * time.Millisecond) boom := time.After(500 * time.Millisecond) for { select { case <-tick: fmt.Println("tick.") case <-boom: fmt.Println("BOOM!") return default: fmt.Println(" .") time.Sleep(50 * time.Millisecond) } } } func main() { fmt.Println("Welcome to the playground!") fmt.Println("The time is", time.Now()) fmt.Println("My favorite number is", rand.Intn(10)) fmt.Printf("Now you have %g problems.", math.Nextafter(2, 3)) 
fmt.Println(math.Pi) fmt.Println(add1(42, 13)) fmt.Println(add2(42, 13)) a, b := swap("hello", "world") fmt.Println(a, b) fmt.Println(split(17)) //1 var i1 int fmt.Println(i1, c1, python1, java1) //2 var c2, python2, java2 = true, false, "no!" fmt.Println(i2, j2, c2, python2, java2) //3 var i3, j3 int = 1, 2 k3 := 3 c3, python3, java3 := true, false, "no!" fmt.Println(i3, j3, k3, c3, python3, java3) const f4 = "%T(%v)\n" fmt.Printf(f4, ToBe, ToBe) fmt.Printf(f4, MaxInt, MaxInt) fmt.Printf(f4, z4, z4) // data types // bool // string // int int8 int16 int32 int64 // uint uint8 uint16 uint32 uint64 uintptr // byte // alias for uint8 // rune // alias for int32 // // represents a Unicode code point // float32 float64 // complex64 complex128 // default value var i5 int var f5 float64 var b5 bool var s5 string fmt.Printf("%v %v %v %q\n", i5, f5, b5, s5) var x6, y6 int = 3, 4 var f6 float64 = math.Sqrt(float64(x6*x6 + y6*y6)) var z6 int = int(f6) fmt.Println(x6, y6, z6) v2 := 1.3 // change me! fmt.Printf("v is of type %T\n", v2) const World = "世界" fmt.Println("Hello", World) fmt.Println("Happy", Pi, "Day") const Truth = true fmt.Println("Go rules?", Truth) fmt.Println(needInt(Small)) fmt.Println(needFloat(Small)) fmt.Println(needFloat(Big)) sum1 := 0 for i := 0; i < 10; i++ { sum1 += i } fmt.Println(sum1) sum2 := 1 for sum2 < 1000 { sum2 += sum2 } fmt.Println(sum2) // For is Go's "while" sum3 := 1 for sum3 < 1000 { sum3 += sum3 } fmt.Println(sum3) fmt.Println(sqrt(2), sqrt(-4)) fmt.Println( pow1(3, 2, 10), pow1(3, 3, 20), ) //switch switch os := runtime.GOOS; os { case "darwin": fmt.Println("OS X.") case "linux": fmt.Println("Linux.") default: // freebsd, openbsd, // plan9, windows... 
fmt.Printf("%s.", os) } fmt.Println("When's Saturday?") today := time.Now().Weekday() fmt.Println(today, time.Saturday) switch time.Saturday { case today + 0: fmt.Println("Today.") case today + 1: fmt.Println("Tomorrow.") case today + 2: fmt.Println("In two days.") default: fmt.Println("Too far away.") } t := time.Now() switch { case t.Hour() < 12: fmt.Println("Good morning!") case t.Hour() < 17: fmt.Println("Good afternoon.") default: fmt.Println("Good evening.") } DeferFunc() // end of flow control statements // pointers fmt.Println("Pointers ---") i, j := 42, 2701 var p *int // declaring a pointer to an int. p = &i // point to i fmt.Println(*p) // read i through the pointer. This is known as "dereferencing" or "indirecting". *p = 21 // set i through the pointer. This is known as "dereferencing" or "indirecting". fmt.Println(i) // see the new value of i p = &j // point to j. *p = *p / 37 // divide j through the pointer. This is known as "dereferencing" or "indirecting". fmt.Println(j) // see the new value of j fmt.Println("Pointers ---") // Structs fmt.Println(Vertex{1, 2}) v1 := Vertex{1, 2} v1.X = 4 fmt.Println(v1.X) p1 := &v1 p1.X = 1e9 fmt.Println(v1) fmt.Println(v3, p2, v4, v5) //arrays var array [2]string array[0] = "Hello" array[1] = "World" fmt.Println(array[0], array[1]) fmt.Println(array) s := []int{2, 3, 5, 7, 11, 13} fmt.Println("s ==", s) for i := 0; i < len(s); i++ { fmt.Printf("s[%d] == %d\n", i, s[i]) } // slicing fmt.Println("s[1:4] ==", s[1:4]) // missing low index implies 0 fmt.Println("s[:3] ==", s[:3]) // missing high index implies len(s) fmt.Println("s[4:] ==", s[4:]) a1 := make([]int, 5) printSlice("a1", a1) b1 := make([]int, 0, 5) printSlice("b1", b1) c1 := b1[:2] printSlice("c1", c1) d1 := c1[2:5] printSlice("d1", d1) //Nil slices var zNil []int fmt.Println(zNil, len(zNil), cap(zNil)) if zNil == nil { fmt.Println("nil!") } var a2 []int printSlice("a2", a2) // append works on nil slices. 
a2 = append(a2, 0) printSlice("a2", a2) // the slice grows as needed. a2 = append(a2, 1) printSlice("a2", a2) // we can add more than one element at a time. a2 = append(a2, 2, 3, 4) printSlice("a2", a2) // range for i, v := range pow2 { fmt.Printf("2**%d = %d\n", i, v) } pow3 := make([]int, 10) for i := range pow3 { pow3[i] = 1 << uint(i) } for _, value := range pow3 { fmt.Printf("%d\n", value) } pic.Show(Pic) // Maps map1 = make(map[string]LocationCoordinate) map1["Bell Labs"] = LocationCoordinate{ 40.68433, -74.39967, } fmt.Println(map1["Bell Labs"]) fmt.Println(map1) fmt.Println(map2) //Mutating Maps map3 := make(map[string]int) map3["Answer"] = 42 fmt.Println("The value:", map3["Answer"]) v6, ok1 := map3["Answer"] fmt.Println("The value:", v6, "Present?", ok1) map3["Answer"] = 48 fmt.Println("The value:", map3["Answer"]) delete(map3, "Answer") fmt.Println("The value:", map3["Answer"]) v6, ok2 := map3["Answer"] fmt.Println("The value:", v6, "Present?", ok2) // map exercise wc.Test(WordCount) //functions arevalues too hypot := func(x, y float64) float64 { return math.Sqrt(x*x + y*y) } fmt.Println(hypot(3, 4)) pos, neg := adder(), adder() for i := 0; i < 10; i++ { fmt.Println( pos(i), neg(-2*i), ) } fib := fibonacci() for i := 0; i < 10; i++ { fmt.Println(fib()) } v7 := &FloatVertex{3, 4} fmt.Println("FloatVertex", v7.Abs()) f1 := MyFloat(-math.Sqrt2) fmt.Println(f1.Abs()) v8 := &FloatVertex{3, 4} v8.Scale(5) fmt.Println(v8, v8.Abs()) runInterface() runImplicitInterface() runStringer() runErrors() go say("world") say("hello") runGoRoutine() runBufferedChannel() runRangeAndClose() runFibonacci3() runDefaultSelection() }
{ count++ }
conditional_block
sandbox.go
package main import ( "fmt" "golang.org/x/tour/pic" "golang.org/x/tour/wc" "math" "math/cmplx" "math/rand" "os" "runtime" "strings" "time" ) func add1(x int, y int) int { return x + y } func add2(x, y int) int { return x + y } func swap(x, y string) (string, string) { return y, x } func split(sum int) (x, y int) { x = sum * 4 / 9 y = sum - x return //naked return } //1 var c1, python1, java1 bool //2 var i2, j2 int = 1, 2 var ( ToBe bool = false MaxInt uint64 = 1<<64 - 1 z4 complex128 = cmplx.Sqrt(-5 + 12i) ) const Pi = 3.14 const ( Big = 1 << 100 Small = Big >> 99 ) func needInt(x int) int { return x*10 + 1 } func needFloat(x float64) float64 { return x * 0.1 } func sqrt(x float64) string { if x < 0 { return sqrt(-x) + "i" } return fmt.Sprint(math.Sqrt(x)) } func pow1(x, n, lim float64) float64 { if v := math.Pow(x, n); v < lim { return v } else { fmt.Printf("%g >= %g\n", v, lim) } // can't use v here, though return lim } func DeferFunc() { // defer defer fmt.Println("world") fmt.Println("hello") // stacking defer fmt.Println("counting") for i := 0; i < 10; i++ { defer fmt.Println(i) } fmt.Println("done") } type Vertex struct { X int Y int } var ( v3 = Vertex{1, 2} // has type Vertex v4 = Vertex{X: 1} // Y:0 is implicit v5 = Vertex{} // X:0 and Y:0 p2 = &Vertex{1, 2} // has type *Vertex ) // making slices func printSlice(s string, x []int) { fmt.Printf("%s len=%d cap=%d %v\n", s, len(x), cap(x), x) } // range var pow2 = []int{1, 2, 4, 8, 16, 32, 64, 128} //slicing exercise func Pic(dx, dy int) [][]uint8 { outer := make([][]uint8, dy) for i := range outer { outer[i] = make([]uint8, dx) for j := range outer[i] { outer[i][j] = uint8(j) // also try (x+y)/2, x*y, and x^y. !Wow! 
:-) } } return outer } //maps type LocationCoordinate struct { Lat, Long float64 } var map1 map[string]LocationCoordinate var map2 = map[string]LocationCoordinate{ "Bell Labs": LocationCoordinate{ 40.68433, -74.39967, }, "Google": LocationCoordinate{ 37.42202, -122.08408, }, "Apple": {37.42202, -122.08408}, } //map exercise func WordCount(s string) map[string]int { var map4 = make(map[string]int) for i, v := range strings.Fields(s) { if map4[v] != 0 { continue } fmt.Println("Looping", i) var count int = 0 for _, word := range strings.Fields(s) { if word == v { count++ } } map4[v] = count } return map4 } //closure func adder() func(int) int { sum := 0 return func(x int) int { sum += x return sum } } var ( previous, current int ) func fibonacci() func() int { return func() int { sum := previous + current if sum == 0 { previous = 0 current = 1 return previous + current } else { previous = current current = sum return current } } } type FloatVertex struct { X, Y float64 } func (v *FloatVertex) Abs() float64 { return math.Sqrt(v.X*v.X + v.Y*v.Y) } type MyFloat float64 func (f MyFloat) Abs() float64 { if f < 0 { return float64(-f) } return float64(f) } func (v *FloatVertex) Scale(f float64) { v.X = v.X * f v.Y = v.Y * f } //interface type Abser interface { Abs() float64 } func runInterface() { var abser Abser f2 := MyFloat(-math.Sqrt2) v9 := FloatVertex{3, 4} abser = f2 // a MyFloat implements Abser abser = &v9 // a *Vertex implements Abser // In the following line, v is a Vertex (not *Vertex) // and does NOT implement Abser. 
// abser = v9 fmt.Println(abser.Abs()) } // implicit interface type Reader interface { Read(b []byte) (n int, err error) } type Writer interface { Write(b []byte) (n int, err error) } type ReadWriter interface { Reader Writer } type Person struct { Name string Age int } func (p Person) String() string { return fmt.Sprintf("%v (%v years)", p.Name, p.Age) } func runImplicitInterface() { fmt.Println("Implicit interface") var w Writer // os.Stdout implements Writer w = os.Stdout fmt.Fprintf(w, "hello, writer\n") person := Person{"Arthur Dent", 42} anotherPerson := Person{"Zaphod Beeblebrox", 9001} fmt.Println(person, anotherPerson) } //stringer type IPAddr [4]byte func (ip IPAddr) String() string { return fmt.Sprintf("%v.%v.%v.%v", ip[0], ip[1], ip[2], ip[3]) } func runStringer() { fmt.Println("stringer---") addrs := map[string]IPAddr{ "loopback": {127, 0, 0, 1}, "googleDNS": {8, 8, 8, 8}, } for n, a := range addrs { fmt.Printf("%v: %v\n", n, a) } } // errors type MyError struct { When time.Time What string } func (e *MyError) Error() string { return fmt.Sprintf("at %v, %s", e.When, e.What) } func run() error { return &MyError{ time.Now(), "it didn't work", } } func runErrors() { fmt.Println("errors") if err := run(); err != nil { fmt.Println(err) } } // go routine func say(s string) { for i := 0; i < 5; i++ { time.Sleep(100 * time.Millisecond) fmt.Println(s) } } func sum(a []int, c chan int) { sum := 0 for _, v := range a { sum += v } c <- sum // send sum to channel c }
func runGoRoutine() { a := []int{7, 2, 8, -9, 4, 0} c := make(chan int) go sum(a[:len(a)/2], c) go sum(a[len(a)/2:], c) x, y := <-c, <-c // receive from channel c and assign value to x and y fmt.Println(x, y, x+y) } func runBufferedChannel() { c := make(chan int, 2) c <- 1 c <- 2 fmt.Println(<-c) fmt.Println(<-c) } func fibonacci2(n int, c chan int) { x, y := 0, 1 for i := 0; i < n; i++ { c <- x x, y = y, x+y } close(c) } func runRangeAndClose() { fmt.Println("run fibonacci") c := make(chan int, 10) go fibonacci2(cap(c), c) for i := range c { fmt.Println(i) } } var ( startTime time.Time ) func fibonacci3(c, quit chan int) { x, y := 0, 1 for { select { case c <- x: fmt.Println("Before write", time.Since(startTime)) fmt.Println("Send on chan", time.Since(startTime)) x, y = y, x+y case <-quit: fmt.Println("quit") return } } } func runFibonacci3() { c := make(chan int) quit := make(chan int) go func() { for i := 0; i < 10; i++ { fmt.Println("receivedChan", <-c, time.Since(startTime)) } quit <- 0 }() startTime = time.Now() fibonacci3(c, quit) } func runDefaultSelection() { tick := time.Tick(100 * time.Millisecond) boom := time.After(500 * time.Millisecond) for { select { case <-tick: fmt.Println("tick.") case <-boom: fmt.Println("BOOM!") return default: fmt.Println(" .") time.Sleep(50 * time.Millisecond) } } } func main() { fmt.Println("Welcome to the playground!") fmt.Println("The time is", time.Now()) fmt.Println("My favorite number is", rand.Intn(10)) fmt.Printf("Now you have %g problems.", math.Nextafter(2, 3)) fmt.Println(math.Pi) fmt.Println(add1(42, 13)) fmt.Println(add2(42, 13)) a, b := swap("hello", "world") fmt.Println(a, b) fmt.Println(split(17)) //1 var i1 int fmt.Println(i1, c1, python1, java1) //2 var c2, python2, java2 = true, false, "no!" fmt.Println(i2, j2, c2, python2, java2) //3 var i3, j3 int = 1, 2 k3 := 3 c3, python3, java3 := true, false, "no!" 
fmt.Println(i3, j3, k3, c3, python3, java3) const f4 = "%T(%v)\n" fmt.Printf(f4, ToBe, ToBe) fmt.Printf(f4, MaxInt, MaxInt) fmt.Printf(f4, z4, z4) // data types // bool // string // int int8 int16 int32 int64 // uint uint8 uint16 uint32 uint64 uintptr // byte // alias for uint8 // rune // alias for int32 // // represents a Unicode code point // float32 float64 // complex64 complex128 // default value var i5 int var f5 float64 var b5 bool var s5 string fmt.Printf("%v %v %v %q\n", i5, f5, b5, s5) var x6, y6 int = 3, 4 var f6 float64 = math.Sqrt(float64(x6*x6 + y6*y6)) var z6 int = int(f6) fmt.Println(x6, y6, z6) v2 := 1.3 // change me! fmt.Printf("v is of type %T\n", v2) const World = "世界" fmt.Println("Hello", World) fmt.Println("Happy", Pi, "Day") const Truth = true fmt.Println("Go rules?", Truth) fmt.Println(needInt(Small)) fmt.Println(needFloat(Small)) fmt.Println(needFloat(Big)) sum1 := 0 for i := 0; i < 10; i++ { sum1 += i } fmt.Println(sum1) sum2 := 1 for sum2 < 1000 { sum2 += sum2 } fmt.Println(sum2) // For is Go's "while" sum3 := 1 for sum3 < 1000 { sum3 += sum3 } fmt.Println(sum3) fmt.Println(sqrt(2), sqrt(-4)) fmt.Println( pow1(3, 2, 10), pow1(3, 3, 20), ) //switch switch os := runtime.GOOS; os { case "darwin": fmt.Println("OS X.") case "linux": fmt.Println("Linux.") default: // freebsd, openbsd, // plan9, windows... fmt.Printf("%s.", os) } fmt.Println("When's Saturday?") today := time.Now().Weekday() fmt.Println(today, time.Saturday) switch time.Saturday { case today + 0: fmt.Println("Today.") case today + 1: fmt.Println("Tomorrow.") case today + 2: fmt.Println("In two days.") default: fmt.Println("Too far away.") } t := time.Now() switch { case t.Hour() < 12: fmt.Println("Good morning!") case t.Hour() < 17: fmt.Println("Good afternoon.") default: fmt.Println("Good evening.") } DeferFunc() // end of flow control statements // pointers fmt.Println("Pointers ---") i, j := 42, 2701 var p *int // declaring a pointer to an int. 
p = &i // point to i fmt.Println(*p) // read i through the pointer. This is known as "dereferencing" or "indirecting". *p = 21 // set i through the pointer. This is known as "dereferencing" or "indirecting". fmt.Println(i) // see the new value of i p = &j // point to j. *p = *p / 37 // divide j through the pointer. This is known as "dereferencing" or "indirecting". fmt.Println(j) // see the new value of j fmt.Println("Pointers ---") // Structs fmt.Println(Vertex{1, 2}) v1 := Vertex{1, 2} v1.X = 4 fmt.Println(v1.X) p1 := &v1 p1.X = 1e9 fmt.Println(v1) fmt.Println(v3, p2, v4, v5) //arrays var array [2]string array[0] = "Hello" array[1] = "World" fmt.Println(array[0], array[1]) fmt.Println(array) s := []int{2, 3, 5, 7, 11, 13} fmt.Println("s ==", s) for i := 0; i < len(s); i++ { fmt.Printf("s[%d] == %d\n", i, s[i]) } // slicing fmt.Println("s[1:4] ==", s[1:4]) // missing low index implies 0 fmt.Println("s[:3] ==", s[:3]) // missing high index implies len(s) fmt.Println("s[4:] ==", s[4:]) a1 := make([]int, 5) printSlice("a1", a1) b1 := make([]int, 0, 5) printSlice("b1", b1) c1 := b1[:2] printSlice("c1", c1) d1 := c1[2:5] printSlice("d1", d1) //Nil slices var zNil []int fmt.Println(zNil, len(zNil), cap(zNil)) if zNil == nil { fmt.Println("nil!") } var a2 []int printSlice("a2", a2) // append works on nil slices. a2 = append(a2, 0) printSlice("a2", a2) // the slice grows as needed. a2 = append(a2, 1) printSlice("a2", a2) // we can add more than one element at a time. 
a2 = append(a2, 2, 3, 4) printSlice("a2", a2) // range for i, v := range pow2 { fmt.Printf("2**%d = %d\n", i, v) } pow3 := make([]int, 10) for i := range pow3 { pow3[i] = 1 << uint(i) } for _, value := range pow3 { fmt.Printf("%d\n", value) } pic.Show(Pic) // Maps map1 = make(map[string]LocationCoordinate) map1["Bell Labs"] = LocationCoordinate{ 40.68433, -74.39967, } fmt.Println(map1["Bell Labs"]) fmt.Println(map1) fmt.Println(map2) //Mutating Maps map3 := make(map[string]int) map3["Answer"] = 42 fmt.Println("The value:", map3["Answer"]) v6, ok1 := map3["Answer"] fmt.Println("The value:", v6, "Present?", ok1) map3["Answer"] = 48 fmt.Println("The value:", map3["Answer"]) delete(map3, "Answer") fmt.Println("The value:", map3["Answer"]) v6, ok2 := map3["Answer"] fmt.Println("The value:", v6, "Present?", ok2) // map exercise wc.Test(WordCount) //functions arevalues too hypot := func(x, y float64) float64 { return math.Sqrt(x*x + y*y) } fmt.Println(hypot(3, 4)) pos, neg := adder(), adder() for i := 0; i < 10; i++ { fmt.Println( pos(i), neg(-2*i), ) } fib := fibonacci() for i := 0; i < 10; i++ { fmt.Println(fib()) } v7 := &FloatVertex{3, 4} fmt.Println("FloatVertex", v7.Abs()) f1 := MyFloat(-math.Sqrt2) fmt.Println(f1.Abs()) v8 := &FloatVertex{3, 4} v8.Scale(5) fmt.Println(v8, v8.Abs()) runInterface() runImplicitInterface() runStringer() runErrors() go say("world") say("hello") runGoRoutine() runBufferedChannel() runRangeAndClose() runFibonacci3() runDefaultSelection() }
random_line_split
sandbox.go
package main import ( "fmt" "golang.org/x/tour/pic" "golang.org/x/tour/wc" "math" "math/cmplx" "math/rand" "os" "runtime" "strings" "time" ) func add1(x int, y int) int { return x + y } func add2(x, y int) int
func swap(x, y string) (string, string) { return y, x } func split(sum int) (x, y int) { x = sum * 4 / 9 y = sum - x return //naked return } //1 var c1, python1, java1 bool //2 var i2, j2 int = 1, 2 var ( ToBe bool = false MaxInt uint64 = 1<<64 - 1 z4 complex128 = cmplx.Sqrt(-5 + 12i) ) const Pi = 3.14 const ( Big = 1 << 100 Small = Big >> 99 ) func needInt(x int) int { return x*10 + 1 } func needFloat(x float64) float64 { return x * 0.1 } func sqrt(x float64) string { if x < 0 { return sqrt(-x) + "i" } return fmt.Sprint(math.Sqrt(x)) } func pow1(x, n, lim float64) float64 { if v := math.Pow(x, n); v < lim { return v } else { fmt.Printf("%g >= %g\n", v, lim) } // can't use v here, though return lim } func DeferFunc() { // defer defer fmt.Println("world") fmt.Println("hello") // stacking defer fmt.Println("counting") for i := 0; i < 10; i++ { defer fmt.Println(i) } fmt.Println("done") } type Vertex struct { X int Y int } var ( v3 = Vertex{1, 2} // has type Vertex v4 = Vertex{X: 1} // Y:0 is implicit v5 = Vertex{} // X:0 and Y:0 p2 = &Vertex{1, 2} // has type *Vertex ) // making slices func printSlice(s string, x []int) { fmt.Printf("%s len=%d cap=%d %v\n", s, len(x), cap(x), x) } // range var pow2 = []int{1, 2, 4, 8, 16, 32, 64, 128} //slicing exercise func Pic(dx, dy int) [][]uint8 { outer := make([][]uint8, dy) for i := range outer { outer[i] = make([]uint8, dx) for j := range outer[i] { outer[i][j] = uint8(j) // also try (x+y)/2, x*y, and x^y. !Wow! 
:-) } } return outer } //maps type LocationCoordinate struct { Lat, Long float64 } var map1 map[string]LocationCoordinate var map2 = map[string]LocationCoordinate{ "Bell Labs": LocationCoordinate{ 40.68433, -74.39967, }, "Google": LocationCoordinate{ 37.42202, -122.08408, }, "Apple": {37.42202, -122.08408}, } //map exercise func WordCount(s string) map[string]int { var map4 = make(map[string]int) for i, v := range strings.Fields(s) { if map4[v] != 0 { continue } fmt.Println("Looping", i) var count int = 0 for _, word := range strings.Fields(s) { if word == v { count++ } } map4[v] = count } return map4 } //closure func adder() func(int) int { sum := 0 return func(x int) int { sum += x return sum } } var ( previous, current int ) func fibonacci() func() int { return func() int { sum := previous + current if sum == 0 { previous = 0 current = 1 return previous + current } else { previous = current current = sum return current } } } type FloatVertex struct { X, Y float64 } func (v *FloatVertex) Abs() float64 { return math.Sqrt(v.X*v.X + v.Y*v.Y) } type MyFloat float64 func (f MyFloat) Abs() float64 { if f < 0 { return float64(-f) } return float64(f) } func (v *FloatVertex) Scale(f float64) { v.X = v.X * f v.Y = v.Y * f } //interface type Abser interface { Abs() float64 } func runInterface() { var abser Abser f2 := MyFloat(-math.Sqrt2) v9 := FloatVertex{3, 4} abser = f2 // a MyFloat implements Abser abser = &v9 // a *Vertex implements Abser // In the following line, v is a Vertex (not *Vertex) // and does NOT implement Abser. 
// abser = v9 fmt.Println(abser.Abs()) } // implicit interface type Reader interface { Read(b []byte) (n int, err error) } type Writer interface { Write(b []byte) (n int, err error) } type ReadWriter interface { Reader Writer } type Person struct { Name string Age int } func (p Person) String() string { return fmt.Sprintf("%v (%v years)", p.Name, p.Age) } func runImplicitInterface() { fmt.Println("Implicit interface") var w Writer // os.Stdout implements Writer w = os.Stdout fmt.Fprintf(w, "hello, writer\n") person := Person{"Arthur Dent", 42} anotherPerson := Person{"Zaphod Beeblebrox", 9001} fmt.Println(person, anotherPerson) } //stringer type IPAddr [4]byte func (ip IPAddr) String() string { return fmt.Sprintf("%v.%v.%v.%v", ip[0], ip[1], ip[2], ip[3]) } func runStringer() { fmt.Println("stringer---") addrs := map[string]IPAddr{ "loopback": {127, 0, 0, 1}, "googleDNS": {8, 8, 8, 8}, } for n, a := range addrs { fmt.Printf("%v: %v\n", n, a) } } // errors type MyError struct { When time.Time What string } func (e *MyError) Error() string { return fmt.Sprintf("at %v, %s", e.When, e.What) } func run() error { return &MyError{ time.Now(), "it didn't work", } } func runErrors() { fmt.Println("errors") if err := run(); err != nil { fmt.Println(err) } } // go routine func say(s string) { for i := 0; i < 5; i++ { time.Sleep(100 * time.Millisecond) fmt.Println(s) } } func sum(a []int, c chan int) { sum := 0 for _, v := range a { sum += v } c <- sum // send sum to channel c } func runGoRoutine() { a := []int{7, 2, 8, -9, 4, 0} c := make(chan int) go sum(a[:len(a)/2], c) go sum(a[len(a)/2:], c) x, y := <-c, <-c // receive from channel c and assign value to x and y fmt.Println(x, y, x+y) } func runBufferedChannel() { c := make(chan int, 2) c <- 1 c <- 2 fmt.Println(<-c) fmt.Println(<-c) } func fibonacci2(n int, c chan int) { x, y := 0, 1 for i := 0; i < n; i++ { c <- x x, y = y, x+y } close(c) } func runRangeAndClose() { fmt.Println("run fibonacci") c := make(chan int, 10) go 
fibonacci2(cap(c), c) for i := range c { fmt.Println(i) } } var ( startTime time.Time ) func fibonacci3(c, quit chan int) { x, y := 0, 1 for { select { case c <- x: fmt.Println("Before write", time.Since(startTime)) fmt.Println("Send on chan", time.Since(startTime)) x, y = y, x+y case <-quit: fmt.Println("quit") return } } } func runFibonacci3() { c := make(chan int) quit := make(chan int) go func() { for i := 0; i < 10; i++ { fmt.Println("receivedChan", <-c, time.Since(startTime)) } quit <- 0 }() startTime = time.Now() fibonacci3(c, quit) } func runDefaultSelection() { tick := time.Tick(100 * time.Millisecond) boom := time.After(500 * time.Millisecond) for { select { case <-tick: fmt.Println("tick.") case <-boom: fmt.Println("BOOM!") return default: fmt.Println(" .") time.Sleep(50 * time.Millisecond) } } } func main() { fmt.Println("Welcome to the playground!") fmt.Println("The time is", time.Now()) fmt.Println("My favorite number is", rand.Intn(10)) fmt.Printf("Now you have %g problems.", math.Nextafter(2, 3)) fmt.Println(math.Pi) fmt.Println(add1(42, 13)) fmt.Println(add2(42, 13)) a, b := swap("hello", "world") fmt.Println(a, b) fmt.Println(split(17)) //1 var i1 int fmt.Println(i1, c1, python1, java1) //2 var c2, python2, java2 = true, false, "no!" fmt.Println(i2, j2, c2, python2, java2) //3 var i3, j3 int = 1, 2 k3 := 3 c3, python3, java3 := true, false, "no!" 
fmt.Println(i3, j3, k3, c3, python3, java3) const f4 = "%T(%v)\n" fmt.Printf(f4, ToBe, ToBe) fmt.Printf(f4, MaxInt, MaxInt) fmt.Printf(f4, z4, z4) // data types // bool // string // int int8 int16 int32 int64 // uint uint8 uint16 uint32 uint64 uintptr // byte // alias for uint8 // rune // alias for int32 // // represents a Unicode code point // float32 float64 // complex64 complex128 // default value var i5 int var f5 float64 var b5 bool var s5 string fmt.Printf("%v %v %v %q\n", i5, f5, b5, s5) var x6, y6 int = 3, 4 var f6 float64 = math.Sqrt(float64(x6*x6 + y6*y6)) var z6 int = int(f6) fmt.Println(x6, y6, z6) v2 := 1.3 // change me! fmt.Printf("v is of type %T\n", v2) const World = "世界" fmt.Println("Hello", World) fmt.Println("Happy", Pi, "Day") const Truth = true fmt.Println("Go rules?", Truth) fmt.Println(needInt(Small)) fmt.Println(needFloat(Small)) fmt.Println(needFloat(Big)) sum1 := 0 for i := 0; i < 10; i++ { sum1 += i } fmt.Println(sum1) sum2 := 1 for sum2 < 1000 { sum2 += sum2 } fmt.Println(sum2) // For is Go's "while" sum3 := 1 for sum3 < 1000 { sum3 += sum3 } fmt.Println(sum3) fmt.Println(sqrt(2), sqrt(-4)) fmt.Println( pow1(3, 2, 10), pow1(3, 3, 20), ) //switch switch os := runtime.GOOS; os { case "darwin": fmt.Println("OS X.") case "linux": fmt.Println("Linux.") default: // freebsd, openbsd, // plan9, windows... fmt.Printf("%s.", os) } fmt.Println("When's Saturday?") today := time.Now().Weekday() fmt.Println(today, time.Saturday) switch time.Saturday { case today + 0: fmt.Println("Today.") case today + 1: fmt.Println("Tomorrow.") case today + 2: fmt.Println("In two days.") default: fmt.Println("Too far away.") } t := time.Now() switch { case t.Hour() < 12: fmt.Println("Good morning!") case t.Hour() < 17: fmt.Println("Good afternoon.") default: fmt.Println("Good evening.") } DeferFunc() // end of flow control statements // pointers fmt.Println("Pointers ---") i, j := 42, 2701 var p *int // declaring a pointer to an int. 
p = &i // point to i fmt.Println(*p) // read i through the pointer. This is known as "dereferencing" or "indirecting". *p = 21 // set i through the pointer. This is known as "dereferencing" or "indirecting". fmt.Println(i) // see the new value of i p = &j // point to j. *p = *p / 37 // divide j through the pointer. This is known as "dereferencing" or "indirecting". fmt.Println(j) // see the new value of j fmt.Println("Pointers ---") // Structs fmt.Println(Vertex{1, 2}) v1 := Vertex{1, 2} v1.X = 4 fmt.Println(v1.X) p1 := &v1 p1.X = 1e9 fmt.Println(v1) fmt.Println(v3, p2, v4, v5) //arrays var array [2]string array[0] = "Hello" array[1] = "World" fmt.Println(array[0], array[1]) fmt.Println(array) s := []int{2, 3, 5, 7, 11, 13} fmt.Println("s ==", s) for i := 0; i < len(s); i++ { fmt.Printf("s[%d] == %d\n", i, s[i]) } // slicing fmt.Println("s[1:4] ==", s[1:4]) // missing low index implies 0 fmt.Println("s[:3] ==", s[:3]) // missing high index implies len(s) fmt.Println("s[4:] ==", s[4:]) a1 := make([]int, 5) printSlice("a1", a1) b1 := make([]int, 0, 5) printSlice("b1", b1) c1 := b1[:2] printSlice("c1", c1) d1 := c1[2:5] printSlice("d1", d1) //Nil slices var zNil []int fmt.Println(zNil, len(zNil), cap(zNil)) if zNil == nil { fmt.Println("nil!") } var a2 []int printSlice("a2", a2) // append works on nil slices. a2 = append(a2, 0) printSlice("a2", a2) // the slice grows as needed. a2 = append(a2, 1) printSlice("a2", a2) // we can add more than one element at a time. 
a2 = append(a2, 2, 3, 4) printSlice("a2", a2) // range for i, v := range pow2 { fmt.Printf("2**%d = %d\n", i, v) } pow3 := make([]int, 10) for i := range pow3 { pow3[i] = 1 << uint(i) } for _, value := range pow3 { fmt.Printf("%d\n", value) } pic.Show(Pic) // Maps map1 = make(map[string]LocationCoordinate) map1["Bell Labs"] = LocationCoordinate{ 40.68433, -74.39967, } fmt.Println(map1["Bell Labs"]) fmt.Println(map1) fmt.Println(map2) //Mutating Maps map3 := make(map[string]int) map3["Answer"] = 42 fmt.Println("The value:", map3["Answer"]) v6, ok1 := map3["Answer"] fmt.Println("The value:", v6, "Present?", ok1) map3["Answer"] = 48 fmt.Println("The value:", map3["Answer"]) delete(map3, "Answer") fmt.Println("The value:", map3["Answer"]) v6, ok2 := map3["Answer"] fmt.Println("The value:", v6, "Present?", ok2) // map exercise wc.Test(WordCount) //functions arevalues too hypot := func(x, y float64) float64 { return math.Sqrt(x*x + y*y) } fmt.Println(hypot(3, 4)) pos, neg := adder(), adder() for i := 0; i < 10; i++ { fmt.Println( pos(i), neg(-2*i), ) } fib := fibonacci() for i := 0; i < 10; i++ { fmt.Println(fib()) } v7 := &FloatVertex{3, 4} fmt.Println("FloatVertex", v7.Abs()) f1 := MyFloat(-math.Sqrt2) fmt.Println(f1.Abs()) v8 := &FloatVertex{3, 4} v8.Scale(5) fmt.Println(v8, v8.Abs()) runInterface() runImplicitInterface() runStringer() runErrors() go say("world") say("hello") runGoRoutine() runBufferedChannel() runRangeAndClose() runFibonacci3() runDefaultSelection() }
{ return x + y }
identifier_body
_train_bot_with_prepared.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import tradebot as tb import numpy as np import copy import progressbar import pickle import klepto def prepare_bbox(): global n_features, n_actions, max_time # Reset environment to the initial state, just in case if bbox.is_level_loaded(): bbox.reset_level() def calc_best_action_using_checkpoint(action_range=4): # Pretty straightforward — we create a checkpoint and get it's ID checkpoint_id = bbox.create_checkpoint() best_action = -1 best_score = -1e9 for action in range(n_actions): for _ in range(action_range): # random.randint(1,100) bbox.do_action(action) if bbox.get_score() > best_score: best_score = bbox.get_score() best_action = action bbox.load_from_checkpoint(checkpoint_id) bbox.clear_all_checkpoints() return best_action def get_best_action(action_range=4): # Pretty straightforward — we create a checkpoint and get it's ID checkpoint_id = bbox.create_checkpoint() best_action = -1 best_score = -1e9 for action in range(n_actions): for _ in range(action_range): # random.randint(1,100) bbox.do_action(action) if bbox.get_score() > best_score: best_score = bbox.get_score() best_action = action bbox.load_from_checkpoint(checkpoint_id) bbox.clear_all_checkpoints() return best_action def train_minibatch(minibatch): old_state_s = np.array([row[0] for row in minibatch]) action_s = np.array([row[1] for row in minibatch]) reward_s = np.array([row[2] for row in minibatch]) new_state_s = np.array([row[3] for row in minibatch]) old_qwal_s = model.predict(old_state_s, batch_size=32) newQ_s = model.predict(new_state_s, batch_size=32) maxQ_s = np.max(newQ_s, axis=1) y = old_qwal_s update_s = reward_s + gamma * maxQ_s for i in range(len(action_s)): y[i, action_s[i]] = update_s[i] model_prim.fit(old_state_s, y, batch_size=batchSize, nb_epoch=1, verbose=0) return def run_bbox(verbose=False, epsilon=0.1, gamma=0.99, action_repeat=5, update_frequency=4, sample_fit_size=32, 
replay_memory_size=100000, load_weights=False, save_weights=False): global pgi has_next = 1 global actions global bbox # Prepare environment - load the game level prepare_bbox() update_frequency_cntr = 0 h = 0 if load_weights: model.load_weights(root + 'my_model_weights.h5') model_prim.load_weights(root + 'my_model_weights.h5') # stores tuples of (S, A, R, S') while has_next: # Get current environment state pgi += 1 if pgi % print_step == 0: bar.update(pgi) # state = copy.copy(bbox.get_state()) state = bbox.get_state() train_states_logs.append((state.flatten().tolist())[0:-4]) prev_reward = copy.copy(bbox.get_score()) # Run the Q function on S to get predicted reward values on all the possible actions qval = model.predict(state.reshape(1, n_features), batch_size=1) train_qval.append(qval) action = (np.argmax(qval)) actions[action] += 1 # Choose an action to perform at current step if random.random() < epsilon: # choose random action or best action action = np.random.randint(0, n_actions) # assumes 4 different actions else: # choose best action from Q(s,a) values action = (np.argmax(qval)) # Perform chosen action, observe new state S' # Function do_action(action) returns False if level is finished, otherwise returns True. 
for a in range(action_repeat): has_next = bbox.do_action(action) new_state = copy.copy(bbox.get_state()) reward = copy.copy(bbox.get_score()) - prev_reward #if random.random() < 0.2 or reward > 0 : # в запоминаем все успешные действия и только 20% нейспешных if True: # в запоминаем все успешные действия и только 20% нейспешных if (len(replay) < replay_memory_size): # if buffer not filled, add to it replay.append((state, action, reward, new_state)) else: # if buffer full, overwrite old values h=np.random.randint(0,replay_memory_size-1) replay[h] = (state, action, reward, new_state) # randomly sample our experience replay memory # minibatch = random.sample(replay, batchSize) minibatch = random.sample(replay, sample_fit_size) train_minibatch(minibatch=minibatch) if update_frequency_cntr >= update_frequency: prim_weights = model_prim.get_weights() model.set_weights(prim_weights) update_frequency_cntr = 0 update_frequency_cntr += 1 # step_times.append(time.time()-st) # Finish the game simulation, print earned reward and save weights if save_weights: model_prim.save_weights(root + 'my_model_weights.h5', overwrite=True) bbox.finish(verbose=0) from keras.models import Sequential from keras.models import model_from_json from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.regularizers import l1l2,activity_l1l2 import random random.seed(6) n_features = n_actions = max_time = -1 days_to_train =-1 first_run =False resumple=False replay_file = u'e:\\trade_data\\HistoryData\\replay.klp' #bot_file_name = u'e:\\trade_data\\HistoryData\\train_50x40_data_2016.bot' #u'e:\\trade_data\\HistoryData\\train_50x40_data_2015-2016.bot bot_file_name = u'e:\\trade_data\\HistoryData\\Ri_train_50x40_data_2015-2016.bot' d = klepto.archives.dir_archive(bot_file_name, cached=True, serialized=True) d.load("bbox") bbox = d["bbox"] del d if days_to_train != -1: bbox.set_sample_days(days_to_train) exploration_epochs = 1 learning_epochs =1 gamma = 0.8 
# a high gamma makes a long term reward more valuable epsilon=0.1 action_repeat = 3 # repeat each action this many times // было 4 update_frequency = 50 # the number of time steps between each Q-net update batchSize = 32 # параметр для обучения сети l1_reg=0.05 l2_reg=0.00001
replay_memory_size=200000 print('replay_memory_size ', replay_memory_size) sample_fit_size = 128 # Размер минибатча, по которому будет делаться выборка из буфера print_step = 10 n_features = bbox.get_num_of_features() # учесть что мы сдесь получаем шайп print('n_features=', n_features) n_actions = bbox.get_num_of_actions() max_time = bbox.get_max_time() model = Sequential() model.add(Dense(n_features, init='lecun_uniform', input_shape=(n_features,))) model.add(Activation('relu')) model.add(Dense(1600, init='lecun_uniform', W_regularizer=l1l2(l1=l1_reg,l2=l2_reg) )) # a 10 neuron network gives better than random result model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(800, init='lecun_uniform', W_regularizer=l1l2(l1=l1_reg, l2=l2_reg) )) # a 10 neuron network gives better than random result model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(n_actions, init='lecun_uniform')) model.add(Activation('linear')) # linear output so we can have range of real-valued outputs rms = RMSprop(lr=0.00025) # 0.00025 model.compile(loss='mse', optimizer=rms) json_string = model.to_json() root = u'e:\\trade_data\\HistoryData\\' open(root + 'my_model_architecture.json', 'w').write(json_string) model_prim = model_from_json(open(root + 'my_model_architecture.json').read()) model_prim.compile(loss='mse', optimizer=rms) r = klepto.archives.dir_archive(replay_file, cached=True, serialized=True) if not first_run: # "загружаем веса, если запуск не первый" model.load_weights(root + 'my_model_weights.h5') model_prim.load_weights(root + 'my_model_weights.h5') r.load("replay") replay = r['replay'] else: replay = [] r['replay'] = replay load_weights = False replay = [] #r['replay'] = replay pgi = 0 total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps) bar.start() #текстовые логи train_states_logs=[] train_qval=[] test_states_logs=[] test_qval=[] for i in 
range(exploration_epochs): print("exploration ", i, " of ", exploration_epochs) epsilon_t=1.0 actions = np.array([0, 0, 0]) run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat, update_frequency=update_frequency, sample_fit_size=sample_fit_size, replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True) print("score: ", np.round(bbox.get_score()), actions) if epsilon_t > 0.1: epsilon_t -= (1.0 / exploration_epochs) # потихоньку увеличиваем вероятность использования знаний if resumple: bbox.set_sample_days(days_to_train) total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps) r.dump() for i in range(learning_epochs): actions = np.array([0, 0, 0]) print("learning ", i, " of ", learning_epochs) epsilon = 0.1 run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat, update_frequency=update_frequency, sample_fit_size=sample_fit_size, replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True) print("score: ", np.round(bbox.get_score()), actions) if resumple: bbox.set_sample_days(days_to_train) total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps + 100) r.dump() def test_strategy(n=4, resample=True, action_repeat=6): results = [] for i in range(n): if resample: random.seed(1 + i) bbox.set_sample_days(days_to_train) bbox.reset_level() has_next = True actions = np.array([0, 0, 0]) while has_next: state = bbox.get_state() qval = model.predict(state.reshape(1, n_features), batch_size=1) action = (np.argmax(qval)) actions[action] += 1 for a in range(action_repeat): has_next = bbox.do_action(action) bbox.finish(verbose=0) print(" test ", i, " score: ", bbox.get_score(), actions) return results print ('тест на тренировочных данных') test_times = 1 results = test_strategy(test_times, 
action_repeat=action_repeat, resample=False) with open('test_states.txt', "w") as file: for row in test_states_logs: file.write(str(list(row)) + '\n') file.flush() file.close() print (train_states_logs==test_states_logs)
#replay_memory_size = np.minimum(int(bbox.total_steps / float(action_repeat)), 500000 ) # размер памяти, буфера
random_line_split
_train_bot_with_prepared.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import tradebot as tb import numpy as np import copy import progressbar import pickle import klepto def prepare_bbox(): global n_features, n_actions, max_time # Reset environment to the initial state, just in case if bbox.is_level_loaded(): bbox.reset_level() def calc_best_action_using_checkpoint(action_range=4): # Pretty straightforward — we create a checkpoint and get it's ID checkpoint_id = bbox.create_checkpoint() best_action = -1 best_score = -1e9 for action in range(n_actions): for _ in range(action_range): # random.randint(1,100) bbox.do_action(action) if bbox.get_score() > best_score: best_score = bbox.get_score() best_action = action bbox.load_from_checkpoint(checkpoint_id) bbox.clear_all_checkpoints() return best_action def get_best_action(action_range=4): # Pretty straightforward — we create a checkpoint and get it's ID checkpoint_id = bbox.create_checkpoint() best_action = -1 best_score = -1e9 for action in range(n_actions): for _ in range(action_range): # random.randint(1,100) bbox.do_action(action) if bbox.get_score() > best_score: best_score = bbox.get_score() best_action = action bbox.load_from_checkpoint(checkpoint_id) bbox.clear_all_checkpoints() return best_action def train_minibatch(minibatch): old_state_s = np.array([row[0] for row in minibatch]) action_s = np.array([row[1] for row in minibatch]) reward_s = np.array([row[2] for row in minibatch]) new_state_s = np.array([row[3] for row in minibatch]) old_qwal_s = model.predict(old_state_s, batch_size=32) newQ_s = model.predict(new_state_s, batch_size=32) maxQ_s = np.max(newQ_s, axis=1) y = old_qwal_s update_s = reward_s + gamma * maxQ_s for i in range(len(action_s)): y[i,
model_prim.fit(old_state_s, y, batch_size=batchSize, nb_epoch=1, verbose=0) return def run_bbox(verbose=False, epsilon=0.1, gamma=0.99, action_repeat=5, update_frequency=4, sample_fit_size=32, replay_memory_size=100000, load_weights=False, save_weights=False): global pgi has_next = 1 global actions global bbox # Prepare environment - load the game level prepare_bbox() update_frequency_cntr = 0 h = 0 if load_weights: model.load_weights(root + 'my_model_weights.h5') model_prim.load_weights(root + 'my_model_weights.h5') # stores tuples of (S, A, R, S') while has_next: # Get current environment state pgi += 1 if pgi % print_step == 0: bar.update(pgi) # state = copy.copy(bbox.get_state()) state = bbox.get_state() train_states_logs.append((state.flatten().tolist())[0:-4]) prev_reward = copy.copy(bbox.get_score()) # Run the Q function on S to get predicted reward values on all the possible actions qval = model.predict(state.reshape(1, n_features), batch_size=1) train_qval.append(qval) action = (np.argmax(qval)) actions[action] += 1 # Choose an action to perform at current step if random.random() < epsilon: # choose random action or best action action = np.random.randint(0, n_actions) # assumes 4 different actions else: # choose best action from Q(s,a) values action = (np.argmax(qval)) # Perform chosen action, observe new state S' # Function do_action(action) returns False if level is finished, otherwise returns True. 
for a in range(action_repeat): has_next = bbox.do_action(action) new_state = copy.copy(bbox.get_state()) reward = copy.copy(bbox.get_score()) - prev_reward #if random.random() < 0.2 or reward > 0 : # в запоминаем все успешные действия и только 20% нейспешных if True: # в запоминаем все успешные действия и только 20% нейспешных if (len(replay) < replay_memory_size): # if buffer not filled, add to it replay.append((state, action, reward, new_state)) else: # if buffer full, overwrite old values h=np.random.randint(0,replay_memory_size-1) replay[h] = (state, action, reward, new_state) # randomly sample our experience replay memory # minibatch = random.sample(replay, batchSize) minibatch = random.sample(replay, sample_fit_size) train_minibatch(minibatch=minibatch) if update_frequency_cntr >= update_frequency: prim_weights = model_prim.get_weights() model.set_weights(prim_weights) update_frequency_cntr = 0 update_frequency_cntr += 1 # step_times.append(time.time()-st) # Finish the game simulation, print earned reward and save weights if save_weights: model_prim.save_weights(root + 'my_model_weights.h5', overwrite=True) bbox.finish(verbose=0) from keras.models import Sequential from keras.models import model_from_json from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.regularizers import l1l2,activity_l1l2 import random random.seed(6) n_features = n_actions = max_time = -1 days_to_train =-1 first_run =False resumple=False replay_file = u'e:\\trade_data\\HistoryData\\replay.klp' #bot_file_name = u'e:\\trade_data\\HistoryData\\train_50x40_data_2016.bot' #u'e:\\trade_data\\HistoryData\\train_50x40_data_2015-2016.bot bot_file_name = u'e:\\trade_data\\HistoryData\\Ri_train_50x40_data_2015-2016.bot' d = klepto.archives.dir_archive(bot_file_name, cached=True, serialized=True) d.load("bbox") bbox = d["bbox"] del d if days_to_train != -1: bbox.set_sample_days(days_to_train) exploration_epochs = 1 learning_epochs =1 gamma = 0.8 
# a high gamma makes a long term reward more valuable epsilon=0.1 action_repeat = 3 # repeat each action this many times // было 4 update_frequency = 50 # the number of time steps between each Q-net update batchSize = 32 # параметр для обучения сети l1_reg=0.05 l2_reg=0.00001 #replay_memory_size = np.minimum(int(bbox.total_steps / float(action_repeat)), 500000 ) # размер памяти, буфера replay_memory_size=200000 print('replay_memory_size ', replay_memory_size) sample_fit_size = 128 # Размер минибатча, по которому будет делаться выборка из буфера print_step = 10 n_features = bbox.get_num_of_features() # учесть что мы сдесь получаем шайп print('n_features=', n_features) n_actions = bbox.get_num_of_actions() max_time = bbox.get_max_time() model = Sequential() model.add(Dense(n_features, init='lecun_uniform', input_shape=(n_features,))) model.add(Activation('relu')) model.add(Dense(1600, init='lecun_uniform', W_regularizer=l1l2(l1=l1_reg,l2=l2_reg) )) # a 10 neuron network gives better than random result model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(800, init='lecun_uniform', W_regularizer=l1l2(l1=l1_reg, l2=l2_reg) )) # a 10 neuron network gives better than random result model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(n_actions, init='lecun_uniform')) model.add(Activation('linear')) # linear output so we can have range of real-valued outputs rms = RMSprop(lr=0.00025) # 0.00025 model.compile(loss='mse', optimizer=rms) json_string = model.to_json() root = u'e:\\trade_data\\HistoryData\\' open(root + 'my_model_architecture.json', 'w').write(json_string) model_prim = model_from_json(open(root + 'my_model_architecture.json').read()) model_prim.compile(loss='mse', optimizer=rms) r = klepto.archives.dir_archive(replay_file, cached=True, serialized=True) if not first_run: # "загружаем веса, если запуск не первый" model.load_weights(root + 'my_model_weights.h5') model_prim.load_weights(root + 'my_model_weights.h5') r.load("replay") 
replay = r['replay'] else: replay = [] r['replay'] = replay load_weights = False replay = [] #r['replay'] = replay pgi = 0 total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps) bar.start() #текстовые логи train_states_logs=[] train_qval=[] test_states_logs=[] test_qval=[] for i in range(exploration_epochs): print("exploration ", i, " of ", exploration_epochs) epsilon_t=1.0 actions = np.array([0, 0, 0]) run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat, update_frequency=update_frequency, sample_fit_size=sample_fit_size, replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True) print("score: ", np.round(bbox.get_score()), actions) if epsilon_t > 0.1: epsilon_t -= (1.0 / exploration_epochs) # потихоньку увеличиваем вероятность использования знаний if resumple: bbox.set_sample_days(days_to_train) total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps) r.dump() for i in range(learning_epochs): actions = np.array([0, 0, 0]) print("learning ", i, " of ", learning_epochs) epsilon = 0.1 run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat, update_frequency=update_frequency, sample_fit_size=sample_fit_size, replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True) print("score: ", np.round(bbox.get_score()), actions) if resumple: bbox.set_sample_days(days_to_train) total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps + 100) r.dump() def test_strategy(n=4, resample=True, action_repeat=6): results = [] for i in range(n): if resample: random.seed(1 + i) bbox.set_sample_days(days_to_train) bbox.reset_level() has_next = True actions = np.array([0, 0, 0]) while has_next: state = bbox.get_state() 
qval = model.predict(state.reshape(1, n_features), batch_size=1) action = (np.argmax(qval)) actions[action] += 1 for a in range(action_repeat): has_next = bbox.do_action(action) bbox.finish(verbose=0) print(" test ", i, " score: ", bbox.get_score(), actions) return results print ('тест на тренировочных данных') test_times = 1 results = test_strategy(test_times, action_repeat=action_repeat, resample=False) with open('test_states.txt', "w") as file: for row in test_states_logs: file.write(str(list(row)) + '\n') file.flush() file.close() print (train_states_logs==test_states_logs)
action_s[i]] = update_s[i]
conditional_block
_train_bot_with_prepared.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import tradebot as tb import numpy as np import copy import progressbar import pickle import klepto def prepare_bbox(): global n_features, n_actions, max_time # Reset environment to the initial state, just in case if bbox.is_level_loaded(): bbox.reset_level() def calc_best_action_using_checkpoint(action_range=4): # Pretty straightforward — we create a checkpoint and get it's ID checkpoint_id = bbox.create_checkpoint() best_action = -1 best_score = -1e9 for action in range(n_actions): for _ in range(action_range): # random.randint(1,100) bbox.do_action(action) if bbox.get_score() > best_score: best_score = bbox.get_score() best_action = action bbox.load_from_checkpoint(checkpoint_id) bbox.clear_all_checkpoints() return best_action def get_best_action(action_range=4): # Pretty straightforward — we create a checkpoint and get it's ID checkpoint_id = bbox.create_checkpoint() best_action = -1 best_score = -1e9 for action in range(n_actions): for _ in range(action_range): # random.randint(1,100) bbox.do_action(action) if bbox.get_score() > best_score: best_score = bbox.get_score() best_action = action bbox.load_from_checkpoint(checkpoint_id) bbox.clear_all_checkpoints() return best_action def train_minibatch(minibatch): old_state_s = np.array([row[0] for row in minibatch]) action_s = np.array([row[1] for row in minibatch]) reward_s = np.array([row[2] for row in minibatch]) new_state_s = np.array([row[3] for row in minibatch]) old_qwal_s = model.predict(old_state_s, batch_size=32) newQ_s = model.predict(new_state_s, batch_size=32) maxQ_s = np.max(newQ_s, axis=1) y = old_qwal_s update_s = reward_s + gamma * maxQ_s for i in range(len(action_s)): y[i, action_s[i]] = update_s[i] model_prim.fit(old_state_s, y, batch_size=batchSize, nb_epoch=1, verbose=0) return def run_bbox(verbose=False, epsilon=0.1, gamma=0.99, action_repeat=5, update_frequency=4, sample_fit_size=32, 
replay_memory_size=100000, load_weights=False, save_weights=False): global pgi has_next = 1 global actions global bbox # Prepare environment - load the game level prepare_bbox() update_frequency_cntr = 0 h = 0 if load_weights: model.load_weights(root + 'my_model_weights.h5') model_prim.load_weights(root + 'my_model_weights.h5') # stores tuples of (S, A, R, S') while has_next: # Get current environment state pgi += 1 if pgi % print_step == 0: bar.update(pgi) # state = copy.copy(bbox.get_state()) state = bbox.get_state() train_states_logs.append((state.flatten().tolist())[0:-4]) prev_reward = copy.copy(bbox.get_score()) # Run the Q function on S to get predicted reward values on all the possible actions qval = model.predict(state.reshape(1, n_features), batch_size=1) train_qval.append(qval) action = (np.argmax(qval)) actions[action] += 1 # Choose an action to perform at current step if random.random() < epsilon: # choose random action or best action action = np.random.randint(0, n_actions) # assumes 4 different actions else: # choose best action from Q(s,a) values action = (np.argmax(qval)) # Perform chosen action, observe new state S' # Function do_action(action) returns False if level is finished, otherwise returns True. 
for a in range(action_repeat): has_next = bbox.do_action(action) new_state = copy.copy(bbox.get_state()) reward = copy.copy(bbox.get_score()) - prev_reward #if random.random() < 0.2 or reward > 0 : # в запоминаем все успешные действия и только 20% нейспешных if True: # в запоминаем все успешные действия и только 20% нейспешных if (len(replay) < replay_memory_size): # if buffer not filled, add to it replay.append((state, action, reward, new_state)) else: # if buffer full, overwrite old values h=np.random.randint(0,replay_memory_size-1) replay[h] = (state, action, reward, new_state) # randomly sample our experience replay memory # minibatch = random.sample(replay, batchSize) minibatch = random.sample(replay, sample_fit_size) train_minibatch(minibatch=minibatch) if update_frequency_cntr >= update_frequency: prim_weights = model_prim.get_weights() model.set_weights(prim_weights) update_frequency_cntr = 0 update_frequency_cntr += 1 # step_times.append(time.time()-st) # Finish the game simulation, print earned reward and save weights if save_weights: model_prim.save_weights(root + 'my_model_weights.h5', overwrite=True) bbox.finish(verbose=0) from keras.models import Sequential from keras.models import model_from_json from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.regularizers import l1l2,activity_l1l2 import random random.seed(6) n_features = n_actions = max_time = -1 days_to_train =-1 first_run =False resumple=False replay_file = u'e:\\trade_data\\HistoryData\\replay.klp' #bot_file_name = u'e:\\trade_data\\HistoryData\\train_50x40_data_2016.bot' #u'e:\\trade_data\\HistoryData\\train_50x40_data_2015-2016.bot bot_file_name = u'e:\\trade_data\\HistoryData\\Ri_train_50x40_data_2015-2016.bot' d = klepto.archives.dir_archive(bot_file_name, cached=True, serialized=True) d.load("bbox") bbox = d["bbox"] del d if days_to_train != -1: bbox.set_sample_days(days_to_train) exploration_epochs = 1 learning_epochs =1 gamma = 0.8 
# a high gamma makes a long term reward more valuable epsilon=0.1 action_repeat = 3 # repeat each action this many times // было 4 update_frequency = 50 # the number of time steps between each Q-net update batchSize = 32 # параметр для обучения сети l1_reg=0.05 l2_reg=0.00001 #replay_memory_size = np.minimum(int(bbox.total_steps / float(action_repeat)), 500000 ) # размер памяти, буфера replay_memory_size=200000 print('replay_memory_size ', replay_memory_size) sample_fit_size = 128 # Размер минибатча, по которому будет делаться выборка из буфера print_step = 10 n_features = bbox.get_num_of_features() # учесть что мы сдесь получаем шайп print('n_features=', n_features) n_actions = bbox.get_num_of_actions() max_time = bbox.get_max_time() model = Sequential() model.add(Dense(n_features, init='lecun_uniform', input_shape=(n_features,))) model.add(Activation('relu')) model.add(Dense(1600, init='lecun_uniform', W_regularizer=l1l2(l1=l1_reg,l2=l2_reg) )) # a 10 neuron network gives better than random result model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(800, init='lecun_uniform', W_regularizer=l1l2(l1=l1_reg, l2=l2_reg) )) # a 10 neuron network gives better than random result model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(n_actions, init='lecun_uniform')) model.add(Activation('linear')) # linear output so we can have range of real-valued outputs rms = RMSprop(lr=0.00025) # 0.00025 model.compile(loss='mse', optimizer=rms) json_string = model.to_json() root = u'e:\\trade_data\\HistoryData\\' open(root + 'my_model_architecture.json', 'w').write(json_string) model_prim = model_from_json(open(root + 'my_model_architecture.json').read()) model_prim.compile(loss='mse', optimizer=rms) r = klepto.archives.dir_archive(replay_file, cached=True, serialized=True) if not first_run: # "загружаем веса, если запуск не первый" model.load_weights(root + 'my_model_weights.h5') model_prim.load_weights(root + 'my_model_weights.h5') r.load("replay") 
replay = r['replay'] else: replay = [] r['replay'] = replay load_weights = False replay = [] #r['replay'] = replay pgi = 0 total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps) bar.start() #текстовые логи train_states_logs=[] train_qval=[] test_states_logs=[] test_qval=[] for i in range(exploration_epochs): print("exploration ", i, " of ", exploration_epochs) epsilon_t=1.0 actions = np.array([0, 0, 0]) run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat, update_frequency=update_frequency, sample_fit_size=sample_fit_size, replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True) print("score: ", np.round(bbox.get_score()), actions) if epsilon_t > 0.1: epsilon_t -= (1.0 / exploration_epochs) # потихоньку увеличиваем вероятность использования знаний if resumple: bbox.set_sample_days(days_to_train) total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps) r.dump() for i in range(learning_epochs): actions = np.array([0, 0, 0]) print("learning ", i, " of ", learning_epochs) epsilon = 0.1 run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat, update_frequency=update_frequency, sample_fit_size=sample_fit_size, replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True) print("score: ", np.round(bbox.get_score()), actions) if resumple: bbox.set_sample_days(days_to_train) total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps + 100) r.dump() def test_strategy(n=4, resample=True, action_repeat=6): results = [] for i in range(n): if resample: random.seed(1 + i) bbox.set_sample_days(days_to_train) bbox.reset_level() has_next = True actions = np.array([0, 0, 0]) while has_next: state = bbox.get_state() 
qval
s_logs==test_states_logs)
= model.predict(state.reshape(1, n_features), batch_size=1) action = (np.argmax(qval)) actions[action] += 1 for a in range(action_repeat): has_next = bbox.do_action(action) bbox.finish(verbose=0) print(" test ", i, " score: ", bbox.get_score(), actions) return results print ('тест на тренировочных данных') test_times = 1 results = test_strategy(test_times, action_repeat=action_repeat, resample=False) with open('test_states.txt', "w") as file: for row in test_states_logs: file.write(str(list(row)) + '\n') file.flush() file.close() print (train_state
identifier_body
_train_bot_with_prepared.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import tradebot as tb import numpy as np import copy import progressbar import pickle import klepto def prepare_bbox(): global n_features, n_actions, max_time # Reset environment to the initial state, just in case if bbox.is_level_loaded(): bbox.reset_level() def calc_best_action_using_checkpoint(action_range=4): # Pretty straightforward — we create a checkpoint and get it's ID checkpoint_id = bbox.create_checkpoint() best_action = -1 best_score = -1e9 for action in range(n_actions): for _ in range(action_range): # random.randint(1,100) bbox.do_action(action) if bbox.get_score() > best_score: best_score = bbox.get_score() best_action = action bbox.load_from_checkpoint(checkpoint_id) bbox.clear_all_checkpoints() return best_action def get_best_action(action_range=4): # Pretty straightforward — we create a checkpoint and get it's ID checkpoint_id = bbox.create_checkpoint() best_action = -1 best_score = -1e9 for action in range(n_actions): for _ in range(action_range): # random.randint(1,100) bbox.do_action(action) if bbox.get_score() > best_score: best_score = bbox.get_score() best_action = action bbox.load_from_checkpoint(checkpoint_id) bbox.clear_all_checkpoints() return best_action def trai
ibatch): old_state_s = np.array([row[0] for row in minibatch]) action_s = np.array([row[1] for row in minibatch]) reward_s = np.array([row[2] for row in minibatch]) new_state_s = np.array([row[3] for row in minibatch]) old_qwal_s = model.predict(old_state_s, batch_size=32) newQ_s = model.predict(new_state_s, batch_size=32) maxQ_s = np.max(newQ_s, axis=1) y = old_qwal_s update_s = reward_s + gamma * maxQ_s for i in range(len(action_s)): y[i, action_s[i]] = update_s[i] model_prim.fit(old_state_s, y, batch_size=batchSize, nb_epoch=1, verbose=0) return def run_bbox(verbose=False, epsilon=0.1, gamma=0.99, action_repeat=5, update_frequency=4, sample_fit_size=32, replay_memory_size=100000, load_weights=False, save_weights=False): global pgi has_next = 1 global actions global bbox # Prepare environment - load the game level prepare_bbox() update_frequency_cntr = 0 h = 0 if load_weights: model.load_weights(root + 'my_model_weights.h5') model_prim.load_weights(root + 'my_model_weights.h5') # stores tuples of (S, A, R, S') while has_next: # Get current environment state pgi += 1 if pgi % print_step == 0: bar.update(pgi) # state = copy.copy(bbox.get_state()) state = bbox.get_state() train_states_logs.append((state.flatten().tolist())[0:-4]) prev_reward = copy.copy(bbox.get_score()) # Run the Q function on S to get predicted reward values on all the possible actions qval = model.predict(state.reshape(1, n_features), batch_size=1) train_qval.append(qval) action = (np.argmax(qval)) actions[action] += 1 # Choose an action to perform at current step if random.random() < epsilon: # choose random action or best action action = np.random.randint(0, n_actions) # assumes 4 different actions else: # choose best action from Q(s,a) values action = (np.argmax(qval)) # Perform chosen action, observe new state S' # Function do_action(action) returns False if level is finished, otherwise returns True. 
for a in range(action_repeat): has_next = bbox.do_action(action) new_state = copy.copy(bbox.get_state()) reward = copy.copy(bbox.get_score()) - prev_reward #if random.random() < 0.2 or reward > 0 : # в запоминаем все успешные действия и только 20% нейспешных if True: # в запоминаем все успешные действия и только 20% нейспешных if (len(replay) < replay_memory_size): # if buffer not filled, add to it replay.append((state, action, reward, new_state)) else: # if buffer full, overwrite old values h=np.random.randint(0,replay_memory_size-1) replay[h] = (state, action, reward, new_state) # randomly sample our experience replay memory # minibatch = random.sample(replay, batchSize) minibatch = random.sample(replay, sample_fit_size) train_minibatch(minibatch=minibatch) if update_frequency_cntr >= update_frequency: prim_weights = model_prim.get_weights() model.set_weights(prim_weights) update_frequency_cntr = 0 update_frequency_cntr += 1 # step_times.append(time.time()-st) # Finish the game simulation, print earned reward and save weights if save_weights: model_prim.save_weights(root + 'my_model_weights.h5', overwrite=True) bbox.finish(verbose=0) from keras.models import Sequential from keras.models import model_from_json from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.regularizers import l1l2,activity_l1l2 import random random.seed(6) n_features = n_actions = max_time = -1 days_to_train =-1 first_run =False resumple=False replay_file = u'e:\\trade_data\\HistoryData\\replay.klp' #bot_file_name = u'e:\\trade_data\\HistoryData\\train_50x40_data_2016.bot' #u'e:\\trade_data\\HistoryData\\train_50x40_data_2015-2016.bot bot_file_name = u'e:\\trade_data\\HistoryData\\Ri_train_50x40_data_2015-2016.bot' d = klepto.archives.dir_archive(bot_file_name, cached=True, serialized=True) d.load("bbox") bbox = d["bbox"] del d if days_to_train != -1: bbox.set_sample_days(days_to_train) exploration_epochs = 1 learning_epochs =1 gamma = 0.8 
# a high gamma makes a long term reward more valuable epsilon=0.1 action_repeat = 3 # repeat each action this many times // было 4 update_frequency = 50 # the number of time steps between each Q-net update batchSize = 32 # параметр для обучения сети l1_reg=0.05 l2_reg=0.00001 #replay_memory_size = np.minimum(int(bbox.total_steps / float(action_repeat)), 500000 ) # размер памяти, буфера replay_memory_size=200000 print('replay_memory_size ', replay_memory_size) sample_fit_size = 128 # Размер минибатча, по которому будет делаться выборка из буфера print_step = 10 n_features = bbox.get_num_of_features() # учесть что мы сдесь получаем шайп print('n_features=', n_features) n_actions = bbox.get_num_of_actions() max_time = bbox.get_max_time() model = Sequential() model.add(Dense(n_features, init='lecun_uniform', input_shape=(n_features,))) model.add(Activation('relu')) model.add(Dense(1600, init='lecun_uniform', W_regularizer=l1l2(l1=l1_reg,l2=l2_reg) )) # a 10 neuron network gives better than random result model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(800, init='lecun_uniform', W_regularizer=l1l2(l1=l1_reg, l2=l2_reg) )) # a 10 neuron network gives better than random result model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(n_actions, init='lecun_uniform')) model.add(Activation('linear')) # linear output so we can have range of real-valued outputs rms = RMSprop(lr=0.00025) # 0.00025 model.compile(loss='mse', optimizer=rms) json_string = model.to_json() root = u'e:\\trade_data\\HistoryData\\' open(root + 'my_model_architecture.json', 'w').write(json_string) model_prim = model_from_json(open(root + 'my_model_architecture.json').read()) model_prim.compile(loss='mse', optimizer=rms) r = klepto.archives.dir_archive(replay_file, cached=True, serialized=True) if not first_run: # "загружаем веса, если запуск не первый" model.load_weights(root + 'my_model_weights.h5') model_prim.load_weights(root + 'my_model_weights.h5') r.load("replay") 
replay = r['replay'] else: replay = [] r['replay'] = replay load_weights = False replay = [] #r['replay'] = replay pgi = 0 total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps) bar.start() #текстовые логи train_states_logs=[] train_qval=[] test_states_logs=[] test_qval=[] for i in range(exploration_epochs): print("exploration ", i, " of ", exploration_epochs) epsilon_t=1.0 actions = np.array([0, 0, 0]) run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat, update_frequency=update_frequency, sample_fit_size=sample_fit_size, replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True) print("score: ", np.round(bbox.get_score()), actions) if epsilon_t > 0.1: epsilon_t -= (1.0 / exploration_epochs) # потихоньку увеличиваем вероятность использования знаний if resumple: bbox.set_sample_days(days_to_train) total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps) r.dump() for i in range(learning_epochs): actions = np.array([0, 0, 0]) print("learning ", i, " of ", learning_epochs) epsilon = 0.1 run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat, update_frequency=update_frequency, sample_fit_size=sample_fit_size, replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True) print("score: ", np.round(bbox.get_score()), actions) if resumple: bbox.set_sample_days(days_to_train) total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat)) bar = progressbar.ProgressBar(maxval=total_steps + 100) r.dump() def test_strategy(n=4, resample=True, action_repeat=6): results = [] for i in range(n): if resample: random.seed(1 + i) bbox.set_sample_days(days_to_train) bbox.reset_level() has_next = True actions = np.array([0, 0, 0]) while has_next: state = bbox.get_state() 
qval = model.predict(state.reshape(1, n_features), batch_size=1) action = (np.argmax(qval)) actions[action] += 1 for a in range(action_repeat): has_next = bbox.do_action(action) bbox.finish(verbose=0) print(" test ", i, " score: ", bbox.get_score(), actions) return results print ('тест на тренировочных данных') test_times = 1 results = test_strategy(test_times, action_repeat=action_repeat, resample=False) with open('test_states.txt', "w") as file: for row in test_states_logs: file.write(str(list(row)) + '\n') file.flush() file.close() print (train_states_logs==test_states_logs)
n_minibatch(min
identifier_name
gitstore.go
package gitstore import ( "bytes" "encoding/json" "fmt" "io" "os" "path/filepath" "strings" "github.com/metakeule/gitlib" "github.com/metakeule/zoom" // "gopkg.in/vmihailenco/msgpack.v1" ) /* TODO implement sharding, i.e. add a layer on top, fullfilling the zoom.Store interface and saving on the correspondig shard. (and add synchronization) TODO check transactions with indices!!! */ func FileExists(name string) bool { if _, err := os.Stat(name); err != nil { if os.IsNotExist(err) { return false } } return true } type Git struct { *gitlib.Git shard string } func Open(baseDir string, shard string) (g Git, err error) { // fmt.Println("opening") //gitBase := filepath.Join(baseDir, ".git") gitBase := baseDir // ignoring error because gitBase might already exist // println("creating " + gitBase) // os.Mkdir(gitBase, 0755) var git *gitlib.Git git, err = gitlib.NewGit(gitBase) if err != nil { return } if !git.IsInitialized() { // fmt.Println("initializing") err = git.Transaction(func(tx *gitlib.Transaction) error { // we got problems with rm --cached and ls-files therefor we prefer to not // use bare repositories for now if err := tx.Init(); err != nil { return err } /* sha1, err := tx.WriteHashObject(strings.NewReader("index\nblob\n")) if err != nil { return err } err = tx.AddIndexCache(sha1, ".gitignore") if err != nil { return err } */ sha1, err := tx.WriteHashObject(strings.NewReader("ZOOM DATABASE\nThis is a zoom database.\nDon't write into this directory manually.\nUse the zoom database library instead.\n")) if err != nil { return err } err = tx.AddIndexCache(sha1, "README") if err != nil { return err } sha1, err = tx.WriteTree() if err != nil { return err } sha1, err = tx.CommitTree(sha1, "", strings.NewReader("add README")) if err != nil { return err } return tx.UpdateHeadsRef("master", sha1) }) if err != nil { return } } g = Git{Git: git, shard: shard} return } func (g *Git) Transaction(msg zoom.CommitMessage, action func(zoom.Transaction) error) (err error) { 
return g.Git.Transaction(func(tx *gitlib.Transaction) error { var store zoom.Store = &Store{tx, g.shard} return zoom.NewTransaction(store, msg, action) }) } type Store struct { *gitlib.Transaction shard string } // map relname => nodeUuid, only the texts that have a key set are going to be changed func (s *Store) SaveNodeTexts(uuid string, texts map[string]string) error { for textPath, text := range texts { path := s.textPath(uuid, textPath) known, err := s.IsFileKnown(path) if err != nil { return err } rd := strings.NewReader(text) sha1, err2 := s.Transaction.WriteHashObject(rd) if err2 != nil { return err2 } if known { err = s.Transaction.UpdateIndexCache(sha1, path) } else { err = s.Transaction.AddIndexCache(sha1, path) } if err != nil { return err } } return nil } func (s *Store) saveBlobToFile(path string, blob io.Reader) error { dir := filepath.Dir(path) os.MkdirAll(dir, 0755) file, err := os.Create(path) if err != nil { return err } defer file.Close() // make a buffer to keep chunks that are read buf := make([]byte, 1024) for { // read a chunk n, err := blob.Read(buf) if err != nil && err != io.EOF { return err } if n == 0 { break } // write a chunk if _, err := file.Write(buf[:n]); err != nil { return err } } return nil } /* // map poolname => []nodeUuid, only the blobs that have a key set are going to be changed func (s *Store) SaveNodeBlobs(uuid string, blobs map[string]io.Reader) error { for blobPath, blob := range blobs { path := filepath.Join(s.Git.Dir, s.BlobPath(uuid, blobPath)) if err := s.saveBlobToFile(path, blob); err != nil { return err } } return nil } */ /* func (s *Store) SaveIndex(indexpath string, shard string, rd io.Reader) error { path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath)) return s.saveBlobToFile(path, rd) } */ /* */ /* func (s *Store) callwithBlob(uuid string, blobPath string, fn func(string, io.Reader) error) error { path := filepath.Join(s.Git.Dir, s.blobPath(uuid, blobPath)) if FileExists(path) { file, err := 
os.Open(path) if err != nil { return err } defer file.Close() return fn(blobPath, file) } return nil } */ /* func (s *Store) GetIndex(indexpath string, shard string, fn func(io.Reader) error) error { path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath)) if FileExists(path) { file, err := os.Open(path) if err != nil { return err } defer file.Close() return fn(file) } return nil } */ // GetNodeBlobs calls fn for each existing blob in requestedBlobs /* func (s *Store) GetNodeBlobs(uuid string, requestedBlobs []string, fn func(string, io.Reader) error) error { for _, blob := range requestedBlobs { err := s.callwithBlob(uuid, blob, fn) if err != nil { return err } } return nil } */ func (s *Store) GetNodeTexts(uuid string, requestedTexts []string) (texts map[string]string, err error) { texts = map[string]string{} var known bool for _, text := range requestedTexts { known, err = s.IsFileKnown(s.textPath(uuid, text)) if err != nil { return } // fmt.Printf("file %s is known: %v\n", text, known) if known { var buf bytes.Buffer err = s.ReadCatHeadFile(s.textPath(uuid, text), &buf) if err != nil { return } texts[text] = buf.String() } } return } func (s *Store) SaveEdges(category, uuid string, edges map[string]string) error { path := s.edgePath(category, uuid) known, err := s.IsFileKnown(path) if err != nil { return err } return s.save(path, !known, edges) } // RemoveEdges also removes the properties node of an edge // Is the edges file is already removed, no error should be returned func (s *Store) RemoveEdges(category, uuid string) error { edges, err := s.GetEdges(category, uuid) if err != nil { return err } for _, propID := range edges { if err := s.RemoveNode(propID); err != nil { return err } } path := s.edgePath(category, uuid) return s.RemoveIndex(path) } // if there is no edge file for the given category, no error is returned, but empty edges map func (s *Store) GetEdges(category, uuid string) (edges map[string]string, err error) { path := 
s.edgePath(category, uuid) edges = map[string]string{} known, err := s.IsFileKnown(path) if err != nil { return edges, err } if !known { return edges, nil } err = s.load(path, &edges) return edges, err } func (s *Store) edgePath(category string, uuid string) string { //return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("refs/%s/%s/%s/%s", category, s.shard, uuid[:2], uuid[2:]) } func (s *Store) propPath(uuid string) string { //return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("node/%s/%s/%s", s.shard, uuid[:2], uuid[2:]) } func (s *Store) textPath(uuid string, key string) string { // return fmt.Sprintf("node/rels/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("text/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], key) } func (s *Store) BlobPath(uuid string, blobpath string) string { return fmt.Sprintf("../blob/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], blobpath) } func (g *Store) Commit(msg zoom.CommitMessage) error { // fmt.Println("commit from store " + comment) treeSha, err := g.Transaction.WriteTree() if err != nil { return err } var parent string parent, err = g.ShowHeadsRef("master") // fmt.Println("parent commit is: " + parent) if err != nil { return err } var commitSha string commitSha, err = g.CommitTree(treeSha, parent, strings.NewReader(msg.String())) if err != nil { return err } return g.UpdateHeadsRef("master", commitSha) } func (g *Store) save(path string, isNew bool, data interface{}) error { // fmt.Printf("storing: %#v in %#v\n", data, path) var buf bytes.Buffer // enc := msgpack.NewEncoder(&buf) enc := json.NewEncoder(&buf) err := enc.Encode(data) if err != nil { return err } // fmt.Println("result", buf.String()) var sha1 string sha1, err = g.Transaction.WriteHashObject(&buf) if err != nil { return err } if isNew { err = g.Transaction.AddIndexCache(sha1, path) } else { err = g.Transaction.UpdateIndexCache(sha1, path) } if err != nil { return err } return nil } func (g *Store) load(path string, data 
interface{}) error { // fmt.Println("loading from ", path) var buf bytes.Buffer err := g.Transaction.ReadCatHeadFile(path, &buf) if err != nil { fmt.Println(err) return err } // fmt.Println("reading", buf.String()) //dec := msgpack.NewDecoder(&buf) dec := json.NewDecoder(&buf) return dec.Decode(data) } // only the props that have a key set are going to be changed // if no node properties file does exist, no error should be returned and s.save(....isNew) should be used func (g *Store) SaveNodeProperties(uuid string, props map[string]interface{}) error { path := g.propPath(uuid) known, err := g.IsFileKnown(path) if err != nil { return err } if known { orig := map[string]interface{}{} err := g.load(path, &orig) if err != nil { return err } for k, v := range props { if v == nil { delete(orig, k) } else { orig[k] = v } } props = orig } return g.save(path, !known, props) } // TODO Rollback any actions that have been taken since the last commit // stage should be cleared and any newly added data should be removed // maybe a cleanup command should remove the orphaned sha1s (git gc maybe??) func (g *Store) Rollback() error { return g.ResetToHeadAll() } // TODO what happens on errors? changes will not be committed! // TODO what about the edges? must delete them all (and identify them all) // to identify them we need something like // ls refs/*/shard/uuid[:2], uuid[2:] // return fmt.Sprintf("refs/%s/%s/%s/%s", category, shard, uuid[:2], uuid[2:]) // if any file does not exist, no error should be returned func (g *Store) RemoveNode(uuid string) error
// only the properties that exist make it into the returned map // it is no error if a requested property does not exist for a node // the caller has to check the returned map against the requested props if // she wants to check, if all requested properties have been returned // if the node properties file is not there no error should be returned func (g *Store) GetNodeProperties(uuid string, requestedProps []string) (props map[string]interface{}, err error) { path := g.propPath(uuid) orig := map[string]interface{}{} err = g.load(path, &orig) if err != nil { return nil, err } props = map[string]interface{}{} for _, req := range requestedProps { v, has := orig[req] if has { props[req] = v } } return } func (g *Store) Shard() string { return g.shard }
{ // fmt.Printf("trying to remove node: uuid %#v shard %#v\n", uuid, shard) // fmt.Println("proppath is ", g.propPath(uuid)) paths := []string{ fmt.Sprintf("text/%s/%s/%s", g.shard, uuid[:2], uuid[2:]), fmt.Sprintf("blob/%s/%s/%s", g.shard, uuid[:2], uuid[2:]), } files, err := g.LsFiles(fmt.Sprintf("refs/*/%s/%s/%s", g.shard, uuid[:2], uuid[2:])) if err != nil { // fmt.Println("error from ls files") return err } for _, file := range files { err := g.Transaction.RemoveIndex(file) if err != nil { return err } } for _, path := range paths { files, err := g.LsFiles(fmt.Sprintf("%s/*", path)) if err != nil { // fmt.Println("error from ls files") return err } for _, file := range files { // fmt.Printf("trying to remove file: %#v\n", file) known, err := g.IsFileKnown(file) if err != nil { // fmt.Printf("can't remove index: %#v\n", file) return err } if known { err := g.Transaction.RemoveIndex(file) if err != nil { return err } } } } err = g.Transaction.RemoveIndex(g.propPath(uuid)) if err != nil { return err } return nil }
identifier_body
gitstore.go
package gitstore import ( "bytes" "encoding/json" "fmt" "io" "os" "path/filepath" "strings" "github.com/metakeule/gitlib" "github.com/metakeule/zoom" // "gopkg.in/vmihailenco/msgpack.v1" ) /* TODO implement sharding, i.e. add a layer on top, fullfilling the zoom.Store interface and saving on the correspondig shard. (and add synchronization) TODO check transactions with indices!!! */ func FileExists(name string) bool { if _, err := os.Stat(name); err != nil { if os.IsNotExist(err) { return false } } return true } type Git struct { *gitlib.Git shard string } func Open(baseDir string, shard string) (g Git, err error) { // fmt.Println("opening") //gitBase := filepath.Join(baseDir, ".git") gitBase := baseDir // ignoring error because gitBase might already exist // println("creating " + gitBase) // os.Mkdir(gitBase, 0755) var git *gitlib.Git git, err = gitlib.NewGit(gitBase) if err != nil { return } if !git.IsInitialized() { // fmt.Println("initializing") err = git.Transaction(func(tx *gitlib.Transaction) error { // we got problems with rm --cached and ls-files therefor we prefer to not // use bare repositories for now if err := tx.Init(); err != nil { return err } /* sha1, err := tx.WriteHashObject(strings.NewReader("index\nblob\n")) if err != nil { return err } err = tx.AddIndexCache(sha1, ".gitignore") if err != nil { return err } */ sha1, err := tx.WriteHashObject(strings.NewReader("ZOOM DATABASE\nThis is a zoom database.\nDon't write into this directory manually.\nUse the zoom database library instead.\n")) if err != nil { return err } err = tx.AddIndexCache(sha1, "README") if err != nil { return err } sha1, err = tx.WriteTree() if err != nil { return err } sha1, err = tx.CommitTree(sha1, "", strings.NewReader("add README")) if err != nil { return err } return tx.UpdateHeadsRef("master", sha1) }) if err != nil { return } } g = Git{Git: git, shard: shard} return } func (g *Git) Transaction(msg zoom.CommitMessage, action func(zoom.Transaction) error) (err error) { 
return g.Git.Transaction(func(tx *gitlib.Transaction) error { var store zoom.Store = &Store{tx, g.shard} return zoom.NewTransaction(store, msg, action) }) } type Store struct { *gitlib.Transaction shard string } // map relname => nodeUuid, only the texts that have a key set are going to be changed func (s *Store) SaveNodeTexts(uuid string, texts map[string]string) error { for textPath, text := range texts { path := s.textPath(uuid, textPath) known, err := s.IsFileKnown(path) if err != nil { return err } rd := strings.NewReader(text) sha1, err2 := s.Transaction.WriteHashObject(rd) if err2 != nil { return err2 } if known { err = s.Transaction.UpdateIndexCache(sha1, path) } else { err = s.Transaction.AddIndexCache(sha1, path) } if err != nil { return err } } return nil } func (s *Store) saveBlobToFile(path string, blob io.Reader) error { dir := filepath.Dir(path) os.MkdirAll(dir, 0755) file, err := os.Create(path) if err != nil { return err } defer file.Close() // make a buffer to keep chunks that are read buf := make([]byte, 1024) for { // read a chunk n, err := blob.Read(buf) if err != nil && err != io.EOF { return err } if n == 0 { break } // write a chunk if _, err := file.Write(buf[:n]); err != nil { return err } } return nil } /* // map poolname => []nodeUuid, only the blobs that have a key set are going to be changed func (s *Store) SaveNodeBlobs(uuid string, blobs map[string]io.Reader) error { for blobPath, blob := range blobs { path := filepath.Join(s.Git.Dir, s.BlobPath(uuid, blobPath)) if err := s.saveBlobToFile(path, blob); err != nil { return err } } return nil } */ /* func (s *Store) SaveIndex(indexpath string, shard string, rd io.Reader) error { path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath)) return s.saveBlobToFile(path, rd) } */ /* */ /* func (s *Store) callwithBlob(uuid string, blobPath string, fn func(string, io.Reader) error) error { path := filepath.Join(s.Git.Dir, s.blobPath(uuid, blobPath)) if FileExists(path) { file, err := 
os.Open(path) if err != nil { return err
return fn(blobPath, file) } return nil } */ /* func (s *Store) GetIndex(indexpath string, shard string, fn func(io.Reader) error) error { path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath)) if FileExists(path) { file, err := os.Open(path) if err != nil { return err } defer file.Close() return fn(file) } return nil } */ // GetNodeBlobs calls fn for each existing blob in requestedBlobs /* func (s *Store) GetNodeBlobs(uuid string, requestedBlobs []string, fn func(string, io.Reader) error) error { for _, blob := range requestedBlobs { err := s.callwithBlob(uuid, blob, fn) if err != nil { return err } } return nil } */ func (s *Store) GetNodeTexts(uuid string, requestedTexts []string) (texts map[string]string, err error) { texts = map[string]string{} var known bool for _, text := range requestedTexts { known, err = s.IsFileKnown(s.textPath(uuid, text)) if err != nil { return } // fmt.Printf("file %s is known: %v\n", text, known) if known { var buf bytes.Buffer err = s.ReadCatHeadFile(s.textPath(uuid, text), &buf) if err != nil { return } texts[text] = buf.String() } } return } func (s *Store) SaveEdges(category, uuid string, edges map[string]string) error { path := s.edgePath(category, uuid) known, err := s.IsFileKnown(path) if err != nil { return err } return s.save(path, !known, edges) } // RemoveEdges also removes the properties node of an edge // Is the edges file is already removed, no error should be returned func (s *Store) RemoveEdges(category, uuid string) error { edges, err := s.GetEdges(category, uuid) if err != nil { return err } for _, propID := range edges { if err := s.RemoveNode(propID); err != nil { return err } } path := s.edgePath(category, uuid) return s.RemoveIndex(path) } // if there is no edge file for the given category, no error is returned, but empty edges map func (s *Store) GetEdges(category, uuid string) (edges map[string]string, err error) { path := s.edgePath(category, uuid) edges = map[string]string{} known, err := 
s.IsFileKnown(path) if err != nil { return edges, err } if !known { return edges, nil } err = s.load(path, &edges) return edges, err } func (s *Store) edgePath(category string, uuid string) string { //return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("refs/%s/%s/%s/%s", category, s.shard, uuid[:2], uuid[2:]) } func (s *Store) propPath(uuid string) string { //return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("node/%s/%s/%s", s.shard, uuid[:2], uuid[2:]) } func (s *Store) textPath(uuid string, key string) string { // return fmt.Sprintf("node/rels/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("text/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], key) } func (s *Store) BlobPath(uuid string, blobpath string) string { return fmt.Sprintf("../blob/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], blobpath) } func (g *Store) Commit(msg zoom.CommitMessage) error { // fmt.Println("commit from store " + comment) treeSha, err := g.Transaction.WriteTree() if err != nil { return err } var parent string parent, err = g.ShowHeadsRef("master") // fmt.Println("parent commit is: " + parent) if err != nil { return err } var commitSha string commitSha, err = g.CommitTree(treeSha, parent, strings.NewReader(msg.String())) if err != nil { return err } return g.UpdateHeadsRef("master", commitSha) } func (g *Store) save(path string, isNew bool, data interface{}) error { // fmt.Printf("storing: %#v in %#v\n", data, path) var buf bytes.Buffer // enc := msgpack.NewEncoder(&buf) enc := json.NewEncoder(&buf) err := enc.Encode(data) if err != nil { return err } // fmt.Println("result", buf.String()) var sha1 string sha1, err = g.Transaction.WriteHashObject(&buf) if err != nil { return err } if isNew { err = g.Transaction.AddIndexCache(sha1, path) } else { err = g.Transaction.UpdateIndexCache(sha1, path) } if err != nil { return err } return nil } func (g *Store) load(path string, data interface{}) error { // fmt.Println("loading from ", path) var buf 
bytes.Buffer err := g.Transaction.ReadCatHeadFile(path, &buf) if err != nil { fmt.Println(err) return err } // fmt.Println("reading", buf.String()) //dec := msgpack.NewDecoder(&buf) dec := json.NewDecoder(&buf) return dec.Decode(data) } // only the props that have a key set are going to be changed // if no node properties file does exist, no error should be returned and s.save(....isNew) should be used func (g *Store) SaveNodeProperties(uuid string, props map[string]interface{}) error { path := g.propPath(uuid) known, err := g.IsFileKnown(path) if err != nil { return err } if known { orig := map[string]interface{}{} err := g.load(path, &orig) if err != nil { return err } for k, v := range props { if v == nil { delete(orig, k) } else { orig[k] = v } } props = orig } return g.save(path, !known, props) } // TODO Rollback any actions that have been taken since the last commit // stage should be cleared and any newly added data should be removed // maybe a cleanup command should remove the orphaned sha1s (git gc maybe??) func (g *Store) Rollback() error { return g.ResetToHeadAll() } // TODO what happens on errors? changes will not be committed! // TODO what about the edges? 
must delete them all (and identify them all) // to identify them we need something like // ls refs/*/shard/uuid[:2], uuid[2:] // return fmt.Sprintf("refs/%s/%s/%s/%s", category, shard, uuid[:2], uuid[2:]) // if any file does not exist, no error should be returned func (g *Store) RemoveNode(uuid string) error { // fmt.Printf("trying to remove node: uuid %#v shard %#v\n", uuid, shard) // fmt.Println("proppath is ", g.propPath(uuid)) paths := []string{ fmt.Sprintf("text/%s/%s/%s", g.shard, uuid[:2], uuid[2:]), fmt.Sprintf("blob/%s/%s/%s", g.shard, uuid[:2], uuid[2:]), } files, err := g.LsFiles(fmt.Sprintf("refs/*/%s/%s/%s", g.shard, uuid[:2], uuid[2:])) if err != nil { // fmt.Println("error from ls files") return err } for _, file := range files { err := g.Transaction.RemoveIndex(file) if err != nil { return err } } for _, path := range paths { files, err := g.LsFiles(fmt.Sprintf("%s/*", path)) if err != nil { // fmt.Println("error from ls files") return err } for _, file := range files { // fmt.Printf("trying to remove file: %#v\n", file) known, err := g.IsFileKnown(file) if err != nil { // fmt.Printf("can't remove index: %#v\n", file) return err } if known { err := g.Transaction.RemoveIndex(file) if err != nil { return err } } } } err = g.Transaction.RemoveIndex(g.propPath(uuid)) if err != nil { return err } return nil } // only the properties that exist make it into the returned map // it is no error if a requested property does not exist for a node // the caller has to check the returned map against the requested props if // she wants to check, if all requested properties have been returned // if the node properties file is not there no error should be returned func (g *Store) GetNodeProperties(uuid string, requestedProps []string) (props map[string]interface{}, err error) { path := g.propPath(uuid) orig := map[string]interface{}{} err = g.load(path, &orig) if err != nil { return nil, err } props = map[string]interface{}{} for _, req := range requestedProps { v, 
has := orig[req] if has { props[req] = v } } return } func (g *Store) Shard() string { return g.shard }
} defer file.Close()
random_line_split
gitstore.go
package gitstore import ( "bytes" "encoding/json" "fmt" "io" "os" "path/filepath" "strings" "github.com/metakeule/gitlib" "github.com/metakeule/zoom" // "gopkg.in/vmihailenco/msgpack.v1" ) /* TODO implement sharding, i.e. add a layer on top, fullfilling the zoom.Store interface and saving on the correspondig shard. (and add synchronization) TODO check transactions with indices!!! */ func FileExists(name string) bool { if _, err := os.Stat(name); err != nil { if os.IsNotExist(err) { return false } } return true } type Git struct { *gitlib.Git shard string } func Open(baseDir string, shard string) (g Git, err error) { // fmt.Println("opening") //gitBase := filepath.Join(baseDir, ".git") gitBase := baseDir // ignoring error because gitBase might already exist // println("creating " + gitBase) // os.Mkdir(gitBase, 0755) var git *gitlib.Git git, err = gitlib.NewGit(gitBase) if err != nil { return } if !git.IsInitialized() { // fmt.Println("initializing") err = git.Transaction(func(tx *gitlib.Transaction) error { // we got problems with rm --cached and ls-files therefor we prefer to not // use bare repositories for now if err := tx.Init(); err != nil { return err } /* sha1, err := tx.WriteHashObject(strings.NewReader("index\nblob\n")) if err != nil { return err } err = tx.AddIndexCache(sha1, ".gitignore") if err != nil { return err } */ sha1, err := tx.WriteHashObject(strings.NewReader("ZOOM DATABASE\nThis is a zoom database.\nDon't write into this directory manually.\nUse the zoom database library instead.\n")) if err != nil { return err } err = tx.AddIndexCache(sha1, "README") if err != nil { return err } sha1, err = tx.WriteTree() if err != nil { return err } sha1, err = tx.CommitTree(sha1, "", strings.NewReader("add README")) if err != nil { return err } return tx.UpdateHeadsRef("master", sha1) }) if err != nil { return } } g = Git{Git: git, shard: shard} return } func (g *Git) Transaction(msg zoom.CommitMessage, action func(zoom.Transaction) error) (err error) { 
return g.Git.Transaction(func(tx *gitlib.Transaction) error { var store zoom.Store = &Store{tx, g.shard} return zoom.NewTransaction(store, msg, action) }) } type Store struct { *gitlib.Transaction shard string } // map relname => nodeUuid, only the texts that have a key set are going to be changed func (s *Store) SaveNodeTexts(uuid string, texts map[string]string) error { for textPath, text := range texts { path := s.textPath(uuid, textPath) known, err := s.IsFileKnown(path) if err != nil { return err } rd := strings.NewReader(text) sha1, err2 := s.Transaction.WriteHashObject(rd) if err2 != nil { return err2 } if known { err = s.Transaction.UpdateIndexCache(sha1, path) } else { err = s.Transaction.AddIndexCache(sha1, path) } if err != nil { return err } } return nil } func (s *Store) saveBlobToFile(path string, blob io.Reader) error { dir := filepath.Dir(path) os.MkdirAll(dir, 0755) file, err := os.Create(path) if err != nil { return err } defer file.Close() // make a buffer to keep chunks that are read buf := make([]byte, 1024) for { // read a chunk n, err := blob.Read(buf) if err != nil && err != io.EOF { return err } if n == 0 { break } // write a chunk if _, err := file.Write(buf[:n]); err != nil { return err } } return nil } /* // map poolname => []nodeUuid, only the blobs that have a key set are going to be changed func (s *Store) SaveNodeBlobs(uuid string, blobs map[string]io.Reader) error { for blobPath, blob := range blobs { path := filepath.Join(s.Git.Dir, s.BlobPath(uuid, blobPath)) if err := s.saveBlobToFile(path, blob); err != nil { return err } } return nil } */ /* func (s *Store) SaveIndex(indexpath string, shard string, rd io.Reader) error { path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath)) return s.saveBlobToFile(path, rd) } */ /* */ /* func (s *Store) callwithBlob(uuid string, blobPath string, fn func(string, io.Reader) error) error { path := filepath.Join(s.Git.Dir, s.blobPath(uuid, blobPath)) if FileExists(path) { file, err := 
os.Open(path) if err != nil { return err } defer file.Close() return fn(blobPath, file) } return nil } */ /* func (s *Store) GetIndex(indexpath string, shard string, fn func(io.Reader) error) error { path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath)) if FileExists(path) { file, err := os.Open(path) if err != nil { return err } defer file.Close() return fn(file) } return nil } */ // GetNodeBlobs calls fn for each existing blob in requestedBlobs /* func (s *Store) GetNodeBlobs(uuid string, requestedBlobs []string, fn func(string, io.Reader) error) error { for _, blob := range requestedBlobs { err := s.callwithBlob(uuid, blob, fn) if err != nil { return err } } return nil } */ func (s *Store) GetNodeTexts(uuid string, requestedTexts []string) (texts map[string]string, err error) { texts = map[string]string{} var known bool for _, text := range requestedTexts { known, err = s.IsFileKnown(s.textPath(uuid, text)) if err != nil { return } // fmt.Printf("file %s is known: %v\n", text, known) if known { var buf bytes.Buffer err = s.ReadCatHeadFile(s.textPath(uuid, text), &buf) if err != nil { return } texts[text] = buf.String() } } return } func (s *Store) SaveEdges(category, uuid string, edges map[string]string) error { path := s.edgePath(category, uuid) known, err := s.IsFileKnown(path) if err != nil { return err } return s.save(path, !known, edges) } // RemoveEdges also removes the properties node of an edge // Is the edges file is already removed, no error should be returned func (s *Store) RemoveEdges(category, uuid string) error { edges, err := s.GetEdges(category, uuid) if err != nil { return err } for _, propID := range edges { if err := s.RemoveNode(propID); err != nil { return err } } path := s.edgePath(category, uuid) return s.RemoveIndex(path) } // if there is no edge file for the given category, no error is returned, but empty edges map func (s *Store) GetEdges(category, uuid string) (edges map[string]string, err error) { path := 
s.edgePath(category, uuid) edges = map[string]string{} known, err := s.IsFileKnown(path) if err != nil { return edges, err } if !known { return edges, nil } err = s.load(path, &edges) return edges, err } func (s *Store) edgePath(category string, uuid string) string { //return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("refs/%s/%s/%s/%s", category, s.shard, uuid[:2], uuid[2:]) } func (s *Store) propPath(uuid string) string { //return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("node/%s/%s/%s", s.shard, uuid[:2], uuid[2:]) } func (s *Store) textPath(uuid string, key string) string { // return fmt.Sprintf("node/rels/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("text/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], key) } func (s *Store) BlobPath(uuid string, blobpath string) string { return fmt.Sprintf("../blob/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], blobpath) } func (g *Store) Commit(msg zoom.CommitMessage) error { // fmt.Println("commit from store " + comment) treeSha, err := g.Transaction.WriteTree() if err != nil { return err } var parent string parent, err = g.ShowHeadsRef("master") // fmt.Println("parent commit is: " + parent) if err != nil { return err } var commitSha string commitSha, err = g.CommitTree(treeSha, parent, strings.NewReader(msg.String())) if err != nil { return err } return g.UpdateHeadsRef("master", commitSha) } func (g *Store) save(path string, isNew bool, data interface{}) error { // fmt.Printf("storing: %#v in %#v\n", data, path) var buf bytes.Buffer // enc := msgpack.NewEncoder(&buf) enc := json.NewEncoder(&buf) err := enc.Encode(data) if err != nil
// fmt.Println("result", buf.String()) var sha1 string sha1, err = g.Transaction.WriteHashObject(&buf) if err != nil { return err } if isNew { err = g.Transaction.AddIndexCache(sha1, path) } else { err = g.Transaction.UpdateIndexCache(sha1, path) } if err != nil { return err } return nil } func (g *Store) load(path string, data interface{}) error { // fmt.Println("loading from ", path) var buf bytes.Buffer err := g.Transaction.ReadCatHeadFile(path, &buf) if err != nil { fmt.Println(err) return err } // fmt.Println("reading", buf.String()) //dec := msgpack.NewDecoder(&buf) dec := json.NewDecoder(&buf) return dec.Decode(data) } // only the props that have a key set are going to be changed // if no node properties file does exist, no error should be returned and s.save(....isNew) should be used func (g *Store) SaveNodeProperties(uuid string, props map[string]interface{}) error { path := g.propPath(uuid) known, err := g.IsFileKnown(path) if err != nil { return err } if known { orig := map[string]interface{}{} err := g.load(path, &orig) if err != nil { return err } for k, v := range props { if v == nil { delete(orig, k) } else { orig[k] = v } } props = orig } return g.save(path, !known, props) } // TODO Rollback any actions that have been taken since the last commit // stage should be cleared and any newly added data should be removed // maybe a cleanup command should remove the orphaned sha1s (git gc maybe??) func (g *Store) Rollback() error { return g.ResetToHeadAll() } // TODO what happens on errors? changes will not be committed! // TODO what about the edges? 
must delete them all (and identify them all) // to identify them we need something like // ls refs/*/shard/uuid[:2], uuid[2:] // return fmt.Sprintf("refs/%s/%s/%s/%s", category, shard, uuid[:2], uuid[2:]) // if any file does not exist, no error should be returned func (g *Store) RemoveNode(uuid string) error { // fmt.Printf("trying to remove node: uuid %#v shard %#v\n", uuid, shard) // fmt.Println("proppath is ", g.propPath(uuid)) paths := []string{ fmt.Sprintf("text/%s/%s/%s", g.shard, uuid[:2], uuid[2:]), fmt.Sprintf("blob/%s/%s/%s", g.shard, uuid[:2], uuid[2:]), } files, err := g.LsFiles(fmt.Sprintf("refs/*/%s/%s/%s", g.shard, uuid[:2], uuid[2:])) if err != nil { // fmt.Println("error from ls files") return err } for _, file := range files { err := g.Transaction.RemoveIndex(file) if err != nil { return err } } for _, path := range paths { files, err := g.LsFiles(fmt.Sprintf("%s/*", path)) if err != nil { // fmt.Println("error from ls files") return err } for _, file := range files { // fmt.Printf("trying to remove file: %#v\n", file) known, err := g.IsFileKnown(file) if err != nil { // fmt.Printf("can't remove index: %#v\n", file) return err } if known { err := g.Transaction.RemoveIndex(file) if err != nil { return err } } } } err = g.Transaction.RemoveIndex(g.propPath(uuid)) if err != nil { return err } return nil } // only the properties that exist make it into the returned map // it is no error if a requested property does not exist for a node // the caller has to check the returned map against the requested props if // she wants to check, if all requested properties have been returned // if the node properties file is not there no error should be returned func (g *Store) GetNodeProperties(uuid string, requestedProps []string) (props map[string]interface{}, err error) { path := g.propPath(uuid) orig := map[string]interface{}{} err = g.load(path, &orig) if err != nil { return nil, err } props = map[string]interface{}{} for _, req := range requestedProps { v, 
has := orig[req] if has { props[req] = v } } return } func (g *Store) Shard() string { return g.shard }
{ return err }
conditional_block
gitstore.go
package gitstore import ( "bytes" "encoding/json" "fmt" "io" "os" "path/filepath" "strings" "github.com/metakeule/gitlib" "github.com/metakeule/zoom" // "gopkg.in/vmihailenco/msgpack.v1" ) /* TODO implement sharding, i.e. add a layer on top, fullfilling the zoom.Store interface and saving on the correspondig shard. (and add synchronization) TODO check transactions with indices!!! */ func
(name string) bool { if _, err := os.Stat(name); err != nil { if os.IsNotExist(err) { return false } } return true } type Git struct { *gitlib.Git shard string } func Open(baseDir string, shard string) (g Git, err error) { // fmt.Println("opening") //gitBase := filepath.Join(baseDir, ".git") gitBase := baseDir // ignoring error because gitBase might already exist // println("creating " + gitBase) // os.Mkdir(gitBase, 0755) var git *gitlib.Git git, err = gitlib.NewGit(gitBase) if err != nil { return } if !git.IsInitialized() { // fmt.Println("initializing") err = git.Transaction(func(tx *gitlib.Transaction) error { // we got problems with rm --cached and ls-files therefor we prefer to not // use bare repositories for now if err := tx.Init(); err != nil { return err } /* sha1, err := tx.WriteHashObject(strings.NewReader("index\nblob\n")) if err != nil { return err } err = tx.AddIndexCache(sha1, ".gitignore") if err != nil { return err } */ sha1, err := tx.WriteHashObject(strings.NewReader("ZOOM DATABASE\nThis is a zoom database.\nDon't write into this directory manually.\nUse the zoom database library instead.\n")) if err != nil { return err } err = tx.AddIndexCache(sha1, "README") if err != nil { return err } sha1, err = tx.WriteTree() if err != nil { return err } sha1, err = tx.CommitTree(sha1, "", strings.NewReader("add README")) if err != nil { return err } return tx.UpdateHeadsRef("master", sha1) }) if err != nil { return } } g = Git{Git: git, shard: shard} return } func (g *Git) Transaction(msg zoom.CommitMessage, action func(zoom.Transaction) error) (err error) { return g.Git.Transaction(func(tx *gitlib.Transaction) error { var store zoom.Store = &Store{tx, g.shard} return zoom.NewTransaction(store, msg, action) }) } type Store struct { *gitlib.Transaction shard string } // map relname => nodeUuid, only the texts that have a key set are going to be changed func (s *Store) SaveNodeTexts(uuid string, texts map[string]string) error { for textPath, text := range 
texts { path := s.textPath(uuid, textPath) known, err := s.IsFileKnown(path) if err != nil { return err } rd := strings.NewReader(text) sha1, err2 := s.Transaction.WriteHashObject(rd) if err2 != nil { return err2 } if known { err = s.Transaction.UpdateIndexCache(sha1, path) } else { err = s.Transaction.AddIndexCache(sha1, path) } if err != nil { return err } } return nil } func (s *Store) saveBlobToFile(path string, blob io.Reader) error { dir := filepath.Dir(path) os.MkdirAll(dir, 0755) file, err := os.Create(path) if err != nil { return err } defer file.Close() // make a buffer to keep chunks that are read buf := make([]byte, 1024) for { // read a chunk n, err := blob.Read(buf) if err != nil && err != io.EOF { return err } if n == 0 { break } // write a chunk if _, err := file.Write(buf[:n]); err != nil { return err } } return nil } /* // map poolname => []nodeUuid, only the blobs that have a key set are going to be changed func (s *Store) SaveNodeBlobs(uuid string, blobs map[string]io.Reader) error { for blobPath, blob := range blobs { path := filepath.Join(s.Git.Dir, s.BlobPath(uuid, blobPath)) if err := s.saveBlobToFile(path, blob); err != nil { return err } } return nil } */ /* func (s *Store) SaveIndex(indexpath string, shard string, rd io.Reader) error { path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath)) return s.saveBlobToFile(path, rd) } */ /* */ /* func (s *Store) callwithBlob(uuid string, blobPath string, fn func(string, io.Reader) error) error { path := filepath.Join(s.Git.Dir, s.blobPath(uuid, blobPath)) if FileExists(path) { file, err := os.Open(path) if err != nil { return err } defer file.Close() return fn(blobPath, file) } return nil } */ /* func (s *Store) GetIndex(indexpath string, shard string, fn func(io.Reader) error) error { path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath)) if FileExists(path) { file, err := os.Open(path) if err != nil { return err } defer file.Close() return fn(file) } return nil } */ // 
GetNodeBlobs calls fn for each existing blob in requestedBlobs /* func (s *Store) GetNodeBlobs(uuid string, requestedBlobs []string, fn func(string, io.Reader) error) error { for _, blob := range requestedBlobs { err := s.callwithBlob(uuid, blob, fn) if err != nil { return err } } return nil } */ func (s *Store) GetNodeTexts(uuid string, requestedTexts []string) (texts map[string]string, err error) { texts = map[string]string{} var known bool for _, text := range requestedTexts { known, err = s.IsFileKnown(s.textPath(uuid, text)) if err != nil { return } // fmt.Printf("file %s is known: %v\n", text, known) if known { var buf bytes.Buffer err = s.ReadCatHeadFile(s.textPath(uuid, text), &buf) if err != nil { return } texts[text] = buf.String() } } return } func (s *Store) SaveEdges(category, uuid string, edges map[string]string) error { path := s.edgePath(category, uuid) known, err := s.IsFileKnown(path) if err != nil { return err } return s.save(path, !known, edges) } // RemoveEdges also removes the properties node of an edge // Is the edges file is already removed, no error should be returned func (s *Store) RemoveEdges(category, uuid string) error { edges, err := s.GetEdges(category, uuid) if err != nil { return err } for _, propID := range edges { if err := s.RemoveNode(propID); err != nil { return err } } path := s.edgePath(category, uuid) return s.RemoveIndex(path) } // if there is no edge file for the given category, no error is returned, but empty edges map func (s *Store) GetEdges(category, uuid string) (edges map[string]string, err error) { path := s.edgePath(category, uuid) edges = map[string]string{} known, err := s.IsFileKnown(path) if err != nil { return edges, err } if !known { return edges, nil } err = s.load(path, &edges) return edges, err } func (s *Store) edgePath(category string, uuid string) string { //return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("refs/%s/%s/%s/%s", category, s.shard, uuid[:2], uuid[2:]) } func (s 
*Store) propPath(uuid string) string { //return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("node/%s/%s/%s", s.shard, uuid[:2], uuid[2:]) } func (s *Store) textPath(uuid string, key string) string { // return fmt.Sprintf("node/rels/%s/%s", uuid[:2], uuid[2:]) return fmt.Sprintf("text/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], key) } func (s *Store) BlobPath(uuid string, blobpath string) string { return fmt.Sprintf("../blob/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], blobpath) } func (g *Store) Commit(msg zoom.CommitMessage) error { // fmt.Println("commit from store " + comment) treeSha, err := g.Transaction.WriteTree() if err != nil { return err } var parent string parent, err = g.ShowHeadsRef("master") // fmt.Println("parent commit is: " + parent) if err != nil { return err } var commitSha string commitSha, err = g.CommitTree(treeSha, parent, strings.NewReader(msg.String())) if err != nil { return err } return g.UpdateHeadsRef("master", commitSha) } func (g *Store) save(path string, isNew bool, data interface{}) error { // fmt.Printf("storing: %#v in %#v\n", data, path) var buf bytes.Buffer // enc := msgpack.NewEncoder(&buf) enc := json.NewEncoder(&buf) err := enc.Encode(data) if err != nil { return err } // fmt.Println("result", buf.String()) var sha1 string sha1, err = g.Transaction.WriteHashObject(&buf) if err != nil { return err } if isNew { err = g.Transaction.AddIndexCache(sha1, path) } else { err = g.Transaction.UpdateIndexCache(sha1, path) } if err != nil { return err } return nil } func (g *Store) load(path string, data interface{}) error { // fmt.Println("loading from ", path) var buf bytes.Buffer err := g.Transaction.ReadCatHeadFile(path, &buf) if err != nil { fmt.Println(err) return err } // fmt.Println("reading", buf.String()) //dec := msgpack.NewDecoder(&buf) dec := json.NewDecoder(&buf) return dec.Decode(data) } // only the props that have a key set are going to be changed // if no node properties file does exist, no error 
should be returned and s.save(....isNew) should be used func (g *Store) SaveNodeProperties(uuid string, props map[string]interface{}) error { path := g.propPath(uuid) known, err := g.IsFileKnown(path) if err != nil { return err } if known { orig := map[string]interface{}{} err := g.load(path, &orig) if err != nil { return err } for k, v := range props { if v == nil { delete(orig, k) } else { orig[k] = v } } props = orig } return g.save(path, !known, props) } // TODO Rollback any actions that have been taken since the last commit // stage should be cleared and any newly added data should be removed // maybe a cleanup command should remove the orphaned sha1s (git gc maybe??) func (g *Store) Rollback() error { return g.ResetToHeadAll() } // TODO what happens on errors? changes will not be committed! // TODO what about the edges? must delete them all (and identify them all) // to identify them we need something like // ls refs/*/shard/uuid[:2], uuid[2:] // return fmt.Sprintf("refs/%s/%s/%s/%s", category, shard, uuid[:2], uuid[2:]) // if any file does not exist, no error should be returned func (g *Store) RemoveNode(uuid string) error { // fmt.Printf("trying to remove node: uuid %#v shard %#v\n", uuid, shard) // fmt.Println("proppath is ", g.propPath(uuid)) paths := []string{ fmt.Sprintf("text/%s/%s/%s", g.shard, uuid[:2], uuid[2:]), fmt.Sprintf("blob/%s/%s/%s", g.shard, uuid[:2], uuid[2:]), } files, err := g.LsFiles(fmt.Sprintf("refs/*/%s/%s/%s", g.shard, uuid[:2], uuid[2:])) if err != nil { // fmt.Println("error from ls files") return err } for _, file := range files { err := g.Transaction.RemoveIndex(file) if err != nil { return err } } for _, path := range paths { files, err := g.LsFiles(fmt.Sprintf("%s/*", path)) if err != nil { // fmt.Println("error from ls files") return err } for _, file := range files { // fmt.Printf("trying to remove file: %#v\n", file) known, err := g.IsFileKnown(file) if err != nil { // fmt.Printf("can't remove index: %#v\n", file) return 
err } if known { err := g.Transaction.RemoveIndex(file) if err != nil { return err } } } } err = g.Transaction.RemoveIndex(g.propPath(uuid)) if err != nil { return err } return nil } // only the properties that exist make it into the returned map // it is no error if a requested property does not exist for a node // the caller has to check the returned map against the requested props if // she wants to check, if all requested properties have been returned // if the node properties file is not there no error should be returned func (g *Store) GetNodeProperties(uuid string, requestedProps []string) (props map[string]interface{}, err error) { path := g.propPath(uuid) orig := map[string]interface{}{} err = g.load(path, &orig) if err != nil { return nil, err } props = map[string]interface{}{} for _, req := range requestedProps { v, has := orig[req] if has { props[req] = v } } return } func (g *Store) Shard() string { return g.shard }
FileExists
identifier_name
_wx.py
# -*- coding: utf-8 -*- # Copyright (c) 2015, Vispy Development Team. # Distributed under the (new) BSD License. See LICENSE.txt for more info. """ vispy backend for wxPython. """ from __future__ import division from time import sleep import gc import warnings from ..base import (BaseApplicationBackend, BaseCanvasBackend, BaseTimerBackend) from ...util import keys, logger from ...util.ptime import time from ... import config USE_EGL = config['gl_backend'].lower().startswith('es') # -------------------------------------------------------------------- init --- try: # avoid silly locale warning on OSX with warnings.catch_warnings(record=True): import wx from wx import glcanvas from wx.glcanvas import GLCanvas # Map native keys to vispy keys KEYMAP = { wx.WXK_SHIFT: keys.SHIFT, wx.WXK_CONTROL: keys.CONTROL, wx.WXK_ALT: keys.ALT, wx.WXK_WINDOWS_MENU: keys.META, wx.WXK_LEFT: keys.LEFT, wx.WXK_UP: keys.UP, wx.WXK_RIGHT: keys.RIGHT, wx.WXK_DOWN: keys.DOWN, wx.WXK_PAGEUP: keys.PAGEUP, wx.WXK_PAGEDOWN: keys.PAGEDOWN, wx.WXK_INSERT: keys.INSERT, wx.WXK_DELETE: keys.DELETE, wx.WXK_HOME: keys.HOME, wx.WXK_END: keys.END, wx.WXK_ESCAPE: keys.ESCAPE, wx.WXK_BACK: keys.BACKSPACE, wx.WXK_F1: keys.F1, wx.WXK_F2: keys.F2, wx.WXK_F3: keys.F3, wx.WXK_F4: keys.F4, wx.WXK_F5: keys.F5, wx.WXK_F6: keys.F6, wx.WXK_F7: keys.F7, wx.WXK_F8: keys.F8, wx.WXK_F9: keys.F9, wx.WXK_F10: keys.F10, wx.WXK_F11: keys.F11, wx.WXK_F12: keys.F12, wx.WXK_SPACE: keys.SPACE, wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN wx.WXK_NUMPAD_ENTER: keys.ENTER, wx.WXK_TAB: keys.TAB, } except Exception as exp: available, testable, why_not, which = False, False, str(exp), None class GLCanvas(object): pass else: if USE_EGL: available, testable, why_not = False, False, 'EGL not supported' else: available, testable, why_not = True, True, None which = 'wxPython ' + str(wx.__version__) # -------------------------------------------------------------- capability --- capability = dict( # things that can be set by the 
backend title=True, size=True, position=True, show=True, vsync=True, resizable=True, decorate=True, fullscreen=True, context=True, multi_window=True, scroll=True, parent=True, always_on_top=True, ) # ------------------------------------------------------- set_configuration --- def _set_config(c): """Set gl configuration""" gl_attribs = [glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'], glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'], glcanvas.WX_GL_MIN_RED, c['red_size'], glcanvas.WX_GL_MIN_GREEN, c['green_size'], glcanvas.WX_GL_MIN_BLUE, c['blue_size'], glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']] gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else [] gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else [] return gl_attribs # ------------------------------------------------------------- application --- _wx_app = None _timers = [] class ApplicationBackend(BaseApplicationBackend): def __init__(self): BaseApplicationBackend.__init__(self) self._event_loop = wx.EventLoop() wx.EventLoop.SetActive(self._event_loop) def _vispy_get_backend_name(self): return 'wx' def _vispy_process_events(self): # inpsired by https://github.com/wxWidgets/wxPython/blob/master/ # samples/mainloop/mainloop.py for _ in range(3): # trial-and-error found this to work (!) while self._event_loop.Pending(): self._event_loop.Dispatch() _wx_app.ProcessIdle() sleep(0.01) def _vispy_run(self): return _wx_app.MainLoop() def _vispy_quit(self): global _wx_app _wx_app.ExitMainLoop() def _vispy_get_native_app(self): # Get native app in save way. 
Taken from guisupport.py global _wx_app _wx_app = wx.GetApp() # in case the user already has one if _wx_app is None: _wx_app = wx.PySimpleApp() _wx_app.SetExitOnFrameDelete(True) return _wx_app # ------------------------------------------------------------------ canvas --- def _get_mods(evt): """Helper to extract list of mods from event""" mods = [] mods += [keys.CONTROL] if evt.ControlDown() else [] mods += [keys.ALT] if evt.AltDown() else [] mods += [keys.SHIFT] if evt.ShiftDown() else [] mods += [keys.META] if evt.MetaDown() else [] return mods def _process_key(evt):
class DummySize(object): def __init__(self, size): self.size = size def GetSize(self): return self.size def Skip(self): pass class CanvasBackend(GLCanvas, BaseCanvasBackend): """ wxPython backend for Canvas abstract class.""" # args are for BaseCanvasBackend, kwargs are for us. def __init__(self, *args, **kwargs): BaseCanvasBackend.__init__(self, *args) p = self._process_backend_kwargs(kwargs) # WX supports OS double-click events, so we set this here to # avoid double events self._double_click_supported = True # Set config self._gl_attribs = _set_config(p.context.config) # Deal with context p.context.shared.add_ref('wx', self) if p.context.shared.ref is self: self._gl_context = None # set for real once we init the GLCanvas else: self._gl_context = p.context.shared.ref._gl_context if p.parent is None: style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX | wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN) style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER style |= wx.STAY_ON_TOP if p.always_on_top else 0 self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position, p.size, style) if not p.resizable: self._frame.SetSizeHints(p.size[0], p.size[1], p.size[0], p.size[1]) if p.fullscreen is not False: if p.fullscreen is not True: logger.warning('Cannot specify monitor number for wx ' 'fullscreen, using default') self._fullscreen = True else: self._fullscreen = False _wx_app.SetTopWindow(self._frame) parent = self._frame self._frame.Raise() self._frame.Bind(wx.EVT_CLOSE, self.on_close) else: parent = p.parent self._frame = None self._fullscreen = False self._init = False GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position, size=p.size, style=0, name='GLCanvas', attribList=self._gl_attribs) if self._gl_context is None: self._gl_context = glcanvas.GLContext(self) self.SetFocus() self._vispy_set_title(p.title) self._size = None self.Bind(wx.EVT_SIZE, self.on_resize) self.Bind(wx.EVT_PAINT, self.on_draw) self.Bind(wx.EVT_KEY_DOWN, self.on_key_down) 
self.Bind(wx.EVT_KEY_UP, self.on_key_up) self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event) self._size_init = p.size self._vispy_set_visible(p.show) def on_resize(self, event): if self._vispy_canvas is None or not self._init: event.Skip() return size = event.GetSize() self._vispy_canvas.events.resize(size=size) self.Refresh() event.Skip() def on_draw(self, event): if self._vispy_canvas is None: return dc = wx.PaintDC(self) # needed for wx if not self._init: self._initialize() self._vispy_canvas.set_current() self._vispy_canvas.events.draw(region=None) del dc event.Skip() def _initialize(self): if self._vispy_canvas is None: return self._init = True self._vispy_canvas.set_current() self._vispy_canvas.events.initialize() self.on_resize(DummySize(self._size_init)) def _vispy_set_current(self): self.SetCurrent(self._gl_context) def _vispy_warmup(self): etime = time() + 0.3 while time() < etime: sleep(0.01) self._vispy_canvas.set_current() self._vispy_canvas.app.process_events() def _vispy_swap_buffers(self): # Swap front and back buffer self._vispy_canvas.set_current() self.SwapBuffers() def _vispy_set_title(self, title): # Set the window title. Has no effect for widgets if self._frame is not None: self._frame.SetLabel(title) def _vispy_set_size(self, w, h): # Set size of the widget or window if not self._init: self._size_init = (w, h) self.SetSizeWH(w, h) def _vispy_set_position(self, x, y): # Set positionof the widget or window. 
May have no effect for widgets if self._frame is not None: self._frame.SetPosition((x, y)) def _vispy_get_fullscreen(self): return self._fullscreen def _vispy_set_fullscreen(self, fullscreen): if self._frame is not None: self._fullscreen = bool(fullscreen) self._vispy_set_visible(True) def _vispy_set_visible(self, visible): # Show or hide the window or widget self.Show(visible) if visible: if self._frame is not None: self._frame.ShowFullScreen(self._fullscreen) def _vispy_update(self): # Invoke a redraw self.Refresh() def _vispy_close(self): if self._vispy_canvas is None: return # Force the window or widget to shut down canvas = self frame = self._frame self._gl_context = None # let RC destroy this in case it's shared canvas.Close() canvas.Destroy() if frame: frame.Close() frame.Destroy() gc.collect() # ensure context gets destroyed if it should be def _vispy_get_size(self): if self._vispy_canvas is None: return w, h = self.GetClientSize() return w, h def _vispy_get_position(self): if self._vispy_canvas is None: return x, y = self.GetPosition() return x, y def on_close(self, evt): if not self: # wx control evaluates to false if C++ part deleted return if self._vispy_canvas is None: return self._vispy_canvas.close() def on_mouse_event(self, evt): if self._vispy_canvas is None: return pos = (evt.GetX(), evt.GetY()) mods = _get_mods(evt) if evt.GetWheelRotation() != 0: delta = (0., float(evt.GetWheelRotation())) self._vispy_canvas.events.mouse_wheel(delta=delta, pos=pos, modifiers=mods) elif evt.Moving() or evt.Dragging(): # mouse move event self._vispy_mouse_move(pos=pos, modifiers=mods) elif evt.ButtonDown(): if evt.LeftDown(): button = 0 elif evt.MiddleDown(): button = 1 elif evt.RightDown(): button = 2 else: evt.Skip() self._vispy_mouse_press(pos=pos, button=button, modifiers=mods) elif evt.ButtonUp(): if evt.LeftUp(): button = 0 elif evt.MiddleUp(): button = 1 elif evt.RightUp(): button = 2 else: evt.Skip() self._vispy_mouse_release(pos=pos, button=button, 
modifiers=mods) elif evt.ButtonDClick(): if evt.LeftDClick(): button = 0 elif evt.MiddleDClick(): button = 1 elif evt.RightDClick(): button = 2 else: evt.Skip() self._vispy_mouse_press(pos=pos, button=button, modifiers=mods) self._vispy_mouse_double_click(pos=pos, button=button, modifiers=mods) evt.Skip() def on_key_down(self, evt): if self._vispy_canvas is None: return key, text = _process_key(evt) self._vispy_canvas.events.key_press(key=key, text=text, modifiers=_get_mods(evt)) def on_key_up(self, evt): if self._vispy_canvas is None: return key, text = _process_key(evt) self._vispy_canvas.events.key_release(key=key, text=text, modifiers=_get_mods(evt)) # ------------------------------------------------------------------- timer --- class TimerBackend(BaseTimerBackend): def __init__(self, vispy_timer): BaseTimerBackend.__init__(self, vispy_timer) assert _wx_app is not None parent = _wx_app.GetTopWindow() # assume it's the parent window self._timer = wx.Timer(parent, -1) parent.Bind(wx.EVT_TIMER, self._vispy_timeout, self._timer) def _vispy_start(self, interval): self._timer.Start(interval * 1000., False) def _vispy_stop(self): self._timer.Stop() def _vispy_timeout(self, evt): self._vispy_timer._timeout() evt.Skip()
"""Helper to convert from wx keycode to vispy keycode""" key = evt.GetKeyCode() if key in KEYMAP: return KEYMAP[key], '' if 97 <= key <= 122: key -= 32 if key >= 32 and key <= 127: return keys.Key(chr(key)), chr(key) else: return None, None
identifier_body
_wx.py
# -*- coding: utf-8 -*- # Copyright (c) 2015, Vispy Development Team. # Distributed under the (new) BSD License. See LICENSE.txt for more info. """ vispy backend for wxPython. """ from __future__ import division from time import sleep import gc import warnings from ..base import (BaseApplicationBackend, BaseCanvasBackend, BaseTimerBackend) from ...util import keys, logger from ...util.ptime import time from ... import config USE_EGL = config['gl_backend'].lower().startswith('es') # -------------------------------------------------------------------- init --- try: # avoid silly locale warning on OSX with warnings.catch_warnings(record=True): import wx from wx import glcanvas from wx.glcanvas import GLCanvas # Map native keys to vispy keys KEYMAP = { wx.WXK_SHIFT: keys.SHIFT, wx.WXK_CONTROL: keys.CONTROL, wx.WXK_ALT: keys.ALT, wx.WXK_WINDOWS_MENU: keys.META, wx.WXK_LEFT: keys.LEFT, wx.WXK_UP: keys.UP, wx.WXK_RIGHT: keys.RIGHT, wx.WXK_DOWN: keys.DOWN, wx.WXK_PAGEUP: keys.PAGEUP, wx.WXK_PAGEDOWN: keys.PAGEDOWN, wx.WXK_INSERT: keys.INSERT, wx.WXK_DELETE: keys.DELETE, wx.WXK_HOME: keys.HOME, wx.WXK_END: keys.END, wx.WXK_ESCAPE: keys.ESCAPE, wx.WXK_BACK: keys.BACKSPACE, wx.WXK_F1: keys.F1, wx.WXK_F2: keys.F2, wx.WXK_F3: keys.F3, wx.WXK_F4: keys.F4, wx.WXK_F5: keys.F5, wx.WXK_F6: keys.F6, wx.WXK_F7: keys.F7, wx.WXK_F8: keys.F8, wx.WXK_F9: keys.F9, wx.WXK_F10: keys.F10, wx.WXK_F11: keys.F11, wx.WXK_F12: keys.F12, wx.WXK_SPACE: keys.SPACE, wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN wx.WXK_NUMPAD_ENTER: keys.ENTER, wx.WXK_TAB: keys.TAB, } except Exception as exp: available, testable, why_not, which = False, False, str(exp), None class GLCanvas(object): pass else: if USE_EGL: available, testable, why_not = False, False, 'EGL not supported' else: available, testable, why_not = True, True, None which = 'wxPython ' + str(wx.__version__) # -------------------------------------------------------------- capability --- capability = dict( # things that can be set by the 
backend title=True, size=True, position=True, show=True, vsync=True, resizable=True, decorate=True, fullscreen=True, context=True, multi_window=True, scroll=True, parent=True, always_on_top=True, ) # ------------------------------------------------------- set_configuration --- def _set_config(c): """Set gl configuration""" gl_attribs = [glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'], glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'], glcanvas.WX_GL_MIN_RED, c['red_size'], glcanvas.WX_GL_MIN_GREEN, c['green_size'], glcanvas.WX_GL_MIN_BLUE, c['blue_size'], glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']] gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else [] gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else [] return gl_attribs # ------------------------------------------------------------- application --- _wx_app = None _timers = [] class ApplicationBackend(BaseApplicationBackend): def __init__(self): BaseApplicationBackend.__init__(self) self._event_loop = wx.EventLoop() wx.EventLoop.SetActive(self._event_loop) def _vispy_get_backend_name(self): return 'wx' def _vispy_process_events(self): # inpsired by https://github.com/wxWidgets/wxPython/blob/master/ # samples/mainloop/mainloop.py for _ in range(3): # trial-and-error found this to work (!) while self._event_loop.Pending(): self._event_loop.Dispatch() _wx_app.ProcessIdle() sleep(0.01) def _vispy_run(self): return _wx_app.MainLoop() def _vispy_quit(self): global _wx_app _wx_app.ExitMainLoop() def _vispy_get_native_app(self): # Get native app in save way. 
Taken from guisupport.py global _wx_app _wx_app = wx.GetApp() # in case the user already has one if _wx_app is None: _wx_app = wx.PySimpleApp() _wx_app.SetExitOnFrameDelete(True) return _wx_app # ------------------------------------------------------------------ canvas --- def _get_mods(evt): """Helper to extract list of mods from event""" mods = [] mods += [keys.CONTROL] if evt.ControlDown() else [] mods += [keys.ALT] if evt.AltDown() else [] mods += [keys.SHIFT] if evt.ShiftDown() else [] mods += [keys.META] if evt.MetaDown() else [] return mods def _process_key(evt): """Helper to convert from wx keycode to vispy keycode""" key = evt.GetKeyCode() if key in KEYMAP: return KEYMAP[key], '' if 97 <= key <= 122: key -= 32 if key >= 32 and key <= 127: return keys.Key(chr(key)), chr(key) else: return None, None class DummySize(object): def __init__(self, size): self.size = size def GetSize(self): return self.size def Skip(self): pass class CanvasBackend(GLCanvas, BaseCanvasBackend): """ wxPython backend for Canvas abstract class.""" # args are for BaseCanvasBackend, kwargs are for us. 
def __init__(self, *args, **kwargs): BaseCanvasBackend.__init__(self, *args) p = self._process_backend_kwargs(kwargs) # WX supports OS double-click events, so we set this here to # avoid double events self._double_click_supported = True # Set config self._gl_attribs = _set_config(p.context.config) # Deal with context p.context.shared.add_ref('wx', self) if p.context.shared.ref is self: self._gl_context = None # set for real once we init the GLCanvas else: self._gl_context = p.context.shared.ref._gl_context if p.parent is None: style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX | wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN) style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER style |= wx.STAY_ON_TOP if p.always_on_top else 0 self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position, p.size, style) if not p.resizable: self._frame.SetSizeHints(p.size[0], p.size[1], p.size[0], p.size[1]) if p.fullscreen is not False: if p.fullscreen is not True: logger.warning('Cannot specify monitor number for wx ' 'fullscreen, using default') self._fullscreen = True else: self._fullscreen = False _wx_app.SetTopWindow(self._frame) parent = self._frame self._frame.Raise() self._frame.Bind(wx.EVT_CLOSE, self.on_close) else: parent = p.parent self._frame = None self._fullscreen = False self._init = False GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position, size=p.size, style=0, name='GLCanvas', attribList=self._gl_attribs) if self._gl_context is None: self._gl_context = glcanvas.GLContext(self) self.SetFocus() self._vispy_set_title(p.title) self._size = None self.Bind(wx.EVT_SIZE, self.on_resize) self.Bind(wx.EVT_PAINT, self.on_draw) self.Bind(wx.EVT_KEY_DOWN, self.on_key_down) self.Bind(wx.EVT_KEY_UP, self.on_key_up) self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event) self._size_init = p.size self._vispy_set_visible(p.show) def on_resize(self, event): if self._vispy_canvas is None or not self._init: event.Skip() return size = event.GetSize() 
self._vispy_canvas.events.resize(size=size) self.Refresh() event.Skip() def on_draw(self, event): if self._vispy_canvas is None: return dc = wx.PaintDC(self) # needed for wx if not self._init: self._initialize() self._vispy_canvas.set_current() self._vispy_canvas.events.draw(region=None) del dc event.Skip() def _initialize(self): if self._vispy_canvas is None: return self._init = True self._vispy_canvas.set_current() self._vispy_canvas.events.initialize() self.on_resize(DummySize(self._size_init)) def _vispy_set_current(self): self.SetCurrent(self._gl_context) def _vispy_warmup(self): etime = time() + 0.3 while time() < etime: sleep(0.01) self._vispy_canvas.set_current() self._vispy_canvas.app.process_events() def _vispy_swap_buffers(self): # Swap front and back buffer self._vispy_canvas.set_current() self.SwapBuffers() def _vispy_set_title(self, title): # Set the window title. Has no effect for widgets if self._frame is not None: self._frame.SetLabel(title) def _vispy_set_size(self, w, h): # Set size of the widget or window if not self._init: self._size_init = (w, h) self.SetSizeWH(w, h) def _vispy_set_position(self, x, y): # Set positionof the widget or window. May have no effect for widgets if self._frame is not None: self._frame.SetPosition((x, y)) def
(self): return self._fullscreen def _vispy_set_fullscreen(self, fullscreen): if self._frame is not None: self._fullscreen = bool(fullscreen) self._vispy_set_visible(True) def _vispy_set_visible(self, visible): # Show or hide the window or widget self.Show(visible) if visible: if self._frame is not None: self._frame.ShowFullScreen(self._fullscreen) def _vispy_update(self): # Invoke a redraw self.Refresh() def _vispy_close(self): if self._vispy_canvas is None: return # Force the window or widget to shut down canvas = self frame = self._frame self._gl_context = None # let RC destroy this in case it's shared canvas.Close() canvas.Destroy() if frame: frame.Close() frame.Destroy() gc.collect() # ensure context gets destroyed if it should be def _vispy_get_size(self): if self._vispy_canvas is None: return w, h = self.GetClientSize() return w, h def _vispy_get_position(self): if self._vispy_canvas is None: return x, y = self.GetPosition() return x, y def on_close(self, evt): if not self: # wx control evaluates to false if C++ part deleted return if self._vispy_canvas is None: return self._vispy_canvas.close() def on_mouse_event(self, evt): if self._vispy_canvas is None: return pos = (evt.GetX(), evt.GetY()) mods = _get_mods(evt) if evt.GetWheelRotation() != 0: delta = (0., float(evt.GetWheelRotation())) self._vispy_canvas.events.mouse_wheel(delta=delta, pos=pos, modifiers=mods) elif evt.Moving() or evt.Dragging(): # mouse move event self._vispy_mouse_move(pos=pos, modifiers=mods) elif evt.ButtonDown(): if evt.LeftDown(): button = 0 elif evt.MiddleDown(): button = 1 elif evt.RightDown(): button = 2 else: evt.Skip() self._vispy_mouse_press(pos=pos, button=button, modifiers=mods) elif evt.ButtonUp(): if evt.LeftUp(): button = 0 elif evt.MiddleUp(): button = 1 elif evt.RightUp(): button = 2 else: evt.Skip() self._vispy_mouse_release(pos=pos, button=button, modifiers=mods) elif evt.ButtonDClick(): if evt.LeftDClick(): button = 0 elif evt.MiddleDClick(): button = 1 elif 
evt.RightDClick(): button = 2 else: evt.Skip() self._vispy_mouse_press(pos=pos, button=button, modifiers=mods) self._vispy_mouse_double_click(pos=pos, button=button, modifiers=mods) evt.Skip() def on_key_down(self, evt): if self._vispy_canvas is None: return key, text = _process_key(evt) self._vispy_canvas.events.key_press(key=key, text=text, modifiers=_get_mods(evt)) def on_key_up(self, evt): if self._vispy_canvas is None: return key, text = _process_key(evt) self._vispy_canvas.events.key_release(key=key, text=text, modifiers=_get_mods(evt)) # ------------------------------------------------------------------- timer --- class TimerBackend(BaseTimerBackend): def __init__(self, vispy_timer): BaseTimerBackend.__init__(self, vispy_timer) assert _wx_app is not None parent = _wx_app.GetTopWindow() # assume it's the parent window self._timer = wx.Timer(parent, -1) parent.Bind(wx.EVT_TIMER, self._vispy_timeout, self._timer) def _vispy_start(self, interval): self._timer.Start(interval * 1000., False) def _vispy_stop(self): self._timer.Stop() def _vispy_timeout(self, evt): self._vispy_timer._timeout() evt.Skip()
_vispy_get_fullscreen
identifier_name
_wx.py
# -*- coding: utf-8 -*- # Copyright (c) 2015, Vispy Development Team. # Distributed under the (new) BSD License. See LICENSE.txt for more info. """ vispy backend for wxPython. """ from __future__ import division from time import sleep import gc import warnings from ..base import (BaseApplicationBackend, BaseCanvasBackend, BaseTimerBackend) from ...util import keys, logger from ...util.ptime import time from ... import config USE_EGL = config['gl_backend'].lower().startswith('es') # -------------------------------------------------------------------- init --- try: # avoid silly locale warning on OSX with warnings.catch_warnings(record=True): import wx from wx import glcanvas from wx.glcanvas import GLCanvas # Map native keys to vispy keys KEYMAP = { wx.WXK_SHIFT: keys.SHIFT, wx.WXK_CONTROL: keys.CONTROL, wx.WXK_ALT: keys.ALT, wx.WXK_WINDOWS_MENU: keys.META, wx.WXK_LEFT: keys.LEFT, wx.WXK_UP: keys.UP, wx.WXK_RIGHT: keys.RIGHT, wx.WXK_DOWN: keys.DOWN, wx.WXK_PAGEUP: keys.PAGEUP, wx.WXK_PAGEDOWN: keys.PAGEDOWN, wx.WXK_INSERT: keys.INSERT, wx.WXK_DELETE: keys.DELETE, wx.WXK_HOME: keys.HOME, wx.WXK_END: keys.END, wx.WXK_ESCAPE: keys.ESCAPE, wx.WXK_BACK: keys.BACKSPACE, wx.WXK_F1: keys.F1, wx.WXK_F2: keys.F2, wx.WXK_F3: keys.F3, wx.WXK_F4: keys.F4, wx.WXK_F5: keys.F5, wx.WXK_F6: keys.F6, wx.WXK_F7: keys.F7, wx.WXK_F8: keys.F8, wx.WXK_F9: keys.F9, wx.WXK_F10: keys.F10, wx.WXK_F11: keys.F11, wx.WXK_F12: keys.F12, wx.WXK_SPACE: keys.SPACE, wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN wx.WXK_NUMPAD_ENTER: keys.ENTER, wx.WXK_TAB: keys.TAB, } except Exception as exp: available, testable, why_not, which = False, False, str(exp), None class GLCanvas(object): pass else: if USE_EGL: available, testable, why_not = False, False, 'EGL not supported' else: available, testable, why_not = True, True, None which = 'wxPython ' + str(wx.__version__) # -------------------------------------------------------------- capability --- capability = dict( # things that can be set by the 
backend title=True, size=True, position=True, show=True, vsync=True, resizable=True, decorate=True, fullscreen=True, context=True, multi_window=True, scroll=True, parent=True, always_on_top=True, ) # ------------------------------------------------------- set_configuration --- def _set_config(c): """Set gl configuration""" gl_attribs = [glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'], glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'], glcanvas.WX_GL_MIN_RED, c['red_size'], glcanvas.WX_GL_MIN_GREEN, c['green_size'], glcanvas.WX_GL_MIN_BLUE, c['blue_size'], glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']] gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else [] gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else [] return gl_attribs # ------------------------------------------------------------- application --- _wx_app = None _timers = [] class ApplicationBackend(BaseApplicationBackend): def __init__(self): BaseApplicationBackend.__init__(self) self._event_loop = wx.EventLoop() wx.EventLoop.SetActive(self._event_loop) def _vispy_get_backend_name(self): return 'wx' def _vispy_process_events(self): # inpsired by https://github.com/wxWidgets/wxPython/blob/master/ # samples/mainloop/mainloop.py for _ in range(3): # trial-and-error found this to work (!) while self._event_loop.Pending(): self._event_loop.Dispatch() _wx_app.ProcessIdle() sleep(0.01) def _vispy_run(self): return _wx_app.MainLoop() def _vispy_quit(self): global _wx_app _wx_app.ExitMainLoop() def _vispy_get_native_app(self): # Get native app in save way. 
Taken from guisupport.py global _wx_app _wx_app = wx.GetApp() # in case the user already has one if _wx_app is None: _wx_app = wx.PySimpleApp() _wx_app.SetExitOnFrameDelete(True) return _wx_app # ------------------------------------------------------------------ canvas --- def _get_mods(evt): """Helper to extract list of mods from event""" mods = [] mods += [keys.CONTROL] if evt.ControlDown() else [] mods += [keys.ALT] if evt.AltDown() else [] mods += [keys.SHIFT] if evt.ShiftDown() else [] mods += [keys.META] if evt.MetaDown() else [] return mods
"""Helper to convert from wx keycode to vispy keycode""" key = evt.GetKeyCode() if key in KEYMAP: return KEYMAP[key], '' if 97 <= key <= 122: key -= 32 if key >= 32 and key <= 127: return keys.Key(chr(key)), chr(key) else: return None, None class DummySize(object): def __init__(self, size): self.size = size def GetSize(self): return self.size def Skip(self): pass class CanvasBackend(GLCanvas, BaseCanvasBackend): """ wxPython backend for Canvas abstract class.""" # args are for BaseCanvasBackend, kwargs are for us. def __init__(self, *args, **kwargs): BaseCanvasBackend.__init__(self, *args) p = self._process_backend_kwargs(kwargs) # WX supports OS double-click events, so we set this here to # avoid double events self._double_click_supported = True # Set config self._gl_attribs = _set_config(p.context.config) # Deal with context p.context.shared.add_ref('wx', self) if p.context.shared.ref is self: self._gl_context = None # set for real once we init the GLCanvas else: self._gl_context = p.context.shared.ref._gl_context if p.parent is None: style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX | wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN) style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER style |= wx.STAY_ON_TOP if p.always_on_top else 0 self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position, p.size, style) if not p.resizable: self._frame.SetSizeHints(p.size[0], p.size[1], p.size[0], p.size[1]) if p.fullscreen is not False: if p.fullscreen is not True: logger.warning('Cannot specify monitor number for wx ' 'fullscreen, using default') self._fullscreen = True else: self._fullscreen = False _wx_app.SetTopWindow(self._frame) parent = self._frame self._frame.Raise() self._frame.Bind(wx.EVT_CLOSE, self.on_close) else: parent = p.parent self._frame = None self._fullscreen = False self._init = False GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position, size=p.size, style=0, name='GLCanvas', attribList=self._gl_attribs) if self._gl_context is None: 
self._gl_context = glcanvas.GLContext(self) self.SetFocus() self._vispy_set_title(p.title) self._size = None self.Bind(wx.EVT_SIZE, self.on_resize) self.Bind(wx.EVT_PAINT, self.on_draw) self.Bind(wx.EVT_KEY_DOWN, self.on_key_down) self.Bind(wx.EVT_KEY_UP, self.on_key_up) self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event) self._size_init = p.size self._vispy_set_visible(p.show) def on_resize(self, event): if self._vispy_canvas is None or not self._init: event.Skip() return size = event.GetSize() self._vispy_canvas.events.resize(size=size) self.Refresh() event.Skip() def on_draw(self, event): if self._vispy_canvas is None: return dc = wx.PaintDC(self) # needed for wx if not self._init: self._initialize() self._vispy_canvas.set_current() self._vispy_canvas.events.draw(region=None) del dc event.Skip() def _initialize(self): if self._vispy_canvas is None: return self._init = True self._vispy_canvas.set_current() self._vispy_canvas.events.initialize() self.on_resize(DummySize(self._size_init)) def _vispy_set_current(self): self.SetCurrent(self._gl_context) def _vispy_warmup(self): etime = time() + 0.3 while time() < etime: sleep(0.01) self._vispy_canvas.set_current() self._vispy_canvas.app.process_events() def _vispy_swap_buffers(self): # Swap front and back buffer self._vispy_canvas.set_current() self.SwapBuffers() def _vispy_set_title(self, title): # Set the window title. Has no effect for widgets if self._frame is not None: self._frame.SetLabel(title) def _vispy_set_size(self, w, h): # Set size of the widget or window if not self._init: self._size_init = (w, h) self.SetSizeWH(w, h) def _vispy_set_position(self, x, y): # Set positionof the widget or window. 
May have no effect for widgets if self._frame is not None: self._frame.SetPosition((x, y)) def _vispy_get_fullscreen(self): return self._fullscreen def _vispy_set_fullscreen(self, fullscreen): if self._frame is not None: self._fullscreen = bool(fullscreen) self._vispy_set_visible(True) def _vispy_set_visible(self, visible): # Show or hide the window or widget self.Show(visible) if visible: if self._frame is not None: self._frame.ShowFullScreen(self._fullscreen) def _vispy_update(self): # Invoke a redraw self.Refresh() def _vispy_close(self): if self._vispy_canvas is None: return # Force the window or widget to shut down canvas = self frame = self._frame self._gl_context = None # let RC destroy this in case it's shared canvas.Close() canvas.Destroy() if frame: frame.Close() frame.Destroy() gc.collect() # ensure context gets destroyed if it should be def _vispy_get_size(self): if self._vispy_canvas is None: return w, h = self.GetClientSize() return w, h def _vispy_get_position(self): if self._vispy_canvas is None: return x, y = self.GetPosition() return x, y def on_close(self, evt): if not self: # wx control evaluates to false if C++ part deleted return if self._vispy_canvas is None: return self._vispy_canvas.close() def on_mouse_event(self, evt): if self._vispy_canvas is None: return pos = (evt.GetX(), evt.GetY()) mods = _get_mods(evt) if evt.GetWheelRotation() != 0: delta = (0., float(evt.GetWheelRotation())) self._vispy_canvas.events.mouse_wheel(delta=delta, pos=pos, modifiers=mods) elif evt.Moving() or evt.Dragging(): # mouse move event self._vispy_mouse_move(pos=pos, modifiers=mods) elif evt.ButtonDown(): if evt.LeftDown(): button = 0 elif evt.MiddleDown(): button = 1 elif evt.RightDown(): button = 2 else: evt.Skip() self._vispy_mouse_press(pos=pos, button=button, modifiers=mods) elif evt.ButtonUp(): if evt.LeftUp(): button = 0 elif evt.MiddleUp(): button = 1 elif evt.RightUp(): button = 2 else: evt.Skip() self._vispy_mouse_release(pos=pos, button=button, 
modifiers=mods) elif evt.ButtonDClick(): if evt.LeftDClick(): button = 0 elif evt.MiddleDClick(): button = 1 elif evt.RightDClick(): button = 2 else: evt.Skip() self._vispy_mouse_press(pos=pos, button=button, modifiers=mods) self._vispy_mouse_double_click(pos=pos, button=button, modifiers=mods) evt.Skip() def on_key_down(self, evt): if self._vispy_canvas is None: return key, text = _process_key(evt) self._vispy_canvas.events.key_press(key=key, text=text, modifiers=_get_mods(evt)) def on_key_up(self, evt): if self._vispy_canvas is None: return key, text = _process_key(evt) self._vispy_canvas.events.key_release(key=key, text=text, modifiers=_get_mods(evt)) # ------------------------------------------------------------------- timer --- class TimerBackend(BaseTimerBackend): def __init__(self, vispy_timer): BaseTimerBackend.__init__(self, vispy_timer) assert _wx_app is not None parent = _wx_app.GetTopWindow() # assume it's the parent window self._timer = wx.Timer(parent, -1) parent.Bind(wx.EVT_TIMER, self._vispy_timeout, self._timer) def _vispy_start(self, interval): self._timer.Start(interval * 1000., False) def _vispy_stop(self): self._timer.Stop() def _vispy_timeout(self, evt): self._vispy_timer._timeout() evt.Skip()
def _process_key(evt):
random_line_split
_wx.py
# -*- coding: utf-8 -*- # Copyright (c) 2015, Vispy Development Team. # Distributed under the (new) BSD License. See LICENSE.txt for more info. """ vispy backend for wxPython. """ from __future__ import division from time import sleep import gc import warnings from ..base import (BaseApplicationBackend, BaseCanvasBackend, BaseTimerBackend) from ...util import keys, logger from ...util.ptime import time from ... import config USE_EGL = config['gl_backend'].lower().startswith('es') # -------------------------------------------------------------------- init --- try: # avoid silly locale warning on OSX with warnings.catch_warnings(record=True): import wx from wx import glcanvas from wx.glcanvas import GLCanvas # Map native keys to vispy keys KEYMAP = { wx.WXK_SHIFT: keys.SHIFT, wx.WXK_CONTROL: keys.CONTROL, wx.WXK_ALT: keys.ALT, wx.WXK_WINDOWS_MENU: keys.META, wx.WXK_LEFT: keys.LEFT, wx.WXK_UP: keys.UP, wx.WXK_RIGHT: keys.RIGHT, wx.WXK_DOWN: keys.DOWN, wx.WXK_PAGEUP: keys.PAGEUP, wx.WXK_PAGEDOWN: keys.PAGEDOWN, wx.WXK_INSERT: keys.INSERT, wx.WXK_DELETE: keys.DELETE, wx.WXK_HOME: keys.HOME, wx.WXK_END: keys.END, wx.WXK_ESCAPE: keys.ESCAPE, wx.WXK_BACK: keys.BACKSPACE, wx.WXK_F1: keys.F1, wx.WXK_F2: keys.F2, wx.WXK_F3: keys.F3, wx.WXK_F4: keys.F4, wx.WXK_F5: keys.F5, wx.WXK_F6: keys.F6, wx.WXK_F7: keys.F7, wx.WXK_F8: keys.F8, wx.WXK_F9: keys.F9, wx.WXK_F10: keys.F10, wx.WXK_F11: keys.F11, wx.WXK_F12: keys.F12, wx.WXK_SPACE: keys.SPACE, wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN wx.WXK_NUMPAD_ENTER: keys.ENTER, wx.WXK_TAB: keys.TAB, } except Exception as exp: available, testable, why_not, which = False, False, str(exp), None class GLCanvas(object): pass else: if USE_EGL: available, testable, why_not = False, False, 'EGL not supported' else: available, testable, why_not = True, True, None which = 'wxPython ' + str(wx.__version__) # -------------------------------------------------------------- capability --- capability = dict( # things that can be set by the 
backend title=True, size=True, position=True, show=True, vsync=True, resizable=True, decorate=True, fullscreen=True, context=True, multi_window=True, scroll=True, parent=True, always_on_top=True, ) # ------------------------------------------------------- set_configuration --- def _set_config(c): """Set gl configuration""" gl_attribs = [glcanvas.WX_GL_RGBA, glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'], glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'], glcanvas.WX_GL_MIN_RED, c['red_size'], glcanvas.WX_GL_MIN_GREEN, c['green_size'], glcanvas.WX_GL_MIN_BLUE, c['blue_size'], glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']] gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else [] gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else [] return gl_attribs # ------------------------------------------------------------- application --- _wx_app = None _timers = [] class ApplicationBackend(BaseApplicationBackend): def __init__(self): BaseApplicationBackend.__init__(self) self._event_loop = wx.EventLoop() wx.EventLoop.SetActive(self._event_loop) def _vispy_get_backend_name(self): return 'wx' def _vispy_process_events(self): # inpsired by https://github.com/wxWidgets/wxPython/blob/master/ # samples/mainloop/mainloop.py for _ in range(3): # trial-and-error found this to work (!) while self._event_loop.Pending(): self._event_loop.Dispatch() _wx_app.ProcessIdle() sleep(0.01) def _vispy_run(self): return _wx_app.MainLoop() def _vispy_quit(self): global _wx_app _wx_app.ExitMainLoop() def _vispy_get_native_app(self): # Get native app in save way. 
Taken from guisupport.py global _wx_app _wx_app = wx.GetApp() # in case the user already has one if _wx_app is None: _wx_app = wx.PySimpleApp() _wx_app.SetExitOnFrameDelete(True) return _wx_app # ------------------------------------------------------------------ canvas --- def _get_mods(evt): """Helper to extract list of mods from event""" mods = [] mods += [keys.CONTROL] if evt.ControlDown() else [] mods += [keys.ALT] if evt.AltDown() else [] mods += [keys.SHIFT] if evt.ShiftDown() else [] mods += [keys.META] if evt.MetaDown() else [] return mods def _process_key(evt): """Helper to convert from wx keycode to vispy keycode""" key = evt.GetKeyCode() if key in KEYMAP: return KEYMAP[key], '' if 97 <= key <= 122: key -= 32 if key >= 32 and key <= 127: return keys.Key(chr(key)), chr(key) else: return None, None class DummySize(object): def __init__(self, size): self.size = size def GetSize(self): return self.size def Skip(self): pass class CanvasBackend(GLCanvas, BaseCanvasBackend): """ wxPython backend for Canvas abstract class.""" # args are for BaseCanvasBackend, kwargs are for us. 
def __init__(self, *args, **kwargs): BaseCanvasBackend.__init__(self, *args) p = self._process_backend_kwargs(kwargs) # WX supports OS double-click events, so we set this here to # avoid double events self._double_click_supported = True # Set config self._gl_attribs = _set_config(p.context.config) # Deal with context p.context.shared.add_ref('wx', self) if p.context.shared.ref is self: self._gl_context = None # set for real once we init the GLCanvas else: self._gl_context = p.context.shared.ref._gl_context if p.parent is None: style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX | wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN) style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER style |= wx.STAY_ON_TOP if p.always_on_top else 0 self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position, p.size, style) if not p.resizable: self._frame.SetSizeHints(p.size[0], p.size[1], p.size[0], p.size[1]) if p.fullscreen is not False: if p.fullscreen is not True: logger.warning('Cannot specify monitor number for wx ' 'fullscreen, using default') self._fullscreen = True else: self._fullscreen = False _wx_app.SetTopWindow(self._frame) parent = self._frame self._frame.Raise() self._frame.Bind(wx.EVT_CLOSE, self.on_close) else: parent = p.parent self._frame = None self._fullscreen = False self._init = False GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position, size=p.size, style=0, name='GLCanvas', attribList=self._gl_attribs) if self._gl_context is None: self._gl_context = glcanvas.GLContext(self) self.SetFocus() self._vispy_set_title(p.title) self._size = None self.Bind(wx.EVT_SIZE, self.on_resize) self.Bind(wx.EVT_PAINT, self.on_draw) self.Bind(wx.EVT_KEY_DOWN, self.on_key_down) self.Bind(wx.EVT_KEY_UP, self.on_key_up) self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event) self._size_init = p.size self._vispy_set_visible(p.show) def on_resize(self, event): if self._vispy_canvas is None or not self._init: event.Skip() return size = event.GetSize() 
self._vispy_canvas.events.resize(size=size) self.Refresh() event.Skip() def on_draw(self, event): if self._vispy_canvas is None: return dc = wx.PaintDC(self) # needed for wx if not self._init: self._initialize() self._vispy_canvas.set_current() self._vispy_canvas.events.draw(region=None) del dc event.Skip() def _initialize(self): if self._vispy_canvas is None: return self._init = True self._vispy_canvas.set_current() self._vispy_canvas.events.initialize() self.on_resize(DummySize(self._size_init)) def _vispy_set_current(self): self.SetCurrent(self._gl_context) def _vispy_warmup(self): etime = time() + 0.3 while time() < etime: sleep(0.01) self._vispy_canvas.set_current() self._vispy_canvas.app.process_events() def _vispy_swap_buffers(self): # Swap front and back buffer self._vispy_canvas.set_current() self.SwapBuffers() def _vispy_set_title(self, title): # Set the window title. Has no effect for widgets if self._frame is not None: self._frame.SetLabel(title) def _vispy_set_size(self, w, h): # Set size of the widget or window if not self._init: self._size_init = (w, h) self.SetSizeWH(w, h) def _vispy_set_position(self, x, y): # Set positionof the widget or window. 
May have no effect for widgets if self._frame is not None: self._frame.SetPosition((x, y)) def _vispy_get_fullscreen(self): return self._fullscreen def _vispy_set_fullscreen(self, fullscreen): if self._frame is not None: self._fullscreen = bool(fullscreen) self._vispy_set_visible(True) def _vispy_set_visible(self, visible): # Show or hide the window or widget self.Show(visible) if visible: if self._frame is not None: self._frame.ShowFullScreen(self._fullscreen) def _vispy_update(self): # Invoke a redraw self.Refresh() def _vispy_close(self): if self._vispy_canvas is None: return # Force the window or widget to shut down canvas = self frame = self._frame self._gl_context = None # let RC destroy this in case it's shared canvas.Close() canvas.Destroy() if frame: frame.Close() frame.Destroy() gc.collect() # ensure context gets destroyed if it should be def _vispy_get_size(self): if self._vispy_canvas is None: return w, h = self.GetClientSize() return w, h def _vispy_get_position(self): if self._vispy_canvas is None: return x, y = self.GetPosition() return x, y def on_close(self, evt): if not self: # wx control evaluates to false if C++ part deleted return if self._vispy_canvas is None: return self._vispy_canvas.close() def on_mouse_event(self, evt): if self._vispy_canvas is None: return pos = (evt.GetX(), evt.GetY()) mods = _get_mods(evt) if evt.GetWheelRotation() != 0: delta = (0., float(evt.GetWheelRotation())) self._vispy_canvas.events.mouse_wheel(delta=delta, pos=pos, modifiers=mods) elif evt.Moving() or evt.Dragging(): # mouse move event self._vispy_mouse_move(pos=pos, modifiers=mods) elif evt.ButtonDown():
elif evt.ButtonUp(): if evt.LeftUp(): button = 0 elif evt.MiddleUp(): button = 1 elif evt.RightUp(): button = 2 else: evt.Skip() self._vispy_mouse_release(pos=pos, button=button, modifiers=mods) elif evt.ButtonDClick(): if evt.LeftDClick(): button = 0 elif evt.MiddleDClick(): button = 1 elif evt.RightDClick(): button = 2 else: evt.Skip() self._vispy_mouse_press(pos=pos, button=button, modifiers=mods) self._vispy_mouse_double_click(pos=pos, button=button, modifiers=mods) evt.Skip() def on_key_down(self, evt): if self._vispy_canvas is None: return key, text = _process_key(evt) self._vispy_canvas.events.key_press(key=key, text=text, modifiers=_get_mods(evt)) def on_key_up(self, evt): if self._vispy_canvas is None: return key, text = _process_key(evt) self._vispy_canvas.events.key_release(key=key, text=text, modifiers=_get_mods(evt)) # ------------------------------------------------------------------- timer --- class TimerBackend(BaseTimerBackend): def __init__(self, vispy_timer): BaseTimerBackend.__init__(self, vispy_timer) assert _wx_app is not None parent = _wx_app.GetTopWindow() # assume it's the parent window self._timer = wx.Timer(parent, -1) parent.Bind(wx.EVT_TIMER, self._vispy_timeout, self._timer) def _vispy_start(self, interval): self._timer.Start(interval * 1000., False) def _vispy_stop(self): self._timer.Stop() def _vispy_timeout(self, evt): self._vispy_timer._timeout() evt.Skip()
if evt.LeftDown(): button = 0 elif evt.MiddleDown(): button = 1 elif evt.RightDown(): button = 2 else: evt.Skip() self._vispy_mouse_press(pos=pos, button=button, modifiers=mods)
conditional_block
protocol_adapter.rs
use crate::HandlerError; use bigdecimal::{BigDecimal, FromPrimitive}; use graphql_parser::query::{ Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value, }; use query_core::query_document::*; /// Protocol adapter for GraphQL -> Query Document. /// /// GraphQL is mapped as following: /// - Every field of a `query { ... }` or single selection block `{ ... }` is mapped to an `Operation::Read`. /// - Every field of a single `mutation { ... }` is mapped to an `Operation::Write`. /// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored. /// - Fields on the queries are mapped to `Field`s, including arguments. /// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s. /// /// Currently unsupported features: /// - Fragments in any form. /// - Variables. /// - Subscription queries. /// - Query names are ignored pub struct GraphQLProtocolAdapter; impl GraphQLProtocolAdapter { pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> { let gql_doc = match graphql_parser::parse_query(query) { Ok(doc) => doc, Err(err) if err.to_string().contains("number too large to fit in target type") | err.to_string().contains("number too small to fit in target type") => { return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. 
Consider using `BigInt` as field type if you're trying to store large integers.".to_owned())); } err @ Err(_) => err?, }; Self::convert(gql_doc, operation_name) } pub fn convert(gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> { let mut operations: Vec<Operation> = match operation { Some(ref op) => gql_doc .definitions .into_iter() .find(|def| Self::matches_operation(def, op)) .ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query."))) .and_then(Self::convert_definition), None => gql_doc .definitions .into_iter() .map(Self::convert_definition) .collect::<crate::Result<Vec<Vec<Operation>>>>() .map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()), }?; let operation = operations .pop() .ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))? .dedup_selections(); Ok(operation) } fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>>
fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> { Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect()) } fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> { Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect()) } fn convert_selection_set(selection_set: SelectionSet<String>) -> crate::Result<Vec<Selection>> { selection_set .items .into_iter() .map(|item| match item { GqlSelection::Field(f) => { let arguments: Vec<(String, ArgumentValue)> = f .arguments .into_iter() .map(|(k, v)| Ok((k, Self::convert_value(v)?))) .collect::<crate::Result<Vec<_>>>()?; let nested_selections = Self::convert_selection_set(f.selection_set)?; Ok(Selection::new(f.name, f.alias, arguments, nested_selections)) } GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature( "Fragment spread", format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position), )), GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature( "Inline fragment", format!("At position {}.", i.position), )), }) .collect() } /// Checks if the given GraphQL definition matches the operation name that should be executed. 
fn matches_operation(def: &Definition<String>, operation: &str) -> bool { let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some(); match def { Definition::Fragment(_) => false, Definition::Operation(op) => match op { OperationDefinition::Subscription(s) => check(s.name.as_ref()), OperationDefinition::SelectionSet(_) => false, OperationDefinition::Query(q) => check(q.name.as_ref()), OperationDefinition::Mutation(m) => check(m.name.as_ref()), }, } } fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> { match value { Value::Variable(name) => Err(HandlerError::unsupported_feature( "Variable usage", format!("Variable '{name}'."), )), Value::Int(i) => match i.as_i64() { Some(i) => Ok(ArgumentValue::int(i)), None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))), }, Value::Float(f) => match BigDecimal::from_f64(f) { Some(dec) => Ok(ArgumentValue::float(dec)), None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))), }, Value::String(s) => Ok(ArgumentValue::string(s)), Value::Boolean(b) => Ok(ArgumentValue::bool(b)), Value::Null => Ok(ArgumentValue::null()), Value::Enum(e) => Ok(ArgumentValue::r#enum(e)), Value::List(values) => { let values: Vec<ArgumentValue> = values .into_iter() .map(Self::convert_value) .collect::<crate::Result<Vec<ArgumentValue>>>()?; Ok(ArgumentValue::list(values)) } Value::Object(map) => { let values = map .into_iter() .map(|(k, v)| Self::convert_value(v).map(|v| (k, v))) .collect::<crate::Result<ArgumentValueObject>>()?; Ok(ArgumentValue::object(values)) } } } } #[cfg(test)] mod tests { use super::*; #[test] fn converts_single_query() { let query = r#" query findTheModelOperation { findOneModel(where: {a_number: {gte: 1}}) { id, large_number, other { name } } } "#; let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap(); assert_eq!(operation.name(), "findOneModel"); assert!(matches!(operation, 
Operation::Read(_))); let read = operation.into_read().unwrap(); let where_args = ArgumentValue::object([( "a_number".to_string(), ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]), )]); assert_eq!(read.arguments(), [("where".to_string(), where_args)]); let selections = Vec::from([ Selection::new("id", None, [], Vec::new()), Selection::new("large_number", None, [], Vec::new()), Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])), ]); assert_eq!(read.nested_selections(), selections); } #[test] fn converts_single_mutation() { let query = r#" mutation { createOnePost(data: { id: 1, categories: {create: [{id: 1}, {id: 2}]} }) { id, categories { id } } } "#; let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap(); assert_eq!(operation.name(), "createOnePost"); assert!(matches!(operation, Operation::Write(_))); let write = operation.into_write().unwrap(); let data_args = ArgumentValue::object([ ("id".to_string(), ArgumentValue::int(1)), ( "categories".to_string(), ArgumentValue::object([( "create".to_string(), ArgumentValue::list([ ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]), ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]), ]), )]), ), ]); println!("args {:?}", write.arguments()); assert_eq!(write.arguments(), [("data".to_string(), data_args)]); } }
{ match def { Definition::Fragment(f) => Err(HandlerError::unsupported_feature( "Fragment definition", format!("Fragment '{}', at position {}.", f.name, f.position), )), Definition::Operation(op) => match op { OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature( "Subscription query", format!("At position {}.", s.position), )), OperationDefinition::SelectionSet(s) => Self::convert_query(s), OperationDefinition::Query(q) => Self::convert_query(q.selection_set), OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set), }, } }
identifier_body
protocol_adapter.rs
use crate::HandlerError; use bigdecimal::{BigDecimal, FromPrimitive}; use graphql_parser::query::{ Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value, }; use query_core::query_document::*; /// Protocol adapter for GraphQL -> Query Document. /// /// GraphQL is mapped as following: /// - Every field of a `query { ... }` or single selection block `{ ... }` is mapped to an `Operation::Read`. /// - Every field of a single `mutation { ... }` is mapped to an `Operation::Write`. /// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored. /// - Fields on the queries are mapped to `Field`s, including arguments. /// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s. /// /// Currently unsupported features: /// - Fragments in any form. /// - Variables. /// - Subscription queries. /// - Query names are ignored pub struct GraphQLProtocolAdapter; impl GraphQLProtocolAdapter { pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> { let gql_doc = match graphql_parser::parse_query(query) { Ok(doc) => doc, Err(err) if err.to_string().contains("number too large to fit in target type") | err.to_string().contains("number too small to fit in target type") => { return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. Consider using `BigInt` as field type if you're trying to store large integers.".to_owned())); } err @ Err(_) => err?, }; Self::convert(gql_doc, operation_name) } pub fn
(gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> { let mut operations: Vec<Operation> = match operation { Some(ref op) => gql_doc .definitions .into_iter() .find(|def| Self::matches_operation(def, op)) .ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query."))) .and_then(Self::convert_definition), None => gql_doc .definitions .into_iter() .map(Self::convert_definition) .collect::<crate::Result<Vec<Vec<Operation>>>>() .map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()), }?; let operation = operations .pop() .ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))? .dedup_selections(); Ok(operation) } fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>> { match def { Definition::Fragment(f) => Err(HandlerError::unsupported_feature( "Fragment definition", format!("Fragment '{}', at position {}.", f.name, f.position), )), Definition::Operation(op) => match op { OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature( "Subscription query", format!("At position {}.", s.position), )), OperationDefinition::SelectionSet(s) => Self::convert_query(s), OperationDefinition::Query(q) => Self::convert_query(q.selection_set), OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set), }, } } fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> { Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect()) } fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> { Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect()) } fn convert_selection_set(selection_set: SelectionSet<String>) -> crate::Result<Vec<Selection>> { selection_set .items .into_iter() .map(|item| match item { GqlSelection::Field(f) => { let arguments: Vec<(String, 
ArgumentValue)> = f .arguments .into_iter() .map(|(k, v)| Ok((k, Self::convert_value(v)?))) .collect::<crate::Result<Vec<_>>>()?; let nested_selections = Self::convert_selection_set(f.selection_set)?; Ok(Selection::new(f.name, f.alias, arguments, nested_selections)) } GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature( "Fragment spread", format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position), )), GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature( "Inline fragment", format!("At position {}.", i.position), )), }) .collect() } /// Checks if the given GraphQL definition matches the operation name that should be executed. fn matches_operation(def: &Definition<String>, operation: &str) -> bool { let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some(); match def { Definition::Fragment(_) => false, Definition::Operation(op) => match op { OperationDefinition::Subscription(s) => check(s.name.as_ref()), OperationDefinition::SelectionSet(_) => false, OperationDefinition::Query(q) => check(q.name.as_ref()), OperationDefinition::Mutation(m) => check(m.name.as_ref()), }, } } fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> { match value { Value::Variable(name) => Err(HandlerError::unsupported_feature( "Variable usage", format!("Variable '{name}'."), )), Value::Int(i) => match i.as_i64() { Some(i) => Ok(ArgumentValue::int(i)), None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))), }, Value::Float(f) => match BigDecimal::from_f64(f) { Some(dec) => Ok(ArgumentValue::float(dec)), None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))), }, Value::String(s) => Ok(ArgumentValue::string(s)), Value::Boolean(b) => Ok(ArgumentValue::bool(b)), Value::Null => Ok(ArgumentValue::null()), Value::Enum(e) => Ok(ArgumentValue::r#enum(e)), Value::List(values) => { let values: Vec<ArgumentValue> = values .into_iter() 
.map(Self::convert_value) .collect::<crate::Result<Vec<ArgumentValue>>>()?; Ok(ArgumentValue::list(values)) } Value::Object(map) => { let values = map .into_iter() .map(|(k, v)| Self::convert_value(v).map(|v| (k, v))) .collect::<crate::Result<ArgumentValueObject>>()?; Ok(ArgumentValue::object(values)) } } } } #[cfg(test)] mod tests { use super::*; #[test] fn converts_single_query() { let query = r#" query findTheModelOperation { findOneModel(where: {a_number: {gte: 1}}) { id, large_number, other { name } } } "#; let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap(); assert_eq!(operation.name(), "findOneModel"); assert!(matches!(operation, Operation::Read(_))); let read = operation.into_read().unwrap(); let where_args = ArgumentValue::object([( "a_number".to_string(), ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]), )]); assert_eq!(read.arguments(), [("where".to_string(), where_args)]); let selections = Vec::from([ Selection::new("id", None, [], Vec::new()), Selection::new("large_number", None, [], Vec::new()), Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])), ]); assert_eq!(read.nested_selections(), selections); } #[test] fn converts_single_mutation() { let query = r#" mutation { createOnePost(data: { id: 1, categories: {create: [{id: 1}, {id: 2}]} }) { id, categories { id } } } "#; let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap(); assert_eq!(operation.name(), "createOnePost"); assert!(matches!(operation, Operation::Write(_))); let write = operation.into_write().unwrap(); let data_args = ArgumentValue::object([ ("id".to_string(), ArgumentValue::int(1)), ( "categories".to_string(), ArgumentValue::object([( "create".to_string(), ArgumentValue::list([ ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]), ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]), ]), )]), ), ]); println!("args {:?}", 
write.arguments()); assert_eq!(write.arguments(), [("data".to_string(), data_args)]); } }
convert
identifier_name
protocol_adapter.rs
use crate::HandlerError; use bigdecimal::{BigDecimal, FromPrimitive}; use graphql_parser::query::{ Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value, }; use query_core::query_document::*; /// Protocol adapter for GraphQL -> Query Document. /// /// GraphQL is mapped as following: /// - Every field of a `query { ... }` or single selection block `{ ... }` is mapped to an `Operation::Read`. /// - Every field of a single `mutation { ... }` is mapped to an `Operation::Write`. /// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored. /// - Fields on the queries are mapped to `Field`s, including arguments. /// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s. /// /// Currently unsupported features: /// - Fragments in any form. /// - Variables. /// - Subscription queries. /// - Query names are ignored pub struct GraphQLProtocolAdapter; impl GraphQLProtocolAdapter { pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> { let gql_doc = match graphql_parser::parse_query(query) { Ok(doc) => doc, Err(err) if err.to_string().contains("number too large to fit in target type") | err.to_string().contains("number too small to fit in target type") => { return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. 
Consider using `BigInt` as field type if you're trying to store large integers.".to_owned())); } err @ Err(_) => err?, }; Self::convert(gql_doc, operation_name) } pub fn convert(gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> { let mut operations: Vec<Operation> = match operation { Some(ref op) => gql_doc .definitions .into_iter() .find(|def| Self::matches_operation(def, op)) .ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query."))) .and_then(Self::convert_definition), None => gql_doc .definitions .into_iter() .map(Self::convert_definition) .collect::<crate::Result<Vec<Vec<Operation>>>>() .map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()), }?; let operation = operations .pop() .ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))? .dedup_selections(); Ok(operation) } fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>> { match def { Definition::Fragment(f) => Err(HandlerError::unsupported_feature( "Fragment definition", format!("Fragment '{}', at position {}.", f.name, f.position), )), Definition::Operation(op) => match op { OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature( "Subscription query", format!("At position {}.", s.position), )), OperationDefinition::SelectionSet(s) => Self::convert_query(s), OperationDefinition::Query(q) => Self::convert_query(q.selection_set), OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set), }, } } fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> { Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect()) } fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> { Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect()) } fn convert_selection_set(selection_set: 
SelectionSet<String>) -> crate::Result<Vec<Selection>> { selection_set .items .into_iter() .map(|item| match item { GqlSelection::Field(f) => { let arguments: Vec<(String, ArgumentValue)> = f .arguments .into_iter() .map(|(k, v)| Ok((k, Self::convert_value(v)?))) .collect::<crate::Result<Vec<_>>>()?; let nested_selections = Self::convert_selection_set(f.selection_set)?; Ok(Selection::new(f.name, f.alias, arguments, nested_selections)) } GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature( "Fragment spread", format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position), )), GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature( "Inline fragment", format!("At position {}.", i.position), )), }) .collect() } /// Checks if the given GraphQL definition matches the operation name that should be executed. fn matches_operation(def: &Definition<String>, operation: &str) -> bool { let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some(); match def { Definition::Fragment(_) => false, Definition::Operation(op) => match op { OperationDefinition::Subscription(s) => check(s.name.as_ref()), OperationDefinition::SelectionSet(_) => false, OperationDefinition::Query(q) => check(q.name.as_ref()), OperationDefinition::Mutation(m) => check(m.name.as_ref()), }, } } fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> { match value { Value::Variable(name) => Err(HandlerError::unsupported_feature( "Variable usage", format!("Variable '{name}'."), )), Value::Int(i) => match i.as_i64() { Some(i) => Ok(ArgumentValue::int(i)), None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))), }, Value::Float(f) => match BigDecimal::from_f64(f) { Some(dec) => Ok(ArgumentValue::float(dec)), None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))), }, Value::String(s) => Ok(ArgumentValue::string(s)), Value::Boolean(b) => Ok(ArgumentValue::bool(b)), 
Value::Null => Ok(ArgumentValue::null()), Value::Enum(e) => Ok(ArgumentValue::r#enum(e)), Value::List(values) => { let values: Vec<ArgumentValue> = values .into_iter() .map(Self::convert_value) .collect::<crate::Result<Vec<ArgumentValue>>>()?; Ok(ArgumentValue::list(values)) } Value::Object(map) => { let values = map .into_iter() .map(|(k, v)| Self::convert_value(v).map(|v| (k, v))) .collect::<crate::Result<ArgumentValueObject>>()?; Ok(ArgumentValue::object(values)) } } } } #[cfg(test)] mod tests { use super::*; #[test] fn converts_single_query() { let query = r#" query findTheModelOperation { findOneModel(where: {a_number: {gte: 1}}) { id, large_number, other { name } } } "#; let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap(); assert_eq!(operation.name(), "findOneModel"); assert!(matches!(operation, Operation::Read(_))); let read = operation.into_read().unwrap(); let where_args = ArgumentValue::object([( "a_number".to_string(), ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]), )]); assert_eq!(read.arguments(), [("where".to_string(), where_args)]); let selections = Vec::from([ Selection::new("id", None, [], Vec::new()), Selection::new("large_number", None, [], Vec::new()), Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])), ]); assert_eq!(read.nested_selections(), selections); } #[test] fn converts_single_mutation() { let query = r#" mutation { createOnePost(data: { id: 1, categories: {create: [{id: 1}, {id: 2}]} }) { id, categories { id } } } "#; let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap(); assert_eq!(operation.name(), "createOnePost"); assert!(matches!(operation, Operation::Write(_)));
( "categories".to_string(), ArgumentValue::object([( "create".to_string(), ArgumentValue::list([ ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]), ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]), ]), )]), ), ]); println!("args {:?}", write.arguments()); assert_eq!(write.arguments(), [("data".to_string(), data_args)]); } }
let write = operation.into_write().unwrap(); let data_args = ArgumentValue::object([ ("id".to_string(), ArgumentValue::int(1)),
random_line_split
resource.py
import json from decimal import Decimal from base64 import b64decode from twisted.internet.defer import maybeDeferred, gatherResults from twisted.internet import reactor from twisted.internet.threads import deferToThreadPool from twisted.web import http from twisted.web.client import getPage from twisted.web.resource import Resource, NoResource from twisted.web.server import NOT_DONE_YET from twisted.web.static import File from twisted.python import log from axiom.errors import ItemNotFound from axiom.attributes import AND from bdm.donate import Donation, Donator, donationToDict, donatorToDict from bdm.error import BloodyError, PaypalError from bdm.constants import CODE from valve.source.a2s import ServerQuerier, NoResponseError from valve.steam.id import SteamID as ValveSteamID def steamidTo64(steamid): return ValveSteamID.from_text(steamid).as_64() def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK): """ Serializes C{result} to JSON and writes it to C{request}. @param result: The content to be serialized and written to the request. @type result: An object accepted by json.dumps. @param request: The request object to write JSON to. @type request: L{twisted.web.server.Request} @param code: A code to include in the JSON response. @type code: C{int} @param status: The HTTP status the response will have. @type status: C{int} """ response = { u'code': code.value, u'result': result} request.setHeader('content-type', 'application/json') request.setResponseCode(status) request.write(json.dumps(response)) request.finish() def _mapErrorCodeToStatus(code): """ Maps a L{CODE} constant to a HTTP code. """ if code == 103: return http.NOT_FOUND return http.INTERNAL_SERVER_ERROR def _writeJSONErrorResponse(f, request): """ Serializes a L{Failure} to JSON and writes it to the C{request} @param f: The L{Failure} to serialize. @type f: L{Failure} @param request: The request object to write the JSON to. 
@type request: L{twisted.web.server.Request} """ code = getattr(f.value, 'code', CODE.UNKNOWN) _writeJSONResponse( result=f.getErrorMessage().decode('ascii'), request=request, code=code, status=_mapErrorCodeToStatus(code)) raise f def jsonResult(f): """ Decorator for render_* methods. Serializes the return value or exception to JSON and then writes it to the request object. """ def _inner(self, request): d = maybeDeferred(f, self, request) d.addCallback(_writeJSONResponse, request) d.addErrback(_writeJSONErrorResponse, request) return NOT_DONE_YET return _inner class RootResource(Resource): def __init__(self, store, steamKey, paypalSandbox, threadPool): Resource.__init__(self) self.putChild("api", DonationAPI(store, steamKey, threadPool)) self.putChild("paypal", PayPal(store, paypalSandbox)) self.putChild("static", File('bdm/static/')) self.putChild("", File('bdm/static/html/index.html')) class PayPal(Resource): isLeaf = True def __init__(self, store, sandbox): Resource.__init__(self) self.store = store self.SANDBOX = sandbox def verify(self, request): """ Verify PayPal IPN data. """ paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr' if not self.SANDBOX: paypalURL = 'https://www.paypal.com/cgi-bin/webscr' def _cb(response): if response == 'INVALID': raise PaypalError( 'IPN data invalid. 
data: %s', (data,)) elif response == 'VERIFIED': return True else: raise PaypalError('Unrecognized verification response: %s', (response,)) data = request.content.read() params = '?cmd=_notify-validate&' + data d = getPage(paypalURL+params, method='POST') d.addCallback(_cb) return d def _process(self, data): paymentStatus = data['payment_status'][0].lower() method = getattr(self, '_payment_%s' % (paymentStatus,)) if method is not None: method(data) else: log.err('Unknown payment status: %s' % (paymentStatus,)) def _payment_completed(self, data): txn_id = data['txn_id'][0] amount = data.get('settle_amount', data['mc_gross'])[0] custom = json.loads(b64decode(data['custom'][0])) anonymous = custom['anonymous'] steamID = custom['steamid'] if steamID: steamID = unicode(steamidTo64(steamID)) donator = self.store.findOrCreate( Donator, steamID=steamID, anonymous=anonymous) donator.addDonation(Decimal(amount), unicode(txn_id)) def _payment_refunded(self, data): donation = self.store.query( Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0]))) donation.deleteFromStore() def _payment_reversed(self, data): donation = self.store.findUnique( Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0]))) donation.deleteFromStore() def _payment_canceled_reversal(self, data): #XXX: TODO if ithere is ever a reversal cancelled. log.err("Reversal cancelled:") log.err(data) def render_POST(self, request): """ Recieves and verifies PayPal callbacks. """ log.msg("Paypal callback:") log.msg(request.args) d = self.verify(request) d.addCallback(lambda ign: self._process(request.args)) d.addErrback(log.err) return '' class DonationAPI(Resource): isLeaf = True def __init__(self, store, steamKey, threadPool): self.store = store self.steamKey = steamKey self.threadPool = threadPool Resource.__init__(self) def recent(self, limit): """ Retrieve a list of recent donations. @param limit: The amount of donations to return. @type limit: L{int} @return: A list of donations. 
@rtype: L{list} of L{dict}s. """ def _cb(players, donations): donators = [] for donation in donations: player = players[donation.donator.steamID].copy() player['date'] = donation.date.asPOSIXTimestamp() player['amount'] = str(donation.amount) donators.append(player) return donators donations = [] steamids = set() for donation in self.store.query(Donation, AND(Donation.donator == Donator.storeID, Donator.anonymous == False, Donator.steamID != None), limit=limit, sort=Donation.date.descending): steamids.add(donation.donator.steamID) donations.append(donation) d = self.getPlayerSummaries(steamids) d.addCallback(_cb, donations) return d def steamID(self, steamid): try: donator = self.store.findUnique( Donator, Donator.steamID == unicode(steamid)) except ItemNotFound: raise BloodyError("SteamID '%s' not found." % (steamid,)) donations = [] for donation in donator.donations: donations.append(donationToDict(donation)) return donations def getPlayerSummaries(self, steamids): def _cb(response): r = json.loads(response)['response'] players = {} for player in r['players']: p = player['steamid'] players[p] = player return players url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?' 
params = 'key=%s&steamids=%s' % (self.steamKey, ','.join(steamids)) d = getPage(str(url+params)) d.addCallback(_cb) return d @jsonResult def render_GET(self, request): if not request.postpath: return "nope" name = request.postpath[0] if name == u'steamid': if len(request.postpath[1]) <= 1 or request.postpath[1] is None: raise Exception("No SteamID provided.") return self.steamID(request.postpath[1]) if name == u'recent': try: limit = request.postpath[1] except IndexError: limit = 5 return self.recent(limit) if name == u'top': try: limit = request.postpath[1] except IndexError: limit = 5 return self.getTop(limit) return NoResource('') @jsonResult def render_POST(self, request): if not request.postpath: return "maybe sam dox" name = request.postpath[0] content = json.loads(request.content.read()) if not content: return 'No JSON provided' if name == u'servers': return self.serverStats(content) return NoResource('') def getTop(self, limit): """ Retrieves a list of donators sorted by total donation amount. """ def _cb(info, donators): players = [] for donator in donators: players.append(dict(donator, **info[donator['steamID']])) return players donators = [] steamIDs = [] for d in self.store.query(Donator, AND(Donator.anonymous == False, Donator.steamID != None), sort=Donator.totalAmount.desc, limit=limit): steamIDs.append(d.steamID) donators.append(donatorToDict(d)) d = self.getPlayerSummaries(steamIDs) d.addCallback(_cb, donators) return d def serverStats(self, servers, querier=ServerQuerier): def getInfo(server): def _tx(): q = querier(server) try: info = q.info() return {'server_name': info['server_name'], 'map': info['map'], 'player_count': info['player_count'], 'max_players': info['max_players'], 'online': True, 'location': server[2]} except NoResponseError: return {'server_name': server[0], 'online': False, 'location': server[2]} return deferToThreadPool(reactor, self.threadPool, _tx) deferreds = [] for server in servers:
d = gatherResults(deferreds, consumeErrors=True) return d
deferreds.append(getInfo(server))
conditional_block
resource.py
import json from decimal import Decimal from base64 import b64decode from twisted.internet.defer import maybeDeferred, gatherResults from twisted.internet import reactor from twisted.internet.threads import deferToThreadPool from twisted.web import http from twisted.web.client import getPage from twisted.web.resource import Resource, NoResource from twisted.web.server import NOT_DONE_YET from twisted.web.static import File from twisted.python import log from axiom.errors import ItemNotFound from axiom.attributes import AND from bdm.donate import Donation, Donator, donationToDict, donatorToDict from bdm.error import BloodyError, PaypalError from bdm.constants import CODE from valve.source.a2s import ServerQuerier, NoResponseError from valve.steam.id import SteamID as ValveSteamID def steamidTo64(steamid):
def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK): """ Serializes C{result} to JSON and writes it to C{request}. @param result: The content to be serialized and written to the request. @type result: An object accepted by json.dumps. @param request: The request object to write JSON to. @type request: L{twisted.web.server.Request} @param code: A code to include in the JSON response. @type code: C{int} @param status: The HTTP status the response will have. @type status: C{int} """ response = { u'code': code.value, u'result': result} request.setHeader('content-type', 'application/json') request.setResponseCode(status) request.write(json.dumps(response)) request.finish() def _mapErrorCodeToStatus(code): """ Maps a L{CODE} constant to a HTTP code. """ if code == 103: return http.NOT_FOUND return http.INTERNAL_SERVER_ERROR def _writeJSONErrorResponse(f, request): """ Serializes a L{Failure} to JSON and writes it to the C{request} @param f: The L{Failure} to serialize. @type f: L{Failure} @param request: The request object to write the JSON to. @type request: L{twisted.web.server.Request} """ code = getattr(f.value, 'code', CODE.UNKNOWN) _writeJSONResponse( result=f.getErrorMessage().decode('ascii'), request=request, code=code, status=_mapErrorCodeToStatus(code)) raise f def jsonResult(f): """ Decorator for render_* methods. Serializes the return value or exception to JSON and then writes it to the request object. 
""" def _inner(self, request): d = maybeDeferred(f, self, request) d.addCallback(_writeJSONResponse, request) d.addErrback(_writeJSONErrorResponse, request) return NOT_DONE_YET return _inner class RootResource(Resource): def __init__(self, store, steamKey, paypalSandbox, threadPool): Resource.__init__(self) self.putChild("api", DonationAPI(store, steamKey, threadPool)) self.putChild("paypal", PayPal(store, paypalSandbox)) self.putChild("static", File('bdm/static/')) self.putChild("", File('bdm/static/html/index.html')) class PayPal(Resource): isLeaf = True def __init__(self, store, sandbox): Resource.__init__(self) self.store = store self.SANDBOX = sandbox def verify(self, request): """ Verify PayPal IPN data. """ paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr' if not self.SANDBOX: paypalURL = 'https://www.paypal.com/cgi-bin/webscr' def _cb(response): if response == 'INVALID': raise PaypalError( 'IPN data invalid. data: %s', (data,)) elif response == 'VERIFIED': return True else: raise PaypalError('Unrecognized verification response: %s', (response,)) data = request.content.read() params = '?cmd=_notify-validate&' + data d = getPage(paypalURL+params, method='POST') d.addCallback(_cb) return d def _process(self, data): paymentStatus = data['payment_status'][0].lower() method = getattr(self, '_payment_%s' % (paymentStatus,)) if method is not None: method(data) else: log.err('Unknown payment status: %s' % (paymentStatus,)) def _payment_completed(self, data): txn_id = data['txn_id'][0] amount = data.get('settle_amount', data['mc_gross'])[0] custom = json.loads(b64decode(data['custom'][0])) anonymous = custom['anonymous'] steamID = custom['steamid'] if steamID: steamID = unicode(steamidTo64(steamID)) donator = self.store.findOrCreate( Donator, steamID=steamID, anonymous=anonymous) donator.addDonation(Decimal(amount), unicode(txn_id)) def _payment_refunded(self, data): donation = self.store.query( Donation, AND(Donation.paypalID == 
unicode(data['parent_txn_id'][0]))) donation.deleteFromStore() def _payment_reversed(self, data): donation = self.store.findUnique( Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0]))) donation.deleteFromStore() def _payment_canceled_reversal(self, data): #XXX: TODO if ithere is ever a reversal cancelled. log.err("Reversal cancelled:") log.err(data) def render_POST(self, request): """ Recieves and verifies PayPal callbacks. """ log.msg("Paypal callback:") log.msg(request.args) d = self.verify(request) d.addCallback(lambda ign: self._process(request.args)) d.addErrback(log.err) return '' class DonationAPI(Resource): isLeaf = True def __init__(self, store, steamKey, threadPool): self.store = store self.steamKey = steamKey self.threadPool = threadPool Resource.__init__(self) def recent(self, limit): """ Retrieve a list of recent donations. @param limit: The amount of donations to return. @type limit: L{int} @return: A list of donations. @rtype: L{list} of L{dict}s. """ def _cb(players, donations): donators = [] for donation in donations: player = players[donation.donator.steamID].copy() player['date'] = donation.date.asPOSIXTimestamp() player['amount'] = str(donation.amount) donators.append(player) return donators donations = [] steamids = set() for donation in self.store.query(Donation, AND(Donation.donator == Donator.storeID, Donator.anonymous == False, Donator.steamID != None), limit=limit, sort=Donation.date.descending): steamids.add(donation.donator.steamID) donations.append(donation) d = self.getPlayerSummaries(steamids) d.addCallback(_cb, donations) return d def steamID(self, steamid): try: donator = self.store.findUnique( Donator, Donator.steamID == unicode(steamid)) except ItemNotFound: raise BloodyError("SteamID '%s' not found." 
% (steamid,)) donations = [] for donation in donator.donations: donations.append(donationToDict(donation)) return donations def getPlayerSummaries(self, steamids): def _cb(response): r = json.loads(response)['response'] players = {} for player in r['players']: p = player['steamid'] players[p] = player return players url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?' params = 'key=%s&steamids=%s' % (self.steamKey, ','.join(steamids)) d = getPage(str(url+params)) d.addCallback(_cb) return d @jsonResult def render_GET(self, request): if not request.postpath: return "nope" name = request.postpath[0] if name == u'steamid': if len(request.postpath[1]) <= 1 or request.postpath[1] is None: raise Exception("No SteamID provided.") return self.steamID(request.postpath[1]) if name == u'recent': try: limit = request.postpath[1] except IndexError: limit = 5 return self.recent(limit) if name == u'top': try: limit = request.postpath[1] except IndexError: limit = 5 return self.getTop(limit) return NoResource('') @jsonResult def render_POST(self, request): if not request.postpath: return "maybe sam dox" name = request.postpath[0] content = json.loads(request.content.read()) if not content: return 'No JSON provided' if name == u'servers': return self.serverStats(content) return NoResource('') def getTop(self, limit): """ Retrieves a list of donators sorted by total donation amount. 
""" def _cb(info, donators): players = [] for donator in donators: players.append(dict(donator, **info[donator['steamID']])) return players donators = [] steamIDs = [] for d in self.store.query(Donator, AND(Donator.anonymous == False, Donator.steamID != None), sort=Donator.totalAmount.desc, limit=limit): steamIDs.append(d.steamID) donators.append(donatorToDict(d)) d = self.getPlayerSummaries(steamIDs) d.addCallback(_cb, donators) return d def serverStats(self, servers, querier=ServerQuerier): def getInfo(server): def _tx(): q = querier(server) try: info = q.info() return {'server_name': info['server_name'], 'map': info['map'], 'player_count': info['player_count'], 'max_players': info['max_players'], 'online': True, 'location': server[2]} except NoResponseError: return {'server_name': server[0], 'online': False, 'location': server[2]} return deferToThreadPool(reactor, self.threadPool, _tx) deferreds = [] for server in servers: deferreds.append(getInfo(server)) d = gatherResults(deferreds, consumeErrors=True) return d
return ValveSteamID.from_text(steamid).as_64()
identifier_body
resource.py
import json from decimal import Decimal from base64 import b64decode from twisted.internet.defer import maybeDeferred, gatherResults from twisted.internet import reactor from twisted.internet.threads import deferToThreadPool from twisted.web import http from twisted.web.client import getPage from twisted.web.resource import Resource, NoResource from twisted.web.server import NOT_DONE_YET from twisted.web.static import File from twisted.python import log from axiom.errors import ItemNotFound from axiom.attributes import AND from bdm.donate import Donation, Donator, donationToDict, donatorToDict from bdm.error import BloodyError, PaypalError from bdm.constants import CODE from valve.source.a2s import ServerQuerier, NoResponseError from valve.steam.id import SteamID as ValveSteamID def steamidTo64(steamid): return ValveSteamID.from_text(steamid).as_64() def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK): """ Serializes C{result} to JSON and writes it to C{request}. @param result: The content to be serialized and written to the request. @type result: An object accepted by json.dumps. @param request: The request object to write JSON to. @type request: L{twisted.web.server.Request} @param code: A code to include in the JSON response. @type code: C{int} @param status: The HTTP status the response will have. @type status: C{int} """ response = { u'code': code.value, u'result': result} request.setHeader('content-type', 'application/json') request.setResponseCode(status) request.write(json.dumps(response)) request.finish() def _mapErrorCodeToStatus(code): """ Maps a L{CODE} constant to a HTTP code. """ if code == 103: return http.NOT_FOUND return http.INTERNAL_SERVER_ERROR def _writeJSONErrorResponse(f, request): """ Serializes a L{Failure} to JSON and writes it to the C{request} @param f: The L{Failure} to serialize. @type f: L{Failure} @param request: The request object to write the JSON to. 
@type request: L{twisted.web.server.Request} """ code = getattr(f.value, 'code', CODE.UNKNOWN) _writeJSONResponse( result=f.getErrorMessage().decode('ascii'), request=request, code=code, status=_mapErrorCodeToStatus(code)) raise f def jsonResult(f): """ Decorator for render_* methods. Serializes the return value or exception to JSON and then writes it to the request object. """ def _inner(self, request): d = maybeDeferred(f, self, request) d.addCallback(_writeJSONResponse, request) d.addErrback(_writeJSONErrorResponse, request) return NOT_DONE_YET return _inner class RootResource(Resource): def __init__(self, store, steamKey, paypalSandbox, threadPool): Resource.__init__(self) self.putChild("api", DonationAPI(store, steamKey, threadPool)) self.putChild("paypal", PayPal(store, paypalSandbox)) self.putChild("static", File('bdm/static/')) self.putChild("", File('bdm/static/html/index.html')) class PayPal(Resource): isLeaf = True def __init__(self, store, sandbox): Resource.__init__(self) self.store = store self.SANDBOX = sandbox def verify(self, request): """ Verify PayPal IPN data. """ paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr' if not self.SANDBOX: paypalURL = 'https://www.paypal.com/cgi-bin/webscr'
elif response == 'VERIFIED': return True else: raise PaypalError('Unrecognized verification response: %s', (response,)) data = request.content.read() params = '?cmd=_notify-validate&' + data d = getPage(paypalURL+params, method='POST') d.addCallback(_cb) return d def _process(self, data): paymentStatus = data['payment_status'][0].lower() method = getattr(self, '_payment_%s' % (paymentStatus,)) if method is not None: method(data) else: log.err('Unknown payment status: %s' % (paymentStatus,)) def _payment_completed(self, data): txn_id = data['txn_id'][0] amount = data.get('settle_amount', data['mc_gross'])[0] custom = json.loads(b64decode(data['custom'][0])) anonymous = custom['anonymous'] steamID = custom['steamid'] if steamID: steamID = unicode(steamidTo64(steamID)) donator = self.store.findOrCreate( Donator, steamID=steamID, anonymous=anonymous) donator.addDonation(Decimal(amount), unicode(txn_id)) def _payment_refunded(self, data): donation = self.store.query( Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0]))) donation.deleteFromStore() def _payment_reversed(self, data): donation = self.store.findUnique( Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0]))) donation.deleteFromStore() def _payment_canceled_reversal(self, data): #XXX: TODO if ithere is ever a reversal cancelled. log.err("Reversal cancelled:") log.err(data) def render_POST(self, request): """ Recieves and verifies PayPal callbacks. """ log.msg("Paypal callback:") log.msg(request.args) d = self.verify(request) d.addCallback(lambda ign: self._process(request.args)) d.addErrback(log.err) return '' class DonationAPI(Resource): isLeaf = True def __init__(self, store, steamKey, threadPool): self.store = store self.steamKey = steamKey self.threadPool = threadPool Resource.__init__(self) def recent(self, limit): """ Retrieve a list of recent donations. @param limit: The amount of donations to return. @type limit: L{int} @return: A list of donations. 
@rtype: L{list} of L{dict}s. """ def _cb(players, donations): donators = [] for donation in donations: player = players[donation.donator.steamID].copy() player['date'] = donation.date.asPOSIXTimestamp() player['amount'] = str(donation.amount) donators.append(player) return donators donations = [] steamids = set() for donation in self.store.query(Donation, AND(Donation.donator == Donator.storeID, Donator.anonymous == False, Donator.steamID != None), limit=limit, sort=Donation.date.descending): steamids.add(donation.donator.steamID) donations.append(donation) d = self.getPlayerSummaries(steamids) d.addCallback(_cb, donations) return d def steamID(self, steamid): try: donator = self.store.findUnique( Donator, Donator.steamID == unicode(steamid)) except ItemNotFound: raise BloodyError("SteamID '%s' not found." % (steamid,)) donations = [] for donation in donator.donations: donations.append(donationToDict(donation)) return donations def getPlayerSummaries(self, steamids): def _cb(response): r = json.loads(response)['response'] players = {} for player in r['players']: p = player['steamid'] players[p] = player return players url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?' 
params = 'key=%s&steamids=%s' % (self.steamKey, ','.join(steamids)) d = getPage(str(url+params)) d.addCallback(_cb) return d @jsonResult def render_GET(self, request): if not request.postpath: return "nope" name = request.postpath[0] if name == u'steamid': if len(request.postpath[1]) <= 1 or request.postpath[1] is None: raise Exception("No SteamID provided.") return self.steamID(request.postpath[1]) if name == u'recent': try: limit = request.postpath[1] except IndexError: limit = 5 return self.recent(limit) if name == u'top': try: limit = request.postpath[1] except IndexError: limit = 5 return self.getTop(limit) return NoResource('') @jsonResult def render_POST(self, request): if not request.postpath: return "maybe sam dox" name = request.postpath[0] content = json.loads(request.content.read()) if not content: return 'No JSON provided' if name == u'servers': return self.serverStats(content) return NoResource('') def getTop(self, limit): """ Retrieves a list of donators sorted by total donation amount. 
""" def _cb(info, donators): players = [] for donator in donators: players.append(dict(donator, **info[donator['steamID']])) return players donators = [] steamIDs = [] for d in self.store.query(Donator, AND(Donator.anonymous == False, Donator.steamID != None), sort=Donator.totalAmount.desc, limit=limit): steamIDs.append(d.steamID) donators.append(donatorToDict(d)) d = self.getPlayerSummaries(steamIDs) d.addCallback(_cb, donators) return d def serverStats(self, servers, querier=ServerQuerier): def getInfo(server): def _tx(): q = querier(server) try: info = q.info() return {'server_name': info['server_name'], 'map': info['map'], 'player_count': info['player_count'], 'max_players': info['max_players'], 'online': True, 'location': server[2]} except NoResponseError: return {'server_name': server[0], 'online': False, 'location': server[2]} return deferToThreadPool(reactor, self.threadPool, _tx) deferreds = [] for server in servers: deferreds.append(getInfo(server)) d = gatherResults(deferreds, consumeErrors=True) return d
def _cb(response): if response == 'INVALID': raise PaypalError( 'IPN data invalid. data: %s', (data,))
random_line_split
resource.py
import json from decimal import Decimal from base64 import b64decode from twisted.internet.defer import maybeDeferred, gatherResults from twisted.internet import reactor from twisted.internet.threads import deferToThreadPool from twisted.web import http from twisted.web.client import getPage from twisted.web.resource import Resource, NoResource from twisted.web.server import NOT_DONE_YET from twisted.web.static import File from twisted.python import log from axiom.errors import ItemNotFound from axiom.attributes import AND from bdm.donate import Donation, Donator, donationToDict, donatorToDict from bdm.error import BloodyError, PaypalError from bdm.constants import CODE from valve.source.a2s import ServerQuerier, NoResponseError from valve.steam.id import SteamID as ValveSteamID def steamidTo64(steamid): return ValveSteamID.from_text(steamid).as_64() def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK): """ Serializes C{result} to JSON and writes it to C{request}. @param result: The content to be serialized and written to the request. @type result: An object accepted by json.dumps. @param request: The request object to write JSON to. @type request: L{twisted.web.server.Request} @param code: A code to include in the JSON response. @type code: C{int} @param status: The HTTP status the response will have. @type status: C{int} """ response = { u'code': code.value, u'result': result} request.setHeader('content-type', 'application/json') request.setResponseCode(status) request.write(json.dumps(response)) request.finish() def _mapErrorCodeToStatus(code): """ Maps a L{CODE} constant to a HTTP code. """ if code == 103: return http.NOT_FOUND return http.INTERNAL_SERVER_ERROR def _writeJSONErrorResponse(f, request): """ Serializes a L{Failure} to JSON and writes it to the C{request} @param f: The L{Failure} to serialize. @type f: L{Failure} @param request: The request object to write the JSON to. 
@type request: L{twisted.web.server.Request} """ code = getattr(f.value, 'code', CODE.UNKNOWN) _writeJSONResponse( result=f.getErrorMessage().decode('ascii'), request=request, code=code, status=_mapErrorCodeToStatus(code)) raise f def jsonResult(f): """ Decorator for render_* methods. Serializes the return value or exception to JSON and then writes it to the request object. """ def _inner(self, request): d = maybeDeferred(f, self, request) d.addCallback(_writeJSONResponse, request) d.addErrback(_writeJSONErrorResponse, request) return NOT_DONE_YET return _inner class RootResource(Resource): def __init__(self, store, steamKey, paypalSandbox, threadPool): Resource.__init__(self) self.putChild("api", DonationAPI(store, steamKey, threadPool)) self.putChild("paypal", PayPal(store, paypalSandbox)) self.putChild("static", File('bdm/static/')) self.putChild("", File('bdm/static/html/index.html')) class PayPal(Resource): isLeaf = True def __init__(self, store, sandbox): Resource.__init__(self) self.store = store self.SANDBOX = sandbox def verify(self, request): """ Verify PayPal IPN data. """ paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr' if not self.SANDBOX: paypalURL = 'https://www.paypal.com/cgi-bin/webscr' def _cb(response): if response == 'INVALID': raise PaypalError( 'IPN data invalid. 
data: %s', (data,)) elif response == 'VERIFIED': return True else: raise PaypalError('Unrecognized verification response: %s', (response,)) data = request.content.read() params = '?cmd=_notify-validate&' + data d = getPage(paypalURL+params, method='POST') d.addCallback(_cb) return d def _process(self, data): paymentStatus = data['payment_status'][0].lower() method = getattr(self, '_payment_%s' % (paymentStatus,)) if method is not None: method(data) else: log.err('Unknown payment status: %s' % (paymentStatus,)) def _payment_completed(self, data): txn_id = data['txn_id'][0] amount = data.get('settle_amount', data['mc_gross'])[0] custom = json.loads(b64decode(data['custom'][0])) anonymous = custom['anonymous'] steamID = custom['steamid'] if steamID: steamID = unicode(steamidTo64(steamID)) donator = self.store.findOrCreate( Donator, steamID=steamID, anonymous=anonymous) donator.addDonation(Decimal(amount), unicode(txn_id)) def _payment_refunded(self, data): donation = self.store.query( Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0]))) donation.deleteFromStore() def _payment_reversed(self, data): donation = self.store.findUnique( Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0]))) donation.deleteFromStore() def _payment_canceled_reversal(self, data): #XXX: TODO if ithere is ever a reversal cancelled. log.err("Reversal cancelled:") log.err(data) def render_POST(self, request): """ Recieves and verifies PayPal callbacks. """ log.msg("Paypal callback:") log.msg(request.args) d = self.verify(request) d.addCallback(lambda ign: self._process(request.args)) d.addErrback(log.err) return '' class DonationAPI(Resource): isLeaf = True def __init__(self, store, steamKey, threadPool): self.store = store self.steamKey = steamKey self.threadPool = threadPool Resource.__init__(self) def recent(self, limit): """ Retrieve a list of recent donations. @param limit: The amount of donations to return. @type limit: L{int} @return: A list of donations. 
@rtype: L{list} of L{dict}s. """ def _cb(players, donations): donators = [] for donation in donations: player = players[donation.donator.steamID].copy() player['date'] = donation.date.asPOSIXTimestamp() player['amount'] = str(donation.amount) donators.append(player) return donators donations = [] steamids = set() for donation in self.store.query(Donation, AND(Donation.donator == Donator.storeID, Donator.anonymous == False, Donator.steamID != None), limit=limit, sort=Donation.date.descending): steamids.add(donation.donator.steamID) donations.append(donation) d = self.getPlayerSummaries(steamids) d.addCallback(_cb, donations) return d def steamID(self, steamid): try: donator = self.store.findUnique( Donator, Donator.steamID == unicode(steamid)) except ItemNotFound: raise BloodyError("SteamID '%s' not found." % (steamid,)) donations = [] for donation in donator.donations: donations.append(donationToDict(donation)) return donations def getPlayerSummaries(self, steamids): def _cb(response): r = json.loads(response)['response'] players = {} for player in r['players']: p = player['steamid'] players[p] = player return players url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?' params = 'key=%s&steamids=%s' % (self.steamKey, ','.join(steamids)) d = getPage(str(url+params)) d.addCallback(_cb) return d @jsonResult def render_GET(self, request): if not request.postpath: return "nope" name = request.postpath[0] if name == u'steamid': if len(request.postpath[1]) <= 1 or request.postpath[1] is None: raise Exception("No SteamID provided.") return self.steamID(request.postpath[1]) if name == u'recent': try: limit = request.postpath[1] except IndexError: limit = 5 return self.recent(limit) if name == u'top': try: limit = request.postpath[1] except IndexError: limit = 5 return self.getTop(limit) return NoResource('') @jsonResult def
(self, request): if not request.postpath: return "maybe sam dox" name = request.postpath[0] content = json.loads(request.content.read()) if not content: return 'No JSON provided' if name == u'servers': return self.serverStats(content) return NoResource('') def getTop(self, limit): """ Retrieves a list of donators sorted by total donation amount. """ def _cb(info, donators): players = [] for donator in donators: players.append(dict(donator, **info[donator['steamID']])) return players donators = [] steamIDs = [] for d in self.store.query(Donator, AND(Donator.anonymous == False, Donator.steamID != None), sort=Donator.totalAmount.desc, limit=limit): steamIDs.append(d.steamID) donators.append(donatorToDict(d)) d = self.getPlayerSummaries(steamIDs) d.addCallback(_cb, donators) return d def serverStats(self, servers, querier=ServerQuerier): def getInfo(server): def _tx(): q = querier(server) try: info = q.info() return {'server_name': info['server_name'], 'map': info['map'], 'player_count': info['player_count'], 'max_players': info['max_players'], 'online': True, 'location': server[2]} except NoResponseError: return {'server_name': server[0], 'online': False, 'location': server[2]} return deferToThreadPool(reactor, self.threadPool, _tx) deferreds = [] for server in servers: deferreds.append(getInfo(server)) d = gatherResults(deferreds, consumeErrors=True) return d
render_POST
identifier_name
__init__.py
import numpy as np import pandas as pd import plotly.express as px import plotly.graph_objs as go # import plotly.tools as tls import matplotlib.pyplot as plt from scipy.spatial import distance from sklearn.utils.extmath import randomized_svd from tqdm import tqdm class kohonen: """ Matrix SOM Initialize weight matrix For epoch <- 1 to N do Choose input matrix observation randomly - i For k <- 1 to n_node do compute d(input matrix i, weight matrix k) end Best Matching Unit = winning node = node with the smallest distance For k <- 1 to n_node do update weight matrix end end Update weight mi(t + 1) = mi(t) + ⍺(t) * hci(t) [x(t) - mi(t)] Neighborhood function hci(t) = h(dist(rc, ri), t) rc, ri: location vectors of node c and i if Gaussian: hci(t) = exp(-dist^2 / (2 * σ^2(t))) Radius: σ(t) = σ_0 * exp(-t / ƛ) Learning rate: ⍺(t) = ⍺_0 * exp(-t / ƛ) """ def __init__( self, data, xdim, ydim, topo = "rectangular", neighbor = "gaussian", dist = "frobenius", decay = "exponential", seed = None ): """ :param data: 3d array. processed data set for Online SOM Detector :param xdim: Number of x-grid :param ydim: Number of y-grid :param topo: Topology of output space - rectangular or hexagonal :param neighbor: Neighborhood function - gaussian, bubble, or triangular :param dist: Distance function - frobenius, nuclear, mahalanobis (just form of mahalanobis), or :param decay: decaying learning rate and radius - exponential or linear :param seed: Random seed """ np.random.seed(seed = seed) if xdim is None or ydim is None: xdim = int(np.sqrt(5 * np.sqrt(data.shape[0]))) ydim = xdim self.net_dim = np.array([xdim, ydim]) self.ncol = data.shape[2] self.nrow = data.shape[1] # Initialize codebook matrix self.init_weight() # Topology topo_types = ["rectangular", "hexagonal"] if topo not in topo_types: raise ValueError("Invalid topo. 
Expected one of: %s" % topo_types) self.topo = topo self.init_grid() self.dist_node() # Neighborhood function neighbor_types = ["gaussian", "bubble", "triangular"] if neighbor not in neighbor_types: raise ValueError("Invalid neighbor. Expected one of: %s" % neighbor_types) self.neighbor_func = neighbor # Distance function dist_type = ["frobenius", "nuclear", "mahalanobis", "eros"] if dist not in dist_type: raise ValueError("Invalid dist. Expected one of: %s" % dist_type) self.dist_func = dist # Decay decay_types = ["exponential", "linear"] if decay not in decay_types: raise ValueError("Invalid decay. Expected one of: %s" % decay_types) self.decay_func = decay # som() self.epoch = None self.alpha = None self.sigma = None self.initial_learn = None self.initial_r = None # find_bmu() self.bmu = None # plot self.reconstruction_error = None self.dist_normal = None self.project = None def init_weight(self): self.net = np.random.rand(self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol) def init_grid(self): """ [row_pts, col_pts] xdim x ydim rows (points) [1,1] [2,1] [1,2] [2,2] 2---------> 1--------->^ """ self.pts = np.array( np.meshgrid( np.arange(self.net_dim[0]) + 1, np.arange(self.net_dim[1]) + 1 ) ).reshape(2, np.prod(self.net_dim)).T if self.topo == "hexagonal": self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2) self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1] def som(self, data, epoch = 100, init_rate = None, init_radius = None, keep_net = False): """ :param data: 3d array. processed data set for Online SOM Detector :param epoch: epoch number :param init_rate: initial learning rate :param init_radius: initial radius of BMU neighborhood :param keep_net: keep every weight matrix path? 
""" num_obs = data.shape[0] obs_id = np.arange(num_obs) chose_i = np.empty(1) node_id = None hci = None self.epoch = epoch if keep_net: self.net_path = np.empty( (self.epoch, self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol) ) # learning rate if init_rate is None: init_rate = .1 self.alpha = init_rate self.initial_learn = init_rate # radius of neighborhood if init_radius is None: init_radius = np.quantile(self.dci, q = 2 / 3, axis = None) self.sigma = init_radius self.initial_r = init_radius # time constant (lambda) rate_constant = epoch radius_constant = epoch / np.log(self.sigma) # distance between nodes bmu_dist = self.dci[1, :] rcst_err = np.empty(epoch) for i in tqdm(range(epoch), desc = "epoch"): chose_i = int(np.random.choice(obs_id, size = 1)) # BMU - self.bmu self.find_bmu(data, chose_i) # reconstruction error - sum of distances from BMU rcst_err[i] = np.sum([np.square(self.dist_mat(data, j, self.bmu.astype(int))) for j in range(data.shape[0])]) bmu_dist = self.dci[self.bmu.astype(int), :].flatten() # decay self.sigma = self.decay(init_radius, i + 1, radius_constant) self.alpha = self.decay(init_rate, i + 1, rate_constant) # neighboring nodes (includes BMU) neighbor_neuron = np.argwhere(bmu_dist <= self.sigma).flatten() for k in tqdm(range(neighbor_neuron.shape[0]), desc = "updating"): node_id = neighbor_neuron[k] hci = self.neighborhood(bmu_dist[node_id], self.sigma) # update codebook matrices of neighboring nodes self.net[node_id, :, :] += \ self.alpha * hci * \ (data[chose_i, :, :] - self.net[node_id, :, :]).reshape((self.nrow, self.ncol)) if keep_net: self.net_path[i, :, :, :] = self.net self.reconstruction_error = pd.DataFrame({"Epoch": np.arange(self.epoch) + 1, "Reconstruction Error": rcst_err}) def find_bmu(self, data, index): """ :param data: Processed data set for SOM. :param index: Randomly chosen observation id for input matrix among 3d tensor set. 
""" dist_code = np.asarray([self.dist_mat(data, index, j) for j in range(self.net.shape[0])]) self.bmu = np.argmin(dist_code) def dist_mat(self, data, index, node): """ :param data: Processed data set for SOM. :param index: Randomly chosen observation id for input matrix among 3d tensor set. :param node: node index :return: distance between input matrix observation and weight matrix of the node """ if self.dist_func == "frobenius": return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "fro") elif self.dist_func == "nuclear": return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "nuc") elif self.dist_func == "mahalanobis": x = data[index, :, :] - self.net[node, :, :] covmat = np.cov(x, rowvar = False) # spectral decomposition sigma = udu.T w, v = np.linalg.eigh(covmat) # inverse = ud^-1u.T w[w == 0] += .0001 covinv = v.dot(np.diag(1 / w)).dot(v.T) ss = x.dot(covinv).dot(x.T) return np.sqrt(np.trace(ss)) elif self.dist_func == "eros": x = data[index, :, :] - self.net[node, :, :] covmat = np.cov(x, rowvar = False) # svd(covariance) u, s, vh = randomized_svd(covmat, n_components = covmat.shape[1], n_iter = 1, random_state = None) # normalize eigenvalue w = s / s.sum() # distance ss = np.multiply(vh, w).dot(vh.T) return np.sqrt(np.trace(ss)) def dist_node(self): """ :return: distance matrix of SOM neuron """ if self.topo == "hexagonal": self.dci = distance.cdist(self.pts, self.pts, "euclidean") elif self.topo == "rectangular": self.dci = distance.cdist(self.pts, self.pts, "chebyshev") def decay(self, init, time, time_constant): """ :param init: initial value :param time: t :param time_constant: lambda :return: decaying value of alpha or sigma """ if self.decay_func == "exponential": return init * np.exp(-time / time_constant) elif self.decay_func == "linear": return init * (1 - time / time_constant) def neighborhood(self, node_distance, radius): """ :param node_distance: Distance between SOM neurons :param radius: Radius of BMU neighborhood :return: 
Neighborhood function hci """ if self.neighbor_func == "gaussian": return np.exp(-node_distance ** 2 / (2 * (radius ** 2))) elif self.neighbor_func == "bubble": if node_distance <= radius: return 1.0
else: return 0.0 elif self.neighbor_func == "triangular": if node_distance <= radius: return 1 - np.abs(node_distance) / radius else: return 0.0 def dist_weight(self, data, index): """ :param data: Processed data set for SOM :param index: index for data :return: minimum distance between input matrix and weight matrices, its node id (BMU) """ dist_wt = np.asarray([self.dist_mat(data, index, j) for j in tqdm(range(self.net.shape[0]), desc = "bmu")]) return np.min(dist_wt), np.argmin(dist_wt) def plot_error(self): """ :return: line plot of reconstruction error versus epoch """ fig = px.line(self.reconstruction_error, x = "Epoch", y = "Reconstruction Error") fig.show() def plot_heatmap(self, data): """ :return: Heatmap for SOM """ if self.project is None: normal_distance = np.asarray( [self.dist_weight(data, i) for i in tqdm(range(data.shape[0]), desc="mapping")] ) self.dist_normal = normal_distance[:, 0] self.project = normal_distance[:, 1] x = self.project % self.net_dim[0] y = self.project // self.net_dim[0] if self.topo == "rectangular": fig = go.Figure( go.Histogram2d( x = x, y = y, colorscale = "Viridis" ) ) fig.show() elif self.topo == "hexagonal": x = x + .5 * (y % 2) y = np.sqrt(3) / 2 * y # plt_hex = plt.hexbin(x, y) # plt.close() # fig = tls.mpl_to_plotly(plt_hex) plt.hexbin(x, y) plt.show()
conditional_block
__init__.py
import numpy as np import pandas as pd import plotly.express as px import plotly.graph_objs as go # import plotly.tools as tls import matplotlib.pyplot as plt from scipy.spatial import distance from sklearn.utils.extmath import randomized_svd from tqdm import tqdm class kohonen: """ Matrix SOM Initialize weight matrix For epoch <- 1 to N do Choose input matrix observation randomly - i For k <- 1 to n_node do compute d(input matrix i, weight matrix k) end Best Matching Unit = winning node = node with the smallest distance For k <- 1 to n_node do update weight matrix end end Update weight mi(t + 1) = mi(t) + ⍺(t) * hci(t) [x(t) - mi(t)] Neighborhood function hci(t) = h(dist(rc, ri), t) rc, ri: location vectors of node c and i if Gaussian: hci(t) = exp(-dist^2 / (2 * σ^2(t))) Radius: σ(t) = σ_0 * exp(-t / ƛ) Learning rate: ⍺(t) = ⍺_0 * exp(-t / ƛ) """ def __init__( self, data, xdim, ydim, topo = "rectangular", neighbor = "gaussian", dist = "frobenius", decay = "exponential", seed = None ): """ :param data: 3d array. processed data set for Online SOM Detector :param xdim: Number of x-grid :param ydim: Number of y-grid :param topo: Topology of output space - rectangular or hexagonal :param neighbor: Neighborhood function - gaussian, bubble, or triangular :param dist: Distance function - frobenius, nuclear, mahalanobis (just form of mahalanobis), or :param decay: decaying learning rate and radius - exponential or linear :param seed: Random seed """ np.random.seed(seed = seed) if xdim is None or ydim is None: xdim = int(np.sqrt(5 * np.sqrt(data.shape[0]))) ydim = xdim self.net_dim = np.array([xdim, ydim]) self.ncol = data.shape[2] self.nrow = data.shape[1] # Initialize codebook matrix self.init_weight() # Topology topo_types = ["rectangular", "hexagonal"] if topo not in topo_types: raise ValueError("Invalid topo. 
Expected one of: %s" % topo_types) self.topo = topo self.init_grid() self.dist_node() # Neighborhood function neighbor_types = ["gaussian", "bubble", "triangular"] if neighbor not in neighbor_types: raise ValueError("Invalid neighbor. Expected one of: %s" % neighbor_types) self.neighbor_func = neighbor # Distance function dist_type = ["frobenius", "nuclear", "mahalanobis", "eros"] if dist not in dist_type: raise ValueError("Invalid dist. Expected one of: %s" % dist_type) self.dist_func = dist # Decay decay_types = ["exponential", "linear"] if decay not in decay_types: raise ValueError("Invalid decay. Expected one of: %s" % decay_types) self.decay_func = decay # som() self.epoch = None self.alpha = None self.sigma = None self.initial_learn = None self.initial_r = None # find_bmu() self.bmu = None # plot self.reconstruction_error = None self.dist_normal = None self.project = None def init_weight(self): self.net = np.random.rand(self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol) def init_grid(self): """ [row_pts, col_pts] xdim x ydim rows (points) [1,1] [2,1] [1,2] [2,2] 2---------> 1--------->^ """ self.pts = np.array( np.meshgrid( np.arange(self.net_dim[0]) + 1, np.arange(self.net_dim[1]) + 1 ) ).reshape(2, np.prod(self.net_dim)).T if self.topo == "hexagonal": self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2) self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1] def som(self, data, epoch = 100, init_rate = None, init_radius = None, keep_net = False): """ :param data: 3d array. processed data set for Online SOM Detector :param epoch: epoch number :param init_rate: initial learning rate :param init_radius: initial radius of BMU neighborhood :param keep_net: keep every weight matrix path? 
""" num_obs = data.shape[0] obs_id = np.arange(num_obs) chose_i = np.empty(1) node_id = None hci = None self.epoch = epoch if keep_net: self.net_path = np.empty( (self.epoch, self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol) ) # learning rate if init_rate is None: init_rate = .1 self.alpha = init_rate self.initial_learn = init_rate # radius of neighborhood if init_radius is None: init_radius = np.quantile(self.dci, q = 2 / 3, axis = None) self.sigma = init_radius self.initial_r = init_radius # time constant (lambda) rate_constant = epoch radius_constant = epoch / np.log(self.sigma) # distance between nodes bmu_dist = self.dci[1, :] rcst_err = np.empty(epoch) for i in tqdm(range(epoch), desc = "epoch"): chose_i = int(np.random.choice(obs_id, size = 1)) # BMU - self.bmu self.find_bmu(data, chose_i) # reconstruction error - sum of distances from BMU rcst_err[i] = np.sum([np.square(self.dist_mat(data, j, self.bmu.astype(int))) for j in range(data.shape[0])]) bmu_dist = self.dci[self.bmu.astype(int), :].flatten() # decay self.sigma = self.decay(init_radius, i + 1, radius_constant) self.alpha = self.decay(init_rate, i + 1, rate_constant) # neighboring nodes (includes BMU) neighbor_neuron = np.argwhere(bmu_dist <= self.sigma).flatten() for k in tqdm(range(neighbor_neuron.shape[0]), desc = "updating"): node_id = neighbor_neuron[k] hci = self.neighborhood(bmu_dist[node_id], self.sigma) # update codebook matrices of neighboring nodes self.net[node_id, :, :] += \ self.alpha * hci * \ (data[chose_i, :, :] - self.net[node_id, :, :]).reshape((self.nrow, self.ncol)) if keep_net: self.net_path[i, :, :, :] = self.net self.reconstruction_error = pd.DataFrame({"Epoch": np.arange(self.epoch) + 1, "Reconstruction Error": rcst_err}) def find_bmu(self, data, index): """ :param data: Processed data set for SOM. :param index: Randomly chosen observation id for input matrix among 3d tensor set. 
""" dist_code = np.asarray([self.dist_mat(data, index, j) for j in range(self.net.shape[0])]) self.bmu = np.argmin(dist_code) def dist_mat(self, data, index, node): """ :param data: Processed data set for SOM. :param index: Randomly chosen observation id for input matrix among 3d tensor set. :param node: node index :return: distance between input matrix observation and weight matrix of the node """ if self.dist_func == "frobenius": return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "fro") elif self.dist_func == "nuclear": return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "nuc") elif self.dist_func == "mahalanobis": x = data[index, :, :] - self.net[node, :, :] covmat = np.cov(x, rowvar = False) # spectral decomposition sigma = udu.T w, v = np.linalg.eigh(covmat) # inverse = ud^-1u.T w[w == 0] += .0001 covinv = v.dot(np.diag(1 / w)).dot(v.T) ss = x.dot(covinv).dot(x.T) return np.sqrt(np.trace(ss)) elif self.dist_func == "eros": x = data[index, :, :] - self.net[node, :, :] covmat = np.cov(x, rowvar = False) # svd(covariance) u, s, vh = randomized_svd(covmat, n_components = covmat.shape[1], n_iter = 1, random_state = None) # normalize eigenvalue w = s / s.sum() # distance ss = np.multiply(vh, w).dot(vh.T) return np.sqrt(np.trace(ss)) def dist_node(self): """ :return: distance matrix of SOM neuron """ if self.topo == "hexagonal": self.dci = distance.cdist(self.pts, self.pts, "euclidean") elif self.topo == "rectangular": self.dci = distance.cdist(self.pts, self.pts, "chebyshev") def decay(self, init, time, time_constant): """ :param init: initial value :param time: t :param time_constant: lambda :return: decaying value of alpha or sigma """ if self.decay_func == "exponential": return init * np.exp(-time / time_constant) elif self.decay_func == "linear": return init * (1 - time / time_constant) def neighborhood(self, node_distance, radius): """ :param node_distance: Distance between SOM neurons :param radius: Radius of BMU neighborhood :return: 
Neighborhood function hci """ if self.neighbor_func == "gaussian": return np.exp(-node_distance ** 2 / (2 * (radius ** 2))) elif self.neighbor_func == "bubble": if node_distance <= radius: return 1.0 else:
elif self.neighbor_func == "triangular": if node_distance <= radius: return 1 - np.abs(node_distance) / radius else: return 0.0 def dist_weight(self, data, index): """ :param data: Processed data set for SOM :param index: index for data :return: minimum distance between input matrix and weight matrices, its node id (BMU) """ dist_wt = np.asarray([self.dist_mat(data, index, j) for j in tqdm(range(self.net.shape[0]), desc = "bmu")]) return np.min(dist_wt), np.argmin(dist_wt) def plot_error(self): """ :return: line plot of reconstruction error versus epoch """ fig = px.line(self.reconstruction_error, x = "Epoch", y = "Reconstruction Error") fig.show() def plot_heatmap(self, data): """ :return: Heatmap for SOM """ if self.project is None: normal_distance = np.asarray( [self.dist_weight(data, i) for i in tqdm(range(data.shape[0]), desc="mapping")] ) self.dist_normal = normal_distance[:, 0] self.project = normal_distance[:, 1] x = self.project % self.net_dim[0] y = self.project // self.net_dim[0] if self.topo == "rectangular": fig = go.Figure( go.Histogram2d( x = x, y = y, colorscale = "Viridis" ) ) fig.show() elif self.topo == "hexagonal": x = x + .5 * (y % 2) y = np.sqrt(3) / 2 * y # plt_hex = plt.hexbin(x, y) # plt.close() # fig = tls.mpl_to_plotly(plt_hex) plt.hexbin(x, y) plt.show()
return 0.0
random_line_split
__init__.py
import numpy as np import pandas as pd import plotly.express as px import plotly.graph_objs as go # import plotly.tools as tls import matplotlib.pyplot as plt from scipy.spatial import distance from sklearn.utils.extmath import randomized_svd from tqdm import tqdm class
: """ Matrix SOM Initialize weight matrix For epoch <- 1 to N do Choose input matrix observation randomly - i For k <- 1 to n_node do compute d(input matrix i, weight matrix k) end Best Matching Unit = winning node = node with the smallest distance For k <- 1 to n_node do update weight matrix end end Update weight mi(t + 1) = mi(t) + ⍺(t) * hci(t) [x(t) - mi(t)] Neighborhood function hci(t) = h(dist(rc, ri), t) rc, ri: location vectors of node c and i if Gaussian: hci(t) = exp(-dist^2 / (2 * σ^2(t))) Radius: σ(t) = σ_0 * exp(-t / ƛ) Learning rate: ⍺(t) = ⍺_0 * exp(-t / ƛ) """ def __init__( self, data, xdim, ydim, topo = "rectangular", neighbor = "gaussian", dist = "frobenius", decay = "exponential", seed = None ): """ :param data: 3d array. processed data set for Online SOM Detector :param xdim: Number of x-grid :param ydim: Number of y-grid :param topo: Topology of output space - rectangular or hexagonal :param neighbor: Neighborhood function - gaussian, bubble, or triangular :param dist: Distance function - frobenius, nuclear, mahalanobis (just form of mahalanobis), or :param decay: decaying learning rate and radius - exponential or linear :param seed: Random seed """ np.random.seed(seed = seed) if xdim is None or ydim is None: xdim = int(np.sqrt(5 * np.sqrt(data.shape[0]))) ydim = xdim self.net_dim = np.array([xdim, ydim]) self.ncol = data.shape[2] self.nrow = data.shape[1] # Initialize codebook matrix self.init_weight() # Topology topo_types = ["rectangular", "hexagonal"] if topo not in topo_types: raise ValueError("Invalid topo. Expected one of: %s" % topo_types) self.topo = topo self.init_grid() self.dist_node() # Neighborhood function neighbor_types = ["gaussian", "bubble", "triangular"] if neighbor not in neighbor_types: raise ValueError("Invalid neighbor. 
Expected one of: %s" % neighbor_types) self.neighbor_func = neighbor # Distance function dist_type = ["frobenius", "nuclear", "mahalanobis", "eros"] if dist not in dist_type: raise ValueError("Invalid dist. Expected one of: %s" % dist_type) self.dist_func = dist # Decay decay_types = ["exponential", "linear"] if decay not in decay_types: raise ValueError("Invalid decay. Expected one of: %s" % decay_types) self.decay_func = decay # som() self.epoch = None self.alpha = None self.sigma = None self.initial_learn = None self.initial_r = None # find_bmu() self.bmu = None # plot self.reconstruction_error = None self.dist_normal = None self.project = None def init_weight(self): self.net = np.random.rand(self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol) def init_grid(self): """ [row_pts, col_pts] xdim x ydim rows (points) [1,1] [2,1] [1,2] [2,2] 2---------> 1--------->^ """ self.pts = np.array( np.meshgrid( np.arange(self.net_dim[0]) + 1, np.arange(self.net_dim[1]) + 1 ) ).reshape(2, np.prod(self.net_dim)).T if self.topo == "hexagonal": self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2) self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1] def som(self, data, epoch = 100, init_rate = None, init_radius = None, keep_net = False): """ :param data: 3d array. processed data set for Online SOM Detector :param epoch: epoch number :param init_rate: initial learning rate :param init_radius: initial radius of BMU neighborhood :param keep_net: keep every weight matrix path? 
""" num_obs = data.shape[0] obs_id = np.arange(num_obs) chose_i = np.empty(1) node_id = None hci = None self.epoch = epoch if keep_net: self.net_path = np.empty( (self.epoch, self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol) ) # learning rate if init_rate is None: init_rate = .1 self.alpha = init_rate self.initial_learn = init_rate # radius of neighborhood if init_radius is None: init_radius = np.quantile(self.dci, q = 2 / 3, axis = None) self.sigma = init_radius self.initial_r = init_radius # time constant (lambda) rate_constant = epoch radius_constant = epoch / np.log(self.sigma) # distance between nodes bmu_dist = self.dci[1, :] rcst_err = np.empty(epoch) for i in tqdm(range(epoch), desc = "epoch"): chose_i = int(np.random.choice(obs_id, size = 1)) # BMU - self.bmu self.find_bmu(data, chose_i) # reconstruction error - sum of distances from BMU rcst_err[i] = np.sum([np.square(self.dist_mat(data, j, self.bmu.astype(int))) for j in range(data.shape[0])]) bmu_dist = self.dci[self.bmu.astype(int), :].flatten() # decay self.sigma = self.decay(init_radius, i + 1, radius_constant) self.alpha = self.decay(init_rate, i + 1, rate_constant) # neighboring nodes (includes BMU) neighbor_neuron = np.argwhere(bmu_dist <= self.sigma).flatten() for k in tqdm(range(neighbor_neuron.shape[0]), desc = "updating"): node_id = neighbor_neuron[k] hci = self.neighborhood(bmu_dist[node_id], self.sigma) # update codebook matrices of neighboring nodes self.net[node_id, :, :] += \ self.alpha * hci * \ (data[chose_i, :, :] - self.net[node_id, :, :]).reshape((self.nrow, self.ncol)) if keep_net: self.net_path[i, :, :, :] = self.net self.reconstruction_error = pd.DataFrame({"Epoch": np.arange(self.epoch) + 1, "Reconstruction Error": rcst_err}) def find_bmu(self, data, index): """ :param data: Processed data set for SOM. :param index: Randomly chosen observation id for input matrix among 3d tensor set. 
""" dist_code = np.asarray([self.dist_mat(data, index, j) for j in range(self.net.shape[0])]) self.bmu = np.argmin(dist_code) def dist_mat(self, data, index, node): """ :param data: Processed data set for SOM. :param index: Randomly chosen observation id for input matrix among 3d tensor set. :param node: node index :return: distance between input matrix observation and weight matrix of the node """ if self.dist_func == "frobenius": return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "fro") elif self.dist_func == "nuclear": return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "nuc") elif self.dist_func == "mahalanobis": x = data[index, :, :] - self.net[node, :, :] covmat = np.cov(x, rowvar = False) # spectral decomposition sigma = udu.T w, v = np.linalg.eigh(covmat) # inverse = ud^-1u.T w[w == 0] += .0001 covinv = v.dot(np.diag(1 / w)).dot(v.T) ss = x.dot(covinv).dot(x.T) return np.sqrt(np.trace(ss)) elif self.dist_func == "eros": x = data[index, :, :] - self.net[node, :, :] covmat = np.cov(x, rowvar = False) # svd(covariance) u, s, vh = randomized_svd(covmat, n_components = covmat.shape[1], n_iter = 1, random_state = None) # normalize eigenvalue w = s / s.sum() # distance ss = np.multiply(vh, w).dot(vh.T) return np.sqrt(np.trace(ss)) def dist_node(self): """ :return: distance matrix of SOM neuron """ if self.topo == "hexagonal": self.dci = distance.cdist(self.pts, self.pts, "euclidean") elif self.topo == "rectangular": self.dci = distance.cdist(self.pts, self.pts, "chebyshev") def decay(self, init, time, time_constant): """ :param init: initial value :param time: t :param time_constant: lambda :return: decaying value of alpha or sigma """ if self.decay_func == "exponential": return init * np.exp(-time / time_constant) elif self.decay_func == "linear": return init * (1 - time / time_constant) def neighborhood(self, node_distance, radius): """ :param node_distance: Distance between SOM neurons :param radius: Radius of BMU neighborhood :return: 
Neighborhood function hci """ if self.neighbor_func == "gaussian": return np.exp(-node_distance ** 2 / (2 * (radius ** 2))) elif self.neighbor_func == "bubble": if node_distance <= radius: return 1.0 else: return 0.0 elif self.neighbor_func == "triangular": if node_distance <= radius: return 1 - np.abs(node_distance) / radius else: return 0.0 def dist_weight(self, data, index): """ :param data: Processed data set for SOM :param index: index for data :return: minimum distance between input matrix and weight matrices, its node id (BMU) """ dist_wt = np.asarray([self.dist_mat(data, index, j) for j in tqdm(range(self.net.shape[0]), desc = "bmu")]) return np.min(dist_wt), np.argmin(dist_wt) def plot_error(self): """ :return: line plot of reconstruction error versus epoch """ fig = px.line(self.reconstruction_error, x = "Epoch", y = "Reconstruction Error") fig.show() def plot_heatmap(self, data): """ :return: Heatmap for SOM """ if self.project is None: normal_distance = np.asarray( [self.dist_weight(data, i) for i in tqdm(range(data.shape[0]), desc="mapping")] ) self.dist_normal = normal_distance[:, 0] self.project = normal_distance[:, 1] x = self.project % self.net_dim[0] y = self.project // self.net_dim[0] if self.topo == "rectangular": fig = go.Figure( go.Histogram2d( x = x, y = y, colorscale = "Viridis" ) ) fig.show() elif self.topo == "hexagonal": x = x + .5 * (y % 2) y = np.sqrt(3) / 2 * y # plt_hex = plt.hexbin(x, y) # plt.close() # fig = tls.mpl_to_plotly(plt_hex) plt.hexbin(x, y) plt.show()
kohonen
identifier_name
__init__.py
import numpy as np import pandas as pd import plotly.express as px import plotly.graph_objs as go # import plotly.tools as tls import matplotlib.pyplot as plt from scipy.spatial import distance from sklearn.utils.extmath import randomized_svd from tqdm import tqdm class kohonen:
""" Matrix SOM Initialize weight matrix For epoch <- 1 to N do Choose input matrix observation randomly - i For k <- 1 to n_node do compute d(input matrix i, weight matrix k) end Best Matching Unit = winning node = node with the smallest distance For k <- 1 to n_node do update weight matrix end end Update weight mi(t + 1) = mi(t) + ⍺(t) * hci(t) [x(t) - mi(t)] Neighborhood function hci(t) = h(dist(rc, ri), t) rc, ri: location vectors of node c and i if Gaussian: hci(t) = exp(-dist^2 / (2 * σ^2(t))) Radius: σ(t) = σ_0 * exp(-t / ƛ) Learning rate: ⍺(t) = ⍺_0 * exp(-t / ƛ) """ def __init__( self, data, xdim, ydim, topo = "rectangular", neighbor = "gaussian", dist = "frobenius", decay = "exponential", seed = None ): """ :param data: 3d array. processed data set for Online SOM Detector :param xdim: Number of x-grid :param ydim: Number of y-grid :param topo: Topology of output space - rectangular or hexagonal :param neighbor: Neighborhood function - gaussian, bubble, or triangular :param dist: Distance function - frobenius, nuclear, mahalanobis (just form of mahalanobis), or :param decay: decaying learning rate and radius - exponential or linear :param seed: Random seed """ np.random.seed(seed = seed) if xdim is None or ydim is None: xdim = int(np.sqrt(5 * np.sqrt(data.shape[0]))) ydim = xdim self.net_dim = np.array([xdim, ydim]) self.ncol = data.shape[2] self.nrow = data.shape[1] # Initialize codebook matrix self.init_weight() # Topology topo_types = ["rectangular", "hexagonal"] if topo not in topo_types: raise ValueError("Invalid topo. Expected one of: %s" % topo_types) self.topo = topo self.init_grid() self.dist_node() # Neighborhood function neighbor_types = ["gaussian", "bubble", "triangular"] if neighbor not in neighbor_types: raise ValueError("Invalid neighbor. Expected one of: %s" % neighbor_types) self.neighbor_func = neighbor # Distance function dist_type = ["frobenius", "nuclear", "mahalanobis", "eros"] if dist not in dist_type: raise ValueError("Invalid dist. 
Expected one of: %s" % dist_type) self.dist_func = dist # Decay decay_types = ["exponential", "linear"] if decay not in decay_types: raise ValueError("Invalid decay. Expected one of: %s" % decay_types) self.decay_func = decay # som() self.epoch = None self.alpha = None self.sigma = None self.initial_learn = None self.initial_r = None # find_bmu() self.bmu = None # plot self.reconstruction_error = None self.dist_normal = None self.project = None def init_weight(self): self.net = np.random.rand(self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol) def init_grid(self): """ [row_pts, col_pts] xdim x ydim rows (points) [1,1] [2,1] [1,2] [2,2] 2---------> 1--------->^ """ self.pts = np.array( np.meshgrid( np.arange(self.net_dim[0]) + 1, np.arange(self.net_dim[1]) + 1 ) ).reshape(2, np.prod(self.net_dim)).T if self.topo == "hexagonal": self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2) self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1] def som(self, data, epoch = 100, init_rate = None, init_radius = None, keep_net = False): """ :param data: 3d array. processed data set for Online SOM Detector :param epoch: epoch number :param init_rate: initial learning rate :param init_radius: initial radius of BMU neighborhood :param keep_net: keep every weight matrix path? 
""" num_obs = data.shape[0] obs_id = np.arange(num_obs) chose_i = np.empty(1) node_id = None hci = None self.epoch = epoch if keep_net: self.net_path = np.empty( (self.epoch, self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol) ) # learning rate if init_rate is None: init_rate = .1 self.alpha = init_rate self.initial_learn = init_rate # radius of neighborhood if init_radius is None: init_radius = np.quantile(self.dci, q = 2 / 3, axis = None) self.sigma = init_radius self.initial_r = init_radius # time constant (lambda) rate_constant = epoch radius_constant = epoch / np.log(self.sigma) # distance between nodes bmu_dist = self.dci[1, :] rcst_err = np.empty(epoch) for i in tqdm(range(epoch), desc = "epoch"): chose_i = int(np.random.choice(obs_id, size = 1)) # BMU - self.bmu self.find_bmu(data, chose_i) # reconstruction error - sum of distances from BMU rcst_err[i] = np.sum([np.square(self.dist_mat(data, j, self.bmu.astype(int))) for j in range(data.shape[0])]) bmu_dist = self.dci[self.bmu.astype(int), :].flatten() # decay self.sigma = self.decay(init_radius, i + 1, radius_constant) self.alpha = self.decay(init_rate, i + 1, rate_constant) # neighboring nodes (includes BMU) neighbor_neuron = np.argwhere(bmu_dist <= self.sigma).flatten() for k in tqdm(range(neighbor_neuron.shape[0]), desc = "updating"): node_id = neighbor_neuron[k] hci = self.neighborhood(bmu_dist[node_id], self.sigma) # update codebook matrices of neighboring nodes self.net[node_id, :, :] += \ self.alpha * hci * \ (data[chose_i, :, :] - self.net[node_id, :, :]).reshape((self.nrow, self.ncol)) if keep_net: self.net_path[i, :, :, :] = self.net self.reconstruction_error = pd.DataFrame({"Epoch": np.arange(self.epoch) + 1, "Reconstruction Error": rcst_err}) def find_bmu(self, data, index): """ :param data: Processed data set for SOM. :param index: Randomly chosen observation id for input matrix among 3d tensor set. 
""" dist_code = np.asarray([self.dist_mat(data, index, j) for j in range(self.net.shape[0])]) self.bmu = np.argmin(dist_code) def dist_mat(self, data, index, node): """ :param data: Processed data set for SOM. :param index: Randomly chosen observation id for input matrix among 3d tensor set. :param node: node index :return: distance between input matrix observation and weight matrix of the node """ if self.dist_func == "frobenius": return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "fro") elif self.dist_func == "nuclear": return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "nuc") elif self.dist_func == "mahalanobis": x = data[index, :, :] - self.net[node, :, :] covmat = np.cov(x, rowvar = False) # spectral decomposition sigma = udu.T w, v = np.linalg.eigh(covmat) # inverse = ud^-1u.T w[w == 0] += .0001 covinv = v.dot(np.diag(1 / w)).dot(v.T) ss = x.dot(covinv).dot(x.T) return np.sqrt(np.trace(ss)) elif self.dist_func == "eros": x = data[index, :, :] - self.net[node, :, :] covmat = np.cov(x, rowvar = False) # svd(covariance) u, s, vh = randomized_svd(covmat, n_components = covmat.shape[1], n_iter = 1, random_state = None) # normalize eigenvalue w = s / s.sum() # distance ss = np.multiply(vh, w).dot(vh.T) return np.sqrt(np.trace(ss)) def dist_node(self): """ :return: distance matrix of SOM neuron """ if self.topo == "hexagonal": self.dci = distance.cdist(self.pts, self.pts, "euclidean") elif self.topo == "rectangular": self.dci = distance.cdist(self.pts, self.pts, "chebyshev") def decay(self, init, time, time_constant): """ :param init: initial value :param time: t :param time_constant: lambda :return: decaying value of alpha or sigma """ if self.decay_func == "exponential": return init * np.exp(-time / time_constant) elif self.decay_func == "linear": return init * (1 - time / time_constant) def neighborhood(self, node_distance, radius): """ :param node_distance: Distance between SOM neurons :param radius: Radius of BMU neighborhood :return: 
Neighborhood function hci """ if self.neighbor_func == "gaussian": return np.exp(-node_distance ** 2 / (2 * (radius ** 2))) elif self.neighbor_func == "bubble": if node_distance <= radius: return 1.0 else: return 0.0 elif self.neighbor_func == "triangular": if node_distance <= radius: return 1 - np.abs(node_distance) / radius else: return 0.0 def dist_weight(self, data, index): """ :param data: Processed data set for SOM :param index: index for data :return: minimum distance between input matrix and weight matrices, its node id (BMU) """ dist_wt = np.asarray([self.dist_mat(data, index, j) for j in tqdm(range(self.net.shape[0]), desc = "bmu")]) return np.min(dist_wt), np.argmin(dist_wt) def plot_error(self): """ :return: line plot of reconstruction error versus epoch """ fig = px.line(self.reconstruction_error, x = "Epoch", y = "Reconstruction Error") fig.show() def plot_heatmap(self, data): """ :return: Heatmap for SOM """ if self.project is None: normal_distance = np.asarray( [self.dist_weight(data, i) for i in tqdm(range(data.shape[0]), desc="mapping")] ) self.dist_normal = normal_distance[:, 0] self.project = normal_distance[:, 1] x = self.project % self.net_dim[0] y = self.project // self.net_dim[0] if self.topo == "rectangular": fig = go.Figure( go.Histogram2d( x = x, y = y, colorscale = "Viridis" ) ) fig.show() elif self.topo == "hexagonal": x = x + .5 * (y % 2) y = np.sqrt(3) / 2 * y # plt_hex = plt.hexbin(x, y) # plt.close() # fig = tls.mpl_to_plotly(plt_hex) plt.hexbin(x, y) plt.show()
identifier_body
local_audio_visualizer.js
(function () { var requestAnimationFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame;
window.requestAnimationFrame = requestAnimationFrame; })(); window.onload = function () { var element = document.getElementById("waves"); dropAndLoad(element, init, "ArrayBuffer"); }; // Reusable dropAndLoad function: it reads a local file dropped on a // `dropElement` in the DOM in the specified `readFormat` // (In this case, we want an arrayBuffer) function dropAndLoad(dropElement, callback, readFormat) { var readFormat = readFormat || "DataUrl"; dropElement.addEventListener( "dragover", function (e) { e.stopPropagation(); e.preventDefault(); e.dataTransfer.dropEffect = "copy"; }, false ); dropElement.addEventListener( "drop", function (e) { e.stopPropagation(); e.preventDefault(); loadFile(e.dataTransfer.files[0]); }, false ); function loadFile(files) { var file = files; var reader = new FileReader(); reader.onload = function (e) { callback(e.target.result); }; reader["readAs" + readFormat](file); } } var dogBarkingBuffer = null; // Fix up prefixing window.AudioContext = window.AudioContext || window.webkitAudioContext; var context = new AudioContext(); function loadDogSound(url) { var request = new XMLHttpRequest(); request.open("GET", url, true); request.responseType = "arraybuffer"; // Decode asynchronously request.onload = function () { init(request.response); /*context.decodeAudioData(request.response, function(buffer) { dogBarkingBuffer = buffer; // playSound(buffer); init(buffer); });*/ }; request.send(); } function playSound(buffer) { var source = context.createBufferSource(); // creates a sound source source.buffer = buffer; // tell the source which sound to play source.connect(context.destination); // connect the source to the context's destination (the speakers) source.start(0); // play the source now // note: on older systems, may have to use deprecated noteOn(time); } // Once the file is loaded, we start getting our hands dirty. function init(arrayBuffer) { // document.getElementById('instructions').innerHTML = 'Loading ...' 
// Create a new `audioContext` and its `analyser` window.audioCtx = new AudioContext(); context = audioCtx; window.analyser = audioCtx.createAnalyser(); // If a sound is still playing, stop it. // if (window.source) // source.noteOff(0) // Decode the data in our array into an audio buffer audioCtx.decodeAudioData(arrayBuffer, function (buffer) { // Use the audio buffer with as our audio source window.source = audioCtx.createBufferSource(); source.buffer = buffer; // Connect to the analyser ... source.connect(analyser); // and back to the destination, to play the sound after the analysis. analyser.connect(audioCtx.destination); // Start playing the buffer. source.start(0); // Initialize a visualizer object var viz = new simpleViz(); // Finally, initialize the visualizer. new visualizer(viz["update"], analyser); // document.getElementById('instructions').innerHTML = '' }); } // The visualizer object. // Calls the `visualization` function every time a new frame // is available. // Is passed an `analyser` (audioContext analyser). function visualizer(visualization, analyser) { var self = this; this.visualization = visualization; var last = Date.now(); var loop = function () { var dt = Date.now() - last; // we get the current byteFreq data from our analyser var byteFreq = new Uint8Array(analyser.frequencyBinCount); analyser.getByteFrequencyData(byteFreq); last = Date.now(); // We might want to use a delta time (`dt`) too for our visualization. self.visualization(byteFreq, dt); requestAnimationFrame(loop); }; requestAnimationFrame(loop); } // A simple visualization. Its update function illustrates how to use // the byte frequency data from an audioContext analyser. 
function simpleViz(canvas) { var self = this; this.canvas = document.getElementById("canvas"); this.ctx = this.canvas.getContext("2d"); this.copyCtx = document.getElementById("canvas-copy").getContext("2d"); this.ctx.fillStyle = "#fff"; this.barWidth = 4; this.barGap = 2; // We get the total number of bars to display this.bars = Math.floor(this.canvas.width / (this.barWidth + this.barGap)); // This function is launched for each frame, together with the byte frequency data. this.update = function (byteFreq) { self.ctx.clearRect(0, 0, self.canvas.width, self.canvas.height); // We take an element from the byteFreq array for each of the bars. // Let's pretend our byteFreq contains 20 elements, and we have five bars... var step = Math.floor(byteFreq.length / self.bars); // `||||||||||||||||||||` elements // `| | | | | ` elements we'll use for our bars for (var i = 0; i < self.bars; i++) { // Draw each bar var barHeight = byteFreq[i * step]; self.ctx.fillRect( i * (self.barWidth + self.barGap), self.canvas.height - barHeight, self.barWidth, barHeight ); self.copyCtx.clearRect(0, 0, self.canvas.width, self.canvas.height); self.copyCtx.drawImage(self.canvas, 0, 0); } }; } gainNode = null; source = null; function play_sound() { if (!context.createGain) context.createGain = context.createGainNode; gainNode = context.createGain(); var source = context.createBufferSource(); source.buffer = BUFFERS.techno; // Connect source to a gain node source.connect(gainNode); // Connect gain node to destination gainNode.connect(context.destination); // Start playback in a loop source.loop = true; if (!source.start) source.start = source.noteOn; source.start(0); source = source; } function changeVolume() { var element = document.getElementById("change_vol"); var volume = element.value; var fraction = parseInt(element.value) / parseInt(element.max); gainNode.gain.value = fraction * fraction; } function stop_play() { console.log(source); if (!source.stop) source.stop = source.noteOff; 
source.stop(0); } var url = "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3"; playerElement = document.querySelector("#player"); function Player(url) { this.ac = new (window.AudioContext || webkitAudioContext)(); this.url = url; this.mute = false; // this.el = el; this.button = document.getElementById("play_button"); this.volume_btn = document.getElementById("change_vol"); this.mute_btn = document.getElementById("vol_img"); this.rewind = document.getElementById("rewind"); this.rewind30 = document.getElementById("rewind30"); this.forward = document.getElementById("forward"); this.forward30 = document.getElementById("forward30"); this.speed1xB = document.getElementById("speed1x"); this.speed2xB = document.getElementById("speed2x"); this.left_duration = document.getElementById("left-duration"); this.track = document.getElementById("audio-overlay"); this.progress = document.getElementById("seekbar"); // console.log(this.button); // this.scrubber = el.querySelector('.scrubber'); // this.message = el.querySelector('.message'); // this.message.innerHTML = 'Loading'; this.bindEvents(); this.fetch(); } Player.prototype.bindEvents = function () { this.button.addEventListener("click", this.toggle.bind(this)); this.volume_btn.addEventListener("change", this.changeVolume.bind(this)); this.mute_btn.addEventListener("click", this.muteSound.bind(this)); this.rewind.addEventListener("click", this.rewindSound.bind(this)); this.rewind30.addEventListener("click", this.rewind30Sound.bind(this)); this.forward.addEventListener("click", this.forwardSound.bind(this)); this.forward30.addEventListener("click", this.forward30Sound.bind(this)); this.speed1xB.addEventListener("click", this.speed1x.bind(this)); this.speed2xB.addEventListener("click", this.speed2x.bind(this)); // this.scrubber.addEventListener('mousedown', this.onMouseDown.bind(this)); // window.addEventListener('mousemove', this.onDrag.bind(this)); // window.addEventListener('mouseup', 
this.onMouseUp.bind(this)); }; Player.prototype.fetch = function () { var xhr = new XMLHttpRequest(); xhr.open("GET", this.url, true); xhr.responseType = "arraybuffer"; xhr.onload = function () { this.decode(xhr.response); // init2(xhr.response); }.bind(this); xhr.send(); }; Player.prototype.decode = function (arrayBuffer) { this.ac.decodeAudioData( arrayBuffer, function (audioBuffer) { // this.message.innerHTML = ''; this.buffer = audioBuffer; this.play(); this.draw(); }.bind(this) ); }; Player.prototype.connect = function () { if (this.playing) { this.pause(); } this.source = this.ac.createBufferSource(); this.source.buffer = this.buffer; this.gainNode = this.ac.createGain(); // this.source.connect(); this.analyser = this.ac.createAnalyser(); this.analyser.smoothingTimeConstant = 0.3; this.analyser.fftSize = 1024; // Connect to the analyser ... this.source.connect(this.analyser); this.gainNode.connect(this.ac.destination); // this.source.connect(this.gainNode); this.gainNode.gain.value = 0.5; // 10 % // and back to the destination, to play the sound after the analysis. this.source.connect(this.gainNode); // this.source.connect(this.ac.destination); }; Player.prototype.play = function (position) { this.connect(); this.position = typeof position === "number" ? position : this.position || 0; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); // Initialize a visualizer object var viz = new simpleViz(); // Finally, initialize the visualizer. 
new visualizer(viz["update"], this.analyser); this.playing = true; document.getElementById("play_button").src = "images/pause_big.png"; console.log("duration = " + this.buffer.duration); document.getElementById("duration").innerHTML = formatTime( this.buffer.duration ); }; Player.prototype.changeVolume = function (element) { element = document.getElementById("change_vol"); var volume = element.value; console.log(volume); var fraction = parseInt(element.value) / parseInt(element.max); // console.log(fraction); // Let's use an x*x curve (x-squared) since simple linear (x) does not // sound as good. this.gainNode.gain.value = volume; //fraction * fraction; console.log(this.gainNode); }; Player.prototype.muteSound = function (element) { if (!this.mute) { this.gainNode.gain.value = 0; document.getElementById("vol_img").src = "images/sound-mute.png"; this.mute = true; } else { this.mute = false; document.getElementById("vol_img").src = "images/sound.png"; var aelement = document.getElementById("change_vol"); var volume = aelement.value; console.log(volume); this.gainNode.gain.value = volume; //fraction * fraction; // console.log(this.gainNode); } }; Player.prototype.pause = function () { if (this.source) { this.source.stop(0); this.source = null; this.position = this.ac.currentTime - this.startTime; this.playing = false; document.getElementById("play_button").src = "images/play.png"; } }; Player.prototype.seek = function (time) { if (this.playing) { this.play(time); } else { this.position = time; } }; Player.prototype.updatePosition = function () { this.position = this.playing ? 
this.ac.currentTime - this.startTime : this.position; console.log(this.position); if (this.position >= this.buffer.duration) { this.position = this.buffer.duration; this.pause(); } var baki_time = this.buffer.duration - this.position; console.log("==" + baki_time); this.left_duration.innerHTML = formatTime(baki_time); document.getElementById("duration").innerHTML = formatTime(this.position); return this.position; }; Player.prototype.toggle = function () { if (!this.playing) { this.play(); document.getElementById("play_button").src = "images/pause_big.png"; } else { this.pause(); document.getElementById("play_button").src = "images/play.png"; } }; Player.prototype.rewindSound = function () { this.position = 0; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); }; Player.prototype.rewind30Sound = function () { this.position = typeof position === "number" ? position : this.position || 0; if (this.buffer.duration > 30 && this.position > 30) this.position = this.position - 30; else this.position = 0; this.source.stop(this.ac.currentTime); this.source.disconnect(); this.play(this.position); }; Player.prototype.forwardSound = function () { this.position = this.buffer.duration; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); }; Player.prototype.forward30Sound = function () { this.position = typeof position === "number" ? 
position : this.position || 0; console.log(this.buffer.duration + "buffer"); if (this.buffer.duration > 30) this.position = this.position + 30; else this.position = 0; this.source.stop(this.ac.currentTime); this.source.disconnect(); this.play(this.position); }; Player.prototype.speed1x = function () { this.source.playbackRate.value = 1.0; }; Player.prototype.speed2x = function () { this.source.playbackRate.value = 2.0; }; Player.prototype.onMouseDown = function (e) { this.dragging = true; this.startX = e.pageX; this.startLeft = parseInt(this.scrubber.style.left || 0, 10); }; Player.prototype.onDrag = function (e) { /*var width, position; if ( !this.dragging ) { return; } width = this.track.offsetWidth; position = this.startLeft + ( e.pageX - this.startX ); position = Math.max(Math.min(width, position), 0); this.scrubber.style.left = position + 'px';*/ }; Player.prototype.onMouseUp = function () { /*var width, left, time; if ( this.dragging ) { width = this.track.offsetWidth; left = parseInt(this.scrubber.style.left || 0, 10); time = left / width * this.buffer.duration; this.seek(time); this.dragging = false; }*/ }; Player.prototype.draw = function () { var progress = this.updatePosition() / this.buffer.duration; // width = this.progress.value; if (this.playing) { this.button.classList.add("fa-pause"); this.button.classList.remove("fa-play"); } else { this.button.classList.add("fa-play"); this.button.classList.remove("fa-pause"); } // console.log("progress = = "+progress); this.progress.value = progress; /*if ( !this.dragging ) { this.scrubber.style.left = ( progress * width ) + 'px'; }*/ requestAnimationFrame(this.draw.bind(this)); };
random_line_split
local_audio_visualizer.js
(function () { var requestAnimationFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame; window.requestAnimationFrame = requestAnimationFrame; })(); window.onload = function () { var element = document.getElementById("waves"); dropAndLoad(element, init, "ArrayBuffer"); }; // Reusable dropAndLoad function: it reads a local file dropped on a // `dropElement` in the DOM in the specified `readFormat` // (In this case, we want an arrayBuffer) function dropAndLoad(dropElement, callback, readFormat) { var readFormat = readFormat || "DataUrl"; dropElement.addEventListener( "dragover", function (e) { e.stopPropagation(); e.preventDefault(); e.dataTransfer.dropEffect = "copy"; }, false ); dropElement.addEventListener( "drop", function (e) { e.stopPropagation(); e.preventDefault(); loadFile(e.dataTransfer.files[0]); }, false ); function loadFile(files) { var file = files; var reader = new FileReader(); reader.onload = function (e) { callback(e.target.result); }; reader["readAs" + readFormat](file); } } var dogBarkingBuffer = null; // Fix up prefixing window.AudioContext = window.AudioContext || window.webkitAudioContext; var context = new AudioContext(); function loadDogSound(url) { var request = new XMLHttpRequest(); request.open("GET", url, true); request.responseType = "arraybuffer"; // Decode asynchronously request.onload = function () { init(request.response); /*context.decodeAudioData(request.response, function(buffer) { dogBarkingBuffer = buffer; // playSound(buffer); init(buffer); });*/ }; request.send(); } function playSound(buffer) { var source = context.createBufferSource(); // creates a sound source source.buffer = buffer; // tell the source which sound to play source.connect(context.destination); // connect the source to the context's destination (the speakers) source.start(0); // play the source now // note: on older systems, may have to use deprecated noteOn(time); } // 
Once the file is loaded, we start getting our hands dirty. function init(arrayBuffer) { // document.getElementById('instructions').innerHTML = 'Loading ...' // Create a new `audioContext` and its `analyser` window.audioCtx = new AudioContext(); context = audioCtx; window.analyser = audioCtx.createAnalyser(); // If a sound is still playing, stop it. // if (window.source) // source.noteOff(0) // Decode the data in our array into an audio buffer audioCtx.decodeAudioData(arrayBuffer, function (buffer) { // Use the audio buffer with as our audio source window.source = audioCtx.createBufferSource(); source.buffer = buffer; // Connect to the analyser ... source.connect(analyser); // and back to the destination, to play the sound after the analysis. analyser.connect(audioCtx.destination); // Start playing the buffer. source.start(0); // Initialize a visualizer object var viz = new simpleViz(); // Finally, initialize the visualizer. new visualizer(viz["update"], analyser); // document.getElementById('instructions').innerHTML = '' }); } // The visualizer object. // Calls the `visualization` function every time a new frame // is available. // Is passed an `analyser` (audioContext analyser). function visualizer(visualization, analyser) { var self = this; this.visualization = visualization; var last = Date.now(); var loop = function () { var dt = Date.now() - last; // we get the current byteFreq data from our analyser var byteFreq = new Uint8Array(analyser.frequencyBinCount); analyser.getByteFrequencyData(byteFreq); last = Date.now(); // We might want to use a delta time (`dt`) too for our visualization. self.visualization(byteFreq, dt); requestAnimationFrame(loop); }; requestAnimationFrame(loop); } // A simple visualization. Its update function illustrates how to use // the byte frequency data from an audioContext analyser. 
function simpleViz(canvas) { var self = this; this.canvas = document.getElementById("canvas"); this.ctx = this.canvas.getContext("2d"); this.copyCtx = document.getElementById("canvas-copy").getContext("2d"); this.ctx.fillStyle = "#fff"; this.barWidth = 4; this.barGap = 2; // We get the total number of bars to display this.bars = Math.floor(this.canvas.width / (this.barWidth + this.barGap)); // This function is launched for each frame, together with the byte frequency data. this.update = function (byteFreq) { self.ctx.clearRect(0, 0, self.canvas.width, self.canvas.height); // We take an element from the byteFreq array for each of the bars. // Let's pretend our byteFreq contains 20 elements, and we have five bars... var step = Math.floor(byteFreq.length / self.bars); // `||||||||||||||||||||` elements // `| | | | | ` elements we'll use for our bars for (var i = 0; i < self.bars; i++) { // Draw each bar var barHeight = byteFreq[i * step]; self.ctx.fillRect( i * (self.barWidth + self.barGap), self.canvas.height - barHeight, self.barWidth, barHeight ); self.copyCtx.clearRect(0, 0, self.canvas.width, self.canvas.height); self.copyCtx.drawImage(self.canvas, 0, 0); } }; } gainNode = null; source = null; function play_sound() { if (!context.createGain) context.createGain = context.createGainNode; gainNode = context.createGain(); var source = context.createBufferSource(); source.buffer = BUFFERS.techno; // Connect source to a gain node source.connect(gainNode); // Connect gain node to destination gainNode.connect(context.destination); // Start playback in a loop source.loop = true; if (!source.start) source.start = source.noteOn; source.start(0); source = source; } function changeVolume() { var element = document.getElementById("change_vol"); var volume = element.value; var fraction = parseInt(element.value) / parseInt(element.max); gainNode.gain.value = fraction * fraction; } function stop_play()
var url = "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3"; playerElement = document.querySelector("#player"); function Player(url) { this.ac = new (window.AudioContext || webkitAudioContext)(); this.url = url; this.mute = false; // this.el = el; this.button = document.getElementById("play_button"); this.volume_btn = document.getElementById("change_vol"); this.mute_btn = document.getElementById("vol_img"); this.rewind = document.getElementById("rewind"); this.rewind30 = document.getElementById("rewind30"); this.forward = document.getElementById("forward"); this.forward30 = document.getElementById("forward30"); this.speed1xB = document.getElementById("speed1x"); this.speed2xB = document.getElementById("speed2x"); this.left_duration = document.getElementById("left-duration"); this.track = document.getElementById("audio-overlay"); this.progress = document.getElementById("seekbar"); // console.log(this.button); // this.scrubber = el.querySelector('.scrubber'); // this.message = el.querySelector('.message'); // this.message.innerHTML = 'Loading'; this.bindEvents(); this.fetch(); } Player.prototype.bindEvents = function () { this.button.addEventListener("click", this.toggle.bind(this)); this.volume_btn.addEventListener("change", this.changeVolume.bind(this)); this.mute_btn.addEventListener("click", this.muteSound.bind(this)); this.rewind.addEventListener("click", this.rewindSound.bind(this)); this.rewind30.addEventListener("click", this.rewind30Sound.bind(this)); this.forward.addEventListener("click", this.forwardSound.bind(this)); this.forward30.addEventListener("click", this.forward30Sound.bind(this)); this.speed1xB.addEventListener("click", this.speed1x.bind(this)); this.speed2xB.addEventListener("click", this.speed2x.bind(this)); // this.scrubber.addEventListener('mousedown', this.onMouseDown.bind(this)); // window.addEventListener('mousemove', this.onDrag.bind(this)); // window.addEventListener('mouseup', this.onMouseUp.bind(this)); 
}; Player.prototype.fetch = function () { var xhr = new XMLHttpRequest(); xhr.open("GET", this.url, true); xhr.responseType = "arraybuffer"; xhr.onload = function () { this.decode(xhr.response); // init2(xhr.response); }.bind(this); xhr.send(); }; Player.prototype.decode = function (arrayBuffer) { this.ac.decodeAudioData( arrayBuffer, function (audioBuffer) { // this.message.innerHTML = ''; this.buffer = audioBuffer; this.play(); this.draw(); }.bind(this) ); }; Player.prototype.connect = function () { if (this.playing) { this.pause(); } this.source = this.ac.createBufferSource(); this.source.buffer = this.buffer; this.gainNode = this.ac.createGain(); // this.source.connect(); this.analyser = this.ac.createAnalyser(); this.analyser.smoothingTimeConstant = 0.3; this.analyser.fftSize = 1024; // Connect to the analyser ... this.source.connect(this.analyser); this.gainNode.connect(this.ac.destination); // this.source.connect(this.gainNode); this.gainNode.gain.value = 0.5; // 10 % // and back to the destination, to play the sound after the analysis. this.source.connect(this.gainNode); // this.source.connect(this.ac.destination); }; Player.prototype.play = function (position) { this.connect(); this.position = typeof position === "number" ? position : this.position || 0; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); // Initialize a visualizer object var viz = new simpleViz(); // Finally, initialize the visualizer. 
new visualizer(viz["update"], this.analyser); this.playing = true; document.getElementById("play_button").src = "images/pause_big.png"; console.log("duration = " + this.buffer.duration); document.getElementById("duration").innerHTML = formatTime( this.buffer.duration ); }; Player.prototype.changeVolume = function (element) { element = document.getElementById("change_vol"); var volume = element.value; console.log(volume); var fraction = parseInt(element.value) / parseInt(element.max); // console.log(fraction); // Let's use an x*x curve (x-squared) since simple linear (x) does not // sound as good. this.gainNode.gain.value = volume; //fraction * fraction; console.log(this.gainNode); }; Player.prototype.muteSound = function (element) { if (!this.mute) { this.gainNode.gain.value = 0; document.getElementById("vol_img").src = "images/sound-mute.png"; this.mute = true; } else { this.mute = false; document.getElementById("vol_img").src = "images/sound.png"; var aelement = document.getElementById("change_vol"); var volume = aelement.value; console.log(volume); this.gainNode.gain.value = volume; //fraction * fraction; // console.log(this.gainNode); } }; Player.prototype.pause = function () { if (this.source) { this.source.stop(0); this.source = null; this.position = this.ac.currentTime - this.startTime; this.playing = false; document.getElementById("play_button").src = "images/play.png"; } }; Player.prototype.seek = function (time) { if (this.playing) { this.play(time); } else { this.position = time; } }; Player.prototype.updatePosition = function () { this.position = this.playing ? 
this.ac.currentTime - this.startTime : this.position; console.log(this.position); if (this.position >= this.buffer.duration) { this.position = this.buffer.duration; this.pause(); } var baki_time = this.buffer.duration - this.position; console.log("==" + baki_time); this.left_duration.innerHTML = formatTime(baki_time); document.getElementById("duration").innerHTML = formatTime(this.position); return this.position; }; Player.prototype.toggle = function () { if (!this.playing) { this.play(); document.getElementById("play_button").src = "images/pause_big.png"; } else { this.pause(); document.getElementById("play_button").src = "images/play.png"; } }; Player.prototype.rewindSound = function () { this.position = 0; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); }; Player.prototype.rewind30Sound = function () { this.position = typeof position === "number" ? position : this.position || 0; if (this.buffer.duration > 30 && this.position > 30) this.position = this.position - 30; else this.position = 0; this.source.stop(this.ac.currentTime); this.source.disconnect(); this.play(this.position); }; Player.prototype.forwardSound = function () { this.position = this.buffer.duration; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); }; Player.prototype.forward30Sound = function () { this.position = typeof position === "number" ? 
position : this.position || 0; console.log(this.buffer.duration + "buffer"); if (this.buffer.duration > 30) this.position = this.position + 30; else this.position = 0; this.source.stop(this.ac.currentTime); this.source.disconnect(); this.play(this.position); }; Player.prototype.speed1x = function () { this.source.playbackRate.value = 1.0; }; Player.prototype.speed2x = function () { this.source.playbackRate.value = 2.0; }; Player.prototype.onMouseDown = function (e) { this.dragging = true; this.startX = e.pageX; this.startLeft = parseInt(this.scrubber.style.left || 0, 10); }; Player.prototype.onDrag = function (e) { /*var width, position; if ( !this.dragging ) { return; } width = this.track.offsetWidth; position = this.startLeft + ( e.pageX - this.startX ); position = Math.max(Math.min(width, position), 0); this.scrubber.style.left = position + 'px';*/ }; Player.prototype.onMouseUp = function () { /*var width, left, time; if ( this.dragging ) { width = this.track.offsetWidth; left = parseInt(this.scrubber.style.left || 0, 10); time = left / width * this.buffer.duration; this.seek(time); this.dragging = false; }*/ }; Player.prototype.draw = function () { var progress = this.updatePosition() / this.buffer.duration; // width = this.progress.value; if (this.playing) { this.button.classList.add("fa-pause"); this.button.classList.remove("fa-play"); } else { this.button.classList.add("fa-play"); this.button.classList.remove("fa-pause"); } // console.log("progress = = "+progress); this.progress.value = progress; /*if ( !this.dragging ) { this.scrubber.style.left = ( progress * width ) + 'px'; }*/ requestAnimationFrame(this.draw.bind(this)); };
{ console.log(source); if (!source.stop) source.stop = source.noteOff; source.stop(0); }
identifier_body
local_audio_visualizer.js
(function () { var requestAnimationFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame; window.requestAnimationFrame = requestAnimationFrame; })(); window.onload = function () { var element = document.getElementById("waves"); dropAndLoad(element, init, "ArrayBuffer"); }; // Reusable dropAndLoad function: it reads a local file dropped on a // `dropElement` in the DOM in the specified `readFormat` // (In this case, we want an arrayBuffer) function dropAndLoad(dropElement, callback, readFormat) { var readFormat = readFormat || "DataUrl"; dropElement.addEventListener( "dragover", function (e) { e.stopPropagation(); e.preventDefault(); e.dataTransfer.dropEffect = "copy"; }, false ); dropElement.addEventListener( "drop", function (e) { e.stopPropagation(); e.preventDefault(); loadFile(e.dataTransfer.files[0]); }, false ); function loadFile(files) { var file = files; var reader = new FileReader(); reader.onload = function (e) { callback(e.target.result); }; reader["readAs" + readFormat](file); } } var dogBarkingBuffer = null; // Fix up prefixing window.AudioContext = window.AudioContext || window.webkitAudioContext; var context = new AudioContext(); function loadDogSound(url) { var request = new XMLHttpRequest(); request.open("GET", url, true); request.responseType = "arraybuffer"; // Decode asynchronously request.onload = function () { init(request.response); /*context.decodeAudioData(request.response, function(buffer) { dogBarkingBuffer = buffer; // playSound(buffer); init(buffer); });*/ }; request.send(); } function playSound(buffer) { var source = context.createBufferSource(); // creates a sound source source.buffer = buffer; // tell the source which sound to play source.connect(context.destination); // connect the source to the context's destination (the speakers) source.start(0); // play the source now // note: on older systems, may have to use deprecated noteOn(time); } // 
Once the file is loaded, we start getting our hands dirty. function init(arrayBuffer) { // document.getElementById('instructions').innerHTML = 'Loading ...' // Create a new `audioContext` and its `analyser` window.audioCtx = new AudioContext(); context = audioCtx; window.analyser = audioCtx.createAnalyser(); // If a sound is still playing, stop it. // if (window.source) // source.noteOff(0) // Decode the data in our array into an audio buffer audioCtx.decodeAudioData(arrayBuffer, function (buffer) { // Use the audio buffer with as our audio source window.source = audioCtx.createBufferSource(); source.buffer = buffer; // Connect to the analyser ... source.connect(analyser); // and back to the destination, to play the sound after the analysis. analyser.connect(audioCtx.destination); // Start playing the buffer. source.start(0); // Initialize a visualizer object var viz = new simpleViz(); // Finally, initialize the visualizer. new visualizer(viz["update"], analyser); // document.getElementById('instructions').innerHTML = '' }); } // The visualizer object. // Calls the `visualization` function every time a new frame // is available. // Is passed an `analyser` (audioContext analyser). function visualizer(visualization, analyser) { var self = this; this.visualization = visualization; var last = Date.now(); var loop = function () { var dt = Date.now() - last; // we get the current byteFreq data from our analyser var byteFreq = new Uint8Array(analyser.frequencyBinCount); analyser.getByteFrequencyData(byteFreq); last = Date.now(); // We might want to use a delta time (`dt`) too for our visualization. self.visualization(byteFreq, dt); requestAnimationFrame(loop); }; requestAnimationFrame(loop); } // A simple visualization. Its update function illustrates how to use // the byte frequency data from an audioContext analyser. 
function simpleViz(canvas) { var self = this; this.canvas = document.getElementById("canvas"); this.ctx = this.canvas.getContext("2d"); this.copyCtx = document.getElementById("canvas-copy").getContext("2d"); this.ctx.fillStyle = "#fff"; this.barWidth = 4; this.barGap = 2; // We get the total number of bars to display this.bars = Math.floor(this.canvas.width / (this.barWidth + this.barGap)); // This function is launched for each frame, together with the byte frequency data. this.update = function (byteFreq) { self.ctx.clearRect(0, 0, self.canvas.width, self.canvas.height); // We take an element from the byteFreq array for each of the bars. // Let's pretend our byteFreq contains 20 elements, and we have five bars... var step = Math.floor(byteFreq.length / self.bars); // `||||||||||||||||||||` elements // `| | | | | ` elements we'll use for our bars for (var i = 0; i < self.bars; i++) { // Draw each bar var barHeight = byteFreq[i * step]; self.ctx.fillRect( i * (self.barWidth + self.barGap), self.canvas.height - barHeight, self.barWidth, barHeight ); self.copyCtx.clearRect(0, 0, self.canvas.width, self.canvas.height); self.copyCtx.drawImage(self.canvas, 0, 0); } }; } gainNode = null; source = null; function play_sound() { if (!context.createGain) context.createGain = context.createGainNode; gainNode = context.createGain(); var source = context.createBufferSource(); source.buffer = BUFFERS.techno; // Connect source to a gain node source.connect(gainNode); // Connect gain node to destination gainNode.connect(context.destination); // Start playback in a loop source.loop = true; if (!source.start) source.start = source.noteOn; source.start(0); source = source; } function changeVolume() { var element = document.getElementById("change_vol"); var volume = element.value; var fraction = parseInt(element.value) / parseInt(element.max); gainNode.gain.value = fraction * fraction; } function stop_play() { console.log(source); if (!source.stop) source.stop = source.noteOff; 
source.stop(0); } var url = "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3"; playerElement = document.querySelector("#player"); function Player(url) { this.ac = new (window.AudioContext || webkitAudioContext)(); this.url = url; this.mute = false; // this.el = el; this.button = document.getElementById("play_button"); this.volume_btn = document.getElementById("change_vol"); this.mute_btn = document.getElementById("vol_img"); this.rewind = document.getElementById("rewind"); this.rewind30 = document.getElementById("rewind30"); this.forward = document.getElementById("forward"); this.forward30 = document.getElementById("forward30"); this.speed1xB = document.getElementById("speed1x"); this.speed2xB = document.getElementById("speed2x"); this.left_duration = document.getElementById("left-duration"); this.track = document.getElementById("audio-overlay"); this.progress = document.getElementById("seekbar"); // console.log(this.button); // this.scrubber = el.querySelector('.scrubber'); // this.message = el.querySelector('.message'); // this.message.innerHTML = 'Loading'; this.bindEvents(); this.fetch(); } Player.prototype.bindEvents = function () { this.button.addEventListener("click", this.toggle.bind(this)); this.volume_btn.addEventListener("change", this.changeVolume.bind(this)); this.mute_btn.addEventListener("click", this.muteSound.bind(this)); this.rewind.addEventListener("click", this.rewindSound.bind(this)); this.rewind30.addEventListener("click", this.rewind30Sound.bind(this)); this.forward.addEventListener("click", this.forwardSound.bind(this)); this.forward30.addEventListener("click", this.forward30Sound.bind(this)); this.speed1xB.addEventListener("click", this.speed1x.bind(this)); this.speed2xB.addEventListener("click", this.speed2x.bind(this)); // this.scrubber.addEventListener('mousedown', this.onMouseDown.bind(this)); // window.addEventListener('mousemove', this.onDrag.bind(this)); // window.addEventListener('mouseup', 
this.onMouseUp.bind(this)); }; Player.prototype.fetch = function () { var xhr = new XMLHttpRequest(); xhr.open("GET", this.url, true); xhr.responseType = "arraybuffer"; xhr.onload = function () { this.decode(xhr.response); // init2(xhr.response); }.bind(this); xhr.send(); }; Player.prototype.decode = function (arrayBuffer) { this.ac.decodeAudioData( arrayBuffer, function (audioBuffer) { // this.message.innerHTML = ''; this.buffer = audioBuffer; this.play(); this.draw(); }.bind(this) ); }; Player.prototype.connect = function () { if (this.playing) { this.pause(); } this.source = this.ac.createBufferSource(); this.source.buffer = this.buffer; this.gainNode = this.ac.createGain(); // this.source.connect(); this.analyser = this.ac.createAnalyser(); this.analyser.smoothingTimeConstant = 0.3; this.analyser.fftSize = 1024; // Connect to the analyser ... this.source.connect(this.analyser); this.gainNode.connect(this.ac.destination); // this.source.connect(this.gainNode); this.gainNode.gain.value = 0.5; // 10 % // and back to the destination, to play the sound after the analysis. this.source.connect(this.gainNode); // this.source.connect(this.ac.destination); }; Player.prototype.play = function (position) { this.connect(); this.position = typeof position === "number" ? position : this.position || 0; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); // Initialize a visualizer object var viz = new simpleViz(); // Finally, initialize the visualizer. 
new visualizer(viz["update"], this.analyser); this.playing = true; document.getElementById("play_button").src = "images/pause_big.png"; console.log("duration = " + this.buffer.duration); document.getElementById("duration").innerHTML = formatTime( this.buffer.duration ); }; Player.prototype.changeVolume = function (element) { element = document.getElementById("change_vol"); var volume = element.value; console.log(volume); var fraction = parseInt(element.value) / parseInt(element.max); // console.log(fraction); // Let's use an x*x curve (x-squared) since simple linear (x) does not // sound as good. this.gainNode.gain.value = volume; //fraction * fraction; console.log(this.gainNode); }; Player.prototype.muteSound = function (element) { if (!this.mute) { this.gainNode.gain.value = 0; document.getElementById("vol_img").src = "images/sound-mute.png"; this.mute = true; } else { this.mute = false; document.getElementById("vol_img").src = "images/sound.png"; var aelement = document.getElementById("change_vol"); var volume = aelement.value; console.log(volume); this.gainNode.gain.value = volume; //fraction * fraction; // console.log(this.gainNode); } }; Player.prototype.pause = function () { if (this.source) { this.source.stop(0); this.source = null; this.position = this.ac.currentTime - this.startTime; this.playing = false; document.getElementById("play_button").src = "images/play.png"; } }; Player.prototype.seek = function (time) { if (this.playing) { this.play(time); } else { this.position = time; } }; Player.prototype.updatePosition = function () { this.position = this.playing ? 
this.ac.currentTime - this.startTime : this.position; console.log(this.position); if (this.position >= this.buffer.duration) { this.position = this.buffer.duration; this.pause(); } var baki_time = this.buffer.duration - this.position; console.log("==" + baki_time); this.left_duration.innerHTML = formatTime(baki_time); document.getElementById("duration").innerHTML = formatTime(this.position); return this.position; }; Player.prototype.toggle = function () { if (!this.playing) { this.play(); document.getElementById("play_button").src = "images/pause_big.png"; } else { this.pause(); document.getElementById("play_button").src = "images/play.png"; } }; Player.prototype.rewindSound = function () { this.position = 0; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); }; Player.prototype.rewind30Sound = function () { this.position = typeof position === "number" ? position : this.position || 0; if (this.buffer.duration > 30 && this.position > 30) this.position = this.position - 30; else this.position = 0; this.source.stop(this.ac.currentTime); this.source.disconnect(); this.play(this.position); }; Player.prototype.forwardSound = function () { this.position = this.buffer.duration; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); }; Player.prototype.forward30Sound = function () { this.position = typeof position === "number" ? 
position : this.position || 0; console.log(this.buffer.duration + "buffer"); if (this.buffer.duration > 30) this.position = this.position + 30; else this.position = 0; this.source.stop(this.ac.currentTime); this.source.disconnect(); this.play(this.position); }; Player.prototype.speed1x = function () { this.source.playbackRate.value = 1.0; }; Player.prototype.speed2x = function () { this.source.playbackRate.value = 2.0; }; Player.prototype.onMouseDown = function (e) { this.dragging = true; this.startX = e.pageX; this.startLeft = parseInt(this.scrubber.style.left || 0, 10); }; Player.prototype.onDrag = function (e) { /*var width, position; if ( !this.dragging ) { return; } width = this.track.offsetWidth; position = this.startLeft + ( e.pageX - this.startX ); position = Math.max(Math.min(width, position), 0); this.scrubber.style.left = position + 'px';*/ }; Player.prototype.onMouseUp = function () { /*var width, left, time; if ( this.dragging ) { width = this.track.offsetWidth; left = parseInt(this.scrubber.style.left || 0, 10); time = left / width * this.buffer.duration; this.seek(time); this.dragging = false; }*/ }; Player.prototype.draw = function () { var progress = this.updatePosition() / this.buffer.duration; // width = this.progress.value; if (this.playing) { this.button.classList.add("fa-pause"); this.button.classList.remove("fa-play"); } else
// console.log("progress = = "+progress); this.progress.value = progress; /*if ( !this.dragging ) { this.scrubber.style.left = ( progress * width ) + 'px'; }*/ requestAnimationFrame(this.draw.bind(this)); };
{ this.button.classList.add("fa-play"); this.button.classList.remove("fa-pause"); }
conditional_block
local_audio_visualizer.js
(function () { var requestAnimationFrame = window.requestAnimationFrame || window.mozRequestAnimationFrame || window.webkitRequestAnimationFrame || window.msRequestAnimationFrame; window.requestAnimationFrame = requestAnimationFrame; })(); window.onload = function () { var element = document.getElementById("waves"); dropAndLoad(element, init, "ArrayBuffer"); }; // Reusable dropAndLoad function: it reads a local file dropped on a // `dropElement` in the DOM in the specified `readFormat` // (In this case, we want an arrayBuffer) function dropAndLoad(dropElement, callback, readFormat) { var readFormat = readFormat || "DataUrl"; dropElement.addEventListener( "dragover", function (e) { e.stopPropagation(); e.preventDefault(); e.dataTransfer.dropEffect = "copy"; }, false ); dropElement.addEventListener( "drop", function (e) { e.stopPropagation(); e.preventDefault(); loadFile(e.dataTransfer.files[0]); }, false ); function loadFile(files) { var file = files; var reader = new FileReader(); reader.onload = function (e) { callback(e.target.result); }; reader["readAs" + readFormat](file); } } var dogBarkingBuffer = null; // Fix up prefixing window.AudioContext = window.AudioContext || window.webkitAudioContext; var context = new AudioContext(); function loadDogSound(url) { var request = new XMLHttpRequest(); request.open("GET", url, true); request.responseType = "arraybuffer"; // Decode asynchronously request.onload = function () { init(request.response); /*context.decodeAudioData(request.response, function(buffer) { dogBarkingBuffer = buffer; // playSound(buffer); init(buffer); });*/ }; request.send(); } function playSound(buffer) { var source = context.createBufferSource(); // creates a sound source source.buffer = buffer; // tell the source which sound to play source.connect(context.destination); // connect the source to the context's destination (the speakers) source.start(0); // play the source now // note: on older systems, may have to use deprecated noteOn(time); } // 
Once the file is loaded, we start getting our hands dirty. function init(arrayBuffer) { // document.getElementById('instructions').innerHTML = 'Loading ...' // Create a new `audioContext` and its `analyser` window.audioCtx = new AudioContext(); context = audioCtx; window.analyser = audioCtx.createAnalyser(); // If a sound is still playing, stop it. // if (window.source) // source.noteOff(0) // Decode the data in our array into an audio buffer audioCtx.decodeAudioData(arrayBuffer, function (buffer) { // Use the audio buffer with as our audio source window.source = audioCtx.createBufferSource(); source.buffer = buffer; // Connect to the analyser ... source.connect(analyser); // and back to the destination, to play the sound after the analysis. analyser.connect(audioCtx.destination); // Start playing the buffer. source.start(0); // Initialize a visualizer object var viz = new simpleViz(); // Finally, initialize the visualizer. new visualizer(viz["update"], analyser); // document.getElementById('instructions').innerHTML = '' }); } // The visualizer object. // Calls the `visualization` function every time a new frame // is available. // Is passed an `analyser` (audioContext analyser). function visualizer(visualization, analyser) { var self = this; this.visualization = visualization; var last = Date.now(); var loop = function () { var dt = Date.now() - last; // we get the current byteFreq data from our analyser var byteFreq = new Uint8Array(analyser.frequencyBinCount); analyser.getByteFrequencyData(byteFreq); last = Date.now(); // We might want to use a delta time (`dt`) too for our visualization. self.visualization(byteFreq, dt); requestAnimationFrame(loop); }; requestAnimationFrame(loop); } // A simple visualization. Its update function illustrates how to use // the byte frequency data from an audioContext analyser. 
function simpleViz(canvas) { var self = this; this.canvas = document.getElementById("canvas"); this.ctx = this.canvas.getContext("2d"); this.copyCtx = document.getElementById("canvas-copy").getContext("2d"); this.ctx.fillStyle = "#fff"; this.barWidth = 4; this.barGap = 2; // We get the total number of bars to display this.bars = Math.floor(this.canvas.width / (this.barWidth + this.barGap)); // This function is launched for each frame, together with the byte frequency data. this.update = function (byteFreq) { self.ctx.clearRect(0, 0, self.canvas.width, self.canvas.height); // We take an element from the byteFreq array for each of the bars. // Let's pretend our byteFreq contains 20 elements, and we have five bars... var step = Math.floor(byteFreq.length / self.bars); // `||||||||||||||||||||` elements // `| | | | | ` elements we'll use for our bars for (var i = 0; i < self.bars; i++) { // Draw each bar var barHeight = byteFreq[i * step]; self.ctx.fillRect( i * (self.barWidth + self.barGap), self.canvas.height - barHeight, self.barWidth, barHeight ); self.copyCtx.clearRect(0, 0, self.canvas.width, self.canvas.height); self.copyCtx.drawImage(self.canvas, 0, 0); } }; } gainNode = null; source = null; function play_sound() { if (!context.createGain) context.createGain = context.createGainNode; gainNode = context.createGain(); var source = context.createBufferSource(); source.buffer = BUFFERS.techno; // Connect source to a gain node source.connect(gainNode); // Connect gain node to destination gainNode.connect(context.destination); // Start playback in a loop source.loop = true; if (!source.start) source.start = source.noteOn; source.start(0); source = source; } function changeVolume() { var element = document.getElementById("change_vol"); var volume = element.value; var fraction = parseInt(element.value) / parseInt(element.max); gainNode.gain.value = fraction * fraction; } function stop_play() { console.log(source); if (!source.stop) source.stop = source.noteOff; 
source.stop(0); } var url = "https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3"; playerElement = document.querySelector("#player"); function
(url) { this.ac = new (window.AudioContext || webkitAudioContext)(); this.url = url; this.mute = false; // this.el = el; this.button = document.getElementById("play_button"); this.volume_btn = document.getElementById("change_vol"); this.mute_btn = document.getElementById("vol_img"); this.rewind = document.getElementById("rewind"); this.rewind30 = document.getElementById("rewind30"); this.forward = document.getElementById("forward"); this.forward30 = document.getElementById("forward30"); this.speed1xB = document.getElementById("speed1x"); this.speed2xB = document.getElementById("speed2x"); this.left_duration = document.getElementById("left-duration"); this.track = document.getElementById("audio-overlay"); this.progress = document.getElementById("seekbar"); // console.log(this.button); // this.scrubber = el.querySelector('.scrubber'); // this.message = el.querySelector('.message'); // this.message.innerHTML = 'Loading'; this.bindEvents(); this.fetch(); } Player.prototype.bindEvents = function () { this.button.addEventListener("click", this.toggle.bind(this)); this.volume_btn.addEventListener("change", this.changeVolume.bind(this)); this.mute_btn.addEventListener("click", this.muteSound.bind(this)); this.rewind.addEventListener("click", this.rewindSound.bind(this)); this.rewind30.addEventListener("click", this.rewind30Sound.bind(this)); this.forward.addEventListener("click", this.forwardSound.bind(this)); this.forward30.addEventListener("click", this.forward30Sound.bind(this)); this.speed1xB.addEventListener("click", this.speed1x.bind(this)); this.speed2xB.addEventListener("click", this.speed2x.bind(this)); // this.scrubber.addEventListener('mousedown', this.onMouseDown.bind(this)); // window.addEventListener('mousemove', this.onDrag.bind(this)); // window.addEventListener('mouseup', this.onMouseUp.bind(this)); }; Player.prototype.fetch = function () { var xhr = new XMLHttpRequest(); xhr.open("GET", this.url, true); xhr.responseType = "arraybuffer"; xhr.onload = 
function () { this.decode(xhr.response); // init2(xhr.response); }.bind(this); xhr.send(); }; Player.prototype.decode = function (arrayBuffer) { this.ac.decodeAudioData( arrayBuffer, function (audioBuffer) { // this.message.innerHTML = ''; this.buffer = audioBuffer; this.play(); this.draw(); }.bind(this) ); }; Player.prototype.connect = function () { if (this.playing) { this.pause(); } this.source = this.ac.createBufferSource(); this.source.buffer = this.buffer; this.gainNode = this.ac.createGain(); // this.source.connect(); this.analyser = this.ac.createAnalyser(); this.analyser.smoothingTimeConstant = 0.3; this.analyser.fftSize = 1024; // Connect to the analyser ... this.source.connect(this.analyser); this.gainNode.connect(this.ac.destination); // this.source.connect(this.gainNode); this.gainNode.gain.value = 0.5; // 10 % // and back to the destination, to play the sound after the analysis. this.source.connect(this.gainNode); // this.source.connect(this.ac.destination); }; Player.prototype.play = function (position) { this.connect(); this.position = typeof position === "number" ? position : this.position || 0; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); // Initialize a visualizer object var viz = new simpleViz(); // Finally, initialize the visualizer. new visualizer(viz["update"], this.analyser); this.playing = true; document.getElementById("play_button").src = "images/pause_big.png"; console.log("duration = " + this.buffer.duration); document.getElementById("duration").innerHTML = formatTime( this.buffer.duration ); }; Player.prototype.changeVolume = function (element) { element = document.getElementById("change_vol"); var volume = element.value; console.log(volume); var fraction = parseInt(element.value) / parseInt(element.max); // console.log(fraction); // Let's use an x*x curve (x-squared) since simple linear (x) does not // sound as good. 
this.gainNode.gain.value = volume; //fraction * fraction; console.log(this.gainNode); }; Player.prototype.muteSound = function (element) { if (!this.mute) { this.gainNode.gain.value = 0; document.getElementById("vol_img").src = "images/sound-mute.png"; this.mute = true; } else { this.mute = false; document.getElementById("vol_img").src = "images/sound.png"; var aelement = document.getElementById("change_vol"); var volume = aelement.value; console.log(volume); this.gainNode.gain.value = volume; //fraction * fraction; // console.log(this.gainNode); } }; Player.prototype.pause = function () { if (this.source) { this.source.stop(0); this.source = null; this.position = this.ac.currentTime - this.startTime; this.playing = false; document.getElementById("play_button").src = "images/play.png"; } }; Player.prototype.seek = function (time) { if (this.playing) { this.play(time); } else { this.position = time; } }; Player.prototype.updatePosition = function () { this.position = this.playing ? this.ac.currentTime - this.startTime : this.position; console.log(this.position); if (this.position >= this.buffer.duration) { this.position = this.buffer.duration; this.pause(); } var baki_time = this.buffer.duration - this.position; console.log("==" + baki_time); this.left_duration.innerHTML = formatTime(baki_time); document.getElementById("duration").innerHTML = formatTime(this.position); return this.position; }; Player.prototype.toggle = function () { if (!this.playing) { this.play(); document.getElementById("play_button").src = "images/pause_big.png"; } else { this.pause(); document.getElementById("play_button").src = "images/play.png"; } }; Player.prototype.rewindSound = function () { this.position = 0; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); }; Player.prototype.rewind30Sound = function () { this.position = typeof position === "number" ? 
position : this.position || 0; if (this.buffer.duration > 30 && this.position > 30) this.position = this.position - 30; else this.position = 0; this.source.stop(this.ac.currentTime); this.source.disconnect(); this.play(this.position); }; Player.prototype.forwardSound = function () { this.position = this.buffer.duration; this.startTime = this.ac.currentTime - (this.position || 0); this.source.start(this.ac.currentTime, this.position); }; Player.prototype.forward30Sound = function () { this.position = typeof position === "number" ? position : this.position || 0; console.log(this.buffer.duration + "buffer"); if (this.buffer.duration > 30) this.position = this.position + 30; else this.position = 0; this.source.stop(this.ac.currentTime); this.source.disconnect(); this.play(this.position); }; Player.prototype.speed1x = function () { this.source.playbackRate.value = 1.0; }; Player.prototype.speed2x = function () { this.source.playbackRate.value = 2.0; }; Player.prototype.onMouseDown = function (e) { this.dragging = true; this.startX = e.pageX; this.startLeft = parseInt(this.scrubber.style.left || 0, 10); }; Player.prototype.onDrag = function (e) { /*var width, position; if ( !this.dragging ) { return; } width = this.track.offsetWidth; position = this.startLeft + ( e.pageX - this.startX ); position = Math.max(Math.min(width, position), 0); this.scrubber.style.left = position + 'px';*/ }; Player.prototype.onMouseUp = function () { /*var width, left, time; if ( this.dragging ) { width = this.track.offsetWidth; left = parseInt(this.scrubber.style.left || 0, 10); time = left / width * this.buffer.duration; this.seek(time); this.dragging = false; }*/ }; Player.prototype.draw = function () { var progress = this.updatePosition() / this.buffer.duration; // width = this.progress.value; if (this.playing) { this.button.classList.add("fa-pause"); this.button.classList.remove("fa-play"); } else { this.button.classList.add("fa-play"); this.button.classList.remove("fa-pause"); } // 
console.log("progress = = "+progress); this.progress.value = progress; /*if ( !this.dragging ) { this.scrubber.style.left = ( progress * width ) + 'px'; }*/ requestAnimationFrame(this.draw.bind(this)); };
Player
identifier_name
parser.rs
// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (grammar) BOOLOP STRLEN FILETEST FILEOP INTOP STRINGOP ; (vars) LParen StrlenOp use std::ffi::{OsStr, OsString}; use std::iter::Peekable; use super::error::{ParseError, ParseResult}; use uucore::display::Quotable; /// Represents one of the binary comparison operators for strings, integers, or files #[derive(Debug, PartialEq, Eq)] pub enum Operator { String(OsString), Int(OsString), File(OsString), } /// Represents one of the unary test operators for strings or files #[derive(Debug, PartialEq, Eq)] pub enum UnaryOperator { StrlenOp(OsString), FiletestOp(OsString), } /// Represents a parsed token from a test expression #[derive(Debug, PartialEq, Eq)] pub enum Symbol { LParen, Bang, BoolOp(OsString), Literal(OsString), Op(Operator), UnaryOp(UnaryOperator), None, } impl Symbol { /// Create a new Symbol from an OsString. /// /// Returns Symbol::None in place of None fn new(token: Option<OsString>) -> Self { match token { Some(s) => match s.to_str() { Some(t) => match t { "(" => Self::LParen, "!" => Self::Bang, "-a" | "-o" => Self::BoolOp(s), "=" | "==" | "!=" => Self::Op(Operator::String(s)), "-eq" | "-ge" | "-gt" | "-le" | "-lt" | "-ne" => Self::Op(Operator::Int(s)), "-ef" | "-nt" | "-ot" => Self::Op(Operator::File(s)), "-n" | "-z" => Self::UnaryOp(UnaryOperator::StrlenOp(s)), "-b" | "-c" | "-d" | "-e" | "-f" | "-g" | "-G" | "-h" | "-k" | "-L" | "-N" | "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => { Self::UnaryOp(UnaryOperator::FiletestOp(s)) } _ => Self::Literal(s), }, None => Self::Literal(s), }, None => Self::None, } } /// Convert this Symbol into a Symbol::Literal, useful for cases where /// test treats an operator as a string operand (test has no reserved /// words). 
/// /// # Panics /// /// Panics if `self` is Symbol::None fn into_literal(self) -> Self { Self::Literal(match self { Self::LParen => OsString::from("("), Self::Bang => OsString::from("!"), Self::BoolOp(s) | Self::Literal(s) | Self::Op(Operator::String(s)) | Self::Op(Operator::Int(s)) | Self::Op(Operator::File(s)) | Self::UnaryOp(UnaryOperator::StrlenOp(s)) | Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s, Self::None => panic!(), }) } } /// Implement Display trait for Symbol to make it easier to print useful errors. /// We will try to match the format in which the symbol appears in the input. impl std::fmt::Display for Symbol { /// Format a Symbol for printing fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match &self { Self::LParen => OsStr::new("("), Self::Bang => OsStr::new("!"), Self::BoolOp(s) | Self::Literal(s) | Self::Op(Operator::String(s)) | Self::Op(Operator::Int(s)) | Self::Op(Operator::File(s)) | Self::UnaryOp(UnaryOperator::StrlenOp(s)) | Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s), Self::None => OsStr::new("None"), }; write!(f, "{}", s.quote()) } } /// Recursive descent parser for test, which converts a list of OsStrings /// (typically command line arguments) into a stack of Symbols in postfix /// order. /// /// Grammar: /// /// EXPR → TERM | EXPR BOOLOP EXPR /// TERM → ( EXPR ) /// TERM → ! EXPR /// TERM → UOP str /// UOP → STRLEN | FILETEST /// TERM → str OP str /// TERM → str | 𝜖 /// OP → STRINGOP | INTOP | FILEOP /// STRINGOP → = | == | != /// INTOP → -eq | -ge | -gt | -le | -lt | -ne /// FILEOP → -ef | -nt | -ot /// STRLEN → -n | -z /// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p | /// -r | -s | -S | -t | -u | -w | -x /// BOOLOP → -a | -o /// #[derive(Debug)] struct Parser { tokens: Peekable<std::vec::IntoIter<OsString>>, pub stack: Vec<Symbol>, } impl Parser { /// Construct a new Parser from a `Vec<OsString>` of tokens. 
fn new(tokens: Vec<OsString>) -> Self { Self { tokens: tokens.into_iter().peekable(), stack: vec![], } } /// Fetch the next token from the input stream as a Symbol. fn next_token(&mut self) -> Symbol { Symbol::new(self.tokens.next()) } /// Consume the next token & verify that it matches the provided value. fn expect(&mut self, value: &str) -> ParseResult<()> { match self.next_token() { Symbol::Literal(s) if s == value => Ok(()), _ => Err(ParseError::Expected(value.quote().to_string())), } } /// Peek at the next token from the input stream, returning it as a Symbol. /// The stream is unchanged and will return the same Symbol on subsequent /// calls to `next()` or `peek()`. fn peek(&mut self) -> Symbol { Symbol::new(self.tokens.peek().map(|s| s.to_os_string())) } /// Test if the next token in the stream is a BOOLOP (-a or -o), without /// removing the token from the stream. fn peek_is_boolop(&mut self) -> bool { matches!(self.peek(), Symbol::BoolOp(_)) } /// Parse an expression. /// /// EXPR → TERM | EXPR BOOLOP EXPR fn expr(&mut self) -> ParseResult<()> { if !self.peek_is_boolop() { self.term()?; } self.maybe_boolop()?; Ok(()) } /// Parse a term token and possible subsequent symbols: "(", "!", UOP, /// literal, or None. fn term(&mut self) -> ParseResult<()> { let symbol = self.next_token(); match symbol { Symbol::LParen => self.lparen()?, Symbol::Bang => self.bang()?, Symbol::UnaryOp(_) => self.uop(symbol), Symbol::None => self.stack.push(symbol), literal => self.literal(literal)?, } Ok(()) } /// Parse a (possibly) parenthesized expression. /// /// test has no reserved keywords, so "(" will be interpreted as a literal /// in certain cases: /// /// * when found at the end of the token stream /// * when followed by a binary operator that is not _itself_ interpreted
/// as a literal /// fn lparen(&mut self) -> ParseResult<()> { // Look ahead up to 3 tokens to determine if the lparen is being used // as a grouping operator or should be treated as a literal string let peek3: Vec<Symbol> = self .tokens .clone() .take(3) .map(|token| Symbol::new(Some(token))) .collect(); match peek3.as_slice() { // case 1: lparen is a literal when followed by nothing [] => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // case 2: error if end of stream is `( <any_token>` [symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))), // case 3: `( uop <any_token> )` → parenthesized unary operation; // this case ensures we don’t get confused by `( -f ) )` // or `( -f ( )`, for example [Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => { let symbol = self.next_token(); self.uop(symbol); self.expect(")")?; Ok(()) } // case 4: binary comparison of literal lparen, e.g. `( != )` [Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _] if s == ")" => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // case 5: after handling the prior cases, any single token inside // parentheses is a literal, e.g. `( -f )` [_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => { let symbol = self.next_token(); self.literal(symbol)?; self.expect(")")?; Ok(()) } // case 6: two binary ops in a row, treat the first op as a literal [Symbol::Op(_), Symbol::Op(_), _] => { let symbol = self.next_token(); self.literal(symbol)?; self.expect(")")?; Ok(()) } // case 7: if earlier cases didn’t match, `( op <any_token>…` // indicates binary comparison of literal lparen with // anything _except_ ")" (case 4) [Symbol::Op(_), _] | [Symbol::Op(_), _, _] => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // Otherwise, lparen indicates the start of a parenthesized // expression _ => { self.expr()?; self.expect(")")?; Ok(()) } } } /// Parse a (possibly) negated expression. /// /// Example cases: /// /// * `! 
=`: negate the result of the implicit string length test of `=` /// * `! = foo`: compare the literal strings `!` and `foo` /// * `! = = str`: negate comparison of literal `=` and `str` /// * `!`: bang followed by nothing is literal /// * `! EXPR`: negate the result of the expression /// /// Combined Boolean & negation: /// /// * `! ( EXPR ) [BOOLOP EXPR]`: negate the parenthesized expression only /// * `! UOP str BOOLOP EXPR`: negate the unary subexpression /// * `! str BOOLOP str`: negate the entire Boolean expression /// * `! str BOOLOP EXPR BOOLOP EXPR`: negate the value of the first `str` term /// fn bang(&mut self) -> ParseResult<()> { match self.peek() { Symbol::Op(_) | Symbol::BoolOp(_) => { // we need to peek ahead one more token to disambiguate the first // three cases listed above let peek2 = Symbol::new(self.tokens.clone().nth(1)); match peek2 { // case 1: `! <OP as literal>` // case 3: `! = OP str` Symbol::Op(_) | Symbol::None => { // op is literal let op = self.next_token().into_literal(); self.literal(op)?; self.stack.push(Symbol::Bang); } // case 2: `<! as literal> OP str [BOOLOP EXPR]`. _ => { // bang is literal; parsing continues with op self.literal(Symbol::Bang.into_literal())?; self.maybe_boolop()?; } } } // bang followed by nothing is literal Symbol::None => self.stack.push(Symbol::Bang.into_literal()), _ => { // peek ahead up to 4 tokens to determine if we need to negate // the entire expression or just the first term let peek4: Vec<Symbol> = self .tokens .clone() .take(4) .map(|token| Symbol::new(Some(token))) .collect(); match peek4.as_slice() { // we peeked ahead 4 but there were only 3 tokens left [Symbol::Literal(_), Symbol::BoolOp(_), Symbol::Literal(_)] => { self.expr()?; self.stack.push(Symbol::Bang); } _ => { self.term()?; self.stack.push(Symbol::Bang); } } } } Ok(()) } /// Peek at the next token and parse it as a BOOLOP or string literal, /// as appropriate. 
fn maybe_boolop(&mut self) -> ParseResult<()> { if self.peek_is_boolop() { let symbol = self.next_token(); // BoolOp by itself interpreted as Literal if let Symbol::None = self.peek() { self.literal(symbol.into_literal())?; } else { self.boolop(symbol)?; self.maybe_boolop()?; } } Ok(()) } /// Parse a Boolean expression. /// /// Logical and (-a) has higher precedence than or (-o), so in an /// expression like `foo -o '' -a ''`, the and subexpression is evaluated /// first. fn boolop(&mut self, op: Symbol) -> ParseResult<()> { if op == Symbol::BoolOp(OsString::from("-a")) { self.term()?; } else { self.expr()?; } self.stack.push(op); Ok(()) } /// Parse a (possible) unary argument test (string length or file /// attribute check). /// /// If a UOP is followed by nothing it is interpreted as a literal string. fn uop(&mut self, op: Symbol) { match self.next_token() { Symbol::None => self.stack.push(op.into_literal()), symbol => { self.stack.push(symbol.into_literal()); self.stack.push(op); } } } /// Parse a string literal, optionally followed by a comparison operator /// and a second string literal. fn literal(&mut self, token: Symbol) -> ParseResult<()> { self.stack.push(token.into_literal()); // EXPR → str OP str if let Symbol::Op(_) = self.peek() { let op = self.next_token(); match self.next_token() { Symbol::None => { return Err(ParseError::MissingArgument(format!("{op}"))); } token => self.stack.push(token.into_literal()), } self.stack.push(op); } Ok(()) } /// Parser entry point: parse the token stream `self.tokens`, storing the /// resulting `Symbol` stack in `self.stack`. fn parse(&mut self) -> ParseResult<()> { self.expr()?; match self.tokens.next() { Some(token) => Err(ParseError::ExtraArgument(token.quote().to_string())), None => Ok(()), } } } /// Parse the token stream `args`, returning a `Symbol` stack representing the /// operations to perform in postfix order. 
pub fn parse(args: Vec<OsString>) -> ParseResult<Vec<Symbol>> { let mut p = Parser::new(args); p.parse()?; Ok(p.stack) }
random_line_split
parser.rs
// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (grammar) BOOLOP STRLEN FILETEST FILEOP INTOP STRINGOP ; (vars) LParen StrlenOp use std::ffi::{OsStr, OsString}; use std::iter::Peekable; use super::error::{ParseError, ParseResult}; use uucore::display::Quotable; /// Represents one of the binary comparison operators for strings, integers, or files #[derive(Debug, PartialEq, Eq)] pub enum Operator { String(OsString), Int(OsString), File(OsString), } /// Represents one of the unary test operators for strings or files #[derive(Debug, PartialEq, Eq)] pub enum UnaryOperator { StrlenOp(OsString), FiletestOp(OsString), } /// Represents a parsed token from a test expression #[derive(Debug, PartialEq, Eq)] pub enum Symbol { LParen, Bang, BoolOp(OsString), Literal(OsString), Op(Operator), UnaryOp(UnaryOperator), None, } impl Symbol { /// Create a new Symbol from an OsString. /// /// Returns Symbol::None in place of None fn new(token: Option<OsString>) -> Self { match token { Some(s) => match s.to_str() { Some(t) => match t { "(" => Self::LParen, "!" => Self::Bang, "-a" | "-o" => Self::BoolOp(s), "=" | "==" | "!=" => Self::Op(Operator::String(s)), "-eq" | "-ge" | "-gt" | "-le" | "-lt" | "-ne" => Self::Op(Operator::Int(s)), "-ef" | "-nt" | "-ot" => Self::Op(Operator::File(s)), "-n" | "-z" => Self::UnaryOp(UnaryOperator::StrlenOp(s)), "-b" | "-c" | "-d" | "-e" | "-f" | "-g" | "-G" | "-h" | "-k" | "-L" | "-N" | "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => { Self::UnaryOp(UnaryOperator::FiletestOp(s)) } _ => Self::Literal(s), }, None => Self::Literal(s), }, None => Self::None, } } /// Convert this Symbol into a Symbol::Literal, useful for cases where /// test treats an operator as a string operand (test has no reserved /// words). 
/// /// # Panics /// /// Panics if `self` is Symbol::None fn into_literal(self) -> Self { Self::Literal(match self { Self::LParen => OsString::from("("), Self::Bang => OsString::from("!"), Self::BoolOp(s) | Self::Literal(s) | Self::Op(Operator::String(s)) | Self::Op(Operator::Int(s)) | Self::Op(Operator::File(s)) | Self::UnaryOp(UnaryOperator::StrlenOp(s)) | Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s, Self::None => panic!(), }) } } /// Implement Display trait for Symbol to make it easier to print useful errors. /// We will try to match the format in which the symbol appears in the input. impl std::fmt::Display for Symbol { /// Format a Symbol for printing fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match &self { Self::LParen => OsStr::new("("), Self::Bang => OsStr::new("!"), Self::BoolOp(s) | Self::Literal(s) | Self::Op(Operator::String(s)) | Self::Op(Operator::Int(s)) | Self::Op(Operator::File(s)) | Self::UnaryOp(UnaryOperator::StrlenOp(s)) | Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s), Self::None => OsStr::new("None"), }; write!(f, "{}", s.quote()) } } /// Recursive descent parser for test, which converts a list of OsStrings /// (typically command line arguments) into a stack of Symbols in postfix /// order. /// /// Grammar: /// /// EXPR → TERM | EXPR BOOLOP EXPR /// TERM → ( EXPR ) /// TERM → ! EXPR /// TERM → UOP str /// UOP → STRLEN | FILETEST /// TERM → str OP str /// TERM → str | 𝜖 /// OP → STRINGOP | INTOP | FILEOP /// STRINGOP → = | == | != /// INTOP → -eq | -ge | -gt | -le | -lt | -ne /// FILEOP → -ef | -nt | -ot /// STRLEN → -n | -z /// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p | /// -r | -s | -S | -t | -u | -w | -x /// BOOLOP → -a | -o /// #[derive(Debug)] struct Parser { tokens: Peekable<std::vec::IntoIter<OsString>>, pub stack: Vec<Symbol>, } impl Parser { /// Construct a new Parser from a `Vec<OsString>` of tokens. 
fn new(tokens: Vec<OsString>) -> Self { Self { tokens: tokens.into_iter().peekable(), stack: vec![], } } /// Fetch the next token from the input stream as a Symbol. fn next_token(&mut self) -> Symbol { Symbol::new(self.tokens.next()) } /// Consume the next token & verify that it matches the provided value. fn expect(&mut self, value: &str) -> ParseResult<()> { match self.next_token() { Symbol::Literal(s) if s == value => Ok(()), _ => Err(ParseError::Expected(value.quote().to_string())), } } /// Peek at the next token from the input stream, returning it as a Symbol. /// The stream is unchanged and will return the same Symbol on subsequent /// calls to `next()` or `peek()`. fn peek(&mut self) -> Symbol { Symbol::new(self.tokens.peek().map(|s| s.to_os_string())) } /// Test if the next token in the stream is a BOOLOP (-a or -o), without /// removing the token from the stream. fn peek_is_boolop(&mut self) -> bool { matches!(self.peek(), Symbol::BoolOp(_)) } /// Parse an expression. /// /// EXPR → TERM | EXPR BOOLOP EXPR fn expr(&mut self) -> ParseResult<()> { if !self.peek_is_boolop() { self.term()?; } self.maybe_boolop()?; Ok(()) } /// Parse a term token and possible subsequent symbols: "(", "!", UOP, /// literal, or None. fn term(&mut self) -> ParseResult<()> { let symbol = self.next_t
thesized expression. /// /// test has no reserved keywords, so "(" will be interpreted as a literal /// in certain cases: /// /// * when found at the end of the token stream /// * when followed by a binary operator that is not _itself_ interpreted /// as a literal /// fn lparen(&mut self) -> ParseResult<()> { // Look ahead up to 3 tokens to determine if the lparen is being used // as a grouping operator or should be treated as a literal string let peek3: Vec<Symbol> = self .tokens .clone() .take(3) .map(|token| Symbol::new(Some(token))) .collect(); match peek3.as_slice() { // case 1: lparen is a literal when followed by nothing [] => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // case 2: error if end of stream is `( <any_token>` [symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))), // case 3: `( uop <any_token> )` → parenthesized unary operation; // this case ensures we don’t get confused by `( -f ) )` // or `( -f ( )`, for example [Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => { let symbol = self.next_token(); self.uop(symbol); self.expect(")")?; Ok(()) } // case 4: binary comparison of literal lparen, e.g. `( != )` [Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _] if s == ")" => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // case 5: after handling the prior cases, any single token inside // parentheses is a literal, e.g. 
`( -f )` [_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => { let symbol = self.next_token(); self.literal(symbol)?; self.expect(")")?; Ok(()) } // case 6: two binary ops in a row, treat the first op as a literal [Symbol::Op(_), Symbol::Op(_), _] => { let symbol = self.next_token(); self.literal(symbol)?; self.expect(")")?; Ok(()) } // case 7: if earlier cases didn’t match, `( op <any_token>…` // indicates binary comparison of literal lparen with // anything _except_ ")" (case 4) [Symbol::Op(_), _] | [Symbol::Op(_), _, _] => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // Otherwise, lparen indicates the start of a parenthesized // expression _ => { self.expr()?; self.expect(")")?; Ok(()) } } } /// Parse a (possibly) negated expression. /// /// Example cases: /// /// * `! =`: negate the result of the implicit string length test of `=` /// * `! = foo`: compare the literal strings `!` and `foo` /// * `! = = str`: negate comparison of literal `=` and `str` /// * `!`: bang followed by nothing is literal /// * `! EXPR`: negate the result of the expression /// /// Combined Boolean & negation: /// /// * `! ( EXPR ) [BOOLOP EXPR]`: negate the parenthesized expression only /// * `! UOP str BOOLOP EXPR`: negate the unary subexpression /// * `! str BOOLOP str`: negate the entire Boolean expression /// * `! str BOOLOP EXPR BOOLOP EXPR`: negate the value of the first `str` term /// fn bang(&mut self) -> ParseResult<()> { match self.peek() { Symbol::Op(_) | Symbol::BoolOp(_) => { // we need to peek ahead one more token to disambiguate the first // three cases listed above let peek2 = Symbol::new(self.tokens.clone().nth(1)); match peek2 { // case 1: `! <OP as literal>` // case 3: `! = OP str` Symbol::Op(_) | Symbol::None => { // op is literal let op = self.next_token().into_literal(); self.literal(op)?; self.stack.push(Symbol::Bang); } // case 2: `<! as literal> OP str [BOOLOP EXPR]`. 
_ => { // bang is literal; parsing continues with op self.literal(Symbol::Bang.into_literal())?; self.maybe_boolop()?; } } } // bang followed by nothing is literal Symbol::None => self.stack.push(Symbol::Bang.into_literal()), _ => { // peek ahead up to 4 tokens to determine if we need to negate // the entire expression or just the first term let peek4: Vec<Symbol> = self .tokens .clone() .take(4) .map(|token| Symbol::new(Some(token))) .collect(); match peek4.as_slice() { // we peeked ahead 4 but there were only 3 tokens left [Symbol::Literal(_), Symbol::BoolOp(_), Symbol::Literal(_)] => { self.expr()?; self.stack.push(Symbol::Bang); } _ => { self.term()?; self.stack.push(Symbol::Bang); } } } } Ok(()) } /// Peek at the next token and parse it as a BOOLOP or string literal, /// as appropriate. fn maybe_boolop(&mut self) -> ParseResult<()> { if self.peek_is_boolop() { let symbol = self.next_token(); // BoolOp by itself interpreted as Literal if let Symbol::None = self.peek() { self.literal(symbol.into_literal())?; } else { self.boolop(symbol)?; self.maybe_boolop()?; } } Ok(()) } /// Parse a Boolean expression. /// /// Logical and (-a) has higher precedence than or (-o), so in an /// expression like `foo -o '' -a ''`, the and subexpression is evaluated /// first. fn boolop(&mut self, op: Symbol) -> ParseResult<()> { if op == Symbol::BoolOp(OsString::from("-a")) { self.term()?; } else { self.expr()?; } self.stack.push(op); Ok(()) } /// Parse a (possible) unary argument test (string length or file /// attribute check). /// /// If a UOP is followed by nothing it is interpreted as a literal string. fn uop(&mut self, op: Symbol) { match self.next_token() { Symbol::None => self.stack.push(op.into_literal()), symbol => { self.stack.push(symbol.into_literal()); self.stack.push(op); } } } /// Parse a string literal, optionally followed by a comparison operator /// and a second string literal. 
fn literal(&mut self, token: Symbol) -> ParseResult<()> { self.stack.push(token.into_literal()); // EXPR → str OP str if let Symbol::Op(_) = self.peek() { let op = self.next_token(); match self.next_token() { Symbol::None => { return Err(ParseError::MissingArgument(format!("{op}"))); } token => self.stack.push(token.into_literal()), } self.stack.push(op); } Ok(()) } /// Parser entry point: parse the token stream `self.tokens`, storing the /// resulting `Symbol` stack in `self.stack`. fn parse(&mut self) -> ParseResult<()> { self.expr()?; match self.tokens.next() { Some(token) => Err(ParseError::ExtraArgument(token.quote().to_string())), None => Ok(()), } } } /// Parse the token stream `args`, returning a `Symbol` stack representing the /// operations to perform in postfix order. pub fn parse(args: Vec<OsString>) -> ParseResult<Vec<Symbol>> { let mut p = Parser::new(args); p.parse()?; Ok(p.stack) }
oken(); match symbol { Symbol::LParen => self.lparen()?, Symbol::Bang => self.bang()?, Symbol::UnaryOp(_) => self.uop(symbol), Symbol::None => self.stack.push(symbol), literal => self.literal(literal)?, } Ok(()) } /// Parse a (possibly) paren
identifier_body
parser.rs
// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (grammar) BOOLOP STRLEN FILETEST FILEOP INTOP STRINGOP ; (vars) LParen StrlenOp use std::ffi::{OsStr, OsString}; use std::iter::Peekable; use super::error::{ParseError, ParseResult}; use uucore::display::Quotable; /// Represents one of the binary comparison operators for strings, integers, or files #[derive(Debug, PartialEq, Eq)] pub enum Operator { String(OsString), Int(OsString), File(OsString), } /// Represents one of the unary test operators for strings or files #[derive(Debug, PartialEq, Eq)] pub enum UnaryOperator { StrlenOp(OsString), FiletestOp(OsString), } /// Represents a parsed token from a test expression #[derive(Debug, PartialEq, Eq)] pub enum Symbol { LParen, Bang, BoolOp(OsString), Literal(OsString), Op(Operator), UnaryOp(UnaryOperator), None, } impl Symbol { /// Create a new Symbol from an OsString. /// /// Returns Symbol::None in place of None fn new(token: Option<OsString>) -> Self { match token { Some(s) => match s.to_str() { Some(t) => match t { "(" => Self::LParen, "!" => Self::Bang, "-a" | "-o" => Self::BoolOp(s), "=" | "==" | "!=" => Self::Op(Operator::String(s)), "-eq" | "-ge" | "-gt" | "-le" | "-lt" | "-ne" => Self::Op(Operator::Int(s)), "-ef" | "-nt" | "-ot" => Self::Op(Operator::File(s)), "-n" | "-z" => Self::UnaryOp(UnaryOperator::StrlenOp(s)), "-b" | "-c" | "-d" | "-e" | "-f" | "-g" | "-G" | "-h" | "-k" | "-L" | "-N" | "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => { Self::UnaryOp(UnaryOperator::FiletestOp(s)) } _ => Self::Literal(s), }, None => Self::Literal(s), }, None => Self::None, } } /// Convert this Symbol into a Symbol::Literal, useful for cases where /// test treats an operator as a string operand (test has no reserved /// words). 
/// /// # Panics /// /// Panics if `self` is Symbol::None fn into_literal(self) -> Self { Self::Literal(match self { Self::LParen => OsString::from("("), Self::Bang => OsString::from("!"), Self::BoolOp(s) | Self::Literal(s) | Self::Op(Operator::String(s)) | Self::Op(Operator::Int(s)) | Self::Op(Operator::File(s)) | Self::UnaryOp(UnaryOperator::StrlenOp(s)) | Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s, Self::None => panic!(), }) } } /// Implement Display trait for Symbol to make it easier to print useful errors. /// We will try to match the format in which the symbol appears in the input. impl std::fmt::Display for Symbol { /// Format a Symbol for printing fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match &self { Self::LParen => OsStr::new("("), Self::Bang => OsStr::new("!"), Self::BoolOp(s) | Self::Literal(s) | Self::Op(Operator::String(s)) | Self::Op(Operator::Int(s)) | Self::Op(Operator::File(s)) | Self::UnaryOp(UnaryOperator::StrlenOp(s)) | Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s), Self::None => OsStr::new("None"), }; write!(f, "{}", s.quote()) } } /// Recursive descent parser for test, which converts a list of OsStrings /// (typically command line arguments) into a stack of Symbols in postfix /// order. /// /// Grammar: /// /// EXPR → TERM | EXPR BOOLOP EXPR /// TERM → ( EXPR ) /// TERM → ! EXPR /// TERM → UOP str /// UOP → STRLEN | FILETEST /// TERM → str OP str /// TERM → str | 𝜖 /// OP → STRINGOP | INTOP | FILEOP /// STRINGOP → = | == | != /// INTOP → -eq | -ge | -gt | -le | -lt | -ne /// FILEOP → -ef | -nt | -ot /// STRLEN → -n | -z /// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p | /// -r | -s | -S | -t | -u | -w | -x /// BOOLOP → -a | -o /// #[derive(Debug)] struct Parser { tokens: Peekable<std::vec::IntoIter<OsString>>, pub stack: Vec<Symbol>, } impl Parser { /// Construct a new Parser from a `Vec<OsString>` of tokens. 
fn new(tokens: Vec<OsString>) -> Self { Self { tokens: tokens.into_iter().peekable(), stack: vec![], } } /// Fetch the next token from the input stream as a Symbol. fn next_token(&mut self) -> Symbol { Symbol::new(self.tokens.next()) } /// Consume the next token & verify that it matches the provided value. fn expect(&mut self, value: &str) -> ParseResult<()> { match self.next_token() { Symbol::Literal(s) if s == value => Ok(()), _ => Err(ParseError::Expected(value.quote().to_string())), } } /// Peek at the next token from the input stream, returning it as a Symbol. /// The stream is unchanged and will return the same Symbol on subsequent /// calls to `next()` or `peek()`. fn peek(&mut self) -> Symbol { Symbol::new(self.tokens.peek().map(|s| s.to_os_string())) } /// Test if the next token in the stream is a BOOLOP (-a or -o), without /// removing the token from the stream. fn peek_is_boolop(&mut self) -> bool { matches!(self.peek(), Symbol::BoolOp(_)) } /// Parse an expression. /// /// EXPR → TERM | EXPR BOOLOP EXPR fn expr(&mut self) -> ParseResult<()> { if !self.peek_is_boolop() { self.term()?; } self.maybe_boolop()?; Ok(()) } /// Parse a term token and possible subsequent symbols: "(", "!", UOP, /// literal, or None. fn term(&mut self) -> ParseResult<()>
let symbol = self.next_token(); match symbol { Symbol::LParen => self.lparen()?, Symbol::Bang => self.bang()?, Symbol::UnaryOp(_) => self.uop(symbol), Symbol::None => self.stack.push(symbol), literal => self.literal(literal)?, } Ok(()) } /// Parse a (possibly) parenthesized expression. /// /// test has no reserved keywords, so "(" will be interpreted as a literal /// in certain cases: /// /// * when found at the end of the token stream /// * when followed by a binary operator that is not _itself_ interpreted /// as a literal /// fn lparen(&mut self) -> ParseResult<()> { // Look ahead up to 3 tokens to determine if the lparen is being used // as a grouping operator or should be treated as a literal string let peek3: Vec<Symbol> = self .tokens .clone() .take(3) .map(|token| Symbol::new(Some(token))) .collect(); match peek3.as_slice() { // case 1: lparen is a literal when followed by nothing [] => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // case 2: error if end of stream is `( <any_token>` [symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))), // case 3: `( uop <any_token> )` → parenthesized unary operation; // this case ensures we don’t get confused by `( -f ) )` // or `( -f ( )`, for example [Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => { let symbol = self.next_token(); self.uop(symbol); self.expect(")")?; Ok(()) } // case 4: binary comparison of literal lparen, e.g. `( != )` [Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _] if s == ")" => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // case 5: after handling the prior cases, any single token inside // parentheses is a literal, e.g. 
`( -f )` [_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => { let symbol = self.next_token(); self.literal(symbol)?; self.expect(")")?; Ok(()) } // case 6: two binary ops in a row, treat the first op as a literal [Symbol::Op(_), Symbol::Op(_), _] => { let symbol = self.next_token(); self.literal(symbol)?; self.expect(")")?; Ok(()) } // case 7: if earlier cases didn’t match, `( op <any_token>…` // indicates binary comparison of literal lparen with // anything _except_ ")" (case 4) [Symbol::Op(_), _] | [Symbol::Op(_), _, _] => { self.literal(Symbol::LParen.into_literal())?; Ok(()) } // Otherwise, lparen indicates the start of a parenthesized // expression _ => { self.expr()?; self.expect(")")?; Ok(()) } } } /// Parse a (possibly) negated expression. /// /// Example cases: /// /// * `! =`: negate the result of the implicit string length test of `=` /// * `! = foo`: compare the literal strings `!` and `foo` /// * `! = = str`: negate comparison of literal `=` and `str` /// * `!`: bang followed by nothing is literal /// * `! EXPR`: negate the result of the expression /// /// Combined Boolean & negation: /// /// * `! ( EXPR ) [BOOLOP EXPR]`: negate the parenthesized expression only /// * `! UOP str BOOLOP EXPR`: negate the unary subexpression /// * `! str BOOLOP str`: negate the entire Boolean expression /// * `! str BOOLOP EXPR BOOLOP EXPR`: negate the value of the first `str` term /// fn bang(&mut self) -> ParseResult<()> { match self.peek() { Symbol::Op(_) | Symbol::BoolOp(_) => { // we need to peek ahead one more token to disambiguate the first // three cases listed above let peek2 = Symbol::new(self.tokens.clone().nth(1)); match peek2 { // case 1: `! <OP as literal>` // case 3: `! = OP str` Symbol::Op(_) | Symbol::None => { // op is literal let op = self.next_token().into_literal(); self.literal(op)?; self.stack.push(Symbol::Bang); } // case 2: `<! as literal> OP str [BOOLOP EXPR]`. 
_ => { // bang is literal; parsing continues with op self.literal(Symbol::Bang.into_literal())?; self.maybe_boolop()?; } } } // bang followed by nothing is literal Symbol::None => self.stack.push(Symbol::Bang.into_literal()), _ => { // peek ahead up to 4 tokens to determine if we need to negate // the entire expression or just the first term let peek4: Vec<Symbol> = self .tokens .clone() .take(4) .map(|token| Symbol::new(Some(token))) .collect(); match peek4.as_slice() { // we peeked ahead 4 but there were only 3 tokens left [Symbol::Literal(_), Symbol::BoolOp(_), Symbol::Literal(_)] => { self.expr()?; self.stack.push(Symbol::Bang); } _ => { self.term()?; self.stack.push(Symbol::Bang); } } } } Ok(()) } /// Peek at the next token and parse it as a BOOLOP or string literal, /// as appropriate. fn maybe_boolop(&mut self) -> ParseResult<()> { if self.peek_is_boolop() { let symbol = self.next_token(); // BoolOp by itself interpreted as Literal if let Symbol::None = self.peek() { self.literal(symbol.into_literal())?; } else { self.boolop(symbol)?; self.maybe_boolop()?; } } Ok(()) } /// Parse a Boolean expression. /// /// Logical and (-a) has higher precedence than or (-o), so in an /// expression like `foo -o '' -a ''`, the and subexpression is evaluated /// first. fn boolop(&mut self, op: Symbol) -> ParseResult<()> { if op == Symbol::BoolOp(OsString::from("-a")) { self.term()?; } else { self.expr()?; } self.stack.push(op); Ok(()) } /// Parse a (possible) unary argument test (string length or file /// attribute check). /// /// If a UOP is followed by nothing it is interpreted as a literal string. fn uop(&mut self, op: Symbol) { match self.next_token() { Symbol::None => self.stack.push(op.into_literal()), symbol => { self.stack.push(symbol.into_literal()); self.stack.push(op); } } } /// Parse a string literal, optionally followed by a comparison operator /// and a second string literal. 
fn literal(&mut self, token: Symbol) -> ParseResult<()> { self.stack.push(token.into_literal()); // EXPR → str OP str if let Symbol::Op(_) = self.peek() { let op = self.next_token(); match self.next_token() { Symbol::None => { return Err(ParseError::MissingArgument(format!("{op}"))); } token => self.stack.push(token.into_literal()), } self.stack.push(op); } Ok(()) } /// Parser entry point: parse the token stream `self.tokens`, storing the /// resulting `Symbol` stack in `self.stack`. fn parse(&mut self) -> ParseResult<()> { self.expr()?; match self.tokens.next() { Some(token) => Err(ParseError::ExtraArgument(token.quote().to_string())), None => Ok(()), } } } /// Parse the token stream `args`, returning a `Symbol` stack representing the /// operations to perform in postfix order. pub fn parse(args: Vec<OsString>) -> ParseResult<Vec<Symbol>> { let mut p = Parser::new(args); p.parse()?; Ok(p.stack) }
{
identifier_name
builder.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-2020 Datadog, Inc. // Package checks implements Compliance Agent checks package checks import ( "errors" "fmt" "strings" "time" "github.com/DataDog/datadog-agent/pkg/compliance" "github.com/DataDog/datadog-agent/pkg/compliance/checks/env" "github.com/DataDog/datadog-agent/pkg/compliance/eval" "github.com/DataDog/datadog-agent/pkg/compliance/event" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/hostinfo" "github.com/DataDog/datadog-agent/pkg/util/log" cache "github.com/patrickmn/go-cache" ) // ErrResourceNotSupported is returned when resource type is not supported by Builder var ErrResourceNotSupported = errors.New("resource type not supported") // ErrRuleScopeNotSupported is returned when resource scope is not supported var ErrRuleScopeNotSupported = errors.New("rule scope not supported") // ErrRuleDoesNotApply is returned when a rule cannot be applied to the current environment var ErrRuleDoesNotApply = errors.New("rule does not apply to this environment") const ( builderFuncExec = "exec" builderFuncShell = "shell" builderFuncProcessFlag = "process.flag" builderFuncJSON = "json" builderFuncYAML = "yaml" ) // Builder defines an interface to build checks from rules type Builder interface { ChecksFromFile(file string, onCheck compliance.CheckVisitor) error GetCheckStatus() compliance.CheckStatusList Close() error } // BuilderOption defines a configuration option for the builder type BuilderOption func(*builder) error // WithInterval configures default check interval func WithInterval(interval time.Duration) BuilderOption { return func(b *builder) error { b.checkInterval = interval return nil } } // WithHostname configures hostname used by checks func WithHostname(hostname string) BuilderOption { 
return func(b *builder) error { b.hostname = hostname return nil } } // WithHostRootMount defines host root filesystem mount location func
(hostRootMount string) BuilderOption { return func(b *builder) error { log.Infof("Host root filesystem will be remapped to %s", hostRootMount) b.pathMapper = &pathMapper{ hostMountPath: hostRootMount, } return nil } } // WithDocker configures using docker func WithDocker() BuilderOption { return func(b *builder) error { cli, err := newDockerClient() if err == nil { b.dockerClient = cli } return err } } // WithDockerClient configurs specific docker client func WithDockerClient(cli env.DockerClient) BuilderOption { return func(b *builder) error { b.dockerClient = cli return nil } } // WithAudit configures using audit checks func WithAudit() BuilderOption { return func(b *builder) error { cli, err := newAuditClient() if err == nil { b.auditClient = cli } return err } } // WithAuditClient configures using specific audit client func WithAuditClient(cli env.AuditClient) BuilderOption { return func(b *builder) error { b.auditClient = cli return nil } } // WithKubernetesClient allows specific Kubernetes client func WithKubernetesClient(cli env.KubeClient) BuilderOption { return func(b *builder) error { b.kubeClient = cli return nil } } // WithIsLeader allows check runner to know if its a leader instance or not (DCA) func WithIsLeader(isLeader func() bool) BuilderOption { return func(b *builder) error { b.isLeaderFunc = isLeader return nil } } // SuiteMatcher checks if a compliance suite is included type SuiteMatcher func(*compliance.SuiteMeta) bool // WithMatchSuite configures builder to use a suite matcher func WithMatchSuite(matcher SuiteMatcher) BuilderOption { return func(b *builder) error { b.suiteMatcher = matcher return nil } } // RuleMatcher checks if a compliance rule is included type RuleMatcher func(*compliance.Rule) bool // WithMatchRule configures builder to use a suite matcher func WithMatchRule(matcher RuleMatcher) BuilderOption { return func(b *builder) error { b.ruleMatcher = matcher return nil } } // MayFail configures a builder option to succeed on 
failures and logs an error func MayFail(o BuilderOption) BuilderOption { return func(b *builder) error { if err := o(b); err != nil { log.Warnf("Ignoring builder initialization failure: %v", err) } return nil } } // WithNodeLabels configures a builder to use specified Kubernetes node labels func WithNodeLabels(nodeLabels map[string]string) BuilderOption { return func(b *builder) error { b.nodeLabels = map[string]string{} for k, v := range nodeLabels { k, v := hostinfo.LabelPreprocessor(k, v) b.nodeLabels[k] = v } return nil } } // IsFramework matches a compliance suite by the name of the framework func IsFramework(framework string) SuiteMatcher { return func(s *compliance.SuiteMeta) bool { return s.Framework == framework } } // IsRuleID matches a compliance rule by ID func IsRuleID(ruleID string) RuleMatcher { return func(r *compliance.Rule) bool { return r.ID == ruleID } } // NewBuilder constructs a check builder func NewBuilder(reporter event.Reporter, options ...BuilderOption) (Builder, error) { b := &builder{ reporter: reporter, checkInterval: 20 * time.Minute, etcGroupPath: "/etc/group", status: newStatus(), } for _, o := range options { if err := o(b); err != nil { return nil, err } } b.valueCache = cache.New( b.checkInterval/2, b.checkInterval/4, ) return b, nil } type builder struct { checkInterval time.Duration reporter event.Reporter valueCache *cache.Cache hostname string pathMapper *pathMapper etcGroupPath string nodeLabels map[string]string suiteMatcher SuiteMatcher ruleMatcher RuleMatcher dockerClient env.DockerClient auditClient env.AuditClient kubeClient env.KubeClient isLeaderFunc func() bool status *status } func (b *builder) Close() error { if b.dockerClient != nil { if err := b.dockerClient.Close(); err != nil { return err } } if b.auditClient != nil { if err := b.auditClient.Close(); err != nil { return err } } return nil } func (b *builder) ChecksFromFile(file string, onCheck compliance.CheckVisitor) error { suite, err := 
compliance.ParseSuite(file) if err != nil { return err } if b.suiteMatcher != nil { if b.suiteMatcher(&suite.Meta) { log.Infof("%s/%s: matched suite in %s", suite.Meta.Name, suite.Meta.Version, file) } else { log.Tracef("%s/%s: skipped suite in %s", suite.Meta.Name, suite.Meta.Version, file) return nil } } log.Infof("%s/%s: loading suite from %s", suite.Meta.Name, suite.Meta.Version, file) matchedCount := 0 for _, r := range suite.Rules { if b.ruleMatcher != nil { if b.ruleMatcher(&r) { log.Infof("%s/%s: matched rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file) } else { log.Tracef("%s/%s: skipped rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file) continue } } matchedCount++ if len(r.Resources) == 0 { log.Infof("%s/%s: skipped rule %s - no configured resources", suite.Meta.Name, suite.Meta.Version, r.ID) continue } log.Debugf("%s/%s: loading rule %s", suite.Meta.Name, suite.Meta.Version, r.ID) check, err := b.checkFromRule(&suite.Meta, &r) if err != nil { if err != ErrRuleDoesNotApply { log.Warnf("%s/%s: failed to load rule %s: %v", suite.Meta.Name, suite.Meta.Version, r.ID, err) } log.Infof("%s/%s: skipped rule %s - does not apply to this system", suite.Meta.Name, suite.Meta.Version, r.ID) } if b.status != nil { b.status.addCheck(&compliance.CheckStatus{ RuleID: r.ID, Description: r.Description, Name: compliance.CheckName(r.ID, r.Description), Framework: suite.Meta.Framework, Source: suite.Meta.Source, Version: suite.Meta.Version, InitError: err, }) } ok := onCheck(&r, check, err) if !ok { log.Infof("%s/%s: stopping rule enumeration", suite.Meta.Name, suite.Meta.Version) return err } } if b.ruleMatcher != nil && matchedCount == 0 { log.Infof("%s/%s: no rules matched", suite.Meta.Name, suite.Meta.Version) } return nil } func (b *builder) GetCheckStatus() compliance.CheckStatusList { if b.status != nil { return b.status.getChecksStatus() } return compliance.CheckStatusList{} } func (b *builder) checkFromRule(meta *compliance.SuiteMeta, 
rule *compliance.Rule) (compliance.Check, error) { ruleScope, err := getRuleScope(meta, rule) if err != nil { return nil, err } eligible, err := b.hostMatcher(ruleScope, rule) if err != nil { return nil, err } if !eligible { log.Debugf("rule %s/%s discarded by hostMatcher", meta.Framework, rule.ID) return nil, ErrRuleDoesNotApply } return b.newCheck(meta, ruleScope, rule) } func getRuleScope(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.RuleScope, error) { switch { case rule.Scope.Includes(compliance.DockerScope): return compliance.DockerScope, nil case rule.Scope.Includes(compliance.KubernetesNodeScope): return compliance.KubernetesNodeScope, nil case rule.Scope.Includes(compliance.KubernetesClusterScope): return compliance.KubernetesClusterScope, nil default: return "", ErrRuleScopeNotSupported } } func (b *builder) hostMatcher(scope compliance.RuleScope, rule *compliance.Rule) (bool, error) { switch scope { case compliance.DockerScope: if b.dockerClient == nil { log.Infof("rule %s skipped - not running in a docker environment", rule.ID) return false, nil } case compliance.KubernetesClusterScope: if b.kubeClient == nil { log.Infof("rule %s skipped - not running as Cluster Agent", rule.ID) return false, nil } case compliance.KubernetesNodeScope: if config.IsKubernetes() { return b.isKubernetesNodeEligible(rule.HostSelector) } log.Infof("rule %s skipped - not running on a Kubernetes node", rule.ID) return false, nil } return true, nil } func (b *builder) isKubernetesNodeEligible(hostSelector string) (bool, error) { if hostSelector == "" { return true, nil } expr, err := eval.ParseExpression(hostSelector) if err != nil { return false, err } nodeInstance := &eval.Instance{ Functions: eval.FunctionMap{ "node.hasLabel": b.nodeHasLabel, "node.label": b.nodeLabel, }, Vars: eval.VarMap{ "node.labels": b.nodeLabelKeys(), }, } result, err := expr.Evaluate(nodeInstance) if err != nil { return false, err } eligible, ok := result.(bool) if !ok { return false, 
fmt.Errorf("hostSelector %q does not evaluate to a boolean value", hostSelector) } return eligible, nil } func (b *builder) getNodeLabel(args ...interface{}) (string, bool, error) { if len(args) == 0 { return "", false, errors.New(`expecting one argument for label`) } label, ok := args[0].(string) if !ok { return "", false, fmt.Errorf(`expecting string value for label argument`) } if b.nodeLabels == nil { return "", false, nil } v, ok := b.nodeLabels[label] return v, ok, nil } func (b *builder) nodeHasLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) { _, ok, err := b.getNodeLabel(args...) return ok, err } func (b *builder) nodeLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) { v, _, err := b.getNodeLabel(args...) return v, err } func (b *builder) nodeLabelKeys() []string { var keys []string for k := range b.nodeLabels { keys = append(keys, k) } return keys } func (b *builder) newCheck(meta *compliance.SuiteMeta, ruleScope compliance.RuleScope, rule *compliance.Rule) (compliance.Check, error) { checkable, err := newResourceCheckList(b, rule.ID, rule.Resources) if err != nil { return nil, err } var notify eventNotify if b.status != nil { notify = b.status.updateCheck } // We capture err as configuration error but do not prevent check creation return &complianceCheck{ Env: b, ruleID: rule.ID, description: rule.Description, interval: b.checkInterval, suiteMeta: meta, // For now we are using rule scope (e.g. 
docker, kubernetesNode) as resource type resourceType: string(ruleScope), resourceID: b.hostname, checkable: checkable, eventNotify: notify, }, nil } func (b *builder) Reporter() event.Reporter { return b.reporter } func (b *builder) DockerClient() env.DockerClient { return b.dockerClient } func (b *builder) AuditClient() env.AuditClient { return b.auditClient } func (b *builder) KubeClient() env.KubeClient { return b.kubeClient } func (b *builder) Hostname() string { return b.hostname } func (b *builder) EtcGroupPath() string { return b.etcGroupPath } func (b *builder) NormalizeToHostRoot(path string) string { if b.pathMapper == nil { return path } return b.pathMapper.normalizeToHostRoot(path) } func (b *builder) RelativeToHostRoot(path string) string { if b.pathMapper == nil { return path } return b.pathMapper.relativeToHostRoot(path) } func (b *builder) IsLeader() bool { if b.isLeaderFunc != nil { return b.isLeaderFunc() } return true } func (b *builder) EvaluateFromCache(ev eval.Evaluatable) (interface{}, error) { instance := &eval.Instance{ Functions: eval.FunctionMap{ builderFuncShell: b.withValueCache(builderFuncShell, evalCommandShell), builderFuncExec: b.withValueCache(builderFuncExec, evalCommandExec), builderFuncProcessFlag: b.withValueCache(builderFuncProcessFlag, evalProcessFlag), builderFuncJSON: b.withValueCache(builderFuncJSON, b.evalValueFromFile(jsonGetter)), builderFuncYAML: b.withValueCache(builderFuncYAML, b.evalValueFromFile(yamlGetter)), }, } return ev.Evaluate(instance) } func (b *builder) withValueCache(funcName string, fn eval.Function) eval.Function { return func(instance *eval.Instance, args ...interface{}) (interface{}, error) { var sargs []string for _, arg := range args { sargs = append(sargs, fmt.Sprintf("%v", arg)) } key := fmt.Sprintf("%s(%s)", funcName, strings.Join(sargs, ",")) if v, ok := b.valueCache.Get(key); ok { return v, nil } v, err := fn(instance, args...) 
if err == nil { b.valueCache.Set(key, v, cache.DefaultExpiration) } return v, err } } func evalCommandShell(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) == 0 { return nil, errors.New(`expecting at least one argument`) } command, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for command argument`) } var shellAndArgs []string if len(args) > 1 { for _, arg := range args[1:] { s, ok := arg.(string) if !ok { return nil, fmt.Errorf(`expecting only string value for shell command and arguments`) } shellAndArgs = append(shellAndArgs, s) } } return valueFromShellCommand(command, shellAndArgs...) } func valueFromShellCommand(command string, shellAndArgs ...string) (interface{}, error) { log.Debugf("Resolving value from shell command: %s, args [%s]", command, strings.Join(shellAndArgs, ",")) shellCmd := &compliance.ShellCmd{ Run: command, } if len(shellAndArgs) > 0 { shellCmd.Shell = &compliance.BinaryCmd{ Name: shellAndArgs[0], Args: shellAndArgs[1:], } } execCommand := shellCmdToBinaryCmd(shellCmd) exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout) if exitCode != 0 || err != nil { return nil, fmt.Errorf("command '%v' execution failed, error: %v", command, err) } return stdout, nil } func evalCommandExec(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) == 0 { return nil, errors.New(`expecting at least one argument`) } var cmdArgs []string for _, arg := range args { s, ok := arg.(string) if !ok { return nil, fmt.Errorf(`expecting only string values for arguments`) } cmdArgs = append(cmdArgs, s) } return valueFromBinaryCommand(cmdArgs[0], cmdArgs[1:]...) 
} func valueFromBinaryCommand(name string, args ...string) (interface{}, error) { log.Debugf("Resolving value from command: %s, args [%s]", name, strings.Join(args, ",")) execCommand := &compliance.BinaryCmd{ Name: name, Args: args, } exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout) if exitCode != 0 || err != nil { return nil, fmt.Errorf("command '%v' execution failed, error: %v", execCommand, err) } return stdout, nil } func evalProcessFlag(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) != 2 { return nil, errors.New(`expecting two arguments`) } name, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for process name argument`) } flag, ok := args[1].(string) if !ok { return nil, fmt.Errorf(`expecting string value for process flag argument`) } return valueFromProcessFlag(name, flag) } func valueFromProcessFlag(name string, flag string) (interface{}, error) { log.Debugf("Resolving value from process: %s, flag %s", name, flag) processes, err := getProcesses(cacheValidity) if err != nil { return "", fmt.Errorf("unable to fetch processes: %w", err) } matchedProcesses := processes.findProcessesByName(name) for _, mp := range matchedProcesses { flagValues := parseProcessCmdLine(mp.Cmdline) return flagValues[flag], nil } return "", fmt.Errorf("failed to find process: %s", name) } func (b *builder) evalValueFromFile(get getter) eval.Function { return func(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) != 2 { return nil, fmt.Errorf(`invalid number of arguments, expecting 1 got %d`, len(args)) } path, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for path argument`) } path = b.NormalizeToHostRoot(path) query, ok := args[1].(string) if !ok { return nil, fmt.Errorf(`expecting string value for query argument`) } return queryValueFromFile(path, query, get) } }
WithHostRootMount
identifier_name
builder.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-2020 Datadog, Inc. // Package checks implements Compliance Agent checks package checks import ( "errors" "fmt" "strings" "time" "github.com/DataDog/datadog-agent/pkg/compliance" "github.com/DataDog/datadog-agent/pkg/compliance/checks/env" "github.com/DataDog/datadog-agent/pkg/compliance/eval" "github.com/DataDog/datadog-agent/pkg/compliance/event" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/hostinfo" "github.com/DataDog/datadog-agent/pkg/util/log" cache "github.com/patrickmn/go-cache" ) // ErrResourceNotSupported is returned when resource type is not supported by Builder var ErrResourceNotSupported = errors.New("resource type not supported") // ErrRuleScopeNotSupported is returned when resource scope is not supported var ErrRuleScopeNotSupported = errors.New("rule scope not supported") // ErrRuleDoesNotApply is returned when a rule cannot be applied to the current environment var ErrRuleDoesNotApply = errors.New("rule does not apply to this environment") const ( builderFuncExec = "exec" builderFuncShell = "shell" builderFuncProcessFlag = "process.flag" builderFuncJSON = "json" builderFuncYAML = "yaml" ) // Builder defines an interface to build checks from rules type Builder interface { ChecksFromFile(file string, onCheck compliance.CheckVisitor) error GetCheckStatus() compliance.CheckStatusList Close() error } // BuilderOption defines a configuration option for the builder type BuilderOption func(*builder) error // WithInterval configures default check interval func WithInterval(interval time.Duration) BuilderOption { return func(b *builder) error { b.checkInterval = interval return nil } } // WithHostname configures hostname used by checks func WithHostname(hostname string) BuilderOption { 
return func(b *builder) error { b.hostname = hostname return nil } } // WithHostRootMount defines host root filesystem mount location func WithHostRootMount(hostRootMount string) BuilderOption { return func(b *builder) error { log.Infof("Host root filesystem will be remapped to %s", hostRootMount) b.pathMapper = &pathMapper{ hostMountPath: hostRootMount, } return nil } } // WithDocker configures using docker func WithDocker() BuilderOption { return func(b *builder) error { cli, err := newDockerClient() if err == nil { b.dockerClient = cli } return err } } // WithDockerClient configurs specific docker client func WithDockerClient(cli env.DockerClient) BuilderOption {
b.dockerClient = cli return nil } } // WithAudit configures using audit checks func WithAudit() BuilderOption { return func(b *builder) error { cli, err := newAuditClient() if err == nil { b.auditClient = cli } return err } } // WithAuditClient configures using specific audit client func WithAuditClient(cli env.AuditClient) BuilderOption { return func(b *builder) error { b.auditClient = cli return nil } } // WithKubernetesClient allows specific Kubernetes client func WithKubernetesClient(cli env.KubeClient) BuilderOption { return func(b *builder) error { b.kubeClient = cli return nil } } // WithIsLeader allows check runner to know if its a leader instance or not (DCA) func WithIsLeader(isLeader func() bool) BuilderOption { return func(b *builder) error { b.isLeaderFunc = isLeader return nil } } // SuiteMatcher checks if a compliance suite is included type SuiteMatcher func(*compliance.SuiteMeta) bool // WithMatchSuite configures builder to use a suite matcher func WithMatchSuite(matcher SuiteMatcher) BuilderOption { return func(b *builder) error { b.suiteMatcher = matcher return nil } } // RuleMatcher checks if a compliance rule is included type RuleMatcher func(*compliance.Rule) bool // WithMatchRule configures builder to use a suite matcher func WithMatchRule(matcher RuleMatcher) BuilderOption { return func(b *builder) error { b.ruleMatcher = matcher return nil } } // MayFail configures a builder option to succeed on failures and logs an error func MayFail(o BuilderOption) BuilderOption { return func(b *builder) error { if err := o(b); err != nil { log.Warnf("Ignoring builder initialization failure: %v", err) } return nil } } // WithNodeLabels configures a builder to use specified Kubernetes node labels func WithNodeLabels(nodeLabels map[string]string) BuilderOption { return func(b *builder) error { b.nodeLabels = map[string]string{} for k, v := range nodeLabels { k, v := hostinfo.LabelPreprocessor(k, v) b.nodeLabels[k] = v } return nil } } // IsFramework matches 
a compliance suite by the name of the framework func IsFramework(framework string) SuiteMatcher { return func(s *compliance.SuiteMeta) bool { return s.Framework == framework } } // IsRuleID matches a compliance rule by ID func IsRuleID(ruleID string) RuleMatcher { return func(r *compliance.Rule) bool { return r.ID == ruleID } } // NewBuilder constructs a check builder func NewBuilder(reporter event.Reporter, options ...BuilderOption) (Builder, error) { b := &builder{ reporter: reporter, checkInterval: 20 * time.Minute, etcGroupPath: "/etc/group", status: newStatus(), } for _, o := range options { if err := o(b); err != nil { return nil, err } } b.valueCache = cache.New( b.checkInterval/2, b.checkInterval/4, ) return b, nil } type builder struct { checkInterval time.Duration reporter event.Reporter valueCache *cache.Cache hostname string pathMapper *pathMapper etcGroupPath string nodeLabels map[string]string suiteMatcher SuiteMatcher ruleMatcher RuleMatcher dockerClient env.DockerClient auditClient env.AuditClient kubeClient env.KubeClient isLeaderFunc func() bool status *status } func (b *builder) Close() error { if b.dockerClient != nil { if err := b.dockerClient.Close(); err != nil { return err } } if b.auditClient != nil { if err := b.auditClient.Close(); err != nil { return err } } return nil } func (b *builder) ChecksFromFile(file string, onCheck compliance.CheckVisitor) error { suite, err := compliance.ParseSuite(file) if err != nil { return err } if b.suiteMatcher != nil { if b.suiteMatcher(&suite.Meta) { log.Infof("%s/%s: matched suite in %s", suite.Meta.Name, suite.Meta.Version, file) } else { log.Tracef("%s/%s: skipped suite in %s", suite.Meta.Name, suite.Meta.Version, file) return nil } } log.Infof("%s/%s: loading suite from %s", suite.Meta.Name, suite.Meta.Version, file) matchedCount := 0 for _, r := range suite.Rules { if b.ruleMatcher != nil { if b.ruleMatcher(&r) { log.Infof("%s/%s: matched rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, 
file) } else { log.Tracef("%s/%s: skipped rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file) continue } } matchedCount++ if len(r.Resources) == 0 { log.Infof("%s/%s: skipped rule %s - no configured resources", suite.Meta.Name, suite.Meta.Version, r.ID) continue } log.Debugf("%s/%s: loading rule %s", suite.Meta.Name, suite.Meta.Version, r.ID) check, err := b.checkFromRule(&suite.Meta, &r) if err != nil { if err != ErrRuleDoesNotApply { log.Warnf("%s/%s: failed to load rule %s: %v", suite.Meta.Name, suite.Meta.Version, r.ID, err) } log.Infof("%s/%s: skipped rule %s - does not apply to this system", suite.Meta.Name, suite.Meta.Version, r.ID) } if b.status != nil { b.status.addCheck(&compliance.CheckStatus{ RuleID: r.ID, Description: r.Description, Name: compliance.CheckName(r.ID, r.Description), Framework: suite.Meta.Framework, Source: suite.Meta.Source, Version: suite.Meta.Version, InitError: err, }) } ok := onCheck(&r, check, err) if !ok { log.Infof("%s/%s: stopping rule enumeration", suite.Meta.Name, suite.Meta.Version) return err } } if b.ruleMatcher != nil && matchedCount == 0 { log.Infof("%s/%s: no rules matched", suite.Meta.Name, suite.Meta.Version) } return nil } func (b *builder) GetCheckStatus() compliance.CheckStatusList { if b.status != nil { return b.status.getChecksStatus() } return compliance.CheckStatusList{} } func (b *builder) checkFromRule(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.Check, error) { ruleScope, err := getRuleScope(meta, rule) if err != nil { return nil, err } eligible, err := b.hostMatcher(ruleScope, rule) if err != nil { return nil, err } if !eligible { log.Debugf("rule %s/%s discarded by hostMatcher", meta.Framework, rule.ID) return nil, ErrRuleDoesNotApply } return b.newCheck(meta, ruleScope, rule) } func getRuleScope(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.RuleScope, error) { switch { case rule.Scope.Includes(compliance.DockerScope): return compliance.DockerScope, nil case 
rule.Scope.Includes(compliance.KubernetesNodeScope): return compliance.KubernetesNodeScope, nil case rule.Scope.Includes(compliance.KubernetesClusterScope): return compliance.KubernetesClusterScope, nil default: return "", ErrRuleScopeNotSupported } } func (b *builder) hostMatcher(scope compliance.RuleScope, rule *compliance.Rule) (bool, error) { switch scope { case compliance.DockerScope: if b.dockerClient == nil { log.Infof("rule %s skipped - not running in a docker environment", rule.ID) return false, nil } case compliance.KubernetesClusterScope: if b.kubeClient == nil { log.Infof("rule %s skipped - not running as Cluster Agent", rule.ID) return false, nil } case compliance.KubernetesNodeScope: if config.IsKubernetes() { return b.isKubernetesNodeEligible(rule.HostSelector) } log.Infof("rule %s skipped - not running on a Kubernetes node", rule.ID) return false, nil } return true, nil } func (b *builder) isKubernetesNodeEligible(hostSelector string) (bool, error) { if hostSelector == "" { return true, nil } expr, err := eval.ParseExpression(hostSelector) if err != nil { return false, err } nodeInstance := &eval.Instance{ Functions: eval.FunctionMap{ "node.hasLabel": b.nodeHasLabel, "node.label": b.nodeLabel, }, Vars: eval.VarMap{ "node.labels": b.nodeLabelKeys(), }, } result, err := expr.Evaluate(nodeInstance) if err != nil { return false, err } eligible, ok := result.(bool) if !ok { return false, fmt.Errorf("hostSelector %q does not evaluate to a boolean value", hostSelector) } return eligible, nil } func (b *builder) getNodeLabel(args ...interface{}) (string, bool, error) { if len(args) == 0 { return "", false, errors.New(`expecting one argument for label`) } label, ok := args[0].(string) if !ok { return "", false, fmt.Errorf(`expecting string value for label argument`) } if b.nodeLabels == nil { return "", false, nil } v, ok := b.nodeLabels[label] return v, ok, nil } func (b *builder) nodeHasLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) { _, 
ok, err := b.getNodeLabel(args...) return ok, err } func (b *builder) nodeLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) { v, _, err := b.getNodeLabel(args...) return v, err } func (b *builder) nodeLabelKeys() []string { var keys []string for k := range b.nodeLabels { keys = append(keys, k) } return keys } func (b *builder) newCheck(meta *compliance.SuiteMeta, ruleScope compliance.RuleScope, rule *compliance.Rule) (compliance.Check, error) { checkable, err := newResourceCheckList(b, rule.ID, rule.Resources) if err != nil { return nil, err } var notify eventNotify if b.status != nil { notify = b.status.updateCheck } // We capture err as configuration error but do not prevent check creation return &complianceCheck{ Env: b, ruleID: rule.ID, description: rule.Description, interval: b.checkInterval, suiteMeta: meta, // For now we are using rule scope (e.g. docker, kubernetesNode) as resource type resourceType: string(ruleScope), resourceID: b.hostname, checkable: checkable, eventNotify: notify, }, nil } func (b *builder) Reporter() event.Reporter { return b.reporter } func (b *builder) DockerClient() env.DockerClient { return b.dockerClient } func (b *builder) AuditClient() env.AuditClient { return b.auditClient } func (b *builder) KubeClient() env.KubeClient { return b.kubeClient } func (b *builder) Hostname() string { return b.hostname } func (b *builder) EtcGroupPath() string { return b.etcGroupPath } func (b *builder) NormalizeToHostRoot(path string) string { if b.pathMapper == nil { return path } return b.pathMapper.normalizeToHostRoot(path) } func (b *builder) RelativeToHostRoot(path string) string { if b.pathMapper == nil { return path } return b.pathMapper.relativeToHostRoot(path) } func (b *builder) IsLeader() bool { if b.isLeaderFunc != nil { return b.isLeaderFunc() } return true } func (b *builder) EvaluateFromCache(ev eval.Evaluatable) (interface{}, error) { instance := &eval.Instance{ Functions: eval.FunctionMap{ builderFuncShell: 
b.withValueCache(builderFuncShell, evalCommandShell), builderFuncExec: b.withValueCache(builderFuncExec, evalCommandExec), builderFuncProcessFlag: b.withValueCache(builderFuncProcessFlag, evalProcessFlag), builderFuncJSON: b.withValueCache(builderFuncJSON, b.evalValueFromFile(jsonGetter)), builderFuncYAML: b.withValueCache(builderFuncYAML, b.evalValueFromFile(yamlGetter)), }, } return ev.Evaluate(instance) } func (b *builder) withValueCache(funcName string, fn eval.Function) eval.Function { return func(instance *eval.Instance, args ...interface{}) (interface{}, error) { var sargs []string for _, arg := range args { sargs = append(sargs, fmt.Sprintf("%v", arg)) } key := fmt.Sprintf("%s(%s)", funcName, strings.Join(sargs, ",")) if v, ok := b.valueCache.Get(key); ok { return v, nil } v, err := fn(instance, args...) if err == nil { b.valueCache.Set(key, v, cache.DefaultExpiration) } return v, err } } func evalCommandShell(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) == 0 { return nil, errors.New(`expecting at least one argument`) } command, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for command argument`) } var shellAndArgs []string if len(args) > 1 { for _, arg := range args[1:] { s, ok := arg.(string) if !ok { return nil, fmt.Errorf(`expecting only string value for shell command and arguments`) } shellAndArgs = append(shellAndArgs, s) } } return valueFromShellCommand(command, shellAndArgs...) 
} func valueFromShellCommand(command string, shellAndArgs ...string) (interface{}, error) { log.Debugf("Resolving value from shell command: %s, args [%s]", command, strings.Join(shellAndArgs, ",")) shellCmd := &compliance.ShellCmd{ Run: command, } if len(shellAndArgs) > 0 { shellCmd.Shell = &compliance.BinaryCmd{ Name: shellAndArgs[0], Args: shellAndArgs[1:], } } execCommand := shellCmdToBinaryCmd(shellCmd) exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout) if exitCode != 0 || err != nil { return nil, fmt.Errorf("command '%v' execution failed, error: %v", command, err) } return stdout, nil } func evalCommandExec(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) == 0 { return nil, errors.New(`expecting at least one argument`) } var cmdArgs []string for _, arg := range args { s, ok := arg.(string) if !ok { return nil, fmt.Errorf(`expecting only string values for arguments`) } cmdArgs = append(cmdArgs, s) } return valueFromBinaryCommand(cmdArgs[0], cmdArgs[1:]...) 
} func valueFromBinaryCommand(name string, args ...string) (interface{}, error) { log.Debugf("Resolving value from command: %s, args [%s]", name, strings.Join(args, ",")) execCommand := &compliance.BinaryCmd{ Name: name, Args: args, } exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout) if exitCode != 0 || err != nil { return nil, fmt.Errorf("command '%v' execution failed, error: %v", execCommand, err) } return stdout, nil } func evalProcessFlag(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) != 2 { return nil, errors.New(`expecting two arguments`) } name, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for process name argument`) } flag, ok := args[1].(string) if !ok { return nil, fmt.Errorf(`expecting string value for process flag argument`) } return valueFromProcessFlag(name, flag) } func valueFromProcessFlag(name string, flag string) (interface{}, error) { log.Debugf("Resolving value from process: %s, flag %s", name, flag) processes, err := getProcesses(cacheValidity) if err != nil { return "", fmt.Errorf("unable to fetch processes: %w", err) } matchedProcesses := processes.findProcessesByName(name) for _, mp := range matchedProcesses { flagValues := parseProcessCmdLine(mp.Cmdline) return flagValues[flag], nil } return "", fmt.Errorf("failed to find process: %s", name) } func (b *builder) evalValueFromFile(get getter) eval.Function { return func(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) != 2 { return nil, fmt.Errorf(`invalid number of arguments, expecting 1 got %d`, len(args)) } path, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for path argument`) } path = b.NormalizeToHostRoot(path) query, ok := args[1].(string) if !ok { return nil, fmt.Errorf(`expecting string value for query argument`) } return queryValueFromFile(path, query, get) } }
return func(b *builder) error {
random_line_split
builder.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-2020 Datadog, Inc. // Package checks implements Compliance Agent checks package checks import ( "errors" "fmt" "strings" "time" "github.com/DataDog/datadog-agent/pkg/compliance" "github.com/DataDog/datadog-agent/pkg/compliance/checks/env" "github.com/DataDog/datadog-agent/pkg/compliance/eval" "github.com/DataDog/datadog-agent/pkg/compliance/event" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/hostinfo" "github.com/DataDog/datadog-agent/pkg/util/log" cache "github.com/patrickmn/go-cache" ) // ErrResourceNotSupported is returned when resource type is not supported by Builder var ErrResourceNotSupported = errors.New("resource type not supported") // ErrRuleScopeNotSupported is returned when resource scope is not supported var ErrRuleScopeNotSupported = errors.New("rule scope not supported") // ErrRuleDoesNotApply is returned when a rule cannot be applied to the current environment var ErrRuleDoesNotApply = errors.New("rule does not apply to this environment") const ( builderFuncExec = "exec" builderFuncShell = "shell" builderFuncProcessFlag = "process.flag" builderFuncJSON = "json" builderFuncYAML = "yaml" ) // Builder defines an interface to build checks from rules type Builder interface { ChecksFromFile(file string, onCheck compliance.CheckVisitor) error GetCheckStatus() compliance.CheckStatusList Close() error } // BuilderOption defines a configuration option for the builder type BuilderOption func(*builder) error // WithInterval configures default check interval func WithInterval(interval time.Duration) BuilderOption { return func(b *builder) error { b.checkInterval = interval return nil } } // WithHostname configures hostname used by checks func WithHostname(hostname string) BuilderOption { 
return func(b *builder) error { b.hostname = hostname return nil } } // WithHostRootMount defines host root filesystem mount location func WithHostRootMount(hostRootMount string) BuilderOption { return func(b *builder) error { log.Infof("Host root filesystem will be remapped to %s", hostRootMount) b.pathMapper = &pathMapper{ hostMountPath: hostRootMount, } return nil } } // WithDocker configures using docker func WithDocker() BuilderOption { return func(b *builder) error { cli, err := newDockerClient() if err == nil { b.dockerClient = cli } return err } } // WithDockerClient configurs specific docker client func WithDockerClient(cli env.DockerClient) BuilderOption { return func(b *builder) error { b.dockerClient = cli return nil } } // WithAudit configures using audit checks func WithAudit() BuilderOption { return func(b *builder) error { cli, err := newAuditClient() if err == nil { b.auditClient = cli } return err } } // WithAuditClient configures using specific audit client func WithAuditClient(cli env.AuditClient) BuilderOption { return func(b *builder) error { b.auditClient = cli return nil } } // WithKubernetesClient allows specific Kubernetes client func WithKubernetesClient(cli env.KubeClient) BuilderOption { return func(b *builder) error { b.kubeClient = cli return nil } } // WithIsLeader allows check runner to know if its a leader instance or not (DCA) func WithIsLeader(isLeader func() bool) BuilderOption { return func(b *builder) error { b.isLeaderFunc = isLeader return nil } } // SuiteMatcher checks if a compliance suite is included type SuiteMatcher func(*compliance.SuiteMeta) bool // WithMatchSuite configures builder to use a suite matcher func WithMatchSuite(matcher SuiteMatcher) BuilderOption { return func(b *builder) error { b.suiteMatcher = matcher return nil } } // RuleMatcher checks if a compliance rule is included type RuleMatcher func(*compliance.Rule) bool // WithMatchRule configures builder to use a suite matcher func WithMatchRule(matcher 
RuleMatcher) BuilderOption { return func(b *builder) error { b.ruleMatcher = matcher return nil } } // MayFail configures a builder option to succeed on failures and logs an error func MayFail(o BuilderOption) BuilderOption { return func(b *builder) error { if err := o(b); err != nil { log.Warnf("Ignoring builder initialization failure: %v", err) } return nil } } // WithNodeLabels configures a builder to use specified Kubernetes node labels func WithNodeLabels(nodeLabels map[string]string) BuilderOption { return func(b *builder) error { b.nodeLabels = map[string]string{} for k, v := range nodeLabels { k, v := hostinfo.LabelPreprocessor(k, v) b.nodeLabels[k] = v } return nil } } // IsFramework matches a compliance suite by the name of the framework func IsFramework(framework string) SuiteMatcher { return func(s *compliance.SuiteMeta) bool { return s.Framework == framework } } // IsRuleID matches a compliance rule by ID func IsRuleID(ruleID string) RuleMatcher { return func(r *compliance.Rule) bool { return r.ID == ruleID } } // NewBuilder constructs a check builder func NewBuilder(reporter event.Reporter, options ...BuilderOption) (Builder, error) { b := &builder{ reporter: reporter, checkInterval: 20 * time.Minute, etcGroupPath: "/etc/group", status: newStatus(), } for _, o := range options { if err := o(b); err != nil { return nil, err } } b.valueCache = cache.New( b.checkInterval/2, b.checkInterval/4, ) return b, nil } type builder struct { checkInterval time.Duration reporter event.Reporter valueCache *cache.Cache hostname string pathMapper *pathMapper etcGroupPath string nodeLabels map[string]string suiteMatcher SuiteMatcher ruleMatcher RuleMatcher dockerClient env.DockerClient auditClient env.AuditClient kubeClient env.KubeClient isLeaderFunc func() bool status *status } func (b *builder) Close() error { if b.dockerClient != nil { if err := b.dockerClient.Close(); err != nil { return err } } if b.auditClient != nil { if err := b.auditClient.Close(); err != nil 
{ return err } } return nil } func (b *builder) ChecksFromFile(file string, onCheck compliance.CheckVisitor) error { suite, err := compliance.ParseSuite(file) if err != nil { return err } if b.suiteMatcher != nil { if b.suiteMatcher(&suite.Meta) { log.Infof("%s/%s: matched suite in %s", suite.Meta.Name, suite.Meta.Version, file) } else { log.Tracef("%s/%s: skipped suite in %s", suite.Meta.Name, suite.Meta.Version, file) return nil } } log.Infof("%s/%s: loading suite from %s", suite.Meta.Name, suite.Meta.Version, file) matchedCount := 0 for _, r := range suite.Rules { if b.ruleMatcher != nil { if b.ruleMatcher(&r) { log.Infof("%s/%s: matched rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file) } else { log.Tracef("%s/%s: skipped rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file) continue } } matchedCount++ if len(r.Resources) == 0 { log.Infof("%s/%s: skipped rule %s - no configured resources", suite.Meta.Name, suite.Meta.Version, r.ID) continue } log.Debugf("%s/%s: loading rule %s", suite.Meta.Name, suite.Meta.Version, r.ID) check, err := b.checkFromRule(&suite.Meta, &r) if err != nil { if err != ErrRuleDoesNotApply { log.Warnf("%s/%s: failed to load rule %s: %v", suite.Meta.Name, suite.Meta.Version, r.ID, err) } log.Infof("%s/%s: skipped rule %s - does not apply to this system", suite.Meta.Name, suite.Meta.Version, r.ID) } if b.status != nil { b.status.addCheck(&compliance.CheckStatus{ RuleID: r.ID, Description: r.Description, Name: compliance.CheckName(r.ID, r.Description), Framework: suite.Meta.Framework, Source: suite.Meta.Source, Version: suite.Meta.Version, InitError: err, }) } ok := onCheck(&r, check, err) if !ok { log.Infof("%s/%s: stopping rule enumeration", suite.Meta.Name, suite.Meta.Version) return err } } if b.ruleMatcher != nil && matchedCount == 0 { log.Infof("%s/%s: no rules matched", suite.Meta.Name, suite.Meta.Version) } return nil } func (b *builder) GetCheckStatus() compliance.CheckStatusList { if b.status != nil { 
return b.status.getChecksStatus() } return compliance.CheckStatusList{} } func (b *builder) checkFromRule(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.Check, error) { ruleScope, err := getRuleScope(meta, rule) if err != nil { return nil, err } eligible, err := b.hostMatcher(ruleScope, rule) if err != nil { return nil, err } if !eligible { log.Debugf("rule %s/%s discarded by hostMatcher", meta.Framework, rule.ID) return nil, ErrRuleDoesNotApply } return b.newCheck(meta, ruleScope, rule) } func getRuleScope(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.RuleScope, error) { switch { case rule.Scope.Includes(compliance.DockerScope): return compliance.DockerScope, nil case rule.Scope.Includes(compliance.KubernetesNodeScope): return compliance.KubernetesNodeScope, nil case rule.Scope.Includes(compliance.KubernetesClusterScope): return compliance.KubernetesClusterScope, nil default: return "", ErrRuleScopeNotSupported } } func (b *builder) hostMatcher(scope compliance.RuleScope, rule *compliance.Rule) (bool, error) { switch scope { case compliance.DockerScope: if b.dockerClient == nil { log.Infof("rule %s skipped - not running in a docker environment", rule.ID) return false, nil } case compliance.KubernetesClusterScope: if b.kubeClient == nil { log.Infof("rule %s skipped - not running as Cluster Agent", rule.ID) return false, nil } case compliance.KubernetesNodeScope: if config.IsKubernetes() { return b.isKubernetesNodeEligible(rule.HostSelector) } log.Infof("rule %s skipped - not running on a Kubernetes node", rule.ID) return false, nil } return true, nil } func (b *builder) isKubernetesNodeEligible(hostSelector string) (bool, error) { if hostSelector == "" { return true, nil } expr, err := eval.ParseExpression(hostSelector) if err != nil { return false, err } nodeInstance := &eval.Instance{ Functions: eval.FunctionMap{ "node.hasLabel": b.nodeHasLabel, "node.label": b.nodeLabel, }, Vars: eval.VarMap{ "node.labels": b.nodeLabelKeys(), }, 
} result, err := expr.Evaluate(nodeInstance) if err != nil { return false, err } eligible, ok := result.(bool) if !ok { return false, fmt.Errorf("hostSelector %q does not evaluate to a boolean value", hostSelector) } return eligible, nil } func (b *builder) getNodeLabel(args ...interface{}) (string, bool, error) { if len(args) == 0 { return "", false, errors.New(`expecting one argument for label`) } label, ok := args[0].(string) if !ok { return "", false, fmt.Errorf(`expecting string value for label argument`) } if b.nodeLabels == nil { return "", false, nil } v, ok := b.nodeLabels[label] return v, ok, nil } func (b *builder) nodeHasLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) { _, ok, err := b.getNodeLabel(args...) return ok, err } func (b *builder) nodeLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) { v, _, err := b.getNodeLabel(args...) return v, err } func (b *builder) nodeLabelKeys() []string { var keys []string for k := range b.nodeLabels { keys = append(keys, k) } return keys } func (b *builder) newCheck(meta *compliance.SuiteMeta, ruleScope compliance.RuleScope, rule *compliance.Rule) (compliance.Check, error) { checkable, err := newResourceCheckList(b, rule.ID, rule.Resources) if err != nil { return nil, err } var notify eventNotify if b.status != nil { notify = b.status.updateCheck } // We capture err as configuration error but do not prevent check creation return &complianceCheck{ Env: b, ruleID: rule.ID, description: rule.Description, interval: b.checkInterval, suiteMeta: meta, // For now we are using rule scope (e.g. 
docker, kubernetesNode) as resource type resourceType: string(ruleScope), resourceID: b.hostname, checkable: checkable, eventNotify: notify, }, nil } func (b *builder) Reporter() event.Reporter { return b.reporter } func (b *builder) DockerClient() env.DockerClient { return b.dockerClient } func (b *builder) AuditClient() env.AuditClient { return b.auditClient } func (b *builder) KubeClient() env.KubeClient { return b.kubeClient } func (b *builder) Hostname() string { return b.hostname } func (b *builder) EtcGroupPath() string { return b.etcGroupPath } func (b *builder) NormalizeToHostRoot(path string) string { if b.pathMapper == nil { return path } return b.pathMapper.normalizeToHostRoot(path) } func (b *builder) RelativeToHostRoot(path string) string { if b.pathMapper == nil { return path } return b.pathMapper.relativeToHostRoot(path) } func (b *builder) IsLeader() bool
func (b *builder) EvaluateFromCache(ev eval.Evaluatable) (interface{}, error) { instance := &eval.Instance{ Functions: eval.FunctionMap{ builderFuncShell: b.withValueCache(builderFuncShell, evalCommandShell), builderFuncExec: b.withValueCache(builderFuncExec, evalCommandExec), builderFuncProcessFlag: b.withValueCache(builderFuncProcessFlag, evalProcessFlag), builderFuncJSON: b.withValueCache(builderFuncJSON, b.evalValueFromFile(jsonGetter)), builderFuncYAML: b.withValueCache(builderFuncYAML, b.evalValueFromFile(yamlGetter)), }, } return ev.Evaluate(instance) } func (b *builder) withValueCache(funcName string, fn eval.Function) eval.Function { return func(instance *eval.Instance, args ...interface{}) (interface{}, error) { var sargs []string for _, arg := range args { sargs = append(sargs, fmt.Sprintf("%v", arg)) } key := fmt.Sprintf("%s(%s)", funcName, strings.Join(sargs, ",")) if v, ok := b.valueCache.Get(key); ok { return v, nil } v, err := fn(instance, args...) if err == nil { b.valueCache.Set(key, v, cache.DefaultExpiration) } return v, err } } func evalCommandShell(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) == 0 { return nil, errors.New(`expecting at least one argument`) } command, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for command argument`) } var shellAndArgs []string if len(args) > 1 { for _, arg := range args[1:] { s, ok := arg.(string) if !ok { return nil, fmt.Errorf(`expecting only string value for shell command and arguments`) } shellAndArgs = append(shellAndArgs, s) } } return valueFromShellCommand(command, shellAndArgs...) 
} func valueFromShellCommand(command string, shellAndArgs ...string) (interface{}, error) { log.Debugf("Resolving value from shell command: %s, args [%s]", command, strings.Join(shellAndArgs, ",")) shellCmd := &compliance.ShellCmd{ Run: command, } if len(shellAndArgs) > 0 { shellCmd.Shell = &compliance.BinaryCmd{ Name: shellAndArgs[0], Args: shellAndArgs[1:], } } execCommand := shellCmdToBinaryCmd(shellCmd) exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout) if exitCode != 0 || err != nil { return nil, fmt.Errorf("command '%v' execution failed, error: %v", command, err) } return stdout, nil } func evalCommandExec(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) == 0 { return nil, errors.New(`expecting at least one argument`) } var cmdArgs []string for _, arg := range args { s, ok := arg.(string) if !ok { return nil, fmt.Errorf(`expecting only string values for arguments`) } cmdArgs = append(cmdArgs, s) } return valueFromBinaryCommand(cmdArgs[0], cmdArgs[1:]...) 
} func valueFromBinaryCommand(name string, args ...string) (interface{}, error) { log.Debugf("Resolving value from command: %s, args [%s]", name, strings.Join(args, ",")) execCommand := &compliance.BinaryCmd{ Name: name, Args: args, } exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout) if exitCode != 0 || err != nil { return nil, fmt.Errorf("command '%v' execution failed, error: %v", execCommand, err) } return stdout, nil } func evalProcessFlag(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) != 2 { return nil, errors.New(`expecting two arguments`) } name, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for process name argument`) } flag, ok := args[1].(string) if !ok { return nil, fmt.Errorf(`expecting string value for process flag argument`) } return valueFromProcessFlag(name, flag) } func valueFromProcessFlag(name string, flag string) (interface{}, error) { log.Debugf("Resolving value from process: %s, flag %s", name, flag) processes, err := getProcesses(cacheValidity) if err != nil { return "", fmt.Errorf("unable to fetch processes: %w", err) } matchedProcesses := processes.findProcessesByName(name) for _, mp := range matchedProcesses { flagValues := parseProcessCmdLine(mp.Cmdline) return flagValues[flag], nil } return "", fmt.Errorf("failed to find process: %s", name) } func (b *builder) evalValueFromFile(get getter) eval.Function { return func(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) != 2 { return nil, fmt.Errorf(`invalid number of arguments, expecting 1 got %d`, len(args)) } path, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for path argument`) } path = b.NormalizeToHostRoot(path) query, ok := args[1].(string) if !ok { return nil, fmt.Errorf(`expecting string value for query argument`) } return queryValueFromFile(path, query, get) } }
{ if b.isLeaderFunc != nil { return b.isLeaderFunc() } return true }
identifier_body
builder.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-2020 Datadog, Inc. // Package checks implements Compliance Agent checks package checks import ( "errors" "fmt" "strings" "time" "github.com/DataDog/datadog-agent/pkg/compliance" "github.com/DataDog/datadog-agent/pkg/compliance/checks/env" "github.com/DataDog/datadog-agent/pkg/compliance/eval" "github.com/DataDog/datadog-agent/pkg/compliance/event" "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/hostinfo" "github.com/DataDog/datadog-agent/pkg/util/log" cache "github.com/patrickmn/go-cache" ) // ErrResourceNotSupported is returned when resource type is not supported by Builder var ErrResourceNotSupported = errors.New("resource type not supported") // ErrRuleScopeNotSupported is returned when resource scope is not supported var ErrRuleScopeNotSupported = errors.New("rule scope not supported") // ErrRuleDoesNotApply is returned when a rule cannot be applied to the current environment var ErrRuleDoesNotApply = errors.New("rule does not apply to this environment") const ( builderFuncExec = "exec" builderFuncShell = "shell" builderFuncProcessFlag = "process.flag" builderFuncJSON = "json" builderFuncYAML = "yaml" ) // Builder defines an interface to build checks from rules type Builder interface { ChecksFromFile(file string, onCheck compliance.CheckVisitor) error GetCheckStatus() compliance.CheckStatusList Close() error } // BuilderOption defines a configuration option for the builder type BuilderOption func(*builder) error // WithInterval configures default check interval func WithInterval(interval time.Duration) BuilderOption { return func(b *builder) error { b.checkInterval = interval return nil } } // WithHostname configures hostname used by checks func WithHostname(hostname string) BuilderOption { 
return func(b *builder) error { b.hostname = hostname return nil } } // WithHostRootMount defines host root filesystem mount location func WithHostRootMount(hostRootMount string) BuilderOption { return func(b *builder) error { log.Infof("Host root filesystem will be remapped to %s", hostRootMount) b.pathMapper = &pathMapper{ hostMountPath: hostRootMount, } return nil } } // WithDocker configures using docker func WithDocker() BuilderOption { return func(b *builder) error { cli, err := newDockerClient() if err == nil { b.dockerClient = cli } return err } } // WithDockerClient configurs specific docker client func WithDockerClient(cli env.DockerClient) BuilderOption { return func(b *builder) error { b.dockerClient = cli return nil } } // WithAudit configures using audit checks func WithAudit() BuilderOption { return func(b *builder) error { cli, err := newAuditClient() if err == nil { b.auditClient = cli } return err } } // WithAuditClient configures using specific audit client func WithAuditClient(cli env.AuditClient) BuilderOption { return func(b *builder) error { b.auditClient = cli return nil } } // WithKubernetesClient allows specific Kubernetes client func WithKubernetesClient(cli env.KubeClient) BuilderOption { return func(b *builder) error { b.kubeClient = cli return nil } } // WithIsLeader allows check runner to know if its a leader instance or not (DCA) func WithIsLeader(isLeader func() bool) BuilderOption { return func(b *builder) error { b.isLeaderFunc = isLeader return nil } } // SuiteMatcher checks if a compliance suite is included type SuiteMatcher func(*compliance.SuiteMeta) bool // WithMatchSuite configures builder to use a suite matcher func WithMatchSuite(matcher SuiteMatcher) BuilderOption { return func(b *builder) error { b.suiteMatcher = matcher return nil } } // RuleMatcher checks if a compliance rule is included type RuleMatcher func(*compliance.Rule) bool // WithMatchRule configures builder to use a suite matcher func WithMatchRule(matcher 
RuleMatcher) BuilderOption { return func(b *builder) error { b.ruleMatcher = matcher return nil } } // MayFail configures a builder option to succeed on failures and logs an error func MayFail(o BuilderOption) BuilderOption { return func(b *builder) error { if err := o(b); err != nil { log.Warnf("Ignoring builder initialization failure: %v", err) } return nil } } // WithNodeLabels configures a builder to use specified Kubernetes node labels func WithNodeLabels(nodeLabels map[string]string) BuilderOption { return func(b *builder) error { b.nodeLabels = map[string]string{} for k, v := range nodeLabels { k, v := hostinfo.LabelPreprocessor(k, v) b.nodeLabels[k] = v } return nil } } // IsFramework matches a compliance suite by the name of the framework func IsFramework(framework string) SuiteMatcher { return func(s *compliance.SuiteMeta) bool { return s.Framework == framework } } // IsRuleID matches a compliance rule by ID func IsRuleID(ruleID string) RuleMatcher { return func(r *compliance.Rule) bool { return r.ID == ruleID } } // NewBuilder constructs a check builder func NewBuilder(reporter event.Reporter, options ...BuilderOption) (Builder, error) { b := &builder{ reporter: reporter, checkInterval: 20 * time.Minute, etcGroupPath: "/etc/group", status: newStatus(), } for _, o := range options { if err := o(b); err != nil { return nil, err } } b.valueCache = cache.New( b.checkInterval/2, b.checkInterval/4, ) return b, nil } type builder struct { checkInterval time.Duration reporter event.Reporter valueCache *cache.Cache hostname string pathMapper *pathMapper etcGroupPath string nodeLabels map[string]string suiteMatcher SuiteMatcher ruleMatcher RuleMatcher dockerClient env.DockerClient auditClient env.AuditClient kubeClient env.KubeClient isLeaderFunc func() bool status *status } func (b *builder) Close() error { if b.dockerClient != nil { if err := b.dockerClient.Close(); err != nil { return err } } if b.auditClient != nil { if err := b.auditClient.Close(); err != nil 
{ return err } } return nil } func (b *builder) ChecksFromFile(file string, onCheck compliance.CheckVisitor) error { suite, err := compliance.ParseSuite(file) if err != nil { return err } if b.suiteMatcher != nil { if b.suiteMatcher(&suite.Meta) { log.Infof("%s/%s: matched suite in %s", suite.Meta.Name, suite.Meta.Version, file) } else { log.Tracef("%s/%s: skipped suite in %s", suite.Meta.Name, suite.Meta.Version, file) return nil } } log.Infof("%s/%s: loading suite from %s", suite.Meta.Name, suite.Meta.Version, file) matchedCount := 0 for _, r := range suite.Rules { if b.ruleMatcher != nil { if b.ruleMatcher(&r) { log.Infof("%s/%s: matched rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file) } else { log.Tracef("%s/%s: skipped rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file) continue } } matchedCount++ if len(r.Resources) == 0 { log.Infof("%s/%s: skipped rule %s - no configured resources", suite.Meta.Name, suite.Meta.Version, r.ID) continue } log.Debugf("%s/%s: loading rule %s", suite.Meta.Name, suite.Meta.Version, r.ID) check, err := b.checkFromRule(&suite.Meta, &r) if err != nil { if err != ErrRuleDoesNotApply { log.Warnf("%s/%s: failed to load rule %s: %v", suite.Meta.Name, suite.Meta.Version, r.ID, err) } log.Infof("%s/%s: skipped rule %s - does not apply to this system", suite.Meta.Name, suite.Meta.Version, r.ID) } if b.status != nil { b.status.addCheck(&compliance.CheckStatus{ RuleID: r.ID, Description: r.Description, Name: compliance.CheckName(r.ID, r.Description), Framework: suite.Meta.Framework, Source: suite.Meta.Source, Version: suite.Meta.Version, InitError: err, }) } ok := onCheck(&r, check, err) if !ok { log.Infof("%s/%s: stopping rule enumeration", suite.Meta.Name, suite.Meta.Version) return err } } if b.ruleMatcher != nil && matchedCount == 0 { log.Infof("%s/%s: no rules matched", suite.Meta.Name, suite.Meta.Version) } return nil } func (b *builder) GetCheckStatus() compliance.CheckStatusList { if b.status != nil { 
return b.status.getChecksStatus() } return compliance.CheckStatusList{} } func (b *builder) checkFromRule(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.Check, error) { ruleScope, err := getRuleScope(meta, rule) if err != nil { return nil, err } eligible, err := b.hostMatcher(ruleScope, rule) if err != nil { return nil, err } if !eligible { log.Debugf("rule %s/%s discarded by hostMatcher", meta.Framework, rule.ID) return nil, ErrRuleDoesNotApply } return b.newCheck(meta, ruleScope, rule) } func getRuleScope(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.RuleScope, error) { switch { case rule.Scope.Includes(compliance.DockerScope): return compliance.DockerScope, nil case rule.Scope.Includes(compliance.KubernetesNodeScope): return compliance.KubernetesNodeScope, nil case rule.Scope.Includes(compliance.KubernetesClusterScope): return compliance.KubernetesClusterScope, nil default: return "", ErrRuleScopeNotSupported } } func (b *builder) hostMatcher(scope compliance.RuleScope, rule *compliance.Rule) (bool, error) { switch scope { case compliance.DockerScope: if b.dockerClient == nil { log.Infof("rule %s skipped - not running in a docker environment", rule.ID) return false, nil } case compliance.KubernetesClusterScope: if b.kubeClient == nil { log.Infof("rule %s skipped - not running as Cluster Agent", rule.ID) return false, nil } case compliance.KubernetesNodeScope: if config.IsKubernetes() { return b.isKubernetesNodeEligible(rule.HostSelector) } log.Infof("rule %s skipped - not running on a Kubernetes node", rule.ID) return false, nil } return true, nil } func (b *builder) isKubernetesNodeEligible(hostSelector string) (bool, error) { if hostSelector == "" { return true, nil } expr, err := eval.ParseExpression(hostSelector) if err != nil { return false, err } nodeInstance := &eval.Instance{ Functions: eval.FunctionMap{ "node.hasLabel": b.nodeHasLabel, "node.label": b.nodeLabel, }, Vars: eval.VarMap{ "node.labels": b.nodeLabelKeys(), }, 
} result, err := expr.Evaluate(nodeInstance) if err != nil { return false, err } eligible, ok := result.(bool) if !ok { return false, fmt.Errorf("hostSelector %q does not evaluate to a boolean value", hostSelector) } return eligible, nil } func (b *builder) getNodeLabel(args ...interface{}) (string, bool, error) { if len(args) == 0 { return "", false, errors.New(`expecting one argument for label`) } label, ok := args[0].(string) if !ok { return "", false, fmt.Errorf(`expecting string value for label argument`) } if b.nodeLabels == nil { return "", false, nil } v, ok := b.nodeLabels[label] return v, ok, nil } func (b *builder) nodeHasLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) { _, ok, err := b.getNodeLabel(args...) return ok, err } func (b *builder) nodeLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) { v, _, err := b.getNodeLabel(args...) return v, err } func (b *builder) nodeLabelKeys() []string { var keys []string for k := range b.nodeLabels { keys = append(keys, k) } return keys } func (b *builder) newCheck(meta *compliance.SuiteMeta, ruleScope compliance.RuleScope, rule *compliance.Rule) (compliance.Check, error) { checkable, err := newResourceCheckList(b, rule.ID, rule.Resources) if err != nil { return nil, err } var notify eventNotify if b.status != nil { notify = b.status.updateCheck } // We capture err as configuration error but do not prevent check creation return &complianceCheck{ Env: b, ruleID: rule.ID, description: rule.Description, interval: b.checkInterval, suiteMeta: meta, // For now we are using rule scope (e.g. 
docker, kubernetesNode) as resource type resourceType: string(ruleScope), resourceID: b.hostname, checkable: checkable, eventNotify: notify, }, nil } func (b *builder) Reporter() event.Reporter { return b.reporter } func (b *builder) DockerClient() env.DockerClient { return b.dockerClient } func (b *builder) AuditClient() env.AuditClient { return b.auditClient } func (b *builder) KubeClient() env.KubeClient { return b.kubeClient } func (b *builder) Hostname() string { return b.hostname } func (b *builder) EtcGroupPath() string { return b.etcGroupPath } func (b *builder) NormalizeToHostRoot(path string) string { if b.pathMapper == nil { return path } return b.pathMapper.normalizeToHostRoot(path) } func (b *builder) RelativeToHostRoot(path string) string { if b.pathMapper == nil { return path } return b.pathMapper.relativeToHostRoot(path) } func (b *builder) IsLeader() bool { if b.isLeaderFunc != nil { return b.isLeaderFunc() } return true } func (b *builder) EvaluateFromCache(ev eval.Evaluatable) (interface{}, error) { instance := &eval.Instance{ Functions: eval.FunctionMap{ builderFuncShell: b.withValueCache(builderFuncShell, evalCommandShell), builderFuncExec: b.withValueCache(builderFuncExec, evalCommandExec), builderFuncProcessFlag: b.withValueCache(builderFuncProcessFlag, evalProcessFlag), builderFuncJSON: b.withValueCache(builderFuncJSON, b.evalValueFromFile(jsonGetter)), builderFuncYAML: b.withValueCache(builderFuncYAML, b.evalValueFromFile(yamlGetter)), }, } return ev.Evaluate(instance) } func (b *builder) withValueCache(funcName string, fn eval.Function) eval.Function { return func(instance *eval.Instance, args ...interface{}) (interface{}, error) { var sargs []string for _, arg := range args { sargs = append(sargs, fmt.Sprintf("%v", arg)) } key := fmt.Sprintf("%s(%s)", funcName, strings.Join(sargs, ",")) if v, ok := b.valueCache.Get(key); ok { return v, nil } v, err := fn(instance, args...) 
if err == nil { b.valueCache.Set(key, v, cache.DefaultExpiration) } return v, err } } func evalCommandShell(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) == 0 { return nil, errors.New(`expecting at least one argument`) } command, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for command argument`) } var shellAndArgs []string if len(args) > 1 { for _, arg := range args[1:] { s, ok := arg.(string) if !ok { return nil, fmt.Errorf(`expecting only string value for shell command and arguments`) } shellAndArgs = append(shellAndArgs, s) } } return valueFromShellCommand(command, shellAndArgs...) } func valueFromShellCommand(command string, shellAndArgs ...string) (interface{}, error) { log.Debugf("Resolving value from shell command: %s, args [%s]", command, strings.Join(shellAndArgs, ",")) shellCmd := &compliance.ShellCmd{ Run: command, } if len(shellAndArgs) > 0 { shellCmd.Shell = &compliance.BinaryCmd{ Name: shellAndArgs[0], Args: shellAndArgs[1:], } } execCommand := shellCmdToBinaryCmd(shellCmd) exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout) if exitCode != 0 || err != nil { return nil, fmt.Errorf("command '%v' execution failed, error: %v", command, err) } return stdout, nil } func evalCommandExec(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) == 0 { return nil, errors.New(`expecting at least one argument`) } var cmdArgs []string for _, arg := range args { s, ok := arg.(string) if !ok { return nil, fmt.Errorf(`expecting only string values for arguments`) } cmdArgs = append(cmdArgs, s) } return valueFromBinaryCommand(cmdArgs[0], cmdArgs[1:]...) 
} func valueFromBinaryCommand(name string, args ...string) (interface{}, error) { log.Debugf("Resolving value from command: %s, args [%s]", name, strings.Join(args, ",")) execCommand := &compliance.BinaryCmd{ Name: name, Args: args, } exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout) if exitCode != 0 || err != nil { return nil, fmt.Errorf("command '%v' execution failed, error: %v", execCommand, err) } return stdout, nil } func evalProcessFlag(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) != 2 { return nil, errors.New(`expecting two arguments`) } name, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for process name argument`) } flag, ok := args[1].(string) if !ok { return nil, fmt.Errorf(`expecting string value for process flag argument`) } return valueFromProcessFlag(name, flag) } func valueFromProcessFlag(name string, flag string) (interface{}, error) { log.Debugf("Resolving value from process: %s, flag %s", name, flag) processes, err := getProcesses(cacheValidity) if err != nil { return "", fmt.Errorf("unable to fetch processes: %w", err) } matchedProcesses := processes.findProcessesByName(name) for _, mp := range matchedProcesses
return "", fmt.Errorf("failed to find process: %s", name) } func (b *builder) evalValueFromFile(get getter) eval.Function { return func(_ *eval.Instance, args ...interface{}) (interface{}, error) { if len(args) != 2 { return nil, fmt.Errorf(`invalid number of arguments, expecting 1 got %d`, len(args)) } path, ok := args[0].(string) if !ok { return nil, fmt.Errorf(`expecting string value for path argument`) } path = b.NormalizeToHostRoot(path) query, ok := args[1].(string) if !ok { return nil, fmt.Errorf(`expecting string value for query argument`) } return queryValueFromFile(path, query, get) } }
{ flagValues := parseProcessCmdLine(mp.Cmdline) return flagValues[flag], nil }
conditional_block
base_function.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 2018/12/14 15:11 # @Author : zsj # @File : base_function.py import os import pickle import re import time from datetime import datetime import numpy as np import matplotlib.pyplot as plt from django_redis import get_redis_connection from db.mysql_operation import connectdb, query_datas, closedb, query_table, create_table, insert_train_datas, \ update_datas, query_model_info, insert_file2uuid, query_uuid_from_file2uuid_by_filename from isolate_model.isolate_class import Isolate def load_csv(file_name): """ 使用numpy加载csv文件,并把除了host_id的都转换成float类型,因为孤立森林只能判别数值类型 csv文件的格式是: host_id(主机群和主机标识), timestamp, kpi_1, kpi_2, kpi_3, kpi_4..... :param file_name: 要解析的csv文件名 :return: """ array = np.loadtxt(file_name, dtype = str, delimiter = ",") # array = np.loadtxt(file_name, dtype = str, delimiter = ",", encoding = 'utf-8') print(type(array)) return array def show_csv(array, array_x, array_y): """ 将读取的csv文件中某两列取出来作为图形展示的x轴和y轴 :param array:转置后的数组 :param array_x:数组第x列,一般来说x轴是时间 :param array_y:同上 :return: """ # 从第三个值开始取,因为第一个是host_id,第二个是时间戳 x_value = array[1:, array_x] y_value = array[1:, array_y] # 获取label标签,知道是那两行作图 label_x = array[0, array_x] label_y = array[0, array_y] if "timestamp" in label_x: # 一般来说x轴都是时间戳 # x_value = [format_time(x) for x in x_value] x_value = [x for x in x_value] else: x_value = [float(x) for x in x_value] y_value = [float(y) for y in y_value] plt.plot(x_value, y_value, c = 'r', ls = '--', marker = 'o', lw = 1.5, label = label_x) plt.xticks(range(0, len(x_value), int(len(x_value) / 30)), rotation = 90) # plt.figure(dpi=128, figsize=(10, 6)) plt.xlabel(label_x) plt.ylabel(label_y) plt.show() def timestamp_to_time(timestamp): """ 单个时间戳转换成时间,格式为2018-12-14 19:00:00 :param timestamp: :return: """ timestamp = int(timestamp) time_local = time.localtime(timestamp) return time.strftime("%Y-%m-%d: %H:%M:%S", time_local) def simplify_timestamp(timestamps): """ 时间戳批量转换成时间 :param timestamps: 时间戳list :return: """ 
return [timestamp_to_time(timestamp) for timestamp in timestamps] def get_uniform_cases(arrays, size=257): """ 由于传入的测试集不可能刚好是256个,所以需要均匀取周期内的256个case作为测试集 仅仅用于训练模型时使用 :param arrays:测试集数组 :param size:int, 要求均匀分为的份额,一般为256,用户可以自己设置,第一行为标签 :return: """ length = len(arrays) if length < 200: print("测试集大小:", length) return "测试集数据小于200,请重新传入大于200条数据的测试集" elif length < 256: print("测试集大小:", length) return arrays indexs = np.linspace(0, length - 1, size) indexs = np.array(indexs, dtype = int) res_arr = arrays[indexs] print("测试集大小:", len(indexs) - 1) return res_arr def format_time(time): """ 将传入的时间格式化,转换成没有秒的时间格式 yyyy-MM-DD hh-mm :param time: :return: """ # year, month, day, hour, minute, scend = re.split(r"/| |:", time) # print(year, month, day, hour, minute, scend) return time[0:-3] def draw_with_diff_color(np_array): """ 根据标签展示散点图,不同的标签具有不同颜色 :param np_array: :return: """ red_arr = [] green_arr = [] for arr in np_array: if arr[-1] == '0': red_arr.append(arr) else: green_arr.append(arr) print(red_arr) print(green_arr) def save_datas_with_labels(file_name, abnormal_rate): """ 存储已经由孤立森林学习过的带有标签的数据 :return:True or False """ cases = load_csv(file_name) # file_name是文件路径名 print("file name", file_name) # 文件名 title = file_name.split("/")[-1] print(type(title), title) isolate1 = Isolate('isolate', cases, rate = abnormal_rate) np_array = isolate1.merge_arrays() table_name = np_array[1, 0] db = connectdb() if not query_table(db, table_name): create_table(db, np_array[0], table_name) # 插入数据,表名为uuid if insert_train_datas(db, table_name, np_array[1:]): # 数据集列表存储表名(redis存储),断电就清空 redis_conn = get_redis_connection("default") redis_conn.sadd('data_set_name', title) # sv.data_set.append(title) # 存储数据集表名(磁盘存储),断电可恢复 save_dataset_name_to_file(title) # 存储文件与UUID对应关系到file2uuid表中 insert_file2uuid(title, table_name) return True return False def save_dataset_name_to_file(file_name): """ 将文件名存储到磁盘中,断电重启时能够保证继续运行 :param file_name: 文件名称 :return: """ print(os.getcwd()) file_path = 
"./models_file/data_set_name" with open(file_path, 'a+') as file: file.write(file_name + "\n") def load_dataset_name_to_list(): """ 加载磁盘文件中数据集名到缓存中,data_set :return: """ file_path = "./models_file/data_set_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('data_set_name', line) def load_xgboost_name_to_list(): """ 加载磁盘文件中XGBoost模型名到缓存中,xgboost_name :return: """ file_path = "./models_file/xgboost_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('xgboost_name', line) def load_lstm_name_to_list(): """ 加载磁盘文件中LSTM模型名到缓存中,lstm_name :return: """ file_path = "./models_file/lstm_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('lstm_name', line) # def load_lstm_name_to_dict(): # """ # 加载磁盘文件中LSTM模型到内存中,lstm_name # :return: # """ # file_path = "./models_file/lstm_name" # with open(file_path, 'r') as file: # lines = file.read().splitlines() # for line in lines: # if line is None or line == "": # continue # elif line not in sv.lstm_model_dict.keys(): # sv.lstm_model_dict[line] = load_lstm_class(line) # print("lstm---------------------", sv.lstm_model_dict) # # # def load_xgboost_name_to_dict(): # """ # 加载磁盘文件中LSTM模型到内存中,lstm_name # :return: # """ # file_path = "./models_file/xgboost_name" # with open(file_path, 'r') as file: # lines = file.read().splitlines() # for line in lines: # if line is None or line == "": # continue # elif line not in sv.xgboost_model_dict.keys(): # sv.xgboost_model_dict[line] = load_xgboost_class(line) # print("xgboost-------------------", sv.xgboost_model_dict) def load_datas_from_disk_to_memory(): 
load_dataset_name_to_list() load_xgboost_name_to_list() load_lstm_name_to_list() def str_to_time_hour_minute(time): week = datetime.strptime(re.split(r" ", time)[0], "%Y/%m/%d").weekday() year, month, day, hour, minute, secend = re.split(r"[/ :]", time) return [hour, minute, week] def use_XGBoost_predict(json_data): """ 使用已训练的XGBoost模型检测异常 :param json_data: :return: """ model_name = json_data["host_id"] times = datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S') print(times.hour) print(type(times)) predict_list = [model_name, datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S'), json_data["kpi"]] print(predict_list) predict_array = np.array(predict_list) # 转换成XGBoost能使用的数据格式 tmp = translate_to_xgboost_datas_from_mysql(predict_array.reshape(1, 3)) # 由于频次较低,每次从磁盘文件中读取模型然后判断 XGBoost_model = load_xgboost_class(model_name) print("load xgboost0000000000000000000000") print("model name", XGBoost_model.name) return XGBoost_model.predict(tmp) def translate_to_xgboost_datas_from_realtime(): pass def translate_to_xgboost_datas_from_mysql(np_array): """ 将数据转换成xgboost能够识别的数据,仅仅在时间格式上转换,其他列不变 :param np_array:输入的数组 :return:时间转换后的数组,仅仅在时间上做出改变,其他列不变 """ # 删除id列 np_array = np.delete(np_array, 0, axis = 1) # 获取时间列 time_array = np_array[:, 0] # 删除时间列 np_array = np.delete(np_array, 0, axis = 1) hour = [] minute = [] week = [] for time in time_array: hour.append(time.hour) minute.append(time.minute) week.append(time.weekday()) np_array = np.insert(np_array, 0, values = minute, axis = 1) np_array = np.insert(np_array, 0, values = hour, axis = 1) np_array = np.insert(np_array, 0, values = week, axis = 1) # 此时返回的属性分别是 week, hour, minute, kpi_1... 
kpi_n,label return np_array def load_data_for_xgboost_from_mysql(table_name, number_data=20000): """ 从数据库为xgboost模型读取数据,并进行时间格式转换 :param number_data: 取最后多少个数据来训练或者预测 :param table_name: 要读取的表名 :return: """ db = connectdb() np_array = np.array(query_datas(db, table_name = table_name, number = number_data)) # # 删除id列 # np_array = np.delete(np_array, 0, axis = 1) # # 获取时间列 # time_array = np_array[:, 0] # # 删除时间列 # np_array = np.delete(np_array, 0, axis = 1) # hour = [] # minute = [] # week = [] # for time in time_array: # hour.append(time.hour) # minute.append(time.minute) # week.append(time.weekday()) # np_array = np.insert(np_array, 0, values = minute, axis = 1) # np_array = np.insert(np_array, 0, values = hour, axis = 1) # np_array = np.insert(np_array, 0, values = week, axis = 1) np_array = translate_to_xgboost_datas_from_mysql(np_array) closedb(db) # 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label return np_array def load_data_for_lstm_from_mysql(table_name, number_data=20000): """ 从数据库为lstm模型读取一天的数据 :param number_data: 取最后多少个数据来训练或者预测 :param table_name: 表名 :param end_time: 最后截止时间,即什么时刻开始预测 :return: """ db = connectdb() np_array = np.array(query_datas(db, table_name = table_name, number = number_data)) closedb(db) return np_array[:, -2]
:param model: :return: """ # 存储模型 print("sava_xgboost_path", os.getcwd()) file_name = "./models_file/xgboost/%s" % model.name with open(file_name, 'wb') as file_obj: pickle.dump(model, file_obj) # 存储名称 file_model_name = "./models_file/xgboost_name" with open(file_model_name, 'a+') as name_obj: name_obj.write(model.file_name + "\n") def load_xgboost_class(model_name): """ 根据模型名称加载模型,返回model :param model_name:模型名 :return: 返回模型 """ file_name = "./models_file/xgboost/%s" % model_name # return pickle.load(open(file_name, "rb")) print(os.getcwd()) print(file_name) with open(file_name, 'rb') as f: xgboost_class = pickle.load(f) return xgboost_class def save_lstm_class(LSTM_model): """ lstm 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称 :param model: :return: """ # 存储模型 file_name = "./models_file/lstm/%s" % LSTM_model.name print("save lstm path", file_name) with open(file_name, 'wb') as file_obj: pickle.dump(LSTM_model, file_obj) # 存储名称 file_model_name = "./models_file/lstm_name" with open(file_model_name, 'a+') as name_obj: name_obj.write(LSTM_model.file_name + "\n") def load_lstm_class(model_name): """ 根据模型名称加载模型,返回model :param model_name:模型名 :return: 返回模型 """ print(os.getcwd()) file_name = "./models_file/lstm/%s" % model_name print(file_name) with open(file_name, 'rb') as f: lstm_class = pickle.load(f) return lstm_class def print_model(model_kind, data_name): print("print_model", model_kind, data_name) def train_model(model_kind, data_name, force = 0): """训练模型""" redis_conn = get_redis_connection("default") print("类型", type(data_name)) print(data_name) uuid = query_uuid_from_file2uuid_by_filename(data_name) print(type(uuid), uuid) if model_kind == "XGBoost": # 多进程训练模型 if redis_conn.sismember("xgboost_name", data_name) and force != 1: return 0 else: from xgboost_model.xgboost_class import XGBoost xgboost_train = XGBoost(data_name, uuid) # 存储到redis中 redis_conn.hset('xgboost_model', data_name, pickle.dumps(xgboost_train)) redis_conn.sadd('xgboost_name', data_name) # 模型持久化 
save_xgboost_class(xgboost_train) print("xgboost_name", data_name) return 1 elif model_kind == 'LSTM': # 多进程训练模型 if redis_conn.sismember("lstm_name", data_name) and force != 1: print("存在000000000", data_name) return 0 else: print("训练过程0000000") print("类型", type(data_name)) from lstm_model.lstm_class import LSTMModel # data_name是文件名,uuid是文件唯一标识 lstm_train = LSTMModel(data_name, uuid) print("lasted", lstm_train.lasted_update) # 存储到redis中 redis_conn.hset('lstm_model', data_name, pickle.dumps(lstm_train)) redis_conn.sadd('lstm_name', data_name) # 模型持久化 save_lstm_class(lstm_train) print("xgboost_name", data_name) return 1 def get_datas_for_tag(table_name, start_time=0, end_time=0, label=(0, 1)): """ 按条件查询数据库表信息 :param table_name: :param start_time: :param end_time: :param label: :return: """ if len(label) > 1: label = (0, 1) else: label = int(label) uuid = query_uuid_from_file2uuid_by_filename(table_name) result = query_datas(connectdb(), table_name = uuid, label = label, start_time = start_time, end_time = end_time) return result def update_datas_for_tag(table_name, label, start_time=0, end_time=0): """ :param table_name: :param start_time: :param end_time: :param label: :return: """ uuid = query_uuid_from_file2uuid_by_filename(table_name) if update_datas(connectdb(), table_name = uuid, label = int(label), start_time = start_time, end_time = end_time): return get_datas_for_tag(table_name = table_name, start_time = start_time, end_time = end_time, label = label) def predict_future_30(table_name): """ 首先从redis中获取模型,如果没有则从磁盘中获取,并将模型存储至redis中 :param table_name:lstm模型名, :return: """ # redis连接池 uuid = query_uuid_from_file2uuid_by_filename(table_name) redis_conn = get_redis_connection("default") # 如果模型不存在于redis的lstm_model hash中就加载,如果在就直接从redis中获取模型进行判断 if not redis_conn.hexists('lstm_model', table_name): # 从磁盘中加载LSTM模型对象 lstm_model_tmp = load_lstm_class(uuid) # 初始化时就预测,避免因为graph冲突 lstm_model_tmp.model.predict(np.zeros((1, 1, 50))) # 将模型加到redis中 redis_conn.hset('lstm_model', 
uuid, pickle.dumps(lstm_model_tmp)) model_tmp = lstm_model_tmp else: # 从redis中获取模型对象 model_bytes = redis_conn.hget('lstm_model', table_name) # 解析为对象 model_tmp = pickle.loads(model_bytes) model_tmp.model.predict(np.zeros((1, 1, 50))) # 预测值 res = model_tmp.predict_values() # 设置横轴 predict_xAxis = list(range(1, len(res) + 1)) return predict_xAxis, res def get_model_info(kind): res = query_model_info(kind) print(res) print(len(res)) print(type(res))
def save_xgboost_class(model): """ xgboost 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称
random_line_split
base_function.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 2018/12/14 15:11 # @Author : zsj # @File : base_function.py import os import pickle import re import time from datetime import datetime import numpy as np import matplotlib.pyplot as plt from django_redis import get_redis_connection from db.mysql_operation import connectdb, query_datas, closedb, query_table, create_table, insert_train_datas, \ update_datas, query_model_info, insert_file2uuid, query_uuid_from_file2uuid_by_filename from isolate_model.isolate_class import Isolate def load_csv(file_name): """ 使用numpy加载csv文件,并把除了host_id的都转换成float类型,因为孤立森林只能判别数值类型 csv文件的格式是: host_id(主机群和主机标识), timestamp, kpi_1, kpi_2, kpi_3, kpi_4..... :param file_name: 要解析的csv文件名 :return: """ array = np.loadtxt(file_name, dtype = str, delimiter = ",") # array = np.loadtxt(file_name, dtype = str, delimiter = ",", encoding = 'utf-8') print(type(array)) return array def show_csv(array, array_x, array_y): """ 将读取的csv文件中某两列取出来作为图形展示的x轴和y轴 :param array:转置后的数组 :param array_x:数组第x列,一般来说x轴是时间 :param array_y:同上 :return: """ # 从第三个值开始取,因为第一个是host_id,第二个是时间戳 x_value = array[1:, array_x] y_value = array[1:, array_y] # 获取label标签,知道是那两行作图 label_x = array[0, array_x] label_y = array[0, array_y] if "timestamp" in label_x: # 一般来说x轴都是时间戳 # x_value = [format_time(x) for x in x_value] x_value = [x for x in x_value] else: x_value = [float(x) for x in x_value] y_value = [float(y) for y in y_value] plt.plot(x_value, y_value, c = 'r', ls = '--', marker = 'o', lw = 1.5, label = label_x) plt.xticks(range(0, len(x_value), int(len(x_value) / 30)), rotation = 90) # plt.figure(dpi=128, figsize=(10, 6)) plt.xlabel(label_x) plt.ylabel(label_y) plt.show() def timestamp_to_time(timestamp): """ 单个时间戳转换成时间,格式为2018-12-14 19:00:00 :param timestamp: :return: """ timestamp = int(timestamp) time_local = time.localtime(timestamp) return time.strftime("%Y-%m-%d: %H:%M:%S", time_local) def simplify_timestamp(timestamps): """ 时间戳批量转换成时间 :param timestamps: 时间戳list :return: """ 
return [timestamp_to_time(timestamp) for timestamp in timestamps] def get_uniform_cases(arrays, size=257): """ 由于传入的测试集不可能刚好是256个,所以需要均匀取周期内的256个case作为测试集 仅仅用于训练模型时使用 :param arrays:测试集数组 :param size:int, 要求均匀分为的份额,一般为256,用户可以自己设置,第一行为标签 :return: """ length = len(arrays) if length < 200: print("测试集大小:", length) return "测试集数据小于200,请重新传入大于200条数据的测试集" elif length < 256: print("测试集大小:", length) return arrays indexs = np.linspace(0, length - 1, size) indexs = np.array(indexs, dtype = int) res_arr = arrays[indexs] print("测试集大小:", len(indexs) - 1) return res_arr def format_time(time): """ 将传入的时间格式化,转换成没有秒的时间格式 yyyy-MM-DD hh-mm :param time: :return: """ # year, month, day, hour, minute, scend = re.split(r"/| |:", time) # print(year, month, day, hour, minute, scend) return time[0:-3] def draw_with_diff_color(np_array): """ 根据标签展示散点图,不同的标签具有不同颜色 :param np_array: :return: """ red_arr = [] green_arr = [] for arr in np_array: if arr[-1] == '0': red_arr.append(arr) else: green_arr.append(arr) print(red_arr) print(green_arr) def save_datas_with_labels(file_name, abnormal_rate): """ 存储已经由孤立森林学习过的带有标签的数据 :return:True or False """ cases = load_csv(file_name) # file_name是文件路径名 print("file name", file_name) # 文件名 title = file_name.split("/")[-1] print(type(title), title) isolate1 = Isolate('isolate', cases, rate = abnormal_rate) np_array = isolate1.merge_arrays() table_name = np_array[1, 0] db = connectdb() if not query_table(db, table_name): create_table(db, np_array[0], table_name) # 插入数据,表名为uuid if insert_train_datas(db, table_name, np_array[1:]): # 数据集列表存储表名(redis存储),断电就清空 redis_conn = get_redis_connection("default") redis_conn.sadd('data_set_name', title) # sv.data_set.append(title) # 存储数据集表名(磁盘存储),断电可恢复 save_dataset_name_to_file(title) # 存储文件与UUID对应关系到file2uuid表中 insert_file2uuid(title, table_name) return True return False def save_dataset_name_to_file(file_name): """ 将文件名存储到磁盘中,断电重启时能够保证继续运行 :param file_name: 文件名称 :return: """ print(os.getcwd()) file_path = 
"./models_file/data_set_name" with open(file_path, 'a+') as file: file.write(file_name + "\n") def load_dataset_name_to_list(): """ 加载磁盘文件中数据集名到缓存中,data_set :return: """ file_path = "./models_file/data_set_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('data_set_name', line) def load_xgboost_name_to_list(): """ 加载磁盘文件中XGBoost模型名到缓存中,xgboost_name :return: """ file_path = "./models_file/xgboost_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('xgboost_name', line) def load_lstm_name_to_list(): """ 加载磁盘文件中LSTM模型名到缓存中,lstm_name :return: """ file_path = "./models_file/lstm_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('lstm_name', line) # def load_lstm_name_to_dict(): # """ # 加载磁盘文件中LSTM模型到内存中,lstm_name # :return: # """ # file_path = "./models_file/lstm_name" # with open(file_path, 'r') as file: # lines = file.read().splitlines() # for line in lines: # if line is None or line == "": # continue # elif line not in sv.lstm_model_dict.keys(): # sv.lstm_model_dict[line] = load_lstm_class(line) # print("lstm---------------------", sv.lstm_model_dict) # # # def load_xgboost_name_to_dict(): # """ # 加载磁盘文件中LSTM模型到内存中,lstm_name # :return: # """ # file_path = "./models_file/xgboost_name" # with open(file_path, 'r') as file: # lines = file.read().splitlines() # for line in lines: # if line is None or line == "": # continue # elif line not in sv.xgboost_model_dict.keys(): # sv.xgboost_model_dict[line] = load_xgboost_class(line) # print("xgboost-------------------", sv.xgboost_model_dict) def load_datas_from_disk_to_memory(): 
load_dataset_name_to_list() load_xgboost_name_to_list() load_lstm_name_to_list() def str_to_time_hour_minute(time): week = datetime.strptime(re.split(r" ", time)[0], "%Y/%m/%d").weekday() year, month, day, hour, minute, secend = re.split(r"[/ :]", time) return [hour, minute, week] def use_XGBoost_predict(json_data): """ 使用已训练的XGBoost模型检测异常 :param json_data: :return: """ model_name = json_data["host_id"] times = datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S') print(times.hour) print(type(times)) predict_list = [model_name, datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S'), json_data["kpi"]] print(predict_list) predict_array = np.array(predict_list) # 转换成XGBoost能使用的数据格式 tmp = translate_to_xgboost_datas_from_mysql(predict_array.reshape(1, 3)) # 由于频次较低,每次从磁盘文件中读取模型然后判断 XGBoost_model = load_xgboost_class(model_name) print("load xgboost0000000000000000000000") print("model name", XGBoost_model.name) return XGBoost_model.predict(tmp) def translate_to_xgboost_datas_from_realtime(): pass def translate_to_xgboost_datas_from_mysql(np_array): """ 将数据转换成xgboost能够识别的数据,仅仅在时间格式上转换,其他列不变 :param np_array:输入的数组 :return:时间转换后的数组,仅仅在时间上做出改变,其他列不变 """ # 删除id列 np_array = np.delete(np_array, 0, axis = 1) # 获取时间列 time_array = np_array[:, 0] # 删除时间列 np_array = np.delete(np_array, 0, axis = 1) hour = [] minute = [] week = [] for time in time_array: hour.append(time.hour) minute.append(time.minute) week.append(time.weekday()) np_array = np.insert(np_array, 0, values = minute, axis = 1) np_array = np.insert(np_array, 0, values = hour, axis = 1) np_array = np.insert(np_array, 0, values = week, axis = 1) # 此时返回的属性分别是 week, hour, minute, kpi_1... 
kpi_n,label return np_array def load_data_for_xgboost_from_mysql(table_name, number_data=20000): """ 从数据库为xgboost模型读取数据,并进行时间格式转换 :param number_data: 取最后多少个数据来训练或者预测 :param table_name: 要读取的表名 :return: """ db = connectdb() np_array = np.array(query_datas(db, table_name = table_name, number = number_data)) # # 删除id列 # np_array = np.delete(np_array, 0, axis = 1) # # 获取时间列 # time_array = np_array[:, 0] # # 删除时间列 # np_array = np.delete(np_array, 0, axis = 1) # hour = [] # minute = [] # week = [] # for time in time_array: # hour.append(time.hour) # minute.append(time.minute) # week.append(time.weekday()) # np_array = np.insert(np_array, 0, values = minute, axis = 1) # np_array = np.insert(np_array, 0, values = hour, axis = 1) # np_array = np.insert(np_array, 0, values = week, axis = 1) np_array = translate_to_xgboost_datas_from_mysql(np_array) closedb(db) # 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label return np_array def load_data_for_lstm_from_mysql(table_name, number_data=20000): """ 从数据库为lstm模型读取一天的数据 :param number_data: 取最后多少个数据来训练或者预测 :param table_name: 表名 :param end_time: 最后截止时间,即什么时刻开始预测 :return: """ db = connectdb() np_array = np.array(query_datas(db, table_name = table_name, number = number_data)) closedb(db) return np_array[:, -2] def save_xgboost_class(model): """ xgboost 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称 :param model: :return: """ # 存储模型 print("sava_xgboost_path", os.getcwd()) file_name = "./models_file/xgboost/%s" % model.name with open(file_name, 'wb') as file_obj: pickle.dump(model, file_obj) # 存储名称 file_model_name = "./models_file/xgboost_name" with open(file_model_name, 'a+') as name_obj: name_obj.write(model.file_name + "\n") def load_xgboost_class(model_name): """ 根据模型名称加载模型,返回model :param model_name:模型名 :return: 返回模型 """ file_name = "./models_file/xgboost/%s" % model_name # return pickle.load(open(file_name, "rb")) print(os.getcwd()) print(file_name) with open(file_name, 'rb') as f: xgboost_class = pickle.load(f) return 
xgboost_class def save_lstm_class(LSTM_model): """ lstm 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称 :param model: :return: """ # 存储模型 file_name = "./models_file/lstm/%s" % LSTM_model.name print("save lstm path", file_name) with open(file_name, 'wb') as file_obj: pickle.dump(LSTM_model, file_obj) # 存储名称 file_model_name = "./models_file/lstm_name" with open(file_model_name, 'a+') as name_obj: name_obj.write(LSTM_model.file_name + "\n") def load_lstm_class(model_name): """ 根据模型名称加载模型,返回model :param model_name:模型名 :return: 返回模型 """ print(os.getcwd()) file_name = "./models_file/lstm/%s" % model_name print(file_name) with open(file_name, 'rb') as f: lstm_class = pickle.load(f) return lstm_class def print_model(model_kind, data_name): print("print_model", model_kind, data_name) def train_model(model_kind, data_name, force = 0): """训练模型""" redis_conn = get_redis_connection("default") print("类型", type(data_name)) print(data_name) uuid = query_uuid_from_file2uuid_by_filename(data_name) print(type(uuid), uuid) if model_kind == "XGBoost": # 多进程训练模型 if redis_conn.sismember("xgboost_name", data_name) and force != 1: return 0 else: from xgboost_model.xgboost_class import XGBoost xgboost_train = XGBoost(data_name, uuid) # 存储到redis中 redis_conn.hset('xgboost_model', data_name, pickle.dumps(xgboost_train)) redis_conn.sadd('xgboost_name', data_name) # 模型持久化 save_xgboost_class(xgboost_train)
date) # 存储到redis中 redis_conn.hset('lstm_model', data_name, pickle.dumps(lstm_train)) redis_conn.sadd('lstm_name', data_name) # 模型持久化 save_lstm_class(lstm_train) print("xgboost_name", data_name) return 1 def get_datas_for_tag(table_name, start_time=0, end_time=0, label=(0, 1)): """ 按条件查询数据库表信息 :param table_name: :param start_time: :param end_time: :param label: :return: """ if len(label) > 1: label = (0, 1) else: label = int(label) uuid = query_uuid_from_file2uuid_by_filename(table_name) result = query_datas(connectdb(), table_name = uuid, label = label, start_time = start_time, end_time = end_time) return result def update_datas_for_tag(table_name, label, start_time=0, end_time=0): """ :param table_name: :param start_time: :param end_time: :param label: :return: """ uuid = query_uuid_from_file2uuid_by_filename(table_name) if update_datas(connectdb(), table_name = uuid, label = int(label), start_time = start_time, end_time = end_time): return get_datas_for_tag(table_name = table_name, start_time = start_time, end_time = end_time, label = label) def predict_future_30(table_name): """ 首先从redis中获取模型,如果没有则从磁盘中获取,并将模型存储至redis中 :param table_name:lstm模型名, :return: """ # redis连接池 uuid = query_uuid_from_file2uuid_by_filename(table_name) redis_conn = get_redis_connection("default") # 如果模型不存在于redis的lstm_model hash中就加载,如果在就直接从redis中获取模型进行判断 if not redis_conn.hexists('lstm_model', table_name): # 从磁盘中加载LSTM模型对象 lstm_model_tmp = load_lstm_class(uuid) # 初始化时就预测,避免因为graph冲突 lstm_model_tmp.model.predict(np.zeros((1, 1, 50))) # 将模型加到redis中 redis_conn.hset('lstm_model', uuid, pickle.dumps(lstm_model_tmp)) model_tmp = lstm_model_tmp else: # 从redis中获取模型对象 model_bytes = redis_conn.hget('lstm_model', table_name) # 解析为对象 model_tmp = pickle.loads(model_bytes) model_tmp.model.predict(np.zeros((1, 1, 50))) # 预测值 res = model_tmp.predict_values() # 设置横轴 predict_xAxis = list(range(1, len(res) + 1)) return predict_xAxis, res def get_model_info(kind): res = query_model_info(kind) print(res) 
print(len(res)) print(type(res))
print("xgboost_name", data_name) return 1 elif model_kind == 'LSTM': # 多进程训练模型 if redis_conn.sismember("lstm_name", data_name) and force != 1: print("存在000000000", data_name) return 0 else: print("训练过程0000000") print("类型", type(data_name)) from lstm_model.lstm_class import LSTMModel # data_name是文件名,uuid是文件唯一标识 lstm_train = LSTMModel(data_name, uuid) print("lasted", lstm_train.lasted_up
identifier_body
base_function.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 2018/12/14 15:11 # @Author : zsj # @File : base_function.py import os import pickle import re import time from datetime import datetime import numpy as np import matplotlib.pyplot as plt from django_redis import get_redis_connection from db.mysql_operation import connectdb, query_datas, closedb, query_table, create_table, insert_train_datas, \ update_datas, query_model_info, insert_file2uuid, query_uuid_from_file2uuid_by_filename from isolate_model.isolate_class import Isolate def load_csv(file_name): """ 使用numpy加载csv文件,并把除了host_id的都转换成float类型,因为孤立森林只能判别数值类型 csv文件的格式是: host_id(主机群和主机标识), timestamp, kpi_1, kpi_2, kpi_3, kpi_4..... :param file_name: 要解析的csv文件名 :return: """ array = np.loadtxt(file_name, dtype = str, delimiter = ",") # array = np.loadtxt(file_name, dtype = str, delimiter = ",", encoding = 'utf-8') print(type(array)) return array def show_csv(array, array_x, array_y): """ 将读取的csv文件中某两列取出来作为图形展示的x轴和y轴 :param array:转置后的数组 :param array_x:数组第x列,一般来说x轴是时间 :param array_y:同上 :return: """ # 从第三个值开始取,因为第一个是host_id,第二个是时间戳 x_value = array[1:, array_x] y_value = array[1:, array_y] # 获取label标签,知道是那两行作图 label_x = array[0, array_x] label_y = array[0, array_y] if "timestamp" in label_x: # 一般来说x轴都是时间戳 # x_value = [format_time(x) for x in x_value] x_value = [x for x in x_value] else: x_value = [float(x) for x in x_value] y_value = [float(y) for y in y_value] plt.plot(x_value, y_value, c = 'r', ls = '--', marker = 'o', lw = 1.5, label = label_x) plt.xticks(range(0, len(x_value), int(len(x_value) / 30)), rotation = 90) # plt.figure(dpi=128, figsize=(10, 6)) plt.xlabel(label_x) plt.ylabel(label_y) plt.show() def timestamp_to_time(timestamp): """ 单个时间戳转换成时间,格式为2018-12-14 19:00:00 :param timestamp: :return: """ timestamp = int(timestamp) time_local = time.localtime(timestamp) return time.strftime("%Y-%m-%d: %H:%M:%S", time_local) def simplify_timestamp(timestamps): """ 时间戳批量转换成时间 :param timestamps: 时间戳list :return: """ 
return [timestamp_to_time(timestamp) for timestamp in timestamps] def get_uniform_cases(arrays, size=257): """ 由于传入的测试集不可能刚好是256个,所以需要均匀取周期内的256个case作为测试集 仅仅用于训练模型时使用 :param arrays:测试集数组 :param size:int, 要求均匀分为的份额,一般为256,用户可以自己设置,第一行为标签 :return: """ length = len(arrays) if length < 200: print("测试集大小:", length) return "测试集数据小于200,请重新传入大于200条数据的测试集" elif length < 256: print("测试集大小:", length) return arrays indexs = np.linspace(0, length - 1, size) indexs = np.array(indexs, dtype = int) res_arr = arrays[indexs] print("测试集大小:", len(indexs) - 1) return res_arr def format_time(time): """ 将传入的时间格式化,转换成没有秒的时间格式 yyyy-MM-DD hh-mm :param time: :return: """ # year, month, day, hour, minute, scend = re.split(r"/| |:", time) # print(year, month, day, hour, minute, scend) return time[0:-3] def draw_with_diff_color(np_array): """ 根据标签展示散点图,不同的标签具有不同颜色 :param np_array: :return: """ red_arr = [] green_arr = [] for arr in np_array: if arr[-1] == '0': red_arr.append(arr) else: green_arr.append(arr) print(red_arr) print(green_arr) def save_datas_with_labels(file_name, abnormal_rate): """ 存储已经由孤立森林学习过的带有标签的数据 :return:True or False """ cases = load_csv(file_name) # file_name是文件路径名 print("file name", file_name) # 文件名 title = file_name.split("/")[-1] print(type(title), title) isolate1 = Isolate('isolate', cases, rate = abnormal_rate) np_array = isolate1.merge_arrays() table_name = np_array[1, 0] db = connectdb() if not query_table(db, table_name): create_table(db, np_array[0], table_name) # 插入数据,表名为uuid if insert_train_datas(db, table_name, np_array[1:]): # 数据集列表存储表名(redis存储),断电就清空 redis_conn = get_redis_connection("default") redis_conn.sadd('data_set_name', title) # sv.data_set.append(title) # 存储数据集表名(磁盘存储),断电可恢复 save_dataset_name_to_file(title) # 存储文件与UUID对应关系到file2uuid表中 insert_file2uuid(title, table_name) return True return False def save_dataset_name_to_file(file_name): """ 将文件名存储到磁盘中,断电重启时能够保证继续运行 :param file_name: 文件名称 :return: """ print(os.getcwd()) file_path = 
"./models_file/data_set_name" with open(file_path, 'a+') as file: file.write(file_name + "\n") def load_dataset_name_to_list(): """ 加载磁盘文件中数据集名到缓存中,data_set :return: """ file_path = "./models_file/data_set_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('data_set_name', line) def load_xgboost_name_to_list(): """ 加载磁盘文件中XGBoost模型名到缓存中,xgboost_name :return: """ file_path = "./models_file/xgboost_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('xgboost_name', line) def load_lstm_name_to_list(): """ 加载磁盘文件中LSTM模型名到缓存中,lstm_name :return: """ file_path = "./models_file/lstm_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('lstm_name', line) # def load_lstm_name_to_dict(): # """ # 加载磁盘文件中LSTM模型到内存中,lstm_name # :return: # """ # file_path = "./models_file/lstm_name" # with open(file_path, 'r') as file: # lines = file.read().splitlines() # for line in lines: # if line is None or line == "": # continue # elif line not in sv.lstm_model_dict.keys(): # sv.lstm_model_dict[line] = load_lstm_class(line) # print("lstm---------------------", sv.lstm_model_dict) # # # def load_xgboost_name_to_dict(): # """ # 加载磁盘文件中LSTM模型到内存中,lstm_name # :return: # """ # file_path = "./models_file/xgboost_name" # with open(file_path, 'r') as file: # lines = file.read().splitlines() # for line in lines: # if line is None or line == "": # continue # elif line not in sv.xgboost_model_dict.keys(): # sv.xgboost_model_dict[line] = load_xgboost_class(line) # print("xgboost-------------------", sv.xgboost_model_dict) def load_datas_from_disk_to_memory(): 
load_dataset_name_to_list() load_xgboost_name_to_list() load_lstm_name_to_list() def str_to_time_hour_minute(time): week = datetime.strptime(re.split(r" ", time)[0], "%Y/%m/%d").weekday() year, month, day, hour, minute, secend = re.split(r"[/ :]", time) return [hour, minute, week] def use_XGBoost_predict(json_data): """ 使用已训练的XGBoost模型检测异常 :param json_data: :return: """ model_name = json_data["host_id"] times = datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S') print(times.hour) print(type(times)) predict_list = [model_name, datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S'), json_data["kpi"]] print(predict_list) predict_array = np.array(predict_list) # 转换成XGBoost能使用的数据格式 tmp = translate_to_xgboost_datas_from_mysql(predict_array.reshape(1, 3)) # 由于频次较低,每次从磁盘文件中读取模型然后判断 XGBoost_model = load_xgboost_class(model_name) print("load xgboost0000000000000000000000") print("model name", XGBoost_model.name) return XGBoost_model.predict(tmp) def translate_to_xgboost_datas_from_realtime(): pass def translate_to_xgboost_datas_from_mysql(np_array): """ 将数据转换成xgboost能够识别的数据,仅仅在时间格式上转换,其他列不变 :param np_array:输入的数组 :return:时间转换后的数组,仅仅在时间上做出改变,其他列不变 """ # 删除id列 np_array = np.delete(np_array, 0, axis = 1) # 获取时间列 time_array = np_array[:, 0] # 删除时间列 np_array = np.delete(np_array, 0, axis = 1) hour = [] minute = [] week = [] for time in time_array: hour.append(time.hour) minute.append(time.minute) week.append(time.weekday()) np_array = np.insert(np_array, 0, values = minute, axis = 1) np_array = np.insert(np_array, 0, values = hour, axis = 1) np_array = np.insert(np_array, 0, values = week, axis = 1) # 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label return np_array def load_data_for_xgboost_from_mysql(table_name, number_data=20000): """ 从数据库为xgboost模型读取数据,并进行时间格式转换 :param number_data: 取最后多少个数据来训练或者预测 :param table_name: 要读取的表名 :return: """ db = connectdb() np_array = np.array(query_datas(db, table_name = table_name, number = number_data)) # # 删除id列 # np_array = np.de
取时间列 # time_array = np_array[:, 0] # # 删除时间列 # np_array = np.delete(np_array, 0, axis = 1) # hour = [] # minute = [] # week = [] # for time in time_array: # hour.append(time.hour) # minute.append(time.minute) # week.append(time.weekday()) # np_array = np.insert(np_array, 0, values = minute, axis = 1) # np_array = np.insert(np_array, 0, values = hour, axis = 1) # np_array = np.insert(np_array, 0, values = week, axis = 1) np_array = translate_to_xgboost_datas_from_mysql(np_array) closedb(db) # 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label return np_array def load_data_for_lstm_from_mysql(table_name, number_data=20000): """ 从数据库为lstm模型读取一天的数据 :param number_data: 取最后多少个数据来训练或者预测 :param table_name: 表名 :param end_time: 最后截止时间,即什么时刻开始预测 :return: """ db = connectdb() np_array = np.array(query_datas(db, table_name = table_name, number = number_data)) closedb(db) return np_array[:, -2] def save_xgboost_class(model): """ xgboost 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称 :param model: :return: """ # 存储模型 print("sava_xgboost_path", os.getcwd()) file_name = "./models_file/xgboost/%s" % model.name with open(file_name, 'wb') as file_obj: pickle.dump(model, file_obj) # 存储名称 file_model_name = "./models_file/xgboost_name" with open(file_model_name, 'a+') as name_obj: name_obj.write(model.file_name + "\n") def load_xgboost_class(model_name): """ 根据模型名称加载模型,返回model :param model_name:模型名 :return: 返回模型 """ file_name = "./models_file/xgboost/%s" % model_name # return pickle.load(open(file_name, "rb")) print(os.getcwd()) print(file_name) with open(file_name, 'rb') as f: xgboost_class = pickle.load(f) return xgboost_class def save_lstm_class(LSTM_model): """ lstm 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称 :param model: :return: """ # 存储模型 file_name = "./models_file/lstm/%s" % LSTM_model.name print("save lstm path", file_name) with open(file_name, 'wb') as file_obj: pickle.dump(LSTM_model, file_obj) # 存储名称 file_model_name = "./models_file/lstm_name" with open(file_model_name, 
'a+') as name_obj: name_obj.write(LSTM_model.file_name + "\n") def load_lstm_class(model_name): """ 根据模型名称加载模型,返回model :param model_name:模型名 :return: 返回模型 """ print(os.getcwd()) file_name = "./models_file/lstm/%s" % model_name print(file_name) with open(file_name, 'rb') as f: lstm_class = pickle.load(f) return lstm_class def print_model(model_kind, data_name): print("print_model", model_kind, data_name) def train_model(model_kind, data_name, force = 0): """训练模型""" redis_conn = get_redis_connection("default") print("类型", type(data_name)) print(data_name) uuid = query_uuid_from_file2uuid_by_filename(data_name) print(type(uuid), uuid) if model_kind == "XGBoost": # 多进程训练模型 if redis_conn.sismember("xgboost_name", data_name) and force != 1: return 0 else: from xgboost_model.xgboost_class import XGBoost xgboost_train = XGBoost(data_name, uuid) # 存储到redis中 redis_conn.hset('xgboost_model', data_name, pickle.dumps(xgboost_train)) redis_conn.sadd('xgboost_name', data_name) # 模型持久化 save_xgboost_class(xgboost_train) print("xgboost_name", data_name) return 1 elif model_kind == 'LSTM': # 多进程训练模型 if redis_conn.sismember("lstm_name", data_name) and force != 1: print("存在000000000", data_name) return 0 else: print("训练过程0000000") print("类型", type(data_name)) from lstm_model.lstm_class import LSTMModel # data_name是文件名,uuid是文件唯一标识 lstm_train = LSTMModel(data_name, uuid) print("lasted", lstm_train.lasted_update) # 存储到redis中 redis_conn.hset('lstm_model', data_name, pickle.dumps(lstm_train)) redis_conn.sadd('lstm_name', data_name) # 模型持久化 save_lstm_class(lstm_train) print("xgboost_name", data_name) return 1 def get_datas_for_tag(table_name, start_time=0, end_time=0, label=(0, 1)): """ 按条件查询数据库表信息 :param table_name: :param start_time: :param end_time: :param label: :return: """ if len(label) > 1: label = (0, 1) else: label = int(label) uuid = query_uuid_from_file2uuid_by_filename(table_name) result = query_datas(connectdb(), table_name = uuid, label = label, start_time = start_time, 
end_time = end_time) return result def update_datas_for_tag(table_name, label, start_time=0, end_time=0): """ :param table_name: :param start_time: :param end_time: :param label: :return: """ uuid = query_uuid_from_file2uuid_by_filename(table_name) if update_datas(connectdb(), table_name = uuid, label = int(label), start_time = start_time, end_time = end_time): return get_datas_for_tag(table_name = table_name, start_time = start_time, end_time = end_time, label = label) def predict_future_30(table_name): """ 首先从redis中获取模型,如果没有则从磁盘中获取,并将模型存储至redis中 :param table_name:lstm模型名, :return: """ # redis连接池 uuid = query_uuid_from_file2uuid_by_filename(table_name) redis_conn = get_redis_connection("default") # 如果模型不存在于redis的lstm_model hash中就加载,如果在就直接从redis中获取模型进行判断 if not redis_conn.hexists('lstm_model', table_name): # 从磁盘中加载LSTM模型对象 lstm_model_tmp = load_lstm_class(uuid) # 初始化时就预测,避免因为graph冲突 lstm_model_tmp.model.predict(np.zeros((1, 1, 50))) # 将模型加到redis中 redis_conn.hset('lstm_model', uuid, pickle.dumps(lstm_model_tmp)) model_tmp = lstm_model_tmp else: # 从redis中获取模型对象 model_bytes = redis_conn.hget('lstm_model', table_name) # 解析为对象 model_tmp = pickle.loads(model_bytes) model_tmp.model.predict(np.zeros((1, 1, 50))) # 预测值 res = model_tmp.predict_values() # 设置横轴 predict_xAxis = list(range(1, len(res) + 1)) return predict_xAxis, res def get_model_info(kind): res = query_model_info(kind) print(res) print(len(res)) print(type(res))
lete(np_array, 0, axis = 1) # # 获
identifier_name
base_function.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 2018/12/14 15:11 # @Author : zsj # @File : base_function.py import os import pickle import re import time from datetime import datetime import numpy as np import matplotlib.pyplot as plt from django_redis import get_redis_connection from db.mysql_operation import connectdb, query_datas, closedb, query_table, create_table, insert_train_datas, \ update_datas, query_model_info, insert_file2uuid, query_uuid_from_file2uuid_by_filename from isolate_model.isolate_class import Isolate def load_csv(file_name): """ 使用numpy加载csv文件,并把除了host_id的都转换成float类型,因为孤立森林只能判别数值类型 csv文件的格式是: host_id(主机群和主机标识), timestamp, kpi_1, kpi_2, kpi_3, kpi_4..... :param file_name: 要解析的csv文件名 :return: """ array = np.loadtxt(file_name, dtype = str, delimiter = ",") # array = np.loadtxt(file_name, dtype = str, delimiter = ",", encoding = 'utf-8') print(type(array)) return array def show_csv(array, array_x, array_y): """ 将读取的csv文件中某两列取出来作为图形展示的x轴和y轴 :param array:转置后的数组 :param array_x:数组第x列,一般来说x轴是时间 :param array_y:同上 :return: """ # 从第三个值开始取,因为第一个是host_id,第二个是时间戳 x_value = array[1:, array_x] y_value = array[1:, array_y] # 获取label标签,知道是那两行作图 label_x = array[0, array_x] label_y = array[0, array_y] if "timestamp" in label_x: # 一般来说x轴都是时间戳 # x_value = [format_time(x) for x in x_value] x_value = [x for x in x_value] else: x_value = [float(x) for x in x_value] y_value = [float(y) for y in y_value] plt.plot(x_value, y_value, c = 'r', ls = '--', marker = 'o', lw = 1.5, label = label_x) plt.xticks(range(0, len(x_value), int(len(x_value) / 30)), rotation = 90) # plt.figure(dpi=128, figsize=(10, 6)) plt.xlabel(label_x) plt.ylabel(label_y) plt.show() def timestamp_to_time(timestamp): """ 单个时间戳转换成时间,格式为2018-12-14 19:00:00 :param timestamp: :return: """ timestamp = int(timestamp) time_local = time.localtime(timestamp) return time.strftime("%Y-%m-%d: %H:%M:%S", time_local) def simplify_timestamp(timestamps): """ 时间戳批量转换成时间 :param timestamps: 时间戳list :return: """ 
return [timestamp_to_time(timestamp) for timestamp in timestamps] def get_uniform_cases(arrays, size=257): """ 由于传入的测试集不可能刚好是256个,所以需要均匀取周期内的256个case作为测试集 仅仅用于训练模型时使用 :param arrays:测试集数组 :param size:int, 要求均匀分为的份额,一般为256,用户可以自己设置,第一行为标签 :return: """ length = len(arrays) if length < 200: print("测试集大小:", length) return "测试集数据小于200,请重新传入大于200条数据的测试集" elif length < 256: print("测试集大小:", length) return arrays indexs = np.linspace(0, length - 1, size) indexs = np.array(indexs, dtype = int) res_arr = arrays[indexs] print("测试集大小:", len(indexs) - 1) return res_arr def format_time(time): """ 将传入的时间格式化,转换成没有秒的时间格式 yyyy-MM-DD hh-mm :param time: :return: """ # year, month, day, hour, minute, scend = re.split(r"/| |:", time) # print(year, month, day, hour, minute, scend) return time[0:-3] def draw_with_diff_color(np_array): """ 根据标签展示散点图,不同的标签具有不同颜色 :param np_array: :return: """ red_arr = [] green_arr = [] for arr in np_array: if arr[-1] == '0': red_arr.append(arr) else: green_arr.append(arr) print(red_arr) print(green_arr) def save_datas_with_labels(file_name, abnormal_rate): """ 存储已经由孤立森林学习过的带有标签的数据 :return:True or False """ cases = load_csv(file_name) # file_name是文件路径名 print("file name", file_name) # 文件名 title = file_name.split("/")[-1] print(type(title), title) isolate1 = Isolate('isolate', cases, rate = abnormal_rate) np_array = isolate1.merge_arrays() table_name = np_array[1, 0] db = connectdb() if not query_table(db, table_name): create_table(db, np_array[0], table_name) # 插入数据,表名为uuid if insert_train_datas(db, table_name,
# 数据集列表存储表名(redis存储),断电就清空 redis_conn = get_redis_connection("default") redis_conn.sadd('data_set_name', title) # sv.data_set.append(title) # 存储数据集表名(磁盘存储),断电可恢复 save_dataset_name_to_file(title) # 存储文件与UUID对应关系到file2uuid表中 insert_file2uuid(title, table_name) return True return False def save_dataset_name_to_file(file_name): """ 将文件名存储到磁盘中,断电重启时能够保证继续运行 :param file_name: 文件名称 :return: """ print(os.getcwd()) file_path = "./models_file/data_set_name" with open(file_path, 'a+') as file: file.write(file_name + "\n") def load_dataset_name_to_list(): """ 加载磁盘文件中数据集名到缓存中,data_set :return: """ file_path = "./models_file/data_set_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('data_set_name', line) def load_xgboost_name_to_list(): """ 加载磁盘文件中XGBoost模型名到缓存中,xgboost_name :return: """ file_path = "./models_file/xgboost_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('xgboost_name', line) def load_lstm_name_to_list(): """ 加载磁盘文件中LSTM模型名到缓存中,lstm_name :return: """ file_path = "./models_file/lstm_name" with open(file_path, 'r') as file: lines = file.read().splitlines() for line in lines: if line is None or line == "": continue redis_conn = get_redis_connection("default") redis_conn.sadd('lstm_name', line) # def load_lstm_name_to_dict(): # """ # 加载磁盘文件中LSTM模型到内存中,lstm_name # :return: # """ # file_path = "./models_file/lstm_name" # with open(file_path, 'r') as file: # lines = file.read().splitlines() # for line in lines: # if line is None or line == "": # continue # elif line not in sv.lstm_model_dict.keys(): # sv.lstm_model_dict[line] = load_lstm_class(line) # print("lstm---------------------", sv.lstm_model_dict) # # # def load_xgboost_name_to_dict(): # """ # 加载磁盘文件中LSTM模型到内存中,lstm_name # 
:return: # """ # file_path = "./models_file/xgboost_name" # with open(file_path, 'r') as file: # lines = file.read().splitlines() # for line in lines: # if line is None or line == "": # continue # elif line not in sv.xgboost_model_dict.keys(): # sv.xgboost_model_dict[line] = load_xgboost_class(line) # print("xgboost-------------------", sv.xgboost_model_dict) def load_datas_from_disk_to_memory(): load_dataset_name_to_list() load_xgboost_name_to_list() load_lstm_name_to_list() def str_to_time_hour_minute(time): week = datetime.strptime(re.split(r" ", time)[0], "%Y/%m/%d").weekday() year, month, day, hour, minute, secend = re.split(r"[/ :]", time) return [hour, minute, week] def use_XGBoost_predict(json_data): """ 使用已训练的XGBoost模型检测异常 :param json_data: :return: """ model_name = json_data["host_id"] times = datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S') print(times.hour) print(type(times)) predict_list = [model_name, datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S'), json_data["kpi"]] print(predict_list) predict_array = np.array(predict_list) # 转换成XGBoost能使用的数据格式 tmp = translate_to_xgboost_datas_from_mysql(predict_array.reshape(1, 3)) # 由于频次较低,每次从磁盘文件中读取模型然后判断 XGBoost_model = load_xgboost_class(model_name) print("load xgboost0000000000000000000000") print("model name", XGBoost_model.name) return XGBoost_model.predict(tmp) def translate_to_xgboost_datas_from_realtime(): pass def translate_to_xgboost_datas_from_mysql(np_array): """ 将数据转换成xgboost能够识别的数据,仅仅在时间格式上转换,其他列不变 :param np_array:输入的数组 :return:时间转换后的数组,仅仅在时间上做出改变,其他列不变 """ # 删除id列 np_array = np.delete(np_array, 0, axis = 1) # 获取时间列 time_array = np_array[:, 0] # 删除时间列 np_array = np.delete(np_array, 0, axis = 1) hour = [] minute = [] week = [] for time in time_array: hour.append(time.hour) minute.append(time.minute) week.append(time.weekday()) np_array = np.insert(np_array, 0, values = minute, axis = 1) np_array = np.insert(np_array, 0, values = hour, axis = 1) np_array = np.insert(np_array, 0, 
values = week, axis = 1) # 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label return np_array def load_data_for_xgboost_from_mysql(table_name, number_data=20000): """ 从数据库为xgboost模型读取数据,并进行时间格式转换 :param number_data: 取最后多少个数据来训练或者预测 :param table_name: 要读取的表名 :return: """ db = connectdb() np_array = np.array(query_datas(db, table_name = table_name, number = number_data)) # # 删除id列 # np_array = np.delete(np_array, 0, axis = 1) # # 获取时间列 # time_array = np_array[:, 0] # # 删除时间列 # np_array = np.delete(np_array, 0, axis = 1) # hour = [] # minute = [] # week = [] # for time in time_array: # hour.append(time.hour) # minute.append(time.minute) # week.append(time.weekday()) # np_array = np.insert(np_array, 0, values = minute, axis = 1) # np_array = np.insert(np_array, 0, values = hour, axis = 1) # np_array = np.insert(np_array, 0, values = week, axis = 1) np_array = translate_to_xgboost_datas_from_mysql(np_array) closedb(db) # 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label return np_array def load_data_for_lstm_from_mysql(table_name, number_data=20000): """ 从数据库为lstm模型读取一天的数据 :param number_data: 取最后多少个数据来训练或者预测 :param table_name: 表名 :param end_time: 最后截止时间,即什么时刻开始预测 :return: """ db = connectdb() np_array = np.array(query_datas(db, table_name = table_name, number = number_data)) closedb(db) return np_array[:, -2] def save_xgboost_class(model): """ xgboost 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称 :param model: :return: """ # 存储模型 print("sava_xgboost_path", os.getcwd()) file_name = "./models_file/xgboost/%s" % model.name with open(file_name, 'wb') as file_obj: pickle.dump(model, file_obj) # 存储名称 file_model_name = "./models_file/xgboost_name" with open(file_model_name, 'a+') as name_obj: name_obj.write(model.file_name + "\n") def load_xgboost_class(model_name): """ 根据模型名称加载模型,返回model :param model_name:模型名 :return: 返回模型 """ file_name = "./models_file/xgboost/%s" % model_name # return pickle.load(open(file_name, "rb")) print(os.getcwd()) print(file_name) with 
open(file_name, 'rb') as f: xgboost_class = pickle.load(f) return xgboost_class def save_lstm_class(LSTM_model): """ lstm 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称 :param model: :return: """ # 存储模型 file_name = "./models_file/lstm/%s" % LSTM_model.name print("save lstm path", file_name) with open(file_name, 'wb') as file_obj: pickle.dump(LSTM_model, file_obj) # 存储名称 file_model_name = "./models_file/lstm_name" with open(file_model_name, 'a+') as name_obj: name_obj.write(LSTM_model.file_name + "\n") def load_lstm_class(model_name): """ 根据模型名称加载模型,返回model :param model_name:模型名 :return: 返回模型 """ print(os.getcwd()) file_name = "./models_file/lstm/%s" % model_name print(file_name) with open(file_name, 'rb') as f: lstm_class = pickle.load(f) return lstm_class def print_model(model_kind, data_name): print("print_model", model_kind, data_name) def train_model(model_kind, data_name, force = 0): """训练模型""" redis_conn = get_redis_connection("default") print("类型", type(data_name)) print(data_name) uuid = query_uuid_from_file2uuid_by_filename(data_name) print(type(uuid), uuid) if model_kind == "XGBoost": # 多进程训练模型 if redis_conn.sismember("xgboost_name", data_name) and force != 1: return 0 else: from xgboost_model.xgboost_class import XGBoost xgboost_train = XGBoost(data_name, uuid) # 存储到redis中 redis_conn.hset('xgboost_model', data_name, pickle.dumps(xgboost_train)) redis_conn.sadd('xgboost_name', data_name) # 模型持久化 save_xgboost_class(xgboost_train) print("xgboost_name", data_name) return 1 elif model_kind == 'LSTM': # 多进程训练模型 if redis_conn.sismember("lstm_name", data_name) and force != 1: print("存在000000000", data_name) return 0 else: print("训练过程0000000") print("类型", type(data_name)) from lstm_model.lstm_class import LSTMModel # data_name是文件名,uuid是文件唯一标识 lstm_train = LSTMModel(data_name, uuid) print("lasted", lstm_train.lasted_update) # 存储到redis中 redis_conn.hset('lstm_model', data_name, pickle.dumps(lstm_train)) redis_conn.sadd('lstm_name', data_name) # 模型持久化 
save_lstm_class(lstm_train) print("xgboost_name", data_name) return 1 def get_datas_for_tag(table_name, start_time=0, end_time=0, label=(0, 1)): """ 按条件查询数据库表信息 :param table_name: :param start_time: :param end_time: :param label: :return: """ if len(label) > 1: label = (0, 1) else: label = int(label) uuid = query_uuid_from_file2uuid_by_filename(table_name) result = query_datas(connectdb(), table_name = uuid, label = label, start_time = start_time, end_time = end_time) return result def update_datas_for_tag(table_name, label, start_time=0, end_time=0): """ :param table_name: :param start_time: :param end_time: :param label: :return: """ uuid = query_uuid_from_file2uuid_by_filename(table_name) if update_datas(connectdb(), table_name = uuid, label = int(label), start_time = start_time, end_time = end_time): return get_datas_for_tag(table_name = table_name, start_time = start_time, end_time = end_time, label = label) def predict_future_30(table_name): """ 首先从redis中获取模型,如果没有则从磁盘中获取,并将模型存储至redis中 :param table_name:lstm模型名, :return: """ # redis连接池 uuid = query_uuid_from_file2uuid_by_filename(table_name) redis_conn = get_redis_connection("default") # 如果模型不存在于redis的lstm_model hash中就加载,如果在就直接从redis中获取模型进行判断 if not redis_conn.hexists('lstm_model', table_name): # 从磁盘中加载LSTM模型对象 lstm_model_tmp = load_lstm_class(uuid) # 初始化时就预测,避免因为graph冲突 lstm_model_tmp.model.predict(np.zeros((1, 1, 50))) # 将模型加到redis中 redis_conn.hset('lstm_model', uuid, pickle.dumps(lstm_model_tmp)) model_tmp = lstm_model_tmp else: # 从redis中获取模型对象 model_bytes = redis_conn.hget('lstm_model', table_name) # 解析为对象 model_tmp = pickle.loads(model_bytes) model_tmp.model.predict(np.zeros((1, 1, 50))) # 预测值 res = model_tmp.predict_values() # 设置横轴 predict_xAxis = list(range(1, len(res) + 1)) return predict_xAxis, res def get_model_info(kind): res = query_model_info(kind) print(res) print(len(res)) print(type(res))
np_array[1:]):
conditional_block
logger.go
/* * MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package logger import ( "context" "encoding/hex" "fmt" "go/build" "hash" "path/filepath" "reflect" "runtime" "strings" "time" "github.com/minio/highwayhash" "github.com/minio/minio-go/v6/pkg/set" "github.com/minio/minio/cmd/logger/message/log" ) var ( // HighwayHash key for logging in anonymous mode magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0") // HighwayHash hasher for logging in anonymous mode loggerHighwayHasher hash.Hash ) // Disable disables all logging, false by default. (used for "go test") var Disable = false // Level type type Level int8 // Enumerated level types const ( InformationLvl Level = iota + 1 ErrorLvl FatalLvl ) var trimStrings []string var globalDeploymentID string // TimeFormat - logging time format. 
const TimeFormat string = "15:04:05 MST 01/02/2006"

// List of error strings to be ignored by LogIf
const (
	// Emitted by the storage layer when a disk is missing; logging every
	// occurrence would be noise, so LogIf drops it (see LogIf below).
	diskNotFoundError = "disk not found"
)

// matchingFuncNames lists fully-qualified entry-point function names at which
// stack-trace collection stops: once getTrace reaches one of these frames, the
// frames above it add no diagnostic value and are omitted.
var matchingFuncNames = [...]string{
	"http.HandlerFunc.ServeHTTP",
	"cmd.serverMain",
	"cmd.StartGateway",
	"cmd.(*webAPIHandlers).ListBuckets",
	"cmd.(*webAPIHandlers).MakeBucket",
	"cmd.(*webAPIHandlers).DeleteBucket",
	"cmd.(*webAPIHandlers).ListObjects",
	"cmd.(*webAPIHandlers).RemoveObject",
	"cmd.(*webAPIHandlers).Login",
	"cmd.(*webAPIHandlers).GenerateAuth",
	"cmd.(*webAPIHandlers).SetAuth",
	"cmd.(*webAPIHandlers).GetAuth",
	"cmd.(*webAPIHandlers).CreateURLToken",
	"cmd.(*webAPIHandlers).Upload",
	"cmd.(*webAPIHandlers).Download",
	"cmd.(*webAPIHandlers).DownloadZip",
	"cmd.(*webAPIHandlers).GetBucketPolicy",
	"cmd.(*webAPIHandlers).ListAllBucketPolicies",
	"cmd.(*webAPIHandlers).SetBucketPolicy",
	"cmd.(*webAPIHandlers).PresignedGet",
	"cmd.(*webAPIHandlers).ServerInfo",
	"cmd.(*webAPIHandlers).StorageInfo",
	// add more here ..
}

// String returns the log tag for the level ("INFO", "ERROR", "FATAL").
// The zero value (and any unknown level) yields the empty string.
func (level Level) String() string {
	var lvlStr string
	switch level {
	case InformationLvl:
		lvlStr = "INFO"
	case ErrorLvl:
		lvlStr = "ERROR"
	case FatalLvl:
		lvlStr = "FATAL"
	}
	return lvlStr
}

// quietFlag: Hide startup messages if enabled
// jsonFlag: Display in JSON format, if enabled
var (
	quietFlag, jsonFlag, anonFlag bool
	// Custom function to format error
	errorFmtFunc func(string, error, bool) string
)

// EnableQuiet - turns quiet option on.
func EnableQuiet() {
	quietFlag = true
}

// EnableJSON - outputs logs in json format.
// JSON output implies quiet mode, so plain-text startup banners are suppressed.
func EnableJSON() {
	jsonFlag = true
	quietFlag = true
}

// EnableAnonymous - turns anonymous flag
// to avoid printing sensitive information.
func EnableAnonymous() {
	anonFlag = true
}

// IsJSON - returns true if jsonFlag is true
func IsJSON() bool {
	return jsonFlag
}

// IsQuiet - returns true if quietFlag is true
func IsQuiet() bool {
	return quietFlag
}

// RegisterUIError registers the specified rendering function. This latter
// will be called for a pretty rendering of fatal errors.
func
(f func(string, error, bool) string) { errorFmtFunc = f } // Remove any duplicates and return unique entries. func uniqueEntries(paths []string) []string { m := make(set.StringSet) for _, p := range paths { if !m.Contains(p) { m.Add(p) } } return m.ToSlice() } // SetDeploymentID -- Deployment Id from the main package is set here func SetDeploymentID(deploymentID string) { globalDeploymentID = deploymentID } // Init sets the trimStrings to possible GOPATHs // and GOROOT directories. Also append github.com/minio/minio // This is done to clean up the filename, when stack trace is // displayed when an error happens. func Init(goPath string, goRoot string) { var goPathList []string var goRootList []string var defaultgoPathList []string var defaultgoRootList []string pathSeperator := ":" // Add all possible GOPATH paths into trimStrings // Split GOPATH depending on the OS type if runtime.GOOS == "windows" { pathSeperator = ";" } goPathList = strings.Split(goPath, pathSeperator) goRootList = strings.Split(goRoot, pathSeperator) defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator) defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator) // Add trim string "{GOROOT}/src/" into trimStrings trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)} // Add all possible path from GOPATH=path1:path2...:pathN // as "{path#}/src/" into trimStrings for _, goPathString := range goPathList { trimStrings = append(trimStrings, filepath.Join(goPathString, "src")+string(filepath.Separator)) } for _, goRootString := range goRootList { trimStrings = append(trimStrings, filepath.Join(goRootString, "src")+string(filepath.Separator)) } for _, defaultgoPathString := range defaultgoPathList { trimStrings = append(trimStrings, filepath.Join(defaultgoPathString, "src")+string(filepath.Separator)) } for _, defaultgoRootString := range defaultgoRootList { trimStrings = append(trimStrings, filepath.Join(defaultgoRootString, 
"src")+string(filepath.Separator)) } // Remove duplicate entries. trimStrings = uniqueEntries(trimStrings) // Add "github.com/minio/minio" as the last to cover // paths like "{GOROOT}/src/github.com/minio/minio" // and "{GOPATH}/src/github.com/minio/minio" trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator)) loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit } func trimTrace(f string) string { for _, trimString := range trimStrings { f = strings.TrimPrefix(filepath.ToSlash(f), filepath.ToSlash(trimString)) } return filepath.FromSlash(f) } func getSource(level int) string { pc, file, lineNumber, ok := runtime.Caller(level) if ok { // Clean up the common prefixes file = trimTrace(file) _, funcName := filepath.Split(runtime.FuncForPC(pc).Name()) return fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName) } return "" } // getTrace method - creates and returns stack trace func getTrace(traceLevel int) []string { var trace []string pc, file, lineNumber, ok := runtime.Caller(traceLevel) for ok && file != "" { // Clean up the common prefixes file = trimTrace(file) // Get the function name _, funcName := filepath.Split(runtime.FuncForPC(pc).Name()) // Skip duplicate traces that start with file name, "<autogenerated>" // and also skip traces with function name that starts with "runtime." 
if !strings.HasPrefix(file, "<autogenerated>") && !strings.HasPrefix(funcName, "runtime.") { // Form and append a line of stack trace into a // collection, 'trace', to build full stack trace trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName)) // Ignore trace logs beyond the following conditions for _, name := range matchingFuncNames { if funcName == name { return trace } } } traceLevel++ // Read stack trace information from PC pc, file, lineNumber, ok = runtime.Caller(traceLevel) } return trace } // Return the highway hash of the passed string func hashString(input string) string { defer loggerHighwayHasher.Reset() loggerHighwayHasher.Write([]byte(input)) checksum := loggerHighwayHasher.Sum(nil) return hex.EncodeToString(checksum) } // LogAlwaysIf prints a detailed error message during // the execution of the server. func LogAlwaysIf(ctx context.Context, err error) { if err == nil { return } logIf(ctx, err) } // LogIf prints a detailed error message during // the execution of the server, if it is not an // ignored error. func LogIf(ctx context.Context, err error) { if err == nil { return } if err.Error() != diskNotFoundError { logIf(ctx, err) } } // logIf prints a detailed error message during // the execution of the server. 
func logIf(ctx context.Context, err error) {
	// Global kill switch, used by "go test" (see Disable above).
	if Disable {
		return
	}
	// Pull request-scoped metadata from the context; fall back to a
	// synthetic "SYSTEM" request when none is attached.
	req := GetReqInfo(ctx)
	if req == nil {
		req = &ReqInfo{API: "SYSTEM"}
	}
	API := "SYSTEM"
	if req.API != "" {
		API = req.API
	}
	// Flatten request tags into a plain map for the log entry.
	tags := make(map[string]string)
	for _, entry := range req.GetTags() {
		tags[entry.Key] = entry.Val
	}

	// Get full stack trace
	// NOTE(review): the skip depth of 3 presumably hops over logIf and its
	// exported wrappers (LogIf/LogAlwaysIf) so the trace starts near the
	// original call site — confirm if the call chain changes.
	trace := getTrace(3)

	// Get the cause for the Error
	message := err.Error()
	// Fall back to the process-wide deployment ID when the request carries none.
	if req.DeploymentID == "" {
		req.DeploymentID = globalDeploymentID
	}
	entry := log.Entry{
		DeploymentID: req.DeploymentID,
		Level:        ErrorLvl.String(),
		RemoteHost:   req.RemoteHost,
		Host:         req.Host,
		RequestID:    req.RequestID,
		UserAgent:    req.UserAgent,
		Time:         time.Now().UTC().Format(time.RFC3339Nano),
		API: &log.API{
			Name: API,
			Args: &log.Args{
				Bucket: req.BucketName,
				Object: req.ObjectName,
			},
		},
		Trace: &log.Trace{
			Message: message,
			Source:  trace,
			Variables: tags,
		},
	}

	// In anonymous mode, replace potentially sensitive values (bucket,
	// object, remote host) with their highway-hash, reduce the message to
	// the error's Go type name, and drop the tag values entirely.
	if anonFlag {
		entry.API.Args.Bucket = hashString(entry.API.Args.Bucket)
		entry.API.Args.Object = hashString(entry.API.Args.Object)
		entry.RemoteHost = hashString(entry.RemoteHost)
		entry.Trace.Message = reflect.TypeOf(err).String()
		entry.Trace.Variables = make(map[string]string)
	}

	// Iterate over all logger targets to send the log entry
	for _, t := range Targets {
		t.Send(entry)
	}
}

// ErrCritical is the value panic'd whenever CriticalIf is called.
var ErrCritical struct{}

// CriticalIf logs the provided error on the console. It fails the
// current go-routine by causing a `panic(ErrCritical)`.
func CriticalIf(ctx context.Context, err error) {
	if err != nil {
		LogIf(ctx, err)
		panic(ErrCritical)
	}
}

// FatalIf is similar to Fatal() but it ignores passed nil error
func FatalIf(err error, msg string, data ...interface{}) {
	if err == nil {
		return
	}
	fatal(err, msg, data...)
}
RegisterUIError
identifier_name
logger.go
/* * MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package logger import ( "context" "encoding/hex" "fmt" "go/build" "hash" "path/filepath" "reflect" "runtime" "strings" "time" "github.com/minio/highwayhash" "github.com/minio/minio-go/v6/pkg/set" "github.com/minio/minio/cmd/logger/message/log" ) var ( // HighwayHash key for logging in anonymous mode magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0") // HighwayHash hasher for logging in anonymous mode loggerHighwayHasher hash.Hash ) // Disable disables all logging, false by default. (used for "go test") var Disable = false // Level type type Level int8 // Enumerated level types const ( InformationLvl Level = iota + 1 ErrorLvl FatalLvl ) var trimStrings []string var globalDeploymentID string // TimeFormat - logging time format. 
const TimeFormat string = "15:04:05 MST 01/02/2006" // List of error strings to be ignored by LogIf const ( diskNotFoundError = "disk not found" ) var matchingFuncNames = [...]string{ "http.HandlerFunc.ServeHTTP", "cmd.serverMain", "cmd.StartGateway", "cmd.(*webAPIHandlers).ListBuckets", "cmd.(*webAPIHandlers).MakeBucket", "cmd.(*webAPIHandlers).DeleteBucket", "cmd.(*webAPIHandlers).ListObjects", "cmd.(*webAPIHandlers).RemoveObject", "cmd.(*webAPIHandlers).Login", "cmd.(*webAPIHandlers).GenerateAuth", "cmd.(*webAPIHandlers).SetAuth", "cmd.(*webAPIHandlers).GetAuth", "cmd.(*webAPIHandlers).CreateURLToken", "cmd.(*webAPIHandlers).Upload", "cmd.(*webAPIHandlers).Download", "cmd.(*webAPIHandlers).DownloadZip", "cmd.(*webAPIHandlers).GetBucketPolicy", "cmd.(*webAPIHandlers).ListAllBucketPolicies", "cmd.(*webAPIHandlers).SetBucketPolicy", "cmd.(*webAPIHandlers).PresignedGet", "cmd.(*webAPIHandlers).ServerInfo", "cmd.(*webAPIHandlers).StorageInfo", // add more here .. } func (level Level) String() string { var lvlStr string switch level { case InformationLvl: lvlStr = "INFO" case ErrorLvl: lvlStr = "ERROR" case FatalLvl: lvlStr = "FATAL" } return lvlStr } // quietFlag: Hide startup messages if enabled // jsonFlag: Display in JSON format, if enabled var ( quietFlag, jsonFlag, anonFlag bool // Custom function to format error errorFmtFunc func(string, error, bool) string ) // EnableQuiet - turns quiet option on. func EnableQuiet() { quietFlag = true } // EnableJSON - outputs logs in json format. func EnableJSON()
// EnableAnonymous - turns anonymous flag // to avoid printing sensitive information. func EnableAnonymous() { anonFlag = true } // IsJSON - returns true if jsonFlag is true func IsJSON() bool { return jsonFlag } // IsQuiet - returns true if quietFlag is true func IsQuiet() bool { return quietFlag } // RegisterUIError registers the specified rendering function. This latter // will be called for a pretty rendering of fatal errors. func RegisterUIError(f func(string, error, bool) string) { errorFmtFunc = f } // Remove any duplicates and return unique entries. func uniqueEntries(paths []string) []string { m := make(set.StringSet) for _, p := range paths { if !m.Contains(p) { m.Add(p) } } return m.ToSlice() } // SetDeploymentID -- Deployment Id from the main package is set here func SetDeploymentID(deploymentID string) { globalDeploymentID = deploymentID } // Init sets the trimStrings to possible GOPATHs // and GOROOT directories. Also append github.com/minio/minio // This is done to clean up the filename, when stack trace is // displayed when an error happens. 
func Init(goPath string, goRoot string) { var goPathList []string var goRootList []string var defaultgoPathList []string var defaultgoRootList []string pathSeperator := ":" // Add all possible GOPATH paths into trimStrings // Split GOPATH depending on the OS type if runtime.GOOS == "windows" { pathSeperator = ";" } goPathList = strings.Split(goPath, pathSeperator) goRootList = strings.Split(goRoot, pathSeperator) defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator) defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator) // Add trim string "{GOROOT}/src/" into trimStrings trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)} // Add all possible path from GOPATH=path1:path2...:pathN // as "{path#}/src/" into trimStrings for _, goPathString := range goPathList { trimStrings = append(trimStrings, filepath.Join(goPathString, "src")+string(filepath.Separator)) } for _, goRootString := range goRootList { trimStrings = append(trimStrings, filepath.Join(goRootString, "src")+string(filepath.Separator)) } for _, defaultgoPathString := range defaultgoPathList { trimStrings = append(trimStrings, filepath.Join(defaultgoPathString, "src")+string(filepath.Separator)) } for _, defaultgoRootString := range defaultgoRootList { trimStrings = append(trimStrings, filepath.Join(defaultgoRootString, "src")+string(filepath.Separator)) } // Remove duplicate entries. 
trimStrings = uniqueEntries(trimStrings) // Add "github.com/minio/minio" as the last to cover // paths like "{GOROOT}/src/github.com/minio/minio" // and "{GOPATH}/src/github.com/minio/minio" trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator)) loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit } func trimTrace(f string) string { for _, trimString := range trimStrings { f = strings.TrimPrefix(filepath.ToSlash(f), filepath.ToSlash(trimString)) } return filepath.FromSlash(f) } func getSource(level int) string { pc, file, lineNumber, ok := runtime.Caller(level) if ok { // Clean up the common prefixes file = trimTrace(file) _, funcName := filepath.Split(runtime.FuncForPC(pc).Name()) return fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName) } return "" } // getTrace method - creates and returns stack trace func getTrace(traceLevel int) []string { var trace []string pc, file, lineNumber, ok := runtime.Caller(traceLevel) for ok && file != "" { // Clean up the common prefixes file = trimTrace(file) // Get the function name _, funcName := filepath.Split(runtime.FuncForPC(pc).Name()) // Skip duplicate traces that start with file name, "<autogenerated>" // and also skip traces with function name that starts with "runtime." 
if !strings.HasPrefix(file, "<autogenerated>") && !strings.HasPrefix(funcName, "runtime.") { // Form and append a line of stack trace into a // collection, 'trace', to build full stack trace trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName)) // Ignore trace logs beyond the following conditions for _, name := range matchingFuncNames { if funcName == name { return trace } } } traceLevel++ // Read stack trace information from PC pc, file, lineNumber, ok = runtime.Caller(traceLevel) } return trace } // Return the highway hash of the passed string func hashString(input string) string { defer loggerHighwayHasher.Reset() loggerHighwayHasher.Write([]byte(input)) checksum := loggerHighwayHasher.Sum(nil) return hex.EncodeToString(checksum) } // LogAlwaysIf prints a detailed error message during // the execution of the server. func LogAlwaysIf(ctx context.Context, err error) { if err == nil { return } logIf(ctx, err) } // LogIf prints a detailed error message during // the execution of the server, if it is not an // ignored error. func LogIf(ctx context.Context, err error) { if err == nil { return } if err.Error() != diskNotFoundError { logIf(ctx, err) } } // logIf prints a detailed error message during // the execution of the server. 
func logIf(ctx context.Context, err error) { if Disable { return } req := GetReqInfo(ctx) if req == nil { req = &ReqInfo{API: "SYSTEM"} } API := "SYSTEM" if req.API != "" { API = req.API } tags := make(map[string]string) for _, entry := range req.GetTags() { tags[entry.Key] = entry.Val } // Get full stack trace trace := getTrace(3) // Get the cause for the Error message := err.Error() if req.DeploymentID == "" { req.DeploymentID = globalDeploymentID } entry := log.Entry{ DeploymentID: req.DeploymentID, Level: ErrorLvl.String(), RemoteHost: req.RemoteHost, Host: req.Host, RequestID: req.RequestID, UserAgent: req.UserAgent, Time: time.Now().UTC().Format(time.RFC3339Nano), API: &log.API{ Name: API, Args: &log.Args{ Bucket: req.BucketName, Object: req.ObjectName, }, }, Trace: &log.Trace{ Message: message, Source: trace, Variables: tags, }, } if anonFlag { entry.API.Args.Bucket = hashString(entry.API.Args.Bucket) entry.API.Args.Object = hashString(entry.API.Args.Object) entry.RemoteHost = hashString(entry.RemoteHost) entry.Trace.Message = reflect.TypeOf(err).String() entry.Trace.Variables = make(map[string]string) } // Iterate over all logger targets to send the log entry for _, t := range Targets { t.Send(entry) } } // ErrCritical is the value panic'd whenever CriticalIf is called. var ErrCritical struct{} // CriticalIf logs the provided error on the console. It fails the // current go-routine by causing a `panic(ErrCritical)`. func CriticalIf(ctx context.Context, err error) { if err != nil { LogIf(ctx, err) panic(ErrCritical) } } // FatalIf is similar to Fatal() but it ignores passed nil error func FatalIf(err error, msg string, data ...interface{}) { if err == nil { return } fatal(err, msg, data...) }
{ jsonFlag = true quietFlag = true }
identifier_body
logger.go
/* * MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package logger import ( "context" "encoding/hex" "fmt" "go/build" "hash" "path/filepath" "reflect" "runtime" "strings" "time" "github.com/minio/highwayhash" "github.com/minio/minio-go/v6/pkg/set" "github.com/minio/minio/cmd/logger/message/log" ) var ( // HighwayHash key for logging in anonymous mode magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0") // HighwayHash hasher for logging in anonymous mode loggerHighwayHasher hash.Hash ) // Disable disables all logging, false by default. (used for "go test") var Disable = false // Level type type Level int8 // Enumerated level types const ( InformationLvl Level = iota + 1 ErrorLvl FatalLvl ) var trimStrings []string var globalDeploymentID string // TimeFormat - logging time format. 
const TimeFormat string = "15:04:05 MST 01/02/2006" // List of error strings to be ignored by LogIf const ( diskNotFoundError = "disk not found" ) var matchingFuncNames = [...]string{ "http.HandlerFunc.ServeHTTP", "cmd.serverMain", "cmd.StartGateway", "cmd.(*webAPIHandlers).ListBuckets", "cmd.(*webAPIHandlers).MakeBucket", "cmd.(*webAPIHandlers).DeleteBucket", "cmd.(*webAPIHandlers).ListObjects", "cmd.(*webAPIHandlers).RemoveObject", "cmd.(*webAPIHandlers).Login", "cmd.(*webAPIHandlers).GenerateAuth", "cmd.(*webAPIHandlers).SetAuth", "cmd.(*webAPIHandlers).GetAuth", "cmd.(*webAPIHandlers).CreateURLToken", "cmd.(*webAPIHandlers).Upload", "cmd.(*webAPIHandlers).Download", "cmd.(*webAPIHandlers).DownloadZip", "cmd.(*webAPIHandlers).GetBucketPolicy", "cmd.(*webAPIHandlers).ListAllBucketPolicies", "cmd.(*webAPIHandlers).SetBucketPolicy", "cmd.(*webAPIHandlers).PresignedGet", "cmd.(*webAPIHandlers).ServerInfo", "cmd.(*webAPIHandlers).StorageInfo", // add more here .. } func (level Level) String() string { var lvlStr string switch level { case InformationLvl: lvlStr = "INFO" case ErrorLvl: lvlStr = "ERROR" case FatalLvl: lvlStr = "FATAL" } return lvlStr } // quietFlag: Hide startup messages if enabled // jsonFlag: Display in JSON format, if enabled var ( quietFlag, jsonFlag, anonFlag bool // Custom function to format error errorFmtFunc func(string, error, bool) string ) // EnableQuiet - turns quiet option on. func EnableQuiet() { quietFlag = true } // EnableJSON - outputs logs in json format. func EnableJSON() { jsonFlag = true quietFlag = true } // EnableAnonymous - turns anonymous flag // to avoid printing sensitive information. func EnableAnonymous() { anonFlag = true } // IsJSON - returns true if jsonFlag is true func IsJSON() bool { return jsonFlag } // IsQuiet - returns true if quietFlag is true func IsQuiet() bool { return quietFlag } // RegisterUIError registers the specified rendering function. This latter // will be called for a pretty rendering of fatal errors. 
func RegisterUIError(f func(string, error, bool) string) { errorFmtFunc = f } // Remove any duplicates and return unique entries. func uniqueEntries(paths []string) []string { m := make(set.StringSet) for _, p := range paths { if !m.Contains(p) { m.Add(p) } } return m.ToSlice() } // SetDeploymentID -- Deployment Id from the main package is set here func SetDeploymentID(deploymentID string) { globalDeploymentID = deploymentID } // Init sets the trimStrings to possible GOPATHs // and GOROOT directories. Also append github.com/minio/minio // This is done to clean up the filename, when stack trace is // displayed when an error happens. func Init(goPath string, goRoot string) { var goPathList []string var goRootList []string var defaultgoPathList []string var defaultgoRootList []string pathSeperator := ":" // Add all possible GOPATH paths into trimStrings // Split GOPATH depending on the OS type if runtime.GOOS == "windows" { pathSeperator = ";" } goPathList = strings.Split(goPath, pathSeperator) goRootList = strings.Split(goRoot, pathSeperator) defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator)
// Add trim string "{GOROOT}/src/" into trimStrings trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)} // Add all possible path from GOPATH=path1:path2...:pathN // as "{path#}/src/" into trimStrings for _, goPathString := range goPathList { trimStrings = append(trimStrings, filepath.Join(goPathString, "src")+string(filepath.Separator)) } for _, goRootString := range goRootList { trimStrings = append(trimStrings, filepath.Join(goRootString, "src")+string(filepath.Separator)) } for _, defaultgoPathString := range defaultgoPathList { trimStrings = append(trimStrings, filepath.Join(defaultgoPathString, "src")+string(filepath.Separator)) } for _, defaultgoRootString := range defaultgoRootList { trimStrings = append(trimStrings, filepath.Join(defaultgoRootString, "src")+string(filepath.Separator)) } // Remove duplicate entries. trimStrings = uniqueEntries(trimStrings) // Add "github.com/minio/minio" as the last to cover // paths like "{GOROOT}/src/github.com/minio/minio" // and "{GOPATH}/src/github.com/minio/minio" trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator)) loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit } func trimTrace(f string) string { for _, trimString := range trimStrings { f = strings.TrimPrefix(filepath.ToSlash(f), filepath.ToSlash(trimString)) } return filepath.FromSlash(f) } func getSource(level int) string { pc, file, lineNumber, ok := runtime.Caller(level) if ok { // Clean up the common prefixes file = trimTrace(file) _, funcName := filepath.Split(runtime.FuncForPC(pc).Name()) return fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName) } return "" } // getTrace method - creates and returns stack trace func getTrace(traceLevel int) []string { var trace []string pc, file, lineNumber, ok := runtime.Caller(traceLevel) for ok && file != "" { // Clean up the common prefixes file = 
trimTrace(file) // Get the function name _, funcName := filepath.Split(runtime.FuncForPC(pc).Name()) // Skip duplicate traces that start with file name, "<autogenerated>" // and also skip traces with function name that starts with "runtime." if !strings.HasPrefix(file, "<autogenerated>") && !strings.HasPrefix(funcName, "runtime.") { // Form and append a line of stack trace into a // collection, 'trace', to build full stack trace trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName)) // Ignore trace logs beyond the following conditions for _, name := range matchingFuncNames { if funcName == name { return trace } } } traceLevel++ // Read stack trace information from PC pc, file, lineNumber, ok = runtime.Caller(traceLevel) } return trace } // Return the highway hash of the passed string func hashString(input string) string { defer loggerHighwayHasher.Reset() loggerHighwayHasher.Write([]byte(input)) checksum := loggerHighwayHasher.Sum(nil) return hex.EncodeToString(checksum) } // LogAlwaysIf prints a detailed error message during // the execution of the server. func LogAlwaysIf(ctx context.Context, err error) { if err == nil { return } logIf(ctx, err) } // LogIf prints a detailed error message during // the execution of the server, if it is not an // ignored error. func LogIf(ctx context.Context, err error) { if err == nil { return } if err.Error() != diskNotFoundError { logIf(ctx, err) } } // logIf prints a detailed error message during // the execution of the server. 
func logIf(ctx context.Context, err error) { if Disable { return } req := GetReqInfo(ctx) if req == nil { req = &ReqInfo{API: "SYSTEM"} } API := "SYSTEM" if req.API != "" { API = req.API } tags := make(map[string]string) for _, entry := range req.GetTags() { tags[entry.Key] = entry.Val } // Get full stack trace trace := getTrace(3) // Get the cause for the Error message := err.Error() if req.DeploymentID == "" { req.DeploymentID = globalDeploymentID } entry := log.Entry{ DeploymentID: req.DeploymentID, Level: ErrorLvl.String(), RemoteHost: req.RemoteHost, Host: req.Host, RequestID: req.RequestID, UserAgent: req.UserAgent, Time: time.Now().UTC().Format(time.RFC3339Nano), API: &log.API{ Name: API, Args: &log.Args{ Bucket: req.BucketName, Object: req.ObjectName, }, }, Trace: &log.Trace{ Message: message, Source: trace, Variables: tags, }, } if anonFlag { entry.API.Args.Bucket = hashString(entry.API.Args.Bucket) entry.API.Args.Object = hashString(entry.API.Args.Object) entry.RemoteHost = hashString(entry.RemoteHost) entry.Trace.Message = reflect.TypeOf(err).String() entry.Trace.Variables = make(map[string]string) } // Iterate over all logger targets to send the log entry for _, t := range Targets { t.Send(entry) } } // ErrCritical is the value panic'd whenever CriticalIf is called. var ErrCritical struct{} // CriticalIf logs the provided error on the console. It fails the // current go-routine by causing a `panic(ErrCritical)`. func CriticalIf(ctx context.Context, err error) { if err != nil { LogIf(ctx, err) panic(ErrCritical) } } // FatalIf is similar to Fatal() but it ignores passed nil error func FatalIf(err error, msg string, data ...interface{}) { if err == nil { return } fatal(err, msg, data...) }
defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator)
random_line_split
logger.go
/* * MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package logger import ( "context" "encoding/hex" "fmt" "go/build" "hash" "path/filepath" "reflect" "runtime" "strings" "time" "github.com/minio/highwayhash" "github.com/minio/minio-go/v6/pkg/set" "github.com/minio/minio/cmd/logger/message/log" ) var ( // HighwayHash key for logging in anonymous mode magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0") // HighwayHash hasher for logging in anonymous mode loggerHighwayHasher hash.Hash ) // Disable disables all logging, false by default. (used for "go test") var Disable = false // Level type type Level int8 // Enumerated level types const ( InformationLvl Level = iota + 1 ErrorLvl FatalLvl ) var trimStrings []string var globalDeploymentID string // TimeFormat - logging time format. 
const TimeFormat string = "15:04:05 MST 01/02/2006" // List of error strings to be ignored by LogIf const ( diskNotFoundError = "disk not found" ) var matchingFuncNames = [...]string{ "http.HandlerFunc.ServeHTTP", "cmd.serverMain", "cmd.StartGateway", "cmd.(*webAPIHandlers).ListBuckets", "cmd.(*webAPIHandlers).MakeBucket", "cmd.(*webAPIHandlers).DeleteBucket", "cmd.(*webAPIHandlers).ListObjects", "cmd.(*webAPIHandlers).RemoveObject", "cmd.(*webAPIHandlers).Login", "cmd.(*webAPIHandlers).GenerateAuth", "cmd.(*webAPIHandlers).SetAuth", "cmd.(*webAPIHandlers).GetAuth", "cmd.(*webAPIHandlers).CreateURLToken", "cmd.(*webAPIHandlers).Upload", "cmd.(*webAPIHandlers).Download", "cmd.(*webAPIHandlers).DownloadZip", "cmd.(*webAPIHandlers).GetBucketPolicy", "cmd.(*webAPIHandlers).ListAllBucketPolicies", "cmd.(*webAPIHandlers).SetBucketPolicy", "cmd.(*webAPIHandlers).PresignedGet", "cmd.(*webAPIHandlers).ServerInfo", "cmd.(*webAPIHandlers).StorageInfo", // add more here .. } func (level Level) String() string { var lvlStr string switch level { case InformationLvl: lvlStr = "INFO" case ErrorLvl: lvlStr = "ERROR" case FatalLvl: lvlStr = "FATAL" } return lvlStr } // quietFlag: Hide startup messages if enabled // jsonFlag: Display in JSON format, if enabled var ( quietFlag, jsonFlag, anonFlag bool // Custom function to format error errorFmtFunc func(string, error, bool) string ) // EnableQuiet - turns quiet option on. func EnableQuiet() { quietFlag = true } // EnableJSON - outputs logs in json format. func EnableJSON() { jsonFlag = true quietFlag = true } // EnableAnonymous - turns anonymous flag // to avoid printing sensitive information. func EnableAnonymous() { anonFlag = true } // IsJSON - returns true if jsonFlag is true func IsJSON() bool { return jsonFlag } // IsQuiet - returns true if quietFlag is true func IsQuiet() bool { return quietFlag } // RegisterUIError registers the specified rendering function. This latter // will be called for a pretty rendering of fatal errors. 
func RegisterUIError(f func(string, error, bool) string) { errorFmtFunc = f } // Remove any duplicates and return unique entries. func uniqueEntries(paths []string) []string { m := make(set.StringSet) for _, p := range paths { if !m.Contains(p) { m.Add(p) } } return m.ToSlice() } // SetDeploymentID -- Deployment Id from the main package is set here func SetDeploymentID(deploymentID string) { globalDeploymentID = deploymentID } // Init sets the trimStrings to possible GOPATHs // and GOROOT directories. Also append github.com/minio/minio // This is done to clean up the filename, when stack trace is // displayed when an error happens. func Init(goPath string, goRoot string) { var goPathList []string var goRootList []string var defaultgoPathList []string var defaultgoRootList []string pathSeperator := ":" // Add all possible GOPATH paths into trimStrings // Split GOPATH depending on the OS type if runtime.GOOS == "windows" { pathSeperator = ";" } goPathList = strings.Split(goPath, pathSeperator) goRootList = strings.Split(goRoot, pathSeperator) defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator) defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator) // Add trim string "{GOROOT}/src/" into trimStrings trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)} // Add all possible path from GOPATH=path1:path2...:pathN // as "{path#}/src/" into trimStrings for _, goPathString := range goPathList { trimStrings = append(trimStrings, filepath.Join(goPathString, "src")+string(filepath.Separator)) } for _, goRootString := range goRootList { trimStrings = append(trimStrings, filepath.Join(goRootString, "src")+string(filepath.Separator)) } for _, defaultgoPathString := range defaultgoPathList { trimStrings = append(trimStrings, filepath.Join(defaultgoPathString, "src")+string(filepath.Separator)) } for _, defaultgoRootString := range defaultgoRootList { trimStrings = append(trimStrings, 
filepath.Join(defaultgoRootString, "src")+string(filepath.Separator)) } // Remove duplicate entries. trimStrings = uniqueEntries(trimStrings) // Add "github.com/minio/minio" as the last to cover // paths like "{GOROOT}/src/github.com/minio/minio" // and "{GOPATH}/src/github.com/minio/minio" trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator)) loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit } func trimTrace(f string) string { for _, trimString := range trimStrings { f = strings.TrimPrefix(filepath.ToSlash(f), filepath.ToSlash(trimString)) } return filepath.FromSlash(f) } func getSource(level int) string { pc, file, lineNumber, ok := runtime.Caller(level) if ok { // Clean up the common prefixes file = trimTrace(file) _, funcName := filepath.Split(runtime.FuncForPC(pc).Name()) return fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName) } return "" } // getTrace method - creates and returns stack trace func getTrace(traceLevel int) []string { var trace []string pc, file, lineNumber, ok := runtime.Caller(traceLevel) for ok && file != "" { // Clean up the common prefixes file = trimTrace(file) // Get the function name _, funcName := filepath.Split(runtime.FuncForPC(pc).Name()) // Skip duplicate traces that start with file name, "<autogenerated>" // and also skip traces with function name that starts with "runtime." if !strings.HasPrefix(file, "<autogenerated>") && !strings.HasPrefix(funcName, "runtime.") { // Form and append a line of stack trace into a // collection, 'trace', to build full stack trace trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName)) // Ignore trace logs beyond the following conditions for _, name := range matchingFuncNames { if funcName == name
} } traceLevel++ // Read stack trace information from PC pc, file, lineNumber, ok = runtime.Caller(traceLevel) } return trace } // Return the highway hash of the passed string func hashString(input string) string { defer loggerHighwayHasher.Reset() loggerHighwayHasher.Write([]byte(input)) checksum := loggerHighwayHasher.Sum(nil) return hex.EncodeToString(checksum) } // LogAlwaysIf prints a detailed error message during // the execution of the server. func LogAlwaysIf(ctx context.Context, err error) { if err == nil { return } logIf(ctx, err) } // LogIf prints a detailed error message during // the execution of the server, if it is not an // ignored error. func LogIf(ctx context.Context, err error) { if err == nil { return } if err.Error() != diskNotFoundError { logIf(ctx, err) } } // logIf prints a detailed error message during // the execution of the server. func logIf(ctx context.Context, err error) { if Disable { return } req := GetReqInfo(ctx) if req == nil { req = &ReqInfo{API: "SYSTEM"} } API := "SYSTEM" if req.API != "" { API = req.API } tags := make(map[string]string) for _, entry := range req.GetTags() { tags[entry.Key] = entry.Val } // Get full stack trace trace := getTrace(3) // Get the cause for the Error message := err.Error() if req.DeploymentID == "" { req.DeploymentID = globalDeploymentID } entry := log.Entry{ DeploymentID: req.DeploymentID, Level: ErrorLvl.String(), RemoteHost: req.RemoteHost, Host: req.Host, RequestID: req.RequestID, UserAgent: req.UserAgent, Time: time.Now().UTC().Format(time.RFC3339Nano), API: &log.API{ Name: API, Args: &log.Args{ Bucket: req.BucketName, Object: req.ObjectName, }, }, Trace: &log.Trace{ Message: message, Source: trace, Variables: tags, }, } if anonFlag { entry.API.Args.Bucket = hashString(entry.API.Args.Bucket) entry.API.Args.Object = hashString(entry.API.Args.Object) entry.RemoteHost = hashString(entry.RemoteHost) entry.Trace.Message = reflect.TypeOf(err).String() entry.Trace.Variables = make(map[string]string) } 
// Iterate over all logger targets to send the log entry for _, t := range Targets { t.Send(entry) } } // ErrCritical is the value panic'd whenever CriticalIf is called. var ErrCritical struct{} // CriticalIf logs the provided error on the console. It fails the // current go-routine by causing a `panic(ErrCritical)`. func CriticalIf(ctx context.Context, err error) { if err != nil { LogIf(ctx, err) panic(ErrCritical) } } // FatalIf is similar to Fatal() but it ignores passed nil error func FatalIf(err error, msg string, data ...interface{}) { if err == nil { return } fatal(err, msg, data...) }
{ return trace }
conditional_block
select.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime // This file contains the implementation of Go select statements. import ( "runtime/internal/atomic" "unsafe" ) const debugSelect = false // Select case descriptor. // Known to compiler. // Changes here must also be made in src/cmd/internal/gc/select.go's scasetype. type scase struct { c *hchan // chan elem unsafe.Pointer // data element } var ( chansendpc = funcPC(chansend) chanrecvpc = funcPC(chanrecv) ) func selectsetpc(pc *uintptr) { *pc = getcallerpc() } func sellock(scases []scase, lockorder []uint16) { var c *hchan for _, o := range lockorder { c0 := scases[o].c if c0 != c { c = c0 lock(&c.lock) } } } func selunlock(scases []scase, lockorder []uint16) { // We must be very careful here to not touch sel after we have unlocked // the last lock, because sel can be freed right after the last unlock. // Consider the following situation. // First M calls runtime·park() in runtime·selectgo() passing the sel. // Once runtime·park() has unlocked the last lock, another M makes // the G that calls select runnable again and schedules it for execution. // When the G runs on another M, it locks all the locks and frees sel. // Now if the first M touches sel, it will access freed memory. for i := len(lockorder) - 1; i >= 0; i-- { c := scases[lockorder[i]].c if i > 0 && c == scases[lockorder[i-1]].c { continue // will unlock it on the next iteration } unlock(&c.lock) } } func selparkcommit(gp *g, _ unsafe.Pointer) bool { // There are unlocked sudogs that point into gp's stack. Stack // copying must lock the channels of those sudogs. // Set activeStackChans here instead of before we try parking // because we could self-deadlock in stack growth on a // channel lock. 
gp.activeStackChans = true // Mark that it's safe for stack shrinking to occur now, // because any thread acquiring this G's stack for shrinking // is guaranteed to observe activeStackChans after this store. atomic.Store8(&gp.parkingOnChan, 0) // Make sure we unlock after setting activeStackChans and // unsetting parkingOnChan. The moment we unlock any of the // channel locks we risk gp getting readied by a channel operation // and so gp could continue running before everything before the // unlock is visible (even to gp itself). // This must not access gp's stack (see gopark). In // particular, it must not access the *hselect. That's okay, // because by the time this is called, gp.waiting has all // channels in lock order. var lastc *hchan for sg := gp.waiting; sg != nil; sg = sg.waitlink { if sg.c != lastc && lastc != nil { // As soon as we unlock the channel, fields in // any sudog with that channel may change, // including c and waitlink. Since multiple // sudogs may have the same channel, we unlock // only after we've passed the last instance // of a channel. unlock(&lastc.lock) } lastc = sg.c } if lastc != nil { unlock(&lastc.lock) } return true } func block() { gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever } // selectgo implements the select statement. // // cas0 points to an array of type [ncases]scase, and order0 points to // an array of type [2*ncases]uint16 where ncases must be <= 65536. // Both reside on the goroutine's stack (regardless of any escaping in // selectgo). // // For race detector builds, pc0 points to an array of type // [ncases]uintptr (also on the stack); for other builds, it's set to // nil. // // selectgo returns the index of the chosen scase, which matches the // ordinal position of its respective select{recv,send,default} call. // Also, if the chosen scase was a receive operation, it reports whether // a value was received. 
func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) { if debugSelect { print("select: cas0=", cas0, "\n") } // NOTE: In order to maintain a lean stack size, the number of scases // is capped at 65536. cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0)) order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0)) ncases := nsends + nrecvs scases := cas1[:ncases:ncases] pollorder := order1[:ncases:ncases] lockorder := order1[ncases:][:ncases:ncases] // NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler. // Even when raceenabled is true, there might be select // statements in packages compiled without -race (e.g., // ensureSigM in runtime/signal_unix.go). var pcs []uintptr if raceenabled && pc0 != nil { pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0)) pcs = pc1[:ncases:ncases] } casePC := func(casi int) uintptr { if pcs == nil { return 0 } return pcs[casi] } var t0 int64 if blockprofilerate > 0 { t0 = cputicks() } // The compiler rewrites selects that statically have // only 0 or 1 cases plus default into simpler constructs. // The only way we can end up with such small sel.ncase // values here is for a larger select in which most channels // have been nilled out. The general code handles those // cases correctly, and they are rare enough not to bother // optimizing (and needing to test). // generate permuted order norder := 0 for i := range scases { cas := &scases[i] // Omit cases without channels from the poll and lock orders. if cas.c == nil { cas.elem = nil // allow GC continue } j := fastrandn(uint32(norder + 1)) pollorder[norder] = pollorder[j] pollorder[j] = uint16(i) norder++ } pollorder = pollorder[:norder] lockorder = lockorder[:norder] // sort the cases by Hchan address to get the locking order. // simple heap sort, to guarantee n log n time and constant stack footprint. for i := range lockorder { j := i // Start with the pollorder to permute cases on the same channel. 
c := scases[pollorder[i]].c for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() { k := (j - 1) / 2 lockorder[j] = lockorder[k] j = k } lockorder[j] = pollorder[i] } for i := len(lockorder) - 1; i >= 0; i-- { o := lockorder[i] c := scases[o].c lockorder[i] = lockorder[0] j := 0 for { k := j*2 + 1 if k >= i { break } if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() { k++ } if c.sortkey() < scases[lockorder[k]].c.sortkey() { lockorder[j] = lockorder[k] j = k continue } break } lockorder[j] = o } if debugSelect { for i := 0; i+1 < len(lockorder); i++ { if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() { print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n") throw("select: broken sort") } } } // lock all the channels involved in the select sellock(scases, lockorder) var ( gp *g sg *sudog c *hchan k *scase sglist *sudog sgnext *sudog qp unsafe.Pointer nextp **sudog ) // pass 1 - look for something already waiting var casi int var cas *scase var caseSuccess bool var caseReleaseTime int64 = -1 var recvOK bool for _, casei := range pollorder { casi = int(casei) cas = &scases[casi] c = cas.c if casi >= nsends { sg = c.sendq.dequeue() if sg != nil { goto recv } if c.qcount > 0 { goto bufrecv } if c.closed != 0 { goto rclose } } else { if raceenabled { racereadpc(c.raceaddr(), casePC(casi), chansendpc) } if c.closed != 0 { goto sclose } sg = c.recvq.dequeue() if sg != nil { goto send } if c.qcount < c.dataqsiz { goto bufsend } } } if !block { selunlock(scases, lockorder) casi = -1 goto retc } // pass 2 - enqueue on all chans gp = getg() if gp.waiting != nil { throw("gp.waiting != nil") } nextp = &gp.waiting for _, casei := range lockorder { casi = int(casei) cas = &scases[casi] c = cas.c sg := acquireSudog() sg.g = gp sg.isSelect = true // No stack splits between assigning elem and enqueuing // sg on gp.waiting where copystack can find it. 
sg.elem = cas.elem sg.releasetime = 0 if t0 != 0 { sg.releasetime = -1 } sg.c = c // Construct waiting list in lock order. *nextp = sg nextp = &sg.waitlink if casi < nsends { c.sendq.enqueue(sg) } else { c.recvq.enqueue(sg) } } // wait for someone to wake us up gp.param = nil // Signal to anyone trying to shrink our stack that we're about // to park on a channel. The window between when this G's status // changes and when we set gp.activeStackChans is not safe for // stack shrinking. atomic.Store8(&gp.parkingOnChan, 1) gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1) gp.activeStackChans = false sellock(scases, lockorder) gp.selectDone = 0 sg = (*sudog)(gp.param) gp.param = nil // pass 3 - dequeue from unsuccessful chans // otherwise they stack up on quiet channels // record the successful case, if any. // We singly-linked up the SudoGs in lock order. casi = -1 cas = nil caseSuccess = false sglist = gp.waiting // Clear all elem before unlinking from gp.waiting. for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { sg1.isSelect = false sg1.elem = nil sg1.c = nil } gp.waiting = nil for _, casei := range lockorder { k = &scases[casei] if sg == sglist { // sg has already been dequeued by the G that woke us up. 
casi = int(casei) cas = k caseSuccess = sglist.success if sglist.releasetime > 0 { caseReleaseTime = sglist.releasetime } } else { c = k.c if int(casei) < nsends { c.sendq.dequeueSudoG(sglist) } else { c.recvq.dequeueSudoG(sglist) } } sgnext = sglist.waitlink sglist.waitlink = nil releaseSudog(sglist) sglist = sgnext } if cas == nil { throw("selectgo: bad wakeup") } c = cas.c if debugSelect { print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n") } if casi < nsends { if !caseSuccess { goto sclose } } else { recvOK = caseSuccess } if raceenabled { if casi < nsends { raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } else if cas.elem != nil { raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) } } if msanenabled { if casi < nsends { msanread(cas.elem, c.elemtype.size) } else if cas.elem != nil { msanwrite(cas.elem, c.elemtype.size) } } selunlock(scases, lockorder) goto retc bufrecv: // can receive from buffer if raceenabled { if cas.elem != nil { raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) } racereleaseacquire(chanbuf(c, c.recvx)) } if msanenabled && cas.elem != nil { msanwrite(cas.elem, c.elemtype.size) } recvOK = true qp = chanbuf(c, c.recvx) if cas.elem != nil { typedmemmove(c.elemtype, cas.elem, qp) } typedmemclr(c.elemtype, qp) c.recvx++ if c.recvx == c.dataqsiz { c.recvx = 0 } c.qcount-- selunlock(scases, lockorder) goto retc bufsend: // can send to buffer if raceenabled { racereleaseacquire(chanbuf(c, c.sendx)) raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { msanread(cas.elem, c.elemtype.size) } typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem) c.sendx++ if c.sendx == c.dataqsiz { c.sendx = 0 } c.qcount++ selunlock(scases, lockorder) goto retc recv: // can receive from sleeping sender (sg) recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect { print("syncrecv: cas0=", cas0, " c=", c, "\n") } recvOK = true 
goto retc rclose: // read at end of closed channel selunlock(scases, lockorder) recvOK = false if cas.elem != nil { typedmemclr(c.elemtype, cas.elem) } if raceenabled { raceacquire(c.raceaddr()) } goto retc send: // can send to a sleeping receiver (sg) if raceenabled { raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { msanread(cas.elem, c.elemtype.size) } send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect {
oto retc retc: if caseReleaseTime > 0 { blockevent(caseReleaseTime-t0, 1) } return casi, recvOK sclose: // send on closed channel selunlock(scases, lockorder) panic(plainError("send on closed channel")) } func (c *hchan) sortkey() uintptr { return uintptr(unsafe.Pointer(c)) } // A runtimeSelect is a single case passed to rselect. // This must match ../reflect/value.go:/runtimeSelect type runtimeSelect struct { dir selectDir typ unsafe.Pointer // channel type (not used here) ch *hchan // channel val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir) } // These values must match ../reflect/value.go:/SelectDir. type selectDir int const ( _ selectDir = iota selectSend // case Chan <- Send selectRecv // case <-Chan: selectDefault // default ) //go:linkname reflect_rselect reflect.rselect func reflect_rselect(cases []runtimeSelect) (int, bool) { if len(cases) == 0 { block() } sel := make([]scase, len(cases)) orig := make([]int, len(cases)) nsends, nrecvs := 0, 0 dflt := -1 for i, rc := range cases { var j int switch rc.dir { case selectDefault: dflt = i continue case selectSend: j = nsends nsends++ case selectRecv: nrecvs++ j = len(cases) - nrecvs } sel[j] = scase{c: rc.ch, elem: rc.val} orig[j] = i } // Only a default case. if nsends+nrecvs == 0 { return dflt, false } // Compact sel and orig if necessary. if nsends+nrecvs < len(cases) { copy(sel[nsends:], sel[len(cases)-nrecvs:]) copy(orig[nsends:], orig[len(cases)-nrecvs:]) } order := make([]uint16, 2*(nsends+nrecvs)) var pc0 *uintptr if raceenabled { pcs := make([]uintptr, nsends+nrecvs) for i := range pcs { selectsetpc(&pcs[i]) } pc0 = &pcs[0] } chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1) // Translate chosen back to caller's ordering. 
if chosen < 0 { chosen = dflt } else { chosen = orig[chosen] } return chosen, recvOK } func (q *waitq) dequeueSudoG(sgp *sudog) { x := sgp.prev y := sgp.next if x != nil { if y != nil { // middle of queue x.next = y y.prev = x sgp.next = nil sgp.prev = nil return } // end of queue x.next = nil q.last = x sgp.prev = nil return } if y != nil { // start of queue y.prev = nil q.first = y sgp.next = nil return } // x==y==nil. Either sgp is the only element in the queue, // or it has already been removed. Use q.first to disambiguate. if q.first == sgp { q.first = nil q.last = nil } }
print("syncsend: cas0=", cas0, " c=", c, "\n") } g
conditional_block
select.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime // This file contains the implementation of Go select statements. import ( "runtime/internal/atomic" "unsafe" ) const debugSelect = false // Select case descriptor. // Known to compiler. // Changes here must also be made in src/cmd/internal/gc/select.go's scasetype. type scase struct { c *hchan // chan elem unsafe.Pointer // data element } var ( chansendpc = funcPC(chansend) chanrecvpc = funcPC(chanrecv) ) func selectsetpc(pc *uintptr) { *pc = getcallerpc() } func sellock(scases []scase, lockorder []uint16) { var c *hchan for _, o := range lockorder { c0 := scases[o].c if c0 != c { c = c0 lock(&c.lock) } } } func selunlock(scases []scase, lockorder []uint16) { // We must be very careful here to not touch sel after we have unlocked // the last lock, because sel can be freed right after the last unlock. // Consider the following situation. // First M calls runtime·park() in runtime·selectgo() passing the sel. // Once runtime·park() has unlocked the last lock, another M makes // the G that calls select runnable again and schedules it for execution. // When the G runs on another M, it locks all the locks and frees sel. // Now if the first M touches sel, it will access freed memory. for i := len(lockorder) - 1; i >= 0; i-- { c := scases[lockorder[i]].c if i > 0 && c == scases[lockorder[i-1]].c { continue // will unlock it on the next iteration } unlock(&c.lock) } } func selparkcommit(gp *g, _ unsafe.Pointer) bool {
unc block() { gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever } // selectgo implements the select statement. // // cas0 points to an array of type [ncases]scase, and order0 points to // an array of type [2*ncases]uint16 where ncases must be <= 65536. // Both reside on the goroutine's stack (regardless of any escaping in // selectgo). // // For race detector builds, pc0 points to an array of type // [ncases]uintptr (also on the stack); for other builds, it's set to // nil. // // selectgo returns the index of the chosen scase, which matches the // ordinal position of its respective select{recv,send,default} call. // Also, if the chosen scase was a receive operation, it reports whether // a value was received. func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) { if debugSelect { print("select: cas0=", cas0, "\n") } // NOTE: In order to maintain a lean stack size, the number of scases // is capped at 65536. cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0)) order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0)) ncases := nsends + nrecvs scases := cas1[:ncases:ncases] pollorder := order1[:ncases:ncases] lockorder := order1[ncases:][:ncases:ncases] // NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler. // Even when raceenabled is true, there might be select // statements in packages compiled without -race (e.g., // ensureSigM in runtime/signal_unix.go). var pcs []uintptr if raceenabled && pc0 != nil { pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0)) pcs = pc1[:ncases:ncases] } casePC := func(casi int) uintptr { if pcs == nil { return 0 } return pcs[casi] } var t0 int64 if blockprofilerate > 0 { t0 = cputicks() } // The compiler rewrites selects that statically have // only 0 or 1 cases plus default into simpler constructs. // The only way we can end up with such small sel.ncase // values here is for a larger select in which most channels // have been nilled out. 
The general code handles those // cases correctly, and they are rare enough not to bother // optimizing (and needing to test). // generate permuted order norder := 0 for i := range scases { cas := &scases[i] // Omit cases without channels from the poll and lock orders. if cas.c == nil { cas.elem = nil // allow GC continue } j := fastrandn(uint32(norder + 1)) pollorder[norder] = pollorder[j] pollorder[j] = uint16(i) norder++ } pollorder = pollorder[:norder] lockorder = lockorder[:norder] // sort the cases by Hchan address to get the locking order. // simple heap sort, to guarantee n log n time and constant stack footprint. for i := range lockorder { j := i // Start with the pollorder to permute cases on the same channel. c := scases[pollorder[i]].c for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() { k := (j - 1) / 2 lockorder[j] = lockorder[k] j = k } lockorder[j] = pollorder[i] } for i := len(lockorder) - 1; i >= 0; i-- { o := lockorder[i] c := scases[o].c lockorder[i] = lockorder[0] j := 0 for { k := j*2 + 1 if k >= i { break } if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() { k++ } if c.sortkey() < scases[lockorder[k]].c.sortkey() { lockorder[j] = lockorder[k] j = k continue } break } lockorder[j] = o } if debugSelect { for i := 0; i+1 < len(lockorder); i++ { if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() { print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n") throw("select: broken sort") } } } // lock all the channels involved in the select sellock(scases, lockorder) var ( gp *g sg *sudog c *hchan k *scase sglist *sudog sgnext *sudog qp unsafe.Pointer nextp **sudog ) // pass 1 - look for something already waiting var casi int var cas *scase var caseSuccess bool var caseReleaseTime int64 = -1 var recvOK bool for _, casei := range pollorder { casi = int(casei) cas = &scases[casi] c = cas.c if casi >= nsends { sg = c.sendq.dequeue() if sg != nil { goto recv } if c.qcount > 0 { 
goto bufrecv } if c.closed != 0 { goto rclose } } else { if raceenabled { racereadpc(c.raceaddr(), casePC(casi), chansendpc) } if c.closed != 0 { goto sclose } sg = c.recvq.dequeue() if sg != nil { goto send } if c.qcount < c.dataqsiz { goto bufsend } } } if !block { selunlock(scases, lockorder) casi = -1 goto retc } // pass 2 - enqueue on all chans gp = getg() if gp.waiting != nil { throw("gp.waiting != nil") } nextp = &gp.waiting for _, casei := range lockorder { casi = int(casei) cas = &scases[casi] c = cas.c sg := acquireSudog() sg.g = gp sg.isSelect = true // No stack splits between assigning elem and enqueuing // sg on gp.waiting where copystack can find it. sg.elem = cas.elem sg.releasetime = 0 if t0 != 0 { sg.releasetime = -1 } sg.c = c // Construct waiting list in lock order. *nextp = sg nextp = &sg.waitlink if casi < nsends { c.sendq.enqueue(sg) } else { c.recvq.enqueue(sg) } } // wait for someone to wake us up gp.param = nil // Signal to anyone trying to shrink our stack that we're about // to park on a channel. The window between when this G's status // changes and when we set gp.activeStackChans is not safe for // stack shrinking. atomic.Store8(&gp.parkingOnChan, 1) gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1) gp.activeStackChans = false sellock(scases, lockorder) gp.selectDone = 0 sg = (*sudog)(gp.param) gp.param = nil // pass 3 - dequeue from unsuccessful chans // otherwise they stack up on quiet channels // record the successful case, if any. // We singly-linked up the SudoGs in lock order. casi = -1 cas = nil caseSuccess = false sglist = gp.waiting // Clear all elem before unlinking from gp.waiting. for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { sg1.isSelect = false sg1.elem = nil sg1.c = nil } gp.waiting = nil for _, casei := range lockorder { k = &scases[casei] if sg == sglist { // sg has already been dequeued by the G that woke us up. 
casi = int(casei) cas = k caseSuccess = sglist.success if sglist.releasetime > 0 { caseReleaseTime = sglist.releasetime } } else { c = k.c if int(casei) < nsends { c.sendq.dequeueSudoG(sglist) } else { c.recvq.dequeueSudoG(sglist) } } sgnext = sglist.waitlink sglist.waitlink = nil releaseSudog(sglist) sglist = sgnext } if cas == nil { throw("selectgo: bad wakeup") } c = cas.c if debugSelect { print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n") } if casi < nsends { if !caseSuccess { goto sclose } } else { recvOK = caseSuccess } if raceenabled { if casi < nsends { raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } else if cas.elem != nil { raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) } } if msanenabled { if casi < nsends { msanread(cas.elem, c.elemtype.size) } else if cas.elem != nil { msanwrite(cas.elem, c.elemtype.size) } } selunlock(scases, lockorder) goto retc bufrecv: // can receive from buffer if raceenabled { if cas.elem != nil { raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) } racereleaseacquire(chanbuf(c, c.recvx)) } if msanenabled && cas.elem != nil { msanwrite(cas.elem, c.elemtype.size) } recvOK = true qp = chanbuf(c, c.recvx) if cas.elem != nil { typedmemmove(c.elemtype, cas.elem, qp) } typedmemclr(c.elemtype, qp) c.recvx++ if c.recvx == c.dataqsiz { c.recvx = 0 } c.qcount-- selunlock(scases, lockorder) goto retc bufsend: // can send to buffer if raceenabled { racereleaseacquire(chanbuf(c, c.sendx)) raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { msanread(cas.elem, c.elemtype.size) } typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem) c.sendx++ if c.sendx == c.dataqsiz { c.sendx = 0 } c.qcount++ selunlock(scases, lockorder) goto retc recv: // can receive from sleeping sender (sg) recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect { print("syncrecv: cas0=", cas0, " c=", c, "\n") } recvOK = true 
goto retc rclose: // read at end of closed channel selunlock(scases, lockorder) recvOK = false if cas.elem != nil { typedmemclr(c.elemtype, cas.elem) } if raceenabled { raceacquire(c.raceaddr()) } goto retc send: // can send to a sleeping receiver (sg) if raceenabled { raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { msanread(cas.elem, c.elemtype.size) } send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect { print("syncsend: cas0=", cas0, " c=", c, "\n") } goto retc retc: if caseReleaseTime > 0 { blockevent(caseReleaseTime-t0, 1) } return casi, recvOK sclose: // send on closed channel selunlock(scases, lockorder) panic(plainError("send on closed channel")) } func (c *hchan) sortkey() uintptr { return uintptr(unsafe.Pointer(c)) } // A runtimeSelect is a single case passed to rselect. // This must match ../reflect/value.go:/runtimeSelect type runtimeSelect struct { dir selectDir typ unsafe.Pointer // channel type (not used here) ch *hchan // channel val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir) } // These values must match ../reflect/value.go:/SelectDir. type selectDir int const ( _ selectDir = iota selectSend // case Chan <- Send selectRecv // case <-Chan: selectDefault // default ) //go:linkname reflect_rselect reflect.rselect func reflect_rselect(cases []runtimeSelect) (int, bool) { if len(cases) == 0 { block() } sel := make([]scase, len(cases)) orig := make([]int, len(cases)) nsends, nrecvs := 0, 0 dflt := -1 for i, rc := range cases { var j int switch rc.dir { case selectDefault: dflt = i continue case selectSend: j = nsends nsends++ case selectRecv: nrecvs++ j = len(cases) - nrecvs } sel[j] = scase{c: rc.ch, elem: rc.val} orig[j] = i } // Only a default case. if nsends+nrecvs == 0 { return dflt, false } // Compact sel and orig if necessary. 
if nsends+nrecvs < len(cases) { copy(sel[nsends:], sel[len(cases)-nrecvs:]) copy(orig[nsends:], orig[len(cases)-nrecvs:]) } order := make([]uint16, 2*(nsends+nrecvs)) var pc0 *uintptr if raceenabled { pcs := make([]uintptr, nsends+nrecvs) for i := range pcs { selectsetpc(&pcs[i]) } pc0 = &pcs[0] } chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1) // Translate chosen back to caller's ordering. if chosen < 0 { chosen = dflt } else { chosen = orig[chosen] } return chosen, recvOK } func (q *waitq) dequeueSudoG(sgp *sudog) { x := sgp.prev y := sgp.next if x != nil { if y != nil { // middle of queue x.next = y y.prev = x sgp.next = nil sgp.prev = nil return } // end of queue x.next = nil q.last = x sgp.prev = nil return } if y != nil { // start of queue y.prev = nil q.first = y sgp.next = nil return } // x==y==nil. Either sgp is the only element in the queue, // or it has already been removed. Use q.first to disambiguate. if q.first == sgp { q.first = nil q.last = nil } }
// There are unlocked sudogs that point into gp's stack. Stack // copying must lock the channels of those sudogs. // Set activeStackChans here instead of before we try parking // because we could self-deadlock in stack growth on a // channel lock. gp.activeStackChans = true // Mark that it's safe for stack shrinking to occur now, // because any thread acquiring this G's stack for shrinking // is guaranteed to observe activeStackChans after this store. atomic.Store8(&gp.parkingOnChan, 0) // Make sure we unlock after setting activeStackChans and // unsetting parkingOnChan. The moment we unlock any of the // channel locks we risk gp getting readied by a channel operation // and so gp could continue running before everything before the // unlock is visible (even to gp itself). // This must not access gp's stack (see gopark). In // particular, it must not access the *hselect. That's okay, // because by the time this is called, gp.waiting has all // channels in lock order. var lastc *hchan for sg := gp.waiting; sg != nil; sg = sg.waitlink { if sg.c != lastc && lastc != nil { // As soon as we unlock the channel, fields in // any sudog with that channel may change, // including c and waitlink. Since multiple // sudogs may have the same channel, we unlock // only after we've passed the last instance // of a channel. unlock(&lastc.lock) } lastc = sg.c } if lastc != nil { unlock(&lastc.lock) } return true } f
identifier_body
select.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime // This file contains the implementation of Go select statements. import ( "runtime/internal/atomic" "unsafe" ) const debugSelect = false // Select case descriptor. // Known to compiler. // Changes here must also be made in src/cmd/internal/gc/select.go's scasetype. type scase struct { c *hchan // chan elem unsafe.Pointer // data element } var ( chansendpc = funcPC(chansend) chanrecvpc = funcPC(chanrecv) ) func selectsetpc(pc *uintptr) { *pc = getcallerpc() } func sellock(scases []scase, lockorder []uint16) { var c *hchan for _, o := range lockorder { c0 := scases[o].c if c0 != c { c = c0 lock(&c.lock) } } } func selunlock(scases []scase, lockorder []uint16) { // We must be very careful here to not touch sel after we have unlocked // the last lock, because sel can be freed right after the last unlock. // Consider the following situation. // First M calls runtime·park() in runtime·selectgo() passing the sel. // Once runtime·park() has unlocked the last lock, another M makes // the G that calls select runnable again and schedules it for execution. // When the G runs on another M, it locks all the locks and frees sel. // Now if the first M touches sel, it will access freed memory. for i := len(lockorder) - 1; i >= 0; i-- { c := scases[lockorder[i]].c if i > 0 && c == scases[lockorder[i-1]].c { continue // will unlock it on the next iteration } unlock(&c.lock) } } func selparkcommit(gp *g, _ unsafe.Pointer) bool { // There are unlocked sudogs that point into gp's stack. Stack // copying must lock the channels of those sudogs. // Set activeStackChans here instead of before we try parking // because we could self-deadlock in stack growth on a // channel lock. 
gp.activeStackChans = true // Mark that it's safe for stack shrinking to occur now, // because any thread acquiring this G's stack for shrinking // is guaranteed to observe activeStackChans after this store. atomic.Store8(&gp.parkingOnChan, 0) // Make sure we unlock after setting activeStackChans and // unsetting parkingOnChan. The moment we unlock any of the // channel locks we risk gp getting readied by a channel operation // and so gp could continue running before everything before the // unlock is visible (even to gp itself). // This must not access gp's stack (see gopark). In // particular, it must not access the *hselect. That's okay, // because by the time this is called, gp.waiting has all // channels in lock order. var lastc *hchan for sg := gp.waiting; sg != nil; sg = sg.waitlink { if sg.c != lastc && lastc != nil { // As soon as we unlock the channel, fields in // any sudog with that channel may change, // including c and waitlink. Since multiple // sudogs may have the same channel, we unlock // only after we've passed the last instance // of a channel. unlock(&lastc.lock) } lastc = sg.c } if lastc != nil { unlock(&lastc.lock) } return true } func block() { gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever } // selectgo implements the select statement. // // cas0 points to an array of type [ncases]scase, and order0 points to // an array of type [2*ncases]uint16 where ncases must be <= 65536. // Both reside on the goroutine's stack (regardless of any escaping in // selectgo). // // For race detector builds, pc0 points to an array of type // [ncases]uintptr (also on the stack); for other builds, it's set to // nil. // // selectgo returns the index of the chosen scase, which matches the // ordinal position of its respective select{recv,send,default} call. // Also, if the chosen scase was a receive operation, it reports whether // a value was received. func sel
s0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) { if debugSelect { print("select: cas0=", cas0, "\n") } // NOTE: In order to maintain a lean stack size, the number of scases // is capped at 65536. cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0)) order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0)) ncases := nsends + nrecvs scases := cas1[:ncases:ncases] pollorder := order1[:ncases:ncases] lockorder := order1[ncases:][:ncases:ncases] // NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler. // Even when raceenabled is true, there might be select // statements in packages compiled without -race (e.g., // ensureSigM in runtime/signal_unix.go). var pcs []uintptr if raceenabled && pc0 != nil { pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0)) pcs = pc1[:ncases:ncases] } casePC := func(casi int) uintptr { if pcs == nil { return 0 } return pcs[casi] } var t0 int64 if blockprofilerate > 0 { t0 = cputicks() } // The compiler rewrites selects that statically have // only 0 or 1 cases plus default into simpler constructs. // The only way we can end up with such small sel.ncase // values here is for a larger select in which most channels // have been nilled out. The general code handles those // cases correctly, and they are rare enough not to bother // optimizing (and needing to test). // generate permuted order norder := 0 for i := range scases { cas := &scases[i] // Omit cases without channels from the poll and lock orders. if cas.c == nil { cas.elem = nil // allow GC continue } j := fastrandn(uint32(norder + 1)) pollorder[norder] = pollorder[j] pollorder[j] = uint16(i) norder++ } pollorder = pollorder[:norder] lockorder = lockorder[:norder] // sort the cases by Hchan address to get the locking order. // simple heap sort, to guarantee n log n time and constant stack footprint. for i := range lockorder { j := i // Start with the pollorder to permute cases on the same channel. 
c := scases[pollorder[i]].c for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() { k := (j - 1) / 2 lockorder[j] = lockorder[k] j = k } lockorder[j] = pollorder[i] } for i := len(lockorder) - 1; i >= 0; i-- { o := lockorder[i] c := scases[o].c lockorder[i] = lockorder[0] j := 0 for { k := j*2 + 1 if k >= i { break } if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() { k++ } if c.sortkey() < scases[lockorder[k]].c.sortkey() { lockorder[j] = lockorder[k] j = k continue } break } lockorder[j] = o } if debugSelect { for i := 0; i+1 < len(lockorder); i++ { if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() { print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n") throw("select: broken sort") } } } // lock all the channels involved in the select sellock(scases, lockorder) var ( gp *g sg *sudog c *hchan k *scase sglist *sudog sgnext *sudog qp unsafe.Pointer nextp **sudog ) // pass 1 - look for something already waiting var casi int var cas *scase var caseSuccess bool var caseReleaseTime int64 = -1 var recvOK bool for _, casei := range pollorder { casi = int(casei) cas = &scases[casi] c = cas.c if casi >= nsends { sg = c.sendq.dequeue() if sg != nil { goto recv } if c.qcount > 0 { goto bufrecv } if c.closed != 0 { goto rclose } } else { if raceenabled { racereadpc(c.raceaddr(), casePC(casi), chansendpc) } if c.closed != 0 { goto sclose } sg = c.recvq.dequeue() if sg != nil { goto send } if c.qcount < c.dataqsiz { goto bufsend } } } if !block { selunlock(scases, lockorder) casi = -1 goto retc } // pass 2 - enqueue on all chans gp = getg() if gp.waiting != nil { throw("gp.waiting != nil") } nextp = &gp.waiting for _, casei := range lockorder { casi = int(casei) cas = &scases[casi] c = cas.c sg := acquireSudog() sg.g = gp sg.isSelect = true // No stack splits between assigning elem and enqueuing // sg on gp.waiting where copystack can find it. 
sg.elem = cas.elem sg.releasetime = 0 if t0 != 0 { sg.releasetime = -1 } sg.c = c // Construct waiting list in lock order. *nextp = sg nextp = &sg.waitlink if casi < nsends { c.sendq.enqueue(sg) } else { c.recvq.enqueue(sg) } } // wait for someone to wake us up gp.param = nil // Signal to anyone trying to shrink our stack that we're about // to park on a channel. The window between when this G's status // changes and when we set gp.activeStackChans is not safe for // stack shrinking. atomic.Store8(&gp.parkingOnChan, 1) gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1) gp.activeStackChans = false sellock(scases, lockorder) gp.selectDone = 0 sg = (*sudog)(gp.param) gp.param = nil // pass 3 - dequeue from unsuccessful chans // otherwise they stack up on quiet channels // record the successful case, if any. // We singly-linked up the SudoGs in lock order. casi = -1 cas = nil caseSuccess = false sglist = gp.waiting // Clear all elem before unlinking from gp.waiting. for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { sg1.isSelect = false sg1.elem = nil sg1.c = nil } gp.waiting = nil for _, casei := range lockorder { k = &scases[casei] if sg == sglist { // sg has already been dequeued by the G that woke us up. 
casi = int(casei) cas = k caseSuccess = sglist.success if sglist.releasetime > 0 { caseReleaseTime = sglist.releasetime } } else { c = k.c if int(casei) < nsends { c.sendq.dequeueSudoG(sglist) } else { c.recvq.dequeueSudoG(sglist) } } sgnext = sglist.waitlink sglist.waitlink = nil releaseSudog(sglist) sglist = sgnext } if cas == nil { throw("selectgo: bad wakeup") } c = cas.c if debugSelect { print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n") } if casi < nsends { if !caseSuccess { goto sclose } } else { recvOK = caseSuccess } if raceenabled { if casi < nsends { raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } else if cas.elem != nil { raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) } } if msanenabled { if casi < nsends { msanread(cas.elem, c.elemtype.size) } else if cas.elem != nil { msanwrite(cas.elem, c.elemtype.size) } } selunlock(scases, lockorder) goto retc bufrecv: // can receive from buffer if raceenabled { if cas.elem != nil { raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) } racereleaseacquire(chanbuf(c, c.recvx)) } if msanenabled && cas.elem != nil { msanwrite(cas.elem, c.elemtype.size) } recvOK = true qp = chanbuf(c, c.recvx) if cas.elem != nil { typedmemmove(c.elemtype, cas.elem, qp) } typedmemclr(c.elemtype, qp) c.recvx++ if c.recvx == c.dataqsiz { c.recvx = 0 } c.qcount-- selunlock(scases, lockorder) goto retc bufsend: // can send to buffer if raceenabled { racereleaseacquire(chanbuf(c, c.sendx)) raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { msanread(cas.elem, c.elemtype.size) } typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem) c.sendx++ if c.sendx == c.dataqsiz { c.sendx = 0 } c.qcount++ selunlock(scases, lockorder) goto retc recv: // can receive from sleeping sender (sg) recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect { print("syncrecv: cas0=", cas0, " c=", c, "\n") } recvOK = true 
goto retc rclose: // read at end of closed channel selunlock(scases, lockorder) recvOK = false if cas.elem != nil { typedmemclr(c.elemtype, cas.elem) } if raceenabled { raceacquire(c.raceaddr()) } goto retc send: // can send to a sleeping receiver (sg) if raceenabled { raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { msanread(cas.elem, c.elemtype.size) } send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect { print("syncsend: cas0=", cas0, " c=", c, "\n") } goto retc retc: if caseReleaseTime > 0 { blockevent(caseReleaseTime-t0, 1) } return casi, recvOK sclose: // send on closed channel selunlock(scases, lockorder) panic(plainError("send on closed channel")) } func (c *hchan) sortkey() uintptr { return uintptr(unsafe.Pointer(c)) } // A runtimeSelect is a single case passed to rselect. // This must match ../reflect/value.go:/runtimeSelect type runtimeSelect struct { dir selectDir typ unsafe.Pointer // channel type (not used here) ch *hchan // channel val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir) } // These values must match ../reflect/value.go:/SelectDir. type selectDir int const ( _ selectDir = iota selectSend // case Chan <- Send selectRecv // case <-Chan: selectDefault // default ) //go:linkname reflect_rselect reflect.rselect func reflect_rselect(cases []runtimeSelect) (int, bool) { if len(cases) == 0 { block() } sel := make([]scase, len(cases)) orig := make([]int, len(cases)) nsends, nrecvs := 0, 0 dflt := -1 for i, rc := range cases { var j int switch rc.dir { case selectDefault: dflt = i continue case selectSend: j = nsends nsends++ case selectRecv: nrecvs++ j = len(cases) - nrecvs } sel[j] = scase{c: rc.ch, elem: rc.val} orig[j] = i } // Only a default case. if nsends+nrecvs == 0 { return dflt, false } // Compact sel and orig if necessary. 
if nsends+nrecvs < len(cases) { copy(sel[nsends:], sel[len(cases)-nrecvs:]) copy(orig[nsends:], orig[len(cases)-nrecvs:]) } order := make([]uint16, 2*(nsends+nrecvs)) var pc0 *uintptr if raceenabled { pcs := make([]uintptr, nsends+nrecvs) for i := range pcs { selectsetpc(&pcs[i]) } pc0 = &pcs[0] } chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1) // Translate chosen back to caller's ordering. if chosen < 0 { chosen = dflt } else { chosen = orig[chosen] } return chosen, recvOK } func (q *waitq) dequeueSudoG(sgp *sudog) { x := sgp.prev y := sgp.next if x != nil { if y != nil { // middle of queue x.next = y y.prev = x sgp.next = nil sgp.prev = nil return } // end of queue x.next = nil q.last = x sgp.prev = nil return } if y != nil { // start of queue y.prev = nil q.first = y sgp.next = nil return } // x==y==nil. Either sgp is the only element in the queue, // or it has already been removed. Use q.first to disambiguate. if q.first == sgp { q.first = nil q.last = nil } }
ectgo(ca
identifier_name
select.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime // This file contains the implementation of Go select statements. import ( "runtime/internal/atomic" "unsafe" ) const debugSelect = false // Select case descriptor. // Known to compiler. // Changes here must also be made in src/cmd/internal/gc/select.go's scasetype. type scase struct { c *hchan // chan elem unsafe.Pointer // data element } var ( chansendpc = funcPC(chansend) chanrecvpc = funcPC(chanrecv) ) func selectsetpc(pc *uintptr) { *pc = getcallerpc() } func sellock(scases []scase, lockorder []uint16) { var c *hchan for _, o := range lockorder { c0 := scases[o].c if c0 != c { c = c0 lock(&c.lock) } } } func selunlock(scases []scase, lockorder []uint16) { // We must be very careful here to not touch sel after we have unlocked // the last lock, because sel can be freed right after the last unlock. // Consider the following situation. // First M calls runtime·park() in runtime·selectgo() passing the sel. // Once runtime·park() has unlocked the last lock, another M makes // the G that calls select runnable again and schedules it for execution. // When the G runs on another M, it locks all the locks and frees sel. // Now if the first M touches sel, it will access freed memory. for i := len(lockorder) - 1; i >= 0; i-- { c := scases[lockorder[i]].c if i > 0 && c == scases[lockorder[i-1]].c { continue // will unlock it on the next iteration } unlock(&c.lock) } } func selparkcommit(gp *g, _ unsafe.Pointer) bool { // There are unlocked sudogs that point into gp's stack. Stack // copying must lock the channels of those sudogs. // Set activeStackChans here instead of before we try parking // because we could self-deadlock in stack growth on a // channel lock. 
gp.activeStackChans = true // Mark that it's safe for stack shrinking to occur now, // because any thread acquiring this G's stack for shrinking // is guaranteed to observe activeStackChans after this store. atomic.Store8(&gp.parkingOnChan, 0) // Make sure we unlock after setting activeStackChans and // unsetting parkingOnChan. The moment we unlock any of the // channel locks we risk gp getting readied by a channel operation // and so gp could continue running before everything before the // unlock is visible (even to gp itself). // This must not access gp's stack (see gopark). In // particular, it must not access the *hselect. That's okay, // because by the time this is called, gp.waiting has all // channels in lock order. var lastc *hchan for sg := gp.waiting; sg != nil; sg = sg.waitlink { if sg.c != lastc && lastc != nil { // As soon as we unlock the channel, fields in // any sudog with that channel may change, // including c and waitlink. Since multiple // sudogs may have the same channel, we unlock // only after we've passed the last instance // of a channel. unlock(&lastc.lock) } lastc = sg.c } if lastc != nil { unlock(&lastc.lock) } return true } func block() { gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever } // selectgo implements the select statement. // // cas0 points to an array of type [ncases]scase, and order0 points to // an array of type [2*ncases]uint16 where ncases must be <= 65536. // Both reside on the goroutine's stack (regardless of any escaping in // selectgo). // // For race detector builds, pc0 points to an array of type // [ncases]uintptr (also on the stack); for other builds, it's set to // nil. // // selectgo returns the index of the chosen scase, which matches the // ordinal position of its respective select{recv,send,default} call. // Also, if the chosen scase was a receive operation, it reports whether // a value was received. 
func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) { if debugSelect { print("select: cas0=", cas0, "\n") } // NOTE: In order to maintain a lean stack size, the number of scases // is capped at 65536. cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0)) order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0)) ncases := nsends + nrecvs scases := cas1[:ncases:ncases] pollorder := order1[:ncases:ncases] lockorder := order1[ncases:][:ncases:ncases] // NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler. // Even when raceenabled is true, there might be select // statements in packages compiled without -race (e.g., // ensureSigM in runtime/signal_unix.go). var pcs []uintptr if raceenabled && pc0 != nil { pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0)) pcs = pc1[:ncases:ncases] } casePC := func(casi int) uintptr { if pcs == nil { return 0 } return pcs[casi] } var t0 int64 if blockprofilerate > 0 { t0 = cputicks() } // The compiler rewrites selects that statically have // only 0 or 1 cases plus default into simpler constructs. // The only way we can end up with such small sel.ncase // values here is for a larger select in which most channels // have been nilled out. The general code handles those // cases correctly, and they are rare enough not to bother // optimizing (and needing to test). // generate permuted order norder := 0 for i := range scases { cas := &scases[i] // Omit cases without channels from the poll and lock orders. if cas.c == nil { cas.elem = nil // allow GC continue } j := fastrandn(uint32(norder + 1)) pollorder[norder] = pollorder[j] pollorder[j] = uint16(i) norder++ } pollorder = pollorder[:norder] lockorder = lockorder[:norder] // sort the cases by Hchan address to get the locking order. // simple heap sort, to guarantee n log n time and constant stack footprint. for i := range lockorder { j := i // Start with the pollorder to permute cases on the same channel. 
c := scases[pollorder[i]].c for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() { k := (j - 1) / 2 lockorder[j] = lockorder[k] j = k } lockorder[j] = pollorder[i] } for i := len(lockorder) - 1; i >= 0; i-- { o := lockorder[i] c := scases[o].c lockorder[i] = lockorder[0] j := 0 for { k := j*2 + 1 if k >= i { break } if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() { k++ } if c.sortkey() < scases[lockorder[k]].c.sortkey() { lockorder[j] = lockorder[k] j = k continue } break } lockorder[j] = o } if debugSelect { for i := 0; i+1 < len(lockorder); i++ { if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() { print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n") throw("select: broken sort") } } } // lock all the channels involved in the select sellock(scases, lockorder) var ( gp *g sg *sudog c *hchan k *scase sglist *sudog sgnext *sudog qp unsafe.Pointer nextp **sudog ) // pass 1 - look for something already waiting var casi int var cas *scase var caseSuccess bool var caseReleaseTime int64 = -1 var recvOK bool for _, casei := range pollorder { casi = int(casei) cas = &scases[casi] c = cas.c if casi >= nsends { sg = c.sendq.dequeue() if sg != nil { goto recv } if c.qcount > 0 { goto bufrecv } if c.closed != 0 { goto rclose } } else { if raceenabled { racereadpc(c.raceaddr(), casePC(casi), chansendpc) } if c.closed != 0 { goto sclose } sg = c.recvq.dequeue() if sg != nil { goto send } if c.qcount < c.dataqsiz { goto bufsend } } } if !block { selunlock(scases, lockorder) casi = -1 goto retc } // pass 2 - enqueue on all chans gp = getg() if gp.waiting != nil { throw("gp.waiting != nil") } nextp = &gp.waiting for _, casei := range lockorder { casi = int(casei) cas = &scases[casi] c = cas.c sg := acquireSudog() sg.g = gp sg.isSelect = true // No stack splits between assigning elem and enqueuing // sg on gp.waiting where copystack can find it. 
sg.elem = cas.elem sg.releasetime = 0 if t0 != 0 { sg.releasetime = -1 } sg.c = c // Construct waiting list in lock order. *nextp = sg nextp = &sg.waitlink if casi < nsends { c.sendq.enqueue(sg) } else { c.recvq.enqueue(sg) } } // wait for someone to wake us up gp.param = nil // Signal to anyone trying to shrink our stack that we're about // to park on a channel. The window between when this G's status // changes and when we set gp.activeStackChans is not safe for // stack shrinking. atomic.Store8(&gp.parkingOnChan, 1) gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1) gp.activeStackChans = false sellock(scases, lockorder) gp.selectDone = 0 sg = (*sudog)(gp.param) gp.param = nil // pass 3 - dequeue from unsuccessful chans // otherwise they stack up on quiet channels // record the successful case, if any. // We singly-linked up the SudoGs in lock order. casi = -1 cas = nil caseSuccess = false sglist = gp.waiting // Clear all elem before unlinking from gp.waiting. for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { sg1.isSelect = false sg1.elem = nil sg1.c = nil } gp.waiting = nil for _, casei := range lockorder { k = &scases[casei] if sg == sglist { // sg has already been dequeued by the G that woke us up. 
casi = int(casei) cas = k caseSuccess = sglist.success if sglist.releasetime > 0 { caseReleaseTime = sglist.releasetime } } else { c = k.c if int(casei) < nsends { c.sendq.dequeueSudoG(sglist) } else { c.recvq.dequeueSudoG(sglist) } } sgnext = sglist.waitlink sglist.waitlink = nil releaseSudog(sglist) sglist = sgnext } if cas == nil { throw("selectgo: bad wakeup") } c = cas.c if debugSelect { print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n") } if casi < nsends { if !caseSuccess { goto sclose } } else { recvOK = caseSuccess } if raceenabled { if casi < nsends { raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } else if cas.elem != nil { raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) } } if msanenabled { if casi < nsends { msanread(cas.elem, c.elemtype.size) } else if cas.elem != nil { msanwrite(cas.elem, c.elemtype.size) } } selunlock(scases, lockorder) goto retc bufrecv: // can receive from buffer if raceenabled { if cas.elem != nil { raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) } racereleaseacquire(chanbuf(c, c.recvx)) } if msanenabled && cas.elem != nil { msanwrite(cas.elem, c.elemtype.size) } recvOK = true qp = chanbuf(c, c.recvx) if cas.elem != nil { typedmemmove(c.elemtype, cas.elem, qp) } typedmemclr(c.elemtype, qp) c.recvx++ if c.recvx == c.dataqsiz { c.recvx = 0 } c.qcount-- selunlock(scases, lockorder) goto retc bufsend: // can send to buffer if raceenabled { racereleaseacquire(chanbuf(c, c.sendx)) raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { msanread(cas.elem, c.elemtype.size) } typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem) c.sendx++ if c.sendx == c.dataqsiz { c.sendx = 0 } c.qcount++ selunlock(scases, lockorder) goto retc recv: // can receive from sleeping sender (sg) recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect { print("syncrecv: cas0=", cas0, " c=", c, "\n") } recvOK = true 
goto retc rclose: // read at end of closed channel selunlock(scases, lockorder) recvOK = false if cas.elem != nil { typedmemclr(c.elemtype, cas.elem) } if raceenabled { raceacquire(c.raceaddr()) } goto retc send: // can send to a sleeping receiver (sg) if raceenabled { raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { msanread(cas.elem, c.elemtype.size) } send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect { print("syncsend: cas0=", cas0, " c=", c, "\n") } goto retc retc: if caseReleaseTime > 0 { blockevent(caseReleaseTime-t0, 1) } return casi, recvOK sclose: // send on closed channel selunlock(scases, lockorder) panic(plainError("send on closed channel")) } func (c *hchan) sortkey() uintptr { return uintptr(unsafe.Pointer(c)) } // A runtimeSelect is a single case passed to rselect. // This must match ../reflect/value.go:/runtimeSelect type runtimeSelect struct { dir selectDir typ unsafe.Pointer // channel type (not used here) ch *hchan // channel val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir) } // These values must match ../reflect/value.go:/SelectDir. type selectDir int const ( _ selectDir = iota selectSend // case Chan <- Send selectRecv // case <-Chan: selectDefault // default ) //go:linkname reflect_rselect reflect.rselect func reflect_rselect(cases []runtimeSelect) (int, bool) { if len(cases) == 0 { block() } sel := make([]scase, len(cases)) orig := make([]int, len(cases)) nsends, nrecvs := 0, 0 dflt := -1 for i, rc := range cases { var j int switch rc.dir { case selectDefault: dflt = i continue case selectSend: j = nsends nsends++ case selectRecv: nrecvs++ j = len(cases) - nrecvs } sel[j] = scase{c: rc.ch, elem: rc.val} orig[j] = i } // Only a default case. if nsends+nrecvs == 0 { return dflt, false } // Compact sel and orig if necessary. 
if nsends+nrecvs < len(cases) { copy(sel[nsends:], sel[len(cases)-nrecvs:]) copy(orig[nsends:], orig[len(cases)-nrecvs:]) } order := make([]uint16, 2*(nsends+nrecvs)) var pc0 *uintptr if raceenabled { pcs := make([]uintptr, nsends+nrecvs) for i := range pcs { selectsetpc(&pcs[i]) } pc0 = &pcs[0] } chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1) // Translate chosen back to caller's ordering. if chosen < 0 { chosen = dflt } else { chosen = orig[chosen] } return chosen, recvOK } func (q *waitq) dequeueSudoG(sgp *sudog) { x := sgp.prev
if y != nil { // middle of queue x.next = y y.prev = x sgp.next = nil sgp.prev = nil return } // end of queue x.next = nil q.last = x sgp.prev = nil return } if y != nil { // start of queue y.prev = nil q.first = y sgp.next = nil return } // x==y==nil. Either sgp is the only element in the queue, // or it has already been removed. Use q.first to disambiguate. if q.first == sgp { q.first = nil q.last = nil } }
y := sgp.next if x != nil {
random_line_split
main.go
package main import ( "bufio" "fmt" "image" "image/color" "image/png" "io/ioutil" "math" "os" "strconv" "strings" "sync" "time" ) type instructionOperation int const ( Add instructionOperation = 1 Multiply instructionOperation = 2 Read instructionOperation = 3 Write instructionOperation = 4 JumpIfTrue instructionOperation = 5 JumpIfFalse instructionOperation = 6 LessThan instructionOperation = 7 Equals instructionOperation = 8 SetRelativeBase instructionOperation = 9 Terminate instructionOperation = 99 ) type direction int const ( up direction = iota right down left ) var ( InstructionLength = map[instructionOperation]int{ Add:4, Multiply:4, Read:2, Write:2, JumpIfTrue:3, JumpIfFalse:3, LessThan:4, Equals:4, SetRelativeBase:2, Terminate:1, } ) func main() { path, err := os.Getwd() if err != nil { fmt.Println(err) } // ----------------------------------------------------------------------------------------------------------------- // Here we solve problem for Part One robot := newPaintingRobotWithProgram(path + "/11/code") robot.run() fmt.Println("robot painted ", len(robot.paintedPoints), " tiles on the ship hull") // ----------------------------------------------------------------------------------------------------------------- // Here we solve problem for Part Two robot.exportToImage(path + "/11/registration.png") } func newPaintingRobotWithProgram(programPath string) *paintingRobot { inChannel := make(chan int64) outChannel := make(chan int64) doneChannel := make(chan interface{}) program := &program{ inChannel: inChannel, outChannel: outChannel, done: doneChannel, } program.loadCodeFromFile(programPath) return &paintingRobot{ brain: program, direction: up, position: &point{ x: 0, y: 0, }, } } type paintingRobot struct { brain *program position *point direction direction paintedPoints []*point } func (r *paintingRobot) run() { wg := sync.WaitGroup{} wg.Add(1) go r.brain.execute() go func() { r.brain.inChannel <- 1 readingColor := true robotLoop: for { var 
scannedColor int select { case reading := <-r.brain.outChannel: // Program outputs have 2 possible meanings that switch periodically: // * color (0 - black, 1 - white) // * rotation (0 - CCW, 1 - CW) if readingColor { r.paint(int(reading)) } else { r.changeDirection(int(reading)) r.move() scannedColor = r.scanColor() // After orientation change the program expects the code of detected color on that position as input. select { case r.brain.inChannel <- int64(scannedColor): fmt.Println("robot detected color ", scannedColor) case <-r.brain.done: } } readingColor = !readingColor case <-r.brain.done: wg.Done() break robotLoop } } }() wg.Wait() } // Gives the tile a color based on input (0 - black, 1 - white). // In order to keep track of unique painted tiles we keep record in slice and just repaint existing items. func (r *paintingRobot) paint(color int)
// Rotates the direction robot is facing - 0 for CW rotation and 1 for CCW rotation. func (r *paintingRobot) changeDirection(input int) { if input == 1 { if r.direction == up { r.direction = left } else { r.direction -= 1 } } else { if r.direction == left { r.direction = up } else { r.direction += 1 } } } // Moves the robot by 1 distance point in the direction it is currently facing. func (r *paintingRobot) move() { posX, posY := r.position.x, r.position.y switch r.direction { case up: posY -= 1 case right: posX += 1 case down: posY += 1 case left: posX -= 1 } r.position = &point{ x: posX, y: posY, } fmt.Println(fmt.Sprintf("robot moved to [%d,%d]", r.position.x, r.position.y)) } // Gets the color of underlying tile (based on robot's position). Default color is black (0). func (r *paintingRobot) scanColor() int { for _, p := range r.paintedPoints { if p.x == r.position.x && p.y == r.position.y { return p.color } } return 0 } // Calculates overall size of the grid (painted) and center of both axis. 
func (r paintingRobot) getGridInfo() (int, int, point) { xMin, xMax, yMin, yMax := 0, 0, 0, 0 for _, p := range r.paintedPoints { if p.x > xMax { xMax = p.x } if p.x < xMin { xMin = p.x } if p.y > yMax { yMax = p.y } if p.y < yMin { yMin = p.y } } return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1, point{ x: int(math.Abs(float64(xMin))), y: int(math.Abs(float64(yMin))), } } func (r paintingRobot) getTileColor(x, y int) color.RGBA { for _, p := range r.paintedPoints { if x == p.x && y == p.y { switch p.color { case 0: return color.RGBA{R: 0, G: 0, B: 0, A: 0xff} case 1: return color.RGBA{R: 255, G: 255, B: 255, A: 0xff} } } } return color.RGBA{R: 0, G: 0, B: 0, A: 0xff} } func (r paintingRobot) exportToImage(output string) { canvasWidth, canvasHeight, center := r.getGridInfo() startPoint := image.Point{X: 0, Y: 0} endPoint := image.Point{X: canvasWidth, Y: canvasHeight} img := image.NewRGBA(image.Rectangle{Min: startPoint, Max: endPoint}) for x := 0; x < canvasWidth; x++ { for y := 0; y < canvasHeight; y++ { img.Set(x, y, r.getTileColor(x - center.x, y - center.y)) } } f, err := os.Create(output) if err != nil { fmt.Println(err) } err = png.Encode(f, img) if err != nil { fmt.Println(err) } } type point struct { x int y int color int } type instruction struct { operation instructionOperation length int params []instructionParam } func (i *instruction) initialize(intCode []int64, pIndex int) { instValue := int(intCode[pIndex]) i.operation = instructionOperation(instValue) // Standard operation Codes are between 1 and 99, larger number means that Parameter Modes are included there evalParamModes := false if instValue >= 100 { i.operation = instructionOperation(instValue % 100) evalParamModes = true } i.length = InstructionLength[i.operation] paramCount := i.length - 1 i.params = make([]instructionParam, paramCount, paramCount) for j := 0; j < paramCount; j++ { i.params[j] = instructionParam{0, intCode[pIndex+j+1]} // Parameter mode 
is either 0 (by reference) or 1 (by value) and this mode is specified // in the instruction code itself (as given number at respective position) if evalParamModes { i.params[j].mode = (instValue / int(math.Pow(float64(10), float64(j+2)))) % 10 } } } func (i *instruction) getValuesCount() int { switch i.operation { case Add, Multiply, JumpIfTrue, JumpIfFalse, LessThan, Equals: return 2 case Write, SetRelativeBase: return 1 default: return 0 } } func (i *instruction) doesStoreOutputInMemory() bool { return i.operation == Read || i.operation == Add || i.operation == Multiply || i.operation == LessThan || i.operation == Equals } type instructionParam struct { mode int value int64 } type program struct { memory []int64 memorySize int position int relativeBase int completed bool halt bool inChannel chan int64 outChannel chan int64 done chan interface{} dataStack []int64 haltOnOutput bool } func (p *program) loadCodeFromFile(file string) { bytes, err := ioutil.ReadFile(file) if err != nil { fmt.Println(err) } inputs := strings.Split(string(bytes), ",") intInputs, err := convertStringArray(inputs) if err != nil { fmt.Println(err) } p.memorySize = len(intInputs) * 10 p.memory = make([]int64, p.memorySize, p.memorySize) for i := 0; i < len(intInputs); i++ { p.memory[i] = intInputs[i] } } func (p *program) resetState() { p.position = 0 p.completed = false p.halt = false } func (p *program) resetMemory() { p.dataStack = make([]int64, p.memorySize, p.memorySize) } func (p *program) execute() { for !p.completed && !p.halt { var instruction instruction instruction.initialize(p.memory, p.position) p.loadParameterValues(&instruction) switch instruction.operation { case Add: p.doAdd(&instruction) case Multiply: p.doMultiply(&instruction) case Read: p.doReadInput(&instruction) case Write: p.doWriteOutput(&instruction) case JumpIfTrue: p.doJumpIfTrue(&instruction) case JumpIfFalse: p.doJumpIfFalse(&instruction) case LessThan: p.doComparisonLessThan(&instruction) case Equals: 
p.doComparisonEquals(&instruction) case SetRelativeBase: p.doUpdateRelativeBase(&instruction) case Terminate: fmt.Println("program finished") p.completed = true close(p.done) close(p.outChannel) default: fmt.Println("Encountered invalid OpCode: ", instruction.operation) p.completed = true close(p.done) close(p.outChannel) } } } // Parameters can be handled "by value" or "by reference" and this function supplies the end value in each case func (p *program) loadParameterValues(i *instruction) { for j := 0; j < i.getValuesCount(); j++ { switch i.params[j].mode { case 0: i.params[j].value = p.memory[i.params[j].value] case 2: i.params[j].value = p.memory[p.relativeBase+ int(i.params[j].value)] } } if i.doesStoreOutputInMemory() { if i.params[i.getValuesCount()].mode == 2 { i.params[i.getValuesCount()].value = int64(p.relativeBase) + i.params[i.getValuesCount()].value } } } func (p *program) doAdd(i *instruction) { p.memory[i.params[2].value] = i.params[0].value + i.params[1].value p.position += i.length } func (p *program) doMultiply(i *instruction) { p.memory[i.params[2].value] = i.params[0].value * i.params[1].value p.position += i.length } // Inputs are primarily read from dataStack of the program, if it is empty, input is prompted from Standard Input func (p *program) doReadInput(i *instruction) { var input int64 channelReadOk := false if p.inChannel != nil { select { case <-time.After(10 * time.Second): fmt.Println("waiting for input timed-out, trying to read from dataStack") case input = <-p.inChannel: channelReadOk = true } } if !channelReadOk { if len(p.dataStack) > 0 { input = p.dataStack[len(p.dataStack)-1] p.dataStack = p.dataStack[:len(p.dataStack)-1] } else { reader := bufio.NewReader(os.Stdin) fmt.Print("Enter value: ") value, err := reader.ReadString('\n') if err != nil { fmt.Println(err) } inputInt, err := strconv.Atoi(strings.TrimSuffix(value, "\n")) if err != nil { fmt.Println(err) } input = int64(inputInt) } } p.memory[i.params[0].value] = input 
p.position += i.length } // program outputs are logged to Standard Output and stored in internal Data Stack func (p *program) doWriteOutput(i *instruction) { if p.outChannel != nil { p.outChannel <- i.params[0].value } else { p.dataStack = append(p.dataStack, i.params[0].value) } p.position += i.length if p.haltOnOutput { p.halt = true } } func (p *program) doJumpIfTrue(i *instruction) { if i.params[0].value != 0 { p.position = int(i.params[1].value) } else { p.position += i.length } } func (p *program) doJumpIfFalse(i *instruction) { if i.params[0].value == 0 { p.position = int(i.params[1].value) } else { p.position += i.length } } func (p *program) doComparisonLessThan(i *instruction) { if i.params[0].value < i.params[1].value { p.memory[i.params[2].value] = 1 } else { p.memory[i.params[2].value] = 0 } p.position += i.length } func (p *program) doComparisonEquals(i *instruction) { if i.params[0].value == i.params[1].value { p.memory[i.params[2].value] = 1 } else { p.memory[i.params[2].value] = 0 } p.position += i.length } func (p *program) doUpdateRelativeBase(i *instruction) { p.relativeBase += int(i.params[0].value) p.position += i.length } func convertStringArray(strArr []string) ([]int64, error) { iArr := make([]int64, 0, len(strArr)) for _, str := range strArr { i, err := strconv.Atoi(str) if err != nil { return nil, err } iArr = append(iArr, int64(i)) } return iArr, nil }
{ fmt.Println(fmt.Sprintf("robot paints [%d,%d] to color %d", r.position.x, r.position.y, color)) for _, p := range r.paintedPoints { if p.x == r.position.x && p.y == r.position.y { p.color = color fmt.Println("just repainted, # of painted tiles: ", len(r.paintedPoints)) return } } r.position.color = color r.paintedPoints = append(r.paintedPoints, r.position) fmt.Println("NEW painting, # of painted tiles: ", len(r.paintedPoints)) }
identifier_body
main.go
package main import ( "bufio" "fmt" "image" "image/color" "image/png" "io/ioutil" "math" "os" "strconv" "strings" "sync" "time" ) type instructionOperation int const ( Add instructionOperation = 1 Multiply instructionOperation = 2 Read instructionOperation = 3 Write instructionOperation = 4 JumpIfTrue instructionOperation = 5 JumpIfFalse instructionOperation = 6 LessThan instructionOperation = 7 Equals instructionOperation = 8 SetRelativeBase instructionOperation = 9 Terminate instructionOperation = 99 ) type direction int const ( up direction = iota right down left ) var ( InstructionLength = map[instructionOperation]int{ Add:4, Multiply:4, Read:2, Write:2, JumpIfTrue:3, JumpIfFalse:3, LessThan:4, Equals:4, SetRelativeBase:2, Terminate:1, } ) func main() { path, err := os.Getwd() if err != nil { fmt.Println(err) } // ----------------------------------------------------------------------------------------------------------------- // Here we solve problem for Part One robot := newPaintingRobotWithProgram(path + "/11/code") robot.run() fmt.Println("robot painted ", len(robot.paintedPoints), " tiles on the ship hull") // ----------------------------------------------------------------------------------------------------------------- // Here we solve problem for Part Two robot.exportToImage(path + "/11/registration.png") } func newPaintingRobotWithProgram(programPath string) *paintingRobot { inChannel := make(chan int64) outChannel := make(chan int64) doneChannel := make(chan interface{}) program := &program{ inChannel: inChannel, outChannel: outChannel, done: doneChannel, } program.loadCodeFromFile(programPath) return &paintingRobot{ brain: program, direction: up, position: &point{ x: 0, y: 0, }, } } type paintingRobot struct { brain *program position *point direction direction paintedPoints []*point } func (r *paintingRobot) run() { wg := sync.WaitGroup{} wg.Add(1) go r.brain.execute() go func() { r.brain.inChannel <- 1 readingColor := true robotLoop: for { var 
scannedColor int select { case reading := <-r.brain.outChannel: // Program outputs have 2 possible meanings that switch periodically: // * color (0 - black, 1 - white) // * rotation (0 - CCW, 1 - CW) if readingColor { r.paint(int(reading)) } else { r.changeDirection(int(reading)) r.move() scannedColor = r.scanColor() // After orientation change the program expects the code of detected color on that position as input. select { case r.brain.inChannel <- int64(scannedColor): fmt.Println("robot detected color ", scannedColor) case <-r.brain.done: } } readingColor = !readingColor case <-r.brain.done: wg.Done() break robotLoop } } }() wg.Wait() } // Gives the tile a color based on input (0 - black, 1 - white). // In order to keep track of unique painted tiles we keep record in slice and just repaint existing items. func (r *paintingRobot) paint(color int) { fmt.Println(fmt.Sprintf("robot paints [%d,%d] to color %d", r.position.x, r.position.y, color)) for _, p := range r.paintedPoints { if p.x == r.position.x && p.y == r.position.y { p.color = color fmt.Println("just repainted, # of painted tiles: ", len(r.paintedPoints)) return } } r.position.color = color r.paintedPoints = append(r.paintedPoints, r.position) fmt.Println("NEW painting, # of painted tiles: ", len(r.paintedPoints)) } // Rotates the direction robot is facing - 0 for CW rotation and 1 for CCW rotation. func (r *paintingRobot) changeDirection(input int) { if input == 1 { if r.direction == up { r.direction = left } else { r.direction -= 1 } } else { if r.direction == left { r.direction = up } else { r.direction += 1 } } } // Moves the robot by 1 distance point in the direction it is currently facing. 
func (r *paintingRobot) move() { posX, posY := r.position.x, r.position.y switch r.direction { case up: posY -= 1 case right: posX += 1 case down: posY += 1 case left: posX -= 1 } r.position = &point{ x: posX, y: posY, } fmt.Println(fmt.Sprintf("robot moved to [%d,%d]", r.position.x, r.position.y)) } // Gets the color of underlying tile (based on robot's position). Default color is black (0). func (r *paintingRobot) scanColor() int { for _, p := range r.paintedPoints { if p.x == r.position.x && p.y == r.position.y { return p.color } } return 0 } // Calculates overall size of the grid (painted) and center of both axis. func (r paintingRobot)
() (int, int, point) { xMin, xMax, yMin, yMax := 0, 0, 0, 0 for _, p := range r.paintedPoints { if p.x > xMax { xMax = p.x } if p.x < xMin { xMin = p.x } if p.y > yMax { yMax = p.y } if p.y < yMin { yMin = p.y } } return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1, point{ x: int(math.Abs(float64(xMin))), y: int(math.Abs(float64(yMin))), } } func (r paintingRobot) getTileColor(x, y int) color.RGBA { for _, p := range r.paintedPoints { if x == p.x && y == p.y { switch p.color { case 0: return color.RGBA{R: 0, G: 0, B: 0, A: 0xff} case 1: return color.RGBA{R: 255, G: 255, B: 255, A: 0xff} } } } return color.RGBA{R: 0, G: 0, B: 0, A: 0xff} } func (r paintingRobot) exportToImage(output string) { canvasWidth, canvasHeight, center := r.getGridInfo() startPoint := image.Point{X: 0, Y: 0} endPoint := image.Point{X: canvasWidth, Y: canvasHeight} img := image.NewRGBA(image.Rectangle{Min: startPoint, Max: endPoint}) for x := 0; x < canvasWidth; x++ { for y := 0; y < canvasHeight; y++ { img.Set(x, y, r.getTileColor(x - center.x, y - center.y)) } } f, err := os.Create(output) if err != nil { fmt.Println(err) } err = png.Encode(f, img) if err != nil { fmt.Println(err) } } type point struct { x int y int color int } type instruction struct { operation instructionOperation length int params []instructionParam } func (i *instruction) initialize(intCode []int64, pIndex int) { instValue := int(intCode[pIndex]) i.operation = instructionOperation(instValue) // Standard operation Codes are between 1 and 99, larger number means that Parameter Modes are included there evalParamModes := false if instValue >= 100 { i.operation = instructionOperation(instValue % 100) evalParamModes = true } i.length = InstructionLength[i.operation] paramCount := i.length - 1 i.params = make([]instructionParam, paramCount, paramCount) for j := 0; j < paramCount; j++ { i.params[j] = instructionParam{0, intCode[pIndex+j+1]} // Parameter mode is either 0 (by reference) or 1 
(by value) and this mode is specified // in the instruction code itself (as given number at respective position) if evalParamModes { i.params[j].mode = (instValue / int(math.Pow(float64(10), float64(j+2)))) % 10 } } } func (i *instruction) getValuesCount() int { switch i.operation { case Add, Multiply, JumpIfTrue, JumpIfFalse, LessThan, Equals: return 2 case Write, SetRelativeBase: return 1 default: return 0 } } func (i *instruction) doesStoreOutputInMemory() bool { return i.operation == Read || i.operation == Add || i.operation == Multiply || i.operation == LessThan || i.operation == Equals } type instructionParam struct { mode int value int64 } type program struct { memory []int64 memorySize int position int relativeBase int completed bool halt bool inChannel chan int64 outChannel chan int64 done chan interface{} dataStack []int64 haltOnOutput bool } func (p *program) loadCodeFromFile(file string) { bytes, err := ioutil.ReadFile(file) if err != nil { fmt.Println(err) } inputs := strings.Split(string(bytes), ",") intInputs, err := convertStringArray(inputs) if err != nil { fmt.Println(err) } p.memorySize = len(intInputs) * 10 p.memory = make([]int64, p.memorySize, p.memorySize) for i := 0; i < len(intInputs); i++ { p.memory[i] = intInputs[i] } } func (p *program) resetState() { p.position = 0 p.completed = false p.halt = false } func (p *program) resetMemory() { p.dataStack = make([]int64, p.memorySize, p.memorySize) } func (p *program) execute() { for !p.completed && !p.halt { var instruction instruction instruction.initialize(p.memory, p.position) p.loadParameterValues(&instruction) switch instruction.operation { case Add: p.doAdd(&instruction) case Multiply: p.doMultiply(&instruction) case Read: p.doReadInput(&instruction) case Write: p.doWriteOutput(&instruction) case JumpIfTrue: p.doJumpIfTrue(&instruction) case JumpIfFalse: p.doJumpIfFalse(&instruction) case LessThan: p.doComparisonLessThan(&instruction) case Equals: p.doComparisonEquals(&instruction) case 
SetRelativeBase: p.doUpdateRelativeBase(&instruction) case Terminate: fmt.Println("program finished") p.completed = true close(p.done) close(p.outChannel) default: fmt.Println("Encountered invalid OpCode: ", instruction.operation) p.completed = true close(p.done) close(p.outChannel) } } } // Parameters can be handled "by value" or "by reference" and this function supplies the end value in each case func (p *program) loadParameterValues(i *instruction) { for j := 0; j < i.getValuesCount(); j++ { switch i.params[j].mode { case 0: i.params[j].value = p.memory[i.params[j].value] case 2: i.params[j].value = p.memory[p.relativeBase+ int(i.params[j].value)] } } if i.doesStoreOutputInMemory() { if i.params[i.getValuesCount()].mode == 2 { i.params[i.getValuesCount()].value = int64(p.relativeBase) + i.params[i.getValuesCount()].value } } } func (p *program) doAdd(i *instruction) { p.memory[i.params[2].value] = i.params[0].value + i.params[1].value p.position += i.length } func (p *program) doMultiply(i *instruction) { p.memory[i.params[2].value] = i.params[0].value * i.params[1].value p.position += i.length } // Inputs are primarily read from dataStack of the program, if it is empty, input is prompted from Standard Input func (p *program) doReadInput(i *instruction) { var input int64 channelReadOk := false if p.inChannel != nil { select { case <-time.After(10 * time.Second): fmt.Println("waiting for input timed-out, trying to read from dataStack") case input = <-p.inChannel: channelReadOk = true } } if !channelReadOk { if len(p.dataStack) > 0 { input = p.dataStack[len(p.dataStack)-1] p.dataStack = p.dataStack[:len(p.dataStack)-1] } else { reader := bufio.NewReader(os.Stdin) fmt.Print("Enter value: ") value, err := reader.ReadString('\n') if err != nil { fmt.Println(err) } inputInt, err := strconv.Atoi(strings.TrimSuffix(value, "\n")) if err != nil { fmt.Println(err) } input = int64(inputInt) } } p.memory[i.params[0].value] = input p.position += i.length } // program outputs 
are logged to Standard Output and stored in internal Data Stack func (p *program) doWriteOutput(i *instruction) { if p.outChannel != nil { p.outChannel <- i.params[0].value } else { p.dataStack = append(p.dataStack, i.params[0].value) } p.position += i.length if p.haltOnOutput { p.halt = true } } func (p *program) doJumpIfTrue(i *instruction) { if i.params[0].value != 0 { p.position = int(i.params[1].value) } else { p.position += i.length } } func (p *program) doJumpIfFalse(i *instruction) { if i.params[0].value == 0 { p.position = int(i.params[1].value) } else { p.position += i.length } } func (p *program) doComparisonLessThan(i *instruction) { if i.params[0].value < i.params[1].value { p.memory[i.params[2].value] = 1 } else { p.memory[i.params[2].value] = 0 } p.position += i.length } func (p *program) doComparisonEquals(i *instruction) { if i.params[0].value == i.params[1].value { p.memory[i.params[2].value] = 1 } else { p.memory[i.params[2].value] = 0 } p.position += i.length } func (p *program) doUpdateRelativeBase(i *instruction) { p.relativeBase += int(i.params[0].value) p.position += i.length } func convertStringArray(strArr []string) ([]int64, error) { iArr := make([]int64, 0, len(strArr)) for _, str := range strArr { i, err := strconv.Atoi(str) if err != nil { return nil, err } iArr = append(iArr, int64(i)) } return iArr, nil }
getGridInfo
identifier_name
main.go
package main import ( "bufio" "fmt" "image" "image/color" "image/png" "io/ioutil" "math" "os" "strconv" "strings" "sync" "time" ) type instructionOperation int const ( Add instructionOperation = 1 Multiply instructionOperation = 2 Read instructionOperation = 3 Write instructionOperation = 4 JumpIfTrue instructionOperation = 5 JumpIfFalse instructionOperation = 6 LessThan instructionOperation = 7 Equals instructionOperation = 8 SetRelativeBase instructionOperation = 9 Terminate instructionOperation = 99 ) type direction int const ( up direction = iota right down left ) var ( InstructionLength = map[instructionOperation]int{ Add:4, Multiply:4, Read:2, Write:2, JumpIfTrue:3, JumpIfFalse:3, LessThan:4, Equals:4, SetRelativeBase:2, Terminate:1, } ) func main() { path, err := os.Getwd() if err != nil { fmt.Println(err) } // ----------------------------------------------------------------------------------------------------------------- // Here we solve problem for Part One robot := newPaintingRobotWithProgram(path + "/11/code") robot.run() fmt.Println("robot painted ", len(robot.paintedPoints), " tiles on the ship hull") // ----------------------------------------------------------------------------------------------------------------- // Here we solve problem for Part Two robot.exportToImage(path + "/11/registration.png") } func newPaintingRobotWithProgram(programPath string) *paintingRobot { inChannel := make(chan int64) outChannel := make(chan int64) doneChannel := make(chan interface{}) program := &program{ inChannel: inChannel, outChannel: outChannel, done: doneChannel, } program.loadCodeFromFile(programPath) return &paintingRobot{ brain: program, direction: up, position: &point{ x: 0, y: 0, }, } } type paintingRobot struct { brain *program position *point direction direction paintedPoints []*point } func (r *paintingRobot) run() { wg := sync.WaitGroup{} wg.Add(1) go r.brain.execute() go func() { r.brain.inChannel <- 1 readingColor := true robotLoop: for { var 
scannedColor int select { case reading := <-r.brain.outChannel: // Program outputs have 2 possible meanings that switch periodically: // * color (0 - black, 1 - white) // * rotation (0 - CCW, 1 - CW) if readingColor { r.paint(int(reading)) } else { r.changeDirection(int(reading)) r.move() scannedColor = r.scanColor() // After orientation change the program expects the code of detected color on that position as input. select { case r.brain.inChannel <- int64(scannedColor): fmt.Println("robot detected color ", scannedColor) case <-r.brain.done: } }
readingColor = !readingColor case <-r.brain.done: wg.Done() break robotLoop } } }() wg.Wait() } // Gives the tile a color based on input (0 - black, 1 - white). // In order to keep track of unique painted tiles we keep record in slice and just repaint existing items. func (r *paintingRobot) paint(color int) { fmt.Println(fmt.Sprintf("robot paints [%d,%d] to color %d", r.position.x, r.position.y, color)) for _, p := range r.paintedPoints { if p.x == r.position.x && p.y == r.position.y { p.color = color fmt.Println("just repainted, # of painted tiles: ", len(r.paintedPoints)) return } } r.position.color = color r.paintedPoints = append(r.paintedPoints, r.position) fmt.Println("NEW painting, # of painted tiles: ", len(r.paintedPoints)) } // Rotates the direction robot is facing - 0 for CW rotation and 1 for CCW rotation. func (r *paintingRobot) changeDirection(input int) { if input == 1 { if r.direction == up { r.direction = left } else { r.direction -= 1 } } else { if r.direction == left { r.direction = up } else { r.direction += 1 } } } // Moves the robot by 1 distance point in the direction it is currently facing. func (r *paintingRobot) move() { posX, posY := r.position.x, r.position.y switch r.direction { case up: posY -= 1 case right: posX += 1 case down: posY += 1 case left: posX -= 1 } r.position = &point{ x: posX, y: posY, } fmt.Println(fmt.Sprintf("robot moved to [%d,%d]", r.position.x, r.position.y)) } // Gets the color of underlying tile (based on robot's position). Default color is black (0). func (r *paintingRobot) scanColor() int { for _, p := range r.paintedPoints { if p.x == r.position.x && p.y == r.position.y { return p.color } } return 0 } // Calculates overall size of the grid (painted) and center of both axis. 
func (r paintingRobot) getGridInfo() (int, int, point) { xMin, xMax, yMin, yMax := 0, 0, 0, 0 for _, p := range r.paintedPoints { if p.x > xMax { xMax = p.x } if p.x < xMin { xMin = p.x } if p.y > yMax { yMax = p.y } if p.y < yMin { yMin = p.y } } return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1, point{ x: int(math.Abs(float64(xMin))), y: int(math.Abs(float64(yMin))), } } func (r paintingRobot) getTileColor(x, y int) color.RGBA { for _, p := range r.paintedPoints { if x == p.x && y == p.y { switch p.color { case 0: return color.RGBA{R: 0, G: 0, B: 0, A: 0xff} case 1: return color.RGBA{R: 255, G: 255, B: 255, A: 0xff} } } } return color.RGBA{R: 0, G: 0, B: 0, A: 0xff} } func (r paintingRobot) exportToImage(output string) { canvasWidth, canvasHeight, center := r.getGridInfo() startPoint := image.Point{X: 0, Y: 0} endPoint := image.Point{X: canvasWidth, Y: canvasHeight} img := image.NewRGBA(image.Rectangle{Min: startPoint, Max: endPoint}) for x := 0; x < canvasWidth; x++ { for y := 0; y < canvasHeight; y++ { img.Set(x, y, r.getTileColor(x - center.x, y - center.y)) } } f, err := os.Create(output) if err != nil { fmt.Println(err) } err = png.Encode(f, img) if err != nil { fmt.Println(err) } } type point struct { x int y int color int } type instruction struct { operation instructionOperation length int params []instructionParam } func (i *instruction) initialize(intCode []int64, pIndex int) { instValue := int(intCode[pIndex]) i.operation = instructionOperation(instValue) // Standard operation Codes are between 1 and 99, larger number means that Parameter Modes are included there evalParamModes := false if instValue >= 100 { i.operation = instructionOperation(instValue % 100) evalParamModes = true } i.length = InstructionLength[i.operation] paramCount := i.length - 1 i.params = make([]instructionParam, paramCount, paramCount) for j := 0; j < paramCount; j++ { i.params[j] = instructionParam{0, intCode[pIndex+j+1]} // Parameter mode 
is either 0 (by reference) or 1 (by value) and this mode is specified // in the instruction code itself (as given number at respective position) if evalParamModes { i.params[j].mode = (instValue / int(math.Pow(float64(10), float64(j+2)))) % 10 } } } func (i *instruction) getValuesCount() int { switch i.operation { case Add, Multiply, JumpIfTrue, JumpIfFalse, LessThan, Equals: return 2 case Write, SetRelativeBase: return 1 default: return 0 } } func (i *instruction) doesStoreOutputInMemory() bool { return i.operation == Read || i.operation == Add || i.operation == Multiply || i.operation == LessThan || i.operation == Equals } type instructionParam struct { mode int value int64 } type program struct { memory []int64 memorySize int position int relativeBase int completed bool halt bool inChannel chan int64 outChannel chan int64 done chan interface{} dataStack []int64 haltOnOutput bool } func (p *program) loadCodeFromFile(file string) { bytes, err := ioutil.ReadFile(file) if err != nil { fmt.Println(err) } inputs := strings.Split(string(bytes), ",") intInputs, err := convertStringArray(inputs) if err != nil { fmt.Println(err) } p.memorySize = len(intInputs) * 10 p.memory = make([]int64, p.memorySize, p.memorySize) for i := 0; i < len(intInputs); i++ { p.memory[i] = intInputs[i] } } func (p *program) resetState() { p.position = 0 p.completed = false p.halt = false } func (p *program) resetMemory() { p.dataStack = make([]int64, p.memorySize, p.memorySize) } func (p *program) execute() { for !p.completed && !p.halt { var instruction instruction instruction.initialize(p.memory, p.position) p.loadParameterValues(&instruction) switch instruction.operation { case Add: p.doAdd(&instruction) case Multiply: p.doMultiply(&instruction) case Read: p.doReadInput(&instruction) case Write: p.doWriteOutput(&instruction) case JumpIfTrue: p.doJumpIfTrue(&instruction) case JumpIfFalse: p.doJumpIfFalse(&instruction) case LessThan: p.doComparisonLessThan(&instruction) case Equals: 
p.doComparisonEquals(&instruction) case SetRelativeBase: p.doUpdateRelativeBase(&instruction) case Terminate: fmt.Println("program finished") p.completed = true close(p.done) close(p.outChannel) default: fmt.Println("Encountered invalid OpCode: ", instruction.operation) p.completed = true close(p.done) close(p.outChannel) } } } // Parameters can be handled "by value" or "by reference" and this function supplies the end value in each case func (p *program) loadParameterValues(i *instruction) { for j := 0; j < i.getValuesCount(); j++ { switch i.params[j].mode { case 0: i.params[j].value = p.memory[i.params[j].value] case 2: i.params[j].value = p.memory[p.relativeBase+ int(i.params[j].value)] } } if i.doesStoreOutputInMemory() { if i.params[i.getValuesCount()].mode == 2 { i.params[i.getValuesCount()].value = int64(p.relativeBase) + i.params[i.getValuesCount()].value } } } func (p *program) doAdd(i *instruction) { p.memory[i.params[2].value] = i.params[0].value + i.params[1].value p.position += i.length } func (p *program) doMultiply(i *instruction) { p.memory[i.params[2].value] = i.params[0].value * i.params[1].value p.position += i.length } // Inputs are primarily read from dataStack of the program, if it is empty, input is prompted from Standard Input func (p *program) doReadInput(i *instruction) { var input int64 channelReadOk := false if p.inChannel != nil { select { case <-time.After(10 * time.Second): fmt.Println("waiting for input timed-out, trying to read from dataStack") case input = <-p.inChannel: channelReadOk = true } } if !channelReadOk { if len(p.dataStack) > 0 { input = p.dataStack[len(p.dataStack)-1] p.dataStack = p.dataStack[:len(p.dataStack)-1] } else { reader := bufio.NewReader(os.Stdin) fmt.Print("Enter value: ") value, err := reader.ReadString('\n') if err != nil { fmt.Println(err) } inputInt, err := strconv.Atoi(strings.TrimSuffix(value, "\n")) if err != nil { fmt.Println(err) } input = int64(inputInt) } } p.memory[i.params[0].value] = input 
p.position += i.length } // program outputs are logged to Standard Output and stored in internal Data Stack func (p *program) doWriteOutput(i *instruction) { if p.outChannel != nil { p.outChannel <- i.params[0].value } else { p.dataStack = append(p.dataStack, i.params[0].value) } p.position += i.length if p.haltOnOutput { p.halt = true } } func (p *program) doJumpIfTrue(i *instruction) { if i.params[0].value != 0 { p.position = int(i.params[1].value) } else { p.position += i.length } } func (p *program) doJumpIfFalse(i *instruction) { if i.params[0].value == 0 { p.position = int(i.params[1].value) } else { p.position += i.length } } func (p *program) doComparisonLessThan(i *instruction) { if i.params[0].value < i.params[1].value { p.memory[i.params[2].value] = 1 } else { p.memory[i.params[2].value] = 0 } p.position += i.length } func (p *program) doComparisonEquals(i *instruction) { if i.params[0].value == i.params[1].value { p.memory[i.params[2].value] = 1 } else { p.memory[i.params[2].value] = 0 } p.position += i.length } func (p *program) doUpdateRelativeBase(i *instruction) { p.relativeBase += int(i.params[0].value) p.position += i.length } func convertStringArray(strArr []string) ([]int64, error) { iArr := make([]int64, 0, len(strArr)) for _, str := range strArr { i, err := strconv.Atoi(str) if err != nil { return nil, err } iArr = append(iArr, int64(i)) } return iArr, nil }
random_line_split
main.go
package main import ( "bufio" "fmt" "image" "image/color" "image/png" "io/ioutil" "math" "os" "strconv" "strings" "sync" "time" ) type instructionOperation int const ( Add instructionOperation = 1 Multiply instructionOperation = 2 Read instructionOperation = 3 Write instructionOperation = 4 JumpIfTrue instructionOperation = 5 JumpIfFalse instructionOperation = 6 LessThan instructionOperation = 7 Equals instructionOperation = 8 SetRelativeBase instructionOperation = 9 Terminate instructionOperation = 99 ) type direction int const ( up direction = iota right down left ) var ( InstructionLength = map[instructionOperation]int{ Add:4, Multiply:4, Read:2, Write:2, JumpIfTrue:3, JumpIfFalse:3, LessThan:4, Equals:4, SetRelativeBase:2, Terminate:1, } ) func main() { path, err := os.Getwd() if err != nil { fmt.Println(err) } // ----------------------------------------------------------------------------------------------------------------- // Here we solve problem for Part One robot := newPaintingRobotWithProgram(path + "/11/code") robot.run() fmt.Println("robot painted ", len(robot.paintedPoints), " tiles on the ship hull") // ----------------------------------------------------------------------------------------------------------------- // Here we solve problem for Part Two robot.exportToImage(path + "/11/registration.png") } func newPaintingRobotWithProgram(programPath string) *paintingRobot { inChannel := make(chan int64) outChannel := make(chan int64) doneChannel := make(chan interface{}) program := &program{ inChannel: inChannel, outChannel: outChannel, done: doneChannel, } program.loadCodeFromFile(programPath) return &paintingRobot{ brain: program, direction: up, position: &point{ x: 0, y: 0, }, } } type paintingRobot struct { brain *program position *point direction direction paintedPoints []*point } func (r *paintingRobot) run() { wg := sync.WaitGroup{} wg.Add(1) go r.brain.execute() go func() { r.brain.inChannel <- 1 readingColor := true robotLoop: for { var 
scannedColor int select { case reading := <-r.brain.outChannel: // Program outputs have 2 possible meanings that switch periodically: // * color (0 - black, 1 - white) // * rotation (0 - CCW, 1 - CW) if readingColor { r.paint(int(reading)) } else { r.changeDirection(int(reading)) r.move() scannedColor = r.scanColor() // After orientation change the program expects the code of detected color on that position as input. select { case r.brain.inChannel <- int64(scannedColor): fmt.Println("robot detected color ", scannedColor) case <-r.brain.done: } } readingColor = !readingColor case <-r.brain.done: wg.Done() break robotLoop } } }() wg.Wait() } // Gives the tile a color based on input (0 - black, 1 - white). // In order to keep track of unique painted tiles we keep record in slice and just repaint existing items. func (r *paintingRobot) paint(color int) { fmt.Println(fmt.Sprintf("robot paints [%d,%d] to color %d", r.position.x, r.position.y, color)) for _, p := range r.paintedPoints { if p.x == r.position.x && p.y == r.position.y { p.color = color fmt.Println("just repainted, # of painted tiles: ", len(r.paintedPoints)) return } } r.position.color = color r.paintedPoints = append(r.paintedPoints, r.position) fmt.Println("NEW painting, # of painted tiles: ", len(r.paintedPoints)) } // Rotates the direction robot is facing - 0 for CW rotation and 1 for CCW rotation. func (r *paintingRobot) changeDirection(input int) { if input == 1 { if r.direction == up { r.direction = left } else { r.direction -= 1 } } else { if r.direction == left
else { r.direction += 1 } } } // Moves the robot by 1 distance point in the direction it is currently facing. func (r *paintingRobot) move() { posX, posY := r.position.x, r.position.y switch r.direction { case up: posY -= 1 case right: posX += 1 case down: posY += 1 case left: posX -= 1 } r.position = &point{ x: posX, y: posY, } fmt.Println(fmt.Sprintf("robot moved to [%d,%d]", r.position.x, r.position.y)) } // Gets the color of underlying tile (based on robot's position). Default color is black (0). func (r *paintingRobot) scanColor() int { for _, p := range r.paintedPoints { if p.x == r.position.x && p.y == r.position.y { return p.color } } return 0 } // Calculates overall size of the grid (painted) and center of both axis. func (r paintingRobot) getGridInfo() (int, int, point) { xMin, xMax, yMin, yMax := 0, 0, 0, 0 for _, p := range r.paintedPoints { if p.x > xMax { xMax = p.x } if p.x < xMin { xMin = p.x } if p.y > yMax { yMax = p.y } if p.y < yMin { yMin = p.y } } return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1, point{ x: int(math.Abs(float64(xMin))), y: int(math.Abs(float64(yMin))), } } func (r paintingRobot) getTileColor(x, y int) color.RGBA { for _, p := range r.paintedPoints { if x == p.x && y == p.y { switch p.color { case 0: return color.RGBA{R: 0, G: 0, B: 0, A: 0xff} case 1: return color.RGBA{R: 255, G: 255, B: 255, A: 0xff} } } } return color.RGBA{R: 0, G: 0, B: 0, A: 0xff} } func (r paintingRobot) exportToImage(output string) { canvasWidth, canvasHeight, center := r.getGridInfo() startPoint := image.Point{X: 0, Y: 0} endPoint := image.Point{X: canvasWidth, Y: canvasHeight} img := image.NewRGBA(image.Rectangle{Min: startPoint, Max: endPoint}) for x := 0; x < canvasWidth; x++ { for y := 0; y < canvasHeight; y++ { img.Set(x, y, r.getTileColor(x - center.x, y - center.y)) } } f, err := os.Create(output) if err != nil { fmt.Println(err) } err = png.Encode(f, img) if err != nil { fmt.Println(err) } } type point 
struct { x int y int color int } type instruction struct { operation instructionOperation length int params []instructionParam } func (i *instruction) initialize(intCode []int64, pIndex int) { instValue := int(intCode[pIndex]) i.operation = instructionOperation(instValue) // Standard operation Codes are between 1 and 99, larger number means that Parameter Modes are included there evalParamModes := false if instValue >= 100 { i.operation = instructionOperation(instValue % 100) evalParamModes = true } i.length = InstructionLength[i.operation] paramCount := i.length - 1 i.params = make([]instructionParam, paramCount, paramCount) for j := 0; j < paramCount; j++ { i.params[j] = instructionParam{0, intCode[pIndex+j+1]} // Parameter mode is either 0 (by reference) or 1 (by value) and this mode is specified // in the instruction code itself (as given number at respective position) if evalParamModes { i.params[j].mode = (instValue / int(math.Pow(float64(10), float64(j+2)))) % 10 } } } func (i *instruction) getValuesCount() int { switch i.operation { case Add, Multiply, JumpIfTrue, JumpIfFalse, LessThan, Equals: return 2 case Write, SetRelativeBase: return 1 default: return 0 } } func (i *instruction) doesStoreOutputInMemory() bool { return i.operation == Read || i.operation == Add || i.operation == Multiply || i.operation == LessThan || i.operation == Equals } type instructionParam struct { mode int value int64 } type program struct { memory []int64 memorySize int position int relativeBase int completed bool halt bool inChannel chan int64 outChannel chan int64 done chan interface{} dataStack []int64 haltOnOutput bool } func (p *program) loadCodeFromFile(file string) { bytes, err := ioutil.ReadFile(file) if err != nil { fmt.Println(err) } inputs := strings.Split(string(bytes), ",") intInputs, err := convertStringArray(inputs) if err != nil { fmt.Println(err) } p.memorySize = len(intInputs) * 10 p.memory = make([]int64, p.memorySize, p.memorySize) for i := 0; i < 
len(intInputs); i++ { p.memory[i] = intInputs[i] } } func (p *program) resetState() { p.position = 0 p.completed = false p.halt = false } func (p *program) resetMemory() { p.dataStack = make([]int64, p.memorySize, p.memorySize) } func (p *program) execute() { for !p.completed && !p.halt { var instruction instruction instruction.initialize(p.memory, p.position) p.loadParameterValues(&instruction) switch instruction.operation { case Add: p.doAdd(&instruction) case Multiply: p.doMultiply(&instruction) case Read: p.doReadInput(&instruction) case Write: p.doWriteOutput(&instruction) case JumpIfTrue: p.doJumpIfTrue(&instruction) case JumpIfFalse: p.doJumpIfFalse(&instruction) case LessThan: p.doComparisonLessThan(&instruction) case Equals: p.doComparisonEquals(&instruction) case SetRelativeBase: p.doUpdateRelativeBase(&instruction) case Terminate: fmt.Println("program finished") p.completed = true close(p.done) close(p.outChannel) default: fmt.Println("Encountered invalid OpCode: ", instruction.operation) p.completed = true close(p.done) close(p.outChannel) } } } // Parameters can be handled "by value" or "by reference" and this function supplies the end value in each case func (p *program) loadParameterValues(i *instruction) { for j := 0; j < i.getValuesCount(); j++ { switch i.params[j].mode { case 0: i.params[j].value = p.memory[i.params[j].value] case 2: i.params[j].value = p.memory[p.relativeBase+ int(i.params[j].value)] } } if i.doesStoreOutputInMemory() { if i.params[i.getValuesCount()].mode == 2 { i.params[i.getValuesCount()].value = int64(p.relativeBase) + i.params[i.getValuesCount()].value } } } func (p *program) doAdd(i *instruction) { p.memory[i.params[2].value] = i.params[0].value + i.params[1].value p.position += i.length } func (p *program) doMultiply(i *instruction) { p.memory[i.params[2].value] = i.params[0].value * i.params[1].value p.position += i.length } // Inputs are primarily read from dataStack of the program, if it is empty, input is prompted from 
Standard Input func (p *program) doReadInput(i *instruction) { var input int64 channelReadOk := false if p.inChannel != nil { select { case <-time.After(10 * time.Second): fmt.Println("waiting for input timed-out, trying to read from dataStack") case input = <-p.inChannel: channelReadOk = true } } if !channelReadOk { if len(p.dataStack) > 0 { input = p.dataStack[len(p.dataStack)-1] p.dataStack = p.dataStack[:len(p.dataStack)-1] } else { reader := bufio.NewReader(os.Stdin) fmt.Print("Enter value: ") value, err := reader.ReadString('\n') if err != nil { fmt.Println(err) } inputInt, err := strconv.Atoi(strings.TrimSuffix(value, "\n")) if err != nil { fmt.Println(err) } input = int64(inputInt) } } p.memory[i.params[0].value] = input p.position += i.length } // program outputs are logged to Standard Output and stored in internal Data Stack func (p *program) doWriteOutput(i *instruction) { if p.outChannel != nil { p.outChannel <- i.params[0].value } else { p.dataStack = append(p.dataStack, i.params[0].value) } p.position += i.length if p.haltOnOutput { p.halt = true } } func (p *program) doJumpIfTrue(i *instruction) { if i.params[0].value != 0 { p.position = int(i.params[1].value) } else { p.position += i.length } } func (p *program) doJumpIfFalse(i *instruction) { if i.params[0].value == 0 { p.position = int(i.params[1].value) } else { p.position += i.length } } func (p *program) doComparisonLessThan(i *instruction) { if i.params[0].value < i.params[1].value { p.memory[i.params[2].value] = 1 } else { p.memory[i.params[2].value] = 0 } p.position += i.length } func (p *program) doComparisonEquals(i *instruction) { if i.params[0].value == i.params[1].value { p.memory[i.params[2].value] = 1 } else { p.memory[i.params[2].value] = 0 } p.position += i.length } func (p *program) doUpdateRelativeBase(i *instruction) { p.relativeBase += int(i.params[0].value) p.position += i.length } func convertStringArray(strArr []string) ([]int64, error) { iArr := make([]int64, 0, len(strArr)) 
for _, str := range strArr { i, err := strconv.Atoi(str) if err != nil { return nil, err } iArr = append(iArr, int64(i)) } return iArr, nil }
{ r.direction = up }
conditional_block
crud.ts
import { ServiceResponseV1 } from '../http'; import { Request, Response } from 'express'; import { Model, DestroyOptions, UpdateOptions, FindOptions } from 'sequelize'; import { BadRequestError, ERR_MSG_MISSING_FIELDS, ERR_MSG_INVALID_PARTS } from '../errors'; import { merge, includes, each, clone, map, filter as arr_filter, reduce, find, values, Dictionary } from 'lodash'; import * as q from 'q'; import { v4 } from 'node-uuid'; const ID_MAP: SDict = { id: 'id' }; const ID_FIELDS = [ 'id', 'updated_by', 'created_by' ]; type RequestHandler = (req: Request, res: Response, next?: Function) => void type SDict = Dictionary<string>; type Tag = { tag: string, val: any }; type Query = SDict & any; type QueryOptions = UpdateOptions & DestroyOptions & FindOptions; interface QueryResultsModelMeta { count?: number; includes_me?: boolean; } // XXX remove this and just use `next` function error(res: Response, err: Error) { res.status(500); res.json(<ServiceResponseV1<SDict>>{ meta: { ok: false, message: err.message, }, body: {} }); } function generate_where(schema: SDict, params: SDict): SDict { return reduce(schema, (prop_remap, lookup: string, field: string) => { if (params[lookup]) { prop_remap[field] = params[lookup]; } return prop_remap; }, {}); } function build_query(prop_remap: SDict, params: SDict, extras: Object = {}): QueryOptions { var query = <QueryOptions>clone(extras); query.where = merge(generate_where(prop_remap, params), query.where); query.raw = true; return query; } function stamp_meta<V, H>(label: string, val: V): (holder: H) => H { return holder => { holder['@meta'] = holder['@meta'] || {}; holder['@meta'][label] = val; return holder; }; } function tag(name: string): (string) => Tag { return (val: string): Tag => { return { tag: name, val: val }; }; } function replace_with_uuid(val: string, field: string): Boolean { return val === '$UUID' && ID_FIELDS.indexOf(field) !== -1; } function populate_dates(body: Query): Query { var cbody = clone(body); if 
(!cbody.deleted_date) { cbody.deleted_date = null; } return cbody; } function populate_uuids(body: Query): Query { var id; return reduce(body, (prop_remap: SDict, val: string, field: string) => { if (replace_with_uuid(val, field)) { id = id || v4(); val = id; } prop_remap[field] = val; return prop_remap; }, {}); } function populate_extra_parameters(req: Request, extra_params: Object) { if (extra_params) { each(extra_params, function (field) { req.body[field] = req.params[field]; }); } } // XXX remove this and just use `next` function error_handler(res: Response, action): any { return action.catch(err => error(res, err)); } function response_handler(res: Response, property?: string): any { var start_time = Date.now(); return results => { var body = property ? results[property] : results, meta = { ok: true, error: false, elapsed_time: Date.now() - start_time }; return res.json(<ServiceResponseV1<SDict | SDict[] | number>>{ meta, body }); }; } export function upsert(model: any, extra_params: string[] = []): RequestHandler { return (req, res) => { populate_extra_parameters(req, extra_params); error_handler(res, model.upsert(populate_uuids(populate_dates(req.body))) .then(response_handler(res))); }; } /** * XXX instead of a callback this should pass the response down so that * handlers can just be appended. */ export function create(model: any, extra_params: string[] = [], cb?: (Model) => Promise<Model<any, any>>): RequestHandler { return (req, res) => { populate_extra_parameters(req, extra_params); error_handler(res, model.create(populate_uuids(populate_dates(req.body))) .then(model => cb ? cb(model) : model) .then(response_handler(res))); }; } export function retrieve(model: any, prop_remap: SDict = ID_MAP): RequestHandler { var find: string; return (req, res) => { // GET model/:id // GET model/:parent_id/sub_model // GET model/:parent_id/sub_model/:id if (req.params.id || prop_remap) { find = req.params.id ? 
'findOne' : 'findAll'; error_handler(res, model[find](build_query(prop_remap, req.params, { paranoid: !req.params.id, order: ['created_date'] })).then(response_handler(res))); } else { error(res, new Error('search not implemented')); } }; } export function update(model: any): RequestHandler { return (req, res) => error_handler(res, model.update( populate_uuids(populate_dates(req.body)), build_query(ID_MAP, req.params) ).then(response_handler(res))); } /** * NOTE this will always do a soft-delete unless "purge=true" is passed as a * query paramter along with a valid "purge_key" value. this "purge_key" is * retrieved from the CP_PURGE_KEY env var. */ export function del(model: any, prop_remap: SDict = ID_MAP): RequestHandler { return (req, res, next) => { var deleted_by = req.user.id, where = { deleted_date: null }, force = req.query.purge === 'true' && req.query.purge_key === process.env.CP_PURGE_KEY && process.env.CP_PURGE_KEY; // mismatching prop_remap to req.* is resulting in `delete from X` // queries for (var prop in prop_remap) { if (prop_remap.hasOwnProperty(prop) && !req.params[prop_remap[prop]]) { next(new BadRequestError(ERR_MSG_MISSING_FIELDS(values<string>(prop_remap)))); return; } } error_handler(res, (<any>model).sequelize.transaction(transaction => model.update({ deleted_by }, build_query(prop_remap, req.params, { transaction, where })).then(() => model.destroy(build_query(prop_remap, req.params, { transaction, force }))))).then(response_handler(res)); }; } export function parts(model: any, prop_remap, parts_def?): RequestHandler { if (!parts_def) { parts_def = prop_remap; prop_remap = {id: 'id'}; } return (req, res, next) => { var parts_wanted = arr_filter((req.query.parts || '').split(',')), expand_wanted = arr_filter((req.query.expand || '').split(',')), bad_parts = [], queries = []; // check for invalid parts first each(parts_wanted, (part: string) => { if (!(part in parts_def)) { bad_parts.push(part); } }); if (bad_parts.length) { next(new 
BadRequestError(ERR_MSG_INVALID_PARTS(bad_parts))); return; } // mian queries.push(model.findOne(build_query(prop_remap, req.params, { paranoid: false })).then(tag('main'))); // parts each(parts_wanted, (part: string) => { var query = null, model = parts_def[part][0], prop_remap = parts_def[part][1], meta = parts_def[part][2] || {}; if (meta.expand && includes(expand_wanted, part)) { query = model.findAll(build_query(prop_remap, req.params)) .then(results => { var model = meta.expand[0], remap = meta.expand[1]; results = Array.isArray(results) ? results : [results]; return q.all(map(results, val => model.findOne(build_query(remap, <SDict>val)) .then(stamp_meta('relationship', val)))) .then(tag(part)); }); } else if (meta.instead) { query = new Promise((resolve, reject) => { var instead: QueryResultsModelMeta = {}, user_id = req.user.id, checks = []; if (meta.instead.count) { checks.push(new Promise((resolve, reject) => { model.findAndCountAll(build_query(prop_remap, req.params)) .then(count => { instead.count = count.count; resolve(); }); })); } if (meta.instead.includes_me) { checks.push(new Promise((resolve, reject) => { if (!user_id) { instead.includes_me = false; resolve(); } else { model.findOne(build_query(prop_remap, req.params, { where: { user_id } })).then(row => { instead.includes_me = !!row; resolve(); }); } })); } return Promise.all(checks).then(() => q.when({}) .then(stamp_meta('instead', instead))
} else { query = model.findAll(build_query(prop_remap, req.params)) .then(tag(part)); } queries.push(query); }); // combine `main` and `parts` into a single response object error_handler(res, q.all(queries) .then(results => { response_handler(res)(reduce(parts_wanted, (body, part: string) => { body[part] = (<Tag>(find(results, {tag: part}) || {})).val; return body; }, (<Tag>find(results, {tag: 'main'})).val)); }) ); }; } export function all(model: any): RequestHandler { return (req, res) => error_handler(res, model.findAll({}) .then(response_handler(res))); }
.then(tag(part)) .then(resolve)); });
random_line_split
crud.ts
import { ServiceResponseV1 } from '../http'; import { Request, Response } from 'express'; import { Model, DestroyOptions, UpdateOptions, FindOptions } from 'sequelize'; import { BadRequestError, ERR_MSG_MISSING_FIELDS, ERR_MSG_INVALID_PARTS } from '../errors'; import { merge, includes, each, clone, map, filter as arr_filter, reduce, find, values, Dictionary } from 'lodash'; import * as q from 'q'; import { v4 } from 'node-uuid'; const ID_MAP: SDict = { id: 'id' }; const ID_FIELDS = [ 'id', 'updated_by', 'created_by' ]; type RequestHandler = (req: Request, res: Response, next?: Function) => void type SDict = Dictionary<string>; type Tag = { tag: string, val: any }; type Query = SDict & any; type QueryOptions = UpdateOptions & DestroyOptions & FindOptions; interface QueryResultsModelMeta { count?: number; includes_me?: boolean; } // XXX remove this and just use `next` function error(res: Response, err: Error) { res.status(500); res.json(<ServiceResponseV1<SDict>>{ meta: { ok: false, message: err.message, }, body: {} }); } function generate_where(schema: SDict, params: SDict): SDict { return reduce(schema, (prop_remap, lookup: string, field: string) => { if (params[lookup]) { prop_remap[field] = params[lookup]; } return prop_remap; }, {}); } function build_query(prop_remap: SDict, params: SDict, extras: Object = {}): QueryOptions { var query = <QueryOptions>clone(extras); query.where = merge(generate_where(prop_remap, params), query.where); query.raw = true; return query; } function stamp_meta<V, H>(label: string, val: V): (holder: H) => H { return holder => { holder['@meta'] = holder['@meta'] || {}; holder['@meta'][label] = val; return holder; }; } function tag(name: string): (string) => Tag { return (val: string): Tag => { return { tag: name, val: val }; }; } function replace_with_uuid(val: string, field: string): Boolean { return val === '$UUID' && ID_FIELDS.indexOf(field) !== -1; } function populate_dates(body: Query): Query { var cbody = clone(body); if 
(!cbody.deleted_date) { cbody.deleted_date = null; } return cbody; } function populate_uuids(body: Query): Query { var id; return reduce(body, (prop_remap: SDict, val: string, field: string) => { if (replace_with_uuid(val, field)) { id = id || v4(); val = id; } prop_remap[field] = val; return prop_remap; }, {}); } function populate_extra_parameters(req: Request, extra_params: Object) { if (extra_params) { each(extra_params, function (field) { req.body[field] = req.params[field]; }); } } // XXX remove this and just use `next` function error_handler(res: Response, action): any { return action.catch(err => error(res, err)); } function response_handler(res: Response, property?: string): any { var start_time = Date.now(); return results => { var body = property ? results[property] : results, meta = { ok: true, error: false, elapsed_time: Date.now() - start_time }; return res.json(<ServiceResponseV1<SDict | SDict[] | number>>{ meta, body }); }; } export function upsert(model: any, extra_params: string[] = []): RequestHandler { return (req, res) => { populate_extra_parameters(req, extra_params); error_handler(res, model.upsert(populate_uuids(populate_dates(req.body))) .then(response_handler(res))); }; } /** * XXX instead of a callback this should pass the response down so that * handlers can just be appended. */ export function create(model: any, extra_params: string[] = [], cb?: (Model) => Promise<Model<any, any>>): RequestHandler { return (req, res) => { populate_extra_parameters(req, extra_params); error_handler(res, model.create(populate_uuids(populate_dates(req.body))) .then(model => cb ? cb(model) : model) .then(response_handler(res))); }; } export function retrieve(model: any, prop_remap: SDict = ID_MAP): RequestHandler { var find: string; return (req, res) => { // GET model/:id // GET model/:parent_id/sub_model // GET model/:parent_id/sub_model/:id if (req.params.id || prop_remap) { find = req.params.id ? 
'findOne' : 'findAll'; error_handler(res, model[find](build_query(prop_remap, req.params, { paranoid: !req.params.id, order: ['created_date'] })).then(response_handler(res))); } else { error(res, new Error('search not implemented')); } }; } export function
(model: any): RequestHandler { return (req, res) => error_handler(res, model.update( populate_uuids(populate_dates(req.body)), build_query(ID_MAP, req.params) ).then(response_handler(res))); } /** * NOTE this will always do a soft-delete unless "purge=true" is passed as a * query paramter along with a valid "purge_key" value. this "purge_key" is * retrieved from the CP_PURGE_KEY env var. */ export function del(model: any, prop_remap: SDict = ID_MAP): RequestHandler { return (req, res, next) => { var deleted_by = req.user.id, where = { deleted_date: null }, force = req.query.purge === 'true' && req.query.purge_key === process.env.CP_PURGE_KEY && process.env.CP_PURGE_KEY; // mismatching prop_remap to req.* is resulting in `delete from X` // queries for (var prop in prop_remap) { if (prop_remap.hasOwnProperty(prop) && !req.params[prop_remap[prop]]) { next(new BadRequestError(ERR_MSG_MISSING_FIELDS(values<string>(prop_remap)))); return; } } error_handler(res, (<any>model).sequelize.transaction(transaction => model.update({ deleted_by }, build_query(prop_remap, req.params, { transaction, where })).then(() => model.destroy(build_query(prop_remap, req.params, { transaction, force }))))).then(response_handler(res)); }; } export function parts(model: any, prop_remap, parts_def?): RequestHandler { if (!parts_def) { parts_def = prop_remap; prop_remap = {id: 'id'}; } return (req, res, next) => { var parts_wanted = arr_filter((req.query.parts || '').split(',')), expand_wanted = arr_filter((req.query.expand || '').split(',')), bad_parts = [], queries = []; // check for invalid parts first each(parts_wanted, (part: string) => { if (!(part in parts_def)) { bad_parts.push(part); } }); if (bad_parts.length) { next(new BadRequestError(ERR_MSG_INVALID_PARTS(bad_parts))); return; } // mian queries.push(model.findOne(build_query(prop_remap, req.params, { paranoid: false })).then(tag('main'))); // parts each(parts_wanted, (part: string) => { var query = null, model = parts_def[part][0], 
prop_remap = parts_def[part][1], meta = parts_def[part][2] || {}; if (meta.expand && includes(expand_wanted, part)) { query = model.findAll(build_query(prop_remap, req.params)) .then(results => { var model = meta.expand[0], remap = meta.expand[1]; results = Array.isArray(results) ? results : [results]; return q.all(map(results, val => model.findOne(build_query(remap, <SDict>val)) .then(stamp_meta('relationship', val)))) .then(tag(part)); }); } else if (meta.instead) { query = new Promise((resolve, reject) => { var instead: QueryResultsModelMeta = {}, user_id = req.user.id, checks = []; if (meta.instead.count) { checks.push(new Promise((resolve, reject) => { model.findAndCountAll(build_query(prop_remap, req.params)) .then(count => { instead.count = count.count; resolve(); }); })); } if (meta.instead.includes_me) { checks.push(new Promise((resolve, reject) => { if (!user_id) { instead.includes_me = false; resolve(); } else { model.findOne(build_query(prop_remap, req.params, { where: { user_id } })).then(row => { instead.includes_me = !!row; resolve(); }); } })); } return Promise.all(checks).then(() => q.when({}) .then(stamp_meta('instead', instead)) .then(tag(part)) .then(resolve)); }); } else { query = model.findAll(build_query(prop_remap, req.params)) .then(tag(part)); } queries.push(query); }); // combine `main` and `parts` into a single response object error_handler(res, q.all(queries) .then(results => { response_handler(res)(reduce(parts_wanted, (body, part: string) => { body[part] = (<Tag>(find(results, {tag: part}) || {})).val; return body; }, (<Tag>find(results, {tag: 'main'})).val)); }) ); }; } export function all(model: any): RequestHandler { return (req, res) => error_handler(res, model.findAll({}) .then(response_handler(res))); }
update
identifier_name
crud.ts
import { ServiceResponseV1 } from '../http'; import { Request, Response } from 'express'; import { Model, DestroyOptions, UpdateOptions, FindOptions } from 'sequelize'; import { BadRequestError, ERR_MSG_MISSING_FIELDS, ERR_MSG_INVALID_PARTS } from '../errors'; import { merge, includes, each, clone, map, filter as arr_filter, reduce, find, values, Dictionary } from 'lodash'; import * as q from 'q'; import { v4 } from 'node-uuid'; const ID_MAP: SDict = { id: 'id' }; const ID_FIELDS = [ 'id', 'updated_by', 'created_by' ]; type RequestHandler = (req: Request, res: Response, next?: Function) => void type SDict = Dictionary<string>; type Tag = { tag: string, val: any }; type Query = SDict & any; type QueryOptions = UpdateOptions & DestroyOptions & FindOptions; interface QueryResultsModelMeta { count?: number; includes_me?: boolean; } // XXX remove this and just use `next` function error(res: Response, err: Error) { res.status(500); res.json(<ServiceResponseV1<SDict>>{ meta: { ok: false, message: err.message, }, body: {} }); } function generate_where(schema: SDict, params: SDict): SDict { return reduce(schema, (prop_remap, lookup: string, field: string) => { if (params[lookup]) { prop_remap[field] = params[lookup]; } return prop_remap; }, {}); } function build_query(prop_remap: SDict, params: SDict, extras: Object = {}): QueryOptions { var query = <QueryOptions>clone(extras); query.where = merge(generate_where(prop_remap, params), query.where); query.raw = true; return query; } function stamp_meta<V, H>(label: string, val: V): (holder: H) => H { return holder => { holder['@meta'] = holder['@meta'] || {}; holder['@meta'][label] = val; return holder; }; } function tag(name: string): (string) => Tag { return (val: string): Tag => { return { tag: name, val: val }; }; } function replace_with_uuid(val: string, field: string): Boolean { return val === '$UUID' && ID_FIELDS.indexOf(field) !== -1; } function populate_dates(body: Query): Query { var cbody = clone(body); if 
(!cbody.deleted_date) { cbody.deleted_date = null; } return cbody; } function populate_uuids(body: Query): Query { var id; return reduce(body, (prop_remap: SDict, val: string, field: string) => { if (replace_with_uuid(val, field)) { id = id || v4(); val = id; } prop_remap[field] = val; return prop_remap; }, {}); } function populate_extra_parameters(req: Request, extra_params: Object) { if (extra_params) { each(extra_params, function (field) { req.body[field] = req.params[field]; }); } } // XXX remove this and just use `next` function error_handler(res: Response, action): any { return action.catch(err => error(res, err)); } function response_handler(res: Response, property?: string): any { var start_time = Date.now(); return results => { var body = property ? results[property] : results, meta = { ok: true, error: false, elapsed_time: Date.now() - start_time }; return res.json(<ServiceResponseV1<SDict | SDict[] | number>>{ meta, body }); }; } export function upsert(model: any, extra_params: string[] = []): RequestHandler { return (req, res) => { populate_extra_parameters(req, extra_params); error_handler(res, model.upsert(populate_uuids(populate_dates(req.body))) .then(response_handler(res))); }; } /** * XXX instead of a callback this should pass the response down so that * handlers can just be appended. */ export function create(model: any, extra_params: string[] = [], cb?: (Model) => Promise<Model<any, any>>): RequestHandler { return (req, res) => { populate_extra_parameters(req, extra_params); error_handler(res, model.create(populate_uuids(populate_dates(req.body))) .then(model => cb ? cb(model) : model) .then(response_handler(res))); }; } export function retrieve(model: any, prop_remap: SDict = ID_MAP): RequestHandler { var find: string; return (req, res) => { // GET model/:id // GET model/:parent_id/sub_model // GET model/:parent_id/sub_model/:id if (req.params.id || prop_remap) { find = req.params.id ? 
'findOne' : 'findAll'; error_handler(res, model[find](build_query(prop_remap, req.params, { paranoid: !req.params.id, order: ['created_date'] })).then(response_handler(res))); } else { error(res, new Error('search not implemented')); } }; } export function update(model: any): RequestHandler { return (req, res) => error_handler(res, model.update( populate_uuids(populate_dates(req.body)), build_query(ID_MAP, req.params) ).then(response_handler(res))); } /** * NOTE this will always do a soft-delete unless "purge=true" is passed as a * query paramter along with a valid "purge_key" value. this "purge_key" is * retrieved from the CP_PURGE_KEY env var. */ export function del(model: any, prop_remap: SDict = ID_MAP): RequestHandler { return (req, res, next) => { var deleted_by = req.user.id, where = { deleted_date: null }, force = req.query.purge === 'true' && req.query.purge_key === process.env.CP_PURGE_KEY && process.env.CP_PURGE_KEY; // mismatching prop_remap to req.* is resulting in `delete from X` // queries for (var prop in prop_remap) { if (prop_remap.hasOwnProperty(prop) && !req.params[prop_remap[prop]]) { next(new BadRequestError(ERR_MSG_MISSING_FIELDS(values<string>(prop_remap)))); return; } } error_handler(res, (<any>model).sequelize.transaction(transaction => model.update({ deleted_by }, build_query(prop_remap, req.params, { transaction, where })).then(() => model.destroy(build_query(prop_remap, req.params, { transaction, force }))))).then(response_handler(res)); }; } export function parts(model: any, prop_remap, parts_def?): RequestHandler { if (!parts_def) { parts_def = prop_remap; prop_remap = {id: 'id'}; } return (req, res, next) => { var parts_wanted = arr_filter((req.query.parts || '').split(',')), expand_wanted = arr_filter((req.query.expand || '').split(',')), bad_parts = [], queries = []; // check for invalid parts first each(parts_wanted, (part: string) => { if (!(part in parts_def)) { bad_parts.push(part); } }); if (bad_parts.length) { next(new 
BadRequestError(ERR_MSG_INVALID_PARTS(bad_parts))); return; } // mian queries.push(model.findOne(build_query(prop_remap, req.params, { paranoid: false })).then(tag('main'))); // parts each(parts_wanted, (part: string) => { var query = null, model = parts_def[part][0], prop_remap = parts_def[part][1], meta = parts_def[part][2] || {}; if (meta.expand && includes(expand_wanted, part)) { query = model.findAll(build_query(prop_remap, req.params)) .then(results => { var model = meta.expand[0], remap = meta.expand[1]; results = Array.isArray(results) ? results : [results]; return q.all(map(results, val => model.findOne(build_query(remap, <SDict>val)) .then(stamp_meta('relationship', val)))) .then(tag(part)); }); } else if (meta.instead) { query = new Promise((resolve, reject) => { var instead: QueryResultsModelMeta = {}, user_id = req.user.id, checks = []; if (meta.instead.count) { checks.push(new Promise((resolve, reject) => { model.findAndCountAll(build_query(prop_remap, req.params)) .then(count => { instead.count = count.count; resolve(); }); })); } if (meta.instead.includes_me) { checks.push(new Promise((resolve, reject) => { if (!user_id) { instead.includes_me = false; resolve(); } else
})); } return Promise.all(checks).then(() => q.when({}) .then(stamp_meta('instead', instead)) .then(tag(part)) .then(resolve)); }); } else { query = model.findAll(build_query(prop_remap, req.params)) .then(tag(part)); } queries.push(query); }); // combine `main` and `parts` into a single response object error_handler(res, q.all(queries) .then(results => { response_handler(res)(reduce(parts_wanted, (body, part: string) => { body[part] = (<Tag>(find(results, {tag: part}) || {})).val; return body; }, (<Tag>find(results, {tag: 'main'})).val)); }) ); }; } export function all(model: any): RequestHandler { return (req, res) => error_handler(res, model.findAll({}) .then(response_handler(res))); }
{ model.findOne(build_query(prop_remap, req.params, { where: { user_id } })).then(row => { instead.includes_me = !!row; resolve(); }); }
conditional_block
crud.ts
import { ServiceResponseV1 } from '../http'; import { Request, Response } from 'express'; import { Model, DestroyOptions, UpdateOptions, FindOptions } from 'sequelize'; import { BadRequestError, ERR_MSG_MISSING_FIELDS, ERR_MSG_INVALID_PARTS } from '../errors'; import { merge, includes, each, clone, map, filter as arr_filter, reduce, find, values, Dictionary } from 'lodash'; import * as q from 'q'; import { v4 } from 'node-uuid'; const ID_MAP: SDict = { id: 'id' }; const ID_FIELDS = [ 'id', 'updated_by', 'created_by' ]; type RequestHandler = (req: Request, res: Response, next?: Function) => void type SDict = Dictionary<string>; type Tag = { tag: string, val: any }; type Query = SDict & any; type QueryOptions = UpdateOptions & DestroyOptions & FindOptions; interface QueryResultsModelMeta { count?: number; includes_me?: boolean; } // XXX remove this and just use `next` function error(res: Response, err: Error) { res.status(500); res.json(<ServiceResponseV1<SDict>>{ meta: { ok: false, message: err.message, }, body: {} }); } function generate_where(schema: SDict, params: SDict): SDict
function build_query(prop_remap: SDict, params: SDict, extras: Object = {}): QueryOptions { var query = <QueryOptions>clone(extras); query.where = merge(generate_where(prop_remap, params), query.where); query.raw = true; return query; } function stamp_meta<V, H>(label: string, val: V): (holder: H) => H { return holder => { holder['@meta'] = holder['@meta'] || {}; holder['@meta'][label] = val; return holder; }; } function tag(name: string): (string) => Tag { return (val: string): Tag => { return { tag: name, val: val }; }; } function replace_with_uuid(val: string, field: string): Boolean { return val === '$UUID' && ID_FIELDS.indexOf(field) !== -1; } function populate_dates(body: Query): Query { var cbody = clone(body); if (!cbody.deleted_date) { cbody.deleted_date = null; } return cbody; } function populate_uuids(body: Query): Query { var id; return reduce(body, (prop_remap: SDict, val: string, field: string) => { if (replace_with_uuid(val, field)) { id = id || v4(); val = id; } prop_remap[field] = val; return prop_remap; }, {}); } function populate_extra_parameters(req: Request, extra_params: Object) { if (extra_params) { each(extra_params, function (field) { req.body[field] = req.params[field]; }); } } // XXX remove this and just use `next` function error_handler(res: Response, action): any { return action.catch(err => error(res, err)); } function response_handler(res: Response, property?: string): any { var start_time = Date.now(); return results => { var body = property ? 
results[property] : results, meta = { ok: true, error: false, elapsed_time: Date.now() - start_time }; return res.json(<ServiceResponseV1<SDict | SDict[] | number>>{ meta, body }); }; } export function upsert(model: any, extra_params: string[] = []): RequestHandler { return (req, res) => { populate_extra_parameters(req, extra_params); error_handler(res, model.upsert(populate_uuids(populate_dates(req.body))) .then(response_handler(res))); }; } /** * XXX instead of a callback this should pass the response down so that * handlers can just be appended. */ export function create(model: any, extra_params: string[] = [], cb?: (Model) => Promise<Model<any, any>>): RequestHandler { return (req, res) => { populate_extra_parameters(req, extra_params); error_handler(res, model.create(populate_uuids(populate_dates(req.body))) .then(model => cb ? cb(model) : model) .then(response_handler(res))); }; } export function retrieve(model: any, prop_remap: SDict = ID_MAP): RequestHandler { var find: string; return (req, res) => { // GET model/:id // GET model/:parent_id/sub_model // GET model/:parent_id/sub_model/:id if (req.params.id || prop_remap) { find = req.params.id ? 'findOne' : 'findAll'; error_handler(res, model[find](build_query(prop_remap, req.params, { paranoid: !req.params.id, order: ['created_date'] })).then(response_handler(res))); } else { error(res, new Error('search not implemented')); } }; } export function update(model: any): RequestHandler { return (req, res) => error_handler(res, model.update( populate_uuids(populate_dates(req.body)), build_query(ID_MAP, req.params) ).then(response_handler(res))); } /** * NOTE this will always do a soft-delete unless "purge=true" is passed as a * query paramter along with a valid "purge_key" value. this "purge_key" is * retrieved from the CP_PURGE_KEY env var. 
*/ export function del(model: any, prop_remap: SDict = ID_MAP): RequestHandler { return (req, res, next) => { var deleted_by = req.user.id, where = { deleted_date: null }, force = req.query.purge === 'true' && req.query.purge_key === process.env.CP_PURGE_KEY && process.env.CP_PURGE_KEY; // mismatching prop_remap to req.* is resulting in `delete from X` // queries for (var prop in prop_remap) { if (prop_remap.hasOwnProperty(prop) && !req.params[prop_remap[prop]]) { next(new BadRequestError(ERR_MSG_MISSING_FIELDS(values<string>(prop_remap)))); return; } } error_handler(res, (<any>model).sequelize.transaction(transaction => model.update({ deleted_by }, build_query(prop_remap, req.params, { transaction, where })).then(() => model.destroy(build_query(prop_remap, req.params, { transaction, force }))))).then(response_handler(res)); }; } export function parts(model: any, prop_remap, parts_def?): RequestHandler { if (!parts_def) { parts_def = prop_remap; prop_remap = {id: 'id'}; } return (req, res, next) => { var parts_wanted = arr_filter((req.query.parts || '').split(',')), expand_wanted = arr_filter((req.query.expand || '').split(',')), bad_parts = [], queries = []; // check for invalid parts first each(parts_wanted, (part: string) => { if (!(part in parts_def)) { bad_parts.push(part); } }); if (bad_parts.length) { next(new BadRequestError(ERR_MSG_INVALID_PARTS(bad_parts))); return; } // mian queries.push(model.findOne(build_query(prop_remap, req.params, { paranoid: false })).then(tag('main'))); // parts each(parts_wanted, (part: string) => { var query = null, model = parts_def[part][0], prop_remap = parts_def[part][1], meta = parts_def[part][2] || {}; if (meta.expand && includes(expand_wanted, part)) { query = model.findAll(build_query(prop_remap, req.params)) .then(results => { var model = meta.expand[0], remap = meta.expand[1]; results = Array.isArray(results) ? 
results : [results]; return q.all(map(results, val => model.findOne(build_query(remap, <SDict>val)) .then(stamp_meta('relationship', val)))) .then(tag(part)); }); } else if (meta.instead) { query = new Promise((resolve, reject) => { var instead: QueryResultsModelMeta = {}, user_id = req.user.id, checks = []; if (meta.instead.count) { checks.push(new Promise((resolve, reject) => { model.findAndCountAll(build_query(prop_remap, req.params)) .then(count => { instead.count = count.count; resolve(); }); })); } if (meta.instead.includes_me) { checks.push(new Promise((resolve, reject) => { if (!user_id) { instead.includes_me = false; resolve(); } else { model.findOne(build_query(prop_remap, req.params, { where: { user_id } })).then(row => { instead.includes_me = !!row; resolve(); }); } })); } return Promise.all(checks).then(() => q.when({}) .then(stamp_meta('instead', instead)) .then(tag(part)) .then(resolve)); }); } else { query = model.findAll(build_query(prop_remap, req.params)) .then(tag(part)); } queries.push(query); }); // combine `main` and `parts` into a single response object error_handler(res, q.all(queries) .then(results => { response_handler(res)(reduce(parts_wanted, (body, part: string) => { body[part] = (<Tag>(find(results, {tag: part}) || {})).val; return body; }, (<Tag>find(results, {tag: 'main'})).val)); }) ); }; } export function all(model: any): RequestHandler { return (req, res) => error_handler(res, model.findAll({}) .then(response_handler(res))); }
{ return reduce(schema, (prop_remap, lookup: string, field: string) => { if (params[lookup]) { prop_remap[field] = params[lookup]; } return prop_remap; }, {}); }
identifier_body
cpu_time.rs
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. // Modified from https://github.com/rust-lang/cargo/blob/426fae51f39ebf6c545a2c12f78bc09fbfdb7aa9/src/cargo/util/cpu.rs // TODO: Maybe use https://github.com/heim-rs/heim is better after https://github.com/heim-rs/heim/issues/233 is fixed. use std::{ io, mem, time::{Duration, Instant}, }; use derive_more::{Add, Sub}; #[derive(Add, Sub)] pub struct LinuxStyleCpuTime { pub user: u64, pub nice: u64, pub system: u64, pub idle: u64, pub iowait: u64, pub irq: u64, pub softirq: u64, pub steal: u64, pub guest: u64, pub guest_nice: u64, } impl LinuxStyleCpuTime { pub fn total(&self) -> u64 { // Note: guest(_nice) is not counted, since it is already in user. // See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time self.user + self.system + self.idle + self.nice + self.iowait + self.irq + self.softirq + self.steal } pub fn current() -> io::Result<LinuxStyleCpuTime> { imp::current() } } pub use std::io::Result; pub use imp::cpu_time; /// A struct to monitor process cpu usage #[derive(Clone, Copy)] pub struct ProcessStat { current_time: Instant, cpu_time: Duration, } impl ProcessStat { pub fn cur_proc_stat() -> io::Result<Self> { Ok(ProcessStat { current_time: Instant::now(), cpu_time: imp::cpu_time()?, }) } /// return the cpu usage from last invoke, /// or when this struct created if it is the first invoke. 
pub fn cpu_usage(&mut self) -> io::Result<f64> { let new_time = imp::cpu_time()?; let old_time = mem::replace(&mut self.cpu_time, new_time); let old_now = mem::replace(&mut self.current_time, Instant::now()); let real_time = self.current_time.duration_since(old_now).as_secs_f64(); if real_time > 0.0 { let cpu_time = new_time .checked_sub(old_time) .map(|dur| dur.as_secs_f64()) .unwrap_or(0.0); Ok(cpu_time / real_time) } else { Ok(0.0) } } } #[cfg(any(target_os = "linux", target_os = "freebsd"))] mod imp { use std::{fs::File, io, io::Read, time::Duration}; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { let mut state = String::new(); File::open("/proc/stat")?.read_to_string(&mut state)?; (|| { let mut parts = state.lines().next()?.split_whitespace(); if parts.next()? != "cpu" { return None; } Some(super::LinuxStyleCpuTime { user: parts.next()?.parse::<u64>().ok()?, nice: parts.next()?.parse::<u64>().ok()?, system: parts.next()?.parse::<u64>().ok()?, idle: parts.next()?.parse::<u64>().ok()?, iowait: parts.next()?.parse::<u64>().ok()?, irq: parts.next()?.parse::<u64>().ok()?, softirq: parts.next()?.parse::<u64>().ok()?, steal: parts.next()?.parse::<u64>().ok()?, guest: parts.next()?.parse::<u64>().ok()?, guest_nice: parts.next()?.parse::<u64>().ok()?, }) })() .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed")) } pub fn cpu_time() -> io::Result<Duration> { let mut time = libc::timespec { tv_sec: 0, tv_nsec: 0, }; if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 { Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32)) } else { Err(io::Error::last_os_error()) } } } #[cfg(target_os = "macos")] mod imp { use std::{io, ptr}; use libc::*; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { // There's scant little documentation on `host_processor_info` // throughout the internet, so this is just modeled after what everyone // else is doing. 
For now this is modeled largely after libuv. unsafe { let mut num_cpus_u = 0; let mut cpu_info = ptr::null_mut(); let mut msg_type = 0; let ret = host_processor_info( mach_host_self(), PROCESSOR_CPU_LOAD_INFO as processor_flavor_t, &mut num_cpus_u, &mut cpu_info, &mut msg_type, ); if ret != KERN_SUCCESS { return Err(io::Error::from_raw_os_error(ret)); } let mut ret = super::LinuxStyleCpuTime { user: 0, system: 0, idle: 0, iowait: 0, irq: 0, softirq: 0, steal: 0, guest: 0, nice: 0, guest_nice: 0, }; let mut current = cpu_info as *const processor_cpu_load_info_data_t; for _ in 0..num_cpus_u { ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64; ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64; ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64; ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64; current = current.offset(1); } vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize); Ok(ret) } } pub fn cpu_time() -> io::Result<std::time::Duration> { let mut time = unsafe { std::mem::zeroed() }; if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 { let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64; let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000; Ok(std::time::Duration::new(sec, nsec)) } else { Err(io::Error::last_os_error()) } } } #[cfg(target_os = "windows")] mod imp { use std::io; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { Err(io::Error::new( io::ErrorKind::Other, "unsupported platform to learn CPU state", )) } use std::{io, mem, time::Duration}; use scopeguard::defer; use winapi::{ shared::{ minwindef::FILETIME, ntdef::{FALSE, NULL}, }, um::{ handleapi::CloseHandle, processthreadsapi::{ GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes, GetThreadTimes, OpenThread, }, sysinfoapi::{GetSystemInfo, SYSTEM_INFO}, winnt::THREAD_QUERY_INFORMATION, }, }; /// convert to u64, unit 100 ns fn 
filetime_to_ns100(ft: FILETIME) -> u64 { ((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64 } fn get_sys_times() -> io::Result<(u64, u64, u64)> { let mut idle = FILETIME::default(); let mut kernel = FILETIME::default(); let mut user = FILETIME::default(); let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) }; if ret == 0 { return Err(io::Error::last_os_error()); } let idle = filetime_to_ns100(idle); let kernel = filetime_to_ns100(kernel); let user = filetime_to_ns100(user); Ok((idle, kernel, user)) } fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> { let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) }; if handler == NULL { return Err(io::Error::last_os_error()); } defer! {{ unsafe { CloseHandle(handler) }; }} let mut create_time = FILETIME::default(); let mut exit_time = FILETIME::default(); let mut kernel_time = FILETIME::default(); let mut user_time = FILETIME::default(); let ret = unsafe { GetThreadTimes( handler, &mut create_time, &mut exit_time, &mut kernel_time, &mut user_time, ) }; if ret == 0 { return Err(io::Error::last_os_error()); } let kernel_time = filetime_to_ns100(kernel_time); let user_time = filetime_to_ns100(user_time); Ok((kernel_time, user_time)) } #[inline] pub fn cpu_time() -> io::Result<Duration>
} #[cfg(test)] mod tests { use super::*; // this test should be executed alone. #[test] fn test_process_usage() { let mut stat = ProcessStat::cur_proc_stat().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); let usage = stat.cpu_usage().unwrap(); assert!(usage < 0.01); let num = 1; for _ in 0..num * 10 { std::thread::spawn(move || { loop { let _ = (0..10_000_000).into_iter().sum::<u128>(); } }); } std::thread::sleep(std::time::Duration::from_secs(1)); let usage = stat.cpu_usage().unwrap(); assert!(usage > 0.9_f64) } }
{ let (kernel_time, user_time) = unsafe { let process = GetCurrentProcess(); let mut create_time = mem::zeroed(); let mut exit_time = mem::zeroed(); let mut kernel_time = mem::zeroed(); let mut user_time = mem::zeroed(); let ret = GetProcessTimes( process, &mut create_time, &mut exit_time, &mut kernel_time, &mut user_time, ); if ret != 0 { (kernel_time, user_time) } else { return Err(io::Error::last_os_error()); } }; let kt = filetime_to_ns100(kernel_time); let ut = filetime_to_ns100(user_time); // convert ns // // Note: make it ns unit may overflow in some cases. // For example, a machine with 128 cores runs for one year. let cpu = (kt + ut) * 100; // make it un-normalized let cpu = cpu * processor_numbers()? as u64; Ok(Duration::from_nanos(cpu)) }
identifier_body
cpu_time.rs
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. // Modified from https://github.com/rust-lang/cargo/blob/426fae51f39ebf6c545a2c12f78bc09fbfdb7aa9/src/cargo/util/cpu.rs // TODO: Maybe use https://github.com/heim-rs/heim is better after https://github.com/heim-rs/heim/issues/233 is fixed. use std::{ io, mem, time::{Duration, Instant}, }; use derive_more::{Add, Sub}; #[derive(Add, Sub)] pub struct LinuxStyleCpuTime { pub user: u64, pub nice: u64, pub system: u64, pub idle: u64, pub iowait: u64, pub irq: u64, pub softirq: u64, pub steal: u64, pub guest: u64, pub guest_nice: u64, } impl LinuxStyleCpuTime { pub fn total(&self) -> u64 { // Note: guest(_nice) is not counted, since it is already in user. // See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time self.user + self.system + self.idle + self.nice + self.iowait + self.irq + self.softirq + self.steal } pub fn current() -> io::Result<LinuxStyleCpuTime> { imp::current() } } pub use std::io::Result; pub use imp::cpu_time; /// A struct to monitor process cpu usage #[derive(Clone, Copy)] pub struct ProcessStat { current_time: Instant, cpu_time: Duration, } impl ProcessStat { pub fn cur_proc_stat() -> io::Result<Self> { Ok(ProcessStat { current_time: Instant::now(), cpu_time: imp::cpu_time()?, }) } /// return the cpu usage from last invoke, /// or when this struct created if it is the first invoke. 
pub fn cpu_usage(&mut self) -> io::Result<f64> { let new_time = imp::cpu_time()?; let old_time = mem::replace(&mut self.cpu_time, new_time); let old_now = mem::replace(&mut self.current_time, Instant::now()); let real_time = self.current_time.duration_since(old_now).as_secs_f64(); if real_time > 0.0 { let cpu_time = new_time .checked_sub(old_time) .map(|dur| dur.as_secs_f64()) .unwrap_or(0.0); Ok(cpu_time / real_time) } else { Ok(0.0) } } } #[cfg(any(target_os = "linux", target_os = "freebsd"))] mod imp { use std::{fs::File, io, io::Read, time::Duration}; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { let mut state = String::new(); File::open("/proc/stat")?.read_to_string(&mut state)?; (|| { let mut parts = state.lines().next()?.split_whitespace(); if parts.next()? != "cpu" { return None; } Some(super::LinuxStyleCpuTime { user: parts.next()?.parse::<u64>().ok()?, nice: parts.next()?.parse::<u64>().ok()?, system: parts.next()?.parse::<u64>().ok()?, idle: parts.next()?.parse::<u64>().ok()?, iowait: parts.next()?.parse::<u64>().ok()?, irq: parts.next()?.parse::<u64>().ok()?, softirq: parts.next()?.parse::<u64>().ok()?, steal: parts.next()?.parse::<u64>().ok()?, guest: parts.next()?.parse::<u64>().ok()?, guest_nice: parts.next()?.parse::<u64>().ok()?, }) })() .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed")) } pub fn cpu_time() -> io::Result<Duration> { let mut time = libc::timespec { tv_sec: 0, tv_nsec: 0, }; if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 { Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32)) } else { Err(io::Error::last_os_error()) } } } #[cfg(target_os = "macos")] mod imp { use std::{io, ptr}; use libc::*; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { // There's scant little documentation on `host_processor_info` // throughout the internet, so this is just modeled after what everyone // else is doing. 
For now this is modeled largely after libuv. unsafe { let mut num_cpus_u = 0; let mut cpu_info = ptr::null_mut(); let mut msg_type = 0; let ret = host_processor_info( mach_host_self(), PROCESSOR_CPU_LOAD_INFO as processor_flavor_t, &mut num_cpus_u, &mut cpu_info, &mut msg_type, ); if ret != KERN_SUCCESS { return Err(io::Error::from_raw_os_error(ret)); } let mut ret = super::LinuxStyleCpuTime { user: 0, system: 0, idle: 0, iowait: 0, irq: 0, softirq: 0, steal: 0, guest: 0, nice: 0, guest_nice: 0, }; let mut current = cpu_info as *const processor_cpu_load_info_data_t; for _ in 0..num_cpus_u { ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64; ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64; ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64; ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64; current = current.offset(1); } vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize); Ok(ret) } } pub fn
() -> io::Result<std::time::Duration> { let mut time = unsafe { std::mem::zeroed() }; if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 { let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64; let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000; Ok(std::time::Duration::new(sec, nsec)) } else { Err(io::Error::last_os_error()) } } } #[cfg(target_os = "windows")] mod imp { use std::io; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { Err(io::Error::new( io::ErrorKind::Other, "unsupported platform to learn CPU state", )) } use std::{io, mem, time::Duration}; use scopeguard::defer; use winapi::{ shared::{ minwindef::FILETIME, ntdef::{FALSE, NULL}, }, um::{ handleapi::CloseHandle, processthreadsapi::{ GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes, GetThreadTimes, OpenThread, }, sysinfoapi::{GetSystemInfo, SYSTEM_INFO}, winnt::THREAD_QUERY_INFORMATION, }, }; /// convert to u64, unit 100 ns fn filetime_to_ns100(ft: FILETIME) -> u64 { ((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64 } fn get_sys_times() -> io::Result<(u64, u64, u64)> { let mut idle = FILETIME::default(); let mut kernel = FILETIME::default(); let mut user = FILETIME::default(); let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) }; if ret == 0 { return Err(io::Error::last_os_error()); } let idle = filetime_to_ns100(idle); let kernel = filetime_to_ns100(kernel); let user = filetime_to_ns100(user); Ok((idle, kernel, user)) } fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> { let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) }; if handler == NULL { return Err(io::Error::last_os_error()); } defer! 
{{ unsafe { CloseHandle(handler) }; }} let mut create_time = FILETIME::default(); let mut exit_time = FILETIME::default(); let mut kernel_time = FILETIME::default(); let mut user_time = FILETIME::default(); let ret = unsafe { GetThreadTimes( handler, &mut create_time, &mut exit_time, &mut kernel_time, &mut user_time, ) }; if ret == 0 { return Err(io::Error::last_os_error()); } let kernel_time = filetime_to_ns100(kernel_time); let user_time = filetime_to_ns100(user_time); Ok((kernel_time, user_time)) } #[inline] pub fn cpu_time() -> io::Result<Duration> { let (kernel_time, user_time) = unsafe { let process = GetCurrentProcess(); let mut create_time = mem::zeroed(); let mut exit_time = mem::zeroed(); let mut kernel_time = mem::zeroed(); let mut user_time = mem::zeroed(); let ret = GetProcessTimes( process, &mut create_time, &mut exit_time, &mut kernel_time, &mut user_time, ); if ret != 0 { (kernel_time, user_time) } else { return Err(io::Error::last_os_error()); } }; let kt = filetime_to_ns100(kernel_time); let ut = filetime_to_ns100(user_time); // convert ns // // Note: make it ns unit may overflow in some cases. // For example, a machine with 128 cores runs for one year. let cpu = (kt + ut) * 100; // make it un-normalized let cpu = cpu * processor_numbers()? as u64; Ok(Duration::from_nanos(cpu)) } } #[cfg(test)] mod tests { use super::*; // this test should be executed alone. #[test] fn test_process_usage() { let mut stat = ProcessStat::cur_proc_stat().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); let usage = stat.cpu_usage().unwrap(); assert!(usage < 0.01); let num = 1; for _ in 0..num * 10 { std::thread::spawn(move || { loop { let _ = (0..10_000_000).into_iter().sum::<u128>(); } }); } std::thread::sleep(std::time::Duration::from_secs(1)); let usage = stat.cpu_usage().unwrap(); assert!(usage > 0.9_f64) } }
cpu_time
identifier_name
cpu_time.rs
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. // Modified from https://github.com/rust-lang/cargo/blob/426fae51f39ebf6c545a2c12f78bc09fbfdb7aa9/src/cargo/util/cpu.rs // TODO: Maybe use https://github.com/heim-rs/heim is better after https://github.com/heim-rs/heim/issues/233 is fixed. use std::{ io, mem, time::{Duration, Instant}, }; use derive_more::{Add, Sub}; #[derive(Add, Sub)] pub struct LinuxStyleCpuTime { pub user: u64, pub nice: u64, pub system: u64, pub idle: u64, pub iowait: u64, pub irq: u64, pub softirq: u64, pub steal: u64, pub guest: u64, pub guest_nice: u64, } impl LinuxStyleCpuTime { pub fn total(&self) -> u64 { // Note: guest(_nice) is not counted, since it is already in user. // See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time self.user + self.system + self.idle + self.nice + self.iowait + self.irq + self.softirq + self.steal } pub fn current() -> io::Result<LinuxStyleCpuTime> { imp::current() } } pub use std::io::Result; pub use imp::cpu_time; /// A struct to monitor process cpu usage #[derive(Clone, Copy)] pub struct ProcessStat { current_time: Instant, cpu_time: Duration, } impl ProcessStat { pub fn cur_proc_stat() -> io::Result<Self> { Ok(ProcessStat { current_time: Instant::now(), cpu_time: imp::cpu_time()?, }) } /// return the cpu usage from last invoke, /// or when this struct created if it is the first invoke. 
pub fn cpu_usage(&mut self) -> io::Result<f64> { let new_time = imp::cpu_time()?; let old_time = mem::replace(&mut self.cpu_time, new_time); let old_now = mem::replace(&mut self.current_time, Instant::now()); let real_time = self.current_time.duration_since(old_now).as_secs_f64(); if real_time > 0.0 { let cpu_time = new_time .checked_sub(old_time) .map(|dur| dur.as_secs_f64()) .unwrap_or(0.0); Ok(cpu_time / real_time) } else { Ok(0.0) } } } #[cfg(any(target_os = "linux", target_os = "freebsd"))] mod imp { use std::{fs::File, io, io::Read, time::Duration}; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { let mut state = String::new(); File::open("/proc/stat")?.read_to_string(&mut state)?; (|| { let mut parts = state.lines().next()?.split_whitespace(); if parts.next()? != "cpu" { return None; } Some(super::LinuxStyleCpuTime { user: parts.next()?.parse::<u64>().ok()?, nice: parts.next()?.parse::<u64>().ok()?, system: parts.next()?.parse::<u64>().ok()?, idle: parts.next()?.parse::<u64>().ok()?, iowait: parts.next()?.parse::<u64>().ok()?, irq: parts.next()?.parse::<u64>().ok()?, softirq: parts.next()?.parse::<u64>().ok()?, steal: parts.next()?.parse::<u64>().ok()?, guest: parts.next()?.parse::<u64>().ok()?, guest_nice: parts.next()?.parse::<u64>().ok()?, }) })() .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed")) } pub fn cpu_time() -> io::Result<Duration> { let mut time = libc::timespec { tv_sec: 0, tv_nsec: 0, }; if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 { Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32)) } else { Err(io::Error::last_os_error()) } } } #[cfg(target_os = "macos")] mod imp { use std::{io, ptr}; use libc::*; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { // There's scant little documentation on `host_processor_info` // throughout the internet, so this is just modeled after what everyone // else is doing. 
For now this is modeled largely after libuv. unsafe { let mut num_cpus_u = 0; let mut cpu_info = ptr::null_mut(); let mut msg_type = 0; let ret = host_processor_info( mach_host_self(), PROCESSOR_CPU_LOAD_INFO as processor_flavor_t, &mut num_cpus_u, &mut cpu_info, &mut msg_type, ); if ret != KERN_SUCCESS { return Err(io::Error::from_raw_os_error(ret)); } let mut ret = super::LinuxStyleCpuTime { user: 0, system: 0, idle: 0, iowait: 0, irq: 0, softirq: 0, steal: 0, guest: 0, nice: 0, guest_nice: 0, }; let mut current = cpu_info as *const processor_cpu_load_info_data_t; for _ in 0..num_cpus_u { ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64; ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64; ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64; ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64; current = current.offset(1); } vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize); Ok(ret) } } pub fn cpu_time() -> io::Result<std::time::Duration> { let mut time = unsafe { std::mem::zeroed() }; if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 { let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64; let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000; Ok(std::time::Duration::new(sec, nsec)) } else { Err(io::Error::last_os_error()) } } } #[cfg(target_os = "windows")] mod imp { use std::io; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { Err(io::Error::new( io::ErrorKind::Other, "unsupported platform to learn CPU state", )) } use std::{io, mem, time::Duration}; use scopeguard::defer; use winapi::{ shared::{ minwindef::FILETIME, ntdef::{FALSE, NULL}, }, um::{ handleapi::CloseHandle, processthreadsapi::{ GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes, GetThreadTimes, OpenThread, }, sysinfoapi::{GetSystemInfo, SYSTEM_INFO}, winnt::THREAD_QUERY_INFORMATION, }, }; /// convert to u64, unit 100 ns fn 
filetime_to_ns100(ft: FILETIME) -> u64 { ((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64 } fn get_sys_times() -> io::Result<(u64, u64, u64)> { let mut idle = FILETIME::default(); let mut kernel = FILETIME::default(); let mut user = FILETIME::default(); let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) }; if ret == 0 { return Err(io::Error::last_os_error()); } let idle = filetime_to_ns100(idle); let kernel = filetime_to_ns100(kernel); let user = filetime_to_ns100(user); Ok((idle, kernel, user)) } fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> { let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) }; if handler == NULL { return Err(io::Error::last_os_error()); } defer! {{ unsafe { CloseHandle(handler) }; }} let mut create_time = FILETIME::default(); let mut exit_time = FILETIME::default(); let mut kernel_time = FILETIME::default(); let mut user_time = FILETIME::default(); let ret = unsafe { GetThreadTimes( handler, &mut create_time, &mut exit_time, &mut kernel_time, &mut user_time, ) }; if ret == 0 { return Err(io::Error::last_os_error()); } let kernel_time = filetime_to_ns100(kernel_time); let user_time = filetime_to_ns100(user_time); Ok((kernel_time, user_time)) } #[inline] pub fn cpu_time() -> io::Result<Duration> { let (kernel_time, user_time) = unsafe { let process = GetCurrentProcess(); let mut create_time = mem::zeroed(); let mut exit_time = mem::zeroed(); let mut kernel_time = mem::zeroed(); let mut user_time = mem::zeroed(); let ret = GetProcessTimes( process, &mut create_time, &mut exit_time, &mut kernel_time, &mut user_time, ); if ret != 0 { (kernel_time, user_time) } else { return Err(io::Error::last_os_error());
} }; let kt = filetime_to_ns100(kernel_time); let ut = filetime_to_ns100(user_time); // convert ns // // Note: make it ns unit may overflow in some cases. // For example, a machine with 128 cores runs for one year. let cpu = (kt + ut) * 100; // make it un-normalized let cpu = cpu * processor_numbers()? as u64; Ok(Duration::from_nanos(cpu)) } } #[cfg(test)] mod tests { use super::*; // this test should be executed alone. #[test] fn test_process_usage() { let mut stat = ProcessStat::cur_proc_stat().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); let usage = stat.cpu_usage().unwrap(); assert!(usage < 0.01); let num = 1; for _ in 0..num * 10 { std::thread::spawn(move || { loop { let _ = (0..10_000_000).into_iter().sum::<u128>(); } }); } std::thread::sleep(std::time::Duration::from_secs(1)); let usage = stat.cpu_usage().unwrap(); assert!(usage > 0.9_f64) } }
random_line_split
cpu_time.rs
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. // Modified from https://github.com/rust-lang/cargo/blob/426fae51f39ebf6c545a2c12f78bc09fbfdb7aa9/src/cargo/util/cpu.rs // TODO: Maybe use https://github.com/heim-rs/heim is better after https://github.com/heim-rs/heim/issues/233 is fixed. use std::{ io, mem, time::{Duration, Instant}, }; use derive_more::{Add, Sub}; #[derive(Add, Sub)] pub struct LinuxStyleCpuTime { pub user: u64, pub nice: u64, pub system: u64, pub idle: u64, pub iowait: u64, pub irq: u64, pub softirq: u64, pub steal: u64, pub guest: u64, pub guest_nice: u64, } impl LinuxStyleCpuTime { pub fn total(&self) -> u64 { // Note: guest(_nice) is not counted, since it is already in user. // See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time self.user + self.system + self.idle + self.nice + self.iowait + self.irq + self.softirq + self.steal } pub fn current() -> io::Result<LinuxStyleCpuTime> { imp::current() } } pub use std::io::Result; pub use imp::cpu_time; /// A struct to monitor process cpu usage #[derive(Clone, Copy)] pub struct ProcessStat { current_time: Instant, cpu_time: Duration, } impl ProcessStat { pub fn cur_proc_stat() -> io::Result<Self> { Ok(ProcessStat { current_time: Instant::now(), cpu_time: imp::cpu_time()?, }) } /// return the cpu usage from last invoke, /// or when this struct created if it is the first invoke. pub fn cpu_usage(&mut self) -> io::Result<f64> { let new_time = imp::cpu_time()?; let old_time = mem::replace(&mut self.cpu_time, new_time); let old_now = mem::replace(&mut self.current_time, Instant::now()); let real_time = self.current_time.duration_since(old_now).as_secs_f64(); if real_time > 0.0
else { Ok(0.0) } } } #[cfg(any(target_os = "linux", target_os = "freebsd"))] mod imp { use std::{fs::File, io, io::Read, time::Duration}; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { let mut state = String::new(); File::open("/proc/stat")?.read_to_string(&mut state)?; (|| { let mut parts = state.lines().next()?.split_whitespace(); if parts.next()? != "cpu" { return None; } Some(super::LinuxStyleCpuTime { user: parts.next()?.parse::<u64>().ok()?, nice: parts.next()?.parse::<u64>().ok()?, system: parts.next()?.parse::<u64>().ok()?, idle: parts.next()?.parse::<u64>().ok()?, iowait: parts.next()?.parse::<u64>().ok()?, irq: parts.next()?.parse::<u64>().ok()?, softirq: parts.next()?.parse::<u64>().ok()?, steal: parts.next()?.parse::<u64>().ok()?, guest: parts.next()?.parse::<u64>().ok()?, guest_nice: parts.next()?.parse::<u64>().ok()?, }) })() .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed")) } pub fn cpu_time() -> io::Result<Duration> { let mut time = libc::timespec { tv_sec: 0, tv_nsec: 0, }; if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 { Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32)) } else { Err(io::Error::last_os_error()) } } } #[cfg(target_os = "macos")] mod imp { use std::{io, ptr}; use libc::*; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { // There's scant little documentation on `host_processor_info` // throughout the internet, so this is just modeled after what everyone // else is doing. For now this is modeled largely after libuv. 
unsafe { let mut num_cpus_u = 0; let mut cpu_info = ptr::null_mut(); let mut msg_type = 0; let ret = host_processor_info( mach_host_self(), PROCESSOR_CPU_LOAD_INFO as processor_flavor_t, &mut num_cpus_u, &mut cpu_info, &mut msg_type, ); if ret != KERN_SUCCESS { return Err(io::Error::from_raw_os_error(ret)); } let mut ret = super::LinuxStyleCpuTime { user: 0, system: 0, idle: 0, iowait: 0, irq: 0, softirq: 0, steal: 0, guest: 0, nice: 0, guest_nice: 0, }; let mut current = cpu_info as *const processor_cpu_load_info_data_t; for _ in 0..num_cpus_u { ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64; ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64; ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64; ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64; current = current.offset(1); } vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize); Ok(ret) } } pub fn cpu_time() -> io::Result<std::time::Duration> { let mut time = unsafe { std::mem::zeroed() }; if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 { let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64; let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000; Ok(std::time::Duration::new(sec, nsec)) } else { Err(io::Error::last_os_error()) } } } #[cfg(target_os = "windows")] mod imp { use std::io; pub fn current() -> io::Result<super::LinuxStyleCpuTime> { Err(io::Error::new( io::ErrorKind::Other, "unsupported platform to learn CPU state", )) } use std::{io, mem, time::Duration}; use scopeguard::defer; use winapi::{ shared::{ minwindef::FILETIME, ntdef::{FALSE, NULL}, }, um::{ handleapi::CloseHandle, processthreadsapi::{ GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes, GetThreadTimes, OpenThread, }, sysinfoapi::{GetSystemInfo, SYSTEM_INFO}, winnt::THREAD_QUERY_INFORMATION, }, }; /// convert to u64, unit 100 ns fn filetime_to_ns100(ft: FILETIME) -> u64 { 
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64 } fn get_sys_times() -> io::Result<(u64, u64, u64)> { let mut idle = FILETIME::default(); let mut kernel = FILETIME::default(); let mut user = FILETIME::default(); let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) }; if ret == 0 { return Err(io::Error::last_os_error()); } let idle = filetime_to_ns100(idle); let kernel = filetime_to_ns100(kernel); let user = filetime_to_ns100(user); Ok((idle, kernel, user)) } fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> { let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) }; if handler == NULL { return Err(io::Error::last_os_error()); } defer! {{ unsafe { CloseHandle(handler) }; }} let mut create_time = FILETIME::default(); let mut exit_time = FILETIME::default(); let mut kernel_time = FILETIME::default(); let mut user_time = FILETIME::default(); let ret = unsafe { GetThreadTimes( handler, &mut create_time, &mut exit_time, &mut kernel_time, &mut user_time, ) }; if ret == 0 { return Err(io::Error::last_os_error()); } let kernel_time = filetime_to_ns100(kernel_time); let user_time = filetime_to_ns100(user_time); Ok((kernel_time, user_time)) } #[inline] pub fn cpu_time() -> io::Result<Duration> { let (kernel_time, user_time) = unsafe { let process = GetCurrentProcess(); let mut create_time = mem::zeroed(); let mut exit_time = mem::zeroed(); let mut kernel_time = mem::zeroed(); let mut user_time = mem::zeroed(); let ret = GetProcessTimes( process, &mut create_time, &mut exit_time, &mut kernel_time, &mut user_time, ); if ret != 0 { (kernel_time, user_time) } else { return Err(io::Error::last_os_error()); } }; let kt = filetime_to_ns100(kernel_time); let ut = filetime_to_ns100(user_time); // convert ns // // Note: make it ns unit may overflow in some cases. // For example, a machine with 128 cores runs for one year. let cpu = (kt + ut) * 100; // make it un-normalized let cpu = cpu * processor_numbers()? 
as u64; Ok(Duration::from_nanos(cpu)) } } #[cfg(test)] mod tests { use super::*; // this test should be executed alone. #[test] fn test_process_usage() { let mut stat = ProcessStat::cur_proc_stat().unwrap(); std::thread::sleep(std::time::Duration::from_secs(1)); let usage = stat.cpu_usage().unwrap(); assert!(usage < 0.01); let num = 1; for _ in 0..num * 10 { std::thread::spawn(move || { loop { let _ = (0..10_000_000).into_iter().sum::<u128>(); } }); } std::thread::sleep(std::time::Duration::from_secs(1)); let usage = stat.cpu_usage().unwrap(); assert!(usage > 0.9_f64) } }
{ let cpu_time = new_time .checked_sub(old_time) .map(|dur| dur.as_secs_f64()) .unwrap_or(0.0); Ok(cpu_time / real_time) }
conditional_block
esv.py
""" This file provides the following dictionaries based on data from the ESV bible. Chapter lengths obtained via email from Crossway in Jan 2008. Dictionary | Keyed to | Returns --------------------------------------------------------------------------- book_names | book number | tuples of corresponding (three letter abbreviation, full name, standard abbreviation) book_numbers | book name | corresponding book number number_chapters | book number | number of chapters in that book last_verses | (book number, chapter) | last verse of that chapter --------------------------------------------------------------------------- In addition, the get_passage_text function is provided to look up passage text for a given passage. """ from .common import book_names, book_numbers # List recording the last verse in each chapter of each book in the bible. # e.g last verse in Leviticus chapter 5 is last_verse_data[2][4]. last_verse_data = [ [31, 25, 24, 26, 32, 22, 24, 22, 29, 32, 32, 20, 18, 24, 21, 16, 27, 33, 38, 18, 34, 24, 20, 67, 34, 35, 46, 22, 35, 43, 55, 32, 20, 31, 29, 43, 36, 30, 23, 23, 57, 38, 34, 34, 28, 34, 31, 22, 33, 26], [22, 25, 22, 31, 23, 30, 25, 32, 35, 29, 10, 51, 22, 31, 27, 36, 16, 27, 25, 26, 36, 31, 33, 18, 40, 37, 21, 43, 46, 38, 18, 35, 23, 35, 35, 38, 29, 31, 43, 38], [17, 16, 17, 35, 19, 30, 38, 36, 24, 20, 47, 8, 59, 57, 33, 34, 16, 30, 37, 27, 24, 33, 44, 23, 55, 46, 34], [54, 34, 51, 49, 31, 27, 89, 26, 23, 36, 35, 16, 33, 45, 41, 50, 13, 32, 22, 29, 35, 41, 30, 25, 18, 65, 23, 31, 40, 16, 54, 42, 56, 29, 34, 13], [46, 37, 29, 49, 33, 25, 26, 20, 29, 22, 32, 32, 18, 29, 23, 22, 20, 22, 21, 20, 23, 30, 25, 22, 19, 19, 26, 68, 29, 20, 30, 52, 29, 12], [18, 24, 17, 24, 15, 27, 26, 35, 27, 43, 23, 24, 33, 15, 63, 10, 18, 28, 51, 9, 45, 34, 16, 33], [36, 23, 31, 24, 31, 40, 25, 35, 57, 18, 40, 15, 25, 20, 20, 31, 13, 31, 30, 48, 25], [22, 23, 18, 22], [28, 36, 21, 22, 12, 21, 17, 22, 27, 27, 15, 25, 23, 52, 35, 23, 58, 30, 24, 42, 15, 23, 29, 22, 44, 25, 
12, 25, 11, 31, 13], [27, 32, 39, 12, 25, 23, 29, 18, 13, 19, 27, 31, 39, 33, 37, 23, 29, 33, 43, 26, 22, 51, 39, 25], [53, 46, 28, 34, 18, 38, 51, 66, 28, 29, 43, 33, 34, 31, 34, 34, 24, 46, 21, 43, 29, 53], [18, 25, 27, 44, 27, 33, 20, 29, 37, 36, 21, 21, 25, 29, 38, 20, 41, 37, 37, 21, 26, 20, 37, 20, 30], [54, 55, 24, 43, 26, 81, 40, 40, 44, 14, 47, 40, 14, 17, 29, 43, 27, 17, 19, 8, 30, 19, 32, 31, 31, 32, 34, 21, 30], [17, 18, 17, 22, 14, 42, 22, 18, 31, 19, 23, 16, 22, 15, 19, 14, 19, 34, 11, 37, 20, 12, 21, 27, 28, 23, 9, 27, 36, 27, 21, 33, 25, 33, 27, 23], [11, 70, 13, 24, 17, 22, 28, 36, 15, 44], [11, 20, 32, 23, 19, 19, 73, 18, 38, 39, 36, 47, 31], [22, 23, 15, 17, 14, 14, 10, 17, 32, 3], [22, 13, 26, 21, 27, 30, 21, 22, 35, 22, 20, 25, 28, 22, 35, 22, 16, 21, 29, 29, 34, 30, 17, 25, 6, 14, 23, 28, 25, 31, 40, 22, 33, 37, 16, 33, 24, 41, 30, 24, 34, 17], [6, 12, 8, 8, 12, 10, 17, 9, 20, 18, 7, 8, 6, 7, 5, 11, 15, 50, 14, 9, 13, 31, 6, 10, 22, 12, 14, 9, 11, 12, 24, 11, 22, 22, 28, 12, 40, 22, 13, 17, 13, 11, 5, 26, 17, 11, 9, 14, 20, 23, 19, 9, 6, 7, 23, 13, 11, 11, 17, 12, 8, 12, 11, 10, 13, 20, 7, 35, 36, 5, 24, 20, 28, 23, 10, 12, 20, 72, 13, 19, 16, 8, 18, 12, 13, 17, 7, 18, 52, 17, 16, 15, 5, 23, 11, 13, 12, 9, 9, 5, 8, 28, 22, 35, 45, 48, 43, 13, 31, 7, 10, 10, 9, 8, 18, 19, 2, 29, 176,7, 8, 9, 4, 8, 5, 6, 5, 6, 8, 8, 3, 18, 3, 3, 21, 26, 9, 8, 24, 13, 10, 7, 12, 15, 21, 10, 20, 14, 9, 6], [33, 22, 35, 27, 23, 35, 27, 36, 18, 32, 31, 28, 25, 35, 33, 33, 28, 24, 29, 30, 31, 29, 35, 34, 28, 28, 27, 28, 27, 33, 31], [18, 26, 22, 16, 20, 12, 29, 17, 18, 20, 10, 14], [17, 17, 11, 16, 16, 13, 13, 14], [31, 22, 26, 6, 30, 13, 25, 22, 21, 34, 16, 6, 22, 32, 9, 14, 14, 7, 25, 6, 17, 25, 18, 23, 12, 21, 13, 29, 24, 33, 9, 20, 24, 17, 10, 22, 38, 22, 8, 31, 29, 25, 28, 28, 25, 13, 15, 22, 26, 11, 23, 15, 12, 17, 13, 12, 21, 14, 21, 22, 11, 12, 19, 12, 25, 24], [19, 37, 25, 31, 31, 30, 34, 22, 26, 25, 23, 17, 27, 22, 21, 21, 27, 23, 15, 18, 14, 30, 40, 10, 
38, 24, 22, 17, 32, 24, 40, 44, 26, 22, 19, 32, 21, 28, 18, 16, 18, 22, 13, 30, 5, 28, 7, 47, 39, 46, 64, 34], [22, 22, 66, 22, 22], [28, 10, 27, 17, 17, 14, 27, 18, 11, 22, 25, 28, 23, 23, 8, 63, 24, 32, 14, 49, 32, 31, 49, 27, 17, 21, 36, 26, 21, 26, 18, 32, 33, 31, 15, 38, 28, 23, 29, 49, 26, 20, 27, 31, 25, 24, 23, 35], [21, 49, 30, 37, 31, 28, 28, 27, 27, 21, 45, 13], [11, 23, 5, 19, 15, 11, 16, 14, 17, 15, 12, 14, 16, 9], [20, 32, 21], [15, 16, 15, 13, 27, 14, 17, 14, 15], [21], [17, 10, 10, 11], [16, 13, 12, 13, 15, 16, 20], [15, 13, 19], [17, 20, 19], [18, 15, 20], [15, 23], [21, 13, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21], [14, 17, 18, 6], [25, 23, 17, 25, 48, 34, 29, 34, 38, 42, 30, 50, 58, 36, 39, 28, 27, 35, 30, 34, 46, 46, 39, 51, 46, 75, 66, 20], [45, 28, 35, 41, 43, 56, 37, 38, 50, 52, 33, 44, 37, 72, 47, 20], [80, 52, 38, 44, 39, 49, 50, 56, 62, 42, 54, 59, 35, 35, 32, 31, 37, 43, 48, 47, 38, 71, 56, 53], [51, 25, 36, 54, 47, 71, 53, 59, 41, 42, 57, 50, 38, 31, 27, 33, 26, 40, 42, 31, 25], [26, 47, 26, 37, 42, 15, 60, 40, 43, 48, 30, 25, 52, 28, 41, 40, 34, 28, 41, 38, 40, 30, 35, 27, 27, 32, 44, 31], [32, 29, 31, 25, 21, 23, 25, 39, 33, 21, 36, 21, 14, 23, 33, 27], [31, 16, 23, 21, 13, 20, 40, 13, 27, 33, 34, 31, 13, 40, 58, 24], [24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 14], [24, 21, 29, 31, 26, 18], [23, 22, 21, 32, 33, 24], [30, 30, 21, 23], [29, 23, 25, 18], [10, 20, 13, 18, 28], [12, 17, 18], [20, 15, 16, 16, 25, 21], [18, 26, 17, 22], [16, 15, 15], [25], [14, 18, 19, 16, 14, 20, 28, 13, 28, 39, 40, 29, 25], [27, 26, 18, 17, 20], [25, 25, 22, 19, 14], [21, 22, 18], [10, 29, 24, 21, 21], [13], [15], [25], [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 17, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21] ] missing_verses = { (40, 12): [47], (40, 17): [21], (40, 18): [11], (40, 23): [14], (41, 7): [16], (41, 9): [44, 46], (41, 11): [26], (41, 15): [28], (42, 17): [36], (42, 23): [17], (43, 5): [4], (44, 8): [37], (44, 15): [34], (44, 24): [7], 
(44, 28): [29], (45, 16): [24], } # Creating number_chapters, last_verses and number_verses_in_book dictionaries # from last_verse_data and missing_verses information number_chapters = {} last_verses = {} number_verses_in_book = {} for b, vv in enumerate(last_verse_data): book = b + 1 number_chapters[book] = len(vv) total_verses = 0 for c, last_verse in enumerate(vv): chapter = c + 1 last_verses[book, chapter] = last_verse total_verses += last_verse - \ len(missing_verses.get((book, chapter), [])) number_verses_in_book[book] = total_verses try: from urllib.parse import urlencode from urllib.request import urlopen, Request except ImportError: # Python 2 from urllib import urlencode from urllib2 import urlopen, Request import json from .text_cache import SimpleCache API_TOTAL_PROPORTION_OF_BOOK = 0.5 API_CONSECUTIVE_VERSES = 500 CACHE_TOTAL_PROPORTION_OF_BOOK = 0.5 CACHE_CONSECUTIVE_VERSES = 500 book_limits = dict([(k, v*CACHE_TOTAL_PROPORTION_OF_BOOK) for (k, v) in number_verses_in_book.items()]) default_cache = SimpleCache(CACHE_CONSECUTIVE_VERSES, book_limits) def get_passage_text(passage, api_key="", html=False, options={}, cache=default_cache):
""" Fetch biblical text (in ESV translation) corresponding to the provided Passage object. Returns tuple of (passage_text, truncated), where 'truncated' is a boolean indicating whether passage was shortened to comply with API conditions. Parameters: 'passage' is any object that returns a string representation of itself with str(passage), the total number of verses it contains with len(passage), and the book number with passage.book_n. 'api_key' is an alphanumeric code provided by the ESV API at https://api.esv.org/account/ 'options' is a dict of custom parameters, as per https://api.esv.org/v3/docs/ 'html' is a boolean indicating whether function should return passage in html format or plain-text format 'cache' is a dictionary-like object or function that stores tuples of (book_n, passage_length, passage_text) keyed to params string. """ # Set default parameters params = { "include-headings": "false", "include-footnotes": "false", "include-audio-link": "false", "include-passage-references": "false", "include-short-copyright": "false" } url = "https://api.esv.org/v3/passage/html/" # If we're just wanting plain-text: if not html: params.update({ "include-verse-numbers": "false", "include-first-verse-numbers": "false", "include-passage-horizontal-lines": "false", "include-heading-horizontal-lines": "false", "line-length": 0, }) url = "https://api.esv.org/v3/passage/text/" # Add in user-defined variables (possibly overwriting defaults) params.update(options) # Truncate passage to API limits, as necessary trun_pass = passage.truncate(API_CONSECUTIVE_VERSES, API_TOTAL_PROPORTION_OF_BOOK) if trun_pass is passage: truncated = False else: truncated = True # Add in passage reference params["q"] = str(trun_pass) # Construct parameters string from sorted variables param_string = urlencode(list(params.items())) # Check cache if cache.get(param_string, None) != None: return (cache[param_string][2], truncated) # Get text from ESV webservice try: q = Request(url+"?"+param_string) 
q.add_header("Authorization", "Token "+api_key) response = json.loads(urlopen(q).read()) text = "".join(response["passages"]) cache[param_string] = (trun_pass.book_n, len(trun_pass), text) return (text, truncated) except IOError: return ("Error: Could not fetch passage text!", truncated)
identifier_body
esv.py
""" This file provides the following dictionaries based on data from the ESV bible. Chapter lengths obtained via email from Crossway in Jan 2008. Dictionary | Keyed to | Returns --------------------------------------------------------------------------- book_names | book number | tuples of corresponding (three letter abbreviation, full name, standard abbreviation) book_numbers | book name | corresponding book number number_chapters | book number | number of chapters in that book last_verses | (book number, chapter) | last verse of that chapter --------------------------------------------------------------------------- In addition, the get_passage_text function is provided to look up passage text for a given passage. """ from .common import book_names, book_numbers # List recording the last verse in each chapter of each book in the bible. # e.g last verse in Leviticus chapter 5 is last_verse_data[2][4]. last_verse_data = [ [31, 25, 24, 26, 32, 22, 24, 22, 29, 32, 32, 20, 18, 24, 21, 16, 27, 33, 38, 18, 34, 24, 20, 67, 34, 35, 46, 22, 35, 43, 55, 32, 20, 31, 29, 43, 36, 30, 23, 23, 57, 38, 34, 34, 28, 34, 31, 22, 33, 26], [22, 25, 22, 31, 23, 30, 25, 32, 35, 29, 10, 51, 22, 31, 27, 36, 16, 27, 25, 26, 36, 31, 33, 18, 40, 37, 21, 43, 46, 38, 18, 35, 23, 35, 35, 38, 29, 31, 43, 38], [17, 16, 17, 35, 19, 30, 38, 36, 24, 20, 47, 8, 59, 57, 33, 34, 16, 30, 37, 27, 24, 33, 44, 23, 55, 46, 34], [54, 34, 51, 49, 31, 27, 89, 26, 23, 36, 35, 16, 33, 45, 41, 50, 13, 32, 22, 29, 35, 41, 30, 25, 18, 65, 23, 31, 40, 16, 54, 42, 56, 29, 34, 13], [46, 37, 29, 49, 33, 25, 26, 20, 29, 22, 32, 32, 18, 29, 23, 22, 20, 22, 21, 20, 23, 30, 25, 22, 19, 19, 26, 68, 29, 20, 30, 52, 29, 12], [18, 24, 17, 24, 15, 27, 26, 35, 27, 43, 23, 24, 33, 15, 63, 10, 18, 28, 51, 9, 45, 34, 16, 33], [36, 23, 31, 24, 31, 40, 25, 35, 57, 18, 40, 15, 25, 20, 20, 31, 13, 31, 30, 48, 25], [22, 23, 18, 22], [28, 36, 21, 22, 12, 21, 17, 22, 27, 27, 15, 25, 23, 52, 35, 23, 58, 30, 24, 42, 15, 23, 29, 22, 44, 25, 
12, 25, 11, 31, 13], [27, 32, 39, 12, 25, 23, 29, 18, 13, 19, 27, 31, 39, 33, 37, 23, 29, 33, 43, 26, 22, 51, 39, 25], [53, 46, 28, 34, 18, 38, 51, 66, 28, 29, 43, 33, 34, 31, 34, 34, 24, 46, 21, 43, 29, 53], [18, 25, 27, 44, 27, 33, 20, 29, 37, 36, 21, 21, 25, 29, 38, 20, 41, 37, 37, 21, 26, 20, 37, 20, 30], [54, 55, 24, 43, 26, 81, 40, 40, 44, 14, 47, 40, 14, 17, 29, 43, 27, 17, 19, 8, 30, 19, 32, 31, 31, 32, 34, 21, 30], [17, 18, 17, 22, 14, 42, 22, 18, 31, 19, 23, 16, 22, 15, 19, 14, 19, 34, 11, 37, 20, 12, 21, 27, 28, 23, 9, 27, 36, 27, 21, 33, 25, 33, 27, 23], [11, 70, 13, 24, 17, 22, 28, 36, 15, 44], [11, 20, 32, 23, 19, 19, 73, 18, 38, 39, 36, 47, 31], [22, 23, 15, 17, 14, 14, 10, 17, 32, 3], [22, 13, 26, 21, 27, 30, 21, 22, 35, 22, 20, 25, 28, 22, 35, 22, 16, 21, 29, 29, 34, 30, 17, 25, 6, 14, 23, 28, 25, 31, 40, 22, 33, 37, 16, 33, 24, 41, 30, 24, 34, 17], [6, 12, 8, 8, 12, 10, 17, 9, 20, 18, 7, 8, 6, 7, 5, 11, 15, 50, 14, 9, 13, 31, 6, 10, 22, 12, 14, 9, 11, 12, 24, 11, 22, 22, 28, 12, 40, 22, 13, 17, 13, 11, 5, 26, 17, 11, 9, 14, 20, 23, 19, 9, 6, 7, 23, 13, 11, 11, 17, 12, 8, 12, 11, 10, 13, 20, 7, 35, 36, 5, 24, 20, 28, 23, 10, 12, 20, 72, 13, 19, 16, 8, 18, 12, 13, 17, 7, 18, 52, 17, 16, 15, 5, 23, 11, 13, 12, 9, 9, 5, 8, 28, 22, 35, 45, 48, 43, 13, 31, 7, 10, 10, 9, 8, 18, 19, 2, 29, 176,7, 8, 9, 4, 8, 5, 6, 5, 6, 8, 8, 3, 18, 3, 3, 21, 26, 9, 8, 24, 13, 10, 7, 12, 15, 21, 10, 20, 14, 9, 6], [33, 22, 35, 27, 23, 35, 27, 36, 18, 32, 31, 28, 25, 35, 33, 33, 28, 24, 29, 30, 31, 29, 35, 34, 28, 28, 27, 28, 27, 33, 31], [18, 26, 22, 16, 20, 12, 29, 17, 18, 20, 10, 14], [17, 17, 11, 16, 16, 13, 13, 14], [31, 22, 26, 6, 30, 13, 25, 22, 21, 34, 16, 6, 22, 32, 9, 14, 14, 7, 25, 6, 17, 25, 18, 23, 12, 21, 13, 29, 24, 33, 9, 20, 24, 17, 10, 22, 38, 22, 8, 31, 29, 25, 28, 28, 25, 13, 15, 22, 26, 11, 23, 15, 12, 17, 13, 12, 21, 14, 21, 22, 11, 12, 19, 12, 25, 24], [19, 37, 25, 31, 31, 30, 34, 22, 26, 25, 23, 17, 27, 22, 21, 21, 27, 23, 15, 18, 14, 30, 40, 10, 
38, 24, 22, 17, 32, 24, 40, 44, 26, 22, 19, 32, 21, 28, 18, 16, 18, 22, 13, 30, 5, 28, 7, 47, 39, 46, 64, 34], [22, 22, 66, 22, 22], [28, 10, 27, 17, 17, 14, 27, 18, 11, 22, 25, 28, 23, 23, 8, 63, 24, 32, 14, 49, 32, 31, 49, 27, 17, 21, 36, 26, 21, 26, 18, 32, 33, 31, 15, 38, 28, 23, 29, 49, 26, 20, 27, 31, 25, 24, 23, 35], [21, 49, 30, 37, 31, 28, 28, 27, 27, 21, 45, 13], [11, 23, 5, 19, 15, 11, 16, 14, 17, 15, 12, 14, 16, 9], [20, 32, 21], [15, 16, 15, 13, 27, 14, 17, 14, 15], [21], [17, 10, 10, 11], [16, 13, 12, 13, 15, 16, 20], [15, 13, 19], [17, 20, 19], [18, 15, 20], [15, 23], [21, 13, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21], [14, 17, 18, 6], [25, 23, 17, 25, 48, 34, 29, 34, 38, 42, 30, 50, 58, 36, 39, 28, 27, 35, 30, 34, 46, 46, 39, 51, 46, 75, 66, 20], [45, 28, 35, 41, 43, 56, 37, 38, 50, 52, 33, 44, 37, 72, 47, 20], [80, 52, 38, 44, 39, 49, 50, 56, 62, 42, 54, 59, 35, 35, 32, 31, 37, 43, 48, 47, 38, 71, 56, 53], [51, 25, 36, 54, 47, 71, 53, 59, 41, 42, 57, 50, 38, 31, 27, 33, 26, 40, 42, 31, 25], [26, 47, 26, 37, 42, 15, 60, 40, 43, 48, 30, 25, 52, 28, 41, 40, 34, 28, 41, 38, 40, 30, 35, 27, 27, 32, 44, 31], [32, 29, 31, 25, 21, 23, 25, 39, 33, 21, 36, 21, 14, 23, 33, 27], [31, 16, 23, 21, 13, 20, 40, 13, 27, 33, 34, 31, 13, 40, 58, 24], [24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 14], [24, 21, 29, 31, 26, 18], [23, 22, 21, 32, 33, 24], [30, 30, 21, 23], [29, 23, 25, 18], [10, 20, 13, 18, 28], [12, 17, 18], [20, 15, 16, 16, 25, 21], [18, 26, 17, 22], [16, 15, 15], [25], [14, 18, 19, 16, 14, 20, 28, 13, 28, 39, 40, 29, 25], [27, 26, 18, 17, 20], [25, 25, 22, 19, 14], [21, 22, 18], [10, 29, 24, 21, 21], [13], [15], [25], [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 17, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21] ] missing_verses = { (40, 12): [47], (40, 17): [21], (40, 18): [11], (40, 23): [14], (41, 7): [16], (41, 9): [44, 46], (41, 11): [26], (41, 15): [28], (42, 17): [36], (42, 23): [17], (43, 5): [4], (44, 8): [37], (44, 15): [34], (44, 24): [7], 
(44, 28): [29], (45, 16): [24], } # Creating number_chapters, last_verses and number_verses_in_book dictionaries # from last_verse_data and missing_verses information number_chapters = {} last_verses = {} number_verses_in_book = {} for b, vv in enumerate(last_verse_data): book = b + 1 number_chapters[book] = len(vv) total_verses = 0 for c, last_verse in enumerate(vv):
number_verses_in_book[book] = total_verses try: from urllib.parse import urlencode from urllib.request import urlopen, Request except ImportError: # Python 2 from urllib import urlencode from urllib2 import urlopen, Request import json from .text_cache import SimpleCache API_TOTAL_PROPORTION_OF_BOOK = 0.5 API_CONSECUTIVE_VERSES = 500 CACHE_TOTAL_PROPORTION_OF_BOOK = 0.5 CACHE_CONSECUTIVE_VERSES = 500 book_limits = dict([(k, v*CACHE_TOTAL_PROPORTION_OF_BOOK) for (k, v) in number_verses_in_book.items()]) default_cache = SimpleCache(CACHE_CONSECUTIVE_VERSES, book_limits) def get_passage_text(passage, api_key="", html=False, options={}, cache=default_cache): """ Fetch biblical text (in ESV translation) corresponding to the provided Passage object. Returns tuple of (passage_text, truncated), where 'truncated' is a boolean indicating whether passage was shortened to comply with API conditions. Parameters: 'passage' is any object that returns a string representation of itself with str(passage), the total number of verses it contains with len(passage), and the book number with passage.book_n. 'api_key' is an alphanumeric code provided by the ESV API at https://api.esv.org/account/ 'options' is a dict of custom parameters, as per https://api.esv.org/v3/docs/ 'html' is a boolean indicating whether function should return passage in html format or plain-text format 'cache' is a dictionary-like object or function that stores tuples of (book_n, passage_length, passage_text) keyed to params string. 
""" # Set default parameters params = { "include-headings": "false", "include-footnotes": "false", "include-audio-link": "false", "include-passage-references": "false", "include-short-copyright": "false" } url = "https://api.esv.org/v3/passage/html/" # If we're just wanting plain-text: if not html: params.update({ "include-verse-numbers": "false", "include-first-verse-numbers": "false", "include-passage-horizontal-lines": "false", "include-heading-horizontal-lines": "false", "line-length": 0, }) url = "https://api.esv.org/v3/passage/text/" # Add in user-defined variables (possibly overwriting defaults) params.update(options) # Truncate passage to API limits, as necessary trun_pass = passage.truncate(API_CONSECUTIVE_VERSES, API_TOTAL_PROPORTION_OF_BOOK) if trun_pass is passage: truncated = False else: truncated = True # Add in passage reference params["q"] = str(trun_pass) # Construct parameters string from sorted variables param_string = urlencode(list(params.items())) # Check cache if cache.get(param_string, None) != None: return (cache[param_string][2], truncated) # Get text from ESV webservice try: q = Request(url+"?"+param_string) q.add_header("Authorization", "Token "+api_key) response = json.loads(urlopen(q).read()) text = "".join(response["passages"]) cache[param_string] = (trun_pass.book_n, len(trun_pass), text) return (text, truncated) except IOError: return ("Error: Could not fetch passage text!", truncated)
chapter = c + 1 last_verses[book, chapter] = last_verse total_verses += last_verse - \ len(missing_verses.get((book, chapter), []))
conditional_block
esv.py
""" This file provides the following dictionaries based on data from the ESV bible. Chapter lengths obtained via email from Crossway in Jan 2008. Dictionary | Keyed to | Returns --------------------------------------------------------------------------- book_names | book number | tuples of corresponding (three letter abbreviation, full name, standard abbreviation) book_numbers | book name | corresponding book number number_chapters | book number | number of chapters in that book last_verses | (book number, chapter) | last verse of that chapter --------------------------------------------------------------------------- In addition, the get_passage_text function is provided to look up passage text for a given passage. """ from .common import book_names, book_numbers # List recording the last verse in each chapter of each book in the bible. # e.g last verse in Leviticus chapter 5 is last_verse_data[2][4]. last_verse_data = [ [31, 25, 24, 26, 32, 22, 24, 22, 29, 32, 32, 20, 18, 24, 21, 16, 27, 33, 38, 18, 34, 24, 20, 67, 34, 35, 46, 22, 35, 43, 55, 32, 20, 31, 29, 43, 36, 30, 23, 23, 57, 38, 34, 34, 28, 34, 31, 22, 33, 26], [22, 25, 22, 31, 23, 30, 25, 32, 35, 29, 10, 51, 22, 31, 27, 36, 16, 27, 25, 26, 36, 31, 33, 18, 40, 37, 21, 43, 46, 38, 18, 35, 23, 35, 35, 38, 29, 31, 43, 38], [17, 16, 17, 35, 19, 30, 38, 36, 24, 20, 47, 8, 59, 57, 33, 34, 16, 30, 37, 27, 24, 33, 44, 23, 55, 46, 34], [54, 34, 51, 49, 31, 27, 89, 26, 23, 36, 35, 16, 33, 45, 41, 50, 13, 32, 22, 29, 35, 41, 30, 25, 18, 65, 23, 31, 40, 16, 54, 42, 56, 29, 34, 13], [46, 37, 29, 49, 33, 25, 26, 20, 29, 22, 32, 32, 18, 29, 23, 22, 20, 22, 21, 20, 23, 30, 25, 22, 19, 19, 26, 68, 29, 20, 30, 52, 29, 12], [18, 24, 17, 24, 15, 27, 26, 35, 27, 43, 23, 24, 33, 15, 63, 10, 18, 28, 51, 9, 45, 34, 16, 33], [36, 23, 31, 24, 31, 40, 25, 35, 57, 18, 40, 15, 25, 20, 20, 31, 13, 31, 30, 48, 25], [22, 23, 18, 22], [28, 36, 21, 22, 12, 21, 17, 22, 27, 27, 15, 25, 23, 52, 35, 23, 58, 30, 24, 42, 15, 23, 29, 22, 44, 25, 
12, 25, 11, 31, 13], [27, 32, 39, 12, 25, 23, 29, 18, 13, 19, 27, 31, 39, 33, 37, 23, 29, 33, 43, 26, 22, 51, 39, 25], [53, 46, 28, 34, 18, 38, 51, 66, 28, 29, 43, 33, 34, 31, 34, 34, 24, 46, 21, 43, 29, 53], [18, 25, 27, 44, 27, 33, 20, 29, 37, 36, 21, 21, 25, 29, 38, 20, 41, 37, 37, 21, 26, 20, 37, 20, 30], [54, 55, 24, 43, 26, 81, 40, 40, 44, 14, 47, 40, 14, 17, 29, 43, 27, 17, 19, 8, 30, 19, 32, 31, 31, 32, 34, 21, 30], [17, 18, 17, 22, 14, 42, 22, 18, 31, 19, 23, 16, 22, 15, 19, 14, 19, 34, 11, 37, 20, 12, 21, 27, 28, 23, 9, 27, 36, 27, 21, 33, 25, 33, 27, 23], [11, 70, 13, 24, 17, 22, 28, 36, 15, 44], [11, 20, 32, 23, 19, 19, 73, 18, 38, 39, 36, 47, 31], [22, 23, 15, 17, 14, 14, 10, 17, 32, 3], [22, 13, 26, 21, 27, 30, 21, 22, 35, 22, 20, 25, 28, 22, 35, 22, 16, 21, 29, 29, 34, 30, 17, 25, 6, 14, 23, 28, 25, 31, 40, 22, 33, 37, 16, 33, 24, 41, 30, 24, 34, 17], [6, 12, 8, 8, 12, 10, 17, 9, 20, 18, 7, 8, 6, 7, 5, 11, 15, 50, 14, 9, 13, 31, 6, 10, 22, 12, 14, 9, 11, 12, 24, 11, 22, 22, 28, 12, 40, 22, 13, 17, 13, 11, 5, 26, 17, 11, 9, 14, 20, 23, 19, 9, 6, 7, 23, 13, 11, 11, 17, 12, 8, 12, 11, 10, 13, 20, 7, 35, 36, 5, 24, 20, 28, 23, 10, 12, 20, 72, 13, 19, 16, 8, 18, 12, 13, 17, 7, 18, 52, 17,
31, 7, 10, 10, 9, 8, 18, 19, 2, 29, 176,7, 8, 9, 4, 8, 5, 6, 5, 6, 8, 8, 3, 18, 3, 3, 21, 26, 9, 8, 24, 13, 10, 7, 12, 15, 21, 10, 20, 14, 9, 6], [33, 22, 35, 27, 23, 35, 27, 36, 18, 32, 31, 28, 25, 35, 33, 33, 28, 24, 29, 30, 31, 29, 35, 34, 28, 28, 27, 28, 27, 33, 31], [18, 26, 22, 16, 20, 12, 29, 17, 18, 20, 10, 14], [17, 17, 11, 16, 16, 13, 13, 14], [31, 22, 26, 6, 30, 13, 25, 22, 21, 34, 16, 6, 22, 32, 9, 14, 14, 7, 25, 6, 17, 25, 18, 23, 12, 21, 13, 29, 24, 33, 9, 20, 24, 17, 10, 22, 38, 22, 8, 31, 29, 25, 28, 28, 25, 13, 15, 22, 26, 11, 23, 15, 12, 17, 13, 12, 21, 14, 21, 22, 11, 12, 19, 12, 25, 24], [19, 37, 25, 31, 31, 30, 34, 22, 26, 25, 23, 17, 27, 22, 21, 21, 27, 23, 15, 18, 14, 30, 40, 10, 38, 24, 22, 17, 32, 24, 40, 44, 26, 22, 19, 32, 21, 28, 18, 16, 18, 22, 13, 30, 5, 28, 7, 47, 39, 46, 64, 34], [22, 22, 66, 22, 22], [28, 10, 27, 17, 17, 14, 27, 18, 11, 22, 25, 28, 23, 23, 8, 63, 24, 32, 14, 49, 32, 31, 49, 27, 17, 21, 36, 26, 21, 26, 18, 32, 33, 31, 15, 38, 28, 23, 29, 49, 26, 20, 27, 31, 25, 24, 23, 35], [21, 49, 30, 37, 31, 28, 28, 27, 27, 21, 45, 13], [11, 23, 5, 19, 15, 11, 16, 14, 17, 15, 12, 14, 16, 9], [20, 32, 21], [15, 16, 15, 13, 27, 14, 17, 14, 15], [21], [17, 10, 10, 11], [16, 13, 12, 13, 15, 16, 20], [15, 13, 19], [17, 20, 19], [18, 15, 20], [15, 23], [21, 13, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21], [14, 17, 18, 6], [25, 23, 17, 25, 48, 34, 29, 34, 38, 42, 30, 50, 58, 36, 39, 28, 27, 35, 30, 34, 46, 46, 39, 51, 46, 75, 66, 20], [45, 28, 35, 41, 43, 56, 37, 38, 50, 52, 33, 44, 37, 72, 47, 20], [80, 52, 38, 44, 39, 49, 50, 56, 62, 42, 54, 59, 35, 35, 32, 31, 37, 43, 48, 47, 38, 71, 56, 53], [51, 25, 36, 54, 47, 71, 53, 59, 41, 42, 57, 50, 38, 31, 27, 33, 26, 40, 42, 31, 25], [26, 47, 26, 37, 42, 15, 60, 40, 43, 48, 30, 25, 52, 28, 41, 40, 34, 28, 41, 38, 40, 30, 35, 27, 27, 32, 44, 31], [32, 29, 31, 25, 21, 23, 25, 39, 33, 21, 36, 21, 14, 23, 33, 27], [31, 16, 23, 21, 13, 20, 40, 13, 27, 33, 34, 31, 13, 40, 58, 24], [24, 17, 18, 
18, 21, 18, 16, 24, 15, 18, 33, 21, 14], [24, 21, 29, 31, 26, 18], [23, 22, 21, 32, 33, 24], [30, 30, 21, 23], [29, 23, 25, 18], [10, 20, 13, 18, 28], [12, 17, 18], [20, 15, 16, 16, 25, 21], [18, 26, 17, 22], [16, 15, 15], [25], [14, 18, 19, 16, 14, 20, 28, 13, 28, 39, 40, 29, 25], [27, 26, 18, 17, 20], [25, 25, 22, 19, 14], [21, 22, 18], [10, 29, 24, 21, 21], [13], [15], [25], [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 17, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21] ] missing_verses = { (40, 12): [47], (40, 17): [21], (40, 18): [11], (40, 23): [14], (41, 7): [16], (41, 9): [44, 46], (41, 11): [26], (41, 15): [28], (42, 17): [36], (42, 23): [17], (43, 5): [4], (44, 8): [37], (44, 15): [34], (44, 24): [7], (44, 28): [29], (45, 16): [24], } # Creating number_chapters, last_verses and number_verses_in_book dictionaries # from last_verse_data and missing_verses information number_chapters = {} last_verses = {} number_verses_in_book = {} for b, vv in enumerate(last_verse_data): book = b + 1 number_chapters[book] = len(vv) total_verses = 0 for c, last_verse in enumerate(vv): chapter = c + 1 last_verses[book, chapter] = last_verse total_verses += last_verse - \ len(missing_verses.get((book, chapter), [])) number_verses_in_book[book] = total_verses try: from urllib.parse import urlencode from urllib.request import urlopen, Request except ImportError: # Python 2 from urllib import urlencode from urllib2 import urlopen, Request import json from .text_cache import SimpleCache API_TOTAL_PROPORTION_OF_BOOK = 0.5 API_CONSECUTIVE_VERSES = 500 CACHE_TOTAL_PROPORTION_OF_BOOK = 0.5 CACHE_CONSECUTIVE_VERSES = 500 book_limits = dict([(k, v*CACHE_TOTAL_PROPORTION_OF_BOOK) for (k, v) in number_verses_in_book.items()]) default_cache = SimpleCache(CACHE_CONSECUTIVE_VERSES, book_limits) def get_passage_text(passage, api_key="", html=False, options={}, cache=default_cache): """ Fetch biblical text (in ESV translation) corresponding to the provided Passage object. 
Returns tuple of (passage_text, truncated), where 'truncated' is a boolean indicating whether passage was shortened to comply with API conditions. Parameters: 'passage' is any object that returns a string representation of itself with str(passage), the total number of verses it contains with len(passage), and the book number with passage.book_n. 'api_key' is an alphanumeric code provided by the ESV API at https://api.esv.org/account/ 'options' is a dict of custom parameters, as per https://api.esv.org/v3/docs/ 'html' is a boolean indicating whether function should return passage in html format or plain-text format 'cache' is a dictionary-like object or function that stores tuples of (book_n, passage_length, passage_text) keyed to params string. """ # Set default parameters params = { "include-headings": "false", "include-footnotes": "false", "include-audio-link": "false", "include-passage-references": "false", "include-short-copyright": "false" } url = "https://api.esv.org/v3/passage/html/" # If we're just wanting plain-text: if not html: params.update({ "include-verse-numbers": "false", "include-first-verse-numbers": "false", "include-passage-horizontal-lines": "false", "include-heading-horizontal-lines": "false", "line-length": 0, }) url = "https://api.esv.org/v3/passage/text/" # Add in user-defined variables (possibly overwriting defaults) params.update(options) # Truncate passage to API limits, as necessary trun_pass = passage.truncate(API_CONSECUTIVE_VERSES, API_TOTAL_PROPORTION_OF_BOOK) if trun_pass is passage: truncated = False else: truncated = True # Add in passage reference params["q"] = str(trun_pass) # Construct parameters string from sorted variables param_string = urlencode(list(params.items())) # Check cache if cache.get(param_string, None) != None: return (cache[param_string][2], truncated) # Get text from ESV webservice try: q = Request(url+"?"+param_string) q.add_header("Authorization", "Token "+api_key) response = json.loads(urlopen(q).read()) 
text = "".join(response["passages"]) cache[param_string] = (trun_pass.book_n, len(trun_pass), text) return (text, truncated) except IOError: return ("Error: Could not fetch passage text!", truncated)
16, 15, 5, 23, 11, 13, 12, 9, 9, 5, 8, 28, 22, 35, 45, 48, 43, 13,
random_line_split
esv.py
""" This file provides the following dictionaries based on data from the ESV bible. Chapter lengths obtained via email from Crossway in Jan 2008. Dictionary | Keyed to | Returns --------------------------------------------------------------------------- book_names | book number | tuples of corresponding (three letter abbreviation, full name, standard abbreviation) book_numbers | book name | corresponding book number number_chapters | book number | number of chapters in that book last_verses | (book number, chapter) | last verse of that chapter --------------------------------------------------------------------------- In addition, the get_passage_text function is provided to look up passage text for a given passage. """ from .common import book_names, book_numbers # List recording the last verse in each chapter of each book in the bible. # e.g last verse in Leviticus chapter 5 is last_verse_data[2][4]. last_verse_data = [ [31, 25, 24, 26, 32, 22, 24, 22, 29, 32, 32, 20, 18, 24, 21, 16, 27, 33, 38, 18, 34, 24, 20, 67, 34, 35, 46, 22, 35, 43, 55, 32, 20, 31, 29, 43, 36, 30, 23, 23, 57, 38, 34, 34, 28, 34, 31, 22, 33, 26], [22, 25, 22, 31, 23, 30, 25, 32, 35, 29, 10, 51, 22, 31, 27, 36, 16, 27, 25, 26, 36, 31, 33, 18, 40, 37, 21, 43, 46, 38, 18, 35, 23, 35, 35, 38, 29, 31, 43, 38], [17, 16, 17, 35, 19, 30, 38, 36, 24, 20, 47, 8, 59, 57, 33, 34, 16, 30, 37, 27, 24, 33, 44, 23, 55, 46, 34], [54, 34, 51, 49, 31, 27, 89, 26, 23, 36, 35, 16, 33, 45, 41, 50, 13, 32, 22, 29, 35, 41, 30, 25, 18, 65, 23, 31, 40, 16, 54, 42, 56, 29, 34, 13], [46, 37, 29, 49, 33, 25, 26, 20, 29, 22, 32, 32, 18, 29, 23, 22, 20, 22, 21, 20, 23, 30, 25, 22, 19, 19, 26, 68, 29, 20, 30, 52, 29, 12], [18, 24, 17, 24, 15, 27, 26, 35, 27, 43, 23, 24, 33, 15, 63, 10, 18, 28, 51, 9, 45, 34, 16, 33], [36, 23, 31, 24, 31, 40, 25, 35, 57, 18, 40, 15, 25, 20, 20, 31, 13, 31, 30, 48, 25], [22, 23, 18, 22], [28, 36, 21, 22, 12, 21, 17, 22, 27, 27, 15, 25, 23, 52, 35, 23, 58, 30, 24, 42, 15, 23, 29, 22, 44, 25, 
12, 25, 11, 31, 13], [27, 32, 39, 12, 25, 23, 29, 18, 13, 19, 27, 31, 39, 33, 37, 23, 29, 33, 43, 26, 22, 51, 39, 25], [53, 46, 28, 34, 18, 38, 51, 66, 28, 29, 43, 33, 34, 31, 34, 34, 24, 46, 21, 43, 29, 53], [18, 25, 27, 44, 27, 33, 20, 29, 37, 36, 21, 21, 25, 29, 38, 20, 41, 37, 37, 21, 26, 20, 37, 20, 30], [54, 55, 24, 43, 26, 81, 40, 40, 44, 14, 47, 40, 14, 17, 29, 43, 27, 17, 19, 8, 30, 19, 32, 31, 31, 32, 34, 21, 30], [17, 18, 17, 22, 14, 42, 22, 18, 31, 19, 23, 16, 22, 15, 19, 14, 19, 34, 11, 37, 20, 12, 21, 27, 28, 23, 9, 27, 36, 27, 21, 33, 25, 33, 27, 23], [11, 70, 13, 24, 17, 22, 28, 36, 15, 44], [11, 20, 32, 23, 19, 19, 73, 18, 38, 39, 36, 47, 31], [22, 23, 15, 17, 14, 14, 10, 17, 32, 3], [22, 13, 26, 21, 27, 30, 21, 22, 35, 22, 20, 25, 28, 22, 35, 22, 16, 21, 29, 29, 34, 30, 17, 25, 6, 14, 23, 28, 25, 31, 40, 22, 33, 37, 16, 33, 24, 41, 30, 24, 34, 17], [6, 12, 8, 8, 12, 10, 17, 9, 20, 18, 7, 8, 6, 7, 5, 11, 15, 50, 14, 9, 13, 31, 6, 10, 22, 12, 14, 9, 11, 12, 24, 11, 22, 22, 28, 12, 40, 22, 13, 17, 13, 11, 5, 26, 17, 11, 9, 14, 20, 23, 19, 9, 6, 7, 23, 13, 11, 11, 17, 12, 8, 12, 11, 10, 13, 20, 7, 35, 36, 5, 24, 20, 28, 23, 10, 12, 20, 72, 13, 19, 16, 8, 18, 12, 13, 17, 7, 18, 52, 17, 16, 15, 5, 23, 11, 13, 12, 9, 9, 5, 8, 28, 22, 35, 45, 48, 43, 13, 31, 7, 10, 10, 9, 8, 18, 19, 2, 29, 176,7, 8, 9, 4, 8, 5, 6, 5, 6, 8, 8, 3, 18, 3, 3, 21, 26, 9, 8, 24, 13, 10, 7, 12, 15, 21, 10, 20, 14, 9, 6], [33, 22, 35, 27, 23, 35, 27, 36, 18, 32, 31, 28, 25, 35, 33, 33, 28, 24, 29, 30, 31, 29, 35, 34, 28, 28, 27, 28, 27, 33, 31], [18, 26, 22, 16, 20, 12, 29, 17, 18, 20, 10, 14], [17, 17, 11, 16, 16, 13, 13, 14], [31, 22, 26, 6, 30, 13, 25, 22, 21, 34, 16, 6, 22, 32, 9, 14, 14, 7, 25, 6, 17, 25, 18, 23, 12, 21, 13, 29, 24, 33, 9, 20, 24, 17, 10, 22, 38, 22, 8, 31, 29, 25, 28, 28, 25, 13, 15, 22, 26, 11, 23, 15, 12, 17, 13, 12, 21, 14, 21, 22, 11, 12, 19, 12, 25, 24], [19, 37, 25, 31, 31, 30, 34, 22, 26, 25, 23, 17, 27, 22, 21, 21, 27, 23, 15, 18, 14, 30, 40, 10, 
38, 24, 22, 17, 32, 24, 40, 44, 26, 22, 19, 32, 21, 28, 18, 16, 18, 22, 13, 30, 5, 28, 7, 47, 39, 46, 64, 34], [22, 22, 66, 22, 22], [28, 10, 27, 17, 17, 14, 27, 18, 11, 22, 25, 28, 23, 23, 8, 63, 24, 32, 14, 49, 32, 31, 49, 27, 17, 21, 36, 26, 21, 26, 18, 32, 33, 31, 15, 38, 28, 23, 29, 49, 26, 20, 27, 31, 25, 24, 23, 35], [21, 49, 30, 37, 31, 28, 28, 27, 27, 21, 45, 13], [11, 23, 5, 19, 15, 11, 16, 14, 17, 15, 12, 14, 16, 9], [20, 32, 21], [15, 16, 15, 13, 27, 14, 17, 14, 15], [21], [17, 10, 10, 11], [16, 13, 12, 13, 15, 16, 20], [15, 13, 19], [17, 20, 19], [18, 15, 20], [15, 23], [21, 13, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21], [14, 17, 18, 6], [25, 23, 17, 25, 48, 34, 29, 34, 38, 42, 30, 50, 58, 36, 39, 28, 27, 35, 30, 34, 46, 46, 39, 51, 46, 75, 66, 20], [45, 28, 35, 41, 43, 56, 37, 38, 50, 52, 33, 44, 37, 72, 47, 20], [80, 52, 38, 44, 39, 49, 50, 56, 62, 42, 54, 59, 35, 35, 32, 31, 37, 43, 48, 47, 38, 71, 56, 53], [51, 25, 36, 54, 47, 71, 53, 59, 41, 42, 57, 50, 38, 31, 27, 33, 26, 40, 42, 31, 25], [26, 47, 26, 37, 42, 15, 60, 40, 43, 48, 30, 25, 52, 28, 41, 40, 34, 28, 41, 38, 40, 30, 35, 27, 27, 32, 44, 31], [32, 29, 31, 25, 21, 23, 25, 39, 33, 21, 36, 21, 14, 23, 33, 27], [31, 16, 23, 21, 13, 20, 40, 13, 27, 33, 34, 31, 13, 40, 58, 24], [24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 14], [24, 21, 29, 31, 26, 18], [23, 22, 21, 32, 33, 24], [30, 30, 21, 23], [29, 23, 25, 18], [10, 20, 13, 18, 28], [12, 17, 18], [20, 15, 16, 16, 25, 21], [18, 26, 17, 22], [16, 15, 15], [25], [14, 18, 19, 16, 14, 20, 28, 13, 28, 39, 40, 29, 25], [27, 26, 18, 17, 20], [25, 25, 22, 19, 14], [21, 22, 18], [10, 29, 24, 21, 21], [13], [15], [25], [20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 17, 18, 20, 8, 21, 18, 24, 21, 15, 27, 21] ] missing_verses = { (40, 12): [47], (40, 17): [21], (40, 18): [11], (40, 23): [14], (41, 7): [16], (41, 9): [44, 46], (41, 11): [26], (41, 15): [28], (42, 17): [36], (42, 23): [17], (43, 5): [4], (44, 8): [37], (44, 15): [34], (44, 24): [7], 
(44, 28): [29], (45, 16): [24], } # Creating number_chapters, last_verses and number_verses_in_book dictionaries # from last_verse_data and missing_verses information number_chapters = {} last_verses = {} number_verses_in_book = {} for b, vv in enumerate(last_verse_data): book = b + 1 number_chapters[book] = len(vv) total_verses = 0 for c, last_verse in enumerate(vv): chapter = c + 1 last_verses[book, chapter] = last_verse total_verses += last_verse - \ len(missing_verses.get((book, chapter), [])) number_verses_in_book[book] = total_verses try: from urllib.parse import urlencode from urllib.request import urlopen, Request except ImportError: # Python 2 from urllib import urlencode from urllib2 import urlopen, Request import json from .text_cache import SimpleCache API_TOTAL_PROPORTION_OF_BOOK = 0.5 API_CONSECUTIVE_VERSES = 500 CACHE_TOTAL_PROPORTION_OF_BOOK = 0.5 CACHE_CONSECUTIVE_VERSES = 500 book_limits = dict([(k, v*CACHE_TOTAL_PROPORTION_OF_BOOK) for (k, v) in number_verses_in_book.items()]) default_cache = SimpleCache(CACHE_CONSECUTIVE_VERSES, book_limits) def
(passage, api_key="", html=False, options={}, cache=default_cache): """ Fetch biblical text (in ESV translation) corresponding to the provided Passage object. Returns tuple of (passage_text, truncated), where 'truncated' is a boolean indicating whether passage was shortened to comply with API conditions. Parameters: 'passage' is any object that returns a string representation of itself with str(passage), the total number of verses it contains with len(passage), and the book number with passage.book_n. 'api_key' is an alphanumeric code provided by the ESV API at https://api.esv.org/account/ 'options' is a dict of custom parameters, as per https://api.esv.org/v3/docs/ 'html' is a boolean indicating whether function should return passage in html format or plain-text format 'cache' is a dictionary-like object or function that stores tuples of (book_n, passage_length, passage_text) keyed to params string. """ # Set default parameters params = { "include-headings": "false", "include-footnotes": "false", "include-audio-link": "false", "include-passage-references": "false", "include-short-copyright": "false" } url = "https://api.esv.org/v3/passage/html/" # If we're just wanting plain-text: if not html: params.update({ "include-verse-numbers": "false", "include-first-verse-numbers": "false", "include-passage-horizontal-lines": "false", "include-heading-horizontal-lines": "false", "line-length": 0, }) url = "https://api.esv.org/v3/passage/text/" # Add in user-defined variables (possibly overwriting defaults) params.update(options) # Truncate passage to API limits, as necessary trun_pass = passage.truncate(API_CONSECUTIVE_VERSES, API_TOTAL_PROPORTION_OF_BOOK) if trun_pass is passage: truncated = False else: truncated = True # Add in passage reference params["q"] = str(trun_pass) # Construct parameters string from sorted variables param_string = urlencode(list(params.items())) # Check cache if cache.get(param_string, None) != None: return (cache[param_string][2], truncated) # 
Get text from ESV webservice try: q = Request(url+"?"+param_string) q.add_header("Authorization", "Token "+api_key) response = json.loads(urlopen(q).read()) text = "".join(response["passages"]) cache[param_string] = (trun_pass.book_n, len(trun_pass), text) return (text, truncated) except IOError: return ("Error: Could not fetch passage text!", truncated)
get_passage_text
identifier_name
codegen.go
package asn1go import ( "errors" "fmt" goast "go/ast" goprint "go/printer" gotoken "go/token" "io" "strings" ) // CodeGenerator is an interface for code generation from ASN.1 modules. type CodeGenerator interface { Generate(module ModuleDefinition, writer io.Writer) error } // GenParams is code generator configuration. type GenParams struct { // Package is go package name. // If not specified, ASN.1 module name will be used to derive go module name. Package string // Type is a type of code generation to run. // TODO: deprecate in favor of separate New methods. Type GenType // IntegerRepr controls how INTEGER type is expressed in generated go code. IntegerRepr IntegerRepr } // GenType is code generator type. type GenType int const ( // GEN_DECLARATIONS is code generator that is GEN_DECLARATIONS GenType = iota ) // IntegerRepr is enum controlling how INTEGER is represented. type IntegerRepr string // IntegerRepr modes supported. const ( IntegerReprInt64 IntegerRepr = "int64" IntegerReprBigInt IntegerRepr = "big.Int" ) // NewCodeGenerator creates a new code generator from provided params. func NewCodeGenerator(params GenParams) CodeGenerator { if params.IntegerRepr == "" { params.IntegerRepr = IntegerReprInt64 } switch params.Type { case GEN_DECLARATIONS: return &declCodeGen{params} default: return nil } } type declCodeGen struct { Params GenParams } // moduleContext is context used to track state of the code generation. type moduleContext struct { extensibilityImplied bool // tagDefault is a ModuleDefinition.TagDefault value. tagDefault int // errors collected during conversion. // TODO: switch to explicit error passing. errors []error lookupContext ModuleBody // requiredModules holds go modules required by generated code. requiredModules []string params GenParams } func (ctx *moduleContext) appendError(err error) { ctx.errors = append(ctx.errors, err) } func (ctx *moduleContext) requireModule(module string)
// Generate declarations from module to be used together with encoding/asn1. // // Feature support status: // - [x] ModuleIdentifier // - [x] TagDefault (except AUTOMATIC) // - [ ] ExtensibilityImplied // - [.] ModuleBody -- see moduleContext.generateDeclarations. func (gen declCodeGen) Generate(module ModuleDefinition, writer io.Writer) error { if module.TagDefault == TAGS_AUTOMATIC { // See x.680, section 12.3. It implies certain transformations to component and alternative lists that are not implemented. return errors.New("AUTOMATIC tagged modules are not supported") } ctx := moduleContext{ extensibilityImplied: module.ExtensibilityImplied, tagDefault: module.TagDefault, lookupContext: module.ModuleBody, params: gen.Params, } moduleName := goast.NewIdent(goifyName(module.ModuleIdentifier.Reference)) if len(gen.Params.Package) > 0 { moduleName = goast.NewIdent(gen.Params.Package) } ast := &goast.File{ Name: moduleName, Decls: ctx.generateDeclarations(module), } if len(ctx.errors) != 0 { msg := "errors generating Go AST from module: \n" for _, err := range ctx.errors { msg += " " + err.Error() + "\n" } return errors.New(msg) } importDecls := make([]goast.Decl, 0) for _, moduleName := range ctx.requiredModules { modulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf("\"%v\"", moduleName)} specs := []goast.Spec{&goast.ImportSpec{Path: modulePath}} importDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: specs}) } ast.Decls = append(importDecls, ast.Decls...) return goprint.Fprint(writer, gotoken.NewFileSet(), ast) } func goifyName(name string) string { return strings.Title(strings.Replace(name, "-", "_", -1)) } // generateDeclarations produces go declarations based on ModuleBody of module. // // Feature support status: // - [.] 
AssignmentList // - [ ] ValueAssignment // - [x] TypeAssignment // - [ ] Imports func (ctx *moduleContext) generateDeclarations(module ModuleDefinition) []goast.Decl { decls := make([]goast.Decl, 0) for _, assignment := range module.ModuleBody.AssignmentList { switch a := assignment.(type) { case TypeAssignment: decls = append(decls, ctx.generateTypeDecl(a.TypeReference, a.Type)) if decl := ctx.generateAssociatedValuesIfNeeded(a.TypeReference, a.Type); decl != nil { decls = append(decls, decl) } case ValueAssignment: if decl := ctx.tryGenerateValueAssignment(a.ValueReference, a.Type, a.Value); decl != nil { decls = append(decls, decl) } } } return decls } func (ctx *moduleContext) generateTypeDecl(reference TypeReference, typeDescr Type) goast.Decl { var isSet bool typeBody := ctx.generateTypeBody(typeDescr, &isSet) spec := &goast.TypeSpec{ Name: goast.NewIdent(goifyName(reference.Name())), Type: typeBody, Assign: 1, // not a valid Pos, but formatter just needs non-empty value } decl := &goast.GenDecl{ Tok: gotoken.TYPE, Specs: []goast.Spec{spec}, } if _, ok := typeBody.(*goast.StructType); ok { spec.Assign = 0 } if isSet { oldName := spec.Name.Name spec.Name.Name += "SET" spec.Assign = 0 newName := spec.Name.Name decl.Specs = append(decl.Specs, &goast.TypeSpec{ Name: goast.NewIdent(oldName), Assign: 1, Type: goast.NewIdent(newName), }) } return decl } func (ctx *moduleContext) tryGenerateValueAssignment(ref ValueReference, t Type, val Value) goast.Decl { stubIsSet := false var valExpr goast.Expr switch val := val.(type) { case Number: valExpr = numberToExpr(val, ctx.params.IntegerRepr) case Boolean: if val { valExpr = &goast.BasicLit{Value: "true"} } else { valExpr = &goast.BasicLit{Value: "false"} } case Real: valExpr = &goast.BasicLit{Value: fmt.Sprint(val)} default: // TODO: produce a warning? 
return nil } return &goast.GenDecl{ Tok: gotoken.VAR, Specs: []goast.Spec{ &goast.ValueSpec{ Names: []*goast.Ident{valueRefToIdent(ref)}, Type: ctx.generateTypeBody(t, &stubIsSet), Values: []goast.Expr{valExpr}, }, }, } } func (ctx *moduleContext) generateTypeBody(typeDescr Type, isSet *bool) goast.Expr { switch t := typeDescr.(type) { case BooleanType: return goast.NewIdent("bool") case IntegerType: // TODO: generate consts switch ctx.params.IntegerRepr { case IntegerReprInt64: return goast.NewIdent("int64") // TODO signed, unsigned, range constraints case IntegerReprBigInt: ctx.requireModule("math/big") return &goast.StarExpr{X: goast.NewIdent("big.Int")} default: ctx.appendError(fmt.Errorf("unknown int type mode: %v", ctx.params.IntegerRepr)) return goast.NewIdent("int64") } case CharacterStringType: return goast.NewIdent("string") case RealType: return goast.NewIdent("float64") case OctetStringType: return &goast.ArrayType{Elt: goast.NewIdent("byte")} case SequenceType: return ctx.structFromComponents(t.Components, t.ExtensionAdditions) case SetType: *isSet = true return ctx.structFromComponents(t.Components, t.ExtensionAdditions) case SequenceOfType: return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)} case SetOfType: *isSet = true return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)} case TaggedType: // TODO should put tags in go code? return ctx.generateTypeBody(t.Type, isSet) case ConstraintedType: // TODO should generate checking code? return ctx.generateTypeBody(t.Type, isSet) case TypeReference: // TODO should useful types be separate type by itself? nameAndType := ctx.resolveTypeReference(t) if nameAndType != nil { specialCase := ctx.generateSpecialCase(*nameAndType) if specialCase != nil { return specialCase } } return goast.NewIdent(goifyName(t.Name())) case RestrictedStringType: // TODO should generate checking code? 
return goast.NewIdent("string") case BitStringType: ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.BitString") case EnumeratedType: // TODO: generate consts ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.Enumerated") case AnyType: return &goast.InterfaceType{Methods: &goast.FieldList{}} case ObjectIdentifierType: ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.ObjectIdentifier") case ChoiceType: return ctx.generateChoiceType(t, isSet) default: // NullType ctx.appendError(fmt.Errorf("ignoring unsupported type %#v", typeDescr)) return nil } } func (ctx *moduleContext) generateAssociatedValuesIfNeeded(reference TypeReference, typeDescr Type) goast.Decl { switch typeDescr := ctx.removeWrapperTypes(typeDescr).(type) { case IntegerType: if len(typeDescr.NamedNumberList) == 0 { return nil } var specs []goast.Spec for _, namedNumber := range typeDescr.NamedNumberList { var valueExpr goast.Expr switch v := namedNumber.Value.(type) { case Number: valueExpr = numberToExpr(v, ctx.params.IntegerRepr) case DefinedValue: if v.ModuleName != "" { ctx.appendError(fmt.Errorf("%v.%v: value references from other modules are not supported", v.ModuleName, v.ValueName)) } valueExpr = valueRefToIdent(v.ValueName) } typeName := goifyName(string(reference)) specs = append(specs, &goast.ValueSpec{ Type: goast.NewIdent(goifyName(string(reference))), Names: []*goast.Ident{goast.NewIdent(typeName + "Val" + goifyName(string(namedNumber.Name)))}, Values: []goast.Expr{valueExpr}, }) } return &goast.GenDecl{ Tok: gotoken.VAR, Specs: specs, } default: return nil } } func valueRefToIdent(ref ValueReference) *goast.Ident { return goast.NewIdent("Val" + goifyName(string(ref))) } func numberToExpr(val Number, repr IntegerRepr) goast.Expr { var valueExpr goast.Expr valueExpr = &goast.BasicLit{Value: fmt.Sprint(val.IntValue())} if repr == IntegerReprBigInt { valueExpr = &goast.CallExpr{Fun: goast.NewIdent("big.NewInt"), Args: []goast.Expr{valueExpr}} } return 
valueExpr } func (ctx *moduleContext) generateChoiceType(t ChoiceType, isSet *bool) goast.Expr { if ctx.hasTaggedAlternatives(t) { return goast.NewIdent("asn1.RawValue") } if len(t.AlternativeTypeList) == 1 { return ctx.generateTypeBody(t.AlternativeTypeList[0].Type, isSet) // optimization for X.509 edge case } return &goast.InterfaceType{Methods: &goast.FieldList{}} } func (ctx *moduleContext) hasTaggedAlternatives(t ChoiceType) bool { for _, f := range t.AlternativeTypeList { if ctx.taggedChoiceTypeAlternative(f.Identifier, f.Type) { return true } } return false } func (ctx *moduleContext) taggedChoiceTypeAlternative(name Identifier, t Type) bool { switch t := t.(type) { case TaggedType: return true case TypeReference: if t.Name() == GeneralizedTimeName || t.Name() == UTCTimeName { return false } realType := ctx.resolveTypeReference(t) if realType == nil { return false } return ctx.taggedChoiceTypeAlternative(name, realType.Type) case ConstraintedType: return ctx.taggedChoiceTypeAlternative(name, t.Type) default: return false } } func (ctx *moduleContext) structFromComponents(components ComponentTypeList, extensions ExtensionAdditions) goast.Expr { fields := &goast.FieldList{} for _, field := range components { switch f := field.(type) { case NamedComponentType: fields.List = append(fields.List, ctx.generateStructField(f)) case ComponentsOfComponentType: // TODO: implement ctx.appendError(errors.New("COMPONENTS OF is not supported")) } } for _, field := range extensions { switch f := field.(type) { case NamedComponentType: fields.List = append(fields.List, ctx.generateStructField(f)) case ComponentsOfComponentType: // TODO: implement ctx.appendError(errors.New("COMPONENTS OF is not supported")) } } return &goast.StructType{ Fields: fields, } } func (ctx *moduleContext) generateStructField(f NamedComponentType) *goast.Field { var stubBool bool // we care about isSet / shouldAssign only for top-level decls return &goast.Field{ Names: append(make([]*goast.Ident, 0), 
goast.NewIdent(goifyName(f.NamedType.Identifier.Name()))), Type: ctx.generateTypeBody(f.NamedType.Type, &stubBool), Tag: ctx.asn1TagFromType(f), } } func (ctx *moduleContext) asn1TagFromType(nt NamedComponentType) *goast.BasicLit { t := nt.NamedType.Type components := make([]string, 0) if nt.IsOptional { components = append(components, "optional") } if nt.Default != nil { if defaultNumber, ok := (*nt.Default).(Number); ok { components = append(components, fmt.Sprintf("default:%v", defaultNumber.IntValue())) } if !nt.IsOptional { // ensure it's marked as optional components = append(components, "optional") } } // unwrap type unwrap: for { switch tt := t.(type) { case TaggedType: switch tt.Tag.Class { case CLASS_APPLICATION: components = append(components, "application") case CLASS_PRIVATE: components = append(components, "private") } tagType := ctx.tagDefault if tt.HasTagType { tagType = tt.TagType } switch tagType { case TAGS_EXPLICIT: components = append(components, "explicit") case TAGS_IMPLICIT: // nothing to do case TAGS_AUTOMATIC: ctx.appendError(fmt.Errorf("type %v: AUTOMATIC tags are not supported", nt.NamedType.Identifier)) } switch cn := ctx.lookupValue(tt.Tag.ClassNumber).(type) { case Number: components = append(components, fmt.Sprintf("tag:%v", cn.IntValue())) default: ctx.appendError(fmt.Errorf("tag value should be Number, got %#v", cn)) } t = tt.Type case ConstraintedType: t = tt.Type default: break unwrap } } // add type-specific tags switch tt := t.(type) { case RestrictedStringType: switch tt.LexType { case IA5String: components = append(components, "ia5") case UTF8String: components = append(components, "utf8") case NumericString: components = append(components, "numeric") case PrintableString: // default type } case SetType: components = append(components, "set") case SetOfType: components = append(components, "set") case TypeReference: switch ctx.unwrapToLeafType(tt).TypeReference.Name() { case GeneralizedTimeName: components = 
append(components, "generalized") case UTCTimeName: components = append(components, "utc") } // TODO omitempty causes empty slices to be skipped\ } if len(components) > 0 { return &goast.BasicLit{ Value: fmt.Sprintf("`asn1:\"%s\"`", strings.Join(components, ",")), Kind: gotoken.STRING, } } else { return nil } } func (ctx *moduleContext) generateSpecialCase(resolved TypeAssignment) goast.Expr { if resolved.TypeReference.Name() == GeneralizedTimeName || resolved.TypeReference.Name() == UTCTimeName { // time types in encoding/asn1go don't support wrapping of time.Time ctx.requireModule("time") return goast.NewIdent("time.Time") } else if _, ok := ctx.removeWrapperTypes(resolved.Type).(BitStringType); ok { ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.BitString") } return nil } // TODO really lookup values from module and imports func (ctx *moduleContext) lookupValue(val Value) Value { return val } // resolveTypeReference resolves references until reaches unresolved type, useful type, or declared type // returns type reference of most nested type which is not type reference itself // returns nil if type is not resolved func (ctx *moduleContext) resolveTypeReference(reference TypeReference) *TypeAssignment { unwrapped := ctx.unwrapToLeafType(reference) if unwrapped.Type != nil { return &unwrapped } else if tt := ctx.lookupUsefulType(unwrapped.TypeReference); tt != nil { return &TypeAssignment{unwrapped.TypeReference, tt} } else { ctx.appendError(fmt.Errorf("can not resolve TypeReference %v", reference.Name())) return nil } } func (ctx *moduleContext) lookupUsefulType(reference TypeReference) Type { if usefulType, ok := USEFUL_TYPES[reference.Name()]; ok { return usefulType } else { return nil } } func (ctx *moduleContext) removeWrapperTypes(t Type) Type { for { switch tt := t.(type) { case TaggedType: t = tt.Type case ConstraintedType: t = tt.Type default: return t } } } // unwrapToLeafType walks over transitive type references, tags and constraints and 
yields "root" type reference func (ctx *moduleContext) unwrapToLeafType(reference TypeReference) TypeAssignment { if assignment := ctx.lookupContext.AssignmentList.GetType(reference.Name()); assignment != nil { t := assignment.Type if tt, ok := ctx.removeWrapperTypes(t).(TypeReference); ok { return ctx.unwrapToLeafType(tt) } else { return *assignment } } return TypeAssignment{reference, nil} }
{ for _, existing := range ctx.requiredModules { if existing == module { return } } ctx.requiredModules = append(ctx.requiredModules, module) }
identifier_body
codegen.go
package asn1go import ( "errors" "fmt" goast "go/ast" goprint "go/printer" gotoken "go/token" "io" "strings" ) // CodeGenerator is an interface for code generation from ASN.1 modules. type CodeGenerator interface { Generate(module ModuleDefinition, writer io.Writer) error } // GenParams is code generator configuration. type GenParams struct { // Package is go package name. // If not specified, ASN.1 module name will be used to derive go module name. Package string // Type is a type of code generation to run. // TODO: deprecate in favor of separate New methods. Type GenType // IntegerRepr controls how INTEGER type is expressed in generated go code. IntegerRepr IntegerRepr } // GenType is code generator type. type GenType int const ( // GEN_DECLARATIONS is code generator that is GEN_DECLARATIONS GenType = iota ) // IntegerRepr is enum controlling how INTEGER is represented. type IntegerRepr string // IntegerRepr modes supported. const ( IntegerReprInt64 IntegerRepr = "int64" IntegerReprBigInt IntegerRepr = "big.Int" ) // NewCodeGenerator creates a new code generator from provided params. func NewCodeGenerator(params GenParams) CodeGenerator { if params.IntegerRepr == "" { params.IntegerRepr = IntegerReprInt64 } switch params.Type { case GEN_DECLARATIONS: return &declCodeGen{params} default: return nil } } type declCodeGen struct { Params GenParams } // moduleContext is context used to track state of the code generation. type moduleContext struct { extensibilityImplied bool // tagDefault is a ModuleDefinition.TagDefault value. tagDefault int // errors collected during conversion. // TODO: switch to explicit error passing. errors []error lookupContext ModuleBody // requiredModules holds go modules required by generated code. 
requiredModules []string params GenParams } func (ctx *moduleContext) appendError(err error) { ctx.errors = append(ctx.errors, err) } func (ctx *moduleContext) requireModule(module string) { for _, existing := range ctx.requiredModules { if existing == module { return } } ctx.requiredModules = append(ctx.requiredModules, module) } // Generate declarations from module to be used together with encoding/asn1. // // Feature support status: // - [x] ModuleIdentifier // - [x] TagDefault (except AUTOMATIC) // - [ ] ExtensibilityImplied // - [.] ModuleBody -- see moduleContext.generateDeclarations. func (gen declCodeGen) Generate(module ModuleDefinition, writer io.Writer) error { if module.TagDefault == TAGS_AUTOMATIC { // See x.680, section 12.3. It implies certain transformations to component and alternative lists that are not implemented. return errors.New("AUTOMATIC tagged modules are not supported") } ctx := moduleContext{ extensibilityImplied: module.ExtensibilityImplied, tagDefault: module.TagDefault, lookupContext: module.ModuleBody, params: gen.Params, } moduleName := goast.NewIdent(goifyName(module.ModuleIdentifier.Reference)) if len(gen.Params.Package) > 0 { moduleName = goast.NewIdent(gen.Params.Package) } ast := &goast.File{ Name: moduleName, Decls: ctx.generateDeclarations(module), } if len(ctx.errors) != 0 { msg := "errors generating Go AST from module: \n" for _, err := range ctx.errors { msg += " " + err.Error() + "\n" } return errors.New(msg) } importDecls := make([]goast.Decl, 0) for _, moduleName := range ctx.requiredModules { modulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf("\"%v\"", moduleName)} specs := []goast.Spec{&goast.ImportSpec{Path: modulePath}} importDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: specs}) } ast.Decls = append(importDecls, ast.Decls...) 
return goprint.Fprint(writer, gotoken.NewFileSet(), ast) } func goifyName(name string) string { return strings.Title(strings.Replace(name, "-", "_", -1)) } // generateDeclarations produces go declarations based on ModuleBody of module. // // Feature support status: // - [.] AssignmentList // - [ ] ValueAssignment // - [x] TypeAssignment // - [ ] Imports func (ctx *moduleContext) generateDeclarations(module ModuleDefinition) []goast.Decl { decls := make([]goast.Decl, 0) for _, assignment := range module.ModuleBody.AssignmentList { switch a := assignment.(type) { case TypeAssignment: decls = append(decls, ctx.generateTypeDecl(a.TypeReference, a.Type)) if decl := ctx.generateAssociatedValuesIfNeeded(a.TypeReference, a.Type); decl != nil { decls = append(decls, decl) } case ValueAssignment: if decl := ctx.tryGenerateValueAssignment(a.ValueReference, a.Type, a.Value); decl != nil { decls = append(decls, decl) } } } return decls } func (ctx *moduleContext) generateTypeDecl(reference TypeReference, typeDescr Type) goast.Decl { var isSet bool typeBody := ctx.generateTypeBody(typeDescr, &isSet) spec := &goast.TypeSpec{ Name: goast.NewIdent(goifyName(reference.Name())), Type: typeBody, Assign: 1, // not a valid Pos, but formatter just needs non-empty value } decl := &goast.GenDecl{ Tok: gotoken.TYPE, Specs: []goast.Spec{spec}, } if _, ok := typeBody.(*goast.StructType); ok { spec.Assign = 0 } if isSet { oldName := spec.Name.Name spec.Name.Name += "SET" spec.Assign = 0 newName := spec.Name.Name decl.Specs = append(decl.Specs, &goast.TypeSpec{ Name: goast.NewIdent(oldName), Assign: 1, Type: goast.NewIdent(newName), }) } return decl } func (ctx *moduleContext) tryGenerateValueAssignment(ref ValueReference, t Type, val Value) goast.Decl { stubIsSet := false var valExpr goast.Expr switch val := val.(type) { case Number: valExpr = numberToExpr(val, ctx.params.IntegerRepr) case Boolean: if val { valExpr = &goast.BasicLit{Value: "true"} } else { valExpr = &goast.BasicLit{Value: 
"false"} } case Real: valExpr = &goast.BasicLit{Value: fmt.Sprint(val)} default: // TODO: produce a warning? return nil } return &goast.GenDecl{ Tok: gotoken.VAR, Specs: []goast.Spec{ &goast.ValueSpec{ Names: []*goast.Ident{valueRefToIdent(ref)}, Type: ctx.generateTypeBody(t, &stubIsSet), Values: []goast.Expr{valExpr}, }, }, } } func (ctx *moduleContext) generateTypeBody(typeDescr Type, isSet *bool) goast.Expr { switch t := typeDescr.(type) { case BooleanType: return goast.NewIdent("bool")
case IntegerType: // TODO: generate consts switch ctx.params.IntegerRepr { case IntegerReprInt64: return goast.NewIdent("int64") // TODO signed, unsigned, range constraints case IntegerReprBigInt: ctx.requireModule("math/big") return &goast.StarExpr{X: goast.NewIdent("big.Int")} default: ctx.appendError(fmt.Errorf("unknown int type mode: %v", ctx.params.IntegerRepr)) return goast.NewIdent("int64") } case CharacterStringType: return goast.NewIdent("string") case RealType: return goast.NewIdent("float64") case OctetStringType: return &goast.ArrayType{Elt: goast.NewIdent("byte")} case SequenceType: return ctx.structFromComponents(t.Components, t.ExtensionAdditions) case SetType: *isSet = true return ctx.structFromComponents(t.Components, t.ExtensionAdditions) case SequenceOfType: return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)} case SetOfType: *isSet = true return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)} case TaggedType: // TODO should put tags in go code? return ctx.generateTypeBody(t.Type, isSet) case ConstraintedType: // TODO should generate checking code? return ctx.generateTypeBody(t.Type, isSet) case TypeReference: // TODO should useful types be separate type by itself? nameAndType := ctx.resolveTypeReference(t) if nameAndType != nil { specialCase := ctx.generateSpecialCase(*nameAndType) if specialCase != nil { return specialCase } } return goast.NewIdent(goifyName(t.Name())) case RestrictedStringType: // TODO should generate checking code? 
return goast.NewIdent("string") case BitStringType: ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.BitString") case EnumeratedType: // TODO: generate consts ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.Enumerated") case AnyType: return &goast.InterfaceType{Methods: &goast.FieldList{}} case ObjectIdentifierType: ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.ObjectIdentifier") case ChoiceType: return ctx.generateChoiceType(t, isSet) default: // NullType ctx.appendError(fmt.Errorf("ignoring unsupported type %#v", typeDescr)) return nil } } func (ctx *moduleContext) generateAssociatedValuesIfNeeded(reference TypeReference, typeDescr Type) goast.Decl { switch typeDescr := ctx.removeWrapperTypes(typeDescr).(type) { case IntegerType: if len(typeDescr.NamedNumberList) == 0 { return nil } var specs []goast.Spec for _, namedNumber := range typeDescr.NamedNumberList { var valueExpr goast.Expr switch v := namedNumber.Value.(type) { case Number: valueExpr = numberToExpr(v, ctx.params.IntegerRepr) case DefinedValue: if v.ModuleName != "" { ctx.appendError(fmt.Errorf("%v.%v: value references from other modules are not supported", v.ModuleName, v.ValueName)) } valueExpr = valueRefToIdent(v.ValueName) } typeName := goifyName(string(reference)) specs = append(specs, &goast.ValueSpec{ Type: goast.NewIdent(goifyName(string(reference))), Names: []*goast.Ident{goast.NewIdent(typeName + "Val" + goifyName(string(namedNumber.Name)))}, Values: []goast.Expr{valueExpr}, }) } return &goast.GenDecl{ Tok: gotoken.VAR, Specs: specs, } default: return nil } } func valueRefToIdent(ref ValueReference) *goast.Ident { return goast.NewIdent("Val" + goifyName(string(ref))) } func numberToExpr(val Number, repr IntegerRepr) goast.Expr { var valueExpr goast.Expr valueExpr = &goast.BasicLit{Value: fmt.Sprint(val.IntValue())} if repr == IntegerReprBigInt { valueExpr = &goast.CallExpr{Fun: goast.NewIdent("big.NewInt"), Args: []goast.Expr{valueExpr}} } return 
valueExpr } func (ctx *moduleContext) generateChoiceType(t ChoiceType, isSet *bool) goast.Expr { if ctx.hasTaggedAlternatives(t) { return goast.NewIdent("asn1.RawValue") } if len(t.AlternativeTypeList) == 1 { return ctx.generateTypeBody(t.AlternativeTypeList[0].Type, isSet) // optimization for X.509 edge case } return &goast.InterfaceType{Methods: &goast.FieldList{}} } func (ctx *moduleContext) hasTaggedAlternatives(t ChoiceType) bool { for _, f := range t.AlternativeTypeList { if ctx.taggedChoiceTypeAlternative(f.Identifier, f.Type) { return true } } return false } func (ctx *moduleContext) taggedChoiceTypeAlternative(name Identifier, t Type) bool { switch t := t.(type) { case TaggedType: return true case TypeReference: if t.Name() == GeneralizedTimeName || t.Name() == UTCTimeName { return false } realType := ctx.resolveTypeReference(t) if realType == nil { return false } return ctx.taggedChoiceTypeAlternative(name, realType.Type) case ConstraintedType: return ctx.taggedChoiceTypeAlternative(name, t.Type) default: return false } } func (ctx *moduleContext) structFromComponents(components ComponentTypeList, extensions ExtensionAdditions) goast.Expr { fields := &goast.FieldList{} for _, field := range components { switch f := field.(type) { case NamedComponentType: fields.List = append(fields.List, ctx.generateStructField(f)) case ComponentsOfComponentType: // TODO: implement ctx.appendError(errors.New("COMPONENTS OF is not supported")) } } for _, field := range extensions { switch f := field.(type) { case NamedComponentType: fields.List = append(fields.List, ctx.generateStructField(f)) case ComponentsOfComponentType: // TODO: implement ctx.appendError(errors.New("COMPONENTS OF is not supported")) } } return &goast.StructType{ Fields: fields, } } func (ctx *moduleContext) generateStructField(f NamedComponentType) *goast.Field { var stubBool bool // we care about isSet / shouldAssign only for top-level decls return &goast.Field{ Names: append(make([]*goast.Ident, 0), 
goast.NewIdent(goifyName(f.NamedType.Identifier.Name()))), Type: ctx.generateTypeBody(f.NamedType.Type, &stubBool), Tag: ctx.asn1TagFromType(f), } } func (ctx *moduleContext) asn1TagFromType(nt NamedComponentType) *goast.BasicLit { t := nt.NamedType.Type components := make([]string, 0) if nt.IsOptional { components = append(components, "optional") } if nt.Default != nil { if defaultNumber, ok := (*nt.Default).(Number); ok { components = append(components, fmt.Sprintf("default:%v", defaultNumber.IntValue())) } if !nt.IsOptional { // ensure it's marked as optional components = append(components, "optional") } } // unwrap type unwrap: for { switch tt := t.(type) { case TaggedType: switch tt.Tag.Class { case CLASS_APPLICATION: components = append(components, "application") case CLASS_PRIVATE: components = append(components, "private") } tagType := ctx.tagDefault if tt.HasTagType { tagType = tt.TagType } switch tagType { case TAGS_EXPLICIT: components = append(components, "explicit") case TAGS_IMPLICIT: // nothing to do case TAGS_AUTOMATIC: ctx.appendError(fmt.Errorf("type %v: AUTOMATIC tags are not supported", nt.NamedType.Identifier)) } switch cn := ctx.lookupValue(tt.Tag.ClassNumber).(type) { case Number: components = append(components, fmt.Sprintf("tag:%v", cn.IntValue())) default: ctx.appendError(fmt.Errorf("tag value should be Number, got %#v", cn)) } t = tt.Type case ConstraintedType: t = tt.Type default: break unwrap } } // add type-specific tags switch tt := t.(type) { case RestrictedStringType: switch tt.LexType { case IA5String: components = append(components, "ia5") case UTF8String: components = append(components, "utf8") case NumericString: components = append(components, "numeric") case PrintableString: // default type } case SetType: components = append(components, "set") case SetOfType: components = append(components, "set") case TypeReference: switch ctx.unwrapToLeafType(tt).TypeReference.Name() { case GeneralizedTimeName: components = 
append(components, "generalized") case UTCTimeName: components = append(components, "utc") } // TODO omitempty causes empty slices to be skipped\ } if len(components) > 0 { return &goast.BasicLit{ Value: fmt.Sprintf("`asn1:\"%s\"`", strings.Join(components, ",")), Kind: gotoken.STRING, } } else { return nil } } func (ctx *moduleContext) generateSpecialCase(resolved TypeAssignment) goast.Expr { if resolved.TypeReference.Name() == GeneralizedTimeName || resolved.TypeReference.Name() == UTCTimeName { // time types in encoding/asn1go don't support wrapping of time.Time ctx.requireModule("time") return goast.NewIdent("time.Time") } else if _, ok := ctx.removeWrapperTypes(resolved.Type).(BitStringType); ok { ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.BitString") } return nil } // TODO really lookup values from module and imports func (ctx *moduleContext) lookupValue(val Value) Value { return val } // resolveTypeReference resolves references until reaches unresolved type, useful type, or declared type // returns type reference of most nested type which is not type reference itself // returns nil if type is not resolved func (ctx *moduleContext) resolveTypeReference(reference TypeReference) *TypeAssignment { unwrapped := ctx.unwrapToLeafType(reference) if unwrapped.Type != nil { return &unwrapped } else if tt := ctx.lookupUsefulType(unwrapped.TypeReference); tt != nil { return &TypeAssignment{unwrapped.TypeReference, tt} } else { ctx.appendError(fmt.Errorf("can not resolve TypeReference %v", reference.Name())) return nil } } func (ctx *moduleContext) lookupUsefulType(reference TypeReference) Type { if usefulType, ok := USEFUL_TYPES[reference.Name()]; ok { return usefulType } else { return nil } } func (ctx *moduleContext) removeWrapperTypes(t Type) Type { for { switch tt := t.(type) { case TaggedType: t = tt.Type case ConstraintedType: t = tt.Type default: return t } } } // unwrapToLeafType walks over transitive type references, tags and constraints and 
yields "root" type reference func (ctx *moduleContext) unwrapToLeafType(reference TypeReference) TypeAssignment { if assignment := ctx.lookupContext.AssignmentList.GetType(reference.Name()); assignment != nil { t := assignment.Type if tt, ok := ctx.removeWrapperTypes(t).(TypeReference); ok { return ctx.unwrapToLeafType(tt) } else { return *assignment } } return TypeAssignment{reference, nil} }
random_line_split
codegen.go
package asn1go import ( "errors" "fmt" goast "go/ast" goprint "go/printer" gotoken "go/token" "io" "strings" ) // CodeGenerator is an interface for code generation from ASN.1 modules. type CodeGenerator interface { Generate(module ModuleDefinition, writer io.Writer) error } // GenParams is code generator configuration. type GenParams struct { // Package is go package name. // If not specified, ASN.1 module name will be used to derive go module name. Package string // Type is a type of code generation to run. // TODO: deprecate in favor of separate New methods. Type GenType // IntegerRepr controls how INTEGER type is expressed in generated go code. IntegerRepr IntegerRepr } // GenType is code generator type. type GenType int const ( // GEN_DECLARATIONS is code generator that is GEN_DECLARATIONS GenType = iota ) // IntegerRepr is enum controlling how INTEGER is represented. type IntegerRepr string // IntegerRepr modes supported. const ( IntegerReprInt64 IntegerRepr = "int64" IntegerReprBigInt IntegerRepr = "big.Int" ) // NewCodeGenerator creates a new code generator from provided params. func NewCodeGenerator(params GenParams) CodeGenerator { if params.IntegerRepr == "" { params.IntegerRepr = IntegerReprInt64 } switch params.Type { case GEN_DECLARATIONS: return &declCodeGen{params} default: return nil } } type declCodeGen struct { Params GenParams } // moduleContext is context used to track state of the code generation. type moduleContext struct { extensibilityImplied bool // tagDefault is a ModuleDefinition.TagDefault value. tagDefault int // errors collected during conversion. // TODO: switch to explicit error passing. errors []error lookupContext ModuleBody // requiredModules holds go modules required by generated code. 
requiredModules []string params GenParams } func (ctx *moduleContext) appendError(err error) { ctx.errors = append(ctx.errors, err) } func (ctx *moduleContext) requireModule(module string) { for _, existing := range ctx.requiredModules { if existing == module
} ctx.requiredModules = append(ctx.requiredModules, module) } // Generate declarations from module to be used together with encoding/asn1. // // Feature support status: // - [x] ModuleIdentifier // - [x] TagDefault (except AUTOMATIC) // - [ ] ExtensibilityImplied // - [.] ModuleBody -- see moduleContext.generateDeclarations. func (gen declCodeGen) Generate(module ModuleDefinition, writer io.Writer) error { if module.TagDefault == TAGS_AUTOMATIC { // See x.680, section 12.3. It implies certain transformations to component and alternative lists that are not implemented. return errors.New("AUTOMATIC tagged modules are not supported") } ctx := moduleContext{ extensibilityImplied: module.ExtensibilityImplied, tagDefault: module.TagDefault, lookupContext: module.ModuleBody, params: gen.Params, } moduleName := goast.NewIdent(goifyName(module.ModuleIdentifier.Reference)) if len(gen.Params.Package) > 0 { moduleName = goast.NewIdent(gen.Params.Package) } ast := &goast.File{ Name: moduleName, Decls: ctx.generateDeclarations(module), } if len(ctx.errors) != 0 { msg := "errors generating Go AST from module: \n" for _, err := range ctx.errors { msg += " " + err.Error() + "\n" } return errors.New(msg) } importDecls := make([]goast.Decl, 0) for _, moduleName := range ctx.requiredModules { modulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf("\"%v\"", moduleName)} specs := []goast.Spec{&goast.ImportSpec{Path: modulePath}} importDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: specs}) } ast.Decls = append(importDecls, ast.Decls...) return goprint.Fprint(writer, gotoken.NewFileSet(), ast) } func goifyName(name string) string { return strings.Title(strings.Replace(name, "-", "_", -1)) } // generateDeclarations produces go declarations based on ModuleBody of module. // // Feature support status: // - [.] 
AssignmentList // - [ ] ValueAssignment // - [x] TypeAssignment // - [ ] Imports func (ctx *moduleContext) generateDeclarations(module ModuleDefinition) []goast.Decl { decls := make([]goast.Decl, 0) for _, assignment := range module.ModuleBody.AssignmentList { switch a := assignment.(type) { case TypeAssignment: decls = append(decls, ctx.generateTypeDecl(a.TypeReference, a.Type)) if decl := ctx.generateAssociatedValuesIfNeeded(a.TypeReference, a.Type); decl != nil { decls = append(decls, decl) } case ValueAssignment: if decl := ctx.tryGenerateValueAssignment(a.ValueReference, a.Type, a.Value); decl != nil { decls = append(decls, decl) } } } return decls } func (ctx *moduleContext) generateTypeDecl(reference TypeReference, typeDescr Type) goast.Decl { var isSet bool typeBody := ctx.generateTypeBody(typeDescr, &isSet) spec := &goast.TypeSpec{ Name: goast.NewIdent(goifyName(reference.Name())), Type: typeBody, Assign: 1, // not a valid Pos, but formatter just needs non-empty value } decl := &goast.GenDecl{ Tok: gotoken.TYPE, Specs: []goast.Spec{spec}, } if _, ok := typeBody.(*goast.StructType); ok { spec.Assign = 0 } if isSet { oldName := spec.Name.Name spec.Name.Name += "SET" spec.Assign = 0 newName := spec.Name.Name decl.Specs = append(decl.Specs, &goast.TypeSpec{ Name: goast.NewIdent(oldName), Assign: 1, Type: goast.NewIdent(newName), }) } return decl } func (ctx *moduleContext) tryGenerateValueAssignment(ref ValueReference, t Type, val Value) goast.Decl { stubIsSet := false var valExpr goast.Expr switch val := val.(type) { case Number: valExpr = numberToExpr(val, ctx.params.IntegerRepr) case Boolean: if val { valExpr = &goast.BasicLit{Value: "true"} } else { valExpr = &goast.BasicLit{Value: "false"} } case Real: valExpr = &goast.BasicLit{Value: fmt.Sprint(val)} default: // TODO: produce a warning? 
return nil } return &goast.GenDecl{ Tok: gotoken.VAR, Specs: []goast.Spec{ &goast.ValueSpec{ Names: []*goast.Ident{valueRefToIdent(ref)}, Type: ctx.generateTypeBody(t, &stubIsSet), Values: []goast.Expr{valExpr}, }, }, } } func (ctx *moduleContext) generateTypeBody(typeDescr Type, isSet *bool) goast.Expr { switch t := typeDescr.(type) { case BooleanType: return goast.NewIdent("bool") case IntegerType: // TODO: generate consts switch ctx.params.IntegerRepr { case IntegerReprInt64: return goast.NewIdent("int64") // TODO signed, unsigned, range constraints case IntegerReprBigInt: ctx.requireModule("math/big") return &goast.StarExpr{X: goast.NewIdent("big.Int")} default: ctx.appendError(fmt.Errorf("unknown int type mode: %v", ctx.params.IntegerRepr)) return goast.NewIdent("int64") } case CharacterStringType: return goast.NewIdent("string") case RealType: return goast.NewIdent("float64") case OctetStringType: return &goast.ArrayType{Elt: goast.NewIdent("byte")} case SequenceType: return ctx.structFromComponents(t.Components, t.ExtensionAdditions) case SetType: *isSet = true return ctx.structFromComponents(t.Components, t.ExtensionAdditions) case SequenceOfType: return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)} case SetOfType: *isSet = true return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)} case TaggedType: // TODO should put tags in go code? return ctx.generateTypeBody(t.Type, isSet) case ConstraintedType: // TODO should generate checking code? return ctx.generateTypeBody(t.Type, isSet) case TypeReference: // TODO should useful types be separate type by itself? nameAndType := ctx.resolveTypeReference(t) if nameAndType != nil { specialCase := ctx.generateSpecialCase(*nameAndType) if specialCase != nil { return specialCase } } return goast.NewIdent(goifyName(t.Name())) case RestrictedStringType: // TODO should generate checking code? 
return goast.NewIdent("string") case BitStringType: ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.BitString") case EnumeratedType: // TODO: generate consts ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.Enumerated") case AnyType: return &goast.InterfaceType{Methods: &goast.FieldList{}} case ObjectIdentifierType: ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.ObjectIdentifier") case ChoiceType: return ctx.generateChoiceType(t, isSet) default: // NullType ctx.appendError(fmt.Errorf("ignoring unsupported type %#v", typeDescr)) return nil } } func (ctx *moduleContext) generateAssociatedValuesIfNeeded(reference TypeReference, typeDescr Type) goast.Decl { switch typeDescr := ctx.removeWrapperTypes(typeDescr).(type) { case IntegerType: if len(typeDescr.NamedNumberList) == 0 { return nil } var specs []goast.Spec for _, namedNumber := range typeDescr.NamedNumberList { var valueExpr goast.Expr switch v := namedNumber.Value.(type) { case Number: valueExpr = numberToExpr(v, ctx.params.IntegerRepr) case DefinedValue: if v.ModuleName != "" { ctx.appendError(fmt.Errorf("%v.%v: value references from other modules are not supported", v.ModuleName, v.ValueName)) } valueExpr = valueRefToIdent(v.ValueName) } typeName := goifyName(string(reference)) specs = append(specs, &goast.ValueSpec{ Type: goast.NewIdent(goifyName(string(reference))), Names: []*goast.Ident{goast.NewIdent(typeName + "Val" + goifyName(string(namedNumber.Name)))}, Values: []goast.Expr{valueExpr}, }) } return &goast.GenDecl{ Tok: gotoken.VAR, Specs: specs, } default: return nil } } func valueRefToIdent(ref ValueReference) *goast.Ident { return goast.NewIdent("Val" + goifyName(string(ref))) } func numberToExpr(val Number, repr IntegerRepr) goast.Expr { var valueExpr goast.Expr valueExpr = &goast.BasicLit{Value: fmt.Sprint(val.IntValue())} if repr == IntegerReprBigInt { valueExpr = &goast.CallExpr{Fun: goast.NewIdent("big.NewInt"), Args: []goast.Expr{valueExpr}} } return 
valueExpr } func (ctx *moduleContext) generateChoiceType(t ChoiceType, isSet *bool) goast.Expr { if ctx.hasTaggedAlternatives(t) { return goast.NewIdent("asn1.RawValue") } if len(t.AlternativeTypeList) == 1 { return ctx.generateTypeBody(t.AlternativeTypeList[0].Type, isSet) // optimization for X.509 edge case } return &goast.InterfaceType{Methods: &goast.FieldList{}} } func (ctx *moduleContext) hasTaggedAlternatives(t ChoiceType) bool { for _, f := range t.AlternativeTypeList { if ctx.taggedChoiceTypeAlternative(f.Identifier, f.Type) { return true } } return false } func (ctx *moduleContext) taggedChoiceTypeAlternative(name Identifier, t Type) bool { switch t := t.(type) { case TaggedType: return true case TypeReference: if t.Name() == GeneralizedTimeName || t.Name() == UTCTimeName { return false } realType := ctx.resolveTypeReference(t) if realType == nil { return false } return ctx.taggedChoiceTypeAlternative(name, realType.Type) case ConstraintedType: return ctx.taggedChoiceTypeAlternative(name, t.Type) default: return false } } func (ctx *moduleContext) structFromComponents(components ComponentTypeList, extensions ExtensionAdditions) goast.Expr { fields := &goast.FieldList{} for _, field := range components { switch f := field.(type) { case NamedComponentType: fields.List = append(fields.List, ctx.generateStructField(f)) case ComponentsOfComponentType: // TODO: implement ctx.appendError(errors.New("COMPONENTS OF is not supported")) } } for _, field := range extensions { switch f := field.(type) { case NamedComponentType: fields.List = append(fields.List, ctx.generateStructField(f)) case ComponentsOfComponentType: // TODO: implement ctx.appendError(errors.New("COMPONENTS OF is not supported")) } } return &goast.StructType{ Fields: fields, } } func (ctx *moduleContext) generateStructField(f NamedComponentType) *goast.Field { var stubBool bool // we care about isSet / shouldAssign only for top-level decls return &goast.Field{ Names: append(make([]*goast.Ident, 0), 
goast.NewIdent(goifyName(f.NamedType.Identifier.Name()))), Type: ctx.generateTypeBody(f.NamedType.Type, &stubBool), Tag: ctx.asn1TagFromType(f), } } func (ctx *moduleContext) asn1TagFromType(nt NamedComponentType) *goast.BasicLit { t := nt.NamedType.Type components := make([]string, 0) if nt.IsOptional { components = append(components, "optional") } if nt.Default != nil { if defaultNumber, ok := (*nt.Default).(Number); ok { components = append(components, fmt.Sprintf("default:%v", defaultNumber.IntValue())) } if !nt.IsOptional { // ensure it's marked as optional components = append(components, "optional") } } // unwrap type unwrap: for { switch tt := t.(type) { case TaggedType: switch tt.Tag.Class { case CLASS_APPLICATION: components = append(components, "application") case CLASS_PRIVATE: components = append(components, "private") } tagType := ctx.tagDefault if tt.HasTagType { tagType = tt.TagType } switch tagType { case TAGS_EXPLICIT: components = append(components, "explicit") case TAGS_IMPLICIT: // nothing to do case TAGS_AUTOMATIC: ctx.appendError(fmt.Errorf("type %v: AUTOMATIC tags are not supported", nt.NamedType.Identifier)) } switch cn := ctx.lookupValue(tt.Tag.ClassNumber).(type) { case Number: components = append(components, fmt.Sprintf("tag:%v", cn.IntValue())) default: ctx.appendError(fmt.Errorf("tag value should be Number, got %#v", cn)) } t = tt.Type case ConstraintedType: t = tt.Type default: break unwrap } } // add type-specific tags switch tt := t.(type) { case RestrictedStringType: switch tt.LexType { case IA5String: components = append(components, "ia5") case UTF8String: components = append(components, "utf8") case NumericString: components = append(components, "numeric") case PrintableString: // default type } case SetType: components = append(components, "set") case SetOfType: components = append(components, "set") case TypeReference: switch ctx.unwrapToLeafType(tt).TypeReference.Name() { case GeneralizedTimeName: components = 
append(components, "generalized") case UTCTimeName: components = append(components, "utc") } // TODO omitempty causes empty slices to be skipped\ } if len(components) > 0 { return &goast.BasicLit{ Value: fmt.Sprintf("`asn1:\"%s\"`", strings.Join(components, ",")), Kind: gotoken.STRING, } } else { return nil } } func (ctx *moduleContext) generateSpecialCase(resolved TypeAssignment) goast.Expr { if resolved.TypeReference.Name() == GeneralizedTimeName || resolved.TypeReference.Name() == UTCTimeName { // time types in encoding/asn1go don't support wrapping of time.Time ctx.requireModule("time") return goast.NewIdent("time.Time") } else if _, ok := ctx.removeWrapperTypes(resolved.Type).(BitStringType); ok { ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.BitString") } return nil } // TODO really lookup values from module and imports func (ctx *moduleContext) lookupValue(val Value) Value { return val } // resolveTypeReference resolves references until reaches unresolved type, useful type, or declared type // returns type reference of most nested type which is not type reference itself // returns nil if type is not resolved func (ctx *moduleContext) resolveTypeReference(reference TypeReference) *TypeAssignment { unwrapped := ctx.unwrapToLeafType(reference) if unwrapped.Type != nil { return &unwrapped } else if tt := ctx.lookupUsefulType(unwrapped.TypeReference); tt != nil { return &TypeAssignment{unwrapped.TypeReference, tt} } else { ctx.appendError(fmt.Errorf("can not resolve TypeReference %v", reference.Name())) return nil } } func (ctx *moduleContext) lookupUsefulType(reference TypeReference) Type { if usefulType, ok := USEFUL_TYPES[reference.Name()]; ok { return usefulType } else { return nil } } func (ctx *moduleContext) removeWrapperTypes(t Type) Type { for { switch tt := t.(type) { case TaggedType: t = tt.Type case ConstraintedType: t = tt.Type default: return t } } } // unwrapToLeafType walks over transitive type references, tags and constraints and 
yields "root" type reference func (ctx *moduleContext) unwrapToLeafType(reference TypeReference) TypeAssignment { if assignment := ctx.lookupContext.AssignmentList.GetType(reference.Name()); assignment != nil { t := assignment.Type if tt, ok := ctx.removeWrapperTypes(t).(TypeReference); ok { return ctx.unwrapToLeafType(tt) } else { return *assignment } } return TypeAssignment{reference, nil} }
{ return }
conditional_block
codegen.go
package asn1go import ( "errors" "fmt" goast "go/ast" goprint "go/printer" gotoken "go/token" "io" "strings" ) // CodeGenerator is an interface for code generation from ASN.1 modules. type CodeGenerator interface { Generate(module ModuleDefinition, writer io.Writer) error } // GenParams is code generator configuration. type GenParams struct { // Package is go package name. // If not specified, ASN.1 module name will be used to derive go module name. Package string // Type is a type of code generation to run. // TODO: deprecate in favor of separate New methods. Type GenType // IntegerRepr controls how INTEGER type is expressed in generated go code. IntegerRepr IntegerRepr } // GenType is code generator type. type GenType int const ( // GEN_DECLARATIONS is code generator that is GEN_DECLARATIONS GenType = iota ) // IntegerRepr is enum controlling how INTEGER is represented. type IntegerRepr string // IntegerRepr modes supported. const ( IntegerReprInt64 IntegerRepr = "int64" IntegerReprBigInt IntegerRepr = "big.Int" ) // NewCodeGenerator creates a new code generator from provided params. func NewCodeGenerator(params GenParams) CodeGenerator { if params.IntegerRepr == "" { params.IntegerRepr = IntegerReprInt64 } switch params.Type { case GEN_DECLARATIONS: return &declCodeGen{params} default: return nil } } type declCodeGen struct { Params GenParams } // moduleContext is context used to track state of the code generation. type moduleContext struct { extensibilityImplied bool // tagDefault is a ModuleDefinition.TagDefault value. tagDefault int // errors collected during conversion. // TODO: switch to explicit error passing. errors []error lookupContext ModuleBody // requiredModules holds go modules required by generated code. 
requiredModules []string params GenParams } func (ctx *moduleContext) appendError(err error) { ctx.errors = append(ctx.errors, err) } func (ctx *moduleContext) requireModule(module string) { for _, existing := range ctx.requiredModules { if existing == module { return } } ctx.requiredModules = append(ctx.requiredModules, module) } // Generate declarations from module to be used together with encoding/asn1. // // Feature support status: // - [x] ModuleIdentifier // - [x] TagDefault (except AUTOMATIC) // - [ ] ExtensibilityImplied // - [.] ModuleBody -- see moduleContext.generateDeclarations. func (gen declCodeGen) Generate(module ModuleDefinition, writer io.Writer) error { if module.TagDefault == TAGS_AUTOMATIC { // See x.680, section 12.3. It implies certain transformations to component and alternative lists that are not implemented. return errors.New("AUTOMATIC tagged modules are not supported") } ctx := moduleContext{ extensibilityImplied: module.ExtensibilityImplied, tagDefault: module.TagDefault, lookupContext: module.ModuleBody, params: gen.Params, } moduleName := goast.NewIdent(goifyName(module.ModuleIdentifier.Reference)) if len(gen.Params.Package) > 0 { moduleName = goast.NewIdent(gen.Params.Package) } ast := &goast.File{ Name: moduleName, Decls: ctx.generateDeclarations(module), } if len(ctx.errors) != 0 { msg := "errors generating Go AST from module: \n" for _, err := range ctx.errors { msg += " " + err.Error() + "\n" } return errors.New(msg) } importDecls := make([]goast.Decl, 0) for _, moduleName := range ctx.requiredModules { modulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf("\"%v\"", moduleName)} specs := []goast.Spec{&goast.ImportSpec{Path: modulePath}} importDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: specs}) } ast.Decls = append(importDecls, ast.Decls...) 
return goprint.Fprint(writer, gotoken.NewFileSet(), ast) } func goifyName(name string) string { return strings.Title(strings.Replace(name, "-", "_", -1)) } // generateDeclarations produces go declarations based on ModuleBody of module. // // Feature support status: // - [.] AssignmentList // - [ ] ValueAssignment // - [x] TypeAssignment // - [ ] Imports func (ctx *moduleContext) generateDeclarations(module ModuleDefinition) []goast.Decl { decls := make([]goast.Decl, 0) for _, assignment := range module.ModuleBody.AssignmentList { switch a := assignment.(type) { case TypeAssignment: decls = append(decls, ctx.generateTypeDecl(a.TypeReference, a.Type)) if decl := ctx.generateAssociatedValuesIfNeeded(a.TypeReference, a.Type); decl != nil { decls = append(decls, decl) } case ValueAssignment: if decl := ctx.tryGenerateValueAssignment(a.ValueReference, a.Type, a.Value); decl != nil { decls = append(decls, decl) } } } return decls } func (ctx *moduleContext) generateTypeDecl(reference TypeReference, typeDescr Type) goast.Decl { var isSet bool typeBody := ctx.generateTypeBody(typeDescr, &isSet) spec := &goast.TypeSpec{ Name: goast.NewIdent(goifyName(reference.Name())), Type: typeBody, Assign: 1, // not a valid Pos, but formatter just needs non-empty value } decl := &goast.GenDecl{ Tok: gotoken.TYPE, Specs: []goast.Spec{spec}, } if _, ok := typeBody.(*goast.StructType); ok { spec.Assign = 0 } if isSet { oldName := spec.Name.Name spec.Name.Name += "SET" spec.Assign = 0 newName := spec.Name.Name decl.Specs = append(decl.Specs, &goast.TypeSpec{ Name: goast.NewIdent(oldName), Assign: 1, Type: goast.NewIdent(newName), }) } return decl } func (ctx *moduleContext) tryGenerateValueAssignment(ref ValueReference, t Type, val Value) goast.Decl { stubIsSet := false var valExpr goast.Expr switch val := val.(type) { case Number: valExpr = numberToExpr(val, ctx.params.IntegerRepr) case Boolean: if val { valExpr = &goast.BasicLit{Value: "true"} } else { valExpr = &goast.BasicLit{Value: 
"false"} } case Real: valExpr = &goast.BasicLit{Value: fmt.Sprint(val)} default: // TODO: produce a warning? return nil } return &goast.GenDecl{ Tok: gotoken.VAR, Specs: []goast.Spec{ &goast.ValueSpec{ Names: []*goast.Ident{valueRefToIdent(ref)}, Type: ctx.generateTypeBody(t, &stubIsSet), Values: []goast.Expr{valExpr}, }, }, } } func (ctx *moduleContext) generateTypeBody(typeDescr Type, isSet *bool) goast.Expr { switch t := typeDescr.(type) { case BooleanType: return goast.NewIdent("bool") case IntegerType: // TODO: generate consts switch ctx.params.IntegerRepr { case IntegerReprInt64: return goast.NewIdent("int64") // TODO signed, unsigned, range constraints case IntegerReprBigInt: ctx.requireModule("math/big") return &goast.StarExpr{X: goast.NewIdent("big.Int")} default: ctx.appendError(fmt.Errorf("unknown int type mode: %v", ctx.params.IntegerRepr)) return goast.NewIdent("int64") } case CharacterStringType: return goast.NewIdent("string") case RealType: return goast.NewIdent("float64") case OctetStringType: return &goast.ArrayType{Elt: goast.NewIdent("byte")} case SequenceType: return ctx.structFromComponents(t.Components, t.ExtensionAdditions) case SetType: *isSet = true return ctx.structFromComponents(t.Components, t.ExtensionAdditions) case SequenceOfType: return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)} case SetOfType: *isSet = true return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)} case TaggedType: // TODO should put tags in go code? return ctx.generateTypeBody(t.Type, isSet) case ConstraintedType: // TODO should generate checking code? return ctx.generateTypeBody(t.Type, isSet) case TypeReference: // TODO should useful types be separate type by itself? nameAndType := ctx.resolveTypeReference(t) if nameAndType != nil { specialCase := ctx.generateSpecialCase(*nameAndType) if specialCase != nil { return specialCase } } return goast.NewIdent(goifyName(t.Name())) case RestrictedStringType: // TODO should generate checking code? 
return goast.NewIdent("string") case BitStringType: ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.BitString") case EnumeratedType: // TODO: generate consts ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.Enumerated") case AnyType: return &goast.InterfaceType{Methods: &goast.FieldList{}} case ObjectIdentifierType: ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.ObjectIdentifier") case ChoiceType: return ctx.generateChoiceType(t, isSet) default: // NullType ctx.appendError(fmt.Errorf("ignoring unsupported type %#v", typeDescr)) return nil } } func (ctx *moduleContext) generateAssociatedValuesIfNeeded(reference TypeReference, typeDescr Type) goast.Decl { switch typeDescr := ctx.removeWrapperTypes(typeDescr).(type) { case IntegerType: if len(typeDescr.NamedNumberList) == 0 { return nil } var specs []goast.Spec for _, namedNumber := range typeDescr.NamedNumberList { var valueExpr goast.Expr switch v := namedNumber.Value.(type) { case Number: valueExpr = numberToExpr(v, ctx.params.IntegerRepr) case DefinedValue: if v.ModuleName != "" { ctx.appendError(fmt.Errorf("%v.%v: value references from other modules are not supported", v.ModuleName, v.ValueName)) } valueExpr = valueRefToIdent(v.ValueName) } typeName := goifyName(string(reference)) specs = append(specs, &goast.ValueSpec{ Type: goast.NewIdent(goifyName(string(reference))), Names: []*goast.Ident{goast.NewIdent(typeName + "Val" + goifyName(string(namedNumber.Name)))}, Values: []goast.Expr{valueExpr}, }) } return &goast.GenDecl{ Tok: gotoken.VAR, Specs: specs, } default: return nil } } func valueRefToIdent(ref ValueReference) *goast.Ident { return goast.NewIdent("Val" + goifyName(string(ref))) } func numberToExpr(val Number, repr IntegerRepr) goast.Expr { var valueExpr goast.Expr valueExpr = &goast.BasicLit{Value: fmt.Sprint(val.IntValue())} if repr == IntegerReprBigInt { valueExpr = &goast.CallExpr{Fun: goast.NewIdent("big.NewInt"), Args: []goast.Expr{valueExpr}} } return 
valueExpr } func (ctx *moduleContext) generateChoiceType(t ChoiceType, isSet *bool) goast.Expr { if ctx.hasTaggedAlternatives(t) { return goast.NewIdent("asn1.RawValue") } if len(t.AlternativeTypeList) == 1 { return ctx.generateTypeBody(t.AlternativeTypeList[0].Type, isSet) // optimization for X.509 edge case } return &goast.InterfaceType{Methods: &goast.FieldList{}} } func (ctx *moduleContext) hasTaggedAlternatives(t ChoiceType) bool { for _, f := range t.AlternativeTypeList { if ctx.taggedChoiceTypeAlternative(f.Identifier, f.Type) { return true } } return false } func (ctx *moduleContext)
(name Identifier, t Type) bool { switch t := t.(type) { case TaggedType: return true case TypeReference: if t.Name() == GeneralizedTimeName || t.Name() == UTCTimeName { return false } realType := ctx.resolveTypeReference(t) if realType == nil { return false } return ctx.taggedChoiceTypeAlternative(name, realType.Type) case ConstraintedType: return ctx.taggedChoiceTypeAlternative(name, t.Type) default: return false } } func (ctx *moduleContext) structFromComponents(components ComponentTypeList, extensions ExtensionAdditions) goast.Expr { fields := &goast.FieldList{} for _, field := range components { switch f := field.(type) { case NamedComponentType: fields.List = append(fields.List, ctx.generateStructField(f)) case ComponentsOfComponentType: // TODO: implement ctx.appendError(errors.New("COMPONENTS OF is not supported")) } } for _, field := range extensions { switch f := field.(type) { case NamedComponentType: fields.List = append(fields.List, ctx.generateStructField(f)) case ComponentsOfComponentType: // TODO: implement ctx.appendError(errors.New("COMPONENTS OF is not supported")) } } return &goast.StructType{ Fields: fields, } } func (ctx *moduleContext) generateStructField(f NamedComponentType) *goast.Field { var stubBool bool // we care about isSet / shouldAssign only for top-level decls return &goast.Field{ Names: append(make([]*goast.Ident, 0), goast.NewIdent(goifyName(f.NamedType.Identifier.Name()))), Type: ctx.generateTypeBody(f.NamedType.Type, &stubBool), Tag: ctx.asn1TagFromType(f), } } func (ctx *moduleContext) asn1TagFromType(nt NamedComponentType) *goast.BasicLit { t := nt.NamedType.Type components := make([]string, 0) if nt.IsOptional { components = append(components, "optional") } if nt.Default != nil { if defaultNumber, ok := (*nt.Default).(Number); ok { components = append(components, fmt.Sprintf("default:%v", defaultNumber.IntValue())) } if !nt.IsOptional { // ensure it's marked as optional components = append(components, "optional") } } // 
unwrap type unwrap: for { switch tt := t.(type) { case TaggedType: switch tt.Tag.Class { case CLASS_APPLICATION: components = append(components, "application") case CLASS_PRIVATE: components = append(components, "private") } tagType := ctx.tagDefault if tt.HasTagType { tagType = tt.TagType } switch tagType { case TAGS_EXPLICIT: components = append(components, "explicit") case TAGS_IMPLICIT: // nothing to do case TAGS_AUTOMATIC: ctx.appendError(fmt.Errorf("type %v: AUTOMATIC tags are not supported", nt.NamedType.Identifier)) } switch cn := ctx.lookupValue(tt.Tag.ClassNumber).(type) { case Number: components = append(components, fmt.Sprintf("tag:%v", cn.IntValue())) default: ctx.appendError(fmt.Errorf("tag value should be Number, got %#v", cn)) } t = tt.Type case ConstraintedType: t = tt.Type default: break unwrap } } // add type-specific tags switch tt := t.(type) { case RestrictedStringType: switch tt.LexType { case IA5String: components = append(components, "ia5") case UTF8String: components = append(components, "utf8") case NumericString: components = append(components, "numeric") case PrintableString: // default type } case SetType: components = append(components, "set") case SetOfType: components = append(components, "set") case TypeReference: switch ctx.unwrapToLeafType(tt).TypeReference.Name() { case GeneralizedTimeName: components = append(components, "generalized") case UTCTimeName: components = append(components, "utc") } // TODO omitempty causes empty slices to be skipped\ } if len(components) > 0 { return &goast.BasicLit{ Value: fmt.Sprintf("`asn1:\"%s\"`", strings.Join(components, ",")), Kind: gotoken.STRING, } } else { return nil } } func (ctx *moduleContext) generateSpecialCase(resolved TypeAssignment) goast.Expr { if resolved.TypeReference.Name() == GeneralizedTimeName || resolved.TypeReference.Name() == UTCTimeName { // time types in encoding/asn1go don't support wrapping of time.Time ctx.requireModule("time") return goast.NewIdent("time.Time") } 
else if _, ok := ctx.removeWrapperTypes(resolved.Type).(BitStringType); ok { ctx.requireModule("encoding/asn1") return goast.NewIdent("asn1.BitString") } return nil } // TODO really lookup values from module and imports func (ctx *moduleContext) lookupValue(val Value) Value { return val } // resolveTypeReference resolves references until reaches unresolved type, useful type, or declared type // returns type reference of most nested type which is not type reference itself // returns nil if type is not resolved func (ctx *moduleContext) resolveTypeReference(reference TypeReference) *TypeAssignment { unwrapped := ctx.unwrapToLeafType(reference) if unwrapped.Type != nil { return &unwrapped } else if tt := ctx.lookupUsefulType(unwrapped.TypeReference); tt != nil { return &TypeAssignment{unwrapped.TypeReference, tt} } else { ctx.appendError(fmt.Errorf("can not resolve TypeReference %v", reference.Name())) return nil } } func (ctx *moduleContext) lookupUsefulType(reference TypeReference) Type { if usefulType, ok := USEFUL_TYPES[reference.Name()]; ok { return usefulType } else { return nil } } func (ctx *moduleContext) removeWrapperTypes(t Type) Type { for { switch tt := t.(type) { case TaggedType: t = tt.Type case ConstraintedType: t = tt.Type default: return t } } } // unwrapToLeafType walks over transitive type references, tags and constraints and yields "root" type reference func (ctx *moduleContext) unwrapToLeafType(reference TypeReference) TypeAssignment { if assignment := ctx.lookupContext.AssignmentList.GetType(reference.Name()); assignment != nil { t := assignment.Type if tt, ok := ctx.removeWrapperTypes(t).(TypeReference); ok { return ctx.unwrapToLeafType(tt) } else { return *assignment } } return TypeAssignment{reference, nil} }
taggedChoiceTypeAlternative
identifier_name
family_structs.go
package family import ( "database/sql" "fmt" "github.com/chaseWilliams/family-map/lib/datagen/external" "github.com/chaseWilliams/family-map/lib/models" "github.com/chaseWilliams/family-map/lib/util" "math/rand" ) /* Person is a family construct that represents a singular person in a family tree */ type Person struct { model models.Person events []models.Event father *Person mother *Person spouses []*Person children []*Person straight bool marriageYears []int divorceYears []int married bool birthYear int // this is in simulation time, not in AD deathYear int } // TODO // create events // instead of divorce on death, widow instead func (f *Person) createEvent(name string, year int) models.Event { var city external.City if len(f.events) == 0 { city = external.RandomCity() } else { recentEvent := f.events[len(f.events)-1] city = external.RandomCloseCity(recentEvent.Latitude, recentEvent.Longitude) } event := models.Event{ EventID: util.RandomID(), PersonID: f.model.PersonID, Latitude: city.Latitude, Longitude: city.Longitude, Country: city.Country, City: city.City, EventType: name, Year: year, } f.events = append(f.events, event) return event } func (f *Person) createMirrorEvent(event models.Event) { event.EventID = util.RandomID() event.PersonID = f.model.PersonID f.events = append(f.events, event) } /* NumEvents returns the person's number of events */ func (f Person) NumEvents() int { return len(f.events) } /* Save will persist the person and their events in the database */ func (f *Person) Save(username string) (err error) { f.model.Username = username err = f.model.Save() if err != nil { return } for _, event := range f.events { event.Username = username err = event.Save() if err != nil { return } } return } /* Dies will appropriately set the Person as dead at given year */ func (f *Person) Dies(year int) { f.deathYear = year f.createEvent("DEATH", year) } /* Born will set the person's birth year and create the birth event */ func (f *Person) Born(year int) { 
f.birthYear = year f.createEvent("BIRTH", year) } /* Marry will set appropriate fields for Person now being married. */ func (f *Person) Marry(spouse *Person, year int) { if f.married { panic(fmt.Sprintf("Person is already married\n%v", *f)) } f.spouses = append(f.spouses, spouse) f.marriageYears = append(f.marriageYears, year) f.married = true f.model.SpouseID = sql.NullString{spouse.model.PersonID, true} event := f.createEvent("MARRIAGE", year) spouse.spouses = append(spouse.spouses, f) spouse.marriageYears = append(spouse.marriageYears, year) spouse.married = true spouse.model.SpouseID = sql.NullString{f.model.PersonID, true} spouse.createMirrorEvent(event) } /* Divorce will set appropriate fields for Person getting divorced. */ func (f *Person) Divorce(year int) { if !f.married { panic(fmt.Sprintf("Person is not married\n%v", *f)) } spouse, err := f.CurrSpouse() if err != nil { panic(err) } f.divorceYears = append(f.divorceYears, year) f.married = false f.model.SpouseID = sql.NullString{"", false} event := f.createEvent("DIVORCE", year) spouse.divorceYears = append(spouse.divorceYears, year) spouse.married = false spouse.model.SpouseID = sql.NullString{"", false} spouse.createMirrorEvent(event) } /* HaveChild will edit the person's children and add the newborn event */ func (f *Person) HaveChild(child *Person, year int) { spouse, err := f.CurrSpouse() if err != nil { panic(err) } f.children = append(f.children, child) spouse.children = append(spouse.children, child) event := f.createEvent("NEWBORN", year) spouse.createMirrorEvent(event) } /* HaveParents will set the parents of the person */ func (f *Person) HaveParents(father *Person, mother *Person) { f.father = father f.mother = mother f.model.FatherID = sql.NullString{father.model.PersonID, true} f.model.MotherID = sql.NullString{mother.model.PersonID, true} } /* IsDead returns if the person is dead or not */ func (f Person) IsDead() bool { return f.deathYear != -1 } /* IsMarried returns if the person is 
married or not */ func (f Person) IsMarried() bool { return f.married } /* IsStraight returns if the person is straight or not */ func (f Person) IsStraight() bool { return f.straight } /* Gender will return the person's gender */ func (f Person) Gender() string { return f.model.Gender } /* Age returns the person's year, given a year */ func (f Person) Age(year int) int { return year - f.birthYear } /* DeathYear returns the person's DeathYear, which is -1 if the person isn't dead yet */ func (f Person) DeathYear() int { return f.deathYear } /* Spouses returns a slice of the person's spouses in chronological order */ func (f Person) Spouses() []*Person { return f.spouses } /* CurrSpouse will return the person's current spouse, and return an error if person isn't married */ func (f Person) CurrSpouse() (spouse *Person, err error) { if !f.married { return nil, fmt.Errorf("person is not married:\n%v", f) } return f.spouses[len(f.spouses)-1], nil } /* Children will return a map of all spouses -> slice of children */ func (f Person) Children() (m map[*Person][]*Person) { m = make(map[*Person][]*Person) for _, spouse := range f.spouses { for _, child := range f.children { if child.father == spouse || child.mother == spouse { if _, ok := m[spouse]; !ok { m[spouse] = make([]*Person, 0) }
} } return } /* MarriageYears will return the person's marriage years */ func (f Person) MarriageYears() []int { return f.marriageYears } /* DivorceYears returns the person's divorce years */ func (f Person) DivorceYears() []int { return f.divorceYears } /* Generation is what the name implies, and represented by a slice of Person pointers, in order to keep everything mutable. */ type Generation []*Person /* AllDead will returns whether or not everyone in the generation is dead */ func (g Generation) AllDead() bool { if g == nil || len(g) == 0 { return true } for _, p := range g { if !p.IsDead() { return false } } return true } /* Population is what the name implies, and represented by a slice of Generations */ type Population []Generation /* GetAlive returns a slice of all alive people, irrespective of their generation */ func (pop *Population) GetAlive() []*Person { people := make([]*Person, 0) for _, gen := range *pop { for _, p := range gen { if !p.IsDead() { people = append(people, p) } } } return people } /* AddPerson will add the person to the proper generation in the population */ func (pop *Population) AddPerson(f *Person) { generation := 0 for i, gen := range *pop { for _, p := range gen { if (p == f.mother || p == f.father) && i >= generation { generation = i + 1 } } } if generation >= len(*pop) { // should only need to add one more generation *pop = append(*pop, make(Generation, 0)) } (*pop)[generation] = append((*pop)[generation], f) } /* AreFamily returns whether or not the two people are family members */ func AreFamily(a *Person, b *Person) bool { if a == b { return false } // are parents or siblings x removed if recursiveAreParentsOrSiblings(a, b) { return true } // are spouses for _, spouse := range a.spouses { if spouse == b { return true } if recursiveAreParentsOrSiblings(spouse, b) { return true } } for _, spouse := range b.spouses { if spouse == a { return true } if recursiveAreParentsOrSiblings(spouse, a) { return true } } // are cousins for 
_, parentA := range []*Person{a.mother, a.father} { if parentA == nil { continue } for _, parentB := range []*Person{b.mother, b.father} { if parentB == nil { continue } if areSiblingsOrParents(parentA, parentB) { return true } } } return false } func recursiveAreParentsOrSiblings(a *Person, b *Person) bool { // recursion up the tree if goUpTree(a, b) || goUpTree(b, a) { return true } // recusion down the tree if goDownTree(a, b) || goDownTree(b, a) { return true } return false } func areSiblingsOrParents(a *Person, b *Person) bool { // is either one parents of the other if a.mother == b || a.father == b || b.mother == a || b.father == a { return true } // are siblings if (a.mother == b.mother && a.mother != nil) || (a.father == b.father && a.father != nil) { return true } return false } func goUpTree(a *Person, b *Person) bool { if a == nil || b == nil { return false } if areSiblingsOrParents(a, b) { return true } if goUpTree(a.father, b) || goUpTree(a.mother, b) { return true } return false } func goDownTree(a *Person, b *Person) bool { if a == nil || b == nil { return false } if areSiblingsOrParents(a, b) { return true } for _, child := range a.children { if goDownTree(child, b) { return true } } return false } /* RandomFamily returns a random family, determined by a random person at generation numGen and all family members of that person. The person will be the first person in the returned slice of people. 
*/ func (pop Population) RandomFamily(personModel models.Person, numGen int) []*Person { genIndex := numGen - 1 if len(pop) < genIndex { panic(fmt.Sprintf( "Population has %d generations, cannot create family at %d generation", len(pop), numGen, )) } generation := pop[genIndex] person := generation[rand.Intn(len(generation))] // overrides the person's attributes in favor of what's provided person.model.PersonID = personModel.PersonID person.model.Username = personModel.Username person.model.FirstName = personModel.FirstName person.model.LastName = personModel.LastName person.model.Gender = personModel.Gender for i := range person.events { person.events[i].PersonID = personModel.PersonID } familyMembers := []*Person{person} for _, gen := range pop { for _, stranger := range gen { if AreFamily(person, stranger) { familyMembers = append(familyMembers, stranger) } } } return familyMembers } /* CreatePopulation will create a Population with one generation that has the specified people in it. */ func CreatePopulation(numPeople int) Population { pop := make(Population, 1) pop[0] = make(Generation, 0, numPeople) // create the born people, born at year 0 for i := 0; i < numPeople; i++ { pop[0] = append(pop[0], RandomPerson(0)) } return pop }
m[spouse] = append(m[spouse], child) }
random_line_split
family_structs.go
package family import ( "database/sql" "fmt" "github.com/chaseWilliams/family-map/lib/datagen/external" "github.com/chaseWilliams/family-map/lib/models" "github.com/chaseWilliams/family-map/lib/util" "math/rand" ) /* Person is a family construct that represents a singular person in a family tree */ type Person struct { model models.Person events []models.Event father *Person mother *Person spouses []*Person children []*Person straight bool marriageYears []int divorceYears []int married bool birthYear int // this is in simulation time, not in AD deathYear int } // TODO // create events // instead of divorce on death, widow instead func (f *Person) createEvent(name string, year int) models.Event { var city external.City if len(f.events) == 0 { city = external.RandomCity() } else { recentEvent := f.events[len(f.events)-1] city = external.RandomCloseCity(recentEvent.Latitude, recentEvent.Longitude) } event := models.Event{ EventID: util.RandomID(), PersonID: f.model.PersonID, Latitude: city.Latitude, Longitude: city.Longitude, Country: city.Country, City: city.City, EventType: name, Year: year, } f.events = append(f.events, event) return event } func (f *Person) createMirrorEvent(event models.Event) { event.EventID = util.RandomID() event.PersonID = f.model.PersonID f.events = append(f.events, event) } /* NumEvents returns the person's number of events */ func (f Person) NumEvents() int { return len(f.events) } /* Save will persist the person and their events in the database */ func (f *Person) Save(username string) (err error) { f.model.Username = username err = f.model.Save() if err != nil { return } for _, event := range f.events { event.Username = username err = event.Save() if err != nil { return } } return } /* Dies will appropriately set the Person as dead at given year */ func (f *Person) Dies(year int) { f.deathYear = year f.createEvent("DEATH", year) } /* Born will set the person's birth year and create the birth event */ func (f *Person) Born(year int) { 
f.birthYear = year f.createEvent("BIRTH", year) } /* Marry will set appropriate fields for Person now being married. */ func (f *Person) Marry(spouse *Person, year int) { if f.married { panic(fmt.Sprintf("Person is already married\n%v", *f)) } f.spouses = append(f.spouses, spouse) f.marriageYears = append(f.marriageYears, year) f.married = true f.model.SpouseID = sql.NullString{spouse.model.PersonID, true} event := f.createEvent("MARRIAGE", year) spouse.spouses = append(spouse.spouses, f) spouse.marriageYears = append(spouse.marriageYears, year) spouse.married = true spouse.model.SpouseID = sql.NullString{f.model.PersonID, true} spouse.createMirrorEvent(event) } /* Divorce will set appropriate fields for Person getting divorced. */ func (f *Person) Divorce(year int) { if !f.married { panic(fmt.Sprintf("Person is not married\n%v", *f)) } spouse, err := f.CurrSpouse() if err != nil { panic(err) } f.divorceYears = append(f.divorceYears, year) f.married = false f.model.SpouseID = sql.NullString{"", false} event := f.createEvent("DIVORCE", year) spouse.divorceYears = append(spouse.divorceYears, year) spouse.married = false spouse.model.SpouseID = sql.NullString{"", false} spouse.createMirrorEvent(event) } /* HaveChild will edit the person's children and add the newborn event */ func (f *Person) HaveChild(child *Person, year int) { spouse, err := f.CurrSpouse() if err != nil { panic(err) } f.children = append(f.children, child) spouse.children = append(spouse.children, child) event := f.createEvent("NEWBORN", year) spouse.createMirrorEvent(event) } /* HaveParents will set the parents of the person */ func (f *Person) HaveParents(father *Person, mother *Person) { f.father = father f.mother = mother f.model.FatherID = sql.NullString{father.model.PersonID, true} f.model.MotherID = sql.NullString{mother.model.PersonID, true} } /* IsDead returns if the person is dead or not */ func (f Person) IsDead() bool { return f.deathYear != -1 } /* IsMarried returns if the person is 
married or not */ func (f Person) IsMarried() bool { return f.married } /* IsStraight returns if the person is straight or not */ func (f Person) IsStraight() bool { return f.straight } /* Gender will return the person's gender */ func (f Person) Gender() string { return f.model.Gender } /* Age returns the person's year, given a year */ func (f Person) Age(year int) int { return year - f.birthYear } /* DeathYear returns the person's DeathYear, which is -1 if the person isn't dead yet */ func (f Person) DeathYear() int { return f.deathYear } /* Spouses returns a slice of the person's spouses in chronological order */ func (f Person) Spouses() []*Person { return f.spouses } /* CurrSpouse will return the person's current spouse, and return an error if person isn't married */ func (f Person) CurrSpouse() (spouse *Person, err error) { if !f.married { return nil, fmt.Errorf("person is not married:\n%v", f) } return f.spouses[len(f.spouses)-1], nil } /* Children will return a map of all spouses -> slice of children */ func (f Person) Children() (m map[*Person][]*Person) { m = make(map[*Person][]*Person) for _, spouse := range f.spouses { for _, child := range f.children { if child.father == spouse || child.mother == spouse { if _, ok := m[spouse]; !ok { m[spouse] = make([]*Person, 0) } m[spouse] = append(m[spouse], child) } } } return } /* MarriageYears will return the person's marriage years */ func (f Person) MarriageYears() []int { return f.marriageYears } /* DivorceYears returns the person's divorce years */ func (f Person) DivorceYears() []int { return f.divorceYears } /* Generation is what the name implies, and represented by a slice of Person pointers, in order to keep everything mutable. 
*/ type Generation []*Person /* AllDead will returns whether or not everyone in the generation is dead */ func (g Generation) AllDead() bool { if g == nil || len(g) == 0 { return true } for _, p := range g { if !p.IsDead() { return false } } return true } /* Population is what the name implies, and represented by a slice of Generations */ type Population []Generation /* GetAlive returns a slice of all alive people, irrespective of their generation */ func (pop *Population) GetAlive() []*Person { people := make([]*Person, 0) for _, gen := range *pop { for _, p := range gen { if !p.IsDead() { people = append(people, p) } } } return people } /* AddPerson will add the person to the proper generation in the population */ func (pop *Population) AddPerson(f *Person) { generation := 0 for i, gen := range *pop { for _, p := range gen { if (p == f.mother || p == f.father) && i >= generation { generation = i + 1 } } } if generation >= len(*pop) { // should only need to add one more generation *pop = append(*pop, make(Generation, 0)) } (*pop)[generation] = append((*pop)[generation], f) } /* AreFamily returns whether or not the two people are family members */ func AreFamily(a *Person, b *Person) bool { if a == b { return false } // are parents or siblings x removed if recursiveAreParentsOrSiblings(a, b) { return true } // are spouses for _, spouse := range a.spouses { if spouse == b { return true } if recursiveAreParentsOrSiblings(spouse, b) { return true } } for _, spouse := range b.spouses { if spouse == a { return true } if recursiveAreParentsOrSiblings(spouse, a) { return true } } // are cousins for _, parentA := range []*Person{a.mother, a.father} { if parentA == nil { continue } for _, parentB := range []*Person{b.mother, b.father} { if parentB == nil { continue } if areSiblingsOrParents(parentA, parentB) { return true } } } return false } func recursiveAreParentsOrSiblings(a *Person, b *Person) bool { // recursion up the tree if goUpTree(a, b) || goUpTree(b, a) { return 
true } // recusion down the tree if goDownTree(a, b) || goDownTree(b, a) { return true } return false } func areSiblingsOrParents(a *Person, b *Person) bool { // is either one parents of the other if a.mother == b || a.father == b || b.mother == a || b.father == a { return true } // are siblings if (a.mother == b.mother && a.mother != nil) || (a.father == b.father && a.father != nil) { return true } return false } func goUpTree(a *Person, b *Person) bool { if a == nil || b == nil { return false } if areSiblingsOrParents(a, b) { return true } if goUpTree(a.father, b) || goUpTree(a.mother, b) { return true } return false } func goDownTree(a *Person, b *Person) bool { if a == nil || b == nil { return false } if areSiblingsOrParents(a, b)
for _, child := range a.children { if goDownTree(child, b) { return true } } return false } /* RandomFamily returns a random family, determined by a random person at generation numGen and all family members of that person. The person will be the first person in the returned slice of people. */ func (pop Population) RandomFamily(personModel models.Person, numGen int) []*Person { genIndex := numGen - 1 if len(pop) < genIndex { panic(fmt.Sprintf( "Population has %d generations, cannot create family at %d generation", len(pop), numGen, )) } generation := pop[genIndex] person := generation[rand.Intn(len(generation))] // overrides the person's attributes in favor of what's provided person.model.PersonID = personModel.PersonID person.model.Username = personModel.Username person.model.FirstName = personModel.FirstName person.model.LastName = personModel.LastName person.model.Gender = personModel.Gender for i := range person.events { person.events[i].PersonID = personModel.PersonID } familyMembers := []*Person{person} for _, gen := range pop { for _, stranger := range gen { if AreFamily(person, stranger) { familyMembers = append(familyMembers, stranger) } } } return familyMembers } /* CreatePopulation will create a Population with one generation that has the specified people in it. */ func CreatePopulation(numPeople int) Population { pop := make(Population, 1) pop[0] = make(Generation, 0, numPeople) // create the born people, born at year 0 for i := 0; i < numPeople; i++ { pop[0] = append(pop[0], RandomPerson(0)) } return pop }
{ return true }
conditional_block
family_structs.go
package family import ( "database/sql" "fmt" "github.com/chaseWilliams/family-map/lib/datagen/external" "github.com/chaseWilliams/family-map/lib/models" "github.com/chaseWilliams/family-map/lib/util" "math/rand" ) /* Person is a family construct that represents a singular person in a family tree */ type Person struct { model models.Person events []models.Event father *Person mother *Person spouses []*Person children []*Person straight bool marriageYears []int divorceYears []int married bool birthYear int // this is in simulation time, not in AD deathYear int } // TODO // create events // instead of divorce on death, widow instead func (f *Person) createEvent(name string, year int) models.Event { var city external.City if len(f.events) == 0 { city = external.RandomCity() } else { recentEvent := f.events[len(f.events)-1] city = external.RandomCloseCity(recentEvent.Latitude, recentEvent.Longitude) } event := models.Event{ EventID: util.RandomID(), PersonID: f.model.PersonID, Latitude: city.Latitude, Longitude: city.Longitude, Country: city.Country, City: city.City, EventType: name, Year: year, } f.events = append(f.events, event) return event } func (f *Person) createMirrorEvent(event models.Event) { event.EventID = util.RandomID() event.PersonID = f.model.PersonID f.events = append(f.events, event) } /* NumEvents returns the person's number of events */ func (f Person) NumEvents() int { return len(f.events) } /* Save will persist the person and their events in the database */ func (f *Person) Save(username string) (err error) { f.model.Username = username err = f.model.Save() if err != nil { return } for _, event := range f.events { event.Username = username err = event.Save() if err != nil { return } } return } /* Dies will appropriately set the Person as dead at given year */ func (f *Person) Dies(year int) { f.deathYear = year f.createEvent("DEATH", year) } /* Born will set the person's birth year and create the birth event */ func (f *Person) Born(year int) { 
f.birthYear = year f.createEvent("BIRTH", year) } /* Marry will set appropriate fields for Person now being married. */ func (f *Person) Marry(spouse *Person, year int) { if f.married { panic(fmt.Sprintf("Person is already married\n%v", *f)) } f.spouses = append(f.spouses, spouse) f.marriageYears = append(f.marriageYears, year) f.married = true f.model.SpouseID = sql.NullString{spouse.model.PersonID, true} event := f.createEvent("MARRIAGE", year) spouse.spouses = append(spouse.spouses, f) spouse.marriageYears = append(spouse.marriageYears, year) spouse.married = true spouse.model.SpouseID = sql.NullString{f.model.PersonID, true} spouse.createMirrorEvent(event) } /* Divorce will set appropriate fields for Person getting divorced. */ func (f *Person) Divorce(year int) { if !f.married { panic(fmt.Sprintf("Person is not married\n%v", *f)) } spouse, err := f.CurrSpouse() if err != nil { panic(err) } f.divorceYears = append(f.divorceYears, year) f.married = false f.model.SpouseID = sql.NullString{"", false} event := f.createEvent("DIVORCE", year) spouse.divorceYears = append(spouse.divorceYears, year) spouse.married = false spouse.model.SpouseID = sql.NullString{"", false} spouse.createMirrorEvent(event) } /* HaveChild will edit the person's children and add the newborn event */ func (f *Person) HaveChild(child *Person, year int) { spouse, err := f.CurrSpouse() if err != nil { panic(err) } f.children = append(f.children, child) spouse.children = append(spouse.children, child) event := f.createEvent("NEWBORN", year) spouse.createMirrorEvent(event) } /* HaveParents will set the parents of the person */ func (f *Person) HaveParents(father *Person, mother *Person) { f.father = father f.mother = mother f.model.FatherID = sql.NullString{father.model.PersonID, true} f.model.MotherID = sql.NullString{mother.model.PersonID, true} } /* IsDead returns if the person is dead or not */ func (f Person)
() bool { return f.deathYear != -1 } /* IsMarried returns if the person is married or not */ func (f Person) IsMarried() bool { return f.married } /* IsStraight returns if the person is straight or not */ func (f Person) IsStraight() bool { return f.straight } /* Gender will return the person's gender */ func (f Person) Gender() string { return f.model.Gender } /* Age returns the person's year, given a year */ func (f Person) Age(year int) int { return year - f.birthYear } /* DeathYear returns the person's DeathYear, which is -1 if the person isn't dead yet */ func (f Person) DeathYear() int { return f.deathYear } /* Spouses returns a slice of the person's spouses in chronological order */ func (f Person) Spouses() []*Person { return f.spouses } /* CurrSpouse will return the person's current spouse, and return an error if person isn't married */ func (f Person) CurrSpouse() (spouse *Person, err error) { if !f.married { return nil, fmt.Errorf("person is not married:\n%v", f) } return f.spouses[len(f.spouses)-1], nil } /* Children will return a map of all spouses -> slice of children */ func (f Person) Children() (m map[*Person][]*Person) { m = make(map[*Person][]*Person) for _, spouse := range f.spouses { for _, child := range f.children { if child.father == spouse || child.mother == spouse { if _, ok := m[spouse]; !ok { m[spouse] = make([]*Person, 0) } m[spouse] = append(m[spouse], child) } } } return } /* MarriageYears will return the person's marriage years */ func (f Person) MarriageYears() []int { return f.marriageYears } /* DivorceYears returns the person's divorce years */ func (f Person) DivorceYears() []int { return f.divorceYears } /* Generation is what the name implies, and represented by a slice of Person pointers, in order to keep everything mutable. 
*/ type Generation []*Person /* AllDead will returns whether or not everyone in the generation is dead */ func (g Generation) AllDead() bool { if g == nil || len(g) == 0 { return true } for _, p := range g { if !p.IsDead() { return false } } return true } /* Population is what the name implies, and represented by a slice of Generations */ type Population []Generation /* GetAlive returns a slice of all alive people, irrespective of their generation */ func (pop *Population) GetAlive() []*Person { people := make([]*Person, 0) for _, gen := range *pop { for _, p := range gen { if !p.IsDead() { people = append(people, p) } } } return people } /* AddPerson will add the person to the proper generation in the population */ func (pop *Population) AddPerson(f *Person) { generation := 0 for i, gen := range *pop { for _, p := range gen { if (p == f.mother || p == f.father) && i >= generation { generation = i + 1 } } } if generation >= len(*pop) { // should only need to add one more generation *pop = append(*pop, make(Generation, 0)) } (*pop)[generation] = append((*pop)[generation], f) } /* AreFamily returns whether or not the two people are family members */ func AreFamily(a *Person, b *Person) bool { if a == b { return false } // are parents or siblings x removed if recursiveAreParentsOrSiblings(a, b) { return true } // are spouses for _, spouse := range a.spouses { if spouse == b { return true } if recursiveAreParentsOrSiblings(spouse, b) { return true } } for _, spouse := range b.spouses { if spouse == a { return true } if recursiveAreParentsOrSiblings(spouse, a) { return true } } // are cousins for _, parentA := range []*Person{a.mother, a.father} { if parentA == nil { continue } for _, parentB := range []*Person{b.mother, b.father} { if parentB == nil { continue } if areSiblingsOrParents(parentA, parentB) { return true } } } return false } func recursiveAreParentsOrSiblings(a *Person, b *Person) bool { // recursion up the tree if goUpTree(a, b) || goUpTree(b, a) { return 
true } // recusion down the tree if goDownTree(a, b) || goDownTree(b, a) { return true } return false } func areSiblingsOrParents(a *Person, b *Person) bool { // is either one parents of the other if a.mother == b || a.father == b || b.mother == a || b.father == a { return true } // are siblings if (a.mother == b.mother && a.mother != nil) || (a.father == b.father && a.father != nil) { return true } return false } func goUpTree(a *Person, b *Person) bool { if a == nil || b == nil { return false } if areSiblingsOrParents(a, b) { return true } if goUpTree(a.father, b) || goUpTree(a.mother, b) { return true } return false } func goDownTree(a *Person, b *Person) bool { if a == nil || b == nil { return false } if areSiblingsOrParents(a, b) { return true } for _, child := range a.children { if goDownTree(child, b) { return true } } return false } /* RandomFamily returns a random family, determined by a random person at generation numGen and all family members of that person. The person will be the first person in the returned slice of people. 
*/ func (pop Population) RandomFamily(personModel models.Person, numGen int) []*Person { genIndex := numGen - 1 if len(pop) < genIndex { panic(fmt.Sprintf( "Population has %d generations, cannot create family at %d generation", len(pop), numGen, )) } generation := pop[genIndex] person := generation[rand.Intn(len(generation))] // overrides the person's attributes in favor of what's provided person.model.PersonID = personModel.PersonID person.model.Username = personModel.Username person.model.FirstName = personModel.FirstName person.model.LastName = personModel.LastName person.model.Gender = personModel.Gender for i := range person.events { person.events[i].PersonID = personModel.PersonID } familyMembers := []*Person{person} for _, gen := range pop { for _, stranger := range gen { if AreFamily(person, stranger) { familyMembers = append(familyMembers, stranger) } } } return familyMembers } /* CreatePopulation will create a Population with one generation that has the specified people in it. */ func CreatePopulation(numPeople int) Population { pop := make(Population, 1) pop[0] = make(Generation, 0, numPeople) // create the born people, born at year 0 for i := 0; i < numPeople; i++ { pop[0] = append(pop[0], RandomPerson(0)) } return pop }
IsDead
identifier_name
family_structs.go
package family import ( "database/sql" "fmt" "github.com/chaseWilliams/family-map/lib/datagen/external" "github.com/chaseWilliams/family-map/lib/models" "github.com/chaseWilliams/family-map/lib/util" "math/rand" ) /* Person is a family construct that represents a singular person in a family tree */ type Person struct { model models.Person events []models.Event father *Person mother *Person spouses []*Person children []*Person straight bool marriageYears []int divorceYears []int married bool birthYear int // this is in simulation time, not in AD deathYear int } // TODO // create events // instead of divorce on death, widow instead func (f *Person) createEvent(name string, year int) models.Event { var city external.City if len(f.events) == 0 { city = external.RandomCity() } else { recentEvent := f.events[len(f.events)-1] city = external.RandomCloseCity(recentEvent.Latitude, recentEvent.Longitude) } event := models.Event{ EventID: util.RandomID(), PersonID: f.model.PersonID, Latitude: city.Latitude, Longitude: city.Longitude, Country: city.Country, City: city.City, EventType: name, Year: year, } f.events = append(f.events, event) return event } func (f *Person) createMirrorEvent(event models.Event) { event.EventID = util.RandomID() event.PersonID = f.model.PersonID f.events = append(f.events, event) } /* NumEvents returns the person's number of events */ func (f Person) NumEvents() int { return len(f.events) } /* Save will persist the person and their events in the database */ func (f *Person) Save(username string) (err error) { f.model.Username = username err = f.model.Save() if err != nil { return } for _, event := range f.events { event.Username = username err = event.Save() if err != nil { return } } return } /* Dies will appropriately set the Person as dead at given year */ func (f *Person) Dies(year int) { f.deathYear = year f.createEvent("DEATH", year) } /* Born will set the person's birth year and create the birth event */ func (f *Person) Born(year int) { 
f.birthYear = year f.createEvent("BIRTH", year) } /* Marry will set appropriate fields for Person now being married. */ func (f *Person) Marry(spouse *Person, year int) { if f.married { panic(fmt.Sprintf("Person is already married\n%v", *f)) } f.spouses = append(f.spouses, spouse) f.marriageYears = append(f.marriageYears, year) f.married = true f.model.SpouseID = sql.NullString{spouse.model.PersonID, true} event := f.createEvent("MARRIAGE", year) spouse.spouses = append(spouse.spouses, f) spouse.marriageYears = append(spouse.marriageYears, year) spouse.married = true spouse.model.SpouseID = sql.NullString{f.model.PersonID, true} spouse.createMirrorEvent(event) } /* Divorce will set appropriate fields for Person getting divorced. */ func (f *Person) Divorce(year int) { if !f.married { panic(fmt.Sprintf("Person is not married\n%v", *f)) } spouse, err := f.CurrSpouse() if err != nil { panic(err) } f.divorceYears = append(f.divorceYears, year) f.married = false f.model.SpouseID = sql.NullString{"", false} event := f.createEvent("DIVORCE", year) spouse.divorceYears = append(spouse.divorceYears, year) spouse.married = false spouse.model.SpouseID = sql.NullString{"", false} spouse.createMirrorEvent(event) } /* HaveChild will edit the person's children and add the newborn event */ func (f *Person) HaveChild(child *Person, year int) { spouse, err := f.CurrSpouse() if err != nil { panic(err) } f.children = append(f.children, child) spouse.children = append(spouse.children, child) event := f.createEvent("NEWBORN", year) spouse.createMirrorEvent(event) } /* HaveParents will set the parents of the person */ func (f *Person) HaveParents(father *Person, mother *Person) { f.father = father f.mother = mother f.model.FatherID = sql.NullString{father.model.PersonID, true} f.model.MotherID = sql.NullString{mother.model.PersonID, true} } /* IsDead returns if the person is dead or not */ func (f Person) IsDead() bool { return f.deathYear != -1 } /* IsMarried returns if the person is 
married or not */ func (f Person) IsMarried() bool { return f.married } /* IsStraight returns if the person is straight or not */ func (f Person) IsStraight() bool { return f.straight } /* Gender will return the person's gender */ func (f Person) Gender() string { return f.model.Gender } /* Age returns the person's year, given a year */ func (f Person) Age(year int) int { return year - f.birthYear } /* DeathYear returns the person's DeathYear, which is -1 if the person isn't dead yet */ func (f Person) DeathYear() int
/* Spouses returns a slice of the person's spouses in chronological order */ func (f Person) Spouses() []*Person { return f.spouses } /* CurrSpouse will return the person's current spouse, and return an error if person isn't married */ func (f Person) CurrSpouse() (spouse *Person, err error) { if !f.married { return nil, fmt.Errorf("person is not married:\n%v", f) } return f.spouses[len(f.spouses)-1], nil } /* Children will return a map of all spouses -> slice of children */ func (f Person) Children() (m map[*Person][]*Person) { m = make(map[*Person][]*Person) for _, spouse := range f.spouses { for _, child := range f.children { if child.father == spouse || child.mother == spouse { if _, ok := m[spouse]; !ok { m[spouse] = make([]*Person, 0) } m[spouse] = append(m[spouse], child) } } } return } /* MarriageYears will return the person's marriage years */ func (f Person) MarriageYears() []int { return f.marriageYears } /* DivorceYears returns the person's divorce years */ func (f Person) DivorceYears() []int { return f.divorceYears } /* Generation is what the name implies, and represented by a slice of Person pointers, in order to keep everything mutable. 
*/ type Generation []*Person /* AllDead will returns whether or not everyone in the generation is dead */ func (g Generation) AllDead() bool { if g == nil || len(g) == 0 { return true } for _, p := range g { if !p.IsDead() { return false } } return true } /* Population is what the name implies, and represented by a slice of Generations */ type Population []Generation /* GetAlive returns a slice of all alive people, irrespective of their generation */ func (pop *Population) GetAlive() []*Person { people := make([]*Person, 0) for _, gen := range *pop { for _, p := range gen { if !p.IsDead() { people = append(people, p) } } } return people } /* AddPerson will add the person to the proper generation in the population */ func (pop *Population) AddPerson(f *Person) { generation := 0 for i, gen := range *pop { for _, p := range gen { if (p == f.mother || p == f.father) && i >= generation { generation = i + 1 } } } if generation >= len(*pop) { // should only need to add one more generation *pop = append(*pop, make(Generation, 0)) } (*pop)[generation] = append((*pop)[generation], f) } /* AreFamily returns whether or not the two people are family members */ func AreFamily(a *Person, b *Person) bool { if a == b { return false } // are parents or siblings x removed if recursiveAreParentsOrSiblings(a, b) { return true } // are spouses for _, spouse := range a.spouses { if spouse == b { return true } if recursiveAreParentsOrSiblings(spouse, b) { return true } } for _, spouse := range b.spouses { if spouse == a { return true } if recursiveAreParentsOrSiblings(spouse, a) { return true } } // are cousins for _, parentA := range []*Person{a.mother, a.father} { if parentA == nil { continue } for _, parentB := range []*Person{b.mother, b.father} { if parentB == nil { continue } if areSiblingsOrParents(parentA, parentB) { return true } } } return false } func recursiveAreParentsOrSiblings(a *Person, b *Person) bool { // recursion up the tree if goUpTree(a, b) || goUpTree(b, a) { return 
true } // recusion down the tree if goDownTree(a, b) || goDownTree(b, a) { return true } return false } func areSiblingsOrParents(a *Person, b *Person) bool { // is either one parents of the other if a.mother == b || a.father == b || b.mother == a || b.father == a { return true } // are siblings if (a.mother == b.mother && a.mother != nil) || (a.father == b.father && a.father != nil) { return true } return false } func goUpTree(a *Person, b *Person) bool { if a == nil || b == nil { return false } if areSiblingsOrParents(a, b) { return true } if goUpTree(a.father, b) || goUpTree(a.mother, b) { return true } return false } func goDownTree(a *Person, b *Person) bool { if a == nil || b == nil { return false } if areSiblingsOrParents(a, b) { return true } for _, child := range a.children { if goDownTree(child, b) { return true } } return false } /* RandomFamily returns a random family, determined by a random person at generation numGen and all family members of that person. The person will be the first person in the returned slice of people. 
*/ func (pop Population) RandomFamily(personModel models.Person, numGen int) []*Person { genIndex := numGen - 1 if len(pop) < genIndex { panic(fmt.Sprintf( "Population has %d generations, cannot create family at %d generation", len(pop), numGen, )) } generation := pop[genIndex] person := generation[rand.Intn(len(generation))] // overrides the person's attributes in favor of what's provided person.model.PersonID = personModel.PersonID person.model.Username = personModel.Username person.model.FirstName = personModel.FirstName person.model.LastName = personModel.LastName person.model.Gender = personModel.Gender for i := range person.events { person.events[i].PersonID = personModel.PersonID } familyMembers := []*Person{person} for _, gen := range pop { for _, stranger := range gen { if AreFamily(person, stranger) { familyMembers = append(familyMembers, stranger) } } } return familyMembers } /* CreatePopulation will create a Population with one generation that has the specified people in it. */ func CreatePopulation(numPeople int) Population { pop := make(Population, 1) pop[0] = make(Generation, 0, numPeople) // create the born people, born at year 0 for i := 0; i < numPeople; i++ { pop[0] = append(pop[0], RandomPerson(0)) } return pop }
{ return f.deathYear }
identifier_body
medicalbillregistersummary.component.ts
import { Component, OnInit, ViewChild, ElementRef } from '@angular/core'; import { FormControl, NgForm } from '@angular/forms'; import { CommonService } from '../../shared/common.service'; import { BillingPharmacy } from '../../Models/ViewModels/BillingPharmacy_master.model'; import Swal from 'sweetalert2'; import * as XLSX from 'xlsx'; import html2canvas from 'html2canvas'; import { MatTableDataSource, MAT_DATE_FORMATS, DateAdapter, MAT_DATE_LOCALE } from '@angular/material'; import { MatSort } from '@angular/material/sort'; import { element } from '@angular/core/src/render3'; import { MomentDateAdapter } from '@angular/material-moment-adapter'; export const MY_FORMATS = { parse: { dateInput: 'DD/MM/YYYY', }, display: { dateInput: 'DD-MMM-YYYY', monthYearLabel: 'MMM YYYY', dateA11yLabel: 'DD-MM-YYYY', monthYearA11yLabel: 'MMMM YYYY', }, } @Component({ selector: 'app-medicalbillregistersummary', templateUrl: './medicalbillregistersummary.component.html', styleUrls: ['./medicalbillregistersummary.component.less'], providers: [ { provide: DateAdapter, useClass: MomentDateAdapter, deps: [MAT_DATE_LOCALE] }, { provide: MAT_DATE_FORMATS, useValue: MY_FORMATS }, ] }) export class MedicalbillregistersummaryComponent implements OnInit { MFromDate; @ViewChild('MedForm') Form: NgForm maxDate(): string { return new Date().toISOString().split('T')[0] } //maxDate2(): string { // return new Date().toISOString().split('T')[0] //} constructor(public commonService: CommonService<BillingPharmacy>) { } MedicalBillRegisterTable: boolean = false; MedicalBillSummaryTable: boolean = false; MBS_label: boolean = false; date = new FormControl(new Date()); ngOnInit() { } applyFilter(filterValue: string) { this.dataSource.filter = filterValue.trim().toLowerCase(); } applyFilter1(filterValue: string) { this.dataSource1.filter = filterValue.trim().toLowerCase(); } displayedColumn: string[] = ['BillNo', 'BillDate', 'PatientName', 'item', 'UOM', 'Quantity', 'Rate', 'ProductValue', 'Discount', 
'DiscountAmount', 'TaxDescription','GST', 'GSTValue', 'TotalCost']; dataSource = new MatTableDataSource(); displayedColumnsummary: string[] = ['Item', 'Uom', 'Quan', 'Irate', 'Tvalue', 'IDis', 'Damt', 'IGst', 'Gamt', 'TotalCost1']; dataSource1 = new MatTableDataSource(); @ViewChild(MatSort) sort: MatSort; minToDate = new Date(); CheckToDate() { debugger; this.minToDate = this.MFromDate; } MToDate; M_FromDat; M_ToDat; changeValueTotal(id, element, property: string) { var resTotal = (element.Quantity * element.Rate) + element.GSTValue - element.DiscountAmount; resTotal = parseFloat(resTotal.toFixed(2)); element.TotalCost = resTotal; } getTotalProdVal() { var totProdVal = this.commonService.data.getRegisterDetail.map(t => t.ProductValue).reduce((acc, value) => acc + value, 0); totProdVal = parseFloat(totProdVal.toFixed(2)); return totProdVal; } getDiscountAmount() { var totDiscntAmt = this.commonService.data.getRegisterDetail.map(t => t.DiscountAmount).reduce((acc, value) => acc + value, 0); totDiscntAmt = parseFloat(totDiscntAmt.toFixed(2)); return totDiscntAmt; } getGSTAmount() { var totGSTAmt = this.commonService.data.getRegisterDetail.map(t => t.GSTValue).reduce((acc, value) => acc + value, 0); totGSTAmt = parseFloat(totGSTAmt.toFixed(2)); return totGSTAmt; } getTotalCostamount() { var totCstAmt = this.commonService.data.getRegisterDetail.map(t => t.TotalCost).reduce((acc, value) => acc + value, 0); totCstAmt = parseFloat(totCstAmt.toFixed(2)); return totCstAmt; } getTotalCostamount1() { var totCstAmt1 = this.commonService.data.getSummaryDet.map(t => t.TotalCost1).reduce((acc, value) => acc + value, 0); totCstAmt1 = parseFloat(totCstAmt1.toFixed(2)); return totCstAmt1; } getTotalProdVal1() { var totProdVal1 = this.commonService.data.getSummaryDet.map(t => t.Tvalue).reduce((acc, value) => acc + value, 0); totProdVal1 = parseFloat(totProdVal1.toFixed(2)); return totProdVal1; } getDiscountAmount1() { var totDiscntAmt1 = this.commonService.data.getSummaryDet.map(t => 
t.Damt).reduce((acc, value) => acc + value, 0); totDiscntAmt1 = parseFloat(totDiscntAmt1.toFixed(2)); return totDiscntAmt1; } getGSTAmount1() { var totGSTAmt1 = this.commonService.data.getSummaryDet.map(t => t.Gamt).reduce((acc, value) => acc + value, 0); totGSTAmt1 = parseFloat(totGSTAmt1.toFixed(2)); return totGSTAmt1; } @ViewChild('RegTable') RegTable: ElementRef; @ViewChild('SummaryTable') SummaryTable: ElementRef; @ViewChild('table') table: ElementRef; @ViewChild('table1') table1: ElementRef; fireEvent() { debugger; const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table.nativeElement); const wb: XLSX.WorkBook = XLSX.utils.book_new(); XLSX.utils.book_append_sheet(wb, ws, 'Sheet1'); XLSX.writeFile(wb, 'Medical_Bill_Register.xlsx'); } captureScreen() { var data = document.getElementById('RegTable'); html2canvas(data).then(canvas => { var imgWidth = 239; var pageHeight = 55; //var width = data.internal.pageSize.getWidth(); //var height = data.internal.pageSize.getHeight(); var imgHeight = canvas.height * imgWidth / canvas.width; var heightLeft = imgHeight; const contentDataURL = canvas.toDataURL('image/PDF') //let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF var position = 5; //pdf.addImage(contentDataURL, 'PDF', 0, position, imgWidth, imgHeight) //pdf.save('Medical_Bill_Register.pdf'); // Generated PDF }); //const tabletojson = require('tabletojson'); //var table = tabletojson($('#table-id').get(0)); //var doc = new jspdf('l', 'pt', 'letter', true); //$.each(table, function (i, row) { // $.each(row, function (j, cell) { // if (j == "email" | j == 1) { // doc.cell(1, 10, 190, 20, cell, i); // } // else { // doc.cell(1, 10, 90, 20, cell, i); // } // }); //}); //doc.save('Safaa.pdf'); //var doc = new jspdf(); //var specialElementHandlers = { // '#hidediv': function (element, render) { return true; } //}; //doc.fromHTML($('#RegTable').get(0), 20, 20, { // 'width': 500, // 'elementHandlers': specialElementHandlers //}); //doc.save('Test.pdf'); } 
captureScreen1() { var data = document.getElementById('SummaryTable'); html2canvas(data).then(canvas => { var imgWidth = 239; var pageHeight = 55; var imgHeight = canvas.height * imgWidth / canvas.width; var heightLeft = imgHeight; const contentDataURL = canvas.toDataURL('image/PDF') //let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF var position = 5; //pdf.addImage(contentDataURL, 'PDF', 1, position, imgWidth, imgHeight) //pdf.save('Medical_Bill_Summary.pdf'); // Generated PDF }); } fireEvent1() { const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table1.nativeElement); const wb: XLSX.WorkBook = XLSX.utils.book_new(); XLSX.utils.book_append_sheet(wb, ws, 'Sheet1'); XLSX.writeFile(wb, 'Medical_Bill_Summary.xlsx'); } backdrop; cancelblock; Cancel() { debugger; if (this.MFromDate != null || this.MToDate != null) { this.backdrop = 'block'; this.cancelblock = 'block'; } else { this.Form.onReset(); } } modalClose() { this.backdrop = 'none'; this.cancelblock = 'none'; } CancelNo() { this.backdrop = 'none'; this.cancelblock = 'none'; } CancelYes()
onSubmit(form: NgForm) { debugger; if (form.valid) { this.M_FromDat = this.MFromDate.toISOString(); this.M_ToDat = this.MToDate.toISOString(); this.commonService.getListOfData("MedicalBillRegister/getMedBillDet/" + this.M_FromDat + '/' + this.M_ToDat + '/' + parseInt(localStorage.getItem("CompanyID"))) .subscribe(data => { debugger; if (data.getRegisterDetail != null && data.getRegisterDetail.length != 0) { debugger; if (data.getRegisterDetail != null) { for (var i = 0; i < data.getRegisterDetail.length; i++) { debugger; var res = ((data.getRegisterDetail[i].Quantity * data.getRegisterDetail[i].Rate) + data.getRegisterDetail[i].GSTValue) - data.getRegisterDetail[i].DiscountAmount; data.getRegisterDetail[i].TotalCost = res; } console.log(data.getRegisterDetail); } if (data.getSummaryDet != null) { for (var i = 0; i < data.getSummaryDet.length; i++) { debugger; var rslt = ((data.getSummaryDet[i].Quan * data.getSummaryDet[i].Irate) + data.getSummaryDet[i].Gamt) - data.getSummaryDet[i].Damt; data.getSummaryDet[i].TotalCost1 = rslt; } console.log(data.getRegisterDetail1); } this.commonService.data = data; this.dataSource.data = data.getRegisterDetail; this.dataSource1.data = data.getSummaryDet; debugger; this.dataSource.sort = this.sort; this.dataSource1.sort = this.sort; this.MedicalBillRegisterTable = true; this.MedicalBillSummaryTable = true; this.MBS_label = true; } else { debugger; this.MedicalBillRegisterTable = false; this.MedicalBillSummaryTable = false; this.MBS_label = false; Swal.fire({ type: 'warning', title: 'No Data Found', }) } }); } } }
{ debugger; this.backdrop = 'none'; this.cancelblock = 'none'; //this.MFromDate = ''; //this.MToDate = ''; this.MedicalBillRegisterTable = false; this.MedicalBillSummaryTable = false; this.MBS_label = false; }
identifier_body
medicalbillregistersummary.component.ts
import { Component, OnInit, ViewChild, ElementRef } from '@angular/core'; import { FormControl, NgForm } from '@angular/forms'; import { CommonService } from '../../shared/common.service'; import { BillingPharmacy } from '../../Models/ViewModels/BillingPharmacy_master.model'; import Swal from 'sweetalert2'; import * as XLSX from 'xlsx'; import html2canvas from 'html2canvas'; import { MatTableDataSource, MAT_DATE_FORMATS, DateAdapter, MAT_DATE_LOCALE } from '@angular/material'; import { MatSort } from '@angular/material/sort'; import { element } from '@angular/core/src/render3'; import { MomentDateAdapter } from '@angular/material-moment-adapter'; export const MY_FORMATS = { parse: { dateInput: 'DD/MM/YYYY', }, display: { dateInput: 'DD-MMM-YYYY', monthYearLabel: 'MMM YYYY', dateA11yLabel: 'DD-MM-YYYY', monthYearA11yLabel: 'MMMM YYYY', }, } @Component({ selector: 'app-medicalbillregistersummary', templateUrl: './medicalbillregistersummary.component.html', styleUrls: ['./medicalbillregistersummary.component.less'], providers: [ { provide: DateAdapter, useClass: MomentDateAdapter, deps: [MAT_DATE_LOCALE] }, { provide: MAT_DATE_FORMATS, useValue: MY_FORMATS }, ] }) export class MedicalbillregistersummaryComponent implements OnInit { MFromDate; @ViewChild('MedForm') Form: NgForm maxDate(): string { return new Date().toISOString().split('T')[0] } //maxDate2(): string { // return new Date().toISOString().split('T')[0] //} constructor(public commonService: CommonService<BillingPharmacy>) { } MedicalBillRegisterTable: boolean = false; MedicalBillSummaryTable: boolean = false; MBS_label: boolean = false; date = new FormControl(new Date()); ngOnInit() { } applyFilter(filterValue: string) { this.dataSource.filter = filterValue.trim().toLowerCase(); } applyFilter1(filterValue: string) { this.dataSource1.filter = filterValue.trim().toLowerCase(); } displayedColumn: string[] = ['BillNo', 'BillDate', 'PatientName', 'item', 'UOM', 'Quantity', 'Rate', 'ProductValue', 'Discount', 
'DiscountAmount', 'TaxDescription','GST', 'GSTValue', 'TotalCost']; dataSource = new MatTableDataSource(); displayedColumnsummary: string[] = ['Item', 'Uom', 'Quan', 'Irate', 'Tvalue', 'IDis', 'Damt', 'IGst', 'Gamt', 'TotalCost1']; dataSource1 = new MatTableDataSource(); @ViewChild(MatSort) sort: MatSort; minToDate = new Date(); CheckToDate() { debugger; this.minToDate = this.MFromDate; } MToDate; M_FromDat; M_ToDat; changeValueTotal(id, element, property: string) { var resTotal = (element.Quantity * element.Rate) + element.GSTValue - element.DiscountAmount; resTotal = parseFloat(resTotal.toFixed(2)); element.TotalCost = resTotal; } getTotalProdVal() { var totProdVal = this.commonService.data.getRegisterDetail.map(t => t.ProductValue).reduce((acc, value) => acc + value, 0); totProdVal = parseFloat(totProdVal.toFixed(2)); return totProdVal; } getDiscountAmount() { var totDiscntAmt = this.commonService.data.getRegisterDetail.map(t => t.DiscountAmount).reduce((acc, value) => acc + value, 0); totDiscntAmt = parseFloat(totDiscntAmt.toFixed(2)); return totDiscntAmt; } getGSTAmount() { var totGSTAmt = this.commonService.data.getRegisterDetail.map(t => t.GSTValue).reduce((acc, value) => acc + value, 0); totGSTAmt = parseFloat(totGSTAmt.toFixed(2)); return totGSTAmt; } getTotalCostamount() { var totCstAmt = this.commonService.data.getRegisterDetail.map(t => t.TotalCost).reduce((acc, value) => acc + value, 0); totCstAmt = parseFloat(totCstAmt.toFixed(2)); return totCstAmt; } getTotalCostamount1() { var totCstAmt1 = this.commonService.data.getSummaryDet.map(t => t.TotalCost1).reduce((acc, value) => acc + value, 0); totCstAmt1 = parseFloat(totCstAmt1.toFixed(2)); return totCstAmt1; } getTotalProdVal1() { var totProdVal1 = this.commonService.data.getSummaryDet.map(t => t.Tvalue).reduce((acc, value) => acc + value, 0); totProdVal1 = parseFloat(totProdVal1.toFixed(2)); return totProdVal1; }
() { var totDiscntAmt1 = this.commonService.data.getSummaryDet.map(t => t.Damt).reduce((acc, value) => acc + value, 0); totDiscntAmt1 = parseFloat(totDiscntAmt1.toFixed(2)); return totDiscntAmt1; } getGSTAmount1() { var totGSTAmt1 = this.commonService.data.getSummaryDet.map(t => t.Gamt).reduce((acc, value) => acc + value, 0); totGSTAmt1 = parseFloat(totGSTAmt1.toFixed(2)); return totGSTAmt1; } @ViewChild('RegTable') RegTable: ElementRef; @ViewChild('SummaryTable') SummaryTable: ElementRef; @ViewChild('table') table: ElementRef; @ViewChild('table1') table1: ElementRef; fireEvent() { debugger; const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table.nativeElement); const wb: XLSX.WorkBook = XLSX.utils.book_new(); XLSX.utils.book_append_sheet(wb, ws, 'Sheet1'); XLSX.writeFile(wb, 'Medical_Bill_Register.xlsx'); } captureScreen() { var data = document.getElementById('RegTable'); html2canvas(data).then(canvas => { var imgWidth = 239; var pageHeight = 55; //var width = data.internal.pageSize.getWidth(); //var height = data.internal.pageSize.getHeight(); var imgHeight = canvas.height * imgWidth / canvas.width; var heightLeft = imgHeight; const contentDataURL = canvas.toDataURL('image/PDF') //let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF var position = 5; //pdf.addImage(contentDataURL, 'PDF', 0, position, imgWidth, imgHeight) //pdf.save('Medical_Bill_Register.pdf'); // Generated PDF }); //const tabletojson = require('tabletojson'); //var table = tabletojson($('#table-id').get(0)); //var doc = new jspdf('l', 'pt', 'letter', true); //$.each(table, function (i, row) { // $.each(row, function (j, cell) { // if (j == "email" | j == 1) { // doc.cell(1, 10, 190, 20, cell, i); // } // else { // doc.cell(1, 10, 90, 20, cell, i); // } // }); //}); //doc.save('Safaa.pdf'); //var doc = new jspdf(); //var specialElementHandlers = { // '#hidediv': function (element, render) { return true; } //}; //doc.fromHTML($('#RegTable').get(0), 20, 20, { // 'width': 500, // 
'elementHandlers': specialElementHandlers //}); //doc.save('Test.pdf'); } captureScreen1() { var data = document.getElementById('SummaryTable'); html2canvas(data).then(canvas => { var imgWidth = 239; var pageHeight = 55; var imgHeight = canvas.height * imgWidth / canvas.width; var heightLeft = imgHeight; const contentDataURL = canvas.toDataURL('image/PDF') //let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF var position = 5; //pdf.addImage(contentDataURL, 'PDF', 1, position, imgWidth, imgHeight) //pdf.save('Medical_Bill_Summary.pdf'); // Generated PDF }); } fireEvent1() { const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table1.nativeElement); const wb: XLSX.WorkBook = XLSX.utils.book_new(); XLSX.utils.book_append_sheet(wb, ws, 'Sheet1'); XLSX.writeFile(wb, 'Medical_Bill_Summary.xlsx'); } backdrop; cancelblock; Cancel() { debugger; if (this.MFromDate != null || this.MToDate != null) { this.backdrop = 'block'; this.cancelblock = 'block'; } else { this.Form.onReset(); } } modalClose() { this.backdrop = 'none'; this.cancelblock = 'none'; } CancelNo() { this.backdrop = 'none'; this.cancelblock = 'none'; } CancelYes() { debugger; this.backdrop = 'none'; this.cancelblock = 'none'; //this.MFromDate = ''; //this.MToDate = ''; this.MedicalBillRegisterTable = false; this.MedicalBillSummaryTable = false; this.MBS_label = false; } onSubmit(form: NgForm) { debugger; if (form.valid) { this.M_FromDat = this.MFromDate.toISOString(); this.M_ToDat = this.MToDate.toISOString(); this.commonService.getListOfData("MedicalBillRegister/getMedBillDet/" + this.M_FromDat + '/' + this.M_ToDat + '/' + parseInt(localStorage.getItem("CompanyID"))) .subscribe(data => { debugger; if (data.getRegisterDetail != null && data.getRegisterDetail.length != 0) { debugger; if (data.getRegisterDetail != null) { for (var i = 0; i < data.getRegisterDetail.length; i++) { debugger; var res = ((data.getRegisterDetail[i].Quantity * data.getRegisterDetail[i].Rate) + 
data.getRegisterDetail[i].GSTValue) - data.getRegisterDetail[i].DiscountAmount; data.getRegisterDetail[i].TotalCost = res; } console.log(data.getRegisterDetail); } if (data.getSummaryDet != null) { for (var i = 0; i < data.getSummaryDet.length; i++) { debugger; var rslt = ((data.getSummaryDet[i].Quan * data.getSummaryDet[i].Irate) + data.getSummaryDet[i].Gamt) - data.getSummaryDet[i].Damt; data.getSummaryDet[i].TotalCost1 = rslt; } console.log(data.getRegisterDetail1); } this.commonService.data = data; this.dataSource.data = data.getRegisterDetail; this.dataSource1.data = data.getSummaryDet; debugger; this.dataSource.sort = this.sort; this.dataSource1.sort = this.sort; this.MedicalBillRegisterTable = true; this.MedicalBillSummaryTable = true; this.MBS_label = true; } else { debugger; this.MedicalBillRegisterTable = false; this.MedicalBillSummaryTable = false; this.MBS_label = false; Swal.fire({ type: 'warning', title: 'No Data Found', }) } }); } } }
getDiscountAmount1
identifier_name
medicalbillregistersummary.component.ts
import { Component, OnInit, ViewChild, ElementRef } from '@angular/core'; import { FormControl, NgForm } from '@angular/forms'; import { CommonService } from '../../shared/common.service'; import { BillingPharmacy } from '../../Models/ViewModels/BillingPharmacy_master.model'; import Swal from 'sweetalert2'; import * as XLSX from 'xlsx'; import html2canvas from 'html2canvas'; import { MatTableDataSource, MAT_DATE_FORMATS, DateAdapter, MAT_DATE_LOCALE } from '@angular/material'; import { MatSort } from '@angular/material/sort'; import { element } from '@angular/core/src/render3'; import { MomentDateAdapter } from '@angular/material-moment-adapter'; export const MY_FORMATS = { parse: { dateInput: 'DD/MM/YYYY', }, display: { dateInput: 'DD-MMM-YYYY', monthYearLabel: 'MMM YYYY', dateA11yLabel: 'DD-MM-YYYY', monthYearA11yLabel: 'MMMM YYYY', }, } @Component({ selector: 'app-medicalbillregistersummary', templateUrl: './medicalbillregistersummary.component.html', styleUrls: ['./medicalbillregistersummary.component.less'], providers: [ { provide: DateAdapter, useClass: MomentDateAdapter, deps: [MAT_DATE_LOCALE] }, { provide: MAT_DATE_FORMATS, useValue: MY_FORMATS }, ] }) export class MedicalbillregistersummaryComponent implements OnInit { MFromDate; @ViewChild('MedForm') Form: NgForm maxDate(): string { return new Date().toISOString().split('T')[0] } //maxDate2(): string { // return new Date().toISOString().split('T')[0] //} constructor(public commonService: CommonService<BillingPharmacy>) { } MedicalBillRegisterTable: boolean = false; MedicalBillSummaryTable: boolean = false; MBS_label: boolean = false; date = new FormControl(new Date()); ngOnInit() { } applyFilter(filterValue: string) { this.dataSource.filter = filterValue.trim().toLowerCase(); } applyFilter1(filterValue: string) { this.dataSource1.filter = filterValue.trim().toLowerCase(); } displayedColumn: string[] = ['BillNo', 'BillDate', 'PatientName', 'item', 'UOM', 'Quantity', 'Rate', 'ProductValue', 'Discount', 
'DiscountAmount', 'TaxDescription','GST', 'GSTValue', 'TotalCost']; dataSource = new MatTableDataSource(); displayedColumnsummary: string[] = ['Item', 'Uom', 'Quan', 'Irate', 'Tvalue', 'IDis', 'Damt', 'IGst', 'Gamt', 'TotalCost1']; dataSource1 = new MatTableDataSource(); @ViewChild(MatSort) sort: MatSort; minToDate = new Date(); CheckToDate() { debugger; this.minToDate = this.MFromDate; } MToDate; M_FromDat;
changeValueTotal(id, element, property: string) { var resTotal = (element.Quantity * element.Rate) + element.GSTValue - element.DiscountAmount; resTotal = parseFloat(resTotal.toFixed(2)); element.TotalCost = resTotal; } getTotalProdVal() { var totProdVal = this.commonService.data.getRegisterDetail.map(t => t.ProductValue).reduce((acc, value) => acc + value, 0); totProdVal = parseFloat(totProdVal.toFixed(2)); return totProdVal; } getDiscountAmount() { var totDiscntAmt = this.commonService.data.getRegisterDetail.map(t => t.DiscountAmount).reduce((acc, value) => acc + value, 0); totDiscntAmt = parseFloat(totDiscntAmt.toFixed(2)); return totDiscntAmt; } getGSTAmount() { var totGSTAmt = this.commonService.data.getRegisterDetail.map(t => t.GSTValue).reduce((acc, value) => acc + value, 0); totGSTAmt = parseFloat(totGSTAmt.toFixed(2)); return totGSTAmt; } getTotalCostamount() { var totCstAmt = this.commonService.data.getRegisterDetail.map(t => t.TotalCost).reduce((acc, value) => acc + value, 0); totCstAmt = parseFloat(totCstAmt.toFixed(2)); return totCstAmt; } getTotalCostamount1() { var totCstAmt1 = this.commonService.data.getSummaryDet.map(t => t.TotalCost1).reduce((acc, value) => acc + value, 0); totCstAmt1 = parseFloat(totCstAmt1.toFixed(2)); return totCstAmt1; } getTotalProdVal1() { var totProdVal1 = this.commonService.data.getSummaryDet.map(t => t.Tvalue).reduce((acc, value) => acc + value, 0); totProdVal1 = parseFloat(totProdVal1.toFixed(2)); return totProdVal1; } getDiscountAmount1() { var totDiscntAmt1 = this.commonService.data.getSummaryDet.map(t => t.Damt).reduce((acc, value) => acc + value, 0); totDiscntAmt1 = parseFloat(totDiscntAmt1.toFixed(2)); return totDiscntAmt1; } getGSTAmount1() { var totGSTAmt1 = this.commonService.data.getSummaryDet.map(t => t.Gamt).reduce((acc, value) => acc + value, 0); totGSTAmt1 = parseFloat(totGSTAmt1.toFixed(2)); return totGSTAmt1; } @ViewChild('RegTable') RegTable: ElementRef; @ViewChild('SummaryTable') SummaryTable: 
ElementRef; @ViewChild('table') table: ElementRef; @ViewChild('table1') table1: ElementRef; fireEvent() { debugger; const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table.nativeElement); const wb: XLSX.WorkBook = XLSX.utils.book_new(); XLSX.utils.book_append_sheet(wb, ws, 'Sheet1'); XLSX.writeFile(wb, 'Medical_Bill_Register.xlsx'); } captureScreen() { var data = document.getElementById('RegTable'); html2canvas(data).then(canvas => { var imgWidth = 239; var pageHeight = 55; //var width = data.internal.pageSize.getWidth(); //var height = data.internal.pageSize.getHeight(); var imgHeight = canvas.height * imgWidth / canvas.width; var heightLeft = imgHeight; const contentDataURL = canvas.toDataURL('image/PDF') //let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF var position = 5; //pdf.addImage(contentDataURL, 'PDF', 0, position, imgWidth, imgHeight) //pdf.save('Medical_Bill_Register.pdf'); // Generated PDF }); //const tabletojson = require('tabletojson'); //var table = tabletojson($('#table-id').get(0)); //var doc = new jspdf('l', 'pt', 'letter', true); //$.each(table, function (i, row) { // $.each(row, function (j, cell) { // if (j == "email" | j == 1) { // doc.cell(1, 10, 190, 20, cell, i); // } // else { // doc.cell(1, 10, 90, 20, cell, i); // } // }); //}); //doc.save('Safaa.pdf'); //var doc = new jspdf(); //var specialElementHandlers = { // '#hidediv': function (element, render) { return true; } //}; //doc.fromHTML($('#RegTable').get(0), 20, 20, { // 'width': 500, // 'elementHandlers': specialElementHandlers //}); //doc.save('Test.pdf'); } captureScreen1() { var data = document.getElementById('SummaryTable'); html2canvas(data).then(canvas => { var imgWidth = 239; var pageHeight = 55; var imgHeight = canvas.height * imgWidth / canvas.width; var heightLeft = imgHeight; const contentDataURL = canvas.toDataURL('image/PDF') //let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF var position = 5; //pdf.addImage(contentDataURL, 'PDF', 1, 
position, imgWidth, imgHeight) //pdf.save('Medical_Bill_Summary.pdf'); // Generated PDF }); } fireEvent1() { const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table1.nativeElement); const wb: XLSX.WorkBook = XLSX.utils.book_new(); XLSX.utils.book_append_sheet(wb, ws, 'Sheet1'); XLSX.writeFile(wb, 'Medical_Bill_Summary.xlsx'); } backdrop; cancelblock; Cancel() { debugger; if (this.MFromDate != null || this.MToDate != null) { this.backdrop = 'block'; this.cancelblock = 'block'; } else { this.Form.onReset(); } } modalClose() { this.backdrop = 'none'; this.cancelblock = 'none'; } CancelNo() { this.backdrop = 'none'; this.cancelblock = 'none'; } CancelYes() { debugger; this.backdrop = 'none'; this.cancelblock = 'none'; //this.MFromDate = ''; //this.MToDate = ''; this.MedicalBillRegisterTable = false; this.MedicalBillSummaryTable = false; this.MBS_label = false; } onSubmit(form: NgForm) { debugger; if (form.valid) { this.M_FromDat = this.MFromDate.toISOString(); this.M_ToDat = this.MToDate.toISOString(); this.commonService.getListOfData("MedicalBillRegister/getMedBillDet/" + this.M_FromDat + '/' + this.M_ToDat + '/' + parseInt(localStorage.getItem("CompanyID"))) .subscribe(data => { debugger; if (data.getRegisterDetail != null && data.getRegisterDetail.length != 0) { debugger; if (data.getRegisterDetail != null) { for (var i = 0; i < data.getRegisterDetail.length; i++) { debugger; var res = ((data.getRegisterDetail[i].Quantity * data.getRegisterDetail[i].Rate) + data.getRegisterDetail[i].GSTValue) - data.getRegisterDetail[i].DiscountAmount; data.getRegisterDetail[i].TotalCost = res; } console.log(data.getRegisterDetail); } if (data.getSummaryDet != null) { for (var i = 0; i < data.getSummaryDet.length; i++) { debugger; var rslt = ((data.getSummaryDet[i].Quan * data.getSummaryDet[i].Irate) + data.getSummaryDet[i].Gamt) - data.getSummaryDet[i].Damt; data.getSummaryDet[i].TotalCost1 = rslt; } console.log(data.getRegisterDetail1); } this.commonService.data = data; 
this.dataSource.data = data.getRegisterDetail; this.dataSource1.data = data.getSummaryDet; debugger; this.dataSource.sort = this.sort; this.dataSource1.sort = this.sort; this.MedicalBillRegisterTable = true; this.MedicalBillSummaryTable = true; this.MBS_label = true; } else { debugger; this.MedicalBillRegisterTable = false; this.MedicalBillSummaryTable = false; this.MBS_label = false; Swal.fire({ type: 'warning', title: 'No Data Found', }) } }); } } }
M_ToDat;
random_line_split
medicalbillregistersummary.component.ts
import { Component, OnInit, ViewChild, ElementRef } from '@angular/core'; import { FormControl, NgForm } from '@angular/forms'; import { CommonService } from '../../shared/common.service'; import { BillingPharmacy } from '../../Models/ViewModels/BillingPharmacy_master.model'; import Swal from 'sweetalert2'; import * as XLSX from 'xlsx'; import html2canvas from 'html2canvas'; import { MatTableDataSource, MAT_DATE_FORMATS, DateAdapter, MAT_DATE_LOCALE } from '@angular/material'; import { MatSort } from '@angular/material/sort'; import { element } from '@angular/core/src/render3'; import { MomentDateAdapter } from '@angular/material-moment-adapter'; export const MY_FORMATS = { parse: { dateInput: 'DD/MM/YYYY', }, display: { dateInput: 'DD-MMM-YYYY', monthYearLabel: 'MMM YYYY', dateA11yLabel: 'DD-MM-YYYY', monthYearA11yLabel: 'MMMM YYYY', }, } @Component({ selector: 'app-medicalbillregistersummary', templateUrl: './medicalbillregistersummary.component.html', styleUrls: ['./medicalbillregistersummary.component.less'], providers: [ { provide: DateAdapter, useClass: MomentDateAdapter, deps: [MAT_DATE_LOCALE] }, { provide: MAT_DATE_FORMATS, useValue: MY_FORMATS }, ] }) export class MedicalbillregistersummaryComponent implements OnInit { MFromDate; @ViewChild('MedForm') Form: NgForm maxDate(): string { return new Date().toISOString().split('T')[0] } //maxDate2(): string { // return new Date().toISOString().split('T')[0] //} constructor(public commonService: CommonService<BillingPharmacy>) { } MedicalBillRegisterTable: boolean = false; MedicalBillSummaryTable: boolean = false; MBS_label: boolean = false; date = new FormControl(new Date()); ngOnInit() { } applyFilter(filterValue: string) { this.dataSource.filter = filterValue.trim().toLowerCase(); } applyFilter1(filterValue: string) { this.dataSource1.filter = filterValue.trim().toLowerCase(); } displayedColumn: string[] = ['BillNo', 'BillDate', 'PatientName', 'item', 'UOM', 'Quantity', 'Rate', 'ProductValue', 'Discount', 
'DiscountAmount', 'TaxDescription','GST', 'GSTValue', 'TotalCost']; dataSource = new MatTableDataSource(); displayedColumnsummary: string[] = ['Item', 'Uom', 'Quan', 'Irate', 'Tvalue', 'IDis', 'Damt', 'IGst', 'Gamt', 'TotalCost1']; dataSource1 = new MatTableDataSource(); @ViewChild(MatSort) sort: MatSort; minToDate = new Date(); CheckToDate() { debugger; this.minToDate = this.MFromDate; } MToDate; M_FromDat; M_ToDat; changeValueTotal(id, element, property: string) { var resTotal = (element.Quantity * element.Rate) + element.GSTValue - element.DiscountAmount; resTotal = parseFloat(resTotal.toFixed(2)); element.TotalCost = resTotal; } getTotalProdVal() { var totProdVal = this.commonService.data.getRegisterDetail.map(t => t.ProductValue).reduce((acc, value) => acc + value, 0); totProdVal = parseFloat(totProdVal.toFixed(2)); return totProdVal; } getDiscountAmount() { var totDiscntAmt = this.commonService.data.getRegisterDetail.map(t => t.DiscountAmount).reduce((acc, value) => acc + value, 0); totDiscntAmt = parseFloat(totDiscntAmt.toFixed(2)); return totDiscntAmt; } getGSTAmount() { var totGSTAmt = this.commonService.data.getRegisterDetail.map(t => t.GSTValue).reduce((acc, value) => acc + value, 0); totGSTAmt = parseFloat(totGSTAmt.toFixed(2)); return totGSTAmt; } getTotalCostamount() { var totCstAmt = this.commonService.data.getRegisterDetail.map(t => t.TotalCost).reduce((acc, value) => acc + value, 0); totCstAmt = parseFloat(totCstAmt.toFixed(2)); return totCstAmt; } getTotalCostamount1() { var totCstAmt1 = this.commonService.data.getSummaryDet.map(t => t.TotalCost1).reduce((acc, value) => acc + value, 0); totCstAmt1 = parseFloat(totCstAmt1.toFixed(2)); return totCstAmt1; } getTotalProdVal1() { var totProdVal1 = this.commonService.data.getSummaryDet.map(t => t.Tvalue).reduce((acc, value) => acc + value, 0); totProdVal1 = parseFloat(totProdVal1.toFixed(2)); return totProdVal1; } getDiscountAmount1() { var totDiscntAmt1 = this.commonService.data.getSummaryDet.map(t => 
t.Damt).reduce((acc, value) => acc + value, 0); totDiscntAmt1 = parseFloat(totDiscntAmt1.toFixed(2)); return totDiscntAmt1; } getGSTAmount1() { var totGSTAmt1 = this.commonService.data.getSummaryDet.map(t => t.Gamt).reduce((acc, value) => acc + value, 0); totGSTAmt1 = parseFloat(totGSTAmt1.toFixed(2)); return totGSTAmt1; } @ViewChild('RegTable') RegTable: ElementRef; @ViewChild('SummaryTable') SummaryTable: ElementRef; @ViewChild('table') table: ElementRef; @ViewChild('table1') table1: ElementRef; fireEvent() { debugger; const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table.nativeElement); const wb: XLSX.WorkBook = XLSX.utils.book_new(); XLSX.utils.book_append_sheet(wb, ws, 'Sheet1'); XLSX.writeFile(wb, 'Medical_Bill_Register.xlsx'); } captureScreen() { var data = document.getElementById('RegTable'); html2canvas(data).then(canvas => { var imgWidth = 239; var pageHeight = 55; //var width = data.internal.pageSize.getWidth(); //var height = data.internal.pageSize.getHeight(); var imgHeight = canvas.height * imgWidth / canvas.width; var heightLeft = imgHeight; const contentDataURL = canvas.toDataURL('image/PDF') //let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF var position = 5; //pdf.addImage(contentDataURL, 'PDF', 0, position, imgWidth, imgHeight) //pdf.save('Medical_Bill_Register.pdf'); // Generated PDF }); //const tabletojson = require('tabletojson'); //var table = tabletojson($('#table-id').get(0)); //var doc = new jspdf('l', 'pt', 'letter', true); //$.each(table, function (i, row) { // $.each(row, function (j, cell) { // if (j == "email" | j == 1) { // doc.cell(1, 10, 190, 20, cell, i); // } // else { // doc.cell(1, 10, 90, 20, cell, i); // } // }); //}); //doc.save('Safaa.pdf'); //var doc = new jspdf(); //var specialElementHandlers = { // '#hidediv': function (element, render) { return true; } //}; //doc.fromHTML($('#RegTable').get(0), 20, 20, { // 'width': 500, // 'elementHandlers': specialElementHandlers //}); //doc.save('Test.pdf'); } 
captureScreen1() { var data = document.getElementById('SummaryTable'); html2canvas(data).then(canvas => { var imgWidth = 239; var pageHeight = 55; var imgHeight = canvas.height * imgWidth / canvas.width; var heightLeft = imgHeight; const contentDataURL = canvas.toDataURL('image/PDF') //let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF var position = 5; //pdf.addImage(contentDataURL, 'PDF', 1, position, imgWidth, imgHeight) //pdf.save('Medical_Bill_Summary.pdf'); // Generated PDF }); } fireEvent1() { const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table1.nativeElement); const wb: XLSX.WorkBook = XLSX.utils.book_new(); XLSX.utils.book_append_sheet(wb, ws, 'Sheet1'); XLSX.writeFile(wb, 'Medical_Bill_Summary.xlsx'); } backdrop; cancelblock; Cancel() { debugger; if (this.MFromDate != null || this.MToDate != null) { this.backdrop = 'block'; this.cancelblock = 'block'; } else { this.Form.onReset(); } } modalClose() { this.backdrop = 'none'; this.cancelblock = 'none'; } CancelNo() { this.backdrop = 'none'; this.cancelblock = 'none'; } CancelYes() { debugger; this.backdrop = 'none'; this.cancelblock = 'none'; //this.MFromDate = ''; //this.MToDate = ''; this.MedicalBillRegisterTable = false; this.MedicalBillSummaryTable = false; this.MBS_label = false; } onSubmit(form: NgForm) { debugger; if (form.valid) { this.M_FromDat = this.MFromDate.toISOString(); this.M_ToDat = this.MToDate.toISOString(); this.commonService.getListOfData("MedicalBillRegister/getMedBillDet/" + this.M_FromDat + '/' + this.M_ToDat + '/' + parseInt(localStorage.getItem("CompanyID"))) .subscribe(data => { debugger; if (data.getRegisterDetail != null && data.getRegisterDetail.length != 0) { debugger; if (data.getRegisterDetail != null) { for (var i = 0; i < data.getRegisterDetail.length; i++)
console.log(data.getRegisterDetail); } if (data.getSummaryDet != null) { for (var i = 0; i < data.getSummaryDet.length; i++) { debugger; var rslt = ((data.getSummaryDet[i].Quan * data.getSummaryDet[i].Irate) + data.getSummaryDet[i].Gamt) - data.getSummaryDet[i].Damt; data.getSummaryDet[i].TotalCost1 = rslt; } console.log(data.getRegisterDetail1); } this.commonService.data = data; this.dataSource.data = data.getRegisterDetail; this.dataSource1.data = data.getSummaryDet; debugger; this.dataSource.sort = this.sort; this.dataSource1.sort = this.sort; this.MedicalBillRegisterTable = true; this.MedicalBillSummaryTable = true; this.MBS_label = true; } else { debugger; this.MedicalBillRegisterTable = false; this.MedicalBillSummaryTable = false; this.MBS_label = false; Swal.fire({ type: 'warning', title: 'No Data Found', }) } }); } } }
{ debugger; var res = ((data.getRegisterDetail[i].Quantity * data.getRegisterDetail[i].Rate) + data.getRegisterDetail[i].GSTValue) - data.getRegisterDetail[i].DiscountAmount; data.getRegisterDetail[i].TotalCost = res; }
conditional_block
cargo_workspace.rs
//! FIXME: write short doc here use std::{ ops, path::{Path, PathBuf}, }; use anyhow::{Context, Result}; use cargo_metadata::{BuildScript, CargoOpt, Message, MetadataCommand, PackageId}; use ra_arena::{Arena, Idx}; use ra_cargo_watch::run_cargo; use ra_db::Edition; use rustc_hash::FxHashMap; use serde::Deserialize; /// `CargoWorkspace` represents the logical structure of, well, a Cargo /// workspace. It pretty closely mirrors `cargo metadata` output. /// /// Note that internally, rust analyzer uses a different structure: /// `CrateGraph`. `CrateGraph` is lower-level: it knows only about the crates, /// while this knows about `Packages` & `Targets`: purely cargo-related /// concepts. #[derive(Debug, Clone)] pub struct CargoWorkspace { packages: Arena<PackageData>, targets: Arena<TargetData>, workspace_root: PathBuf, } impl ops::Index<Package> for CargoWorkspace { type Output = PackageData; fn index(&self, index: Package) -> &PackageData { &self.packages[index] } } impl ops::Index<Target> for CargoWorkspace { type Output = TargetData; fn index(&self, index: Target) -> &TargetData { &self.targets[index] } } #[derive(Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(rename_all = "camelCase", default)] pub struct CargoFeatures { /// Do not activate the `default` feature. pub no_default_features: bool, /// Activate all available features pub all_features: bool, /// List of features to activate. /// This will be ignored if `cargo_all_features` is true. 
pub features: Vec<String>, /// Runs cargo check on launch to figure out the correct values of OUT_DIR pub load_out_dirs_from_check: bool, } impl Default for CargoFeatures { fn default() -> Self { CargoFeatures { no_default_features: false, all_features: true, features: Vec::new(), load_out_dirs_from_check: false, } } } pub type Package = Idx<PackageData>; pub type Target = Idx<TargetData>; #[derive(Debug, Clone)] pub struct PackageData { pub name: String, pub manifest: PathBuf, pub targets: Vec<Target>, pub is_member: bool, pub dependencies: Vec<PackageDependency>, pub edition: Edition, pub features: Vec<String>, pub out_dir: Option<PathBuf>, pub proc_macro_dylib_path: Option<PathBuf>, } #[derive(Debug, Clone)] pub struct PackageDependency { pub pkg: Package, pub name: String, } #[derive(Debug, Clone)] pub struct TargetData { pub package: Package, pub name: String, pub root: PathBuf, pub kind: TargetKind, pub is_proc_macro: bool, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TargetKind { Bin, /// Any kind of Cargo lib crate-type (dylib, rlib, proc-macro, ...). 
Lib, Example, Test, Bench, Other, } impl TargetKind { fn new(kinds: &[String]) -> TargetKind { for kind in kinds { return match kind.as_str() { "bin" => TargetKind::Bin, "test" => TargetKind::Test, "bench" => TargetKind::Bench, "example" => TargetKind::Example, "proc-macro" => TargetKind::Lib, _ if kind.contains("lib") => TargetKind::Lib, _ => continue, }; } TargetKind::Other } } impl PackageData { pub fn root(&self) -> &Path { self.manifest.parent().unwrap() } } impl CargoWorkspace { pub fn from_cargo_metadata( cargo_toml: &Path, cargo_features: &CargoFeatures, ) -> Result<CargoWorkspace> { let mut meta = MetadataCommand::new(); meta.manifest_path(cargo_toml); if cargo_features.all_features { meta.features(CargoOpt::AllFeatures); } else if cargo_features.no_default_features { // FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures` // https://github.com/oli-obk/cargo_metadata/issues/79 meta.features(CargoOpt::NoDefaultFeatures); } else if !cargo_features.features.is_empty() { meta.features(CargoOpt::SomeFeatures(cargo_features.features.clone())); } if let Some(parent) = cargo_toml.parent() { meta.current_dir(parent); } let meta = meta.exec().with_context(|| { format!("Failed to run `cargo metadata --manifest-path {}`", cargo_toml.display()) })?; let mut out_dir_by_id = FxHashMap::default(); let mut proc_macro_dylib_paths = FxHashMap::default(); if cargo_features.load_out_dirs_from_check { let resources = load_extern_resources(cargo_toml, cargo_features); out_dir_by_id = resources.out_dirs; proc_macro_dylib_paths = resources.proc_dylib_paths; } let mut pkg_by_id = FxHashMap::default(); let mut packages = Arena::default(); let mut targets = Arena::default(); let ws_members = &meta.workspace_members; for meta_pkg in meta.packages { let cargo_metadata::Package { id, edition, name, manifest_path, .. 
} = meta_pkg; let is_member = ws_members.contains(&id); let edition = edition .parse::<Edition>() .with_context(|| format!("Failed to parse edition {}", edition))?; let pkg = packages.alloc(PackageData { name, manifest: manifest_path, targets: Vec::new(), is_member, edition, dependencies: Vec::new(), features: Vec::new(), out_dir: out_dir_by_id.get(&id).cloned(), proc_macro_dylib_path: proc_macro_dylib_paths.get(&id).cloned(), }); let pkg_data = &mut packages[pkg]; pkg_by_id.insert(id, pkg); for meta_tgt in meta_pkg.targets { let is_proc_macro = meta_tgt.kind.as_slice() == ["proc-macro"]; let tgt = targets.alloc(TargetData { package: pkg, name: meta_tgt.name, root: meta_tgt.src_path.clone(), kind: TargetKind::new(meta_tgt.kind.as_slice()), is_proc_macro, }); pkg_data.targets.push(tgt); } } let resolve = meta.resolve.expect("metadata executed with deps"); for node in resolve.nodes { let source = match pkg_by_id.get(&node.id) { Some(&src) => src, // FIXME: replace this and a similar branch below with `.unwrap`, once // https://github.com/rust-lang/cargo/issues/7841 // is fixed and hits stable (around 1.43-is probably?). None => { log::error!("Node id do not match in cargo metadata, ignoring {}", node.id); continue; } }; for dep_node in node.deps { let pkg = match pkg_by_id.get(&dep_node.pkg) { Some(&pkg) => pkg, None => { log::error!( "Dep node id do not match in cargo metadata, ignoring {}", dep_node.pkg ); continue; } }; let dep = PackageDependency { name: dep_node.name, pkg }; packages[source].dependencies.push(dep); } packages[source].features.extend(node.features); } Ok(CargoWorkspace { packages, targets, workspace_root: meta.workspace_root }) } pub fn packages<'a>(&'a self) -> impl Iterator<Item = Package> + ExactSizeIterator + 'a { self.packages.iter().map(|(id, _pkg)| id) } pub fn target_by_root(&self, root: &Path) -> Option<Target> { self.packages() .filter_map(|pkg| self[pkg].targets.iter().find(|&&it| self[it].root == root)) .next()
} } #[derive(Debug, Clone, Default)] pub struct ExternResources { out_dirs: FxHashMap<PackageId, PathBuf>, proc_dylib_paths: FxHashMap<PackageId, PathBuf>, } pub fn load_extern_resources(cargo_toml: &Path, cargo_features: &CargoFeatures) -> ExternResources { let mut args: Vec<String> = vec![ "check".to_string(), "--message-format=json".to_string(), "--manifest-path".to_string(), cargo_toml.display().to_string(), ]; if cargo_features.all_features { args.push("--all-features".to_string()); } else if cargo_features.no_default_features { // FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures` // https://github.com/oli-obk/cargo_metadata/issues/79 args.push("--no-default-features".to_string()); } else { args.extend(cargo_features.features.iter().cloned()); } let mut acc = ExternResources::default(); let res = run_cargo(&args, cargo_toml.parent(), &mut |message| { match message { Message::BuildScriptExecuted(BuildScript { package_id, out_dir, .. }) => { acc.out_dirs.insert(package_id, out_dir); } Message::CompilerArtifact(message) => { if message.target.kind.contains(&"proc-macro".to_string()) { let package_id = message.package_id; if let Some(filename) = message.filenames.get(0) { acc.proc_dylib_paths.insert(package_id, filename.clone()); } } } Message::CompilerMessage(_) => (), Message::Unknown => (), } true }); if let Err(err) = res { log::error!("Failed to load outdirs: {:?}", err); } acc }
.copied() } pub fn workspace_root(&self) -> &Path { &self.workspace_root
random_line_split
cargo_workspace.rs
//! FIXME: write short doc here use std::{ ops, path::{Path, PathBuf}, }; use anyhow::{Context, Result}; use cargo_metadata::{BuildScript, CargoOpt, Message, MetadataCommand, PackageId}; use ra_arena::{Arena, Idx}; use ra_cargo_watch::run_cargo; use ra_db::Edition; use rustc_hash::FxHashMap; use serde::Deserialize; /// `CargoWorkspace` represents the logical structure of, well, a Cargo /// workspace. It pretty closely mirrors `cargo metadata` output. /// /// Note that internally, rust analyzer uses a different structure: /// `CrateGraph`. `CrateGraph` is lower-level: it knows only about the crates, /// while this knows about `Packages` & `Targets`: purely cargo-related /// concepts. #[derive(Debug, Clone)] pub struct CargoWorkspace { packages: Arena<PackageData>, targets: Arena<TargetData>, workspace_root: PathBuf, } impl ops::Index<Package> for CargoWorkspace { type Output = PackageData; fn index(&self, index: Package) -> &PackageData { &self.packages[index] } } impl ops::Index<Target> for CargoWorkspace { type Output = TargetData; fn index(&self, index: Target) -> &TargetData { &self.targets[index] } } #[derive(Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(rename_all = "camelCase", default)] pub struct
{ /// Do not activate the `default` feature. pub no_default_features: bool, /// Activate all available features pub all_features: bool, /// List of features to activate. /// This will be ignored if `cargo_all_features` is true. pub features: Vec<String>, /// Runs cargo check on launch to figure out the correct values of OUT_DIR pub load_out_dirs_from_check: bool, } impl Default for CargoFeatures { fn default() -> Self { CargoFeatures { no_default_features: false, all_features: true, features: Vec::new(), load_out_dirs_from_check: false, } } } pub type Package = Idx<PackageData>; pub type Target = Idx<TargetData>; #[derive(Debug, Clone)] pub struct PackageData { pub name: String, pub manifest: PathBuf, pub targets: Vec<Target>, pub is_member: bool, pub dependencies: Vec<PackageDependency>, pub edition: Edition, pub features: Vec<String>, pub out_dir: Option<PathBuf>, pub proc_macro_dylib_path: Option<PathBuf>, } #[derive(Debug, Clone)] pub struct PackageDependency { pub pkg: Package, pub name: String, } #[derive(Debug, Clone)] pub struct TargetData { pub package: Package, pub name: String, pub root: PathBuf, pub kind: TargetKind, pub is_proc_macro: bool, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TargetKind { Bin, /// Any kind of Cargo lib crate-type (dylib, rlib, proc-macro, ...). 
Lib, Example, Test, Bench, Other, } impl TargetKind { fn new(kinds: &[String]) -> TargetKind { for kind in kinds { return match kind.as_str() { "bin" => TargetKind::Bin, "test" => TargetKind::Test, "bench" => TargetKind::Bench, "example" => TargetKind::Example, "proc-macro" => TargetKind::Lib, _ if kind.contains("lib") => TargetKind::Lib, _ => continue, }; } TargetKind::Other } } impl PackageData { pub fn root(&self) -> &Path { self.manifest.parent().unwrap() } } impl CargoWorkspace { pub fn from_cargo_metadata( cargo_toml: &Path, cargo_features: &CargoFeatures, ) -> Result<CargoWorkspace> { let mut meta = MetadataCommand::new(); meta.manifest_path(cargo_toml); if cargo_features.all_features { meta.features(CargoOpt::AllFeatures); } else if cargo_features.no_default_features { // FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures` // https://github.com/oli-obk/cargo_metadata/issues/79 meta.features(CargoOpt::NoDefaultFeatures); } else if !cargo_features.features.is_empty() { meta.features(CargoOpt::SomeFeatures(cargo_features.features.clone())); } if let Some(parent) = cargo_toml.parent() { meta.current_dir(parent); } let meta = meta.exec().with_context(|| { format!("Failed to run `cargo metadata --manifest-path {}`", cargo_toml.display()) })?; let mut out_dir_by_id = FxHashMap::default(); let mut proc_macro_dylib_paths = FxHashMap::default(); if cargo_features.load_out_dirs_from_check { let resources = load_extern_resources(cargo_toml, cargo_features); out_dir_by_id = resources.out_dirs; proc_macro_dylib_paths = resources.proc_dylib_paths; } let mut pkg_by_id = FxHashMap::default(); let mut packages = Arena::default(); let mut targets = Arena::default(); let ws_members = &meta.workspace_members; for meta_pkg in meta.packages { let cargo_metadata::Package { id, edition, name, manifest_path, .. 
} = meta_pkg; let is_member = ws_members.contains(&id); let edition = edition .parse::<Edition>() .with_context(|| format!("Failed to parse edition {}", edition))?; let pkg = packages.alloc(PackageData { name, manifest: manifest_path, targets: Vec::new(), is_member, edition, dependencies: Vec::new(), features: Vec::new(), out_dir: out_dir_by_id.get(&id).cloned(), proc_macro_dylib_path: proc_macro_dylib_paths.get(&id).cloned(), }); let pkg_data = &mut packages[pkg]; pkg_by_id.insert(id, pkg); for meta_tgt in meta_pkg.targets { let is_proc_macro = meta_tgt.kind.as_slice() == ["proc-macro"]; let tgt = targets.alloc(TargetData { package: pkg, name: meta_tgt.name, root: meta_tgt.src_path.clone(), kind: TargetKind::new(meta_tgt.kind.as_slice()), is_proc_macro, }); pkg_data.targets.push(tgt); } } let resolve = meta.resolve.expect("metadata executed with deps"); for node in resolve.nodes { let source = match pkg_by_id.get(&node.id) { Some(&src) => src, // FIXME: replace this and a similar branch below with `.unwrap`, once // https://github.com/rust-lang/cargo/issues/7841 // is fixed and hits stable (around 1.43-is probably?). 
None => { log::error!("Node id do not match in cargo metadata, ignoring {}", node.id); continue; } }; for dep_node in node.deps { let pkg = match pkg_by_id.get(&dep_node.pkg) { Some(&pkg) => pkg, None => { log::error!( "Dep node id do not match in cargo metadata, ignoring {}", dep_node.pkg ); continue; } }; let dep = PackageDependency { name: dep_node.name, pkg }; packages[source].dependencies.push(dep); } packages[source].features.extend(node.features); } Ok(CargoWorkspace { packages, targets, workspace_root: meta.workspace_root }) } pub fn packages<'a>(&'a self) -> impl Iterator<Item = Package> + ExactSizeIterator + 'a { self.packages.iter().map(|(id, _pkg)| id) } pub fn target_by_root(&self, root: &Path) -> Option<Target> { self.packages() .filter_map(|pkg| self[pkg].targets.iter().find(|&&it| self[it].root == root)) .next() .copied() } pub fn workspace_root(&self) -> &Path { &self.workspace_root } } #[derive(Debug, Clone, Default)] pub struct ExternResources { out_dirs: FxHashMap<PackageId, PathBuf>, proc_dylib_paths: FxHashMap<PackageId, PathBuf>, } pub fn load_extern_resources(cargo_toml: &Path, cargo_features: &CargoFeatures) -> ExternResources { let mut args: Vec<String> = vec![ "check".to_string(), "--message-format=json".to_string(), "--manifest-path".to_string(), cargo_toml.display().to_string(), ]; if cargo_features.all_features { args.push("--all-features".to_string()); } else if cargo_features.no_default_features { // FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures` // https://github.com/oli-obk/cargo_metadata/issues/79 args.push("--no-default-features".to_string()); } else { args.extend(cargo_features.features.iter().cloned()); } let mut acc = ExternResources::default(); let res = run_cargo(&args, cargo_toml.parent(), &mut |message| { match message { Message::BuildScriptExecuted(BuildScript { package_id, out_dir, .. 
}) => { acc.out_dirs.insert(package_id, out_dir); } Message::CompilerArtifact(message) => { if message.target.kind.contains(&"proc-macro".to_string()) { let package_id = message.package_id; if let Some(filename) = message.filenames.get(0) { acc.proc_dylib_paths.insert(package_id, filename.clone()); } } } Message::CompilerMessage(_) => (), Message::Unknown => (), } true }); if let Err(err) = res { log::error!("Failed to load outdirs: {:?}", err); } acc }
CargoFeatures
identifier_name
SIMulator_functions.py
import numpy as np from numpy import pi, cos, sin from numpy.fft import fft2, ifft2, fftshift, ifftshift from skimage import io, draw, transform, img_as_ubyte, img_as_float from scipy import signal from scipy.signal import convolve2d import scipy.special from numba import jit import time import streamlit as st def
(x, opt): return np.clip(np.cos(x), 0, 1) def cos_wave_envelope(x, h, opt): period_in_pixels = opt.w / (opt.k2) p = period_in_pixels f = 1 / p # h = 2*pi*opt.k2*(h-0.5)+10 h = h*opt.w - opt.w/2 + 10 window = np.where(np.abs(x - h) <= period_in_pixels/4, 1, 0) maxval = np.max(window * np.cos(2*pi*f*(x - h))) return window * np.cos(2*pi*f*(x - h)) # def cos_wave_envelope(x, h, opt): # period_in_pixels = opt.w / (2*opt.k2) # w = period_in_pixels # # h = (2*h/2/pi) % opt.w # h = (h*period_in_pixels / 2 / pi) # window = np.where(np.abs(x - h) <= w/2, 1, 0) # return window * (1 + np.cos(2 * np.pi * (x - h) / w)) def square_wave(x, opt): return np.heaviside(np.cos(x), 0) # return np.where(np.cos(x) >= 0, 1, 0) # def square_wave(x, opt): # # Calculate the period and duty cycle # # period = 4*pi*opt.k2 / opt.w # # duty_cycle = 1 / (opt.Nshifts) # peak_width = opt.peak_width # peak_spacing = opt.peak_spacing # # Convert these pixel values into fractions of the total width # duty_cycle = peak_width / peak_spacing # # Generate the square wave # return signal.square(x, duty_cycle) def square_wave_one_third(x, opt): # sums to 0 return 2 * (np.heaviside(np.cos(x) - np.cos(1 * np.pi / 3), 0) - 1 / 3) def square_wave_large_spacing(x, opt): # sums to 1 # d : peak width d = 2 * np.pi / opt.Nshifts d_pixels = opt.w / (2*pi*opt.k2) * d min_d = 1/ (d_pixels / d) max_d = 2/d_pixels d_orig = d # d = np.clip(d, min_d, max_d) d = max(d, min_d) print(f"d_pixels: {d_pixels}, min_d: {min_d}, max_d: {max_d}, d: {d}, d_orig: {d_orig}") return 2*(np.heaviside(np.cos(x) - np.cos(d/2), 0)-0.3) @jit(nopython=True) def DMDPixelTransform(input_img, dmdMapping, xoffset=0, yoffset=0): # Initialize an array of zeros with same size as the input image transformed_img = np.zeros_like(input_img) # Get the dimensions of the input image rows, cols = input_img.shape # Iterate over the pixels of the input image for i in range(rows): for j in range(cols): # Calculate the new coordinates for the pixel ip = i + 
yoffset jp = j + xoffset # Apply the dmdMapping transformation if set if dmdMapping > 0: transformed_i = jp + ip - 2 transformed_j = (jp - ip + 4) // 2 else: transformed_i = ip transformed_j = jp # If the new coordinates are within the bounds of the image, copy the pixel value if 0 <= transformed_i < rows and 0 <= transformed_j < cols: transformed_img[transformed_i, transformed_j] = input_img[i, j] # Return the transformed image return transformed_img def Get_X_Y_MeshGrids(w, opt, forPSF=False): # TODO: these hard-coded values are not ideal # and this way of scaling the patterns is # likely going to lead to undesired behaviour if opt.crop_factor: if opt.patterns > 0: # assuming DMD resolution crop_factor_x = 1 crop_factor_y = 1 else: dim = opt.imageSize if type(dim) is int: dim = (dim, dim) crop_factor_x = dim[1] / 912 # 428 crop_factor_y = dim[0] / 1140 # 684 # data from dec 2022 acquired with DMD patterns with the below factors # crop_factor_x = 1 # crop_factor_y = 1 # first version, december 2022 # wo = w / 2 # x = np.linspace(0, w - 1, 912) # y = np.linspace(0, w - 1, 1140) # [X, Y] = np.meshgrid(x, y) if ( opt.dmdMapping == 2 or (opt.dmdMapping == 1 and opt.SIMmodality == "stripes") ) and not forPSF: padding = 4 else: padding = 1 x = np.linspace( 0, padding * crop_factor_x * 512 - 1, padding * int(crop_factor_x * 912) ) y = np.linspace( 0, padding * crop_factor_y * 512 - 1, padding * int(crop_factor_y * 1140) ) [X, Y] = np.meshgrid(x, y) else: x = np.linspace(0, w - 1, w) y = np.linspace(0, w - 1, w) X, Y = np.meshgrid(x, y) return X, Y def PsfOtf(w, opt): # AIM: To generate PSF and OTF using Bessel function # INPUT VARIABLES # w: image size # scale: a parameter used to adjust PSF/OTF width # OUTPUT VRAIBLES # yyo: system PSF # OTF2dc: system OTF eps = np.finfo(np.float64).eps X, Y = Get_X_Y_MeshGrids(w, opt, forPSF=True) scale = opt.PSFOTFscale # Generation of the PSF with Besselj. 
R = np.sqrt(np.minimum(X, np.abs(X - w)) ** 2 + np.minimum(Y, np.abs(Y - w)) ** 2) yy = np.abs(2 * scipy.special.jv(1, scale * R + eps) / (scale * R + eps)) ** 2 yy0 = fftshift(yy) # Generate 2D OTF. OTF2d = fft2(yy) OTF2dmax = np.max([np.abs(OTF2d)]) OTF2d = OTF2d / OTF2dmax OTF2dc = np.abs(fftshift(OTF2d)) return (yy0, OTF2dc) def conv2(x, y, mode="same"): # Make it equivalent to Matlab's conv2 function # https://stackoverflow.com/questions/3731093/is-there-a-python-equivalent-of-matlabs-conv2-function return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)/x.size/y.size def SIMimages(opt, DIo, func=cos_wave, pixelsize_ratio=1): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image or integer (dimension) if only patterns are wanted # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image if type(DIo) == int: assert(opt.patterns == 1) # only patterns are wanted w = DIo wo = w / 2 else: assert(opt.patterns != 1) w = DIo.shape[0] wo = w / 2 opt.w = w X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) # Illuminating pattern # orientation direction of illumination patterns orientation = np.zeros(opt.Nangles) for i in range(opt.Nangles): orientation[i] = i * pi / opt.Nangles + opt.alpha + opt.angleError if opt.shuffleOrientations: np.random.shuffle(orientation) # illumination frequency vectors k2mat = np.zeros((opt.Nangles, 2)) for i in range(opt.Nangles): theta = orientation[i] k2mat[i, :] = np.array( [(opt.k2 * pixelsize_ratio / w) * cos(theta), (opt.k2 / w) * sin(theta)] ) # illumination phase shifts along directions with errors ps = np.zeros((opt.Nangles, opt.Nshifts)) for i_a in range(opt.Nangles): for 
i_s in range(opt.Nshifts): ps[i_a, i_s] = 2 * pi * i_s / opt.Nshifts + opt.phaseError[i_a, i_s] # illumination patterns frames = [] auxil = [] for i_a in range(opt.Nangles): for i_s in range(opt.Nshifts): # illuminated signal if not opt.noStripes: if func == cos_wave_envelope: sig = opt.meanInten[i_a] + opt.ampInten[i_a] * cos_wave_envelope( (k2mat[i_a, 0]/opt.k2*opt.w * (X - opt.w/2) + k2mat[i_a, 1]/opt.k2*opt.w * (Y - opt.w/2)), i_s/opt.Nshifts, opt) else: sig = opt.meanInten[i_a] + opt.ampInten[i_a] * func( 2*pi * (k2mat[i_a, 0] * (X - wo) + k2mat[i_a, 1] * (Y - wo)) + ps[i_a, i_s] , opt) else: sig = 1 # simulating widefield # whether to transform sig for dmd if opt.dmdMapping > 0: # crop to upper left quadrant if padding was added if opt.dmdMapping == 1: sig = DMDPixelTransform( sig, opt.dmdMapping, xoffset=-sig.shape[1] // 2, yoffset=-sig.shape[0] // 2, ) sig = sig[: sig.shape[0] // 4, : sig.shape[1] // 4] elif opt.dmdMapping == 2: # rotate image by 45 degrees rotated_image = transform.rotate(sig, -45) rows, cols = rotated_image.shape[0], rotated_image.shape[1] # crop centre to avoid black corners row_start = rows // 4 + rows // 8 row_end = row_start + rows // 4 col_start = cols // 4 + cols // 8 col_end = col_start + cols // 4 # Crop the center of the image sig = rotated_image[row_start:row_end, col_start:col_end] if int(opt.patterns) == 1: # only patterns frame = sig elif int(opt.patterns) == 2: # patterns + specimen sig = sig.clip(0, 1) frame = DIo * sig auxil.append(sig) else: # with diffraction, pattern = False/0 sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Noise generation if opt.usePoissonNoise: # Poisson vals = 2 ** np.ceil( np.log2(opt.NoiseLevel) ) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian STnoisy = np.random.poisson(ST * vals) / float(vals) else: # Gaussian 
aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frame = STnoisy.clip(0, 1) frames.append(frame) opt.auxil = auxil return frames def GenSpeckle(dim, opt): N = opt.Nspeckles I = np.zeros((dim, dim)) randx = np.random.choice( list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False ) randy = np.random.choice( list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False ) for i in range(N): x = randx[i] y = randy[i] r = np.random.randint(3, 5) cr, cc = draw.ellipse(x, y, r, r, (dim, dim)) I[cr, cc] += 0.1 return I def SIMimages_speckle(opt, DIo): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image w = DIo.shape[0] X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) # illumination patterns frames = [] for i_a in range(opt.Nframes): # illuminated signal sig = GenSpeckle( w, opt ) # opt.meanInten[i_a] + opt.ampInten[i_a] * GenSpeckle(w) sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Gaussian noise generation aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (w, w)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frames.append(STnoisy) 
return frames @jit(nopython=True) def GenSpots(rows, cols, Nspots, spotSize, dmdMapping, xoffset, yoffset): N = Nspots I = np.zeros((rows, cols)) # ortholinear grid # fill in spots in partitions of NxN # for row in range(0, rows, N): # for col in range(0, cols, N): # for spot_x in range(spotSize): # for spot_y in range(spotSize): # # prevent index out of bounds # if row + yoffset + spot_y < rows and col + xoffset + spot_x < cols: # I[row + yoffset + spot_y, col + xoffset + spot_x] = 1 # staggered grid for row in range(-2 * rows, 2 * rows, N): for col in range(-2 * cols, 2 * cols, N): for spot_x in range(spotSize): for spot_y in range(spotSize): # prevent index out of bounds # if row + yoffset + spot_y < rows and col + xoffset + spot_x < cols: # I[row + yoffset + spot_y, col + xoffset + spot_x] = 1 ip = row + yoffset + spot_y jp = col + xoffset + spot_x if dmdMapping == 1: i = jp + ip - 2 j = (jp - ip + 4) // 2 else: # ip = (i - 2 * j + 6) // 2 # jp = (2 * j + i + 1) // 2 - 1 # use tilted coordinates i = ip j = jp if i < rows and j < cols and i >= 0 and j >= 0: I[i, j] = 1 return I def SIMimages_spots(opt, DIo): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image or integer (dimension) if only patterns are wanted # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image if type(DIo) == int: w = DIo else: w = DIo.shape[0] X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) N = opt.Nspots # offsets depending on spot size offsets = [ (x, y) for x in range(0, N, opt.spotSize) for y in range(0, N, opt.spotSize) ] t0 = time.perf_counter() # illumination patterns frames = [] for i_a in range(opt.Nframes): # illuminated signal sig 
= GenSpots( X.shape[0], X.shape[1], opt.Nspots, opt.spotSize, opt.dmdMapping, *offsets[i_a], ) if not opt.patterns: # pure patterns for reference and DMD control sig = opt.meanInten + opt.ampInten * sig # modify sig if padding was added if opt.dmdMapping == 2: # rotate image by 45 degrees rotated_image = transform.rotate(sig, -45) rotated_image = img_as_float(rotated_image) rows, cols = rotated_image.shape[0], rotated_image.shape[1] # crop centre to avoid black corners row_start = rows // 4 + rows // 8 row_end = row_start + rows // 4 col_start = cols // 4 + cols // 8 col_end = col_start + cols // 4 # Crop the center of the image sig = rotated_image[row_start:row_end, col_start:col_end] # don't think this is needed # # clip to 0-1 # sig = sig.clip(0, 1) # sig = img_as_ubyte(sig) # crop and resize dim = sig.shape sig = sig[: int(dim[0] * opt.spotResize), : int(dim[1] * opt.spotResize)] sig = transform.resize(sig, dim) if opt.patterns: frame = sig else: sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Noise generation if opt.usePoissonNoise: # Poisson vals = 2 ** np.ceil( np.log2(opt.NoiseLevel) ) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian STnoisy = np.random.poisson(ST * vals) / float(vals) else: # Gaussian aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frame = STnoisy.clip(0, 1) frames.append(frame) print(f"Time taken: {time.perf_counter() - t0}") return frames def Generate_SIM_Image(opt, Io, in_dim=512, gt_dim=1024, func=cos_wave): DIo = Io.astype("float") if in_dim is not None: if type(in_dim) is int: DIo = transform.resize(Io, (in_dim, in_dim), 
anti_aliasing=True, order=3) else: DIo = transform.resize(Io, in_dim, anti_aliasing=True, order=3) w = DIo.shape[0] # Generation of the PSF with Besselj. PSFo, OTFo = PsfOtf(w, opt) if opt.SIMmodality == "stripes": frames = SIMimages(opt, DIo, func=func) elif opt.SIMmodality == "spots": frames = SIMimages_spots(opt, DIo) elif opt.SIMmodality == "speckle": frames = SIMimages_speckle(opt, DIo) if opt.OTF_and_GT and not opt.patterns: frames.append(OTFo) if type(gt_dim) is int: gt_img = transform.resize(Io, (gt_dim, gt_dim), anti_aliasing=True, order=3) else: gt_img = transform.resize(Io, gt_dim, anti_aliasing=True, order=3) if gt_dim > in_dim: # assumes a upscale factor of 2 is given # gt_img = skimage.transform.resize(gt_img, (gt_dim,gt_dim), order=3) gt11 = gt_img[: in_dim[0], : in_dim[1]] gt21 = gt_img[in_dim[0] :, : in_dim[1]] gt12 = gt_img[: in_dim[0], in_dim[1] :] gt22 = gt_img[in_dim[0] :, in_dim[1] :] # frames.extend([gt11,gt21,gt12,gt22]) frames.append(gt11) frames.append(gt21) frames.append(gt12) frames.append(gt22) else: frames.append(gt_img) stack = np.array(frames) # NORMALIZE # does not work well with partitioned GT # for i in range(len(stack)): # stack[i] = (stack[i] - np.min(stack[i])) / \ # (np.max(stack[i]) - np.min(stack[i])) # normalised SIM stack simstack = stack[: opt.Nframes] stack[: opt.Nframes] = (simstack - np.min(simstack)) / ( np.max(simstack) - np.min(simstack) ) # normalised gt and OTF if opt.OTF_and_GT: if gt_dim > in_dim: gtstack = stack[-4:] stack[-4:] = (gtstack - np.min(gtstack)) / (np.max(gtstack) - np.min(gtstack)) # normalised OTF stack[-5] = (stack[-5] - np.min(stack[-5])) / ( np.max(stack[-5] - np.min(stack[-5])) ) else: stack[-1] = (stack[-1] - np.min(stack[-1])) / ( np.max(stack[-1] - np.min(stack[-1])) ) # normalised OTF stack[-2] = (stack[-2] - np.min(stack[-2])) / ( np.max(stack[-2] - np.min(stack[-2])) ) stack = (stack * 255).astype("uint8") if opt.outputname is not None: io.imsave(opt.outputname, stack) return stack
cos_wave
identifier_name
SIMulator_functions.py
import numpy as np from numpy import pi, cos, sin from numpy.fft import fft2, ifft2, fftshift, ifftshift from skimage import io, draw, transform, img_as_ubyte, img_as_float from scipy import signal from scipy.signal import convolve2d import scipy.special from numba import jit import time import streamlit as st def cos_wave(x, opt): return np.clip(np.cos(x), 0, 1) def cos_wave_envelope(x, h, opt): period_in_pixels = opt.w / (opt.k2) p = period_in_pixels f = 1 / p # h = 2*pi*opt.k2*(h-0.5)+10 h = h*opt.w - opt.w/2 + 10 window = np.where(np.abs(x - h) <= period_in_pixels/4, 1, 0) maxval = np.max(window * np.cos(2*pi*f*(x - h))) return window * np.cos(2*pi*f*(x - h)) # def cos_wave_envelope(x, h, opt): # period_in_pixels = opt.w / (2*opt.k2) # w = period_in_pixels # # h = (2*h/2/pi) % opt.w # h = (h*period_in_pixels / 2 / pi) # window = np.where(np.abs(x - h) <= w/2, 1, 0) # return window * (1 + np.cos(2 * np.pi * (x - h) / w)) def square_wave(x, opt): return np.heaviside(np.cos(x), 0) # return np.where(np.cos(x) >= 0, 1, 0) # def square_wave(x, opt): # # Calculate the period and duty cycle # # period = 4*pi*opt.k2 / opt.w # # duty_cycle = 1 / (opt.Nshifts) # peak_width = opt.peak_width # peak_spacing = opt.peak_spacing # # Convert these pixel values into fractions of the total width # duty_cycle = peak_width / peak_spacing # # Generate the square wave # return signal.square(x, duty_cycle) def square_wave_one_third(x, opt): # sums to 0 return 2 * (np.heaviside(np.cos(x) - np.cos(1 * np.pi / 3), 0) - 1 / 3) def square_wave_large_spacing(x, opt): # sums to 1 # d : peak width d = 2 * np.pi / opt.Nshifts d_pixels = opt.w / (2*pi*opt.k2) * d min_d = 1/ (d_pixels / d) max_d = 2/d_pixels d_orig = d # d = np.clip(d, min_d, max_d) d = max(d, min_d) print(f"d_pixels: {d_pixels}, min_d: {min_d}, max_d: {max_d}, d: {d}, d_orig: {d_orig}") return 2*(np.heaviside(np.cos(x) - np.cos(d/2), 0)-0.3) @jit(nopython=True) def DMDPixelTransform(input_img, dmdMapping, xoffset=0, yoffset=0): 
# Initialize an array of zeros with same size as the input image transformed_img = np.zeros_like(input_img) # Get the dimensions of the input image rows, cols = input_img.shape # Iterate over the pixels of the input image for i in range(rows): for j in range(cols): # Calculate the new coordinates for the pixel ip = i + yoffset jp = j + xoffset # Apply the dmdMapping transformation if set if dmdMapping > 0: transformed_i = jp + ip - 2 transformed_j = (jp - ip + 4) // 2 else: transformed_i = ip transformed_j = jp # If the new coordinates are within the bounds of the image, copy the pixel value if 0 <= transformed_i < rows and 0 <= transformed_j < cols: transformed_img[transformed_i, transformed_j] = input_img[i, j] # Return the transformed image return transformed_img def Get_X_Y_MeshGrids(w, opt, forPSF=False): # TODO: these hard-coded values are not ideal # and this way of scaling the patterns is # likely going to lead to undesired behaviour if opt.crop_factor: if opt.patterns > 0: # assuming DMD resolution crop_factor_x = 1 crop_factor_y = 1 else: dim = opt.imageSize if type(dim) is int: dim = (dim, dim) crop_factor_x = dim[1] / 912 # 428 crop_factor_y = dim[0] / 1140 # 684 # data from dec 2022 acquired with DMD patterns with the below factors # crop_factor_x = 1 # crop_factor_y = 1 # first version, december 2022 # wo = w / 2 # x = np.linspace(0, w - 1, 912) # y = np.linspace(0, w - 1, 1140) # [X, Y] = np.meshgrid(x, y) if ( opt.dmdMapping == 2 or (opt.dmdMapping == 1 and opt.SIMmodality == "stripes") ) and not forPSF: padding = 4 else: padding = 1 x = np.linspace( 0, padding * crop_factor_x * 512 - 1, padding * int(crop_factor_x * 912) ) y = np.linspace( 0, padding * crop_factor_y * 512 - 1, padding * int(crop_factor_y * 1140) ) [X, Y] = np.meshgrid(x, y) else: x = np.linspace(0, w - 1, w) y = np.linspace(0, w - 1, w) X, Y = np.meshgrid(x, y) return X, Y def PsfOtf(w, opt): # AIM: To generate PSF and OTF using Bessel function # INPUT VARIABLES # w: image size # 
scale: a parameter used to adjust PSF/OTF width # OUTPUT VRAIBLES # yyo: system PSF # OTF2dc: system OTF eps = np.finfo(np.float64).eps X, Y = Get_X_Y_MeshGrids(w, opt, forPSF=True) scale = opt.PSFOTFscale # Generation of the PSF with Besselj. R = np.sqrt(np.minimum(X, np.abs(X - w)) ** 2 + np.minimum(Y, np.abs(Y - w)) ** 2) yy = np.abs(2 * scipy.special.jv(1, scale * R + eps) / (scale * R + eps)) ** 2 yy0 = fftshift(yy) # Generate 2D OTF. OTF2d = fft2(yy) OTF2dmax = np.max([np.abs(OTF2d)]) OTF2d = OTF2d / OTF2dmax OTF2dc = np.abs(fftshift(OTF2d)) return (yy0, OTF2dc) def conv2(x, y, mode="same"): # Make it equivalent to Matlab's conv2 function # https://stackoverflow.com/questions/3731093/is-there-a-python-equivalent-of-matlabs-conv2-function return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)/x.size/y.size def SIMimages(opt, DIo, func=cos_wave, pixelsize_ratio=1): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image or integer (dimension) if only patterns are wanted # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image if type(DIo) == int: assert(opt.patterns == 1) # only patterns are wanted w = DIo wo = w / 2 else: assert(opt.patterns != 1) w = DIo.shape[0] wo = w / 2 opt.w = w X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) # Illuminating pattern # orientation direction of illumination patterns orientation = np.zeros(opt.Nangles) for i in range(opt.Nangles): orientation[i] = i * pi / opt.Nangles + opt.alpha + opt.angleError if opt.shuffleOrientations: np.random.shuffle(orientation) # illumination frequency vectors k2mat = np.zeros((opt.Nangles, 2)) for i in range(opt.Nangles): theta = 
orientation[i] k2mat[i, :] = np.array( [(opt.k2 * pixelsize_ratio / w) * cos(theta), (opt.k2 / w) * sin(theta)] ) # illumination phase shifts along directions with errors ps = np.zeros((opt.Nangles, opt.Nshifts)) for i_a in range(opt.Nangles): for i_s in range(opt.Nshifts): ps[i_a, i_s] = 2 * pi * i_s / opt.Nshifts + opt.phaseError[i_a, i_s] # illumination patterns frames = [] auxil = [] for i_a in range(opt.Nangles): for i_s in range(opt.Nshifts): # illuminated signal if not opt.noStripes: if func == cos_wave_envelope: sig = opt.meanInten[i_a] + opt.ampInten[i_a] * cos_wave_envelope( (k2mat[i_a, 0]/opt.k2*opt.w * (X - opt.w/2) + k2mat[i_a, 1]/opt.k2*opt.w * (Y - opt.w/2)), i_s/opt.Nshifts, opt) else: sig = opt.meanInten[i_a] + opt.ampInten[i_a] * func( 2*pi * (k2mat[i_a, 0] * (X - wo) + k2mat[i_a, 1] * (Y - wo)) + ps[i_a, i_s] , opt) else: sig = 1 # simulating widefield # whether to transform sig for dmd if opt.dmdMapping > 0: # crop to upper left quadrant if padding was added if opt.dmdMapping == 1: sig = DMDPixelTransform( sig, opt.dmdMapping, xoffset=-sig.shape[1] // 2, yoffset=-sig.shape[0] // 2, ) sig = sig[: sig.shape[0] // 4, : sig.shape[1] // 4] elif opt.dmdMapping == 2: # rotate image by 45 degrees rotated_image = transform.rotate(sig, -45) rows, cols = rotated_image.shape[0], rotated_image.shape[1] # crop centre to avoid black corners row_start = rows // 4 + rows // 8 row_end = row_start + rows // 4 col_start = cols // 4 + cols // 8 col_end = col_start + cols // 4 # Crop the center of the image sig = rotated_image[row_start:row_end, col_start:col_end] if int(opt.patterns) == 1: # only patterns frame = sig elif int(opt.patterns) == 2: # patterns + specimen sig = sig.clip(0, 1) frame = DIo * sig auxil.append(sig) else: # with diffraction, pattern = False/0 sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Noise generation 
if opt.usePoissonNoise: # Poisson vals = 2 ** np.ceil( np.log2(opt.NoiseLevel) ) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian STnoisy = np.random.poisson(ST * vals) / float(vals) else: # Gaussian aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frame = STnoisy.clip(0, 1) frames.append(frame) opt.auxil = auxil return frames def GenSpeckle(dim, opt):
def SIMimages_speckle(opt, DIo): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image w = DIo.shape[0] X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) # illumination patterns frames = [] for i_a in range(opt.Nframes): # illuminated signal sig = GenSpeckle( w, opt ) # opt.meanInten[i_a] + opt.ampInten[i_a] * GenSpeckle(w) sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Gaussian noise generation aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (w, w)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frames.append(STnoisy) return frames @jit(nopython=True) def GenSpots(rows, cols, Nspots, spotSize, dmdMapping, xoffset, yoffset): N = Nspots I = np.zeros((rows, cols)) # ortholinear grid # fill in spots in partitions of NxN # for row in range(0, rows, N): # for col in range(0, cols, N): # for spot_x in range(spotSize): # for spot_y in range(spotSize): # # prevent index out of bounds # if row + yoffset + spot_y < rows and col + xoffset + spot_x < cols: # I[row + yoffset + spot_y, col + xoffset + spot_x] = 1 # staggered grid for row in range(-2 * rows, 2 * rows, N): for col in range(-2 * cols, 2 * cols, N): for spot_x in range(spotSize): for spot_y in range(spotSize): # prevent index out of bounds # if row + yoffset + spot_y < rows and col + xoffset + spot_x < cols: # I[row + 
yoffset + spot_y, col + xoffset + spot_x] = 1 ip = row + yoffset + spot_y jp = col + xoffset + spot_x if dmdMapping == 1: i = jp + ip - 2 j = (jp - ip + 4) // 2 else: # ip = (i - 2 * j + 6) // 2 # jp = (2 * j + i + 1) // 2 - 1 # use tilted coordinates i = ip j = jp if i < rows and j < cols and i >= 0 and j >= 0: I[i, j] = 1 return I def SIMimages_spots(opt, DIo): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image or integer (dimension) if only patterns are wanted # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image if type(DIo) == int: w = DIo else: w = DIo.shape[0] X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) N = opt.Nspots # offsets depending on spot size offsets = [ (x, y) for x in range(0, N, opt.spotSize) for y in range(0, N, opt.spotSize) ] t0 = time.perf_counter() # illumination patterns frames = [] for i_a in range(opt.Nframes): # illuminated signal sig = GenSpots( X.shape[0], X.shape[1], opt.Nspots, opt.spotSize, opt.dmdMapping, *offsets[i_a], ) if not opt.patterns: # pure patterns for reference and DMD control sig = opt.meanInten + opt.ampInten * sig # modify sig if padding was added if opt.dmdMapping == 2: # rotate image by 45 degrees rotated_image = transform.rotate(sig, -45) rotated_image = img_as_float(rotated_image) rows, cols = rotated_image.shape[0], rotated_image.shape[1] # crop centre to avoid black corners row_start = rows // 4 + rows // 8 row_end = row_start + rows // 4 col_start = cols // 4 + cols // 8 col_end = col_start + cols // 4 # Crop the center of the image sig = rotated_image[row_start:row_end, col_start:col_end] # don't think this is needed # # clip to 0-1 # sig = sig.clip(0, 1) 
# sig = img_as_ubyte(sig) # crop and resize dim = sig.shape sig = sig[: int(dim[0] * opt.spotResize), : int(dim[1] * opt.spotResize)] sig = transform.resize(sig, dim) if opt.patterns: frame = sig else: sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Noise generation if opt.usePoissonNoise: # Poisson vals = 2 ** np.ceil( np.log2(opt.NoiseLevel) ) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian STnoisy = np.random.poisson(ST * vals) / float(vals) else: # Gaussian aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frame = STnoisy.clip(0, 1) frames.append(frame) print(f"Time taken: {time.perf_counter() - t0}") return frames def Generate_SIM_Image(opt, Io, in_dim=512, gt_dim=1024, func=cos_wave): DIo = Io.astype("float") if in_dim is not None: if type(in_dim) is int: DIo = transform.resize(Io, (in_dim, in_dim), anti_aliasing=True, order=3) else: DIo = transform.resize(Io, in_dim, anti_aliasing=True, order=3) w = DIo.shape[0] # Generation of the PSF with Besselj. 
PSFo, OTFo = PsfOtf(w, opt) if opt.SIMmodality == "stripes": frames = SIMimages(opt, DIo, func=func) elif opt.SIMmodality == "spots": frames = SIMimages_spots(opt, DIo) elif opt.SIMmodality == "speckle": frames = SIMimages_speckle(opt, DIo) if opt.OTF_and_GT and not opt.patterns: frames.append(OTFo) if type(gt_dim) is int: gt_img = transform.resize(Io, (gt_dim, gt_dim), anti_aliasing=True, order=3) else: gt_img = transform.resize(Io, gt_dim, anti_aliasing=True, order=3) if gt_dim > in_dim: # assumes a upscale factor of 2 is given # gt_img = skimage.transform.resize(gt_img, (gt_dim,gt_dim), order=3) gt11 = gt_img[: in_dim[0], : in_dim[1]] gt21 = gt_img[in_dim[0] :, : in_dim[1]] gt12 = gt_img[: in_dim[0], in_dim[1] :] gt22 = gt_img[in_dim[0] :, in_dim[1] :] # frames.extend([gt11,gt21,gt12,gt22]) frames.append(gt11) frames.append(gt21) frames.append(gt12) frames.append(gt22) else: frames.append(gt_img) stack = np.array(frames) # NORMALIZE # does not work well with partitioned GT # for i in range(len(stack)): # stack[i] = (stack[i] - np.min(stack[i])) / \ # (np.max(stack[i]) - np.min(stack[i])) # normalised SIM stack simstack = stack[: opt.Nframes] stack[: opt.Nframes] = (simstack - np.min(simstack)) / ( np.max(simstack) - np.min(simstack) ) # normalised gt and OTF if opt.OTF_and_GT: if gt_dim > in_dim: gtstack = stack[-4:] stack[-4:] = (gtstack - np.min(gtstack)) / (np.max(gtstack) - np.min(gtstack)) # normalised OTF stack[-5] = (stack[-5] - np.min(stack[-5])) / ( np.max(stack[-5] - np.min(stack[-5])) ) else: stack[-1] = (stack[-1] - np.min(stack[-1])) / ( np.max(stack[-1] - np.min(stack[-1])) ) # normalised OTF stack[-2] = (stack[-2] - np.min(stack[-2])) / ( np.max(stack[-2] - np.min(stack[-2])) ) stack = (stack * 255).astype("uint8") if opt.outputname is not None: io.imsave(opt.outputname, stack) return stack
N = opt.Nspeckles I = np.zeros((dim, dim)) randx = np.random.choice( list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False ) randy = np.random.choice( list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False ) for i in range(N): x = randx[i] y = randy[i] r = np.random.randint(3, 5) cr, cc = draw.ellipse(x, y, r, r, (dim, dim)) I[cr, cc] += 0.1 return I
identifier_body
SIMulator_functions.py
import numpy as np from numpy import pi, cos, sin from numpy.fft import fft2, ifft2, fftshift, ifftshift from skimage import io, draw, transform, img_as_ubyte, img_as_float from scipy import signal from scipy.signal import convolve2d import scipy.special from numba import jit import time import streamlit as st def cos_wave(x, opt): return np.clip(np.cos(x), 0, 1) def cos_wave_envelope(x, h, opt): period_in_pixels = opt.w / (opt.k2) p = period_in_pixels f = 1 / p # h = 2*pi*opt.k2*(h-0.5)+10 h = h*opt.w - opt.w/2 + 10 window = np.where(np.abs(x - h) <= period_in_pixels/4, 1, 0) maxval = np.max(window * np.cos(2*pi*f*(x - h))) return window * np.cos(2*pi*f*(x - h)) # def cos_wave_envelope(x, h, opt): # period_in_pixels = opt.w / (2*opt.k2) # w = period_in_pixels # # h = (2*h/2/pi) % opt.w # h = (h*period_in_pixels / 2 / pi) # window = np.where(np.abs(x - h) <= w/2, 1, 0) # return window * (1 + np.cos(2 * np.pi * (x - h) / w)) def square_wave(x, opt): return np.heaviside(np.cos(x), 0) # return np.where(np.cos(x) >= 0, 1, 0) # def square_wave(x, opt): # # Calculate the period and duty cycle # # period = 4*pi*opt.k2 / opt.w # # duty_cycle = 1 / (opt.Nshifts) # peak_width = opt.peak_width # peak_spacing = opt.peak_spacing # # Convert these pixel values into fractions of the total width # duty_cycle = peak_width / peak_spacing # # Generate the square wave # return signal.square(x, duty_cycle) def square_wave_one_third(x, opt): # sums to 0 return 2 * (np.heaviside(np.cos(x) - np.cos(1 * np.pi / 3), 0) - 1 / 3) def square_wave_large_spacing(x, opt): # sums to 1 # d : peak width d = 2 * np.pi / opt.Nshifts d_pixels = opt.w / (2*pi*opt.k2) * d min_d = 1/ (d_pixels / d) max_d = 2/d_pixels d_orig = d # d = np.clip(d, min_d, max_d) d = max(d, min_d) print(f"d_pixels: {d_pixels}, min_d: {min_d}, max_d: {max_d}, d: {d}, d_orig: {d_orig}") return 2*(np.heaviside(np.cos(x) - np.cos(d/2), 0)-0.3) @jit(nopython=True) def DMDPixelTransform(input_img, dmdMapping, xoffset=0, yoffset=0): 
# Initialize an array of zeros with same size as the input image transformed_img = np.zeros_like(input_img) # Get the dimensions of the input image rows, cols = input_img.shape # Iterate over the pixels of the input image for i in range(rows): for j in range(cols): # Calculate the new coordinates for the pixel ip = i + yoffset jp = j + xoffset # Apply the dmdMapping transformation if set if dmdMapping > 0: transformed_i = jp + ip - 2 transformed_j = (jp - ip + 4) // 2 else: transformed_i = ip transformed_j = jp # If the new coordinates are within the bounds of the image, copy the pixel value if 0 <= transformed_i < rows and 0 <= transformed_j < cols: transformed_img[transformed_i, transformed_j] = input_img[i, j] # Return the transformed image return transformed_img def Get_X_Y_MeshGrids(w, opt, forPSF=False): # TODO: these hard-coded values are not ideal # and this way of scaling the patterns is # likely going to lead to undesired behaviour if opt.crop_factor: if opt.patterns > 0: # assuming DMD resolution crop_factor_x = 1 crop_factor_y = 1 else: dim = opt.imageSize if type(dim) is int: dim = (dim, dim) crop_factor_x = dim[1] / 912 # 428 crop_factor_y = dim[0] / 1140 # 684 # data from dec 2022 acquired with DMD patterns with the below factors # crop_factor_x = 1 # crop_factor_y = 1 # first version, december 2022 # wo = w / 2 # x = np.linspace(0, w - 1, 912) # y = np.linspace(0, w - 1, 1140) # [X, Y] = np.meshgrid(x, y) if ( opt.dmdMapping == 2 or (opt.dmdMapping == 1 and opt.SIMmodality == "stripes") ) and not forPSF: padding = 4 else: padding = 1 x = np.linspace( 0, padding * crop_factor_x * 512 - 1, padding * int(crop_factor_x * 912) ) y = np.linspace( 0, padding * crop_factor_y * 512 - 1, padding * int(crop_factor_y * 1140) ) [X, Y] = np.meshgrid(x, y) else: x = np.linspace(0, w - 1, w) y = np.linspace(0, w - 1, w) X, Y = np.meshgrid(x, y) return X, Y def PsfOtf(w, opt): # AIM: To generate PSF and OTF using Bessel function # INPUT VARIABLES # w: image size # 
scale: a parameter used to adjust PSF/OTF width # OUTPUT VRAIBLES # yyo: system PSF # OTF2dc: system OTF eps = np.finfo(np.float64).eps X, Y = Get_X_Y_MeshGrids(w, opt, forPSF=True) scale = opt.PSFOTFscale # Generation of the PSF with Besselj. R = np.sqrt(np.minimum(X, np.abs(X - w)) ** 2 + np.minimum(Y, np.abs(Y - w)) ** 2) yy = np.abs(2 * scipy.special.jv(1, scale * R + eps) / (scale * R + eps)) ** 2 yy0 = fftshift(yy) # Generate 2D OTF. OTF2d = fft2(yy) OTF2dmax = np.max([np.abs(OTF2d)]) OTF2d = OTF2d / OTF2dmax OTF2dc = np.abs(fftshift(OTF2d)) return (yy0, OTF2dc) def conv2(x, y, mode="same"): # Make it equivalent to Matlab's conv2 function # https://stackoverflow.com/questions/3731093/is-there-a-python-equivalent-of-matlabs-conv2-function return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)/x.size/y.size def SIMimages(opt, DIo, func=cos_wave, pixelsize_ratio=1): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image or integer (dimension) if only patterns are wanted # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image if type(DIo) == int: assert(opt.patterns == 1) # only patterns are wanted w = DIo wo = w / 2 else: assert(opt.patterns != 1) w = DIo.shape[0] wo = w / 2 opt.w = w X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) # Illuminating pattern # orientation direction of illumination patterns orientation = np.zeros(opt.Nangles) for i in range(opt.Nangles): orientation[i] = i * pi / opt.Nangles + opt.alpha + opt.angleError if opt.shuffleOrientations: np.random.shuffle(orientation) # illumination frequency vectors k2mat = np.zeros((opt.Nangles, 2)) for i in range(opt.Nangles): theta = 
orientation[i] k2mat[i, :] = np.array( [(opt.k2 * pixelsize_ratio / w) * cos(theta), (opt.k2 / w) * sin(theta)] ) # illumination phase shifts along directions with errors ps = np.zeros((opt.Nangles, opt.Nshifts)) for i_a in range(opt.Nangles): for i_s in range(opt.Nshifts): ps[i_a, i_s] = 2 * pi * i_s / opt.Nshifts + opt.phaseError[i_a, i_s] # illumination patterns frames = [] auxil = [] for i_a in range(opt.Nangles): for i_s in range(opt.Nshifts): # illuminated signal if not opt.noStripes: if func == cos_wave_envelope: sig = opt.meanInten[i_a] + opt.ampInten[i_a] * cos_wave_envelope( (k2mat[i_a, 0]/opt.k2*opt.w * (X - opt.w/2) + k2mat[i_a, 1]/opt.k2*opt.w * (Y - opt.w/2)), i_s/opt.Nshifts, opt) else: sig = opt.meanInten[i_a] + opt.ampInten[i_a] * func( 2*pi * (k2mat[i_a, 0] * (X - wo) + k2mat[i_a, 1] * (Y - wo)) + ps[i_a, i_s] , opt) else: sig = 1 # simulating widefield # whether to transform sig for dmd if opt.dmdMapping > 0: # crop to upper left quadrant if padding was added if opt.dmdMapping == 1: sig = DMDPixelTransform( sig, opt.dmdMapping, xoffset=-sig.shape[1] // 2, yoffset=-sig.shape[0] // 2, ) sig = sig[: sig.shape[0] // 4, : sig.shape[1] // 4] elif opt.dmdMapping == 2: # rotate image by 45 degrees rotated_image = transform.rotate(sig, -45) rows, cols = rotated_image.shape[0], rotated_image.shape[1] # crop centre to avoid black corners row_start = rows // 4 + rows // 8 row_end = row_start + rows // 4 col_start = cols // 4 + cols // 8 col_end = col_start + cols // 4 # Crop the center of the image sig = rotated_image[row_start:row_end, col_start:col_end] if int(opt.patterns) == 1: # only patterns frame = sig elif int(opt.patterns) == 2: # patterns + specimen sig = sig.clip(0, 1) frame = DIo * sig auxil.append(sig) else: # with diffraction, pattern = False/0 sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Noise generation 
if opt.usePoissonNoise: # Poisson vals = 2 ** np.ceil( np.log2(opt.NoiseLevel) ) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian STnoisy = np.random.poisson(ST * vals) / float(vals) else: # Gaussian aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frame = STnoisy.clip(0, 1) frames.append(frame) opt.auxil = auxil return frames def GenSpeckle(dim, opt): N = opt.Nspeckles I = np.zeros((dim, dim)) randx = np.random.choice( list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False ) randy = np.random.choice( list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False ) for i in range(N): x = randx[i] y = randy[i] r = np.random.randint(3, 5) cr, cc = draw.ellipse(x, y, r, r, (dim, dim)) I[cr, cc] += 0.1 return I def SIMimages_speckle(opt, DIo): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image w = DIo.shape[0] X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) # illumination patterns frames = [] for i_a in range(opt.Nframes): # illuminated signal sig = GenSpeckle( w, opt ) # opt.meanInten[i_a] + opt.ampInten[i_a] * GenSpeckle(w) sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Gaussian noise generation aNoise = opt.NoiseLevel / 100 # noise
# SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (w, w)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frames.append(STnoisy) return frames @jit(nopython=True) def GenSpots(rows, cols, Nspots, spotSize, dmdMapping, xoffset, yoffset): N = Nspots I = np.zeros((rows, cols)) # ortholinear grid # fill in spots in partitions of NxN # for row in range(0, rows, N): # for col in range(0, cols, N): # for spot_x in range(spotSize): # for spot_y in range(spotSize): # # prevent index out of bounds # if row + yoffset + spot_y < rows and col + xoffset + spot_x < cols: # I[row + yoffset + spot_y, col + xoffset + spot_x] = 1 # staggered grid for row in range(-2 * rows, 2 * rows, N): for col in range(-2 * cols, 2 * cols, N): for spot_x in range(spotSize): for spot_y in range(spotSize): # prevent index out of bounds # if row + yoffset + spot_y < rows and col + xoffset + spot_x < cols: # I[row + yoffset + spot_y, col + xoffset + spot_x] = 1 ip = row + yoffset + spot_y jp = col + xoffset + spot_x if dmdMapping == 1: i = jp + ip - 2 j = (jp - ip + 4) // 2 else: # ip = (i - 2 * j + 6) // 2 # jp = (2 * j + i + 1) // 2 - 1 # use tilted coordinates i = ip j = jp if i < rows and j < cols and i >= 0 and j >= 0: I[i, j] = 1 return I def SIMimages_spots(opt, DIo): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image or integer (dimension) if only patterns are wanted # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image if type(DIo) == int: w = DIo else: w = DIo.shape[0] X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) N = 
opt.Nspots # offsets depending on spot size offsets = [ (x, y) for x in range(0, N, opt.spotSize) for y in range(0, N, opt.spotSize) ] t0 = time.perf_counter() # illumination patterns frames = [] for i_a in range(opt.Nframes): # illuminated signal sig = GenSpots( X.shape[0], X.shape[1], opt.Nspots, opt.spotSize, opt.dmdMapping, *offsets[i_a], ) if not opt.patterns: # pure patterns for reference and DMD control sig = opt.meanInten + opt.ampInten * sig # modify sig if padding was added if opt.dmdMapping == 2: # rotate image by 45 degrees rotated_image = transform.rotate(sig, -45) rotated_image = img_as_float(rotated_image) rows, cols = rotated_image.shape[0], rotated_image.shape[1] # crop centre to avoid black corners row_start = rows // 4 + rows // 8 row_end = row_start + rows // 4 col_start = cols // 4 + cols // 8 col_end = col_start + cols // 4 # Crop the center of the image sig = rotated_image[row_start:row_end, col_start:col_end] # don't think this is needed # # clip to 0-1 # sig = sig.clip(0, 1) # sig = img_as_ubyte(sig) # crop and resize dim = sig.shape sig = sig[: int(dim[0] * opt.spotResize), : int(dim[1] * opt.spotResize)] sig = transform.resize(sig, dim) if opt.patterns: frame = sig else: sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Noise generation if opt.usePoissonNoise: # Poisson vals = 2 ** np.ceil( np.log2(opt.NoiseLevel) ) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian STnoisy = np.random.poisson(ST * vals) / float(vals) else: # Gaussian aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frame = STnoisy.clip(0, 1) frames.append(frame) print(f"Time 
taken: {time.perf_counter() - t0}") return frames def Generate_SIM_Image(opt, Io, in_dim=512, gt_dim=1024, func=cos_wave): DIo = Io.astype("float") if in_dim is not None: if type(in_dim) is int: DIo = transform.resize(Io, (in_dim, in_dim), anti_aliasing=True, order=3) else: DIo = transform.resize(Io, in_dim, anti_aliasing=True, order=3) w = DIo.shape[0] # Generation of the PSF with Besselj. PSFo, OTFo = PsfOtf(w, opt) if opt.SIMmodality == "stripes": frames = SIMimages(opt, DIo, func=func) elif opt.SIMmodality == "spots": frames = SIMimages_spots(opt, DIo) elif opt.SIMmodality == "speckle": frames = SIMimages_speckle(opt, DIo) if opt.OTF_and_GT and not opt.patterns: frames.append(OTFo) if type(gt_dim) is int: gt_img = transform.resize(Io, (gt_dim, gt_dim), anti_aliasing=True, order=3) else: gt_img = transform.resize(Io, gt_dim, anti_aliasing=True, order=3) if gt_dim > in_dim: # assumes a upscale factor of 2 is given # gt_img = skimage.transform.resize(gt_img, (gt_dim,gt_dim), order=3) gt11 = gt_img[: in_dim[0], : in_dim[1]] gt21 = gt_img[in_dim[0] :, : in_dim[1]] gt12 = gt_img[: in_dim[0], in_dim[1] :] gt22 = gt_img[in_dim[0] :, in_dim[1] :] # frames.extend([gt11,gt21,gt12,gt22]) frames.append(gt11) frames.append(gt21) frames.append(gt12) frames.append(gt22) else: frames.append(gt_img) stack = np.array(frames) # NORMALIZE # does not work well with partitioned GT # for i in range(len(stack)): # stack[i] = (stack[i] - np.min(stack[i])) / \ # (np.max(stack[i]) - np.min(stack[i])) # normalised SIM stack simstack = stack[: opt.Nframes] stack[: opt.Nframes] = (simstack - np.min(simstack)) / ( np.max(simstack) - np.min(simstack) ) # normalised gt and OTF if opt.OTF_and_GT: if gt_dim > in_dim: gtstack = stack[-4:] stack[-4:] = (gtstack - np.min(gtstack)) / (np.max(gtstack) - np.min(gtstack)) # normalised OTF stack[-5] = (stack[-5] - np.min(stack[-5])) / ( np.max(stack[-5] - np.min(stack[-5])) ) else: stack[-1] = (stack[-1] - np.min(stack[-1])) / ( np.max(stack[-1] - 
np.min(stack[-1])) ) # normalised OTF stack[-2] = (stack[-2] - np.min(stack[-2])) / ( np.max(stack[-2] - np.min(stack[-2])) ) stack = (stack * 255).astype("uint8") if opt.outputname is not None: io.imsave(opt.outputname, stack) return stack
random_line_split
SIMulator_functions.py
import numpy as np from numpy import pi, cos, sin from numpy.fft import fft2, ifft2, fftshift, ifftshift from skimage import io, draw, transform, img_as_ubyte, img_as_float from scipy import signal from scipy.signal import convolve2d import scipy.special from numba import jit import time import streamlit as st def cos_wave(x, opt): return np.clip(np.cos(x), 0, 1) def cos_wave_envelope(x, h, opt): period_in_pixels = opt.w / (opt.k2) p = period_in_pixels f = 1 / p # h = 2*pi*opt.k2*(h-0.5)+10 h = h*opt.w - opt.w/2 + 10 window = np.where(np.abs(x - h) <= period_in_pixels/4, 1, 0) maxval = np.max(window * np.cos(2*pi*f*(x - h))) return window * np.cos(2*pi*f*(x - h)) # def cos_wave_envelope(x, h, opt): # period_in_pixels = opt.w / (2*opt.k2) # w = period_in_pixels # # h = (2*h/2/pi) % opt.w # h = (h*period_in_pixels / 2 / pi) # window = np.where(np.abs(x - h) <= w/2, 1, 0) # return window * (1 + np.cos(2 * np.pi * (x - h) / w)) def square_wave(x, opt): return np.heaviside(np.cos(x), 0) # return np.where(np.cos(x) >= 0, 1, 0) # def square_wave(x, opt): # # Calculate the period and duty cycle # # period = 4*pi*opt.k2 / opt.w # # duty_cycle = 1 / (opt.Nshifts) # peak_width = opt.peak_width # peak_spacing = opt.peak_spacing # # Convert these pixel values into fractions of the total width # duty_cycle = peak_width / peak_spacing # # Generate the square wave # return signal.square(x, duty_cycle) def square_wave_one_third(x, opt): # sums to 0 return 2 * (np.heaviside(np.cos(x) - np.cos(1 * np.pi / 3), 0) - 1 / 3) def square_wave_large_spacing(x, opt): # sums to 1 # d : peak width d = 2 * np.pi / opt.Nshifts d_pixels = opt.w / (2*pi*opt.k2) * d min_d = 1/ (d_pixels / d) max_d = 2/d_pixels d_orig = d # d = np.clip(d, min_d, max_d) d = max(d, min_d) print(f"d_pixels: {d_pixels}, min_d: {min_d}, max_d: {max_d}, d: {d}, d_orig: {d_orig}") return 2*(np.heaviside(np.cos(x) - np.cos(d/2), 0)-0.3) @jit(nopython=True) def DMDPixelTransform(input_img, dmdMapping, xoffset=0, yoffset=0): 
# Initialize an array of zeros with same size as the input image transformed_img = np.zeros_like(input_img) # Get the dimensions of the input image rows, cols = input_img.shape # Iterate over the pixels of the input image for i in range(rows): for j in range(cols): # Calculate the new coordinates for the pixel ip = i + yoffset jp = j + xoffset # Apply the dmdMapping transformation if set if dmdMapping > 0: transformed_i = jp + ip - 2 transformed_j = (jp - ip + 4) // 2 else: transformed_i = ip transformed_j = jp # If the new coordinates are within the bounds of the image, copy the pixel value if 0 <= transformed_i < rows and 0 <= transformed_j < cols: transformed_img[transformed_i, transformed_j] = input_img[i, j] # Return the transformed image return transformed_img def Get_X_Y_MeshGrids(w, opt, forPSF=False): # TODO: these hard-coded values are not ideal # and this way of scaling the patterns is # likely going to lead to undesired behaviour if opt.crop_factor: if opt.patterns > 0: # assuming DMD resolution crop_factor_x = 1 crop_factor_y = 1 else: dim = opt.imageSize if type(dim) is int:
crop_factor_x = dim[1] / 912 # 428 crop_factor_y = dim[0] / 1140 # 684 # data from dec 2022 acquired with DMD patterns with the below factors # crop_factor_x = 1 # crop_factor_y = 1 # first version, december 2022 # wo = w / 2 # x = np.linspace(0, w - 1, 912) # y = np.linspace(0, w - 1, 1140) # [X, Y] = np.meshgrid(x, y) if ( opt.dmdMapping == 2 or (opt.dmdMapping == 1 and opt.SIMmodality == "stripes") ) and not forPSF: padding = 4 else: padding = 1 x = np.linspace( 0, padding * crop_factor_x * 512 - 1, padding * int(crop_factor_x * 912) ) y = np.linspace( 0, padding * crop_factor_y * 512 - 1, padding * int(crop_factor_y * 1140) ) [X, Y] = np.meshgrid(x, y) else: x = np.linspace(0, w - 1, w) y = np.linspace(0, w - 1, w) X, Y = np.meshgrid(x, y) return X, Y def PsfOtf(w, opt): # AIM: To generate PSF and OTF using Bessel function # INPUT VARIABLES # w: image size # scale: a parameter used to adjust PSF/OTF width # OUTPUT VRAIBLES # yyo: system PSF # OTF2dc: system OTF eps = np.finfo(np.float64).eps X, Y = Get_X_Y_MeshGrids(w, opt, forPSF=True) scale = opt.PSFOTFscale # Generation of the PSF with Besselj. R = np.sqrt(np.minimum(X, np.abs(X - w)) ** 2 + np.minimum(Y, np.abs(Y - w)) ** 2) yy = np.abs(2 * scipy.special.jv(1, scale * R + eps) / (scale * R + eps)) ** 2 yy0 = fftshift(yy) # Generate 2D OTF. 
OTF2d = fft2(yy) OTF2dmax = np.max([np.abs(OTF2d)]) OTF2d = OTF2d / OTF2dmax OTF2dc = np.abs(fftshift(OTF2d)) return (yy0, OTF2dc) def conv2(x, y, mode="same"): # Make it equivalent to Matlab's conv2 function # https://stackoverflow.com/questions/3731093/is-there-a-python-equivalent-of-matlabs-conv2-function return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)/x.size/y.size def SIMimages(opt, DIo, func=cos_wave, pixelsize_ratio=1): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image or integer (dimension) if only patterns are wanted # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image if type(DIo) == int: assert(opt.patterns == 1) # only patterns are wanted w = DIo wo = w / 2 else: assert(opt.patterns != 1) w = DIo.shape[0] wo = w / 2 opt.w = w X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) # Illuminating pattern # orientation direction of illumination patterns orientation = np.zeros(opt.Nangles) for i in range(opt.Nangles): orientation[i] = i * pi / opt.Nangles + opt.alpha + opt.angleError if opt.shuffleOrientations: np.random.shuffle(orientation) # illumination frequency vectors k2mat = np.zeros((opt.Nangles, 2)) for i in range(opt.Nangles): theta = orientation[i] k2mat[i, :] = np.array( [(opt.k2 * pixelsize_ratio / w) * cos(theta), (opt.k2 / w) * sin(theta)] ) # illumination phase shifts along directions with errors ps = np.zeros((opt.Nangles, opt.Nshifts)) for i_a in range(opt.Nangles): for i_s in range(opt.Nshifts): ps[i_a, i_s] = 2 * pi * i_s / opt.Nshifts + opt.phaseError[i_a, i_s] # illumination patterns frames = [] auxil = [] for i_a in range(opt.Nangles): for i_s in 
range(opt.Nshifts): # illuminated signal if not opt.noStripes: if func == cos_wave_envelope: sig = opt.meanInten[i_a] + opt.ampInten[i_a] * cos_wave_envelope( (k2mat[i_a, 0]/opt.k2*opt.w * (X - opt.w/2) + k2mat[i_a, 1]/opt.k2*opt.w * (Y - opt.w/2)), i_s/opt.Nshifts, opt) else: sig = opt.meanInten[i_a] + opt.ampInten[i_a] * func( 2*pi * (k2mat[i_a, 0] * (X - wo) + k2mat[i_a, 1] * (Y - wo)) + ps[i_a, i_s] , opt) else: sig = 1 # simulating widefield # whether to transform sig for dmd if opt.dmdMapping > 0: # crop to upper left quadrant if padding was added if opt.dmdMapping == 1: sig = DMDPixelTransform( sig, opt.dmdMapping, xoffset=-sig.shape[1] // 2, yoffset=-sig.shape[0] // 2, ) sig = sig[: sig.shape[0] // 4, : sig.shape[1] // 4] elif opt.dmdMapping == 2: # rotate image by 45 degrees rotated_image = transform.rotate(sig, -45) rows, cols = rotated_image.shape[0], rotated_image.shape[1] # crop centre to avoid black corners row_start = rows // 4 + rows // 8 row_end = row_start + rows // 4 col_start = cols // 4 + cols // 8 col_end = col_start + cols // 4 # Crop the center of the image sig = rotated_image[row_start:row_end, col_start:col_end] if int(opt.patterns) == 1: # only patterns frame = sig elif int(opt.patterns) == 2: # patterns + specimen sig = sig.clip(0, 1) frame = DIo * sig auxil.append(sig) else: # with diffraction, pattern = False/0 sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Noise generation if opt.usePoissonNoise: # Poisson vals = 2 ** np.ceil( np.log2(opt.NoiseLevel) ) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian STnoisy = np.random.poisson(ST * vals) / float(vals) else: # Gaussian aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape)) NoiseFrac = 1 # may be set to 0 to 
avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frame = STnoisy.clip(0, 1) frames.append(frame) opt.auxil = auxil return frames def GenSpeckle(dim, opt): N = opt.Nspeckles I = np.zeros((dim, dim)) randx = np.random.choice( list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False ) randy = np.random.choice( list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False ) for i in range(N): x = randx[i] y = randy[i] r = np.random.randint(3, 5) cr, cc = draw.ellipse(x, y, r, r, (dim, dim)) I[cr, cc] += 0.1 return I def SIMimages_speckle(opt, DIo): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image w = DIo.shape[0] X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) # illumination patterns frames = [] for i_a in range(opt.Nframes): # illuminated signal sig = GenSpeckle( w, opt ) # opt.meanInten[i_a] + opt.ampInten[i_a] * GenSpeckle(w) sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Gaussian noise generation aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (w, w)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frames.append(STnoisy) return frames @jit(nopython=True) def GenSpots(rows, cols, Nspots, spotSize, dmdMapping, xoffset, yoffset): N = Nspots I = np.zeros((rows, cols)) # ortholinear grid # fill in spots in 
partitions of NxN # for row in range(0, rows, N): # for col in range(0, cols, N): # for spot_x in range(spotSize): # for spot_y in range(spotSize): # # prevent index out of bounds # if row + yoffset + spot_y < rows and col + xoffset + spot_x < cols: # I[row + yoffset + spot_y, col + xoffset + spot_x] = 1 # staggered grid for row in range(-2 * rows, 2 * rows, N): for col in range(-2 * cols, 2 * cols, N): for spot_x in range(spotSize): for spot_y in range(spotSize): # prevent index out of bounds # if row + yoffset + spot_y < rows and col + xoffset + spot_x < cols: # I[row + yoffset + spot_y, col + xoffset + spot_x] = 1 ip = row + yoffset + spot_y jp = col + xoffset + spot_x if dmdMapping == 1: i = jp + ip - 2 j = (jp - ip + 4) // 2 else: # ip = (i - 2 * j + 6) // 2 # jp = (2 * j + i + 1) // 2 - 1 # use tilted coordinates i = ip j = jp if i < rows and j < cols and i >= 0 and j >= 0: I[i, j] = 1 return I def SIMimages_spots(opt, DIo): # AIM: to generate raw sim images # INPUT VARIABLES # k2: illumination frequency # DIo: specimen image or integer (dimension) if only patterns are wanted # PSFo: system PSF # OTFo: system OTF # UsePSF: 1 (to blur SIM images by convloving with PSF) # 0 (to blur SIM images by truncating its fourier content beyond OTF) # NoiseLevel: percentage noise level for generating gaussian noise # OUTPUT VARIABLES # frames: raw sim images # DIoTnoisy: noisy wide field image # DIoT: noise-free wide field image if type(DIo) == int: w = DIo else: w = DIo.shape[0] X, Y = Get_X_Y_MeshGrids(w, opt) PSFo, OTFo = PsfOtf(w, opt) N = opt.Nspots # offsets depending on spot size offsets = [ (x, y) for x in range(0, N, opt.spotSize) for y in range(0, N, opt.spotSize) ] t0 = time.perf_counter() # illumination patterns frames = [] for i_a in range(opt.Nframes): # illuminated signal sig = GenSpots( X.shape[0], X.shape[1], opt.Nspots, opt.spotSize, opt.dmdMapping, *offsets[i_a], ) if not opt.patterns: # pure patterns for reference and DMD control sig = opt.meanInten + 
opt.ampInten * sig # modify sig if padding was added if opt.dmdMapping == 2: # rotate image by 45 degrees rotated_image = transform.rotate(sig, -45) rotated_image = img_as_float(rotated_image) rows, cols = rotated_image.shape[0], rotated_image.shape[1] # crop centre to avoid black corners row_start = rows // 4 + rows // 8 row_end = row_start + rows // 4 col_start = cols // 4 + cols // 8 col_end = col_start + cols // 4 # Crop the center of the image sig = rotated_image[row_start:row_end, col_start:col_end] # don't think this is needed # # clip to 0-1 # sig = sig.clip(0, 1) # sig = img_as_ubyte(sig) # crop and resize dim = sig.shape sig = sig[: int(dim[0] * opt.spotResize), : int(dim[1] * opt.spotResize)] sig = transform.resize(sig, dim) if opt.patterns: frame = sig else: sup_sig = DIo * sig # superposed signal # superposed (noise-free) Images if opt.UsePSF == 1: ST = conv2(sup_sig, PSFo, "same") else: ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo))) # Noise generation if opt.usePoissonNoise: # Poisson vals = 2 ** np.ceil( np.log2(opt.NoiseLevel) ) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian STnoisy = np.random.poisson(ST * vals) / float(vals) else: # Gaussian aNoise = opt.NoiseLevel / 100 # noise # SNR = 1/aNoise # SNRdb = 20*log10(1/aNoise) nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape)) NoiseFrac = 1 # may be set to 0 to avoid noise addition # noise added raw SIM images STnoisy = ST + NoiseFrac * nST frame = STnoisy.clip(0, 1) frames.append(frame) print(f"Time taken: {time.perf_counter() - t0}") return frames def Generate_SIM_Image(opt, Io, in_dim=512, gt_dim=1024, func=cos_wave): DIo = Io.astype("float") if in_dim is not None: if type(in_dim) is int: DIo = transform.resize(Io, (in_dim, in_dim), anti_aliasing=True, order=3) else: DIo = transform.resize(Io, in_dim, anti_aliasing=True, order=3) w = DIo.shape[0] # Generation of the PSF with Besselj. 
PSFo, OTFo = PsfOtf(w, opt) if opt.SIMmodality == "stripes": frames = SIMimages(opt, DIo, func=func) elif opt.SIMmodality == "spots": frames = SIMimages_spots(opt, DIo) elif opt.SIMmodality == "speckle": frames = SIMimages_speckle(opt, DIo) if opt.OTF_and_GT and not opt.patterns: frames.append(OTFo) if type(gt_dim) is int: gt_img = transform.resize(Io, (gt_dim, gt_dim), anti_aliasing=True, order=3) else: gt_img = transform.resize(Io, gt_dim, anti_aliasing=True, order=3) if gt_dim > in_dim: # assumes a upscale factor of 2 is given # gt_img = skimage.transform.resize(gt_img, (gt_dim,gt_dim), order=3) gt11 = gt_img[: in_dim[0], : in_dim[1]] gt21 = gt_img[in_dim[0] :, : in_dim[1]] gt12 = gt_img[: in_dim[0], in_dim[1] :] gt22 = gt_img[in_dim[0] :, in_dim[1] :] # frames.extend([gt11,gt21,gt12,gt22]) frames.append(gt11) frames.append(gt21) frames.append(gt12) frames.append(gt22) else: frames.append(gt_img) stack = np.array(frames) # NORMALIZE # does not work well with partitioned GT # for i in range(len(stack)): # stack[i] = (stack[i] - np.min(stack[i])) / \ # (np.max(stack[i]) - np.min(stack[i])) # normalised SIM stack simstack = stack[: opt.Nframes] stack[: opt.Nframes] = (simstack - np.min(simstack)) / ( np.max(simstack) - np.min(simstack) ) # normalised gt and OTF if opt.OTF_and_GT: if gt_dim > in_dim: gtstack = stack[-4:] stack[-4:] = (gtstack - np.min(gtstack)) / (np.max(gtstack) - np.min(gtstack)) # normalised OTF stack[-5] = (stack[-5] - np.min(stack[-5])) / ( np.max(stack[-5] - np.min(stack[-5])) ) else: stack[-1] = (stack[-1] - np.min(stack[-1])) / ( np.max(stack[-1] - np.min(stack[-1])) ) # normalised OTF stack[-2] = (stack[-2] - np.min(stack[-2])) / ( np.max(stack[-2] - np.min(stack[-2])) ) stack = (stack * 255).astype("uint8") if opt.outputname is not None: io.imsave(opt.outputname, stack) return stack
dim = (dim, dim)
conditional_block
factory.go
package tx import ( "errors" "fmt" "os" "strings" "github.com/cosmos/go-bip39" "github.com/spf13/pflag" "github.com/spf13/viper" "cosmossdk.io/math" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/tx" "github.com/cosmos/cosmos-sdk/types/tx/signing" ) // Factory defines a client transaction factory that facilitates generating and // signing an application-specific transaction. type Factory struct { keybase keyring.Keyring txConfig client.TxConfig accountRetriever client.AccountRetriever accountNumber uint64 sequence uint64 gas uint64 timeoutHeight uint64 gasAdjustment float64 chainID string offline bool generateOnly bool memo string fees sdk.Coins tip *tx.Tip feeGranter sdk.AccAddress feePayer sdk.AccAddress gasPrices sdk.DecCoins extOptions []*codectypes.Any signMode signing.SignMode simulateAndExecute bool preprocessTxHook client.PreprocessTxFn } // NewFactoryCLI creates a new Factory. 
func NewFactoryCLI(clientCtx client.Context, flagSet *pflag.FlagSet) (Factory, error) { if clientCtx.Viper == nil { clientCtx.Viper = viper.New() } if err := clientCtx.Viper.BindPFlags(flagSet); err != nil { return Factory{}, fmt.Errorf("failed to bind flags to viper: %w", err) } signModeStr := clientCtx.SignModeStr signMode := signing.SignMode_SIGN_MODE_UNSPECIFIED switch signModeStr { case flags.SignModeDirect: signMode = signing.SignMode_SIGN_MODE_DIRECT case flags.SignModeLegacyAminoJSON: signMode = signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON case flags.SignModeDirectAux: signMode = signing.SignMode_SIGN_MODE_DIRECT_AUX case flags.SignModeTextual: signMode = signing.SignMode_SIGN_MODE_TEXTUAL case flags.SignModeEIP191: signMode = signing.SignMode_SIGN_MODE_EIP_191 } var accNum, accSeq uint64 if clientCtx.Offline { if flagSet.Changed(flags.FlagAccountNumber) && flagSet.Changed(flags.FlagSequence) { accNum = clientCtx.Viper.GetUint64(flags.FlagAccountNumber) accSeq = clientCtx.Viper.GetUint64(flags.FlagSequence) } else { return Factory{}, errors.New("account-number and sequence must be set in offline mode") } } gasAdj := clientCtx.Viper.GetFloat64(flags.FlagGasAdjustment) memo := clientCtx.Viper.GetString(flags.FlagNote) timeoutHeight := clientCtx.Viper.GetUint64(flags.FlagTimeoutHeight) gasStr := clientCtx.Viper.GetString(flags.FlagGas) gasSetting, _ := flags.ParseGasSetting(gasStr) f := Factory{ txConfig: clientCtx.TxConfig, accountRetriever: clientCtx.AccountRetriever, keybase: clientCtx.Keyring, chainID: clientCtx.ChainID, offline: clientCtx.Offline, generateOnly: clientCtx.GenerateOnly, gas: gasSetting.Gas, simulateAndExecute: gasSetting.Simulate, accountNumber: accNum, sequence: accSeq, timeoutHeight: timeoutHeight, gasAdjustment: gasAdj, memo: memo, signMode: signMode, feeGranter: clientCtx.FeeGranter, feePayer: clientCtx.FeePayer, } feesStr := clientCtx.Viper.GetString(flags.FlagFees) f = f.WithFees(feesStr) tipsStr := 
clientCtx.Viper.GetString(flags.FlagTip) // Add tips to factory. The tipper is necessarily the Msg signer, i.e. // the from address. f = f.WithTips(tipsStr, clientCtx.FromAddress.String()) gasPricesStr := clientCtx.Viper.GetString(flags.FlagGasPrices) f = f.WithGasPrices(gasPricesStr) f = f.WithPreprocessTxHook(clientCtx.PreprocessTxHook) return f, nil } func (f Factory) AccountNumber() uint64 { return f.accountNumber } func (f Factory) Sequence() uint64 { return f.sequence } func (f Factory) Gas() uint64 { return f.gas } func (f Factory) GasAdjustment() float64 { return f.gasAdjustment } func (f Factory) Keybase() keyring.Keyring { return f.keybase } func (f Factory) ChainID() string { return f.chainID } func (f Factory) Memo() string { return f.memo } func (f Factory) Fees() sdk.Coins { return f.fees } func (f Factory) GasPrices() sdk.DecCoins { return f.gasPrices } func (f Factory) AccountRetriever() client.AccountRetriever { return f.accountRetriever } func (f Factory) TimeoutHeight() uint64 { return f.timeoutHeight } // SimulateAndExecute returns the option to simulate and then execute the transaction // using the gas from the simulation results func (f Factory) SimulateAndExecute() bool { return f.simulateAndExecute } // WithTxConfig returns a copy of the Factory with an updated TxConfig. func (f Factory) WithTxConfig(g client.TxConfig) Factory { f.txConfig = g return f } // WithAccountRetriever returns a copy of the Factory with an updated AccountRetriever. func (f Factory) WithAccountRetriever(ar client.AccountRetriever) Factory { f.accountRetriever = ar return f } // WithChainID returns a copy of the Factory with an updated chainID. func (f Factory) WithChainID(chainID string) Factory { f.chainID = chainID return f } // WithGas returns a copy of the Factory with an updated gas value. func (f Factory) WithGas(gas uint64) Factory { f.gas = gas return f } // WithFees returns a copy of the Factory with an updated fee. 
func (f Factory) WithFees(fees string) Factory { parsedFees, err := sdk.ParseCoinsNormalized(fees) if err != nil { panic(err) } f.fees = parsedFees return f } // WithTips returns a copy of the Factory with an updated tip. func (f Factory) WithTips(tip, tipper string) Factory { parsedTips, err := sdk.ParseCoinsNormalized(tip) if err != nil { panic(err) } f.tip = &tx.Tip{ Tipper: tipper, Amount: parsedTips, } return f } // WithGasPrices returns a copy of the Factory with updated gas prices. func (f Factory) WithGasPrices(gasPrices string) Factory { parsedGasPrices, err := sdk.ParseDecCoins(gasPrices) if err != nil { panic(err) } f.gasPrices = parsedGasPrices return f } // WithKeybase returns a copy of the Factory with updated Keybase. func (f Factory) WithKeybase(keybase keyring.Keyring) Factory { f.keybase = keybase return f } // WithSequence returns a copy of the Factory with an updated sequence number. func (f Factory) WithSequence(sequence uint64) Factory { f.sequence = sequence return f } // WithMemo returns a copy of the Factory with an updated memo. func (f Factory) WithMemo(memo string) Factory { f.memo = memo return f } // WithAccountNumber returns a copy of the Factory with an updated account number. func (f Factory) WithAccountNumber(accnum uint64) Factory { f.accountNumber = accnum return f } // WithGasAdjustment returns a copy of the Factory with an updated gas adjustment. func (f Factory) WithGasAdjustment(gasAdj float64) Factory { f.gasAdjustment = gasAdj return f } // WithSimulateAndExecute returns a copy of the Factory with an updated gas // simulation value. func (f Factory) WithSimulateAndExecute(sim bool) Factory { f.simulateAndExecute = sim return f } // SignMode returns the sign mode configured in the Factory func (f Factory) SignMode() signing.SignMode { return f.signMode } // WithSignMode returns a copy of the Factory with an updated sign mode value. 
func (f Factory) WithSignMode(mode signing.SignMode) Factory { f.signMode = mode return f } // WithTimeoutHeight returns a copy of the Factory with an updated timeout height. func (f Factory) WithTimeoutHeight(height uint64) Factory { f.timeoutHeight = height return f } // WithFeeGranter returns a copy of the Factory with an updated fee granter. func (f Factory) WithFeeGranter(fg sdk.AccAddress) Factory { f.feeGranter = fg return f } // WithFeePayer returns a copy of the Factory with an updated fee granter. func (f Factory)
(fp sdk.AccAddress) Factory { f.feePayer = fp return f } // WithPreprocessTxHook returns a copy of the Factory with an updated preprocess tx function, // allows for preprocessing of transaction data using the TxBuilder. func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory { f.preprocessTxHook = preprocessFn return f } // PreprocessTx calls the preprocessing hook with the factory parameters and // returns the result. func (f Factory) PreprocessTx(keyname string, builder client.TxBuilder) error { if f.preprocessTxHook == nil { // Allow pass-through return nil } key, err := f.Keybase().Key(keyname) if err != nil { return fmt.Errorf("error retrieving key from keyring: %w", err) } return f.preprocessTxHook(f.chainID, key.GetType(), builder) } // WithExtensionOptions returns a Factory with given extension options added to the existing options, // Example to add dynamic fee extension options: // // extOpt := ethermint.ExtensionOptionDynamicFeeTx{ // MaxPriorityPrice: math.NewInt(1000000), // } // // extBytes, _ := extOpt.Marshal() // // extOpts := []*types.Any{ // { // TypeUrl: "/ethermint.types.v1.ExtensionOptionDynamicFeeTx", // Value: extBytes, // }, // } // // txf.WithExtensionOptions(extOpts...) func (f Factory) WithExtensionOptions(extOpts ...*codectypes.Any) Factory { f.extOptions = extOpts return f } // BuildUnsignedTx builds a transaction to be signed given a set of messages. // Once created, the fee, memo, and messages are set. 
func (f Factory) BuildUnsignedTx(msgs ...sdk.Msg) (client.TxBuilder, error) { if f.offline && f.generateOnly { if f.chainID != "" { return nil, fmt.Errorf("chain ID cannot be used when offline and generate-only flags are set") } } else if f.chainID == "" { return nil, fmt.Errorf("chain ID required but not specified") } fees := f.fees if !f.gasPrices.IsZero() { if !fees.IsZero() { return nil, errors.New("cannot provide both fees and gas prices") } glDec := math.LegacyNewDec(int64(f.gas)) // Derive the fees based on the provided gas prices, where // fee = ceil(gasPrice * gasLimit). fees = make(sdk.Coins, len(f.gasPrices)) for i, gp := range f.gasPrices { fee := gp.Amount.Mul(glDec) fees[i] = sdk.NewCoin(gp.Denom, fee.Ceil().RoundInt()) } } // Prevent simple inclusion of a valid mnemonic in the memo field if f.memo != "" && bip39.IsMnemonicValid(strings.ToLower(f.memo)) { return nil, errors.New("cannot provide a valid mnemonic seed in the memo field") } tx := f.txConfig.NewTxBuilder() if err := tx.SetMsgs(msgs...); err != nil { return nil, err } tx.SetMemo(f.memo) tx.SetFeeAmount(fees) tx.SetGasLimit(f.gas) tx.SetFeeGranter(f.feeGranter) tx.SetFeePayer(f.feePayer) tx.SetTimeoutHeight(f.TimeoutHeight()) if etx, ok := tx.(client.ExtendedTxBuilder); ok { etx.SetExtensionOptions(f.extOptions...) } return tx, nil } // PrintUnsignedTx will generate an unsigned transaction and print it to the writer // specified by ctx.Output. If simulation was requested, the gas will be // simulated and also printed to the same writer before the transaction is // printed. 
func (f Factory) PrintUnsignedTx(clientCtx client.Context, msgs ...sdk.Msg) error { if f.SimulateAndExecute() { if clientCtx.Offline { return errors.New("cannot estimate gas in offline mode") } // Prepare TxFactory with acc & seq numbers as CalculateGas requires // account and sequence numbers to be set preparedTxf, err := f.Prepare(clientCtx) if err != nil { return err } _, adjusted, err := CalculateGas(clientCtx, preparedTxf, msgs...) if err != nil { return err } f = f.WithGas(adjusted) _, _ = fmt.Fprintf(os.Stderr, "%s\n", GasEstimateResponse{GasEstimate: f.Gas()}) } unsignedTx, err := f.BuildUnsignedTx(msgs...) if err != nil { return err } json, err := clientCtx.TxConfig.TxJSONEncoder()(unsignedTx.GetTx()) if err != nil { return err } return clientCtx.PrintString(fmt.Sprintf("%s\n", json)) } // BuildSimTx creates an unsigned tx with an empty single signature and returns // the encoded transaction or an error if the unsigned transaction cannot be // built. func (f Factory) BuildSimTx(msgs ...sdk.Msg) ([]byte, error) { txb, err := f.BuildUnsignedTx(msgs...) if err != nil { return nil, err } pk, err := f.getSimPK() if err != nil { return nil, err } // Create an empty signature literal as the ante handler will populate with a // sentinel pubkey. sig := signing.SignatureV2{ PubKey: pk, Data: &signing.SingleSignatureData{ SignMode: f.signMode, }, Sequence: f.Sequence(), } if err := txb.SetSignatures(sig); err != nil { return nil, err } return f.txConfig.TxEncoder()(txb.GetTx()) } // getSimPK gets the public key to use for building a simulation tx. // Note, we should only check for keys in the keybase if we are in simulate and execute mode, // e.g. when using --gas=auto. // When using --dry-run, we are is simulation mode only and should not check the keybase. 
// Ref: https://github.com/cosmos/cosmos-sdk/issues/11283 func (f Factory) getSimPK() (cryptotypes.PubKey, error) { var ( ok bool pk cryptotypes.PubKey = &secp256k1.PubKey{} // use default public key type ) // Use the first element from the list of keys in order to generate a valid // pubkey that supports multiple algorithms. if f.simulateAndExecute && f.keybase != nil { records, _ := f.keybase.List() if len(records) == 0 { return nil, errors.New("cannot build signature for simulation, key records slice is empty") } // take the first record just for simulation purposes pk, ok = records[0].PubKey.GetCachedValue().(cryptotypes.PubKey) if !ok { return nil, errors.New("cannot build signature for simulation, failed to convert proto Any to public key") } } return pk, nil } // Prepare ensures the account defined by ctx.GetFromAddress() exists and // if the account number and/or the account sequence number are zero (not set), // they will be queried for and set on the provided Factory. // A new Factory with the updated fields will be returned. // Note: When in offline mode, the Prepare does nothing and returns the original factory. func (f Factory) Prepare(clientCtx client.Context) (Factory, error) { if clientCtx.Offline { return f, nil } fc := f from := clientCtx.GetFromAddress() if err := fc.accountRetriever.EnsureExists(clientCtx, from); err != nil { return fc, err } initNum, initSeq := fc.accountNumber, fc.sequence if initNum == 0 || initSeq == 0 { num, seq, err := fc.accountRetriever.GetAccountNumberSequence(clientCtx, from) if err != nil { return fc, err } if initNum == 0 { fc = fc.WithAccountNumber(num) } if initSeq == 0 { fc = fc.WithSequence(seq) } } return fc, nil }
WithFeePayer
identifier_name
factory.go
package tx import ( "errors" "fmt" "os" "strings" "github.com/cosmos/go-bip39" "github.com/spf13/pflag" "github.com/spf13/viper" "cosmossdk.io/math" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/tx" "github.com/cosmos/cosmos-sdk/types/tx/signing" ) // Factory defines a client transaction factory that facilitates generating and // signing an application-specific transaction. type Factory struct { keybase keyring.Keyring txConfig client.TxConfig accountRetriever client.AccountRetriever accountNumber uint64 sequence uint64 gas uint64 timeoutHeight uint64 gasAdjustment float64 chainID string offline bool generateOnly bool memo string fees sdk.Coins tip *tx.Tip feeGranter sdk.AccAddress feePayer sdk.AccAddress gasPrices sdk.DecCoins extOptions []*codectypes.Any signMode signing.SignMode simulateAndExecute bool preprocessTxHook client.PreprocessTxFn } // NewFactoryCLI creates a new Factory. 
func NewFactoryCLI(clientCtx client.Context, flagSet *pflag.FlagSet) (Factory, error) { if clientCtx.Viper == nil { clientCtx.Viper = viper.New() } if err := clientCtx.Viper.BindPFlags(flagSet); err != nil { return Factory{}, fmt.Errorf("failed to bind flags to viper: %w", err) } signModeStr := clientCtx.SignModeStr signMode := signing.SignMode_SIGN_MODE_UNSPECIFIED switch signModeStr { case flags.SignModeDirect: signMode = signing.SignMode_SIGN_MODE_DIRECT case flags.SignModeLegacyAminoJSON: signMode = signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON case flags.SignModeDirectAux: signMode = signing.SignMode_SIGN_MODE_DIRECT_AUX case flags.SignModeTextual: signMode = signing.SignMode_SIGN_MODE_TEXTUAL case flags.SignModeEIP191: signMode = signing.SignMode_SIGN_MODE_EIP_191 } var accNum, accSeq uint64 if clientCtx.Offline { if flagSet.Changed(flags.FlagAccountNumber) && flagSet.Changed(flags.FlagSequence) { accNum = clientCtx.Viper.GetUint64(flags.FlagAccountNumber) accSeq = clientCtx.Viper.GetUint64(flags.FlagSequence) } else { return Factory{}, errors.New("account-number and sequence must be set in offline mode") } } gasAdj := clientCtx.Viper.GetFloat64(flags.FlagGasAdjustment) memo := clientCtx.Viper.GetString(flags.FlagNote) timeoutHeight := clientCtx.Viper.GetUint64(flags.FlagTimeoutHeight) gasStr := clientCtx.Viper.GetString(flags.FlagGas) gasSetting, _ := flags.ParseGasSetting(gasStr) f := Factory{ txConfig: clientCtx.TxConfig, accountRetriever: clientCtx.AccountRetriever, keybase: clientCtx.Keyring, chainID: clientCtx.ChainID, offline: clientCtx.Offline, generateOnly: clientCtx.GenerateOnly, gas: gasSetting.Gas, simulateAndExecute: gasSetting.Simulate, accountNumber: accNum, sequence: accSeq, timeoutHeight: timeoutHeight, gasAdjustment: gasAdj, memo: memo, signMode: signMode, feeGranter: clientCtx.FeeGranter, feePayer: clientCtx.FeePayer, } feesStr := clientCtx.Viper.GetString(flags.FlagFees) f = f.WithFees(feesStr) tipsStr := 
clientCtx.Viper.GetString(flags.FlagTip) // Add tips to factory. The tipper is necessarily the Msg signer, i.e. // the from address. f = f.WithTips(tipsStr, clientCtx.FromAddress.String()) gasPricesStr := clientCtx.Viper.GetString(flags.FlagGasPrices) f = f.WithGasPrices(gasPricesStr) f = f.WithPreprocessTxHook(clientCtx.PreprocessTxHook) return f, nil } func (f Factory) AccountNumber() uint64 { return f.accountNumber } func (f Factory) Sequence() uint64 { return f.sequence } func (f Factory) Gas() uint64 { return f.gas } func (f Factory) GasAdjustment() float64 { return f.gasAdjustment } func (f Factory) Keybase() keyring.Keyring { return f.keybase } func (f Factory) ChainID() string { return f.chainID } func (f Factory) Memo() string { return f.memo } func (f Factory) Fees() sdk.Coins { return f.fees } func (f Factory) GasPrices() sdk.DecCoins { return f.gasPrices } func (f Factory) AccountRetriever() client.AccountRetriever { return f.accountRetriever } func (f Factory) TimeoutHeight() uint64 { return f.timeoutHeight } // SimulateAndExecute returns the option to simulate and then execute the transaction // using the gas from the simulation results func (f Factory) SimulateAndExecute() bool { return f.simulateAndExecute } // WithTxConfig returns a copy of the Factory with an updated TxConfig. func (f Factory) WithTxConfig(g client.TxConfig) Factory { f.txConfig = g return f } // WithAccountRetriever returns a copy of the Factory with an updated AccountRetriever. func (f Factory) WithAccountRetriever(ar client.AccountRetriever) Factory { f.accountRetriever = ar return f } // WithChainID returns a copy of the Factory with an updated chainID. func (f Factory) WithChainID(chainID string) Factory { f.chainID = chainID return f } // WithGas returns a copy of the Factory with an updated gas value. func (f Factory) WithGas(gas uint64) Factory { f.gas = gas return f } // WithFees returns a copy of the Factory with an updated fee. 
func (f Factory) WithFees(fees string) Factory { parsedFees, err := sdk.ParseCoinsNormalized(fees) if err != nil { panic(err) } f.fees = parsedFees return f } // WithTips returns a copy of the Factory with an updated tip. func (f Factory) WithTips(tip, tipper string) Factory { parsedTips, err := sdk.ParseCoinsNormalized(tip) if err != nil { panic(err) } f.tip = &tx.Tip{ Tipper: tipper, Amount: parsedTips, } return f } // WithGasPrices returns a copy of the Factory with updated gas prices. func (f Factory) WithGasPrices(gasPrices string) Factory { parsedGasPrices, err := sdk.ParseDecCoins(gasPrices) if err != nil { panic(err) } f.gasPrices = parsedGasPrices return f } // WithKeybase returns a copy of the Factory with updated Keybase. func (f Factory) WithKeybase(keybase keyring.Keyring) Factory { f.keybase = keybase return f } // WithSequence returns a copy of the Factory with an updated sequence number. func (f Factory) WithSequence(sequence uint64) Factory { f.sequence = sequence return f } // WithMemo returns a copy of the Factory with an updated memo. func (f Factory) WithMemo(memo string) Factory { f.memo = memo return f } // WithAccountNumber returns a copy of the Factory with an updated account number. func (f Factory) WithAccountNumber(accnum uint64) Factory { f.accountNumber = accnum return f } // WithGasAdjustment returns a copy of the Factory with an updated gas adjustment. func (f Factory) WithGasAdjustment(gasAdj float64) Factory { f.gasAdjustment = gasAdj return f } // WithSimulateAndExecute returns a copy of the Factory with an updated gas // simulation value. func (f Factory) WithSimulateAndExecute(sim bool) Factory { f.simulateAndExecute = sim return f } // SignMode returns the sign mode configured in the Factory func (f Factory) SignMode() signing.SignMode { return f.signMode } // WithSignMode returns a copy of the Factory with an updated sign mode value. 
func (f Factory) WithSignMode(mode signing.SignMode) Factory { f.signMode = mode return f } // WithTimeoutHeight returns a copy of the Factory with an updated timeout height. func (f Factory) WithTimeoutHeight(height uint64) Factory { f.timeoutHeight = height return f } // WithFeeGranter returns a copy of the Factory with an updated fee granter. func (f Factory) WithFeeGranter(fg sdk.AccAddress) Factory
// WithFeePayer returns a copy of the Factory with an updated fee granter. func (f Factory) WithFeePayer(fp sdk.AccAddress) Factory { f.feePayer = fp return f } // WithPreprocessTxHook returns a copy of the Factory with an updated preprocess tx function, // allows for preprocessing of transaction data using the TxBuilder. func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory { f.preprocessTxHook = preprocessFn return f } // PreprocessTx calls the preprocessing hook with the factory parameters and // returns the result. func (f Factory) PreprocessTx(keyname string, builder client.TxBuilder) error { if f.preprocessTxHook == nil { // Allow pass-through return nil } key, err := f.Keybase().Key(keyname) if err != nil { return fmt.Errorf("error retrieving key from keyring: %w", err) } return f.preprocessTxHook(f.chainID, key.GetType(), builder) } // WithExtensionOptions returns a Factory with given extension options added to the existing options, // Example to add dynamic fee extension options: // // extOpt := ethermint.ExtensionOptionDynamicFeeTx{ // MaxPriorityPrice: math.NewInt(1000000), // } // // extBytes, _ := extOpt.Marshal() // // extOpts := []*types.Any{ // { // TypeUrl: "/ethermint.types.v1.ExtensionOptionDynamicFeeTx", // Value: extBytes, // }, // } // // txf.WithExtensionOptions(extOpts...) func (f Factory) WithExtensionOptions(extOpts ...*codectypes.Any) Factory { f.extOptions = extOpts return f } // BuildUnsignedTx builds a transaction to be signed given a set of messages. // Once created, the fee, memo, and messages are set. 
func (f Factory) BuildUnsignedTx(msgs ...sdk.Msg) (client.TxBuilder, error) { if f.offline && f.generateOnly { if f.chainID != "" { return nil, fmt.Errorf("chain ID cannot be used when offline and generate-only flags are set") } } else if f.chainID == "" { return nil, fmt.Errorf("chain ID required but not specified") } fees := f.fees if !f.gasPrices.IsZero() { if !fees.IsZero() { return nil, errors.New("cannot provide both fees and gas prices") } glDec := math.LegacyNewDec(int64(f.gas)) // Derive the fees based on the provided gas prices, where // fee = ceil(gasPrice * gasLimit). fees = make(sdk.Coins, len(f.gasPrices)) for i, gp := range f.gasPrices { fee := gp.Amount.Mul(glDec) fees[i] = sdk.NewCoin(gp.Denom, fee.Ceil().RoundInt()) } } // Prevent simple inclusion of a valid mnemonic in the memo field if f.memo != "" && bip39.IsMnemonicValid(strings.ToLower(f.memo)) { return nil, errors.New("cannot provide a valid mnemonic seed in the memo field") } tx := f.txConfig.NewTxBuilder() if err := tx.SetMsgs(msgs...); err != nil { return nil, err } tx.SetMemo(f.memo) tx.SetFeeAmount(fees) tx.SetGasLimit(f.gas) tx.SetFeeGranter(f.feeGranter) tx.SetFeePayer(f.feePayer) tx.SetTimeoutHeight(f.TimeoutHeight()) if etx, ok := tx.(client.ExtendedTxBuilder); ok { etx.SetExtensionOptions(f.extOptions...) } return tx, nil } // PrintUnsignedTx will generate an unsigned transaction and print it to the writer // specified by ctx.Output. If simulation was requested, the gas will be // simulated and also printed to the same writer before the transaction is // printed. 
func (f Factory) PrintUnsignedTx(clientCtx client.Context, msgs ...sdk.Msg) error { if f.SimulateAndExecute() { if clientCtx.Offline { return errors.New("cannot estimate gas in offline mode") } // Prepare TxFactory with acc & seq numbers as CalculateGas requires // account and sequence numbers to be set preparedTxf, err := f.Prepare(clientCtx) if err != nil { return err } _, adjusted, err := CalculateGas(clientCtx, preparedTxf, msgs...) if err != nil { return err } f = f.WithGas(adjusted) _, _ = fmt.Fprintf(os.Stderr, "%s\n", GasEstimateResponse{GasEstimate: f.Gas()}) } unsignedTx, err := f.BuildUnsignedTx(msgs...) if err != nil { return err } json, err := clientCtx.TxConfig.TxJSONEncoder()(unsignedTx.GetTx()) if err != nil { return err } return clientCtx.PrintString(fmt.Sprintf("%s\n", json)) } // BuildSimTx creates an unsigned tx with an empty single signature and returns // the encoded transaction or an error if the unsigned transaction cannot be // built. func (f Factory) BuildSimTx(msgs ...sdk.Msg) ([]byte, error) { txb, err := f.BuildUnsignedTx(msgs...) if err != nil { return nil, err } pk, err := f.getSimPK() if err != nil { return nil, err } // Create an empty signature literal as the ante handler will populate with a // sentinel pubkey. sig := signing.SignatureV2{ PubKey: pk, Data: &signing.SingleSignatureData{ SignMode: f.signMode, }, Sequence: f.Sequence(), } if err := txb.SetSignatures(sig); err != nil { return nil, err } return f.txConfig.TxEncoder()(txb.GetTx()) } // getSimPK gets the public key to use for building a simulation tx. // Note, we should only check for keys in the keybase if we are in simulate and execute mode, // e.g. when using --gas=auto. // When using --dry-run, we are is simulation mode only and should not check the keybase. 
// Ref: https://github.com/cosmos/cosmos-sdk/issues/11283 func (f Factory) getSimPK() (cryptotypes.PubKey, error) { var ( ok bool pk cryptotypes.PubKey = &secp256k1.PubKey{} // use default public key type ) // Use the first element from the list of keys in order to generate a valid // pubkey that supports multiple algorithms. if f.simulateAndExecute && f.keybase != nil { records, _ := f.keybase.List() if len(records) == 0 { return nil, errors.New("cannot build signature for simulation, key records slice is empty") } // take the first record just for simulation purposes pk, ok = records[0].PubKey.GetCachedValue().(cryptotypes.PubKey) if !ok { return nil, errors.New("cannot build signature for simulation, failed to convert proto Any to public key") } } return pk, nil } // Prepare ensures the account defined by ctx.GetFromAddress() exists and // if the account number and/or the account sequence number are zero (not set), // they will be queried for and set on the provided Factory. // A new Factory with the updated fields will be returned. // Note: When in offline mode, the Prepare does nothing and returns the original factory. func (f Factory) Prepare(clientCtx client.Context) (Factory, error) { if clientCtx.Offline { return f, nil } fc := f from := clientCtx.GetFromAddress() if err := fc.accountRetriever.EnsureExists(clientCtx, from); err != nil { return fc, err } initNum, initSeq := fc.accountNumber, fc.sequence if initNum == 0 || initSeq == 0 { num, seq, err := fc.accountRetriever.GetAccountNumberSequence(clientCtx, from) if err != nil { return fc, err } if initNum == 0 { fc = fc.WithAccountNumber(num) } if initSeq == 0 { fc = fc.WithSequence(seq) } } return fc, nil }
{ f.feeGranter = fg return f }
identifier_body
factory.go
package tx import ( "errors" "fmt" "os" "strings" "github.com/cosmos/go-bip39" "github.com/spf13/pflag" "github.com/spf13/viper" "cosmossdk.io/math" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/tx" "github.com/cosmos/cosmos-sdk/types/tx/signing" ) // Factory defines a client transaction factory that facilitates generating and // signing an application-specific transaction. type Factory struct { keybase keyring.Keyring txConfig client.TxConfig accountRetriever client.AccountRetriever accountNumber uint64 sequence uint64 gas uint64 timeoutHeight uint64 gasAdjustment float64 chainID string offline bool generateOnly bool memo string fees sdk.Coins tip *tx.Tip
feePayer sdk.AccAddress gasPrices sdk.DecCoins extOptions []*codectypes.Any signMode signing.SignMode simulateAndExecute bool preprocessTxHook client.PreprocessTxFn } // NewFactoryCLI creates a new Factory. func NewFactoryCLI(clientCtx client.Context, flagSet *pflag.FlagSet) (Factory, error) { if clientCtx.Viper == nil { clientCtx.Viper = viper.New() } if err := clientCtx.Viper.BindPFlags(flagSet); err != nil { return Factory{}, fmt.Errorf("failed to bind flags to viper: %w", err) } signModeStr := clientCtx.SignModeStr signMode := signing.SignMode_SIGN_MODE_UNSPECIFIED switch signModeStr { case flags.SignModeDirect: signMode = signing.SignMode_SIGN_MODE_DIRECT case flags.SignModeLegacyAminoJSON: signMode = signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON case flags.SignModeDirectAux: signMode = signing.SignMode_SIGN_MODE_DIRECT_AUX case flags.SignModeTextual: signMode = signing.SignMode_SIGN_MODE_TEXTUAL case flags.SignModeEIP191: signMode = signing.SignMode_SIGN_MODE_EIP_191 } var accNum, accSeq uint64 if clientCtx.Offline { if flagSet.Changed(flags.FlagAccountNumber) && flagSet.Changed(flags.FlagSequence) { accNum = clientCtx.Viper.GetUint64(flags.FlagAccountNumber) accSeq = clientCtx.Viper.GetUint64(flags.FlagSequence) } else { return Factory{}, errors.New("account-number and sequence must be set in offline mode") } } gasAdj := clientCtx.Viper.GetFloat64(flags.FlagGasAdjustment) memo := clientCtx.Viper.GetString(flags.FlagNote) timeoutHeight := clientCtx.Viper.GetUint64(flags.FlagTimeoutHeight) gasStr := clientCtx.Viper.GetString(flags.FlagGas) gasSetting, _ := flags.ParseGasSetting(gasStr) f := Factory{ txConfig: clientCtx.TxConfig, accountRetriever: clientCtx.AccountRetriever, keybase: clientCtx.Keyring, chainID: clientCtx.ChainID, offline: clientCtx.Offline, generateOnly: clientCtx.GenerateOnly, gas: gasSetting.Gas, simulateAndExecute: gasSetting.Simulate, accountNumber: accNum, sequence: accSeq, timeoutHeight: timeoutHeight, gasAdjustment: gasAdj, memo: memo, 
signMode: signMode, feeGranter: clientCtx.FeeGranter, feePayer: clientCtx.FeePayer, } feesStr := clientCtx.Viper.GetString(flags.FlagFees) f = f.WithFees(feesStr) tipsStr := clientCtx.Viper.GetString(flags.FlagTip) // Add tips to factory. The tipper is necessarily the Msg signer, i.e. // the from address. f = f.WithTips(tipsStr, clientCtx.FromAddress.String()) gasPricesStr := clientCtx.Viper.GetString(flags.FlagGasPrices) f = f.WithGasPrices(gasPricesStr) f = f.WithPreprocessTxHook(clientCtx.PreprocessTxHook) return f, nil } func (f Factory) AccountNumber() uint64 { return f.accountNumber } func (f Factory) Sequence() uint64 { return f.sequence } func (f Factory) Gas() uint64 { return f.gas } func (f Factory) GasAdjustment() float64 { return f.gasAdjustment } func (f Factory) Keybase() keyring.Keyring { return f.keybase } func (f Factory) ChainID() string { return f.chainID } func (f Factory) Memo() string { return f.memo } func (f Factory) Fees() sdk.Coins { return f.fees } func (f Factory) GasPrices() sdk.DecCoins { return f.gasPrices } func (f Factory) AccountRetriever() client.AccountRetriever { return f.accountRetriever } func (f Factory) TimeoutHeight() uint64 { return f.timeoutHeight } // SimulateAndExecute returns the option to simulate and then execute the transaction // using the gas from the simulation results func (f Factory) SimulateAndExecute() bool { return f.simulateAndExecute } // WithTxConfig returns a copy of the Factory with an updated TxConfig. func (f Factory) WithTxConfig(g client.TxConfig) Factory { f.txConfig = g return f } // WithAccountRetriever returns a copy of the Factory with an updated AccountRetriever. func (f Factory) WithAccountRetriever(ar client.AccountRetriever) Factory { f.accountRetriever = ar return f } // WithChainID returns a copy of the Factory with an updated chainID. func (f Factory) WithChainID(chainID string) Factory { f.chainID = chainID return f } // WithGas returns a copy of the Factory with an updated gas value. 
func (f Factory) WithGas(gas uint64) Factory { f.gas = gas return f } // WithFees returns a copy of the Factory with an updated fee. func (f Factory) WithFees(fees string) Factory { parsedFees, err := sdk.ParseCoinsNormalized(fees) if err != nil { panic(err) } f.fees = parsedFees return f } // WithTips returns a copy of the Factory with an updated tip. func (f Factory) WithTips(tip, tipper string) Factory { parsedTips, err := sdk.ParseCoinsNormalized(tip) if err != nil { panic(err) } f.tip = &tx.Tip{ Tipper: tipper, Amount: parsedTips, } return f } // WithGasPrices returns a copy of the Factory with updated gas prices. func (f Factory) WithGasPrices(gasPrices string) Factory { parsedGasPrices, err := sdk.ParseDecCoins(gasPrices) if err != nil { panic(err) } f.gasPrices = parsedGasPrices return f } // WithKeybase returns a copy of the Factory with updated Keybase. func (f Factory) WithKeybase(keybase keyring.Keyring) Factory { f.keybase = keybase return f } // WithSequence returns a copy of the Factory with an updated sequence number. func (f Factory) WithSequence(sequence uint64) Factory { f.sequence = sequence return f } // WithMemo returns a copy of the Factory with an updated memo. func (f Factory) WithMemo(memo string) Factory { f.memo = memo return f } // WithAccountNumber returns a copy of the Factory with an updated account number. func (f Factory) WithAccountNumber(accnum uint64) Factory { f.accountNumber = accnum return f } // WithGasAdjustment returns a copy of the Factory with an updated gas adjustment. func (f Factory) WithGasAdjustment(gasAdj float64) Factory { f.gasAdjustment = gasAdj return f } // WithSimulateAndExecute returns a copy of the Factory with an updated gas // simulation value. 
func (f Factory) WithSimulateAndExecute(sim bool) Factory { f.simulateAndExecute = sim return f } // SignMode returns the sign mode configured in the Factory func (f Factory) SignMode() signing.SignMode { return f.signMode } // WithSignMode returns a copy of the Factory with an updated sign mode value. func (f Factory) WithSignMode(mode signing.SignMode) Factory { f.signMode = mode return f } // WithTimeoutHeight returns a copy of the Factory with an updated timeout height. func (f Factory) WithTimeoutHeight(height uint64) Factory { f.timeoutHeight = height return f } // WithFeeGranter returns a copy of the Factory with an updated fee granter. func (f Factory) WithFeeGranter(fg sdk.AccAddress) Factory { f.feeGranter = fg return f } // WithFeePayer returns a copy of the Factory with an updated fee granter. func (f Factory) WithFeePayer(fp sdk.AccAddress) Factory { f.feePayer = fp return f } // WithPreprocessTxHook returns a copy of the Factory with an updated preprocess tx function, // allows for preprocessing of transaction data using the TxBuilder. func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory { f.preprocessTxHook = preprocessFn return f } // PreprocessTx calls the preprocessing hook with the factory parameters and // returns the result. 
func (f Factory) PreprocessTx(keyname string, builder client.TxBuilder) error { if f.preprocessTxHook == nil { // Allow pass-through return nil } key, err := f.Keybase().Key(keyname) if err != nil { return fmt.Errorf("error retrieving key from keyring: %w", err) } return f.preprocessTxHook(f.chainID, key.GetType(), builder) } // WithExtensionOptions returns a Factory with given extension options added to the existing options, // Example to add dynamic fee extension options: // // extOpt := ethermint.ExtensionOptionDynamicFeeTx{ // MaxPriorityPrice: math.NewInt(1000000), // } // // extBytes, _ := extOpt.Marshal() // // extOpts := []*types.Any{ // { // TypeUrl: "/ethermint.types.v1.ExtensionOptionDynamicFeeTx", // Value: extBytes, // }, // } // // txf.WithExtensionOptions(extOpts...) func (f Factory) WithExtensionOptions(extOpts ...*codectypes.Any) Factory { f.extOptions = extOpts return f } // BuildUnsignedTx builds a transaction to be signed given a set of messages. // Once created, the fee, memo, and messages are set. func (f Factory) BuildUnsignedTx(msgs ...sdk.Msg) (client.TxBuilder, error) { if f.offline && f.generateOnly { if f.chainID != "" { return nil, fmt.Errorf("chain ID cannot be used when offline and generate-only flags are set") } } else if f.chainID == "" { return nil, fmt.Errorf("chain ID required but not specified") } fees := f.fees if !f.gasPrices.IsZero() { if !fees.IsZero() { return nil, errors.New("cannot provide both fees and gas prices") } glDec := math.LegacyNewDec(int64(f.gas)) // Derive the fees based on the provided gas prices, where // fee = ceil(gasPrice * gasLimit). 
fees = make(sdk.Coins, len(f.gasPrices)) for i, gp := range f.gasPrices { fee := gp.Amount.Mul(glDec) fees[i] = sdk.NewCoin(gp.Denom, fee.Ceil().RoundInt()) } } // Prevent simple inclusion of a valid mnemonic in the memo field if f.memo != "" && bip39.IsMnemonicValid(strings.ToLower(f.memo)) { return nil, errors.New("cannot provide a valid mnemonic seed in the memo field") } tx := f.txConfig.NewTxBuilder() if err := tx.SetMsgs(msgs...); err != nil { return nil, err } tx.SetMemo(f.memo) tx.SetFeeAmount(fees) tx.SetGasLimit(f.gas) tx.SetFeeGranter(f.feeGranter) tx.SetFeePayer(f.feePayer) tx.SetTimeoutHeight(f.TimeoutHeight()) if etx, ok := tx.(client.ExtendedTxBuilder); ok { etx.SetExtensionOptions(f.extOptions...) } return tx, nil } // PrintUnsignedTx will generate an unsigned transaction and print it to the writer // specified by ctx.Output. If simulation was requested, the gas will be // simulated and also printed to the same writer before the transaction is // printed. func (f Factory) PrintUnsignedTx(clientCtx client.Context, msgs ...sdk.Msg) error { if f.SimulateAndExecute() { if clientCtx.Offline { return errors.New("cannot estimate gas in offline mode") } // Prepare TxFactory with acc & seq numbers as CalculateGas requires // account and sequence numbers to be set preparedTxf, err := f.Prepare(clientCtx) if err != nil { return err } _, adjusted, err := CalculateGas(clientCtx, preparedTxf, msgs...) if err != nil { return err } f = f.WithGas(adjusted) _, _ = fmt.Fprintf(os.Stderr, "%s\n", GasEstimateResponse{GasEstimate: f.Gas()}) } unsignedTx, err := f.BuildUnsignedTx(msgs...) if err != nil { return err } json, err := clientCtx.TxConfig.TxJSONEncoder()(unsignedTx.GetTx()) if err != nil { return err } return clientCtx.PrintString(fmt.Sprintf("%s\n", json)) } // BuildSimTx creates an unsigned tx with an empty single signature and returns // the encoded transaction or an error if the unsigned transaction cannot be // built. 
func (f Factory) BuildSimTx(msgs ...sdk.Msg) ([]byte, error) { txb, err := f.BuildUnsignedTx(msgs...) if err != nil { return nil, err } pk, err := f.getSimPK() if err != nil { return nil, err } // Create an empty signature literal as the ante handler will populate with a // sentinel pubkey. sig := signing.SignatureV2{ PubKey: pk, Data: &signing.SingleSignatureData{ SignMode: f.signMode, }, Sequence: f.Sequence(), } if err := txb.SetSignatures(sig); err != nil { return nil, err } return f.txConfig.TxEncoder()(txb.GetTx()) } // getSimPK gets the public key to use for building a simulation tx. // Note, we should only check for keys in the keybase if we are in simulate and execute mode, // e.g. when using --gas=auto. // When using --dry-run, we are is simulation mode only and should not check the keybase. // Ref: https://github.com/cosmos/cosmos-sdk/issues/11283 func (f Factory) getSimPK() (cryptotypes.PubKey, error) { var ( ok bool pk cryptotypes.PubKey = &secp256k1.PubKey{} // use default public key type ) // Use the first element from the list of keys in order to generate a valid // pubkey that supports multiple algorithms. if f.simulateAndExecute && f.keybase != nil { records, _ := f.keybase.List() if len(records) == 0 { return nil, errors.New("cannot build signature for simulation, key records slice is empty") } // take the first record just for simulation purposes pk, ok = records[0].PubKey.GetCachedValue().(cryptotypes.PubKey) if !ok { return nil, errors.New("cannot build signature for simulation, failed to convert proto Any to public key") } } return pk, nil } // Prepare ensures the account defined by ctx.GetFromAddress() exists and // if the account number and/or the account sequence number are zero (not set), // they will be queried for and set on the provided Factory. // A new Factory with the updated fields will be returned. // Note: When in offline mode, the Prepare does nothing and returns the original factory. 
func (f Factory) Prepare(clientCtx client.Context) (Factory, error) { if clientCtx.Offline { return f, nil } fc := f from := clientCtx.GetFromAddress() if err := fc.accountRetriever.EnsureExists(clientCtx, from); err != nil { return fc, err } initNum, initSeq := fc.accountNumber, fc.sequence if initNum == 0 || initSeq == 0 { num, seq, err := fc.accountRetriever.GetAccountNumberSequence(clientCtx, from) if err != nil { return fc, err } if initNum == 0 { fc = fc.WithAccountNumber(num) } if initSeq == 0 { fc = fc.WithSequence(seq) } } return fc, nil }
feeGranter sdk.AccAddress
random_line_split
factory.go
package tx import ( "errors" "fmt" "os" "strings" "github.com/cosmos/go-bip39" "github.com/spf13/pflag" "github.com/spf13/viper" "cosmossdk.io/math" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/crypto/keyring" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/tx" "github.com/cosmos/cosmos-sdk/types/tx/signing" ) // Factory defines a client transaction factory that facilitates generating and // signing an application-specific transaction. type Factory struct { keybase keyring.Keyring txConfig client.TxConfig accountRetriever client.AccountRetriever accountNumber uint64 sequence uint64 gas uint64 timeoutHeight uint64 gasAdjustment float64 chainID string offline bool generateOnly bool memo string fees sdk.Coins tip *tx.Tip feeGranter sdk.AccAddress feePayer sdk.AccAddress gasPrices sdk.DecCoins extOptions []*codectypes.Any signMode signing.SignMode simulateAndExecute bool preprocessTxHook client.PreprocessTxFn } // NewFactoryCLI creates a new Factory. 
func NewFactoryCLI(clientCtx client.Context, flagSet *pflag.FlagSet) (Factory, error) { if clientCtx.Viper == nil { clientCtx.Viper = viper.New() } if err := clientCtx.Viper.BindPFlags(flagSet); err != nil { return Factory{}, fmt.Errorf("failed to bind flags to viper: %w", err) } signModeStr := clientCtx.SignModeStr signMode := signing.SignMode_SIGN_MODE_UNSPECIFIED switch signModeStr { case flags.SignModeDirect: signMode = signing.SignMode_SIGN_MODE_DIRECT case flags.SignModeLegacyAminoJSON: signMode = signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON case flags.SignModeDirectAux: signMode = signing.SignMode_SIGN_MODE_DIRECT_AUX case flags.SignModeTextual: signMode = signing.SignMode_SIGN_MODE_TEXTUAL case flags.SignModeEIP191: signMode = signing.SignMode_SIGN_MODE_EIP_191 } var accNum, accSeq uint64 if clientCtx.Offline { if flagSet.Changed(flags.FlagAccountNumber) && flagSet.Changed(flags.FlagSequence) { accNum = clientCtx.Viper.GetUint64(flags.FlagAccountNumber) accSeq = clientCtx.Viper.GetUint64(flags.FlagSequence) } else { return Factory{}, errors.New("account-number and sequence must be set in offline mode") } } gasAdj := clientCtx.Viper.GetFloat64(flags.FlagGasAdjustment) memo := clientCtx.Viper.GetString(flags.FlagNote) timeoutHeight := clientCtx.Viper.GetUint64(flags.FlagTimeoutHeight) gasStr := clientCtx.Viper.GetString(flags.FlagGas) gasSetting, _ := flags.ParseGasSetting(gasStr) f := Factory{ txConfig: clientCtx.TxConfig, accountRetriever: clientCtx.AccountRetriever, keybase: clientCtx.Keyring, chainID: clientCtx.ChainID, offline: clientCtx.Offline, generateOnly: clientCtx.GenerateOnly, gas: gasSetting.Gas, simulateAndExecute: gasSetting.Simulate, accountNumber: accNum, sequence: accSeq, timeoutHeight: timeoutHeight, gasAdjustment: gasAdj, memo: memo, signMode: signMode, feeGranter: clientCtx.FeeGranter, feePayer: clientCtx.FeePayer, } feesStr := clientCtx.Viper.GetString(flags.FlagFees) f = f.WithFees(feesStr) tipsStr := 
clientCtx.Viper.GetString(flags.FlagTip) // Add tips to factory. The tipper is necessarily the Msg signer, i.e. // the from address. f = f.WithTips(tipsStr, clientCtx.FromAddress.String()) gasPricesStr := clientCtx.Viper.GetString(flags.FlagGasPrices) f = f.WithGasPrices(gasPricesStr) f = f.WithPreprocessTxHook(clientCtx.PreprocessTxHook) return f, nil } func (f Factory) AccountNumber() uint64 { return f.accountNumber } func (f Factory) Sequence() uint64 { return f.sequence } func (f Factory) Gas() uint64 { return f.gas } func (f Factory) GasAdjustment() float64 { return f.gasAdjustment } func (f Factory) Keybase() keyring.Keyring { return f.keybase } func (f Factory) ChainID() string { return f.chainID } func (f Factory) Memo() string { return f.memo } func (f Factory) Fees() sdk.Coins { return f.fees } func (f Factory) GasPrices() sdk.DecCoins { return f.gasPrices } func (f Factory) AccountRetriever() client.AccountRetriever { return f.accountRetriever } func (f Factory) TimeoutHeight() uint64 { return f.timeoutHeight } // SimulateAndExecute returns the option to simulate and then execute the transaction // using the gas from the simulation results func (f Factory) SimulateAndExecute() bool { return f.simulateAndExecute } // WithTxConfig returns a copy of the Factory with an updated TxConfig. func (f Factory) WithTxConfig(g client.TxConfig) Factory { f.txConfig = g return f } // WithAccountRetriever returns a copy of the Factory with an updated AccountRetriever. func (f Factory) WithAccountRetriever(ar client.AccountRetriever) Factory { f.accountRetriever = ar return f } // WithChainID returns a copy of the Factory with an updated chainID. func (f Factory) WithChainID(chainID string) Factory { f.chainID = chainID return f } // WithGas returns a copy of the Factory with an updated gas value. func (f Factory) WithGas(gas uint64) Factory { f.gas = gas return f } // WithFees returns a copy of the Factory with an updated fee. 
func (f Factory) WithFees(fees string) Factory { parsedFees, err := sdk.ParseCoinsNormalized(fees) if err != nil { panic(err) } f.fees = parsedFees return f } // WithTips returns a copy of the Factory with an updated tip. func (f Factory) WithTips(tip, tipper string) Factory { parsedTips, err := sdk.ParseCoinsNormalized(tip) if err != nil { panic(err) } f.tip = &tx.Tip{ Tipper: tipper, Amount: parsedTips, } return f } // WithGasPrices returns a copy of the Factory with updated gas prices. func (f Factory) WithGasPrices(gasPrices string) Factory { parsedGasPrices, err := sdk.ParseDecCoins(gasPrices) if err != nil { panic(err) } f.gasPrices = parsedGasPrices return f } // WithKeybase returns a copy of the Factory with updated Keybase. func (f Factory) WithKeybase(keybase keyring.Keyring) Factory { f.keybase = keybase return f } // WithSequence returns a copy of the Factory with an updated sequence number. func (f Factory) WithSequence(sequence uint64) Factory { f.sequence = sequence return f } // WithMemo returns a copy of the Factory with an updated memo. func (f Factory) WithMemo(memo string) Factory { f.memo = memo return f } // WithAccountNumber returns a copy of the Factory with an updated account number. func (f Factory) WithAccountNumber(accnum uint64) Factory { f.accountNumber = accnum return f } // WithGasAdjustment returns a copy of the Factory with an updated gas adjustment. func (f Factory) WithGasAdjustment(gasAdj float64) Factory { f.gasAdjustment = gasAdj return f } // WithSimulateAndExecute returns a copy of the Factory with an updated gas // simulation value. func (f Factory) WithSimulateAndExecute(sim bool) Factory { f.simulateAndExecute = sim return f } // SignMode returns the sign mode configured in the Factory func (f Factory) SignMode() signing.SignMode { return f.signMode } // WithSignMode returns a copy of the Factory with an updated sign mode value. 
func (f Factory) WithSignMode(mode signing.SignMode) Factory { f.signMode = mode return f } // WithTimeoutHeight returns a copy of the Factory with an updated timeout height. func (f Factory) WithTimeoutHeight(height uint64) Factory { f.timeoutHeight = height return f } // WithFeeGranter returns a copy of the Factory with an updated fee granter. func (f Factory) WithFeeGranter(fg sdk.AccAddress) Factory { f.feeGranter = fg return f } // WithFeePayer returns a copy of the Factory with an updated fee granter. func (f Factory) WithFeePayer(fp sdk.AccAddress) Factory { f.feePayer = fp return f } // WithPreprocessTxHook returns a copy of the Factory with an updated preprocess tx function, // allows for preprocessing of transaction data using the TxBuilder. func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory { f.preprocessTxHook = preprocessFn return f } // PreprocessTx calls the preprocessing hook with the factory parameters and // returns the result. func (f Factory) PreprocessTx(keyname string, builder client.TxBuilder) error { if f.preprocessTxHook == nil { // Allow pass-through return nil } key, err := f.Keybase().Key(keyname) if err != nil { return fmt.Errorf("error retrieving key from keyring: %w", err) } return f.preprocessTxHook(f.chainID, key.GetType(), builder) } // WithExtensionOptions returns a Factory with given extension options added to the existing options, // Example to add dynamic fee extension options: // // extOpt := ethermint.ExtensionOptionDynamicFeeTx{ // MaxPriorityPrice: math.NewInt(1000000), // } // // extBytes, _ := extOpt.Marshal() // // extOpts := []*types.Any{ // { // TypeUrl: "/ethermint.types.v1.ExtensionOptionDynamicFeeTx", // Value: extBytes, // }, // } // // txf.WithExtensionOptions(extOpts...) func (f Factory) WithExtensionOptions(extOpts ...*codectypes.Any) Factory { f.extOptions = extOpts return f } // BuildUnsignedTx builds a transaction to be signed given a set of messages. 
// Once created, the fee, memo, and messages are set. func (f Factory) BuildUnsignedTx(msgs ...sdk.Msg) (client.TxBuilder, error) { if f.offline && f.generateOnly { if f.chainID != "" { return nil, fmt.Errorf("chain ID cannot be used when offline and generate-only flags are set") } } else if f.chainID == "" { return nil, fmt.Errorf("chain ID required but not specified") } fees := f.fees if !f.gasPrices.IsZero() { if !fees.IsZero() { return nil, errors.New("cannot provide both fees and gas prices") } glDec := math.LegacyNewDec(int64(f.gas)) // Derive the fees based on the provided gas prices, where // fee = ceil(gasPrice * gasLimit). fees = make(sdk.Coins, len(f.gasPrices)) for i, gp := range f.gasPrices { fee := gp.Amount.Mul(glDec) fees[i] = sdk.NewCoin(gp.Denom, fee.Ceil().RoundInt()) } } // Prevent simple inclusion of a valid mnemonic in the memo field if f.memo != "" && bip39.IsMnemonicValid(strings.ToLower(f.memo)) { return nil, errors.New("cannot provide a valid mnemonic seed in the memo field") } tx := f.txConfig.NewTxBuilder() if err := tx.SetMsgs(msgs...); err != nil { return nil, err } tx.SetMemo(f.memo) tx.SetFeeAmount(fees) tx.SetGasLimit(f.gas) tx.SetFeeGranter(f.feeGranter) tx.SetFeePayer(f.feePayer) tx.SetTimeoutHeight(f.TimeoutHeight()) if etx, ok := tx.(client.ExtendedTxBuilder); ok { etx.SetExtensionOptions(f.extOptions...) } return tx, nil } // PrintUnsignedTx will generate an unsigned transaction and print it to the writer // specified by ctx.Output. If simulation was requested, the gas will be // simulated and also printed to the same writer before the transaction is // printed. 
func (f Factory) PrintUnsignedTx(clientCtx client.Context, msgs ...sdk.Msg) error { if f.SimulateAndExecute() { if clientCtx.Offline { return errors.New("cannot estimate gas in offline mode") } // Prepare TxFactory with acc & seq numbers as CalculateGas requires // account and sequence numbers to be set preparedTxf, err := f.Prepare(clientCtx) if err != nil { return err } _, adjusted, err := CalculateGas(clientCtx, preparedTxf, msgs...) if err != nil { return err } f = f.WithGas(adjusted) _, _ = fmt.Fprintf(os.Stderr, "%s\n", GasEstimateResponse{GasEstimate: f.Gas()}) } unsignedTx, err := f.BuildUnsignedTx(msgs...) if err != nil { return err } json, err := clientCtx.TxConfig.TxJSONEncoder()(unsignedTx.GetTx()) if err != nil { return err } return clientCtx.PrintString(fmt.Sprintf("%s\n", json)) } // BuildSimTx creates an unsigned tx with an empty single signature and returns // the encoded transaction or an error if the unsigned transaction cannot be // built. func (f Factory) BuildSimTx(msgs ...sdk.Msg) ([]byte, error) { txb, err := f.BuildUnsignedTx(msgs...) if err != nil { return nil, err } pk, err := f.getSimPK() if err != nil { return nil, err } // Create an empty signature literal as the ante handler will populate with a // sentinel pubkey. sig := signing.SignatureV2{ PubKey: pk, Data: &signing.SingleSignatureData{ SignMode: f.signMode, }, Sequence: f.Sequence(), } if err := txb.SetSignatures(sig); err != nil { return nil, err } return f.txConfig.TxEncoder()(txb.GetTx()) } // getSimPK gets the public key to use for building a simulation tx. // Note, we should only check for keys in the keybase if we are in simulate and execute mode, // e.g. when using --gas=auto. // When using --dry-run, we are is simulation mode only and should not check the keybase. 
// Ref: https://github.com/cosmos/cosmos-sdk/issues/11283 func (f Factory) getSimPK() (cryptotypes.PubKey, error) { var ( ok bool pk cryptotypes.PubKey = &secp256k1.PubKey{} // use default public key type ) // Use the first element from the list of keys in order to generate a valid // pubkey that supports multiple algorithms. if f.simulateAndExecute && f.keybase != nil { records, _ := f.keybase.List() if len(records) == 0 { return nil, errors.New("cannot build signature for simulation, key records slice is empty") } // take the first record just for simulation purposes pk, ok = records[0].PubKey.GetCachedValue().(cryptotypes.PubKey) if !ok { return nil, errors.New("cannot build signature for simulation, failed to convert proto Any to public key") } } return pk, nil } // Prepare ensures the account defined by ctx.GetFromAddress() exists and // if the account number and/or the account sequence number are zero (not set), // they will be queried for and set on the provided Factory. // A new Factory with the updated fields will be returned. // Note: When in offline mode, the Prepare does nothing and returns the original factory. func (f Factory) Prepare(clientCtx client.Context) (Factory, error) { if clientCtx.Offline { return f, nil } fc := f from := clientCtx.GetFromAddress() if err := fc.accountRetriever.EnsureExists(clientCtx, from); err != nil { return fc, err } initNum, initSeq := fc.accountNumber, fc.sequence if initNum == 0 || initSeq == 0 { num, seq, err := fc.accountRetriever.GetAccountNumberSequence(clientCtx, from) if err != nil { return fc, err } if initNum == 0 { fc = fc.WithAccountNumber(num) } if initSeq == 0
} return fc, nil }
{ fc = fc.WithSequence(seq) }
conditional_block
mgclarge.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Page heap. // // See malloc.go for the general overview. // // Allocation policy is the subject of this file. All free spans live in // a treap for most of their time being free. See // https://en.wikipedia.org/wiki/Treap or // https://faculty.washington.edu/aragon/pubs/rst89.pdf for an overview. // sema.go also holds an implementation of a treap. // // Each treapNode holds a single span. The treap is sorted by base address // and each span necessarily has a unique base address. // Spans are returned based on a first-fit algorithm, acquiring the span // with the lowest base address which still satisfies the request. // // The first-fit algorithm is possible due to an augmentation of each // treapNode to maintain the size of the largest span in the subtree rooted // at that treapNode. Below we refer to this invariant as the maxPages // invariant. // // The primary routines are // insert: adds a span to the treap // remove: removes the span from that treap that best fits the required size // removeSpan: which removes a specific span from the treap // // Whenever a pointer to a span which is owned by the treap is acquired, that // span must not be mutated. To mutate a span in the treap, remove it first. // // mheap_.lock must be held when manipulating this data structure. 
package runtime import ( "unsafe" ) //go:notinheap type mTreap struct { treap *treapNode unscavHugePages uintptr // number of unscavenged huge pages in the treap } //go:notinheap type treapNode struct { right *treapNode // all treapNodes > this treap node left *treapNode // all treapNodes < this treap node parent *treapNode // direct parent of this node, nil if root key uintptr // base address of the span, used as primary sort key span *mspan // span at base address key maxPages uintptr // the maximum size of any span in this subtree, including the root priority uint32 // random number used by treap algorithm to keep tree probabilistically balanced types treapIterFilter // the types of spans available in this subtree } // updateInvariants is a helper method which has a node recompute its own // maxPages and types values by looking at its own span as well as the // values of its direct children. // // Returns true if anything changed. func (t *treapNode) updateInvariants() bool { m, i := t.maxPages, t.types t.maxPages = t.span.npages t.types = t.span.treapFilter() if t.left != nil { t.types |= t.left.types if t.maxPages < t.left.maxPages { t.maxPages = t.left.maxPages } } if t.right != nil { t.types |= t.right.types if t.maxPages < t.right.maxPages { t.maxPages = t.right.maxPages } } return m != t.maxPages || i != t.types } // findMinimal finds the minimal (lowest base addressed) node in the treap // which matches the criteria set out by the filter f and returns nil if // none exists. // // This algorithm is functionally the same as (*mTreap).find, so see that // method for more details. 
func (t *treapNode) findMinimal(f treapIterFilter) *treapNode { if t == nil || !f.matches(t.types) { return nil } for t != nil { if t.left != nil && f.matches(t.left.types) { t = t.left } else if f.matches(t.span.treapFilter()) { break } else if t.right != nil && f.matches(t.right.types) { t = t.right } else { println("runtime: f=", f) throw("failed to find minimal node matching filter") } } return t } // findMaximal finds the maximal (highest base addressed) node in the treap // which matches the criteria set out by the filter f and returns nil if // none exists. // // This algorithm is the logical inversion of findMinimal and just changes // the order of the left and right tests. func (t *treapNode) findMaximal(f treapIterFilter) *treapNode { if t == nil || !f.matches(t.types) { return nil } for t != nil { if t.right != nil && f.matches(t.right.types) { t = t.right } else if f.matches(t.span.treapFilter()) { break } else if t.left != nil && f.matches(t.left.types) { t = t.left } else { println("runtime: f=", f) throw("failed to find minimal node matching filter") } } return t } // pred returns the predecessor of t in the treap subject to the criteria // specified by the filter f. Returns nil if no such predecessor exists. func (t *treapNode) pred(f treapIterFilter) *treapNode { if t.left != nil && f.matches(t.left.types) { // The node has a left subtree which contains at least one matching // node, find the maximal matching node in that subtree. return t.left.findMaximal(f) } // Lacking a left subtree, look to the parents. p := t // previous node t = t.parent for t != nil { // Walk up the tree until we find a node that has a left subtree // that we haven't already visited. if t.right == p { if f.matches(t.span.treapFilter()) { // If this node matches, then it's guaranteed to be the // predecessor since everything to its left is strictly // greater. 
return t } else if t.left != nil && f.matches(t.left.types) { // Failing the root of this subtree, if its left subtree has // something, that's where we'll find our predecessor. return t.left.findMaximal(f) } } p = t t = t.parent } // If the parent is nil, then we've hit the root without finding // a suitable left subtree containing the node (and the predecessor // wasn't on the path). Thus, there's no predecessor, so just return // nil. return nil } // succ returns the successor of t in the treap subject to the criteria // specified by the filter f. Returns nil if no such successor exists. func (t *treapNode) succ(f treapIterFilter) *treapNode { // See pred. This method is just the logical inversion of it. if t.right != nil && f.matches(t.right.types) { return t.right.findMinimal(f) } p := t t = t.parent for t != nil { if t.left == p { if f.matches(t.span.treapFilter()) { return t } else if t.right != nil && f.matches(t.right.types) { return t.right.findMinimal(f) } } p = t t = t.parent } return nil } // isSpanInTreap is handy for debugging. One should hold the heap lock, usually // mheap_.lock(). func (t *treapNode) isSpanInTreap(s *mspan) bool { if t == nil { return false } return t.span == s || t.left.isSpanInTreap(s) || t.right.isSpanInTreap(s) } // walkTreap is handy for debugging and testing. // Starting at some treapnode t, for example the root, do a depth first preorder walk of // the tree executing fn at each treap node. One should hold the heap lock, usually // mheap_.lock(). func (t *treapNode) walkTreap(fn func(tn *treapNode)) { if t == nil { return } fn(t) t.left.walkTreap(fn) t.right.walkTreap(fn) } // checkTreapNode when used in conjunction with walkTreap can usually detect a // poorly formed treap. 
func checkTreapNode(t *treapNode) { if t == nil { return } if t.span.next != nil || t.span.prev != nil || t.span.list != nil { throw("span may be on an mSpanList while simultaneously in the treap") } if t.span.base() != t.key { println("runtime: checkTreapNode treapNode t=", t, " t.key=", t.key, "t.span.base()=", t.span.base()) throw("why does span.base() and treap.key do not match?") } if t.left != nil && t.key < t.left.key { throw("found out-of-order spans in treap (left child has greater base address)") } if t.right != nil && t.key > t.right.key { throw("found out-of-order spans in treap (right child has lesser base address)") } } // validateInvariants is handy for debugging and testing. // It ensures that the various invariants on each treap node are // appropriately maintained throughout the treap by walking the // treap in a post-order manner. func (t *treapNode) validateInvariants() (uintptr, treapIterFilter) { if t == nil { return 0, 0 } leftMax, leftTypes := t.left.validateInvariants() rightMax, rightTypes := t.right.validateInvariants() max := t.span.npages if leftMax > max { max = leftMax } if rightMax > max { max = rightMax } if max != t.maxPages { println("runtime: t.maxPages=", t.maxPages, "want=", max) throw("maxPages invariant violated in treap") } typ := t.span.treapFilter() | leftTypes | rightTypes if typ != t.types { println("runtime: t.types=", t.types, "want=", typ) throw("types invariant violated in treap") } return max, typ } // treapIterType represents the type of iteration to perform // over the treap. Each different flag is represented by a bit // in the type, and types may be combined together by a bitwise // or operation. // // Note that only 5 bits are available for treapIterType, do not // use the 3 higher-order bits. This constraint is to allow for // expansion into a treapIterFilter, which is a uint32. 
type treapIterType uint8 const ( treapIterScav treapIterType = 1 << iota // scavenged spans treapIterHuge // spans containing at least one huge page treapIterBits = iota ) // treapIterFilter is a bitwise filter of different spans by binary // properties. Each bit of a treapIterFilter represents a unique // combination of bits set in a treapIterType, in other words, it // represents the power set of a treapIterType. // // The purpose of this representation is to allow the existence of // a specific span type to bubble up in the treap (see the types // field on treapNode). // // More specifically, any treapIterType may be transformed into a // treapIterFilter for a specific combination of flags via the // following operation: 1 << (0x1f&treapIterType). type treapIterFilter uint32 // treapFilterAll represents the filter which allows all spans. const treapFilterAll = ^treapIterFilter(0) // treapFilter creates a new treapIterFilter from two treapIterTypes. // mask represents a bitmask for which flags we should check against // and match for the expected result after applying the mask. func treapFilter(mask, match treapIterType) treapIterFilter { allow := treapIterFilter(0) for i := treapIterType(0); i < 1<<treapIterBits; i++ { if mask&i == match { allow |= 1 << i } } return allow } // matches returns true if m and f intersect. func (f treapIterFilter) matches(m treapIterFilter) bool { return f&m != 0 } // treapFilter returns the treapIterFilter exactly matching this span, // i.e. popcount(result) == 1. func (s *mspan) treapFilter() treapIterFilter { have := treapIterType(0) if s.scavenged { have |= treapIterScav } if s.hugePages() > 0 { have |= treapIterHuge } return treapIterFilter(uint32(1) << (0x1f & have)) } // treapIter is a bidirectional iterator type which may be used to iterate over a // an mTreap in-order forwards (increasing order) or backwards (decreasing order). // Its purpose is to hide details about the treap from users when trying to iterate // over it. 
// // To create iterators over the treap, call start or end on an mTreap. type treapIter struct { f treapIterFilter t *treapNode } // span returns the span at the current position in the treap. // If the treap is not valid, span will panic. func (i *treapIter) span() *mspan { return i.t.span } // valid returns whether the iterator represents a valid position // in the mTreap. func (i *treapIter) valid() bool { return i.t != nil } // next moves the iterator forward by one. Once the iterator // ceases to be valid, calling next will panic. func (i treapIter) next() treapIter { i.t = i.t.succ(i.f) return i } // prev moves the iterator backwards by one. Once the iterator // ceases to be valid, calling prev will panic. func (i treapIter) prev() treapIter { i.t = i.t.pred(i.f) return i } // start returns an iterator which points to the start of the treap (the // left-most node in the treap) subject to mask and match constraints. func (root *mTreap) start(mask, match treapIterType) treapIter { f := treapFilter(mask, match) return treapIter{f, root.treap.findMinimal(f)} } // end returns an iterator which points to the end of the treap (the // right-most node in the treap) subject to mask and match constraints. func (root *mTreap) end(mask, match treapIterType) treapIter { f := treapFilter(mask, match) return treapIter{f, root.treap.findMaximal(f)} } // mutate allows one to mutate the span without removing it from the treap via a // callback. The span's base and size are allowed to change as long as the span // remains in the same order relative to its predecessor and successor. // // Note however that any operation that causes a treap rebalancing inside of fn // is strictly forbidden, as that may cause treap node metadata to go // out-of-sync. func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) { s := i.span() // Save some state about the span for later inspection. hpages := s.hugePages() scavenged := s.scavenged // Call the mutator. 
fn(s) // Update unscavHugePages appropriately. if !scavenged { mheap_.free.unscavHugePages -= hpages } if !s.scavenged { mheap_.free.unscavHugePages += s.hugePages() } // Update the key in case the base changed. i.t.key = s.base() // Updating invariants up the tree needs to happen if // anything changed at all, so just go ahead and do it // unconditionally. // // If it turns out nothing changed, it'll exit quickly. t := i.t for t != nil && t.updateInvariants() { t = t.parent } } // insert adds span to the large span treap. func (root *mTreap) insert(span *mspan) { if !span.scavenged { root.unscavHugePages += span.hugePages() } base := span.base() var last *treapNode pt := &root.treap for t := *pt; t != nil; t = *pt { last = t if t.key < base { pt = &t.right } else if t.key > base { pt = &t.left } else { throw("inserting span already in treap") } } // Add t as new leaf in tree of span size and unique addrs. // The balanced tree is a treap using priority as the random heap priority. // That is, it is a binary tree ordered according to the key, // but then among the space of possible binary trees respecting those // keys, it is kept balanced on average by maintaining a heap ordering // on the priority: s.priority <= both s.right.priority and s.right.priority. // https://en.wikipedia.org/wiki/Treap // https://faculty.washington.edu/aragon/pubs/rst89.pdf t := (*treapNode)(mheap_.treapalloc.alloc()) t.key = span.base() t.priority = fastrand() t.span = span t.maxPages = span.npages t.types = span.treapFilter() t.parent = last *pt = t // t now at a leaf. // Update the tree to maintain the various invariants. i := t for i.parent != nil && i.parent.updateInvariants() { i = i.parent } // Rotate up into tree according to priority. 
for t.parent != nil && t.parent.priority > t.priority { if t != nil && t.span.base() != t.key { println("runtime: insert t=", t, "t.key=", t.key) println("runtime: t.span=", t.span, "t.span.base()=", t.span.base()) throw("span and treap node base addresses do not match") } if t.parent.left == t { root.rotateRight(t.parent) } else { if t.parent.right != t { throw("treap insert finds a broken treap") } root.rotateLeft(t.parent) } } } func (root *mTreap) removeNode(t *treapNode) { if !t.span.scavenged { root.unscavHugePages -= t.span.hugePages() } if t.span.base() != t.key { throw("span and treap node base addresses do not match") } // Rotate t down to be leaf of tree for removal, respecting priorities. for t.right != nil || t.left != nil { if t.right == nil || t.left != nil && t.left.priority < t.right.priority { root.rotateRight(t) } else { root.rotateLeft(t) } } // Remove t, now a leaf. if t.parent != nil { p := t.parent if p.left == t { p.left = nil } else { p.right = nil } // Walk up the tree updating invariants until no updates occur. for p != nil && p.updateInvariants() { p = p.parent } } else { root.treap = nil } // Return the found treapNode's span after freeing the treapNode. mheap_.treapalloc.free(unsafe.Pointer(t)) } // find searches for, finds, and returns the treap iterator over all spans // representing the position of the span with the smallest base address which is // at least npages in size. If no span has at least npages it returns an invalid // iterator. // // This algorithm is as follows: // * If there's a left child and its subtree can satisfy this allocation, // continue down that subtree. // * If there's no such left child, check if the root of this subtree can // satisfy the allocation. If so, we're done. // * If the root cannot satisfy the allocation either, continue down the // right subtree if able. // * Else, break and report that we cannot satisfy the allocation. 
// // The preference for left, then current, then right, results in us getting // the left-most node which will contain the span with the lowest base // address. // // Note that if a request cannot be satisfied the fourth case will be // reached immediately at the root, since neither the left subtree nor // the right subtree will have a sufficient maxPages, whilst the root // node is also unable to satisfy it. func (root *mTreap) find(npages uintptr) treapIter { t := root.treap for t != nil { if t.span == nil { throw("treap node with nil span found") } // Iterate over the treap trying to go as far left // as possible while simultaneously ensuring that the // subtrees we choose always have a span which can // satisfy the allocation. if t.left != nil && t.left.maxPages >= npages { t = t.left } else if t.span.npages >= npages { // Before going right, if this span can satisfy the // request, stop here. break } else if t.right != nil && t.right.maxPages >= npages { t = t.right } else { t = nil } } return treapIter{treapFilterAll, t} } // removeSpan searches for, finds, deletes span along with // the associated treap node. If the span is not in the treap // then t will eventually be set to nil and the t.span // will throw. func (root *mTreap) removeSpan(span *mspan) { base := span.base() t := root.treap for t.span != span { if t.key < base { t = t.right } else if t.key > base { t = t.left } } root.removeNode(t) } // erase removes the element referred to by the current position of the // iterator. This operation consumes the given iterator, so it should no // longer be used. It is up to the caller to get the next or previous // iterator before calling erase, if need be. func (root *mTreap) erase(i treapIter) { root.removeNode(i.t) } // rotateLeft rotates the tree rooted at node x. // turning (x a (y b c)) into (y (x a b) c). 
func (root *mTreap) rotateLeft(x *treapNode) { // p -> (x a (y b c)) p := x.parent a, y := x.left, x.right b, c := y.left, y.right y.left = x x.parent = y y.right = c if c != nil { c.parent = y } x.left = a if a != nil { a.parent = x } x.right = b if b != nil { b.parent = x } y.parent = p if p == nil { root.treap = y } else if p.left == x { p.left = y } else { if p.right != x { throw("large span treap rotateLeft") } p.right = y } x.updateInvariants() y.updateInvariants() } // rotateRight rotates the tree rooted at node y. // turning (y (x a b) c) into (x a (y b c)). func (root *mTreap)
(y *treapNode) { // p -> (y (x a b) c) p := y.parent x, c := y.left, y.right a, b := x.left, x.right x.left = a if a != nil { a.parent = x } x.right = y y.parent = x y.left = b if b != nil { b.parent = y } y.right = c if c != nil { c.parent = y } x.parent = p if p == nil { root.treap = x } else if p.left == y { p.left = x } else { if p.right != y { throw("large span treap rotateRight") } p.right = x } y.updateInvariants() x.updateInvariants() }
rotateRight
identifier_name
mgclarge.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Page heap. // // See malloc.go for the general overview. // // Allocation policy is the subject of this file. All free spans live in // a treap for most of their time being free. See // https://en.wikipedia.org/wiki/Treap or // https://faculty.washington.edu/aragon/pubs/rst89.pdf for an overview. // sema.go also holds an implementation of a treap. // // Each treapNode holds a single span. The treap is sorted by base address // and each span necessarily has a unique base address. // Spans are returned based on a first-fit algorithm, acquiring the span // with the lowest base address which still satisfies the request. // // The first-fit algorithm is possible due to an augmentation of each // treapNode to maintain the size of the largest span in the subtree rooted // at that treapNode. Below we refer to this invariant as the maxPages // invariant. // // The primary routines are // insert: adds a span to the treap // remove: removes the span from that treap that best fits the required size // removeSpan: which removes a specific span from the treap // // Whenever a pointer to a span which is owned by the treap is acquired, that // span must not be mutated. To mutate a span in the treap, remove it first. // // mheap_.lock must be held when manipulating this data structure. 
package runtime import ( "unsafe" ) //go:notinheap type mTreap struct { treap *treapNode unscavHugePages uintptr // number of unscavenged huge pages in the treap } //go:notinheap type treapNode struct { right *treapNode // all treapNodes > this treap node left *treapNode // all treapNodes < this treap node parent *treapNode // direct parent of this node, nil if root key uintptr // base address of the span, used as primary sort key span *mspan // span at base address key maxPages uintptr // the maximum size of any span in this subtree, including the root priority uint32 // random number used by treap algorithm to keep tree probabilistically balanced types treapIterFilter // the types of spans available in this subtree } // updateInvariants is a helper method which has a node recompute its own // maxPages and types values by looking at its own span as well as the // values of its direct children. // // Returns true if anything changed. func (t *treapNode) updateInvariants() bool { m, i := t.maxPages, t.types t.maxPages = t.span.npages t.types = t.span.treapFilter() if t.left != nil { t.types |= t.left.types if t.maxPages < t.left.maxPages { t.maxPages = t.left.maxPages } } if t.right != nil { t.types |= t.right.types if t.maxPages < t.right.maxPages { t.maxPages = t.right.maxPages } } return m != t.maxPages || i != t.types } // findMinimal finds the minimal (lowest base addressed) node in the treap // which matches the criteria set out by the filter f and returns nil if // none exists. // // This algorithm is functionally the same as (*mTreap).find, so see that // method for more details. 
func (t *treapNode) findMinimal(f treapIterFilter) *treapNode { if t == nil || !f.matches(t.types) { return nil } for t != nil { if t.left != nil && f.matches(t.left.types) { t = t.left } else if f.matches(t.span.treapFilter()) { break } else if t.right != nil && f.matches(t.right.types) { t = t.right } else { println("runtime: f=", f) throw("failed to find minimal node matching filter") } } return t } // findMaximal finds the maximal (highest base addressed) node in the treap // which matches the criteria set out by the filter f and returns nil if // none exists. // // This algorithm is the logical inversion of findMinimal and just changes // the order of the left and right tests. func (t *treapNode) findMaximal(f treapIterFilter) *treapNode
// pred returns the predecessor of t in the treap subject to the criteria // specified by the filter f. Returns nil if no such predecessor exists. func (t *treapNode) pred(f treapIterFilter) *treapNode { if t.left != nil && f.matches(t.left.types) { // The node has a left subtree which contains at least one matching // node, find the maximal matching node in that subtree. return t.left.findMaximal(f) } // Lacking a left subtree, look to the parents. p := t // previous node t = t.parent for t != nil { // Walk up the tree until we find a node that has a left subtree // that we haven't already visited. if t.right == p { if f.matches(t.span.treapFilter()) { // If this node matches, then it's guaranteed to be the // predecessor since everything to its left is strictly // greater. return t } else if t.left != nil && f.matches(t.left.types) { // Failing the root of this subtree, if its left subtree has // something, that's where we'll find our predecessor. return t.left.findMaximal(f) } } p = t t = t.parent } // If the parent is nil, then we've hit the root without finding // a suitable left subtree containing the node (and the predecessor // wasn't on the path). Thus, there's no predecessor, so just return // nil. return nil } // succ returns the successor of t in the treap subject to the criteria // specified by the filter f. Returns nil if no such successor exists. func (t *treapNode) succ(f treapIterFilter) *treapNode { // See pred. This method is just the logical inversion of it. if t.right != nil && f.matches(t.right.types) { return t.right.findMinimal(f) } p := t t = t.parent for t != nil { if t.left == p { if f.matches(t.span.treapFilter()) { return t } else if t.right != nil && f.matches(t.right.types) { return t.right.findMinimal(f) } } p = t t = t.parent } return nil } // isSpanInTreap is handy for debugging. One should hold the heap lock, usually // mheap_.lock(). 
func (t *treapNode) isSpanInTreap(s *mspan) bool { if t == nil { return false } return t.span == s || t.left.isSpanInTreap(s) || t.right.isSpanInTreap(s) } // walkTreap is handy for debugging and testing. // Starting at some treapnode t, for example the root, do a depth first preorder walk of // the tree executing fn at each treap node. One should hold the heap lock, usually // mheap_.lock(). func (t *treapNode) walkTreap(fn func(tn *treapNode)) { if t == nil { return } fn(t) t.left.walkTreap(fn) t.right.walkTreap(fn) } // checkTreapNode when used in conjunction with walkTreap can usually detect a // poorly formed treap. func checkTreapNode(t *treapNode) { if t == nil { return } if t.span.next != nil || t.span.prev != nil || t.span.list != nil { throw("span may be on an mSpanList while simultaneously in the treap") } if t.span.base() != t.key { println("runtime: checkTreapNode treapNode t=", t, " t.key=", t.key, "t.span.base()=", t.span.base()) throw("why does span.base() and treap.key do not match?") } if t.left != nil && t.key < t.left.key { throw("found out-of-order spans in treap (left child has greater base address)") } if t.right != nil && t.key > t.right.key { throw("found out-of-order spans in treap (right child has lesser base address)") } } // validateInvariants is handy for debugging and testing. // It ensures that the various invariants on each treap node are // appropriately maintained throughout the treap by walking the // treap in a post-order manner. 
func (t *treapNode) validateInvariants() (uintptr, treapIterFilter) { if t == nil { return 0, 0 } leftMax, leftTypes := t.left.validateInvariants() rightMax, rightTypes := t.right.validateInvariants() max := t.span.npages if leftMax > max { max = leftMax } if rightMax > max { max = rightMax } if max != t.maxPages { println("runtime: t.maxPages=", t.maxPages, "want=", max) throw("maxPages invariant violated in treap") } typ := t.span.treapFilter() | leftTypes | rightTypes if typ != t.types { println("runtime: t.types=", t.types, "want=", typ) throw("types invariant violated in treap") } return max, typ } // treapIterType represents the type of iteration to perform // over the treap. Each different flag is represented by a bit // in the type, and types may be combined together by a bitwise // or operation. // // Note that only 5 bits are available for treapIterType, do not // use the 3 higher-order bits. This constraint is to allow for // expansion into a treapIterFilter, which is a uint32. type treapIterType uint8 const ( treapIterScav treapIterType = 1 << iota // scavenged spans treapIterHuge // spans containing at least one huge page treapIterBits = iota ) // treapIterFilter is a bitwise filter of different spans by binary // properties. Each bit of a treapIterFilter represents a unique // combination of bits set in a treapIterType, in other words, it // represents the power set of a treapIterType. // // The purpose of this representation is to allow the existence of // a specific span type to bubble up in the treap (see the types // field on treapNode). // // More specifically, any treapIterType may be transformed into a // treapIterFilter for a specific combination of flags via the // following operation: 1 << (0x1f&treapIterType). type treapIterFilter uint32 // treapFilterAll represents the filter which allows all spans. const treapFilterAll = ^treapIterFilter(0) // treapFilter creates a new treapIterFilter from two treapIterTypes. 
// mask represents a bitmask for which flags we should check against // and match for the expected result after applying the mask. func treapFilter(mask, match treapIterType) treapIterFilter { allow := treapIterFilter(0) for i := treapIterType(0); i < 1<<treapIterBits; i++ { if mask&i == match { allow |= 1 << i } } return allow } // matches returns true if m and f intersect. func (f treapIterFilter) matches(m treapIterFilter) bool { return f&m != 0 } // treapFilter returns the treapIterFilter exactly matching this span, // i.e. popcount(result) == 1. func (s *mspan) treapFilter() treapIterFilter { have := treapIterType(0) if s.scavenged { have |= treapIterScav } if s.hugePages() > 0 { have |= treapIterHuge } return treapIterFilter(uint32(1) << (0x1f & have)) } // treapIter is a bidirectional iterator type which may be used to iterate over a // an mTreap in-order forwards (increasing order) or backwards (decreasing order). // Its purpose is to hide details about the treap from users when trying to iterate // over it. // // To create iterators over the treap, call start or end on an mTreap. type treapIter struct { f treapIterFilter t *treapNode } // span returns the span at the current position in the treap. // If the treap is not valid, span will panic. func (i *treapIter) span() *mspan { return i.t.span } // valid returns whether the iterator represents a valid position // in the mTreap. func (i *treapIter) valid() bool { return i.t != nil } // next moves the iterator forward by one. Once the iterator // ceases to be valid, calling next will panic. func (i treapIter) next() treapIter { i.t = i.t.succ(i.f) return i } // prev moves the iterator backwards by one. Once the iterator // ceases to be valid, calling prev will panic. func (i treapIter) prev() treapIter { i.t = i.t.pred(i.f) return i } // start returns an iterator which points to the start of the treap (the // left-most node in the treap) subject to mask and match constraints. 
func (root *mTreap) start(mask, match treapIterType) treapIter { f := treapFilter(mask, match) return treapIter{f, root.treap.findMinimal(f)} } // end returns an iterator which points to the end of the treap (the // right-most node in the treap) subject to mask and match constraints. func (root *mTreap) end(mask, match treapIterType) treapIter { f := treapFilter(mask, match) return treapIter{f, root.treap.findMaximal(f)} } // mutate allows one to mutate the span without removing it from the treap via a // callback. The span's base and size are allowed to change as long as the span // remains in the same order relative to its predecessor and successor. // // Note however that any operation that causes a treap rebalancing inside of fn // is strictly forbidden, as that may cause treap node metadata to go // out-of-sync. func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) { s := i.span() // Save some state about the span for later inspection. hpages := s.hugePages() scavenged := s.scavenged // Call the mutator. fn(s) // Update unscavHugePages appropriately. if !scavenged { mheap_.free.unscavHugePages -= hpages } if !s.scavenged { mheap_.free.unscavHugePages += s.hugePages() } // Update the key in case the base changed. i.t.key = s.base() // Updating invariants up the tree needs to happen if // anything changed at all, so just go ahead and do it // unconditionally. // // If it turns out nothing changed, it'll exit quickly. t := i.t for t != nil && t.updateInvariants() { t = t.parent } } // insert adds span to the large span treap. func (root *mTreap) insert(span *mspan) { if !span.scavenged { root.unscavHugePages += span.hugePages() } base := span.base() var last *treapNode pt := &root.treap for t := *pt; t != nil; t = *pt { last = t if t.key < base { pt = &t.right } else if t.key > base { pt = &t.left } else { throw("inserting span already in treap") } } // Add t as new leaf in tree of span size and unique addrs. 
// The balanced tree is a treap using priority as the random heap priority. // That is, it is a binary tree ordered according to the key, // but then among the space of possible binary trees respecting those // keys, it is kept balanced on average by maintaining a heap ordering // on the priority: s.priority <= both s.right.priority and s.right.priority. // https://en.wikipedia.org/wiki/Treap // https://faculty.washington.edu/aragon/pubs/rst89.pdf t := (*treapNode)(mheap_.treapalloc.alloc()) t.key = span.base() t.priority = fastrand() t.span = span t.maxPages = span.npages t.types = span.treapFilter() t.parent = last *pt = t // t now at a leaf. // Update the tree to maintain the various invariants. i := t for i.parent != nil && i.parent.updateInvariants() { i = i.parent } // Rotate up into tree according to priority. for t.parent != nil && t.parent.priority > t.priority { if t != nil && t.span.base() != t.key { println("runtime: insert t=", t, "t.key=", t.key) println("runtime: t.span=", t.span, "t.span.base()=", t.span.base()) throw("span and treap node base addresses do not match") } if t.parent.left == t { root.rotateRight(t.parent) } else { if t.parent.right != t { throw("treap insert finds a broken treap") } root.rotateLeft(t.parent) } } } func (root *mTreap) removeNode(t *treapNode) { if !t.span.scavenged { root.unscavHugePages -= t.span.hugePages() } if t.span.base() != t.key { throw("span and treap node base addresses do not match") } // Rotate t down to be leaf of tree for removal, respecting priorities. for t.right != nil || t.left != nil { if t.right == nil || t.left != nil && t.left.priority < t.right.priority { root.rotateRight(t) } else { root.rotateLeft(t) } } // Remove t, now a leaf. if t.parent != nil { p := t.parent if p.left == t { p.left = nil } else { p.right = nil } // Walk up the tree updating invariants until no updates occur. 
for p != nil && p.updateInvariants() { p = p.parent } } else { root.treap = nil } // Return the found treapNode's span after freeing the treapNode. mheap_.treapalloc.free(unsafe.Pointer(t)) } // find searches for, finds, and returns the treap iterator over all spans // representing the position of the span with the smallest base address which is // at least npages in size. If no span has at least npages it returns an invalid // iterator. // // This algorithm is as follows: // * If there's a left child and its subtree can satisfy this allocation, // continue down that subtree. // * If there's no such left child, check if the root of this subtree can // satisfy the allocation. If so, we're done. // * If the root cannot satisfy the allocation either, continue down the // right subtree if able. // * Else, break and report that we cannot satisfy the allocation. // // The preference for left, then current, then right, results in us getting // the left-most node which will contain the span with the lowest base // address. // // Note that if a request cannot be satisfied the fourth case will be // reached immediately at the root, since neither the left subtree nor // the right subtree will have a sufficient maxPages, whilst the root // node is also unable to satisfy it. func (root *mTreap) find(npages uintptr) treapIter { t := root.treap for t != nil { if t.span == nil { throw("treap node with nil span found") } // Iterate over the treap trying to go as far left // as possible while simultaneously ensuring that the // subtrees we choose always have a span which can // satisfy the allocation. if t.left != nil && t.left.maxPages >= npages { t = t.left } else if t.span.npages >= npages { // Before going right, if this span can satisfy the // request, stop here. 
break } else if t.right != nil && t.right.maxPages >= npages { t = t.right } else { t = nil } } return treapIter{treapFilterAll, t} } // removeSpan searches for, finds, deletes span along with // the associated treap node. If the span is not in the treap // then t will eventually be set to nil and the t.span // will throw. func (root *mTreap) removeSpan(span *mspan) { base := span.base() t := root.treap for t.span != span { if t.key < base { t = t.right } else if t.key > base { t = t.left } } root.removeNode(t) } // erase removes the element referred to by the current position of the // iterator. This operation consumes the given iterator, so it should no // longer be used. It is up to the caller to get the next or previous // iterator before calling erase, if need be. func (root *mTreap) erase(i treapIter) { root.removeNode(i.t) } // rotateLeft rotates the tree rooted at node x. // turning (x a (y b c)) into (y (x a b) c). func (root *mTreap) rotateLeft(x *treapNode) { // p -> (x a (y b c)) p := x.parent a, y := x.left, x.right b, c := y.left, y.right y.left = x x.parent = y y.right = c if c != nil { c.parent = y } x.left = a if a != nil { a.parent = x } x.right = b if b != nil { b.parent = x } y.parent = p if p == nil { root.treap = y } else if p.left == x { p.left = y } else { if p.right != x { throw("large span treap rotateLeft") } p.right = y } x.updateInvariants() y.updateInvariants() } // rotateRight rotates the tree rooted at node y. // turning (y (x a b) c) into (x a (y b c)). func (root *mTreap) rotateRight(y *treapNode) { // p -> (y (x a b) c) p := y.parent x, c := y.left, y.right a, b := x.left, x.right x.left = a if a != nil { a.parent = x } x.right = y y.parent = x y.left = b if b != nil { b.parent = y } y.right = c if c != nil { c.parent = y } x.parent = p if p == nil { root.treap = x } else if p.left == y { p.left = x } else { if p.right != y { throw("large span treap rotateRight") } p.right = x } y.updateInvariants() x.updateInvariants() }
{ if t == nil || !f.matches(t.types) { return nil } for t != nil { if t.right != nil && f.matches(t.right.types) { t = t.right } else if f.matches(t.span.treapFilter()) { break } else if t.left != nil && f.matches(t.left.types) { t = t.left } else { println("runtime: f=", f) throw("failed to find minimal node matching filter") } } return t }
identifier_body
mgclarge.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Page heap. // // See malloc.go for the general overview. // // Allocation policy is the subject of this file. All free spans live in // a treap for most of their time being free. See // https://en.wikipedia.org/wiki/Treap or // https://faculty.washington.edu/aragon/pubs/rst89.pdf for an overview. // sema.go also holds an implementation of a treap. // // Each treapNode holds a single span. The treap is sorted by base address // and each span necessarily has a unique base address. // Spans are returned based on a first-fit algorithm, acquiring the span // with the lowest base address which still satisfies the request. // // The first-fit algorithm is possible due to an augmentation of each // treapNode to maintain the size of the largest span in the subtree rooted // at that treapNode. Below we refer to this invariant as the maxPages // invariant. // // The primary routines are // insert: adds a span to the treap // remove: removes the span from that treap that best fits the required size // removeSpan: which removes a specific span from the treap // // Whenever a pointer to a span which is owned by the treap is acquired, that // span must not be mutated. To mutate a span in the treap, remove it first. // // mheap_.lock must be held when manipulating this data structure. 
package runtime import ( "unsafe" ) //go:notinheap type mTreap struct { treap *treapNode unscavHugePages uintptr // number of unscavenged huge pages in the treap } //go:notinheap type treapNode struct { right *treapNode // all treapNodes > this treap node left *treapNode // all treapNodes < this treap node parent *treapNode // direct parent of this node, nil if root key uintptr // base address of the span, used as primary sort key span *mspan // span at base address key maxPages uintptr // the maximum size of any span in this subtree, including the root priority uint32 // random number used by treap algorithm to keep tree probabilistically balanced types treapIterFilter // the types of spans available in this subtree } // updateInvariants is a helper method which has a node recompute its own // maxPages and types values by looking at its own span as well as the // values of its direct children. // // Returns true if anything changed. func (t *treapNode) updateInvariants() bool { m, i := t.maxPages, t.types t.maxPages = t.span.npages t.types = t.span.treapFilter() if t.left != nil { t.types |= t.left.types if t.maxPages < t.left.maxPages { t.maxPages = t.left.maxPages } } if t.right != nil { t.types |= t.right.types if t.maxPages < t.right.maxPages { t.maxPages = t.right.maxPages } } return m != t.maxPages || i != t.types } // findMinimal finds the minimal (lowest base addressed) node in the treap // which matches the criteria set out by the filter f and returns nil if // none exists. // // This algorithm is functionally the same as (*mTreap).find, so see that // method for more details. 
func (t *treapNode) findMinimal(f treapIterFilter) *treapNode { if t == nil || !f.matches(t.types) { return nil } for t != nil { if t.left != nil && f.matches(t.left.types) { t = t.left } else if f.matches(t.span.treapFilter()) { break } else if t.right != nil && f.matches(t.right.types) { t = t.right } else { println("runtime: f=", f) throw("failed to find minimal node matching filter") } } return t } // findMaximal finds the maximal (highest base addressed) node in the treap // which matches the criteria set out by the filter f and returns nil if // none exists. // // This algorithm is the logical inversion of findMinimal and just changes // the order of the left and right tests. func (t *treapNode) findMaximal(f treapIterFilter) *treapNode { if t == nil || !f.matches(t.types) { return nil } for t != nil { if t.right != nil && f.matches(t.right.types) { t = t.right } else if f.matches(t.span.treapFilter()) { break } else if t.left != nil && f.matches(t.left.types) { t = t.left } else { println("runtime: f=", f) throw("failed to find minimal node matching filter") } } return t } // pred returns the predecessor of t in the treap subject to the criteria // specified by the filter f. Returns nil if no such predecessor exists. func (t *treapNode) pred(f treapIterFilter) *treapNode { if t.left != nil && f.matches(t.left.types) { // The node has a left subtree which contains at least one matching // node, find the maximal matching node in that subtree. return t.left.findMaximal(f) } // Lacking a left subtree, look to the parents. p := t // previous node t = t.parent for t != nil { // Walk up the tree until we find a node that has a left subtree // that we haven't already visited. if t.right == p { if f.matches(t.span.treapFilter()) { // If this node matches, then it's guaranteed to be the // predecessor since everything to its left is strictly // greater. 
return t } else if t.left != nil && f.matches(t.left.types) { // Failing the root of this subtree, if its left subtree has // something, that's where we'll find our predecessor. return t.left.findMaximal(f) } } p = t t = t.parent } // If the parent is nil, then we've hit the root without finding // a suitable left subtree containing the node (and the predecessor // wasn't on the path). Thus, there's no predecessor, so just return // nil. return nil } // succ returns the successor of t in the treap subject to the criteria // specified by the filter f. Returns nil if no such successor exists. func (t *treapNode) succ(f treapIterFilter) *treapNode { // See pred. This method is just the logical inversion of it. if t.right != nil && f.matches(t.right.types) { return t.right.findMinimal(f) } p := t t = t.parent for t != nil { if t.left == p { if f.matches(t.span.treapFilter()) { return t } else if t.right != nil && f.matches(t.right.types) { return t.right.findMinimal(f) } } p = t t = t.parent } return nil } // isSpanInTreap is handy for debugging. One should hold the heap lock, usually // mheap_.lock(). func (t *treapNode) isSpanInTreap(s *mspan) bool { if t == nil { return false } return t.span == s || t.left.isSpanInTreap(s) || t.right.isSpanInTreap(s) } // walkTreap is handy for debugging and testing. // Starting at some treapnode t, for example the root, do a depth first preorder walk of // the tree executing fn at each treap node. One should hold the heap lock, usually // mheap_.lock(). func (t *treapNode) walkTreap(fn func(tn *treapNode)) { if t == nil { return } fn(t) t.left.walkTreap(fn) t.right.walkTreap(fn) } // checkTreapNode when used in conjunction with walkTreap can usually detect a // poorly formed treap. 
func checkTreapNode(t *treapNode) {
	if t == nil {
		return
	}
	// A span owned by the treap must not simultaneously be linked into
	// any mSpanList.
	if t.span.next != nil || t.span.prev != nil || t.span.list != nil {
		throw("span may be on an mSpanList while simultaneously in the treap")
	}
	// The cached sort key must always mirror the span's base address.
	if t.span.base() != t.key {
		println("runtime: checkTreapNode treapNode t=", t, "     t.key=", t.key, "t.span.base()=", t.span.base())
		throw("why does span.base() and treap.key do not match?")
	}
	// Binary-search-tree ordering: left keys < node key < right keys.
	if t.left != nil && t.key < t.left.key {
		throw("found out-of-order spans in treap (left child has greater base address)")
	}
	if t.right != nil && t.key > t.right.key {
		throw("found out-of-order spans in treap (right child has lesser base address)")
	}
}

// validateInvariants is handy for debugging and testing.
// It ensures that the various invariants on each treap node are
// appropriately maintained throughout the treap by walking the
// treap in a post-order manner.
//
// Returns the subtree's true maxPages and types values so the caller
// (the parent's invocation) can validate its own cached copies.
func (t *treapNode) validateInvariants() (uintptr, treapIterFilter) {
	if t == nil {
		return 0, 0
	}
	// Post-order: validate and collect both children first.
	leftMax, leftTypes := t.left.validateInvariants()
	rightMax, rightTypes := t.right.validateInvariants()
	// Recompute what maxPages should be from scratch...
	max := t.span.npages
	if leftMax > max {
		max = leftMax
	}
	if rightMax > max {
		max = rightMax
	}
	// ...and compare against the cached value.
	if max != t.maxPages {
		println("runtime: t.maxPages=", t.maxPages, "want=", max)
		throw("maxPages invariant violated in treap")
	}
	// Same check for the types bitmask.
	typ := t.span.treapFilter() | leftTypes | rightTypes
	if typ != t.types {
		println("runtime: t.types=", t.types, "want=", typ)
		throw("types invariant violated in treap")
	}
	return max, typ
}

// treapIterType represents the type of iteration to perform
// over the treap. Each different flag is represented by a bit
// in the type, and types may be combined together by a bitwise
// or operation.
//
// Note that only 5 bits are available for treapIterType, do not
// use the 3 higher-order bits. This constraint is to allow for
// expansion into a treapIterFilter, which is a uint32.
type treapIterType uint8 const ( treapIterScav treapIterType = 1 << iota // scavenged spans treapIterHuge // spans containing at least one huge page treapIterBits = iota ) // treapIterFilter is a bitwise filter of different spans by binary // properties. Each bit of a treapIterFilter represents a unique // combination of bits set in a treapIterType, in other words, it // represents the power set of a treapIterType. // // The purpose of this representation is to allow the existence of // a specific span type to bubble up in the treap (see the types // field on treapNode). // // More specifically, any treapIterType may be transformed into a // treapIterFilter for a specific combination of flags via the // following operation: 1 << (0x1f&treapIterType). type treapIterFilter uint32 // treapFilterAll represents the filter which allows all spans. const treapFilterAll = ^treapIterFilter(0) // treapFilter creates a new treapIterFilter from two treapIterTypes. // mask represents a bitmask for which flags we should check against // and match for the expected result after applying the mask. func treapFilter(mask, match treapIterType) treapIterFilter { allow := treapIterFilter(0) for i := treapIterType(0); i < 1<<treapIterBits; i++ { if mask&i == match { allow |= 1 << i } } return allow } // matches returns true if m and f intersect. func (f treapIterFilter) matches(m treapIterFilter) bool { return f&m != 0 } // treapFilter returns the treapIterFilter exactly matching this span, // i.e. popcount(result) == 1. func (s *mspan) treapFilter() treapIterFilter { have := treapIterType(0) if s.scavenged { have |= treapIterScav } if s.hugePages() > 0 { have |= treapIterHuge } return treapIterFilter(uint32(1) << (0x1f & have)) } // treapIter is a bidirectional iterator type which may be used to iterate over a // an mTreap in-order forwards (increasing order) or backwards (decreasing order). // Its purpose is to hide details about the treap from users when trying to iterate // over it. 
// // To create iterators over the treap, call start or end on an mTreap. type treapIter struct { f treapIterFilter t *treapNode } // span returns the span at the current position in the treap. // If the treap is not valid, span will panic. func (i *treapIter) span() *mspan { return i.t.span } // valid returns whether the iterator represents a valid position // in the mTreap. func (i *treapIter) valid() bool { return i.t != nil } // next moves the iterator forward by one. Once the iterator // ceases to be valid, calling next will panic. func (i treapIter) next() treapIter { i.t = i.t.succ(i.f) return i } // prev moves the iterator backwards by one. Once the iterator // ceases to be valid, calling prev will panic. func (i treapIter) prev() treapIter { i.t = i.t.pred(i.f) return i } // start returns an iterator which points to the start of the treap (the // left-most node in the treap) subject to mask and match constraints. func (root *mTreap) start(mask, match treapIterType) treapIter { f := treapFilter(mask, match) return treapIter{f, root.treap.findMinimal(f)} } // end returns an iterator which points to the end of the treap (the // right-most node in the treap) subject to mask and match constraints. func (root *mTreap) end(mask, match treapIterType) treapIter { f := treapFilter(mask, match) return treapIter{f, root.treap.findMaximal(f)} } // mutate allows one to mutate the span without removing it from the treap via a // callback. The span's base and size are allowed to change as long as the span // remains in the same order relative to its predecessor and successor. // // Note however that any operation that causes a treap rebalancing inside of fn // is strictly forbidden, as that may cause treap node metadata to go // out-of-sync. func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) { s := i.span() // Save some state about the span for later inspection. hpages := s.hugePages() scavenged := s.scavenged // Call the mutator. 
fn(s) // Update unscavHugePages appropriately. if !scavenged { mheap_.free.unscavHugePages -= hpages } if !s.scavenged { mheap_.free.unscavHugePages += s.hugePages() } // Update the key in case the base changed. i.t.key = s.base() // Updating invariants up the tree needs to happen if // anything changed at all, so just go ahead and do it // unconditionally. // // If it turns out nothing changed, it'll exit quickly. t := i.t for t != nil && t.updateInvariants() { t = t.parent } } // insert adds span to the large span treap. func (root *mTreap) insert(span *mspan) { if !span.scavenged { root.unscavHugePages += span.hugePages() } base := span.base() var last *treapNode pt := &root.treap for t := *pt; t != nil; t = *pt { last = t if t.key < base { pt = &t.right } else if t.key > base { pt = &t.left } else { throw("inserting span already in treap") } } // Add t as new leaf in tree of span size and unique addrs. // The balanced tree is a treap using priority as the random heap priority. // That is, it is a binary tree ordered according to the key, // but then among the space of possible binary trees respecting those // keys, it is kept balanced on average by maintaining a heap ordering // on the priority: s.priority <= both s.right.priority and s.right.priority. // https://en.wikipedia.org/wiki/Treap // https://faculty.washington.edu/aragon/pubs/rst89.pdf t := (*treapNode)(mheap_.treapalloc.alloc()) t.key = span.base() t.priority = fastrand() t.span = span t.maxPages = span.npages t.types = span.treapFilter() t.parent = last *pt = t // t now at a leaf. // Update the tree to maintain the various invariants. i := t for i.parent != nil && i.parent.updateInvariants() { i = i.parent } // Rotate up into tree according to priority. 
for t.parent != nil && t.parent.priority > t.priority { if t != nil && t.span.base() != t.key { println("runtime: insert t=", t, "t.key=", t.key) println("runtime: t.span=", t.span, "t.span.base()=", t.span.base()) throw("span and treap node base addresses do not match") } if t.parent.left == t { root.rotateRight(t.parent) } else { if t.parent.right != t { throw("treap insert finds a broken treap") } root.rotateLeft(t.parent) } } } func (root *mTreap) removeNode(t *treapNode) { if !t.span.scavenged { root.unscavHugePages -= t.span.hugePages() } if t.span.base() != t.key { throw("span and treap node base addresses do not match") } // Rotate t down to be leaf of tree for removal, respecting priorities. for t.right != nil || t.left != nil { if t.right == nil || t.left != nil && t.left.priority < t.right.priority { root.rotateRight(t) } else { root.rotateLeft(t) } } // Remove t, now a leaf. if t.parent != nil { p := t.parent if p.left == t
else { p.right = nil } // Walk up the tree updating invariants until no updates occur. for p != nil && p.updateInvariants() { p = p.parent } } else { root.treap = nil } // Return the found treapNode's span after freeing the treapNode. mheap_.treapalloc.free(unsafe.Pointer(t)) } // find searches for, finds, and returns the treap iterator over all spans // representing the position of the span with the smallest base address which is // at least npages in size. If no span has at least npages it returns an invalid // iterator. // // This algorithm is as follows: // * If there's a left child and its subtree can satisfy this allocation, // continue down that subtree. // * If there's no such left child, check if the root of this subtree can // satisfy the allocation. If so, we're done. // * If the root cannot satisfy the allocation either, continue down the // right subtree if able. // * Else, break and report that we cannot satisfy the allocation. // // The preference for left, then current, then right, results in us getting // the left-most node which will contain the span with the lowest base // address. // // Note that if a request cannot be satisfied the fourth case will be // reached immediately at the root, since neither the left subtree nor // the right subtree will have a sufficient maxPages, whilst the root // node is also unable to satisfy it. func (root *mTreap) find(npages uintptr) treapIter { t := root.treap for t != nil { if t.span == nil { throw("treap node with nil span found") } // Iterate over the treap trying to go as far left // as possible while simultaneously ensuring that the // subtrees we choose always have a span which can // satisfy the allocation. if t.left != nil && t.left.maxPages >= npages { t = t.left } else if t.span.npages >= npages { // Before going right, if this span can satisfy the // request, stop here. 
break } else if t.right != nil && t.right.maxPages >= npages { t = t.right } else { t = nil } } return treapIter{treapFilterAll, t} } // removeSpan searches for, finds, deletes span along with // the associated treap node. If the span is not in the treap // then t will eventually be set to nil and the t.span // will throw. func (root *mTreap) removeSpan(span *mspan) { base := span.base() t := root.treap for t.span != span { if t.key < base { t = t.right } else if t.key > base { t = t.left } } root.removeNode(t) } // erase removes the element referred to by the current position of the // iterator. This operation consumes the given iterator, so it should no // longer be used. It is up to the caller to get the next or previous // iterator before calling erase, if need be. func (root *mTreap) erase(i treapIter) { root.removeNode(i.t) } // rotateLeft rotates the tree rooted at node x. // turning (x a (y b c)) into (y (x a b) c). func (root *mTreap) rotateLeft(x *treapNode) { // p -> (x a (y b c)) p := x.parent a, y := x.left, x.right b, c := y.left, y.right y.left = x x.parent = y y.right = c if c != nil { c.parent = y } x.left = a if a != nil { a.parent = x } x.right = b if b != nil { b.parent = x } y.parent = p if p == nil { root.treap = y } else if p.left == x { p.left = y } else { if p.right != x { throw("large span treap rotateLeft") } p.right = y } x.updateInvariants() y.updateInvariants() } // rotateRight rotates the tree rooted at node y. // turning (y (x a b) c) into (x a (y b c)). func (root *mTreap) rotateRight(y *treapNode) { // p -> (y (x a b) c) p := y.parent x, c := y.left, y.right a, b := x.left, x.right x.left = a if a != nil { a.parent = x } x.right = y y.parent = x y.left = b if b != nil { b.parent = y } y.right = c if c != nil { c.parent = y } x.parent = p if p == nil { root.treap = x } else if p.left == y { p.left = x } else { if p.right != y { throw("large span treap rotateRight") } p.right = x } y.updateInvariants() x.updateInvariants() }
{ p.left = nil }
conditional_block
mgclarge.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Page heap. // // See malloc.go for the general overview. // // Allocation policy is the subject of this file. All free spans live in // a treap for most of their time being free. See // https://en.wikipedia.org/wiki/Treap or // https://faculty.washington.edu/aragon/pubs/rst89.pdf for an overview. // sema.go also holds an implementation of a treap. // // Each treapNode holds a single span. The treap is sorted by base address // and each span necessarily has a unique base address. // Spans are returned based on a first-fit algorithm, acquiring the span // with the lowest base address which still satisfies the request. // // The first-fit algorithm is possible due to an augmentation of each // treapNode to maintain the size of the largest span in the subtree rooted // at that treapNode. Below we refer to this invariant as the maxPages // invariant. // // The primary routines are // insert: adds a span to the treap // remove: removes the span from that treap that best fits the required size // removeSpan: which removes a specific span from the treap // // Whenever a pointer to a span which is owned by the treap is acquired, that // span must not be mutated. To mutate a span in the treap, remove it first. // // mheap_.lock must be held when manipulating this data structure. 
package runtime

import (
	"unsafe"
)

// mTreap is the root of a treap of free spans, sorted by span base
// address and augmented for first-fit allocation (see the file header
// comment for the overall design).
//
//go:notinheap
type mTreap struct {
	treap           *treapNode // root of the treap; nil when empty
	unscavHugePages uintptr    // number of unscavenged huge pages in the treap
}

// treapNode is a single node of the treap; it owns exactly one span.
//
//go:notinheap
type treapNode struct {
	right    *treapNode      // all treapNodes > this treap node
	left     *treapNode      // all treapNodes < this treap node
	parent   *treapNode      // direct parent of this node, nil if root
	key      uintptr         // base address of the span, used as primary sort key
	span     *mspan          // span at base address key
	maxPages uintptr         // the maximum size of any span in this subtree, including the root
	priority uint32          // random number used by treap algorithm to keep tree probabilistically balanced
	types    treapIterFilter // the types of spans available in this subtree
}

// updateInvariants is a helper method which has a node recompute its own
// maxPages and types values by looking at its own span as well as the
// values of its direct children.
//
// Returns true if anything changed.
func (t *treapNode) updateInvariants() bool {
	// Remember the old values so we can report whether anything changed.
	m, i := t.maxPages, t.types
	t.maxPages = t.span.npages
	t.types = t.span.treapFilter()
	if t.left != nil {
		// Fold in the left subtree's type summary and largest span size.
		t.types |= t.left.types
		if t.maxPages < t.left.maxPages {
			t.maxPages = t.left.maxPages
		}
	}
	if t.right != nil {
		// Fold in the right subtree's type summary and largest span size.
		t.types |= t.right.types
		if t.maxPages < t.right.maxPages {
			t.maxPages = t.right.maxPages
		}
	}
	// Callers walk up the tree until this returns false, so an accurate
	// "changed" report lets them stop as early as possible.
	return m != t.maxPages || i != t.types
}

// findMinimal finds the minimal (lowest base addressed) node in the treap
// which matches the criteria set out by the filter f and returns nil if
// none exists.
//
// This algorithm is functionally the same as (*mTreap).find, so see that
// method for more details.
func (t *treapNode) findMinimal(f treapIterFilter) *treapNode { if t == nil || !f.matches(t.types) { return nil } for t != nil { if t.left != nil && f.matches(t.left.types) { t = t.left } else if f.matches(t.span.treapFilter()) { break } else if t.right != nil && f.matches(t.right.types) { t = t.right } else { println("runtime: f=", f) throw("failed to find minimal node matching filter") } } return t } // findMaximal finds the maximal (highest base addressed) node in the treap // which matches the criteria set out by the filter f and returns nil if // none exists. // // This algorithm is the logical inversion of findMinimal and just changes // the order of the left and right tests. func (t *treapNode) findMaximal(f treapIterFilter) *treapNode { if t == nil || !f.matches(t.types) { return nil } for t != nil { if t.right != nil && f.matches(t.right.types) { t = t.right } else if f.matches(t.span.treapFilter()) { break } else if t.left != nil && f.matches(t.left.types) { t = t.left } else { println("runtime: f=", f) throw("failed to find minimal node matching filter") } } return t } // pred returns the predecessor of t in the treap subject to the criteria // specified by the filter f. Returns nil if no such predecessor exists. func (t *treapNode) pred(f treapIterFilter) *treapNode { if t.left != nil && f.matches(t.left.types) { // The node has a left subtree which contains at least one matching // node, find the maximal matching node in that subtree. return t.left.findMaximal(f) } // Lacking a left subtree, look to the parents. p := t // previous node t = t.parent for t != nil { // Walk up the tree until we find a node that has a left subtree // that we haven't already visited. if t.right == p { if f.matches(t.span.treapFilter()) { // If this node matches, then it's guaranteed to be the // predecessor since everything to its left is strictly // greater. 
return t } else if t.left != nil && f.matches(t.left.types) { // Failing the root of this subtree, if its left subtree has // something, that's where we'll find our predecessor. return t.left.findMaximal(f) } } p = t t = t.parent } // If the parent is nil, then we've hit the root without finding // a suitable left subtree containing the node (and the predecessor // wasn't on the path). Thus, there's no predecessor, so just return // nil. return nil } // succ returns the successor of t in the treap subject to the criteria // specified by the filter f. Returns nil if no such successor exists. func (t *treapNode) succ(f treapIterFilter) *treapNode { // See pred. This method is just the logical inversion of it. if t.right != nil && f.matches(t.right.types) { return t.right.findMinimal(f) } p := t t = t.parent for t != nil { if t.left == p { if f.matches(t.span.treapFilter()) { return t } else if t.right != nil && f.matches(t.right.types) { return t.right.findMinimal(f) } } p = t t = t.parent } return nil } // isSpanInTreap is handy for debugging. One should hold the heap lock, usually // mheap_.lock(). func (t *treapNode) isSpanInTreap(s *mspan) bool { if t == nil { return false } return t.span == s || t.left.isSpanInTreap(s) || t.right.isSpanInTreap(s) } // walkTreap is handy for debugging and testing. // Starting at some treapnode t, for example the root, do a depth first preorder walk of // the tree executing fn at each treap node. One should hold the heap lock, usually // mheap_.lock(). func (t *treapNode) walkTreap(fn func(tn *treapNode)) { if t == nil { return } fn(t) t.left.walkTreap(fn) t.right.walkTreap(fn) } // checkTreapNode when used in conjunction with walkTreap can usually detect a // poorly formed treap. 
func checkTreapNode(t *treapNode) {
	if t == nil {
		return
	}
	// A span owned by the treap must not simultaneously be linked into
	// any mSpanList.
	if t.span.next != nil || t.span.prev != nil || t.span.list != nil {
		throw("span may be on an mSpanList while simultaneously in the treap")
	}
	// The cached sort key must always mirror the span's base address.
	if t.span.base() != t.key {
		println("runtime: checkTreapNode treapNode t=", t, "     t.key=", t.key, "t.span.base()=", t.span.base())
		throw("why does span.base() and treap.key do not match?")
	}
	// Binary-search-tree ordering: left keys < node key < right keys.
	if t.left != nil && t.key < t.left.key {
		throw("found out-of-order spans in treap (left child has greater base address)")
	}
	if t.right != nil && t.key > t.right.key {
		throw("found out-of-order spans in treap (right child has lesser base address)")
	}
}

// validateInvariants is handy for debugging and testing.
// It ensures that the various invariants on each treap node are
// appropriately maintained throughout the treap by walking the
// treap in a post-order manner.
//
// Returns the subtree's true maxPages and types values so the caller
// (the parent's invocation) can validate its own cached copies.
func (t *treapNode) validateInvariants() (uintptr, treapIterFilter) {
	if t == nil {
		return 0, 0
	}
	// Post-order: validate and collect both children first.
	leftMax, leftTypes := t.left.validateInvariants()
	rightMax, rightTypes := t.right.validateInvariants()
	// Recompute what maxPages should be from scratch...
	max := t.span.npages
	if leftMax > max {
		max = leftMax
	}
	if rightMax > max {
		max = rightMax
	}
	// ...and compare against the cached value.
	if max != t.maxPages {
		println("runtime: t.maxPages=", t.maxPages, "want=", max)
		throw("maxPages invariant violated in treap")
	}
	// Same check for the types bitmask.
	typ := t.span.treapFilter() | leftTypes | rightTypes
	if typ != t.types {
		println("runtime: t.types=", t.types, "want=", typ)
		throw("types invariant violated in treap")
	}
	return max, typ
}

// treapIterType represents the type of iteration to perform
// over the treap. Each different flag is represented by a bit
// in the type, and types may be combined together by a bitwise
// or operation.
//
// Note that only 5 bits are available for treapIterType, do not
// use the 3 higher-order bits. This constraint is to allow for
// expansion into a treapIterFilter, which is a uint32.
type treapIterType uint8 const ( treapIterScav treapIterType = 1 << iota // scavenged spans treapIterHuge // spans containing at least one huge page treapIterBits = iota ) // treapIterFilter is a bitwise filter of different spans by binary // properties. Each bit of a treapIterFilter represents a unique // combination of bits set in a treapIterType, in other words, it // represents the power set of a treapIterType. // // The purpose of this representation is to allow the existence of // a specific span type to bubble up in the treap (see the types // field on treapNode). // // More specifically, any treapIterType may be transformed into a // treapIterFilter for a specific combination of flags via the // following operation: 1 << (0x1f&treapIterType). type treapIterFilter uint32 // treapFilterAll represents the filter which allows all spans. const treapFilterAll = ^treapIterFilter(0) // treapFilter creates a new treapIterFilter from two treapIterTypes. // mask represents a bitmask for which flags we should check against // and match for the expected result after applying the mask. func treapFilter(mask, match treapIterType) treapIterFilter { allow := treapIterFilter(0) for i := treapIterType(0); i < 1<<treapIterBits; i++ { if mask&i == match { allow |= 1 << i } } return allow } // matches returns true if m and f intersect. func (f treapIterFilter) matches(m treapIterFilter) bool { return f&m != 0 } // treapFilter returns the treapIterFilter exactly matching this span, // i.e. popcount(result) == 1. func (s *mspan) treapFilter() treapIterFilter { have := treapIterType(0) if s.scavenged { have |= treapIterScav } if s.hugePages() > 0 { have |= treapIterHuge } return treapIterFilter(uint32(1) << (0x1f & have)) } // treapIter is a bidirectional iterator type which may be used to iterate over a // an mTreap in-order forwards (increasing order) or backwards (decreasing order). // Its purpose is to hide details about the treap from users when trying to iterate // over it. 
//
// To create iterators over the treap, call start or end on an mTreap.
type treapIter struct {
	f treapIterFilter // filter constraining which nodes the iterator visits
	t *treapNode      // current position; nil means the iterator is invalid
}

// span returns the span at the current position in the treap.
// If the treap is not valid, span will panic.
func (i *treapIter) span() *mspan {
	return i.t.span
}

// valid returns whether the iterator represents a valid position
// in the mTreap.
func (i *treapIter) valid() bool {
	return i.t != nil
}

// next moves the iterator forward by one. Once the iterator
// ceases to be valid, calling next will panic.
func (i treapIter) next() treapIter {
	i.t = i.t.succ(i.f)
	return i
}

// prev moves the iterator backwards by one. Once the iterator
// ceases to be valid, calling prev will panic.
func (i treapIter) prev() treapIter {
	i.t = i.t.pred(i.f)
	return i
}

// start returns an iterator which points to the start of the treap (the
// left-most node in the treap) subject to mask and match constraints.
func (root *mTreap) start(mask, match treapIterType) treapIter {
	f := treapFilter(mask, match)
	return treapIter{f, root.treap.findMinimal(f)}
}

// end returns an iterator which points to the end of the treap (the
// right-most node in the treap) subject to mask and match constraints.
func (root *mTreap) end(mask, match treapIterType) treapIter {
	f := treapFilter(mask, match)
	return treapIter{f, root.treap.findMaximal(f)}
}

// mutate allows one to mutate the span without removing it from the treap via a
// callback. The span's base and size are allowed to change as long as the span
// remains in the same order relative to its predecessor and successor.
//
// Note however that any operation that causes a treap rebalancing inside of fn
// is strictly forbidden, as that may cause treap node metadata to go
// out-of-sync.
func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) {
	s := i.span()
	// Save some state about the span for later inspection.
	hpages := s.hugePages()
	scavenged := s.scavenged
	// Call the mutator.
	fn(s)
	// Update unscavHugePages appropriately: subtract the old unscavenged
	// huge-page count, then add the new one.
	if !scavenged {
		mheap_.free.unscavHugePages -= hpages
	}
	if !s.scavenged {
		mheap_.free.unscavHugePages += s.hugePages()
	}
	// Update the key in case the base changed.
	i.t.key = s.base()
	// Updating invariants up the tree needs to happen if
	// anything changed at all, so just go ahead and do it
	// unconditionally.
	//
	// If it turns out nothing changed, it'll exit quickly.
	t := i.t
	for t != nil && t.updateInvariants() {
		t = t.parent
	}
}

// insert adds span to the large span treap.
func (root *mTreap) insert(span *mspan) {
	if !span.scavenged {
		root.unscavHugePages += span.hugePages()
	}
	base := span.base()
	var last *treapNode
	pt := &root.treap
	// Standard BST descent by base address; pt ends up pointing at the
	// nil child slot where the new node belongs.
	for t := *pt; t != nil; t = *pt {
		last = t
		if t.key < base {
			pt = &t.right
		} else if t.key > base {
			pt = &t.left
		} else {
			throw("inserting span already in treap")
		}
	}

	// Add t as new leaf in tree of span size and unique addrs.
	// The balanced tree is a treap using priority as the random heap priority.
	// That is, it is a binary tree ordered according to the key,
	// but then among the space of possible binary trees respecting those
	// keys, it is kept balanced on average by maintaining a heap ordering
	// on the priority: s.priority <= both s.left.priority and s.right.priority.
	// (Comment typo fixed: previously said s.right.priority twice.)
	// https://en.wikipedia.org/wiki/Treap
	// https://faculty.washington.edu/aragon/pubs/rst89.pdf

	t := (*treapNode)(mheap_.treapalloc.alloc())
	t.key = span.base()
	t.priority = fastrand()
	t.span = span
	t.maxPages = span.npages
	t.types = span.treapFilter()
	t.parent = last
	*pt = t // t now at a leaf.

	// Update the tree to maintain the various invariants.
	i := t
	for i.parent != nil && i.parent.updateInvariants() {
		i = i.parent
	}

	// Rotate up into tree according to priority.
	for t.parent != nil && t.parent.priority > t.priority {
		if t != nil && t.span.base() != t.key {
			println("runtime: insert t=", t, "t.key=", t.key)
			println("runtime: t.span=", t.span, "t.span.base()=", t.span.base())
			throw("span and treap node base addresses do not match")
		}
		if t.parent.left == t {
			root.rotateRight(t.parent)
		} else {
			if t.parent.right != t {
				throw("treap insert finds a broken treap")
			}
			root.rotateLeft(t.parent)
		}
	}
}

// removeNode detaches t from the treap, keeping the augmented invariants
// up to date, and frees the node (but not its span).
func (root *mTreap) removeNode(t *treapNode) {
	if !t.span.scavenged {
		root.unscavHugePages -= t.span.hugePages()
	}
	if t.span.base() != t.key {
		throw("span and treap node base addresses do not match")
	}
	// Rotate t down to be leaf of tree for removal, respecting priorities.
	for t.right != nil || t.left != nil {
		if t.right == nil || t.left != nil && t.left.priority < t.right.priority {
			root.rotateRight(t)
		} else {
			root.rotateLeft(t)
		}
	}
	// Remove t, now a leaf.
	if t.parent != nil {
		p := t.parent
		if p.left == t {
			p.left = nil
		} else {
			p.right = nil
		}
		// Walk up the tree updating invariants until no updates occur.
		for p != nil && p.updateInvariants() {
			p = p.parent
		}
	} else {
		// t was the root; the treap is now empty.
		root.treap = nil
	}
	// Return the found treapNode's span after freeing the treapNode.
	mheap_.treapalloc.free(unsafe.Pointer(t))
}

// find searches for, finds, and returns the treap iterator over all spans
// representing the position of the span with the smallest base address which is
// at least npages in size. If no span has at least npages it returns an invalid
// iterator.
//
// This algorithm is as follows:
// * If there's a left child and its subtree can satisfy this allocation,
//   continue down that subtree.
// * If there's no such left child, check if the root of this subtree can
//   satisfy the allocation. If so, we're done.
// * If the root cannot satisfy the allocation either, continue down the
//   right subtree if able.
// * Else, break and report that we cannot satisfy the allocation.
// // The preference for left, then current, then right, results in us getting // the left-most node which will contain the span with the lowest base // address. // // Note that if a request cannot be satisfied the fourth case will be // reached immediately at the root, since neither the left subtree nor // the right subtree will have a sufficient maxPages, whilst the root // node is also unable to satisfy it. func (root *mTreap) find(npages uintptr) treapIter { t := root.treap for t != nil { if t.span == nil { throw("treap node with nil span found") } // Iterate over the treap trying to go as far left // as possible while simultaneously ensuring that the // subtrees we choose always have a span which can // satisfy the allocation. if t.left != nil && t.left.maxPages >= npages { t = t.left } else if t.span.npages >= npages { // Before going right, if this span can satisfy the // request, stop here. break } else if t.right != nil && t.right.maxPages >= npages { t = t.right } else { t = nil } } return treapIter{treapFilterAll, t} } // removeSpan searches for, finds, deletes span along with // the associated treap node. If the span is not in the treap // then t will eventually be set to nil and the t.span // will throw. func (root *mTreap) removeSpan(span *mspan) { base := span.base() t := root.treap for t.span != span { if t.key < base { t = t.right } else if t.key > base { t = t.left } } root.removeNode(t) } // erase removes the element referred to by the current position of the // iterator. This operation consumes the given iterator, so it should no // longer be used. It is up to the caller to get the next or previous // iterator before calling erase, if need be. func (root *mTreap) erase(i treapIter) { root.removeNode(i.t) } // rotateLeft rotates the tree rooted at node x. // turning (x a (y b c)) into (y (x a b) c). 
func (root *mTreap) rotateLeft(x *treapNode) { // p -> (x a (y b c)) p := x.parent a, y := x.left, x.right b, c := y.left, y.right y.left = x x.parent = y y.right = c if c != nil { c.parent = y } x.left = a if a != nil { a.parent = x } x.right = b if b != nil { b.parent = x } y.parent = p if p == nil { root.treap = y } else if p.left == x { p.left = y } else { if p.right != x { throw("large span treap rotateLeft") } p.right = y } x.updateInvariants() y.updateInvariants() } // rotateRight rotates the tree rooted at node y. // turning (y (x a b) c) into (x a (y b c)). func (root *mTreap) rotateRight(y *treapNode) { // p -> (y (x a b) c) p := y.parent x, c := y.left, y.right a, b := x.left, x.right x.left = a if a != nil { a.parent = x } x.right = y
y.right = c if c != nil { c.parent = y } x.parent = p if p == nil { root.treap = x } else if p.left == y { p.left = x } else { if p.right != y { throw("large span treap rotateRight") } p.right = x } y.updateInvariants() x.updateInvariants() }
y.parent = x y.left = b if b != nil { b.parent = y }
random_line_split
variables.go
/******************************************************************************* * Copyright 2019 Dell Inc. * Copyright 2020 Intel Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. *******************************************************************************/ package environment import ( "fmt" "os" "reflect" "strconv" "strings" "github.com/edgexfoundry/go-mod-core-contracts/v2/clients/logger" "github.com/edgexfoundry/go-mod-core-contracts/v2/models" "github.com/edgexfoundry/go-mod-configuration/v2/pkg/types" "github.com/pelletier/go-toml" ) const ( bootTimeoutSecondsDefault = 60 bootRetrySecondsDefault = 1 defaultConfDirValue = "./res" envKeyConfigUrl = "EDGEX_CONFIGURATION_PROVIDER" envKeyUseRegistry = "EDGEX_USE_REGISTRY" envKeyStartupDuration = "EDGEX_STARTUP_DURATION" envKeyStartupInterval = "EDGEX_STARTUP_INTERVAL" envConfDir = "EDGEX_CONF_DIR" envProfile = "EDGEX_PROFILE" envFile = "EDGEX_CONFIG_FILE" tomlPathSeparator = "." tomlNameSeparator = "-" envNameSeparator = "_" ) // Variables is receiver that holds Variables variables and encapsulates toml.Tree-based configuration field // overrides. Assumes "_" embedded in Variables variable key separates sub-structs; e.g. foo_bar_baz might refer to // // type foo struct { // bar struct { // baz string // } // } type Variables struct { variables map[string]string lc logger.LoggingClient } // NewVariables constructor reads/stores os.Environ() for use by Variables receiver methods. 
func NewVariables(lc logger.LoggingClient) *Variables { osEnv := os.Environ() e := &Variables{ variables: make(map[string]string, len(osEnv)), lc: lc, } for _, env := range osEnv { // Can not use Split() on '=' since the value may have an '=' in it, so changed to use Index() index := strings.Index(env, "=") if index == -1 { continue } key := env[:index] value := env[index+1:] e.variables[key] = value } return e } // UseRegistry returns whether the envKeyUseRegistry key is set to true and whether the override was used func (e *Variables) UseRegistry() (bool, bool) { value := os.Getenv(envKeyUseRegistry) if len(value) == 0 { return false, false } logEnvironmentOverride(e.lc, "-r/--registry", envKeyUseRegistry, value) return value == "true", true } // OverrideConfiguration method replaces values in the configuration for matching Variables variable keys. // serviceConfig must be pointer to the service configuration. func (e *Variables) OverrideConfiguration(serviceConfig interface{}) (int, error) { var overrideCount = 0 contents, err := toml.Marshal(reflect.ValueOf(serviceConfig).Elem().Interface()) if err != nil { return 0, err } configTree, err := toml.LoadBytes(contents) if err != nil { return 0, err } // The toml.Tree API keys() only return to top level keys, rather that paths. // It is also missing a GetPaths so have to spin our own paths := e.buildPaths(configTree.ToMap()) // Now that we have all the paths in the config tree, we need to create map of corresponding override names that // could match override environment variable names. 
overrideNames := e.buildOverrideNames(paths) for envVar, envValue := range e.variables { path, found := overrideNames[envVar] if !found { continue } oldValue := configTree.Get(path) newValue, err := e.convertToType(oldValue, envValue) if err != nil { return 0, fmt.Errorf("environment value override failed for %s=%s: %s", envVar, envValue, err.Error()) } configTree.Set(path, newValue) overrideCount++ logEnvironmentOverride(e.lc, path, envVar, envValue) } // Put the configuration back into the services configuration struct with the overridden values err = configTree.Unmarshal(serviceConfig) if err != nil { return 0, fmt.Errorf("could not marshal toml configTree to configuration: %s", err.Error()) } return overrideCount, nil } // buildPaths create the path strings for all settings in the Config tree's key map func (e *Variables) buildPaths(keyMap map[string]interface{}) []string { var paths []string for key, item := range keyMap { if reflect.TypeOf(item).Kind() != reflect.Map { paths = append(paths, key) continue } subMap := item.(map[string]interface{}) subPaths := e.buildPaths(subMap) for _, path := range subPaths { paths = append(paths, fmt.Sprintf("%s.%s", key, path)) } } return paths } func (e *Variables) buildOverrideNames(paths []string) map[string]string { names := map[string]string{} for _, path := range paths { names[e.getOverrideNameFor(path)] = path } return names } func (_ *Variables) getOverrideNameFor(path string) string { // "." & "-" are the only special character allowed in TOML path not allowed in environment variable Name override := strings.ReplaceAll(path, tomlPathSeparator, envNameSeparator) override = strings.ReplaceAll(override, tomlNameSeparator, envNameSeparator) override = strings.ToUpper(override) return override } // OverrideConfigProviderInfo overrides the Configuration Provider ServiceConfig values // from an Variables variable value (if it exists). 
func (e *Variables) OverrideConfigProviderInfo(configProviderInfo types.ServiceConfig) (types.ServiceConfig, error)
// convertToType attempts to convert the string value to the specified type of the old value func (_ *Variables) convertToType(oldValue interface{}, value string) (newValue interface{}, err error) { switch oldValue.(type) { case []string: newValue = parseCommaSeparatedSlice(value) case []interface{}: newValue = parseCommaSeparatedSlice(value) case string: newValue = value case bool: newValue, err = strconv.ParseBool(value) case int: newValue, err = strconv.ParseInt(value, 10, strconv.IntSize) newValue = int(newValue.(int64)) case int8: newValue, err = strconv.ParseInt(value, 10, 8) newValue = int8(newValue.(int64)) case int16: newValue, err = strconv.ParseInt(value, 10, 16) newValue = int16(newValue.(int64)) case int32: newValue, err = strconv.ParseInt(value, 10, 32) newValue = int32(newValue.(int64)) case int64: newValue, err = strconv.ParseInt(value, 10, 64) case uint: newValue, err = strconv.ParseUint(value, 10, strconv.IntSize) newValue = uint(newValue.(uint64)) case uint8: newValue, err = strconv.ParseUint(value, 10, 8) newValue = uint8(newValue.(uint64)) case uint16: newValue, err = strconv.ParseUint(value, 10, 16) newValue = uint16(newValue.(uint64)) case uint32: newValue, err = strconv.ParseUint(value, 10, 32) newValue = uint32(newValue.(uint64)) case uint64: newValue, err = strconv.ParseUint(value, 10, 64) case float32: newValue, err = strconv.ParseFloat(value, 32) newValue = float32(newValue.(float64)) case float64: newValue, err = strconv.ParseFloat(value, 64) default: err = fmt.Errorf( "configuration type of '%s' is not supported for environment variable override", reflect.TypeOf(oldValue).String()) } return newValue, err } // StartupInfo provides the startup timer values which are applied to the StartupTimer created at boot. type StartupInfo struct { Duration int Interval int } // GetStartupInfo gets the Service StartupInfo values from an Variables variable value (if it exists) // or uses the default values. 
func GetStartupInfo(serviceKey string) StartupInfo { // lc hasn't be created at the time this info is needed so have to create local client. lc := logger.NewClient(serviceKey, models.InfoLog) startup := StartupInfo{ Duration: bootTimeoutSecondsDefault, Interval: bootRetrySecondsDefault, } // Get the startup timer configuration from environment, if provided. value := os.Getenv(envKeyStartupDuration) if len(value) > 0 { logEnvironmentOverride(lc, "Startup Duration", envKeyStartupDuration, value) if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 { startup.Duration = int(n) } } // Get the startup timer interval, if provided. value = os.Getenv(envKeyStartupInterval) if len(value) > 0 { logEnvironmentOverride(lc, "Startup Interval", envKeyStartupInterval, value) if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 { startup.Interval = int(n) } } return startup } // GetConfDir get the config directory value from an Variables variable value (if it exists) // or uses passed in value or default if previous result in blank. func GetConfDir(lc logger.LoggingClient, configDir string) string { envValue := os.Getenv(envConfDir) if len(envValue) > 0 { configDir = envValue logEnvironmentOverride(lc, "-c/-confdir", envConfDir, envValue) } if len(configDir) == 0 { configDir = defaultConfDirValue } return configDir } // GetProfileDir get the profile directory value from an Variables variable value (if it exists) // or uses passed in value or default if previous result in blank. func GetProfileDir(lc logger.LoggingClient, profileDir string) string { envValue := os.Getenv(envProfile) if len(envValue) > 0 { profileDir = envValue logEnvironmentOverride(lc, "-p/-profile", envProfile, envValue) } if len(profileDir) > 0 { profileDir += "/" } return profileDir } // GetConfigFileName gets the configuration filename value from an Variables variable value (if it exists) // or uses passed in value. 
func GetConfigFileName(lc logger.LoggingClient, configFileName string) string { envValue := os.Getenv(envFile) if len(envValue) > 0 { configFileName = envValue logEnvironmentOverride(lc, "-f/-file", envFile, envValue) } return configFileName } // parseCommaSeparatedSlice converts comma separated list to a string slice func parseCommaSeparatedSlice(value string) (values []interface{}) { // Assumption is environment variable value is comma separated // Whitespace can vary so must be trimmed out result := strings.Split(strings.TrimSpace(value), ",") for _, entry := range result { values = append(values, strings.TrimSpace(entry)) } return values } // logEnvironmentOverride logs that an option or configuration has been override by an environment variable. func logEnvironmentOverride(lc logger.LoggingClient, name string, key string, value string) { lc.Info(fmt.Sprintf("Variables override of '%s' by environment variable: %s=%s", name, key, value)) }
{ url := os.Getenv(envKeyConfigUrl) if len(url) > 0 { logEnvironmentOverride(e.lc, "Configuration Provider Information", envKeyConfigUrl, url) if err := configProviderInfo.PopulateFromUrl(url); err != nil { return types.ServiceConfig{}, err } } return configProviderInfo, nil }
identifier_body
variables.go
/******************************************************************************* * Copyright 2019 Dell Inc. * Copyright 2020 Intel Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. *******************************************************************************/ package environment import ( "fmt" "os" "reflect" "strconv" "strings" "github.com/edgexfoundry/go-mod-core-contracts/v2/clients/logger" "github.com/edgexfoundry/go-mod-core-contracts/v2/models" "github.com/edgexfoundry/go-mod-configuration/v2/pkg/types" "github.com/pelletier/go-toml" ) const ( bootTimeoutSecondsDefault = 60 bootRetrySecondsDefault = 1 defaultConfDirValue = "./res" envKeyConfigUrl = "EDGEX_CONFIGURATION_PROVIDER" envKeyUseRegistry = "EDGEX_USE_REGISTRY" envKeyStartupDuration = "EDGEX_STARTUP_DURATION" envKeyStartupInterval = "EDGEX_STARTUP_INTERVAL" envConfDir = "EDGEX_CONF_DIR" envProfile = "EDGEX_PROFILE" envFile = "EDGEX_CONFIG_FILE" tomlPathSeparator = "." tomlNameSeparator = "-" envNameSeparator = "_" ) // Variables is receiver that holds Variables variables and encapsulates toml.Tree-based configuration field // overrides. Assumes "_" embedded in Variables variable key separates sub-structs; e.g. foo_bar_baz might refer to // // type foo struct { // bar struct { // baz string // } // } type Variables struct { variables map[string]string lc logger.LoggingClient } // NewVariables constructor reads/stores os.Environ() for use by Variables receiver methods. 
func NewVariables(lc logger.LoggingClient) *Variables { osEnv := os.Environ() e := &Variables{ variables: make(map[string]string, len(osEnv)), lc: lc, } for _, env := range osEnv { // Can not use Split() on '=' since the value may have an '=' in it, so changed to use Index() index := strings.Index(env, "=") if index == -1 { continue } key := env[:index] value := env[index+1:] e.variables[key] = value } return e } // UseRegistry returns whether the envKeyUseRegistry key is set to true and whether the override was used func (e *Variables) UseRegistry() (bool, bool) { value := os.Getenv(envKeyUseRegistry) if len(value) == 0 { return false, false } logEnvironmentOverride(e.lc, "-r/--registry", envKeyUseRegistry, value) return value == "true", true } // OverrideConfiguration method replaces values in the configuration for matching Variables variable keys. // serviceConfig must be pointer to the service configuration. func (e *Variables) OverrideConfiguration(serviceConfig interface{}) (int, error) { var overrideCount = 0 contents, err := toml.Marshal(reflect.ValueOf(serviceConfig).Elem().Interface()) if err != nil { return 0, err } configTree, err := toml.LoadBytes(contents) if err != nil
// The toml.Tree API keys() only return to top level keys, rather that paths. // It is also missing a GetPaths so have to spin our own paths := e.buildPaths(configTree.ToMap()) // Now that we have all the paths in the config tree, we need to create map of corresponding override names that // could match override environment variable names. overrideNames := e.buildOverrideNames(paths) for envVar, envValue := range e.variables { path, found := overrideNames[envVar] if !found { continue } oldValue := configTree.Get(path) newValue, err := e.convertToType(oldValue, envValue) if err != nil { return 0, fmt.Errorf("environment value override failed for %s=%s: %s", envVar, envValue, err.Error()) } configTree.Set(path, newValue) overrideCount++ logEnvironmentOverride(e.lc, path, envVar, envValue) } // Put the configuration back into the services configuration struct with the overridden values err = configTree.Unmarshal(serviceConfig) if err != nil { return 0, fmt.Errorf("could not marshal toml configTree to configuration: %s", err.Error()) } return overrideCount, nil } // buildPaths create the path strings for all settings in the Config tree's key map func (e *Variables) buildPaths(keyMap map[string]interface{}) []string { var paths []string for key, item := range keyMap { if reflect.TypeOf(item).Kind() != reflect.Map { paths = append(paths, key) continue } subMap := item.(map[string]interface{}) subPaths := e.buildPaths(subMap) for _, path := range subPaths { paths = append(paths, fmt.Sprintf("%s.%s", key, path)) } } return paths } func (e *Variables) buildOverrideNames(paths []string) map[string]string { names := map[string]string{} for _, path := range paths { names[e.getOverrideNameFor(path)] = path } return names } func (_ *Variables) getOverrideNameFor(path string) string { // "." 
& "-" are the only special character allowed in TOML path not allowed in environment variable Name override := strings.ReplaceAll(path, tomlPathSeparator, envNameSeparator) override = strings.ReplaceAll(override, tomlNameSeparator, envNameSeparator) override = strings.ToUpper(override) return override } // OverrideConfigProviderInfo overrides the Configuration Provider ServiceConfig values // from an Variables variable value (if it exists). func (e *Variables) OverrideConfigProviderInfo(configProviderInfo types.ServiceConfig) (types.ServiceConfig, error) { url := os.Getenv(envKeyConfigUrl) if len(url) > 0 { logEnvironmentOverride(e.lc, "Configuration Provider Information", envKeyConfigUrl, url) if err := configProviderInfo.PopulateFromUrl(url); err != nil { return types.ServiceConfig{}, err } } return configProviderInfo, nil } // convertToType attempts to convert the string value to the specified type of the old value func (_ *Variables) convertToType(oldValue interface{}, value string) (newValue interface{}, err error) { switch oldValue.(type) { case []string: newValue = parseCommaSeparatedSlice(value) case []interface{}: newValue = parseCommaSeparatedSlice(value) case string: newValue = value case bool: newValue, err = strconv.ParseBool(value) case int: newValue, err = strconv.ParseInt(value, 10, strconv.IntSize) newValue = int(newValue.(int64)) case int8: newValue, err = strconv.ParseInt(value, 10, 8) newValue = int8(newValue.(int64)) case int16: newValue, err = strconv.ParseInt(value, 10, 16) newValue = int16(newValue.(int64)) case int32: newValue, err = strconv.ParseInt(value, 10, 32) newValue = int32(newValue.(int64)) case int64: newValue, err = strconv.ParseInt(value, 10, 64) case uint: newValue, err = strconv.ParseUint(value, 10, strconv.IntSize) newValue = uint(newValue.(uint64)) case uint8: newValue, err = strconv.ParseUint(value, 10, 8) newValue = uint8(newValue.(uint64)) case uint16: newValue, err = strconv.ParseUint(value, 10, 16) newValue = 
uint16(newValue.(uint64)) case uint32: newValue, err = strconv.ParseUint(value, 10, 32) newValue = uint32(newValue.(uint64)) case uint64: newValue, err = strconv.ParseUint(value, 10, 64) case float32: newValue, err = strconv.ParseFloat(value, 32) newValue = float32(newValue.(float64)) case float64: newValue, err = strconv.ParseFloat(value, 64) default: err = fmt.Errorf( "configuration type of '%s' is not supported for environment variable override", reflect.TypeOf(oldValue).String()) } return newValue, err } // StartupInfo provides the startup timer values which are applied to the StartupTimer created at boot. type StartupInfo struct { Duration int Interval int } // GetStartupInfo gets the Service StartupInfo values from an Variables variable value (if it exists) // or uses the default values. func GetStartupInfo(serviceKey string) StartupInfo { // lc hasn't be created at the time this info is needed so have to create local client. lc := logger.NewClient(serviceKey, models.InfoLog) startup := StartupInfo{ Duration: bootTimeoutSecondsDefault, Interval: bootRetrySecondsDefault, } // Get the startup timer configuration from environment, if provided. value := os.Getenv(envKeyStartupDuration) if len(value) > 0 { logEnvironmentOverride(lc, "Startup Duration", envKeyStartupDuration, value) if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 { startup.Duration = int(n) } } // Get the startup timer interval, if provided. value = os.Getenv(envKeyStartupInterval) if len(value) > 0 { logEnvironmentOverride(lc, "Startup Interval", envKeyStartupInterval, value) if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 { startup.Interval = int(n) } } return startup } // GetConfDir get the config directory value from an Variables variable value (if it exists) // or uses passed in value or default if previous result in blank. 
func GetConfDir(lc logger.LoggingClient, configDir string) string { envValue := os.Getenv(envConfDir) if len(envValue) > 0 { configDir = envValue logEnvironmentOverride(lc, "-c/-confdir", envConfDir, envValue) } if len(configDir) == 0 { configDir = defaultConfDirValue } return configDir } // GetProfileDir get the profile directory value from an Variables variable value (if it exists) // or uses passed in value or default if previous result in blank. func GetProfileDir(lc logger.LoggingClient, profileDir string) string { envValue := os.Getenv(envProfile) if len(envValue) > 0 { profileDir = envValue logEnvironmentOverride(lc, "-p/-profile", envProfile, envValue) } if len(profileDir) > 0 { profileDir += "/" } return profileDir } // GetConfigFileName gets the configuration filename value from an Variables variable value (if it exists) // or uses passed in value. func GetConfigFileName(lc logger.LoggingClient, configFileName string) string { envValue := os.Getenv(envFile) if len(envValue) > 0 { configFileName = envValue logEnvironmentOverride(lc, "-f/-file", envFile, envValue) } return configFileName } // parseCommaSeparatedSlice converts comma separated list to a string slice func parseCommaSeparatedSlice(value string) (values []interface{}) { // Assumption is environment variable value is comma separated // Whitespace can vary so must be trimmed out result := strings.Split(strings.TrimSpace(value), ",") for _, entry := range result { values = append(values, strings.TrimSpace(entry)) } return values } // logEnvironmentOverride logs that an option or configuration has been override by an environment variable. func logEnvironmentOverride(lc logger.LoggingClient, name string, key string, value string) { lc.Info(fmt.Sprintf("Variables override of '%s' by environment variable: %s=%s", name, key, value)) }
{ return 0, err }
conditional_block
variables.go
/******************************************************************************* * Copyright 2019 Dell Inc. * Copyright 2020 Intel Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. *******************************************************************************/ package environment import ( "fmt" "os" "reflect" "strconv" "strings" "github.com/edgexfoundry/go-mod-core-contracts/v2/clients/logger" "github.com/edgexfoundry/go-mod-core-contracts/v2/models" "github.com/edgexfoundry/go-mod-configuration/v2/pkg/types" "github.com/pelletier/go-toml" ) const ( bootTimeoutSecondsDefault = 60 bootRetrySecondsDefault = 1 defaultConfDirValue = "./res" envKeyConfigUrl = "EDGEX_CONFIGURATION_PROVIDER" envKeyUseRegistry = "EDGEX_USE_REGISTRY" envKeyStartupDuration = "EDGEX_STARTUP_DURATION" envKeyStartupInterval = "EDGEX_STARTUP_INTERVAL" envConfDir = "EDGEX_CONF_DIR" envProfile = "EDGEX_PROFILE" envFile = "EDGEX_CONFIG_FILE" tomlPathSeparator = "." tomlNameSeparator = "-" envNameSeparator = "_" ) // Variables is receiver that holds Variables variables and encapsulates toml.Tree-based configuration field // overrides. Assumes "_" embedded in Variables variable key separates sub-structs; e.g. foo_bar_baz might refer to // // type foo struct { // bar struct { // baz string // } // } type Variables struct { variables map[string]string lc logger.LoggingClient } // NewVariables constructor reads/stores os.Environ() for use by Variables receiver methods. 
func NewVariables(lc logger.LoggingClient) *Variables { osEnv := os.Environ() e := &Variables{ variables: make(map[string]string, len(osEnv)), lc: lc, } for _, env := range osEnv { // Can not use Split() on '=' since the value may have an '=' in it, so changed to use Index() index := strings.Index(env, "=") if index == -1 { continue } key := env[:index] value := env[index+1:] e.variables[key] = value } return e } // UseRegistry returns whether the envKeyUseRegistry key is set to true and whether the override was used func (e *Variables) UseRegistry() (bool, bool) { value := os.Getenv(envKeyUseRegistry) if len(value) == 0 { return false, false } logEnvironmentOverride(e.lc, "-r/--registry", envKeyUseRegistry, value) return value == "true", true } // OverrideConfiguration method replaces values in the configuration for matching Variables variable keys. // serviceConfig must be pointer to the service configuration. func (e *Variables) OverrideConfiguration(serviceConfig interface{}) (int, error) { var overrideCount = 0 contents, err := toml.Marshal(reflect.ValueOf(serviceConfig).Elem().Interface()) if err != nil { return 0, err } configTree, err := toml.LoadBytes(contents) if err != nil { return 0, err } // The toml.Tree API keys() only return to top level keys, rather that paths. // It is also missing a GetPaths so have to spin our own paths := e.buildPaths(configTree.ToMap()) // Now that we have all the paths in the config tree, we need to create map of corresponding override names that // could match override environment variable names. 
overrideNames := e.buildOverrideNames(paths) for envVar, envValue := range e.variables { path, found := overrideNames[envVar] if !found { continue } oldValue := configTree.Get(path) newValue, err := e.convertToType(oldValue, envValue) if err != nil { return 0, fmt.Errorf("environment value override failed for %s=%s: %s", envVar, envValue, err.Error()) } configTree.Set(path, newValue) overrideCount++ logEnvironmentOverride(e.lc, path, envVar, envValue) } // Put the configuration back into the services configuration struct with the overridden values err = configTree.Unmarshal(serviceConfig) if err != nil { return 0, fmt.Errorf("could not marshal toml configTree to configuration: %s", err.Error()) } return overrideCount, nil } // buildPaths create the path strings for all settings in the Config tree's key map func (e *Variables) buildPaths(keyMap map[string]interface{}) []string { var paths []string for key, item := range keyMap { if reflect.TypeOf(item).Kind() != reflect.Map { paths = append(paths, key) continue } subMap := item.(map[string]interface{}) subPaths := e.buildPaths(subMap) for _, path := range subPaths { paths = append(paths, fmt.Sprintf("%s.%s", key, path)) } } return paths } func (e *Variables) buildOverrideNames(paths []string) map[string]string { names := map[string]string{} for _, path := range paths { names[e.getOverrideNameFor(path)] = path
} return names } func (_ *Variables) getOverrideNameFor(path string) string { // "." & "-" are the only special character allowed in TOML path not allowed in environment variable Name override := strings.ReplaceAll(path, tomlPathSeparator, envNameSeparator) override = strings.ReplaceAll(override, tomlNameSeparator, envNameSeparator) override = strings.ToUpper(override) return override } // OverrideConfigProviderInfo overrides the Configuration Provider ServiceConfig values // from an Variables variable value (if it exists). func (e *Variables) OverrideConfigProviderInfo(configProviderInfo types.ServiceConfig) (types.ServiceConfig, error) { url := os.Getenv(envKeyConfigUrl) if len(url) > 0 { logEnvironmentOverride(e.lc, "Configuration Provider Information", envKeyConfigUrl, url) if err := configProviderInfo.PopulateFromUrl(url); err != nil { return types.ServiceConfig{}, err } } return configProviderInfo, nil } // convertToType attempts to convert the string value to the specified type of the old value func (_ *Variables) convertToType(oldValue interface{}, value string) (newValue interface{}, err error) { switch oldValue.(type) { case []string: newValue = parseCommaSeparatedSlice(value) case []interface{}: newValue = parseCommaSeparatedSlice(value) case string: newValue = value case bool: newValue, err = strconv.ParseBool(value) case int: newValue, err = strconv.ParseInt(value, 10, strconv.IntSize) newValue = int(newValue.(int64)) case int8: newValue, err = strconv.ParseInt(value, 10, 8) newValue = int8(newValue.(int64)) case int16: newValue, err = strconv.ParseInt(value, 10, 16) newValue = int16(newValue.(int64)) case int32: newValue, err = strconv.ParseInt(value, 10, 32) newValue = int32(newValue.(int64)) case int64: newValue, err = strconv.ParseInt(value, 10, 64) case uint: newValue, err = strconv.ParseUint(value, 10, strconv.IntSize) newValue = uint(newValue.(uint64)) case uint8: newValue, err = strconv.ParseUint(value, 10, 8) newValue = 
uint8(newValue.(uint64)) case uint16: newValue, err = strconv.ParseUint(value, 10, 16) newValue = uint16(newValue.(uint64)) case uint32: newValue, err = strconv.ParseUint(value, 10, 32) newValue = uint32(newValue.(uint64)) case uint64: newValue, err = strconv.ParseUint(value, 10, 64) case float32: newValue, err = strconv.ParseFloat(value, 32) newValue = float32(newValue.(float64)) case float64: newValue, err = strconv.ParseFloat(value, 64) default: err = fmt.Errorf( "configuration type of '%s' is not supported for environment variable override", reflect.TypeOf(oldValue).String()) } return newValue, err } // StartupInfo provides the startup timer values which are applied to the StartupTimer created at boot. type StartupInfo struct { Duration int Interval int } // GetStartupInfo gets the Service StartupInfo values from an Variables variable value (if it exists) // or uses the default values. func GetStartupInfo(serviceKey string) StartupInfo { // lc hasn't be created at the time this info is needed so have to create local client. lc := logger.NewClient(serviceKey, models.InfoLog) startup := StartupInfo{ Duration: bootTimeoutSecondsDefault, Interval: bootRetrySecondsDefault, } // Get the startup timer configuration from environment, if provided. value := os.Getenv(envKeyStartupDuration) if len(value) > 0 { logEnvironmentOverride(lc, "Startup Duration", envKeyStartupDuration, value) if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 { startup.Duration = int(n) } } // Get the startup timer interval, if provided. value = os.Getenv(envKeyStartupInterval) if len(value) > 0 { logEnvironmentOverride(lc, "Startup Interval", envKeyStartupInterval, value) if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 { startup.Interval = int(n) } } return startup } // GetConfDir get the config directory value from an Variables variable value (if it exists) // or uses passed in value or default if previous result in blank. 
func GetConfDir(lc logger.LoggingClient, configDir string) string { envValue := os.Getenv(envConfDir) if len(envValue) > 0 { configDir = envValue logEnvironmentOverride(lc, "-c/-confdir", envConfDir, envValue) } if len(configDir) == 0 { configDir = defaultConfDirValue } return configDir } // GetProfileDir get the profile directory value from an Variables variable value (if it exists) // or uses passed in value or default if previous result in blank. func GetProfileDir(lc logger.LoggingClient, profileDir string) string { envValue := os.Getenv(envProfile) if len(envValue) > 0 { profileDir = envValue logEnvironmentOverride(lc, "-p/-profile", envProfile, envValue) } if len(profileDir) > 0 { profileDir += "/" } return profileDir } // GetConfigFileName gets the configuration filename value from an Variables variable value (if it exists) // or uses passed in value. func GetConfigFileName(lc logger.LoggingClient, configFileName string) string { envValue := os.Getenv(envFile) if len(envValue) > 0 { configFileName = envValue logEnvironmentOverride(lc, "-f/-file", envFile, envValue) } return configFileName } // parseCommaSeparatedSlice converts comma separated list to a string slice func parseCommaSeparatedSlice(value string) (values []interface{}) { // Assumption is environment variable value is comma separated // Whitespace can vary so must be trimmed out result := strings.Split(strings.TrimSpace(value), ",") for _, entry := range result { values = append(values, strings.TrimSpace(entry)) } return values } // logEnvironmentOverride logs that an option or configuration has been override by an environment variable. func logEnvironmentOverride(lc logger.LoggingClient, name string, key string, value string) { lc.Info(fmt.Sprintf("Variables override of '%s' by environment variable: %s=%s", name, key, value)) }
random_line_split