file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ethereum-block.ts | import {toBigIntBE, toBufferBE} from 'bigint-buffer';
import {RlpEncode, RlpList} from 'rlp-stream';
import * as secp256k1 from 'secp256k1';
declare var process: {browser: boolean;};
const keccak = require('keccak');
interface NativeInterface {
recoverFromAddress(verifyBlock: Buffer, signature: Buffer, recovery: boolean):
Promise<bigint>;
getPublicAddress(privateKey: bigint): Promise<bigint>;
signTransaction(
transaction: Buffer, privateKey: bigint, chainId: number,
transactionRlp: RlpList): RlpList;
}
let native: NativeInterface;
if (!process.browser) {
try {
native = require('bindings')('block_native');
} catch (e) {
console.log(e);
console.warn(
'Native bindings loading failed, using pure JS implementation');
}
}
/** A deserialized Ethereum block. */
export interface EthereumBlock {
/** The header for the Ethereum block. */
header: EthereumHeader;
/** The transaction list for the Ethereum block. */
transactions: EthereumTransaction[];
/** A list of headers for uncles. */
uncles: EthereumHeader[];
}
export interface EthereumBlockDecoderOptions {
/** For a EIP-155 transaction, which chain to use to replace v. */
chainId: number;
/**
* For decoding a block, which block number EIP-155 semantics automatically
* applies.
*/
eip155Block: bigint;
/**
* For decoding a transaction, whether or not to use EIP-155 semantics to
* decode the transaction.
*/
eip155: boolean;
/**
* If available, use native bindings to do transaction processing.
*/
native: boolean;
}
const defaultOptions: EthereumBlockDecoderOptions = {
chainId: 1,
eip155Block: BigInt(2675000),
eip155: false,
native: true
};
export const CONTRACT_CREATION: bigint = BigInt(-1);
/** A header for an Ethereum block. */
export interface EthereumHeader {
/** The Keccak 256-bit hash of the parent block’s header, in its entirety. */
parentHash: bigint;
/** The Keccak 256-bit hash of the ommers list portion of this block. */
uncleHash: bigint;
/**
* The 160-bit address to which all fees collected from the successful mining
* of this block be transferred.
*/
beneficiary: bigint;
/**
* The Keccak 256-bit hash of the root node of the state trie, after all
* transactions are executed and finalisations applied.
*/
stateRoot: bigint;
/**
* The Keccak 256-bit hash of the root node of the trie structure populated
* with each transaction in the transactions list portion of the block.
*/
transactionsRoot: bigint;
/**
* The Keccak 256-bit hash of the root node of the trie structure populated
* with the receipts of each transaction in the transactions list portion of
* the block.
*/
receiptsRoot: bigint;
/**
* The Bloom filter composed from indexable information (logger address and
* log topics) contained in each log entry from the receipt of each
* transaction in the transactions list.
*/
logsBloom: Buffer;
/**
* A scalar value corresponding to the difficulty level of this block. This
* can be calculated from the previous block’s difficulty level and the
* timestamp.
*/
difficulty: bigint;
/**
* A scalar value equal to the number of ancestor blocks. The genesis block
* has a number of zero.
*/
blockNumber: bigint;
/**
* A scalar value equal to the current limit of gas expenditure per block.
*/
gasLimit: bigint;
/**
* A scalar value equal to the total gas used in transactions in this block.
*/
gasUsed: bigint;
/**
* A scalar value equal to the reasonable output of Unix’s time() at this
* block’s inception.
*/
timestamp: bigint;
/**
* An arbitrary byte array containing data relevant to this block. This must
* be 32 bytes or fewer.
*/
extraData: Buffer;
/**
* A 256-bit hash which proves combined with the nonce that a sufficient
* amount of computation has been carried out on this block.
*/
mixHash: bigint;
/**
* A 64-bit hash which proves combined with the mix-hash that a sufficient
* amount of computation has been carried out on this block.
*/
nonce: bigint;
}
/** The data stored in a block for a signed Ethereum transaction */
export interface EthereumTransaction {
/**
* A scalar value equal to the number of transactions sent from this address
* or, in the case of accounts with associated code, the number of
* contract-creations made by this account.
*/
nonce: bigint;
/**
* A scalar value equal to the number of Wei to be paid per unit of gas for
* all computation costs incurred as a result of the execution of this
* transaction.
*/
gasPrice: bigint;
/**
* A scalar value equal to the maximum amount of gas that should be used in
* executing this transaction.
*/
gasLimit: bigint;
/**
* A scalar value equal to the number of Wei to be transferred to the message
* call’s recipient or, in the case of contract creation, as an endowment to
* the newly created account.
*/
value: bigint;
/**
* The 160-bit address of the message call’s recipient or, for a contract
* creation transaction, CONTRACT_CREATION (-1), to distinguish against
* account 0x0000000000000000000000000000000000000000.
*/
to: bigint;
/**
* An unlimited size byte array specifying the EVM-code for the account
* initialisation procedure, for a contract transaction, or an unlimited size
* byte array specifying the input data of the message call, for a message
* call.
*/
data: Buffer;
/** The 160-bit address of the message caller. */
from: bigint;
}
export class EthereumBlockDecoderError extends Error {
constructor(message: string) {
super(message);
}
}
const HEADER_PARENT_HASH = 0;
const HEADER_UNCLE_HASH = 1;
const HEADER_BENEFICIARY = 2;
const HEADER_STATE_ROOT = 3;
const HEADER_TRANSACTIONS_ROOT = 4;
const HEADER_RECEIPTS_ROOT = 5;
const HEADER_LOGSBLOOM = 6;
const HEADER_DIFFICULTY = 7;
const HEADER_BLOCK_NUMBER = 8;
const HEADER_GAS_LIMIT = 9;
const HEADER_GAS_USED = 10;
const HEADER_TIMESTAMP = 11;
const HEADER_EXTRADATA = 12;
const HEADER_MIXHASH = 13;
const HEADER_NONCE = 14;
/**
* Given a RLP-serialized list with an Ethereum header, decodes the list and
* validates the Ethereum header.
*
* @param header The RLP-encoded list with the header to decode.
*
* @returns A validated and decoded EthereumHeader.
*/
export function decodeHeader(header: RlpList): EthereumHeader {
if (!Array.isArray(header)) {
throw new EthereumBlockDecoderError(
`Expected block header as RLP-encoded list!`);
}
return {
parentHash: toBigIntBE(header[HEADER_PARENT_HASH] as Buffer),
uncleHash: toBigIntBE(header[HEADER_UNCLE_HASH] as Buffer),
beneficiary: toBigIntBE(header[HEADER_BENEFICIARY] as Buffer),
stateRoot: toBigIntBE(header[HEADER_STATE_ROOT] as Buffer),
transactionsRoot: toBigIntBE(header[HEADER_TRANSACTIONS_ROOT] as Buffer),
receiptsRoot: toBigIntBE(header[HEADER_RECEIPTS_ROOT] as Buffer),
logsBloom: header[HEADER_LOGSBLOOM] as Buffer,
difficulty: toBigIntBE(header[HEADER_DIFFICULTY] as Buffer),
blockNumber: toBigIntBE(header[HEADER_BLOCK_NUMBER] as Buffer),
gasLimit: toBigIntBE(header[HEADER_GAS_LIMIT] as Buffer),
gasUsed: toBigIntBE(header[HEADER_GAS_USED] as Buffer),
timestamp: toBigIntBE(header[HEADER_TIMESTAMP] as Buffer),
extraData: (header[HEADER_EXTRADATA] as Buffer),
mixHash: toBigIntBE(header[HEADER_MIXHASH] as Buffer),
nonce: toBigIntBE(header[HEADER_NONCE] as Buffer)
};
}
const TRANSACTION_NONCE = 0;
const TRANSACTION_GASPRICE = 1;
const TRANSACTION_STARTGAS = 2;
const TRANSACTION_TO = 3;
const TRANSACTION_VALUE = 4;
const TRANSACTION_DATA = 5;
const TRANSACTION_V = 6;
const TRANSACTION_R = 7;
const TRANSACTION_S = 8;
/**
* Given a RLP-serialized list with an Ethereum transaction, decodes the list
* and validates the Ethereum transaction.
*
* @param header The RLP-encoded list with the transaction to decode.
*
* @returns A validated and decoded EthereumTransaction.
*/
export async function decodeTransaction(
transaction: RlpList,
options: EthereumBlockDecoderOptions =
defaultOptions): Promise<EthereumTransaction> {
const v = transaction[TRANSACTION_V] as Buffer;
const r = transaction[TRANSACTION_R] as Buffer;
const s = transaction[TRANSACTION_S] as Buffer;
if (r.length > 32) {
throw new Error(`r > 32 bytes!`);
}
if (s.length > 32) {
throw new Error(`s > 32 bytes!`);
}
const signature = Buffer.alloc(64, 0);
r.copy(signature, 32 - r.length);
s.copy(signature, 64 - s.length);
const chainV = options.chainId * 2 + 35;
const verifySignature =
options.eip155 ? v[0] === chainV || v[0] === chainV + 1 : false;
const recovery =
verifySignature ? v[0] - (options.chainId * 2 + 8) - 27 : v[0] - 27;
if (recovery !== 0 && recovery !== 1) {
throw new EthereumBlockDecoderError(
`Invalid infinite recovery = ${recovery}`);
}
// TODO: Get existing buffer from stream instead of regenerating it.
const toHash = verifySignature ?
RlpEncode([
(transaction[TRANSACTION_NONCE] as Buffer),
(transaction[TRANSACTION_GASPRICE] as Buffer),
(transaction[TRANSACTION_STARTGAS] as Buffer),
(transaction[TRANSACTION_TO] as Buffer),
(transaction[TRANSACTION_VALUE] as Buffer),
(transaction[TRANSACTION_DATA] as Buffer),
Buffer.from([options.chainId]),
Buffer.from([]),
Buffer.from([]),
]) :
RlpEncode([
(transaction[TRANSACTION_NONCE] as Buffer),
(transaction[TRANSACTION_GASPRICE] as Buffer),
(transaction[TRANSACTION_STARTGAS] as Buffer),
(transaction[TRANSACTION_TO] as Buffer),
(transaction[TRANSACTION_VALUE] as Buffer),
(transaction[TRANSACTION_DATA] as Buffer)
]);
let from: bigint;
if (process.browser || native === undefined || !options.native) {
const hash = keccak('keccak256').update(toHash).digest();
// Recover and decompress the public key
const pubKey = secp256k1.recover(hash, signature, recovery, false).slice(1);
if (pubKey.length !== 64) {
throw new EthereumBlockDecoderError(
`Incorrect public key length ${pubKey.length}`);
}
from = toBigIntBE(keccak('keccak256').update(pubKey).digest().slice(-20));
if (from === undefined) {
throw new EthereumBlockDecoderError(`Failed to get from account`);
}
} else {
from = await native.recoverFromAddress(toHash, signature, recovery === 1);
}
const toBuffer = transaction[TRANSACTION_TO] as Buffer;
return {
nonce: toBigIntBE(transaction[TRANSACTION_NONCE] as Buffer),
gasPrice: toBigIntBE(transaction[TRANSACTION_GASPRICE] as Buffer),
gasLimit: toBigIntBE(transaction[TRANSACTION_STARTGAS] as Buffer),
to: toBuffer.length === 0 ? BigInt(-1) : toBigIntBE(toBuffer),
value: toBigIntBE(transaction[TRANSACTION_VALUE] as Buffer),
data: transaction[TRANSACTION_DATA] as Buffer,
from
};
}
/**
* Given a RLP-serialized list with an Ethereum block, decodes the list and
* validates the Ethereum block.
*
* @param header The RLP-encoded list with the transaction to decode.
*
* @returns A validated and decoded EthereumTransaction.
*/
export async function decodeBlock(
rlp: RlpList, options: EthereumBlockDecoderOptions = defaultOptions):
Promise<EthereumBlock> {
// Each incoming block should be an RLP list.
if (!Array.isArray(rlp)) {
throw new EthereumBlockDecoderError(`Expected RLP-encoded list!`);
}
// The RlpList should have 3 parts: the header, the transaction list and the
// uncle list.
const header: EthereumHeader = decodeHeader(rlp[0] as RlpList);
if (header.blockNumber >= defaultOptions.eip155Block) {
defaultOptions.eip155 = true;
}
const transactionPromises: Array<Promise<EthereumTransaction>> =
(rlp[1] as RlpList).map(tx => decodeTransaction(tx as RlpList, options));
const transactions: EthereumTransaction[] =
await Promise.all(transactionPromises);
const uncles: EthereumHeader[] =
(rlp[2] as RlpList).map(buf => decodeHeader(buf as RlpList));
return {header, transactions, uncles} as EthereumBlock;
}
/**
* Remove leading null bytes from a buffer.
*
* @param buf Buffer to remove null bytes from
*
* @returns A slice of the buffer without null bytes.
*/
function removeNullPrefix(buf: Buffer): Buffer {
for (let i = 0; i < buf.length; i++) {
if (buf[i] !== 0) {
return buf.slice(i);
}
}
return Buffer.from([]);
}
/**
* Encodes an Ethereum header as a RLP list
*
* @param header The Ethreum header to encode.
*
* @return A RlpList with the encoded Ethereum header.
*/
export function encodeHeaderAsRLP(header: EthereumHeader): RlpList {
const asRlpList: RlpList = [];
asRlpList[HEADER_PARENT_HASH] = toBufferBE(header.parentHash, 32);
asRlpList[HEADER_UNCLE_HASH] = toBufferBE(header.uncleHash, 32);
asRlpList[HEADER_BENEFICIARY] = toBufferBE(header.beneficiary, 20);
asRlpList[HEADER_STATE_ROOT] = toBufferBE(header.stateRoot, 32);
asRlpList[HEADER_TRANSACTIONS_ROOT] = toBufferBE(header.transactionsRoot, 32);
asRlpList[HEADER_RECEIPTS_ROOT] = toBufferBE(header.receiptsRoot, 32);
asRlpList[HEADER_LOGSBLOOM] = header.logsBloom;
asRlpList[HEADER_DIFFICULTY] =
removeNullPrefix(toBufferBE(header.difficulty, 32));
asRlpList[HEADER_BLOCK_NUMBER] =
removeNullPrefix(toBufferBE(header.blockNumber, 32));
asRlpList[HEADER_GAS_LIMIT] =
removeNullPrefix(toBufferBE(header.gasLimit, 32));
asRlpList[HEADER_GAS_USED] = removeNullPrefix(toBufferBE(header.gasUsed, 32));
asRlpList[HEADER_TIMESTAMP] =
removeNullPrefix(toBufferBE(header.timestamp, 32));
asRlpList[HEADER_EXTRADATA] = header.extraData;
asRlpList[HEADER_MIXHASH] = toBufferBE(header.mixHash, 32);
asRlpList[HEADER_NONCE] = toBufferBE(header.nonce, 8);
return asRlpList;
}
/**
* Encodes a new block. Transactions must be encoded and signed as a RLPList
*
* @param header The Ethreum header to encode.
* @param transactions Encoded, signed transactions to include
* @param uncleList A list of uncles to include
*
* @return A new RLP encoded Ethereum block.
*/
export function encodeBlock( | : EthereumHeader, transactions: RlpList,
uncleList: EthereumHeader[]): Buffer {
const asRlpList: RlpList = [
encodeHeaderAsRLP(header), transactions,
uncleList.map(uncle => encodeHeaderAsRLP(uncle))
];
return RlpEncode(asRlpList);
}
/**
* Get the public address of a given private key.
*
* @param privateKey The private key to obtain an address for. It should be a
* 256-bit bigint which cannot be 0.
* @param useNativeIfAvailable Set to false to force fallback to js-only code.
*
* @return The public address for the given private key.
*/
export function getPublicAddress(
privateKey: bigint, useNativeIfAvailable = true) {
if (process.browser || native === undefined || !useNativeIfAvailable) {
// Public address is last 20 bytes of the hashed public key (bytes 1-65)
const pubKey = secp256k1.publicKeyCreate(toBufferBE(privateKey, 32), false);
const hashed = toBigIntBE(
keccak('keccak256').update(pubKey.slice(1)).digest().slice(-20));
return hashed;
}
return native.getPublicAddress(privateKey);
}
/**
* Sign an [EthereumTransaction] using a private key.
*
* @param transaction The transaction to sign. The from field, if present, is
* ignored (it will be derived from the private key)
* @param privateKey The private key to sign the transaction with.
* @param chainId The chain id to use. 0=pre EIP-155 semantics. 1=mainnet.
* @param useNativeIfAvailable Set to false to force fallback to js-only code.
*
* @return A [RlpList] representing the transaction. Run this list through
* RlpEncode to obtain a [Buffer].
*/
export function signTransaction(
transaction: EthereumTransaction, privateKey: bigint, chainId = 1,
useNativeIfAvailable = true) {
const rlpList: RlpList = [
removeNullPrefix(toBufferBE(transaction.nonce, 32)),
removeNullPrefix(toBufferBE(transaction.gasPrice, 32)),
removeNullPrefix(toBufferBE(transaction.gasLimit, 32)),
transaction.to === CONTRACT_CREATION ? Buffer.from([]) :
toBufferBE(transaction.to, 20),
removeNullPrefix(toBufferBE(transaction.value, 32)), transaction.data
];
// EIP-155 transaction
if (chainId !== 0) {
rlpList[TRANSACTION_V] = Buffer.from([chainId]);
rlpList[TRANSACTION_R] = Buffer.from([]);
rlpList[TRANSACTION_S] = Buffer.from([]);
}
const toHash = RlpEncode(rlpList);
if (process.browser || native === undefined || !useNativeIfAvailable) {
const hash = keccak('keccak256').update(toHash).digest();
const signature = secp256k1.sign(hash, toBufferBE(privateKey, 32));
rlpList[TRANSACTION_R] = removeNullPrefix(signature.signature.slice(0, 32));
rlpList[TRANSACTION_S] =
removeNullPrefix(signature.signature.slice(32, 64));
rlpList[TRANSACTION_V] = Buffer.from(
[chainId > 0 ? signature.recovery + (chainId * 2 + 35) :
signature.recovery + 27]);
return rlpList;
} else {
const ret = native.signTransaction(toHash, privateKey, chainId, rlpList);
ret[TRANSACTION_R] = removeNullPrefix(ret[TRANSACTION_R] as Buffer);
ret[TRANSACTION_S] = removeNullPrefix(ret[TRANSACTION_S] as Buffer);
return ret;
}
} |
header | identifier_name |
ethereum-block.ts | import {toBigIntBE, toBufferBE} from 'bigint-buffer';
import {RlpEncode, RlpList} from 'rlp-stream';
import * as secp256k1 from 'secp256k1';
declare var process: {browser: boolean;};
const keccak = require('keccak');
interface NativeInterface {
recoverFromAddress(verifyBlock: Buffer, signature: Buffer, recovery: boolean):
Promise<bigint>;
getPublicAddress(privateKey: bigint): Promise<bigint>;
signTransaction(
transaction: Buffer, privateKey: bigint, chainId: number,
transactionRlp: RlpList): RlpList;
}
let native: NativeInterface;
if (!process.browser) {
try {
native = require('bindings')('block_native');
} catch (e) {
console.log(e);
console.warn(
'Native bindings loading failed, using pure JS implementation');
}
}
/** A deserialized Ethereum block. */
export interface EthereumBlock {
/** The header for the Ethereum block. */
header: EthereumHeader;
/** The transaction list for the Ethereum block. */
transactions: EthereumTransaction[];
/** A list of headers for uncles. */
uncles: EthereumHeader[];
}
export interface EthereumBlockDecoderOptions {
/** For a EIP-155 transaction, which chain to use to replace v. */
chainId: number;
/**
* For decoding a block, which block number EIP-155 semantics automatically
* applies.
*/
eip155Block: bigint;
/**
* For decoding a transaction, whether or not to use EIP-155 semantics to
* decode the transaction.
*/
eip155: boolean;
/**
* If available, use native bindings to do transaction processing.
*/
native: boolean;
}
const defaultOptions: EthereumBlockDecoderOptions = {
chainId: 1,
eip155Block: BigInt(2675000),
eip155: false,
native: true
};
export const CONTRACT_CREATION: bigint = BigInt(-1);
/** A header for an Ethereum block. */
export interface EthereumHeader {
/** The Keccak 256-bit hash of the parent block’s header, in its entirety. */
parentHash: bigint;
/** The Keccak 256-bit hash of the ommers list portion of this block. */
uncleHash: bigint;
/**
* The 160-bit address to which all fees collected from the successful mining
* of this block be transferred.
*/
beneficiary: bigint;
/**
* The Keccak 256-bit hash of the root node of the state trie, after all
* transactions are executed and finalisations applied.
*/
stateRoot: bigint;
/**
* The Keccak 256-bit hash of the root node of the trie structure populated
* with each transaction in the transactions list portion of the block.
*/
transactionsRoot: bigint;
/**
* The Keccak 256-bit hash of the root node of the trie structure populated
* with the receipts of each transaction in the transactions list portion of
* the block.
*/
receiptsRoot: bigint;
/**
* The Bloom filter composed from indexable information (logger address and
* log topics) contained in each log entry from the receipt of each
* transaction in the transactions list.
*/
logsBloom: Buffer;
/**
* A scalar value corresponding to the difficulty level of this block. This
* can be calculated from the previous block’s difficulty level and the
* timestamp.
*/
difficulty: bigint;
/**
* A scalar value equal to the number of ancestor blocks. The genesis block
* has a number of zero.
*/
blockNumber: bigint;
/**
* A scalar value equal to the current limit of gas expenditure per block.
*/
gasLimit: bigint;
/**
* A scalar value equal to the total gas used in transactions in this block.
*/
gasUsed: bigint;
/**
* A scalar value equal to the reasonable output of Unix’s time() at this
* block’s inception.
*/
timestamp: bigint;
/**
* An arbitrary byte array containing data relevant to this block. This must
* be 32 bytes or fewer.
*/
extraData: Buffer;
/**
* A 256-bit hash which proves combined with the nonce that a sufficient
* amount of computation has been carried out on this block.
*/
mixHash: bigint;
/**
* A 64-bit hash which proves combined with the mix-hash that a sufficient
* amount of computation has been carried out on this block.
*/
nonce: bigint;
}
/** The data stored in a block for a signed Ethereum transaction */
export interface EthereumTransaction {
/**
* A scalar value equal to the number of transactions sent from this address
* or, in the case of accounts with associated code, the number of
* contract-creations made by this account.
*/
nonce: bigint;
/**
* A scalar value equal to the number of Wei to be paid per unit of gas for
* all computation costs incurred as a result of the execution of this
* transaction.
*/
gasPrice: bigint;
/**
* A scalar value equal to the maximum amount of gas that should be used in
* executing this transaction.
*/
gasLimit: bigint;
/**
* A scalar value equal to the number of Wei to be transferred to the message
* call’s recipient or, in the case of contract creation, as an endowment to
* the newly created account.
*/
value: bigint;
/**
* The 160-bit address of the message call’s recipient or, for a contract
* creation transaction, CONTRACT_CREATION (-1), to distinguish against
* account 0x0000000000000000000000000000000000000000.
*/
to: bigint;
/**
* An unlimited size byte array specifying the EVM-code for the account
* initialisation procedure, for a contract transaction, or an unlimited size
* byte array specifying the input data of the message call, for a message
* call.
*/
data: Buffer;
/** The 160-bit address of the message caller. */
from: bigint;
}
export class EthereumBlockDecoderError extends Error {
constructor(message: string) {
super(message);
}
}
const HEADER_PARENT_HASH = 0;
const HEADER_UNCLE_HASH = 1;
const HEADER_BENEFICIARY = 2;
const HEADER_STATE_ROOT = 3;
const HEADER_TRANSACTIONS_ROOT = 4;
const HEADER_RECEIPTS_ROOT = 5;
const HEADER_LOGSBLOOM = 6;
const HEADER_DIFFICULTY = 7;
const HEADER_BLOCK_NUMBER = 8;
const HEADER_GAS_LIMIT = 9;
const HEADER_GAS_USED = 10;
const HEADER_TIMESTAMP = 11;
const HEADER_EXTRADATA = 12;
const HEADER_MIXHASH = 13;
const HEADER_NONCE = 14;
/**
* Given a RLP-serialized list with an Ethereum header, decodes the list and
* validates the Ethereum header.
*
* @param header The RLP-encoded list with the header to decode.
*
* @returns A validated and decoded EthereumHeader.
*/
export function decodeHeader(header: RlpList): EthereumHeader {
if (!Array.isArray(header)) {
throw new EthereumBlockDecoderError(
`Expected block header as RLP-encoded list!`);
}
return {
parentHash: toBigIntBE(header[HEADER_PARENT_HASH] as Buffer),
uncleHash: toBigIntBE(header[HEADER_UNCLE_HASH] as Buffer),
beneficiary: toBigIntBE(header[HEADER_BENEFICIARY] as Buffer),
stateRoot: toBigIntBE(header[HEADER_STATE_ROOT] as Buffer),
transactionsRoot: toBigIntBE(header[HEADER_TRANSACTIONS_ROOT] as Buffer),
receiptsRoot: toBigIntBE(header[HEADER_RECEIPTS_ROOT] as Buffer),
logsBloom: header[HEADER_LOGSBLOOM] as Buffer,
difficulty: toBigIntBE(header[HEADER_DIFFICULTY] as Buffer),
blockNumber: toBigIntBE(header[HEADER_BLOCK_NUMBER] as Buffer),
gasLimit: toBigIntBE(header[HEADER_GAS_LIMIT] as Buffer),
gasUsed: toBigIntBE(header[HEADER_GAS_USED] as Buffer),
timestamp: toBigIntBE(header[HEADER_TIMESTAMP] as Buffer), |
const TRANSACTION_NONCE = 0;
const TRANSACTION_GASPRICE = 1;
const TRANSACTION_STARTGAS = 2;
const TRANSACTION_TO = 3;
const TRANSACTION_VALUE = 4;
const TRANSACTION_DATA = 5;
const TRANSACTION_V = 6;
const TRANSACTION_R = 7;
const TRANSACTION_S = 8;
/**
* Given a RLP-serialized list with an Ethereum transaction, decodes the list
* and validates the Ethereum transaction.
*
* @param header The RLP-encoded list with the transaction to decode.
*
* @returns A validated and decoded EthereumTransaction.
*/
export async function decodeTransaction(
transaction: RlpList,
options: EthereumBlockDecoderOptions =
defaultOptions): Promise<EthereumTransaction> {
const v = transaction[TRANSACTION_V] as Buffer;
const r = transaction[TRANSACTION_R] as Buffer;
const s = transaction[TRANSACTION_S] as Buffer;
if (r.length > 32) {
throw new Error(`r > 32 bytes!`);
}
if (s.length > 32) {
throw new Error(`s > 32 bytes!`);
}
const signature = Buffer.alloc(64, 0);
r.copy(signature, 32 - r.length);
s.copy(signature, 64 - s.length);
const chainV = options.chainId * 2 + 35;
const verifySignature =
options.eip155 ? v[0] === chainV || v[0] === chainV + 1 : false;
const recovery =
verifySignature ? v[0] - (options.chainId * 2 + 8) - 27 : v[0] - 27;
if (recovery !== 0 && recovery !== 1) {
throw new EthereumBlockDecoderError(
`Invalid infinite recovery = ${recovery}`);
}
// TODO: Get existing buffer from stream instead of regenerating it.
const toHash = verifySignature ?
RlpEncode([
(transaction[TRANSACTION_NONCE] as Buffer),
(transaction[TRANSACTION_GASPRICE] as Buffer),
(transaction[TRANSACTION_STARTGAS] as Buffer),
(transaction[TRANSACTION_TO] as Buffer),
(transaction[TRANSACTION_VALUE] as Buffer),
(transaction[TRANSACTION_DATA] as Buffer),
Buffer.from([options.chainId]),
Buffer.from([]),
Buffer.from([]),
]) :
RlpEncode([
(transaction[TRANSACTION_NONCE] as Buffer),
(transaction[TRANSACTION_GASPRICE] as Buffer),
(transaction[TRANSACTION_STARTGAS] as Buffer),
(transaction[TRANSACTION_TO] as Buffer),
(transaction[TRANSACTION_VALUE] as Buffer),
(transaction[TRANSACTION_DATA] as Buffer)
]);
let from: bigint;
if (process.browser || native === undefined || !options.native) {
const hash = keccak('keccak256').update(toHash).digest();
// Recover and decompress the public key
const pubKey = secp256k1.recover(hash, signature, recovery, false).slice(1);
if (pubKey.length !== 64) {
throw new EthereumBlockDecoderError(
`Incorrect public key length ${pubKey.length}`);
}
from = toBigIntBE(keccak('keccak256').update(pubKey).digest().slice(-20));
if (from === undefined) {
throw new EthereumBlockDecoderError(`Failed to get from account`);
}
} else {
from = await native.recoverFromAddress(toHash, signature, recovery === 1);
}
const toBuffer = transaction[TRANSACTION_TO] as Buffer;
return {
nonce: toBigIntBE(transaction[TRANSACTION_NONCE] as Buffer),
gasPrice: toBigIntBE(transaction[TRANSACTION_GASPRICE] as Buffer),
gasLimit: toBigIntBE(transaction[TRANSACTION_STARTGAS] as Buffer),
to: toBuffer.length === 0 ? BigInt(-1) : toBigIntBE(toBuffer),
value: toBigIntBE(transaction[TRANSACTION_VALUE] as Buffer),
data: transaction[TRANSACTION_DATA] as Buffer,
from
};
}
/**
* Given a RLP-serialized list with an Ethereum block, decodes the list and
* validates the Ethereum block.
*
* @param header The RLP-encoded list with the transaction to decode.
*
* @returns A validated and decoded EthereumTransaction.
*/
export async function decodeBlock(
rlp: RlpList, options: EthereumBlockDecoderOptions = defaultOptions):
Promise<EthereumBlock> {
// Each incoming block should be an RLP list.
if (!Array.isArray(rlp)) {
throw new EthereumBlockDecoderError(`Expected RLP-encoded list!`);
}
// The RlpList should have 3 parts: the header, the transaction list and the
// uncle list.
const header: EthereumHeader = decodeHeader(rlp[0] as RlpList);
if (header.blockNumber >= defaultOptions.eip155Block) {
defaultOptions.eip155 = true;
}
const transactionPromises: Array<Promise<EthereumTransaction>> =
(rlp[1] as RlpList).map(tx => decodeTransaction(tx as RlpList, options));
const transactions: EthereumTransaction[] =
await Promise.all(transactionPromises);
const uncles: EthereumHeader[] =
(rlp[2] as RlpList).map(buf => decodeHeader(buf as RlpList));
return {header, transactions, uncles} as EthereumBlock;
}
/**
* Remove leading null bytes from a buffer.
*
* @param buf Buffer to remove null bytes from
*
* @returns A slice of the buffer without null bytes.
*/
function removeNullPrefix(buf: Buffer): Buffer {
for (let i = 0; i < buf.length; i++) {
if (buf[i] !== 0) {
return buf.slice(i);
}
}
return Buffer.from([]);
}
/**
* Encodes an Ethereum header as a RLP list
*
* @param header The Ethreum header to encode.
*
* @return A RlpList with the encoded Ethereum header.
*/
export function encodeHeaderAsRLP(header: EthereumHeader): RlpList {
const asRlpList: RlpList = [];
asRlpList[HEADER_PARENT_HASH] = toBufferBE(header.parentHash, 32);
asRlpList[HEADER_UNCLE_HASH] = toBufferBE(header.uncleHash, 32);
asRlpList[HEADER_BENEFICIARY] = toBufferBE(header.beneficiary, 20);
asRlpList[HEADER_STATE_ROOT] = toBufferBE(header.stateRoot, 32);
asRlpList[HEADER_TRANSACTIONS_ROOT] = toBufferBE(header.transactionsRoot, 32);
asRlpList[HEADER_RECEIPTS_ROOT] = toBufferBE(header.receiptsRoot, 32);
asRlpList[HEADER_LOGSBLOOM] = header.logsBloom;
asRlpList[HEADER_DIFFICULTY] =
removeNullPrefix(toBufferBE(header.difficulty, 32));
asRlpList[HEADER_BLOCK_NUMBER] =
removeNullPrefix(toBufferBE(header.blockNumber, 32));
asRlpList[HEADER_GAS_LIMIT] =
removeNullPrefix(toBufferBE(header.gasLimit, 32));
asRlpList[HEADER_GAS_USED] = removeNullPrefix(toBufferBE(header.gasUsed, 32));
asRlpList[HEADER_TIMESTAMP] =
removeNullPrefix(toBufferBE(header.timestamp, 32));
asRlpList[HEADER_EXTRADATA] = header.extraData;
asRlpList[HEADER_MIXHASH] = toBufferBE(header.mixHash, 32);
asRlpList[HEADER_NONCE] = toBufferBE(header.nonce, 8);
return asRlpList;
}
/**
* Encodes a new block. Transactions must be encoded and signed as a RLPList
*
* @param header The Ethreum header to encode.
* @param transactions Encoded, signed transactions to include
* @param uncleList A list of uncles to include
*
* @return A new RLP encoded Ethereum block.
*/
export function encodeBlock(
header: EthereumHeader, transactions: RlpList,
uncleList: EthereumHeader[]): Buffer {
const asRlpList: RlpList = [
encodeHeaderAsRLP(header), transactions,
uncleList.map(uncle => encodeHeaderAsRLP(uncle))
];
return RlpEncode(asRlpList);
}
/**
* Get the public address of a given private key.
*
* @param privateKey The private key to obtain an address for. It should be a
* 256-bit bigint which cannot be 0.
* @param useNativeIfAvailable Set to false to force fallback to js-only code.
*
* @return The public address for the given private key.
*/
export function getPublicAddress(
privateKey: bigint, useNativeIfAvailable = true) {
if (process.browser || native === undefined || !useNativeIfAvailable) {
// Public address is last 20 bytes of the hashed public key (bytes 1-65)
const pubKey = secp256k1.publicKeyCreate(toBufferBE(privateKey, 32), false);
const hashed = toBigIntBE(
keccak('keccak256').update(pubKey.slice(1)).digest().slice(-20));
return hashed;
}
return native.getPublicAddress(privateKey);
}
/**
* Sign an [EthereumTransaction] using a private key.
*
* @param transaction The transaction to sign. The from field, if present, is
* ignored (it will be derived from the private key)
* @param privateKey The private key to sign the transaction with.
* @param chainId The chain id to use. 0=pre EIP-155 semantics. 1=mainnet.
* @param useNativeIfAvailable Set to false to force fallback to js-only code.
*
* @return A [RlpList] representing the transaction. Run this list through
* RlpEncode to obtain a [Buffer].
*/
export function signTransaction(
transaction: EthereumTransaction, privateKey: bigint, chainId = 1,
useNativeIfAvailable = true) {
const rlpList: RlpList = [
removeNullPrefix(toBufferBE(transaction.nonce, 32)),
removeNullPrefix(toBufferBE(transaction.gasPrice, 32)),
removeNullPrefix(toBufferBE(transaction.gasLimit, 32)),
transaction.to === CONTRACT_CREATION ? Buffer.from([]) :
toBufferBE(transaction.to, 20),
removeNullPrefix(toBufferBE(transaction.value, 32)), transaction.data
];
// EIP-155 transaction
if (chainId !== 0) {
rlpList[TRANSACTION_V] = Buffer.from([chainId]);
rlpList[TRANSACTION_R] = Buffer.from([]);
rlpList[TRANSACTION_S] = Buffer.from([]);
}
const toHash = RlpEncode(rlpList);
if (process.browser || native === undefined || !useNativeIfAvailable) {
const hash = keccak('keccak256').update(toHash).digest();
const signature = secp256k1.sign(hash, toBufferBE(privateKey, 32));
rlpList[TRANSACTION_R] = removeNullPrefix(signature.signature.slice(0, 32));
rlpList[TRANSACTION_S] =
removeNullPrefix(signature.signature.slice(32, 64));
rlpList[TRANSACTION_V] = Buffer.from(
[chainId > 0 ? signature.recovery + (chainId * 2 + 35) :
signature.recovery + 27]);
return rlpList;
} else {
const ret = native.signTransaction(toHash, privateKey, chainId, rlpList);
ret[TRANSACTION_R] = removeNullPrefix(ret[TRANSACTION_R] as Buffer);
ret[TRANSACTION_S] = removeNullPrefix(ret[TRANSACTION_S] as Buffer);
return ret;
}
} | extraData: (header[HEADER_EXTRADATA] as Buffer),
mixHash: toBigIntBE(header[HEADER_MIXHASH] as Buffer),
nonce: toBigIntBE(header[HEADER_NONCE] as Buffer)
};
} | random_line_split |
ethereum-block.ts | import {toBigIntBE, toBufferBE} from 'bigint-buffer';
import {RlpEncode, RlpList} from 'rlp-stream';
import * as secp256k1 from 'secp256k1';
declare var process: {browser: boolean;};
const keccak = require('keccak');
interface NativeInterface {
recoverFromAddress(verifyBlock: Buffer, signature: Buffer, recovery: boolean):
Promise<bigint>;
getPublicAddress(privateKey: bigint): Promise<bigint>;
signTransaction(
transaction: Buffer, privateKey: bigint, chainId: number,
transactionRlp: RlpList): RlpList;
}
let native: NativeInterface;
if (!process.browser) {
try {
native = require('bindings')('block_native');
} catch (e) {
console.log(e);
console.warn(
'Native bindings loading failed, using pure JS implementation');
}
}
/** A deserialized Ethereum block. */
export interface EthereumBlock {
/** The header for the Ethereum block. */
header: EthereumHeader;
/** The transaction list for the Ethereum block. */
transactions: EthereumTransaction[];
/** A list of headers for uncles. */
uncles: EthereumHeader[];
}
export interface EthereumBlockDecoderOptions {
/** For a EIP-155 transaction, which chain to use to replace v. */
chainId: number;
/**
* For decoding a block, which block number EIP-155 semantics automatically
* applies.
*/
eip155Block: bigint;
/**
* For decoding a transaction, whether or not to use EIP-155 semantics to
* decode the transaction.
*/
eip155: boolean;
/**
* If available, use native bindings to do transaction processing.
*/
native: boolean;
}
const defaultOptions: EthereumBlockDecoderOptions = {
chainId: 1,
eip155Block: BigInt(2675000),
eip155: false,
native: true
};
export const CONTRACT_CREATION: bigint = BigInt(-1);
/** A header for an Ethereum block. */
export interface EthereumHeader {
/** The Keccak 256-bit hash of the parent block’s header, in its entirety. */
parentHash: bigint;
/** The Keccak 256-bit hash of the ommers list portion of this block. */
uncleHash: bigint;
/**
* The 160-bit address to which all fees collected from the successful mining
* of this block be transferred.
*/
beneficiary: bigint;
/**
* The Keccak 256-bit hash of the root node of the state trie, after all
* transactions are executed and finalisations applied.
*/
stateRoot: bigint;
/**
* The Keccak 256-bit hash of the root node of the trie structure populated
* with each transaction in the transactions list portion of the block.
*/
transactionsRoot: bigint;
/**
* The Keccak 256-bit hash of the root node of the trie structure populated
* with the receipts of each transaction in the transactions list portion of
* the block.
*/
receiptsRoot: bigint;
/**
* The Bloom filter composed from indexable information (logger address and
* log topics) contained in each log entry from the receipt of each
* transaction in the transactions list.
*/
logsBloom: Buffer;
/**
* A scalar value corresponding to the difficulty level of this block. This
* can be calculated from the previous block’s difficulty level and the
* timestamp.
*/
difficulty: bigint;
/**
* A scalar value equal to the number of ancestor blocks. The genesis block
* has a number of zero.
*/
blockNumber: bigint;
/**
* A scalar value equal to the current limit of gas expenditure per block.
*/
gasLimit: bigint;
/**
* A scalar value equal to the total gas used in transactions in this block.
*/
gasUsed: bigint;
/**
* A scalar value equal to the reasonable output of Unix’s time() at this
* block’s inception.
*/
timestamp: bigint;
/**
* An arbitrary byte array containing data relevant to this block. This must
* be 32 bytes or fewer.
*/
extraData: Buffer;
/**
* A 256-bit hash which proves combined with the nonce that a sufficient
* amount of computation has been carried out on this block.
*/
mixHash: bigint;
/**
* A 64-bit hash which proves combined with the mix-hash that a sufficient
* amount of computation has been carried out on this block.
*/
nonce: bigint;
}
/** The data stored in a block for a signed Ethereum transaction */
export interface EthereumTransaction {
/**
* A scalar value equal to the number of transactions sent from this address
* or, in the case of accounts with associated code, the number of
* contract-creations made by this account.
*/
nonce: bigint;
/**
* A scalar value equal to the number of Wei to be paid per unit of gas for
* all computation costs incurred as a result of the execution of this
* transaction.
*/
gasPrice: bigint;
/**
* A scalar value equal to the maximum amount of gas that should be used in
* executing this transaction.
*/
gasLimit: bigint;
/**
* A scalar value equal to the number of Wei to be transferred to the message
* call’s recipient or, in the case of contract creation, as an endowment to
* the newly created account.
*/
value: bigint;
/**
* The 160-bit address of the message call’s recipient or, for a contract
* creation transaction, CONTRACT_CREATION (-1), to distinguish against
* account 0x0000000000000000000000000000000000000000.
*/
to: bigint;
/**
* An unlimited size byte array specifying the EVM-code for the account
* initialisation procedure, for a contract transaction, or an unlimited size
* byte array specifying the input data of the message call, for a message
* call.
*/
data: Buffer;
/** The 160-bit address of the message caller. */
from: bigint;
}
export class EthereumBlockDecoderError extends Error {
constructor(message: string) {
super(message);
}
}
const HEADER_PARENT_HASH = 0;
const HEADER_UNCLE_HASH = 1;
const HEADER_BENEFICIARY = 2;
const HEADER_STATE_ROOT = 3;
const HEADER_TRANSACTIONS_ROOT = 4;
const HEADER_RECEIPTS_ROOT = 5;
const HEADER_LOGSBLOOM = 6;
const HEADER_DIFFICULTY = 7;
const HEADER_BLOCK_NUMBER = 8;
const HEADER_GAS_LIMIT = 9;
const HEADER_GAS_USED = 10;
const HEADER_TIMESTAMP = 11;
const HEADER_EXTRADATA = 12;
const HEADER_MIXHASH = 13;
const HEADER_NONCE = 14;
/**
* Given a RLP-serialized list with an Ethereum header, decodes the list and
* validates the Ethereum header.
*
* @param header The RLP-encoded list with the header to decode.
*
* @returns A validated and decoded EthereumHeader.
*/
export function decodeHeader(header: RlpList): EthereumHeader {
if (!Array.isArray(header)) {
throw new EthereumBlockDecoderError(
`Expected block header as RLP-encoded list!`);
}
return {
parentHash: toBigIntBE(header[HEADER_PARENT_HASH] as Buffer),
uncleHash: toBigIntBE(header[HEADER_UNCLE_HASH] as Buffer),
beneficiary: toBigIntBE(header[HEADER_BENEFICIARY] as Buffer),
stateRoot: toBigIntBE(header[HEADER_STATE_ROOT] as Buffer),
transactionsRoot: toBigIntBE(header[HEADER_TRANSACTIONS_ROOT] as Buffer),
receiptsRoot: toBigIntBE(header[HEADER_RECEIPTS_ROOT] as Buffer),
logsBloom: header[HEADER_LOGSBLOOM] as Buffer,
difficulty: toBigIntBE(header[HEADER_DIFFICULTY] as Buffer),
blockNumber: toBigIntBE(header[HEADER_BLOCK_NUMBER] as Buffer),
gasLimit: toBigIntBE(header[HEADER_GAS_LIMIT] as Buffer),
gasUsed: toBigIntBE(header[HEADER_GAS_USED] as Buffer),
timestamp: toBigIntBE(header[HEADER_TIMESTAMP] as Buffer),
extraData: (header[HEADER_EXTRADATA] as Buffer),
mixHash: toBigIntBE(header[HEADER_MIXHASH] as Buffer),
nonce: toBigIntBE(header[HEADER_NONCE] as Buffer)
};
}
const TRANSACTION_NONCE = 0;
const TRANSACTION_GASPRICE = 1;
const TRANSACTION_STARTGAS = 2;
const TRANSACTION_TO = 3;
const TRANSACTION_VALUE = 4;
const TRANSACTION_DATA = 5;
const TRANSACTION_V = 6;
const TRANSACTION_R = 7;
const TRANSACTION_S = 8;
/**
* Given a RLP-serialized list with an Ethereum transaction, decodes the list
* and validates the Ethereum transaction.
*
* @param header The RLP-encoded list with the transaction to decode.
*
* @returns A validated and decoded EthereumTransaction.
*/
export async function decodeTransaction(
transaction: RlpList,
options: EthereumBlockDecoderOptions =
defaultOptions): Promise<EthereumTransaction> {
const v = transaction[TRANSACTION_V] as Buffer;
const r = transaction[TRANSACTION_R] as Buffer;
const s = transaction[TRANSACTION_S] as Buffer;
if (r.length > 32) {
throw new Error(`r > 32 bytes!`);
}
if (s.length > 32) {
throw new Error(`s > 32 bytes!`);
}
const signature = Buffer.alloc(64, 0);
r.copy(signature, 32 - r.length);
s.copy(signature, 64 - s.length);
const chainV = options.chainId * 2 + 35;
const verifySignature =
options.eip155 ? v[0] === chainV || v[0] === chainV + 1 : false;
const recovery =
verifySignature ? v[0] - (options.chainId * 2 + 8) - 27 : v[0] - 27;
if (recovery !== 0 && recovery !== 1) {
throw new EthereumBlockDecoderError(
`Invalid infinite recovery = ${recovery}`);
}
// TODO: Get existing buffer from stream instead of regenerating it.
const toHash = verifySignature ?
RlpEncode([
(transaction[TRANSACTION_NONCE] as Buffer),
(transaction[TRANSACTION_GASPRICE] as Buffer),
(transaction[TRANSACTION_STARTGAS] as Buffer),
(transaction[TRANSACTION_TO] as Buffer),
(transaction[TRANSACTION_VALUE] as Buffer),
(transaction[TRANSACTION_DATA] as Buffer),
Buffer.from([options.chainId]),
Buffer.from([]),
Buffer.from([]),
]) :
RlpEncode([
(transaction[TRANSACTION_NONCE] as Buffer),
(transaction[TRANSACTION_GASPRICE] as Buffer),
(transaction[TRANSACTION_STARTGAS] as Buffer),
(transaction[TRANSACTION_TO] as Buffer),
(transaction[TRANSACTION_VALUE] as Buffer),
(transaction[TRANSACTION_DATA] as Buffer)
]);
let from: bigint;
if (process.browser || native === undefined || !options.native) {
const hash = keccak('keccak256').update(toHash).digest();
// Recover and decompress the public key
const pubKey = secp256k1.recover(hash, signature, recovery, false).slice(1);
if (pubKey.length !== 64) {
throw new EthereumBlockDecoderError(
`Incorrect public key length ${pubKey.length}`);
}
from = toBigIntBE(keccak('keccak256').update(pubKey).digest().slice(-20));
if (from === undefined) {
throw new EthereumBlockDecoderError(`Failed to get from account`);
}
} else {
from = await native.recoverFromAddress(toHash, signature, recovery === 1);
}
const toBuffer = transaction[TRANSACTION_TO] as Buffer;
return {
nonce: toBigIntBE(transaction[TRANSACTION_NONCE] as Buffer),
gasPrice: toBigIntBE(transaction[TRANSACTION_GASPRICE] as Buffer),
gasLimit: toBigIntBE(transaction[TRANSACTION_STARTGAS] as Buffer),
to: toBuffer.length === 0 ? BigInt(-1) : toBigIntBE(toBuffer),
value: toBigIntBE(transaction[TRANSACTION_VALUE] as Buffer),
data: transaction[TRANSACTION_DATA] as Buffer,
from
};
}
/**
* Given a RLP-serialized list with an Ethereum block, decodes the list and
* validates the Ethereum block.
*
* @param header The RLP-encoded list with the transaction to decode.
*
* @returns A validated and decoded EthereumTransaction.
*/
export async function decodeBlock(
rlp: RlpList, options: EthereumBlockDecoderOptions = defaultOptions):
Promise<EthereumBlock> {
// Each incoming block should be an RLP list.
if (!Array.isArray(rlp)) {
throw new EthereumBlockDecoderError(`Expected RLP-encoded list!`);
}
// The RlpList should have 3 parts: the header, the transaction list and the
// uncle list.
const header: EthereumHeader = decodeHeader(rlp[0] as RlpList);
if (header.blockNumber >= defaultOptions.eip155Block) {
defaultOptions.eip155 = true;
}
const transactionPromises: Array<Promise<EthereumTransaction>> =
(rlp[1] as RlpList).map(tx => decodeTransaction(tx as RlpList, options));
const transactions: EthereumTransaction[] =
await Promise.all(transactionPromises);
const uncles: EthereumHeader[] =
(rlp[2] as RlpList).map(buf => decodeHeader(buf as RlpList));
return {header, transactions, uncles} as EthereumBlock;
}
/**
* Remove leading null bytes from a buffer.
*
* @param buf Buffer to remove null bytes from
*
* @returns A slice of the buffer without null bytes.
*/
function removeNullPrefix(buf: Buffer): Buffer {
for (let i = 0; i < buf.length; i++) {
if (buf[i] !== 0) {
return buf.slice(i);
}
}
return Buffer.from([]);
}
/**
* Encodes an Ethereum header as a RLP list
*
* @param header The Ethreum header to encode.
*
* @return A RlpList with the encoded Ethereum header.
*/
export function encodeHeaderAsRLP(header: EthereumHeader): RlpList {
const asRlpList: RlpList = [];
asRlpList[HEADER_PARENT_HASH] = toBufferBE(header.parentHash, 32);
asRlpList[HEADER_UNCLE_HASH] = toBufferBE(header.uncleHash, 32);
asRlpList[HEADER_BENEFICIARY] = toBufferBE(header.beneficiary, 20);
asRlpList[HEADER_STATE_ROOT] = toBufferBE(header.stateRoot, 32);
asRlpList[HEADER_TRANSACTIONS_ROOT] = toBufferBE(header.transactionsRoot, 32);
asRlpList[HEADER_RECEIPTS_ROOT] = toBufferBE(header.receiptsRoot, 32);
asRlpList[HEADER_LOGSBLOOM] = header.logsBloom;
asRlpList[HEADER_DIFFICULTY] =
removeNullPrefix(toBufferBE(header.difficulty, 32));
asRlpList[HEADER_BLOCK_NUMBER] =
removeNullPrefix(toBufferBE(header.blockNumber, 32));
asRlpList[HEADER_GAS_LIMIT] =
removeNullPrefix(toBufferBE(header.gasLimit, 32));
asRlpList[HEADER_GAS_USED] = removeNullPrefix(toBufferBE(header.gasUsed, 32));
asRlpList[HEADER_TIMESTAMP] =
removeNullPrefix(toBufferBE(header.timestamp, 32));
asRlpList[HEADER_EXTRADATA] = header.extraData;
asRlpList[HEADER_MIXHASH] = toBufferBE(header.mixHash, 32);
asRlpList[HEADER_NONCE] = toBufferBE(header.nonce, 8);
return asRlpList;
}
/**
* Encodes a new block. Transactions must be encoded and signed as a RLPList
*
* @param header The Ethreum header to encode.
* @param transactions Encoded, signed transactions to include
* @param uncleList A list of uncles to include
*
* @return A new RLP encoded Ethereum block.
*/
export function encodeBlock(
header: EthereumHeader, transactions: RlpList,
uncleList: EthereumHeader[]): Buffer {
const asRlpList: RlpList = [
encodeHeaderAsRLP(header), transactions,
uncleList.map(uncle => encodeHeaderAsRLP(uncle))
];
return RlpEncode(asRlpList);
}
/**
* Get the public address of a given private key.
*
* @param privateKey The private key to obtain an address for. It should be a
* 256-bit bigint which cannot be 0.
* @param useNativeIfAvailable Set to false to force fallback to js-only code.
*
* @return The public address for the given private key.
*/
export function getPublicAddress(
privateKey: bigint, useNativeIfAvailable = true) {
if (process.browser || native === undefined || !useNativeIfAvailable) {
// Public address is last 20 bytes of the hashed public key (bytes 1-65)
const pubKey = secp256k1.publicKeyCreate(toBufferBE(privateKey, 32), false);
const hashed = toBigIntBE(
keccak('keccak256').update(pubKey.slice(1)).digest().slice(-20));
return hashed;
}
return native.getPublicAddress(privateKey);
}
/**
* Sign an [EthereumTransaction] using a private key.
*
* @param transaction The transaction to sign. The from field, if present, is
* ignored (it will be derived from the private key)
* @param privateKey The private key to sign the transaction with.
* @param chainId The chain id to use. 0=pre EIP-155 semantics. 1=mainnet.
* @param useNativeIfAvailable Set to false to force fallback to js-only code.
*
* @return A [RlpList] representing the transaction. Run this list through
* RlpEncode to obtain a [Buffer].
*/
export function signTransaction(
transaction: EthereumTransaction, privateKey: bigint, chainId = 1,
useNativeIfAvailable = true) {
const rl | pList: RlpList = [
removeNullPrefix(toBufferBE(transaction.nonce, 32)),
removeNullPrefix(toBufferBE(transaction.gasPrice, 32)),
removeNullPrefix(toBufferBE(transaction.gasLimit, 32)),
transaction.to === CONTRACT_CREATION ? Buffer.from([]) :
toBufferBE(transaction.to, 20),
removeNullPrefix(toBufferBE(transaction.value, 32)), transaction.data
];
// EIP-155 transaction
if (chainId !== 0) {
rlpList[TRANSACTION_V] = Buffer.from([chainId]);
rlpList[TRANSACTION_R] = Buffer.from([]);
rlpList[TRANSACTION_S] = Buffer.from([]);
}
const toHash = RlpEncode(rlpList);
if (process.browser || native === undefined || !useNativeIfAvailable) {
const hash = keccak('keccak256').update(toHash).digest();
const signature = secp256k1.sign(hash, toBufferBE(privateKey, 32));
rlpList[TRANSACTION_R] = removeNullPrefix(signature.signature.slice(0, 32));
rlpList[TRANSACTION_S] =
removeNullPrefix(signature.signature.slice(32, 64));
rlpList[TRANSACTION_V] = Buffer.from(
[chainId > 0 ? signature.recovery + (chainId * 2 + 35) :
signature.recovery + 27]);
return rlpList;
} else {
const ret = native.signTransaction(toHash, privateKey, chainId, rlpList);
ret[TRANSACTION_R] = removeNullPrefix(ret[TRANSACTION_R] as Buffer);
ret[TRANSACTION_S] = removeNullPrefix(ret[TRANSACTION_S] as Buffer);
return ret;
}
} | identifier_body | |
test_service.py | #!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import pytest
from tests import testlib
import unittest
import splunklib.client as client
from splunklib.client import AuthenticationError
from splunklib.client import Service
from splunklib.binding import HTTPError
class ServiceTestCase(testlib.SDKTestCase):
def test_autologin(self):
service = client.connect(autologin=True, **self.opts.kwargs)
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_capabilities(self):
capabilities = self.service.capabilities
self.assertTrue(isinstance(capabilities, list))
self.assertTrue(all([isinstance(c, str) for c in capabilities]))
self.assertTrue('change_own_password' in capabilities) # This should always be there...
def test_info(self):
info = self.service.info
keys = ["build", "cpu_arch", "guid", "isFree", "isTrial", "licenseKeys",
"licenseSignature", "licenseState", "master_guid", "mode",
"os_build", "os_name", "os_version", "serverName", "version"]
for key in keys:
self.assertTrue(key in list(info.keys()))
def test_info_with_namespace(self):
# Make sure we're not accessing /servicesNS/admin/search/server/info
# instead of /services/server/info
# Backup the values, which are probably set to None
owner, app = self.service.namespace["owner"], self.service.namespace["app"]
self.service.namespace["owner"] = self.service.username
self.service.namespace["app"] = "search"
try:
self.assertEqual(self.service.info.licenseState, 'OK')
except HTTPError as he:
self.fail("Couldn't get the server info, probably got a 403! %s" % he.message)
self.service.namespace["owner"] = owner
self.service.namespace["app"] = app
def test_without_namespace(self):
service = client.connect(**self.opts.kwargs)
service.apps.list()
def test_app_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': None})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_owner_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': "-"})
service_ns = client.connect(**kwargs)
service_ns.apps.list() | def test_default_app(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': None, 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_app_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "-", 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_user_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_parse(self):
# At the moment the parse method returns the raw XML. At
# some point this will change and it will return a nice,
# objectified form of the results, but for now there's
# nothing to test but a good response code.
response = self.service.parse('search * abc="def" | dedup abc')
self.assertEqual(response.status, 200)
def test_parse_fail(self):
try:
self.service.parse("xyzzy")
self.fail('Parse on nonsense did not fail')
except HTTPError as e:
self.assertEqual(e.status, 400)
def test_restart(self):
service = client.connect(**self.opts.kwargs)
self.service.restart(timeout=300)
service.login() # Make sure we are awake
def test_read_outputs_with_type(self):
name = testlib.tmpname()
service = client.connect(**self.opts.kwargs)
service.post('data/outputs/tcp/syslog', name=name, type='tcp')
entity = client.Entity(service, 'data/outputs/tcp/syslog/' + name)
self.assertTrue('tcp', entity.content.type)
if service.restart_required:
self.restartSplunk()
service = client.connect(**self.opts.kwargs)
client.Entity(service, 'data/outputs/tcp/syslog/' + name).delete()
if service.restart_required:
self.restartSplunk()
def test_splunk_version(self):
service = client.connect(**self.opts.kwargs)
v = service.splunk_version
self.assertTrue(isinstance(v, tuple))
self.assertTrue(len(v) >= 2)
for p in v:
self.assertTrue(isinstance(p, int) and p >= 0)
for version in [(4, 3, 3), (5,), (5, 0, 1)]:
with self.fake_splunk_version(version):
self.assertEqual(version, self.service.splunk_version)
def test_query_without_login_raises_auth_error(self):
service = self._create_unauthenticated_service()
self.assertRaises(AuthenticationError, lambda: service.indexes.list())
# This behavior is needed for backward compatibility for code
# prior to the introduction of AuthenticationError
def test_query_without_login_raises_http_401(self):
service = self._create_unauthenticated_service()
try:
service.indexes.list()
self.fail('Expected HTTP 401.')
except HTTPError as he:
if he.status == 401:
# Good
pass
else:
raise
def _create_unauthenticated_service(self):
return Service(**{
'host': self.opts.kwargs['host'],
'port': self.opts.kwargs['port'],
'scheme': self.opts.kwargs['scheme']
})
# To check the HEC event endpoint using Endpoint instance
@pytest.mark.smoke
def test_hec_event(self):
import json
service_hec = client.connect(host='localhost', scheme='https', port=8088,
token="11111111-1111-1111-1111-1111111111113")
event_collector_endpoint = client.Endpoint(service_hec, "/services/collector/event")
msg = {"index": "main", "event": "Hello World"}
response = event_collector_endpoint.post("", body=json.dumps(msg))
self.assertEqual(response.status, 200)
class TestCookieAuthentication(unittest.TestCase):
def setUp(self):
self.opts = testlib.parse([], {}, ".env")
self.service = client.Service(**self.opts.kwargs)
if getattr(unittest.TestCase, 'assertIsNotNone', None) is None:
def assertIsNotNone(self, obj, msg=None):
if obj is None:
raise self.failureException(msg or '%r is not None' % obj)
def test_login_and_store_cookie(self):
self.assertIsNotNone(self.service.get_cookies())
self.assertEqual(len(self.service.get_cookies()), 0)
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
self.assertNotEqual(self.service.get_cookies(), {})
self.assertEqual(len(self.service.get_cookies()), 1)
def test_login_with_cookie(self):
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
# Use the cookie from the other service as the only auth param (don't need user/password)
service2 = client.Service(**{"cookie": "%s=%s" % list(self.service.get_cookies().items())[0]})
service2.login()
self.assertEqual(len(service2.get_cookies()), 1)
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), len(self.service.get_cookies()))
self.assertEqual(list(service2.get_cookies().keys())[0][:8], "splunkd_")
self.assertEqual(service2.apps.get().status, 200)
def test_login_fails_with_bad_cookie(self):
bad_cookie = {'bad': 'cookie'}
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
service2.get_cookies().update(bad_cookie)
self.assertEqual(service2.get_cookies(), {'bad': 'cookie'})
# Should get an error with a bad cookie
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_autologin_with_cookie(self):
self.service.login()
self.assertTrue(self.service.has_cookies())
service = client.connect(
autologin=True,
cookie="%s=%s" % list(self.service.get_cookies().items())[0],
**self.opts.kwargs)
self.assertTrue(service.has_cookies())
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_login_fails_with_no_cookie(self):
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
# Should get an error when no authentication method
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_login_with_multiple_cookie_headers(self):
cookies = {
'bad': 'cookie',
'something_else': 'bad'
}
self.service.logout()
self.service.get_cookies().update(cookies)
self.service.login()
self.assertEqual(self.service.apps.get().status, 200)
def test_login_with_multiple_cookies(self):
bad_cookie = 'bad=cookie'
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
service2 = client.Service(**{"cookie": bad_cookie})
# Should get an error with a bad cookie
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
# Add on valid cookies, and try to use all of them
service2.get_cookies().update(self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.service.get_cookies().update({'bad': 'cookie'})
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.assertTrue([cookie for cookie in service2.get_cookies() if "splunkd_" in cookie])
self.assertTrue('bad' in service2.get_cookies())
self.assertEqual(service2.get_cookies()['bad'], 'cookie')
self.assertEqual(set(self.service.get_cookies()), set(service2.get_cookies()))
service2.login()
self.assertEqual(service2.apps.get().status, 200)
class TestSettings(testlib.SDKTestCase):
def test_read_settings(self):
settings = self.service.settings
# Verify that settings contains the keys we expect
keys = [
"SPLUNK_DB", "SPLUNK_HOME", "enableSplunkWebSSL", "host",
"httpport", "mgmtHostPort", "minFreeSpace", "pass4SymmKey",
"serverName", "sessionTimeout", "startwebserver", "trustedIP"
]
for key in keys:
self.assertTrue(key in settings)
def test_update_settings(self):
settings = self.service.settings
# Verify that we can update the settings
original = settings['sessionTimeout']
self.assertTrue(original != "42h")
settings.update(sessionTimeout="42h")
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, "42h")
# Restore (and verify) original value
settings.update(sessionTimeout=original)
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, original)
self.restartSplunk()
class TestTrailing(unittest.TestCase):
template = '/servicesNS/boris/search/another/path/segment/that runs on'
def test_raises_when_not_found_first(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 'boris')
def test_raises_when_not_found_second(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 's is', 'boris')
def test_no_args_is_identity(self):
self.assertEqual(self.template, client._trailing(self.template))
def test_trailing_with_one_arg_works(self):
self.assertEqual('boris/search/another/path/segment/that runs on',
client._trailing(self.template, 'ervicesNS/'))
def test_trailing_with_n_args_works(self):
self.assertEqual(
'another/path/segment/that runs on',
client._trailing(self.template, 'servicesNS/', '/', '/')
)
class TestEntityNamespacing(testlib.SDKTestCase):
def test_proper_namespace_with_arguments(self):
entity = self.service.apps['search']
self.assertEqual((None, None, "global"), entity._proper_namespace(sharing="global"))
self.assertEqual((None, "search", "app"), entity._proper_namespace(sharing="app", app="search"))
self.assertEqual(
("admin", "search", "user"),
entity._proper_namespace(sharing="user", app="search", owner="admin")
)
def test_proper_namespace_with_entity_namespace(self):
entity = self.service.apps['search']
namespace = (entity.access.owner, entity.access.app, entity.access.sharing)
self.assertEqual(namespace, entity._proper_namespace())
def test_proper_namespace_with_service_namespace(self):
entity = client.Entity(self.service, client.PATH_APPS + "search")
del entity._state['access']
namespace = (self.service.namespace.owner,
self.service.namespace.app,
self.service.namespace.sharing)
self.assertEqual(namespace, entity._proper_namespace())
if __name__ == "__main__":
try:
import unittest2 as unittest
except ImportError:
import unittest
unittest.main() | random_line_split | |
test_service.py | #!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import pytest
from tests import testlib
import unittest
import splunklib.client as client
from splunklib.client import AuthenticationError
from splunklib.client import Service
from splunklib.binding import HTTPError
class ServiceTestCase(testlib.SDKTestCase):
def test_autologin(self):
service = client.connect(autologin=True, **self.opts.kwargs)
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_capabilities(self):
capabilities = self.service.capabilities
self.assertTrue(isinstance(capabilities, list))
self.assertTrue(all([isinstance(c, str) for c in capabilities]))
self.assertTrue('change_own_password' in capabilities) # This should always be there...
def test_info(self):
info = self.service.info
keys = ["build", "cpu_arch", "guid", "isFree", "isTrial", "licenseKeys",
"licenseSignature", "licenseState", "master_guid", "mode",
"os_build", "os_name", "os_version", "serverName", "version"]
for key in keys:
self.assertTrue(key in list(info.keys()))
def test_info_with_namespace(self):
# Make sure we're not accessing /servicesNS/admin/search/server/info
# instead of /services/server/info
# Backup the values, which are probably set to None
owner, app = self.service.namespace["owner"], self.service.namespace["app"]
self.service.namespace["owner"] = self.service.username
self.service.namespace["app"] = "search"
try:
self.assertEqual(self.service.info.licenseState, 'OK')
except HTTPError as he:
self.fail("Couldn't get the server info, probably got a 403! %s" % he.message)
self.service.namespace["owner"] = owner
self.service.namespace["app"] = app
def test_without_namespace(self):
service = client.connect(**self.opts.kwargs)
service.apps.list()
def | (self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': None})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_owner_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': "-"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_default_app(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': None, 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_app_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "-", 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_user_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_parse(self):
# At the moment the parse method returns the raw XML. At
# some point this will change and it will return a nice,
# objectified form of the results, but for now there's
# nothing to test but a good response code.
response = self.service.parse('search * abc="def" | dedup abc')
self.assertEqual(response.status, 200)
def test_parse_fail(self):
try:
self.service.parse("xyzzy")
self.fail('Parse on nonsense did not fail')
except HTTPError as e:
self.assertEqual(e.status, 400)
def test_restart(self):
service = client.connect(**self.opts.kwargs)
self.service.restart(timeout=300)
service.login() # Make sure we are awake
def test_read_outputs_with_type(self):
name = testlib.tmpname()
service = client.connect(**self.opts.kwargs)
service.post('data/outputs/tcp/syslog', name=name, type='tcp')
entity = client.Entity(service, 'data/outputs/tcp/syslog/' + name)
self.assertTrue('tcp', entity.content.type)
if service.restart_required:
self.restartSplunk()
service = client.connect(**self.opts.kwargs)
client.Entity(service, 'data/outputs/tcp/syslog/' + name).delete()
if service.restart_required:
self.restartSplunk()
def test_splunk_version(self):
service = client.connect(**self.opts.kwargs)
v = service.splunk_version
self.assertTrue(isinstance(v, tuple))
self.assertTrue(len(v) >= 2)
for p in v:
self.assertTrue(isinstance(p, int) and p >= 0)
for version in [(4, 3, 3), (5,), (5, 0, 1)]:
with self.fake_splunk_version(version):
self.assertEqual(version, self.service.splunk_version)
def test_query_without_login_raises_auth_error(self):
service = self._create_unauthenticated_service()
self.assertRaises(AuthenticationError, lambda: service.indexes.list())
# This behavior is needed for backward compatibility for code
# prior to the introduction of AuthenticationError
def test_query_without_login_raises_http_401(self):
service = self._create_unauthenticated_service()
try:
service.indexes.list()
self.fail('Expected HTTP 401.')
except HTTPError as he:
if he.status == 401:
# Good
pass
else:
raise
def _create_unauthenticated_service(self):
return Service(**{
'host': self.opts.kwargs['host'],
'port': self.opts.kwargs['port'],
'scheme': self.opts.kwargs['scheme']
})
# To check the HEC event endpoint using Endpoint instance
@pytest.mark.smoke
def test_hec_event(self):
import json
service_hec = client.connect(host='localhost', scheme='https', port=8088,
token="11111111-1111-1111-1111-1111111111113")
event_collector_endpoint = client.Endpoint(service_hec, "/services/collector/event")
msg = {"index": "main", "event": "Hello World"}
response = event_collector_endpoint.post("", body=json.dumps(msg))
self.assertEqual(response.status, 200)
class TestCookieAuthentication(unittest.TestCase):
def setUp(self):
self.opts = testlib.parse([], {}, ".env")
self.service = client.Service(**self.opts.kwargs)
if getattr(unittest.TestCase, 'assertIsNotNone', None) is None:
def assertIsNotNone(self, obj, msg=None):
if obj is None:
raise self.failureException(msg or '%r is not None' % obj)
def test_login_and_store_cookie(self):
self.assertIsNotNone(self.service.get_cookies())
self.assertEqual(len(self.service.get_cookies()), 0)
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
self.assertNotEqual(self.service.get_cookies(), {})
self.assertEqual(len(self.service.get_cookies()), 1)
def test_login_with_cookie(self):
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
# Use the cookie from the other service as the only auth param (don't need user/password)
service2 = client.Service(**{"cookie": "%s=%s" % list(self.service.get_cookies().items())[0]})
service2.login()
self.assertEqual(len(service2.get_cookies()), 1)
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), len(self.service.get_cookies()))
self.assertEqual(list(service2.get_cookies().keys())[0][:8], "splunkd_")
self.assertEqual(service2.apps.get().status, 200)
def test_login_fails_with_bad_cookie(self):
bad_cookie = {'bad': 'cookie'}
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
service2.get_cookies().update(bad_cookie)
self.assertEqual(service2.get_cookies(), {'bad': 'cookie'})
# Should get an error with a bad cookie
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_autologin_with_cookie(self):
self.service.login()
self.assertTrue(self.service.has_cookies())
service = client.connect(
autologin=True,
cookie="%s=%s" % list(self.service.get_cookies().items())[0],
**self.opts.kwargs)
self.assertTrue(service.has_cookies())
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_login_fails_with_no_cookie(self):
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
# Should get an error when no authentication method
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_login_with_multiple_cookie_headers(self):
cookies = {
'bad': 'cookie',
'something_else': 'bad'
}
self.service.logout()
self.service.get_cookies().update(cookies)
self.service.login()
self.assertEqual(self.service.apps.get().status, 200)
def test_login_with_multiple_cookies(self):
bad_cookie = 'bad=cookie'
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
service2 = client.Service(**{"cookie": bad_cookie})
# Should get an error with a bad cookie
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
# Add on valid cookies, and try to use all of them
service2.get_cookies().update(self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.service.get_cookies().update({'bad': 'cookie'})
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.assertTrue([cookie for cookie in service2.get_cookies() if "splunkd_" in cookie])
self.assertTrue('bad' in service2.get_cookies())
self.assertEqual(service2.get_cookies()['bad'], 'cookie')
self.assertEqual(set(self.service.get_cookies()), set(service2.get_cookies()))
service2.login()
self.assertEqual(service2.apps.get().status, 200)
class TestSettings(testlib.SDKTestCase):
def test_read_settings(self):
settings = self.service.settings
# Verify that settings contains the keys we expect
keys = [
"SPLUNK_DB", "SPLUNK_HOME", "enableSplunkWebSSL", "host",
"httpport", "mgmtHostPort", "minFreeSpace", "pass4SymmKey",
"serverName", "sessionTimeout", "startwebserver", "trustedIP"
]
for key in keys:
self.assertTrue(key in settings)
def test_update_settings(self):
settings = self.service.settings
# Verify that we can update the settings
original = settings['sessionTimeout']
self.assertTrue(original != "42h")
settings.update(sessionTimeout="42h")
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, "42h")
# Restore (and verify) original value
settings.update(sessionTimeout=original)
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, original)
self.restartSplunk()
class TestTrailing(unittest.TestCase):
template = '/servicesNS/boris/search/another/path/segment/that runs on'
def test_raises_when_not_found_first(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 'boris')
def test_raises_when_not_found_second(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 's is', 'boris')
def test_no_args_is_identity(self):
self.assertEqual(self.template, client._trailing(self.template))
def test_trailing_with_one_arg_works(self):
self.assertEqual('boris/search/another/path/segment/that runs on',
client._trailing(self.template, 'ervicesNS/'))
def test_trailing_with_n_args_works(self):
self.assertEqual(
'another/path/segment/that runs on',
client._trailing(self.template, 'servicesNS/', '/', '/')
)
class TestEntityNamespacing(testlib.SDKTestCase):
def test_proper_namespace_with_arguments(self):
entity = self.service.apps['search']
self.assertEqual((None, None, "global"), entity._proper_namespace(sharing="global"))
self.assertEqual((None, "search", "app"), entity._proper_namespace(sharing="app", app="search"))
self.assertEqual(
("admin", "search", "user"),
entity._proper_namespace(sharing="user", app="search", owner="admin")
)
def test_proper_namespace_with_entity_namespace(self):
entity = self.service.apps['search']
namespace = (entity.access.owner, entity.access.app, entity.access.sharing)
self.assertEqual(namespace, entity._proper_namespace())
def test_proper_namespace_with_service_namespace(self):
entity = client.Entity(self.service, client.PATH_APPS + "search")
del entity._state['access']
namespace = (self.service.namespace.owner,
self.service.namespace.app,
self.service.namespace.sharing)
self.assertEqual(namespace, entity._proper_namespace())
if __name__ == "__main__":
try:
import unittest2 as unittest
except ImportError:
import unittest
unittest.main()
| test_app_namespace | identifier_name |
test_service.py | #!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import pytest
from tests import testlib
import unittest
import splunklib.client as client
from splunklib.client import AuthenticationError
from splunklib.client import Service
from splunklib.binding import HTTPError
class ServiceTestCase(testlib.SDKTestCase):
def test_autologin(self):
service = client.connect(autologin=True, **self.opts.kwargs)
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_capabilities(self):
capabilities = self.service.capabilities
self.assertTrue(isinstance(capabilities, list))
self.assertTrue(all([isinstance(c, str) for c in capabilities]))
self.assertTrue('change_own_password' in capabilities) # This should always be there...
def test_info(self):
info = self.service.info
keys = ["build", "cpu_arch", "guid", "isFree", "isTrial", "licenseKeys",
"licenseSignature", "licenseState", "master_guid", "mode",
"os_build", "os_name", "os_version", "serverName", "version"]
for key in keys:
self.assertTrue(key in list(info.keys()))
def test_info_with_namespace(self):
# Make sure we're not accessing /servicesNS/admin/search/server/info
# instead of /services/server/info
# Backup the values, which are probably set to None
owner, app = self.service.namespace["owner"], self.service.namespace["app"]
self.service.namespace["owner"] = self.service.username
self.service.namespace["app"] = "search"
try:
self.assertEqual(self.service.info.licenseState, 'OK')
except HTTPError as he:
self.fail("Couldn't get the server info, probably got a 403! %s" % he.message)
self.service.namespace["owner"] = owner
self.service.namespace["app"] = app
def test_without_namespace(self):
service = client.connect(**self.opts.kwargs)
service.apps.list()
def test_app_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': None})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_owner_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': "-"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_default_app(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': None, 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_app_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "-", 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_user_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_parse(self):
# At the moment the parse method returns the raw XML. At
# some point this will change and it will return a nice,
# objectified form of the results, but for now there's
# nothing to test but a good response code.
response = self.service.parse('search * abc="def" | dedup abc')
self.assertEqual(response.status, 200)
def test_parse_fail(self):
try:
self.service.parse("xyzzy")
self.fail('Parse on nonsense did not fail')
except HTTPError as e:
self.assertEqual(e.status, 400)
def test_restart(self):
service = client.connect(**self.opts.kwargs)
self.service.restart(timeout=300)
service.login() # Make sure we are awake
def test_read_outputs_with_type(self):
name = testlib.tmpname()
service = client.connect(**self.opts.kwargs)
service.post('data/outputs/tcp/syslog', name=name, type='tcp')
entity = client.Entity(service, 'data/outputs/tcp/syslog/' + name)
self.assertTrue('tcp', entity.content.type)
if service.restart_required:
self.restartSplunk()
service = client.connect(**self.opts.kwargs)
client.Entity(service, 'data/outputs/tcp/syslog/' + name).delete()
if service.restart_required:
self.restartSplunk()
def test_splunk_version(self):
service = client.connect(**self.opts.kwargs)
v = service.splunk_version
self.assertTrue(isinstance(v, tuple))
self.assertTrue(len(v) >= 2)
for p in v:
self.assertTrue(isinstance(p, int) and p >= 0)
for version in [(4, 3, 3), (5,), (5, 0, 1)]:
with self.fake_splunk_version(version):
self.assertEqual(version, self.service.splunk_version)
def test_query_without_login_raises_auth_error(self):
service = self._create_unauthenticated_service()
self.assertRaises(AuthenticationError, lambda: service.indexes.list())
# This behavior is needed for backward compatibility for code
# prior to the introduction of AuthenticationError
def test_query_without_login_raises_http_401(self):
service = self._create_unauthenticated_service()
try:
service.indexes.list()
self.fail('Expected HTTP 401.')
except HTTPError as he:
if he.status == 401:
# Good
pass
else:
|
def _create_unauthenticated_service(self):
return Service(**{
'host': self.opts.kwargs['host'],
'port': self.opts.kwargs['port'],
'scheme': self.opts.kwargs['scheme']
})
# To check the HEC event endpoint using Endpoint instance
@pytest.mark.smoke
def test_hec_event(self):
import json
service_hec = client.connect(host='localhost', scheme='https', port=8088,
token="11111111-1111-1111-1111-1111111111113")
event_collector_endpoint = client.Endpoint(service_hec, "/services/collector/event")
msg = {"index": "main", "event": "Hello World"}
response = event_collector_endpoint.post("", body=json.dumps(msg))
self.assertEqual(response.status, 200)
class TestCookieAuthentication(unittest.TestCase):
def setUp(self):
self.opts = testlib.parse([], {}, ".env")
self.service = client.Service(**self.opts.kwargs)
if getattr(unittest.TestCase, 'assertIsNotNone', None) is None:
def assertIsNotNone(self, obj, msg=None):
if obj is None:
raise self.failureException(msg or '%r is not None' % obj)
def test_login_and_store_cookie(self):
self.assertIsNotNone(self.service.get_cookies())
self.assertEqual(len(self.service.get_cookies()), 0)
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
self.assertNotEqual(self.service.get_cookies(), {})
self.assertEqual(len(self.service.get_cookies()), 1)
def test_login_with_cookie(self):
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
# Use the cookie from the other service as the only auth param (don't need user/password)
service2 = client.Service(**{"cookie": "%s=%s" % list(self.service.get_cookies().items())[0]})
service2.login()
self.assertEqual(len(service2.get_cookies()), 1)
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), len(self.service.get_cookies()))
self.assertEqual(list(service2.get_cookies().keys())[0][:8], "splunkd_")
self.assertEqual(service2.apps.get().status, 200)
def test_login_fails_with_bad_cookie(self):
bad_cookie = {'bad': 'cookie'}
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
service2.get_cookies().update(bad_cookie)
self.assertEqual(service2.get_cookies(), {'bad': 'cookie'})
# Should get an error with a bad cookie
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_autologin_with_cookie(self):
self.service.login()
self.assertTrue(self.service.has_cookies())
service = client.connect(
autologin=True,
cookie="%s=%s" % list(self.service.get_cookies().items())[0],
**self.opts.kwargs)
self.assertTrue(service.has_cookies())
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_login_fails_with_no_cookie(self):
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
# Should get an error when no authentication method
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_login_with_multiple_cookie_headers(self):
cookies = {
'bad': 'cookie',
'something_else': 'bad'
}
self.service.logout()
self.service.get_cookies().update(cookies)
self.service.login()
self.assertEqual(self.service.apps.get().status, 200)
def test_login_with_multiple_cookies(self):
bad_cookie = 'bad=cookie'
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
service2 = client.Service(**{"cookie": bad_cookie})
# Should get an error with a bad cookie
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
# Add on valid cookies, and try to use all of them
service2.get_cookies().update(self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.service.get_cookies().update({'bad': 'cookie'})
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.assertTrue([cookie for cookie in service2.get_cookies() if "splunkd_" in cookie])
self.assertTrue('bad' in service2.get_cookies())
self.assertEqual(service2.get_cookies()['bad'], 'cookie')
self.assertEqual(set(self.service.get_cookies()), set(service2.get_cookies()))
service2.login()
self.assertEqual(service2.apps.get().status, 200)
class TestSettings(testlib.SDKTestCase):
def test_read_settings(self):
settings = self.service.settings
# Verify that settings contains the keys we expect
keys = [
"SPLUNK_DB", "SPLUNK_HOME", "enableSplunkWebSSL", "host",
"httpport", "mgmtHostPort", "minFreeSpace", "pass4SymmKey",
"serverName", "sessionTimeout", "startwebserver", "trustedIP"
]
for key in keys:
self.assertTrue(key in settings)
def test_update_settings(self):
settings = self.service.settings
# Verify that we can update the settings
original = settings['sessionTimeout']
self.assertTrue(original != "42h")
settings.update(sessionTimeout="42h")
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, "42h")
# Restore (and verify) original value
settings.update(sessionTimeout=original)
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, original)
self.restartSplunk()
class TestTrailing(unittest.TestCase):
template = '/servicesNS/boris/search/another/path/segment/that runs on'
def test_raises_when_not_found_first(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 'boris')
def test_raises_when_not_found_second(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 's is', 'boris')
def test_no_args_is_identity(self):
self.assertEqual(self.template, client._trailing(self.template))
def test_trailing_with_one_arg_works(self):
self.assertEqual('boris/search/another/path/segment/that runs on',
client._trailing(self.template, 'ervicesNS/'))
def test_trailing_with_n_args_works(self):
self.assertEqual(
'another/path/segment/that runs on',
client._trailing(self.template, 'servicesNS/', '/', '/')
)
class TestEntityNamespacing(testlib.SDKTestCase):
def test_proper_namespace_with_arguments(self):
entity = self.service.apps['search']
self.assertEqual((None, None, "global"), entity._proper_namespace(sharing="global"))
self.assertEqual((None, "search", "app"), entity._proper_namespace(sharing="app", app="search"))
self.assertEqual(
("admin", "search", "user"),
entity._proper_namespace(sharing="user", app="search", owner="admin")
)
def test_proper_namespace_with_entity_namespace(self):
entity = self.service.apps['search']
namespace = (entity.access.owner, entity.access.app, entity.access.sharing)
self.assertEqual(namespace, entity._proper_namespace())
def test_proper_namespace_with_service_namespace(self):
entity = client.Entity(self.service, client.PATH_APPS + "search")
del entity._state['access']
namespace = (self.service.namespace.owner,
self.service.namespace.app,
self.service.namespace.sharing)
self.assertEqual(namespace, entity._proper_namespace())
if __name__ == "__main__":
try:
import unittest2 as unittest
except ImportError:
import unittest
unittest.main()
| raise | conditional_block |
test_service.py | #!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import pytest
from tests import testlib
import unittest
import splunklib.client as client
from splunklib.client import AuthenticationError
from splunklib.client import Service
from splunklib.binding import HTTPError
class ServiceTestCase(testlib.SDKTestCase):
def test_autologin(self):
service = client.connect(autologin=True, **self.opts.kwargs)
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_capabilities(self):
capabilities = self.service.capabilities
self.assertTrue(isinstance(capabilities, list))
self.assertTrue(all([isinstance(c, str) for c in capabilities]))
self.assertTrue('change_own_password' in capabilities) # This should always be there...
def test_info(self):
info = self.service.info
keys = ["build", "cpu_arch", "guid", "isFree", "isTrial", "licenseKeys",
"licenseSignature", "licenseState", "master_guid", "mode",
"os_build", "os_name", "os_version", "serverName", "version"]
for key in keys:
self.assertTrue(key in list(info.keys()))
def test_info_with_namespace(self):
# Make sure we're not accessing /servicesNS/admin/search/server/info
# instead of /services/server/info
# Backup the values, which are probably set to None
owner, app = self.service.namespace["owner"], self.service.namespace["app"]
self.service.namespace["owner"] = self.service.username
self.service.namespace["app"] = "search"
try:
self.assertEqual(self.service.info.licenseState, 'OK')
except HTTPError as he:
self.fail("Couldn't get the server info, probably got a 403! %s" % he.message)
self.service.namespace["owner"] = owner
self.service.namespace["app"] = app
def test_without_namespace(self):
service = client.connect(**self.opts.kwargs)
service.apps.list()
def test_app_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': None})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_owner_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': "-"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_default_app(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': None, 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_app_wildcard(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "-", 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_user_namespace(self):
kwargs = self.opts.kwargs.copy()
kwargs.update({'app': "search", 'owner': "admin"})
service_ns = client.connect(**kwargs)
service_ns.apps.list()
def test_parse(self):
# At the moment the parse method returns the raw XML. At
# some point this will change and it will return a nice,
# objectified form of the results, but for now there's
# nothing to test but a good response code.
response = self.service.parse('search * abc="def" | dedup abc')
self.assertEqual(response.status, 200)
def test_parse_fail(self):
try:
self.service.parse("xyzzy")
self.fail('Parse on nonsense did not fail')
except HTTPError as e:
self.assertEqual(e.status, 400)
def test_restart(self):
service = client.connect(**self.opts.kwargs)
self.service.restart(timeout=300)
service.login() # Make sure we are awake
def test_read_outputs_with_type(self):
name = testlib.tmpname()
service = client.connect(**self.opts.kwargs)
service.post('data/outputs/tcp/syslog', name=name, type='tcp')
entity = client.Entity(service, 'data/outputs/tcp/syslog/' + name)
self.assertTrue('tcp', entity.content.type)
if service.restart_required:
self.restartSplunk()
service = client.connect(**self.opts.kwargs)
client.Entity(service, 'data/outputs/tcp/syslog/' + name).delete()
if service.restart_required:
self.restartSplunk()
def test_splunk_version(self):
service = client.connect(**self.opts.kwargs)
v = service.splunk_version
self.assertTrue(isinstance(v, tuple))
self.assertTrue(len(v) >= 2)
for p in v:
self.assertTrue(isinstance(p, int) and p >= 0)
for version in [(4, 3, 3), (5,), (5, 0, 1)]:
with self.fake_splunk_version(version):
self.assertEqual(version, self.service.splunk_version)
def test_query_without_login_raises_auth_error(self):
service = self._create_unauthenticated_service()
self.assertRaises(AuthenticationError, lambda: service.indexes.list())
# This behavior is needed for backward compatibility for code
# prior to the introduction of AuthenticationError
def test_query_without_login_raises_http_401(self):
service = self._create_unauthenticated_service()
try:
service.indexes.list()
self.fail('Expected HTTP 401.')
except HTTPError as he:
if he.status == 401:
# Good
pass
else:
raise
def _create_unauthenticated_service(self):
return Service(**{
'host': self.opts.kwargs['host'],
'port': self.opts.kwargs['port'],
'scheme': self.opts.kwargs['scheme']
})
# To check the HEC event endpoint using Endpoint instance
@pytest.mark.smoke
def test_hec_event(self):
import json
service_hec = client.connect(host='localhost', scheme='https', port=8088,
token="11111111-1111-1111-1111-1111111111113")
event_collector_endpoint = client.Endpoint(service_hec, "/services/collector/event")
msg = {"index": "main", "event": "Hello World"}
response = event_collector_endpoint.post("", body=json.dumps(msg))
self.assertEqual(response.status, 200)
class TestCookieAuthentication(unittest.TestCase):
def setUp(self):
self.opts = testlib.parse([], {}, ".env")
self.service = client.Service(**self.opts.kwargs)
if getattr(unittest.TestCase, 'assertIsNotNone', None) is None:
def assertIsNotNone(self, obj, msg=None):
if obj is None:
raise self.failureException(msg or '%r is not None' % obj)
def test_login_and_store_cookie(self):
|
def test_login_with_cookie(self):
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
# Use the cookie from the other service as the only auth param (don't need user/password)
service2 = client.Service(**{"cookie": "%s=%s" % list(self.service.get_cookies().items())[0]})
service2.login()
self.assertEqual(len(service2.get_cookies()), 1)
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), len(self.service.get_cookies()))
self.assertEqual(list(service2.get_cookies().keys())[0][:8], "splunkd_")
self.assertEqual(service2.apps.get().status, 200)
def test_login_fails_with_bad_cookie(self):
bad_cookie = {'bad': 'cookie'}
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
service2.get_cookies().update(bad_cookie)
self.assertEqual(service2.get_cookies(), {'bad': 'cookie'})
# Should get an error with a bad cookie
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_autologin_with_cookie(self):
self.service.login()
self.assertTrue(self.service.has_cookies())
service = client.connect(
autologin=True,
cookie="%s=%s" % list(self.service.get_cookies().items())[0],
**self.opts.kwargs)
self.assertTrue(service.has_cookies())
self.service.restart(timeout=120)
reader = service.jobs.oneshot("search index=internal | head 1")
self.assertIsNotNone(reader)
def test_login_fails_with_no_cookie(self):
service2 = client.Service()
self.assertEqual(len(service2.get_cookies()), 0)
# Should get an error when no authentication method
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
def test_login_with_multiple_cookie_headers(self):
cookies = {
'bad': 'cookie',
'something_else': 'bad'
}
self.service.logout()
self.service.get_cookies().update(cookies)
self.service.login()
self.assertEqual(self.service.apps.get().status, 200)
def test_login_with_multiple_cookies(self):
bad_cookie = 'bad=cookie'
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
service2 = client.Service(**{"cookie": bad_cookie})
# Should get an error with a bad cookie
try:
service2.login()
self.fail()
except AuthenticationError as ae:
self.assertEqual(str(ae), "Login failed.")
# Add on valid cookies, and try to use all of them
service2.get_cookies().update(self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.service.get_cookies().update({'bad': 'cookie'})
self.assertEqual(service2.get_cookies(), self.service.get_cookies())
self.assertEqual(len(service2.get_cookies()), 2)
self.assertTrue([cookie for cookie in service2.get_cookies() if "splunkd_" in cookie])
self.assertTrue('bad' in service2.get_cookies())
self.assertEqual(service2.get_cookies()['bad'], 'cookie')
self.assertEqual(set(self.service.get_cookies()), set(service2.get_cookies()))
service2.login()
self.assertEqual(service2.apps.get().status, 200)
class TestSettings(testlib.SDKTestCase):
def test_read_settings(self):
settings = self.service.settings
# Verify that settings contains the keys we expect
keys = [
"SPLUNK_DB", "SPLUNK_HOME", "enableSplunkWebSSL", "host",
"httpport", "mgmtHostPort", "minFreeSpace", "pass4SymmKey",
"serverName", "sessionTimeout", "startwebserver", "trustedIP"
]
for key in keys:
self.assertTrue(key in settings)
def test_update_settings(self):
settings = self.service.settings
# Verify that we can update the settings
original = settings['sessionTimeout']
self.assertTrue(original != "42h")
settings.update(sessionTimeout="42h")
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, "42h")
# Restore (and verify) original value
settings.update(sessionTimeout=original)
settings.refresh()
updated = settings['sessionTimeout']
self.assertEqual(updated, original)
self.restartSplunk()
class TestTrailing(unittest.TestCase):
template = '/servicesNS/boris/search/another/path/segment/that runs on'
def test_raises_when_not_found_first(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 'boris')
def test_raises_when_not_found_second(self):
self.assertRaises(ValueError, client._trailing, 'this is a test', 's is', 'boris')
def test_no_args_is_identity(self):
self.assertEqual(self.template, client._trailing(self.template))
def test_trailing_with_one_arg_works(self):
self.assertEqual('boris/search/another/path/segment/that runs on',
client._trailing(self.template, 'ervicesNS/'))
def test_trailing_with_n_args_works(self):
self.assertEqual(
'another/path/segment/that runs on',
client._trailing(self.template, 'servicesNS/', '/', '/')
)
class TestEntityNamespacing(testlib.SDKTestCase):
def test_proper_namespace_with_arguments(self):
entity = self.service.apps['search']
self.assertEqual((None, None, "global"), entity._proper_namespace(sharing="global"))
self.assertEqual((None, "search", "app"), entity._proper_namespace(sharing="app", app="search"))
self.assertEqual(
("admin", "search", "user"),
entity._proper_namespace(sharing="user", app="search", owner="admin")
)
def test_proper_namespace_with_entity_namespace(self):
entity = self.service.apps['search']
namespace = (entity.access.owner, entity.access.app, entity.access.sharing)
self.assertEqual(namespace, entity._proper_namespace())
def test_proper_namespace_with_service_namespace(self):
entity = client.Entity(self.service, client.PATH_APPS + "search")
del entity._state['access']
namespace = (self.service.namespace.owner,
self.service.namespace.app,
self.service.namespace.sharing)
self.assertEqual(namespace, entity._proper_namespace())
if __name__ == "__main__":
try:
import unittest2 as unittest
except ImportError:
import unittest
unittest.main()
| self.assertIsNotNone(self.service.get_cookies())
self.assertEqual(len(self.service.get_cookies()), 0)
self.service.login()
self.assertIsNotNone(self.service.get_cookies())
self.assertNotEqual(self.service.get_cookies(), {})
self.assertEqual(len(self.service.get_cookies()), 1) | identifier_body |
completion.rs | use std::{borrow::Cow, path::PathBuf, str::FromStr, sync::Arc};
use floem::{
peniko::kurbo::Rect,
reactive::{ReadSignal, RwSignal, Scope},
};
use lapce_core::{buffer::rope_text::RopeText, movement::Movement};
use lapce_rpc::{plugin::PluginId, proxy::ProxyRpcHandler};
use lsp_types::{
CompletionItem, CompletionResponse, CompletionTextEdit, InsertTextFormat,
Position,
};
use nucleo::Utf32Str;
use crate::{
config::LapceConfig, doc::Document, editor::view_data::EditorViewData,
id::EditorId, snippet::Snippet,
};
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum CompletionStatus {
Inactive,
Started,
Done,
}
#[derive(Clone, PartialEq)]
pub struct ScoredCompletionItem {
pub item: CompletionItem,
pub plugin_id: PluginId,
pub score: u32,
pub label_score: u32,
pub indices: Vec<usize>,
}
#[derive(Clone)]
pub struct CompletionData {
pub status: CompletionStatus,
/// The current request id. This is used to discard old requests.
pub request_id: usize,
/// An input id that is used for keeping track of whether the input has changed.
pub input_id: usize,
// TODO: A `PathBuf` has the issue that the proxy may not have the same format.
// TODO(minor): It might be nice to not require a path. LSPs cannot operate on scratch buffers
// as of now, but they might be allowed in the future.
pub path: PathBuf,
/// The offset that the completion is/was started at. Used for positioning the completion elem
pub offset: usize,
/// The active completion index in the list of filtered items
pub active: RwSignal<usize>,
/// The current input that the user has typed which is being sent for consideration by the LSP
pub input: String,
/// `(Input, CompletionItems)`
pub input_items: im::HashMap<String, im::Vector<ScoredCompletionItem>>,
/// The filtered items that are being displayed to the user
pub filtered_items: im::Vector<ScoredCompletionItem>,
/// The size of the completion element.
/// This is used for positioning the element.
/// As well, it is needed for some movement commands like page up/down that need to know the
/// height to compute how far to move.
pub layout_rect: Rect,
/// The editor id that was most recently used to trigger a completion.
pub latest_editor_id: Option<EditorId>,
/// Matcher for filtering the completion items
matcher: RwSignal<nucleo::Matcher>,
config: ReadSignal<Arc<LapceConfig>>,
}
impl CompletionData {
pub fn new(cx: Scope, config: ReadSignal<Arc<LapceConfig>>) -> Self {
let active = cx.create_rw_signal(0);
Self {
status: CompletionStatus::Inactive,
request_id: 0,
input_id: 0,
path: PathBuf::new(),
offset: 0,
active,
input: "".to_string(),
input_items: im::HashMap::new(),
filtered_items: im::Vector::new(),
layout_rect: Rect::ZERO,
matcher: cx
.create_rw_signal(nucleo::Matcher::new(nucleo::Config::DEFAULT)),
latest_editor_id: None,
config,
}
}
/// Handle the response to a completion request.
pub fn receive(
&mut self,
request_id: usize,
input: &str,
resp: &CompletionResponse,
plugin_id: PluginId,
) {
// If we've been canceled or the request id is old, ignore the response.
if self.status == CompletionStatus::Inactive || self.request_id != request_id
{
return;
}
let items = match resp {
CompletionResponse::Array(items) => items,
// TODO: Possibly handle the 'is_incomplete' field on List.
CompletionResponse::List(list) => &list.items,
};
let items: im::Vector<ScoredCompletionItem> = items
.iter()
.map(|i| ScoredCompletionItem {
item: i.to_owned(),
plugin_id,
score: 0,
label_score: 0,
indices: Vec::new(),
})
.collect();
self.input_items.insert(input.to_string(), items);
self.filter_items();
}
/// Request for completion items wit the current request id.
pub fn request(
&mut self,
editor_id: EditorId,
proxy_rpc: &ProxyRpcHandler,
path: PathBuf,
input: String,
position: Position,
) {
self.latest_editor_id = Some(editor_id);
self.input_items.insert(input.clone(), im::Vector::new());
proxy_rpc.completion(self.request_id, path, input, position);
}
/// Close the completion, clearing all the data.
pub fn cancel(&mut self) {
if self.status == CompletionStatus::Inactive {
return;
}
self.status = CompletionStatus::Inactive;
self.input_id = 0;
self.latest_editor_id = None;
self.active.set(0);
self.input.clear();
self.input_items.clear();
self.filtered_items.clear();
}
pub fn update_input(&mut self, input: String) {
if self.status == CompletionStatus::Inactive {
return;
}
self.input = input;
// TODO: If the user types a letter that continues the current active item, we should
// try keeping that item active. Possibly give this a setting.
// ex: `p` has `print!` and `println!` has options. If you select the second, then type
// `r` then it should stay on `println!` even as the overall filtering of the list changes.
self.active.set(0);
self.filter_items();
}
fn all_items(&self) -> im::Vector<ScoredCompletionItem> {
self.input_items
.get(&self.input)
.cloned()
.filter(|items| !items.is_empty())
.unwrap_or_else(move || {
self.input_items.get("").cloned().unwrap_or_default()
})
}
pub fn filter_items(&mut self) {
self.input_id += 1;
if self.input.is_empty() {
self.filtered_items = self.all_items();
return;
}
// Filter the items by the fuzzy matching with the input text.
let mut items: im::Vector<ScoredCompletionItem> = self
.matcher
.try_update(|matcher| {
let pattern = nucleo::pattern::Pattern::parse(
&self.input,
nucleo::pattern::CaseMatching::Ignore,
);
self.all_items()
.iter()
.filter_map(|i| {
let filter_text =
i.item.filter_text.as_ref().unwrap_or(&i.item.label);
let shift = i
.item
.label
.match_indices(filter_text)
.next()
.map(|(shift, _)| shift)
.unwrap_or(0);
let mut indices = Vec::new();
let mut filter_text_buf = Vec::new();
let filter_text =
Utf32Str::new(filter_text, &mut filter_text_buf);
if let Some(score) =
pattern.indices(filter_text, matcher, &mut indices)
{
if shift > 0 {
for idx in indices.iter_mut() {
*idx += shift as u32;
}
}
let mut item = i.clone();
item.score = score;
item.label_score = score;
item.indices =
indices.into_iter().map(|i| i as usize).collect();
let mut label_buf = Vec::new();
let label_text =
Utf32Str::new(&i.item.label, &mut label_buf);
if let Some(score) = pattern.score(label_text, matcher) {
item.label_score = score;
}
Some(item)
} else {
None
}
})
.collect()
})
.unwrap();
// Sort all the items by their score, then their label score, then their length.
items.sort_by(|a, b| {
b.score
.cmp(&a.score)
.then_with(|| b.label_score.cmp(&a.label_score))
.then_with(|| a.item.label.len().cmp(&b.item.label.len()))
});
self.filtered_items = items;
}
/// Move down in the list of items.
pub fn next(&mut self) {
let active = self.active.get_untracked();
let new =
Movement::Down.update_index(active, self.filtered_items.len(), 1, true);
self.active.set(new);
}
/// Move up in the list of items.
pub fn previous(&mut self) {
let active = self.active.get_untracked();
let new =
Movement::Up.update_index(active, self.filtered_items.len(), 1, true);
self.active.set(new);
}
/// The amount of items that can be displayed in the current layout.
fn display_count(&self) -> usize {
let config = self.config.get_untracked();
((self.layout_rect.size().height / config.editor.line_height() as f64)
.floor() as usize)
.saturating_sub(1)
}
/// Move to the next page of items.
pub fn next_page(&mut self) {
let count = self.display_count();
let active = self.active.get_untracked();
let new = Movement::Down.update_index(
active,
self.filtered_items.len(),
count,
false,
);
self.active.set(new);
}
/// Move to the previous page of items.
pub fn | (&mut self) {
let count = self.display_count();
let active = self.active.get_untracked();
let new = Movement::Up.update_index(
active,
self.filtered_items.len(),
count,
false,
);
self.active.set(new);
}
/// The currently selected/active item.
pub fn current_item(&self) -> Option<&ScoredCompletionItem> {
self.filtered_items.get(self.active.get_untracked())
}
/// Update the completion lens of the document with the active completion item.
pub fn update_document_completion(
&self,
view: &EditorViewData,
cursor_offset: usize,
) {
let doc = view.doc;
if !doc.with_untracked(|doc| doc.content.is_file()) {
return;
}
let config = self.config.get_untracked();
if !config.editor.enable_completion_lens {
clear_completion_lens(doc);
return;
}
let completion_lens = doc.with_untracked(|doc| {
completion_lens_text(
view.rope_text(),
cursor_offset,
self,
doc.completion_lens(),
)
});
match completion_lens {
Some(Some(lens)) => {
let offset = self.offset + self.input.len();
// TODO: will need to be adjusted to use visual line.
// Could just store the offset in doc.
let (line, col) = view.offset_to_line_col(offset);
doc.update(|doc| {
doc.set_completion_lens(lens, line, col);
});
}
// Unchanged
Some(None) => {}
None => {
clear_completion_lens(doc);
}
}
}
}
/// Clear the current completion lens. Only `update`s if there is a completion lens.
pub fn clear_completion_lens(doc: RwSignal<Document>) {
let has_completion = doc.with_untracked(|doc| doc.completion_lens().is_some());
if has_completion {
doc.update(|doc| {
doc.clear_completion_lens();
});
}
}
/// Get the text of the completion lens for the given completion item.
/// Returns `None` if the completion lens should be hidden.
/// Returns `Some(None)` if the completion lens should be shown, but not changed.
/// Returns `Some(Some(text))` if the completion lens should be shown and changed to the given text.
fn completion_lens_text(
rope_text: impl RopeText,
cursor_offset: usize,
completion: &CompletionData,
current_completion: Option<&str>,
) -> Option<Option<String>> {
let item = &completion.current_item()?.item;
let item: Cow<str> = if let Some(edit) = &item.text_edit {
// A text edit is used, because that is what will actually be inserted.
let text_format = item
.insert_text_format
.unwrap_or(InsertTextFormat::PLAIN_TEXT);
// We don't display insert and replace
let CompletionTextEdit::Edit(edit) = edit else {
return None;
};
// The completion offset can be different from the current cursor offset.
let completion_offset = completion.offset;
let start_offset = rope_text.prev_code_boundary(cursor_offset);
let edit_start = rope_text.offset_of_position(&edit.range.start);
// If the start of the edit isn't where the cursor currently is,
// and it is not at the start of the completion, then we ignore it.
// This captures most cases that we want, even if it skips over some
// displayable edits.
if start_offset != edit_start && completion_offset != edit_start {
return None;
}
match text_format {
InsertTextFormat::PLAIN_TEXT => {
// This is not entirely correct because it assumes that the position is
// `{start,end}_offset` when it may not necessarily be.
Cow::Borrowed(&edit.new_text)
}
InsertTextFormat::SNIPPET => {
// Parse the snippet. Bail if it's invalid.
let snippet = Snippet::from_str(&edit.new_text).ok()?;
let text = snippet.text();
Cow::Owned(text)
}
_ => {
// We don't know how to support this text format.
return None;
}
}
} else {
// There's no specific text edit, so we just use the label.
Cow::Borrowed(&item.label)
};
// We strip the prefix of the current input from the label.
// So that, for example, `p` with a completion of `println` only sets the lens text to `rintln`.
// If the text does not include a prefix in the expected position, then we do not display it.
let item = item.as_ref().strip_prefix(&completion.input)?;
// Get only the first line of text, because Lapce does not currently support
// multi-line phantom text.
let item = item.lines().next().unwrap_or(item);
if Some(item) == current_completion {
// If the item is the same as the current completion, then we don't display it.
Some(None)
} else {
Some(Some(item.to_string()))
}
}
| previous_page | identifier_name |
completion.rs | use std::{borrow::Cow, path::PathBuf, str::FromStr, sync::Arc};
use floem::{
peniko::kurbo::Rect,
reactive::{ReadSignal, RwSignal, Scope},
};
use lapce_core::{buffer::rope_text::RopeText, movement::Movement};
use lapce_rpc::{plugin::PluginId, proxy::ProxyRpcHandler};
use lsp_types::{
CompletionItem, CompletionResponse, CompletionTextEdit, InsertTextFormat,
Position,
};
use nucleo::Utf32Str;
use crate::{
config::LapceConfig, doc::Document, editor::view_data::EditorViewData,
id::EditorId, snippet::Snippet,
};
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum CompletionStatus {
Inactive,
Started,
Done,
}
#[derive(Clone, PartialEq)]
pub struct ScoredCompletionItem {
pub item: CompletionItem,
pub plugin_id: PluginId,
pub score: u32,
pub label_score: u32,
pub indices: Vec<usize>,
}
#[derive(Clone)]
pub struct CompletionData {
pub status: CompletionStatus,
/// The current request id. This is used to discard old requests.
pub request_id: usize,
/// An input id that is used for keeping track of whether the input has changed.
pub input_id: usize,
// TODO: A `PathBuf` has the issue that the proxy may not have the same format.
// TODO(minor): It might be nice to not require a path. LSPs cannot operate on scratch buffers
// as of now, but they might be allowed in the future.
pub path: PathBuf,
/// The offset that the completion is/was started at. Used for positioning the completion elem
pub offset: usize,
/// The active completion index in the list of filtered items
pub active: RwSignal<usize>,
/// The current input that the user has typed which is being sent for consideration by the LSP
pub input: String,
/// `(Input, CompletionItems)`
pub input_items: im::HashMap<String, im::Vector<ScoredCompletionItem>>,
/// The filtered items that are being displayed to the user
pub filtered_items: im::Vector<ScoredCompletionItem>,
/// The size of the completion element.
/// This is used for positioning the element.
/// As well, it is needed for some movement commands like page up/down that need to know the
/// height to compute how far to move.
pub layout_rect: Rect,
/// The editor id that was most recently used to trigger a completion.
pub latest_editor_id: Option<EditorId>,
/// Matcher for filtering the completion items
matcher: RwSignal<nucleo::Matcher>,
config: ReadSignal<Arc<LapceConfig>>,
}
impl CompletionData {
pub fn new(cx: Scope, config: ReadSignal<Arc<LapceConfig>>) -> Self {
let active = cx.create_rw_signal(0);
Self {
status: CompletionStatus::Inactive,
request_id: 0,
input_id: 0,
path: PathBuf::new(),
offset: 0,
active,
input: "".to_string(),
input_items: im::HashMap::new(),
filtered_items: im::Vector::new(),
layout_rect: Rect::ZERO,
matcher: cx
.create_rw_signal(nucleo::Matcher::new(nucleo::Config::DEFAULT)),
latest_editor_id: None,
config,
}
}
/// Handle the response to a completion request.
pub fn receive(
&mut self,
request_id: usize,
input: &str,
resp: &CompletionResponse,
plugin_id: PluginId,
) {
// If we've been canceled or the request id is old, ignore the response.
if self.status == CompletionStatus::Inactive || self.request_id != request_id
{
return;
}
let items = match resp {
CompletionResponse::Array(items) => items, | // TODO: Possibly handle the 'is_incomplete' field on List.
CompletionResponse::List(list) => &list.items,
};
let items: im::Vector<ScoredCompletionItem> = items
.iter()
.map(|i| ScoredCompletionItem {
item: i.to_owned(),
plugin_id,
score: 0,
label_score: 0,
indices: Vec::new(),
})
.collect();
self.input_items.insert(input.to_string(), items);
self.filter_items();
}
/// Request for completion items wit the current request id.
pub fn request(
&mut self,
editor_id: EditorId,
proxy_rpc: &ProxyRpcHandler,
path: PathBuf,
input: String,
position: Position,
) {
self.latest_editor_id = Some(editor_id);
self.input_items.insert(input.clone(), im::Vector::new());
proxy_rpc.completion(self.request_id, path, input, position);
}
/// Close the completion, clearing all the data.
pub fn cancel(&mut self) {
if self.status == CompletionStatus::Inactive {
return;
}
self.status = CompletionStatus::Inactive;
self.input_id = 0;
self.latest_editor_id = None;
self.active.set(0);
self.input.clear();
self.input_items.clear();
self.filtered_items.clear();
}
pub fn update_input(&mut self, input: String) {
if self.status == CompletionStatus::Inactive {
return;
}
self.input = input;
// TODO: If the user types a letter that continues the current active item, we should
// try keeping that item active. Possibly give this a setting.
// ex: `p` has `print!` and `println!` has options. If you select the second, then type
// `r` then it should stay on `println!` even as the overall filtering of the list changes.
self.active.set(0);
self.filter_items();
}
fn all_items(&self) -> im::Vector<ScoredCompletionItem> {
self.input_items
.get(&self.input)
.cloned()
.filter(|items| !items.is_empty())
.unwrap_or_else(move || {
self.input_items.get("").cloned().unwrap_or_default()
})
}
pub fn filter_items(&mut self) {
self.input_id += 1;
if self.input.is_empty() {
self.filtered_items = self.all_items();
return;
}
// Filter the items by the fuzzy matching with the input text.
let mut items: im::Vector<ScoredCompletionItem> = self
.matcher
.try_update(|matcher| {
let pattern = nucleo::pattern::Pattern::parse(
&self.input,
nucleo::pattern::CaseMatching::Ignore,
);
self.all_items()
.iter()
.filter_map(|i| {
let filter_text =
i.item.filter_text.as_ref().unwrap_or(&i.item.label);
let shift = i
.item
.label
.match_indices(filter_text)
.next()
.map(|(shift, _)| shift)
.unwrap_or(0);
let mut indices = Vec::new();
let mut filter_text_buf = Vec::new();
let filter_text =
Utf32Str::new(filter_text, &mut filter_text_buf);
if let Some(score) =
pattern.indices(filter_text, matcher, &mut indices)
{
if shift > 0 {
for idx in indices.iter_mut() {
*idx += shift as u32;
}
}
let mut item = i.clone();
item.score = score;
item.label_score = score;
item.indices =
indices.into_iter().map(|i| i as usize).collect();
let mut label_buf = Vec::new();
let label_text =
Utf32Str::new(&i.item.label, &mut label_buf);
if let Some(score) = pattern.score(label_text, matcher) {
item.label_score = score;
}
Some(item)
} else {
None
}
})
.collect()
})
.unwrap();
// Sort all the items by their score, then their label score, then their length.
items.sort_by(|a, b| {
b.score
.cmp(&a.score)
.then_with(|| b.label_score.cmp(&a.label_score))
.then_with(|| a.item.label.len().cmp(&b.item.label.len()))
});
self.filtered_items = items;
}
/// Move down in the list of items.
pub fn next(&mut self) {
let active = self.active.get_untracked();
let new =
Movement::Down.update_index(active, self.filtered_items.len(), 1, true);
self.active.set(new);
}
/// Move up in the list of items.
pub fn previous(&mut self) {
let active = self.active.get_untracked();
let new =
Movement::Up.update_index(active, self.filtered_items.len(), 1, true);
self.active.set(new);
}
/// The amount of items that can be displayed in the current layout.
fn display_count(&self) -> usize {
let config = self.config.get_untracked();
((self.layout_rect.size().height / config.editor.line_height() as f64)
.floor() as usize)
.saturating_sub(1)
}
/// Move to the next page of items.
pub fn next_page(&mut self) {
let count = self.display_count();
let active = self.active.get_untracked();
let new = Movement::Down.update_index(
active,
self.filtered_items.len(),
count,
false,
);
self.active.set(new);
}
/// Move to the previous page of items.
pub fn previous_page(&mut self) {
let count = self.display_count();
let active = self.active.get_untracked();
let new = Movement::Up.update_index(
active,
self.filtered_items.len(),
count,
false,
);
self.active.set(new);
}
/// The currently selected/active item.
pub fn current_item(&self) -> Option<&ScoredCompletionItem> {
self.filtered_items.get(self.active.get_untracked())
}
/// Update the completion lens of the document with the active completion item.
pub fn update_document_completion(
&self,
view: &EditorViewData,
cursor_offset: usize,
) {
let doc = view.doc;
if !doc.with_untracked(|doc| doc.content.is_file()) {
return;
}
let config = self.config.get_untracked();
if !config.editor.enable_completion_lens {
clear_completion_lens(doc);
return;
}
let completion_lens = doc.with_untracked(|doc| {
completion_lens_text(
view.rope_text(),
cursor_offset,
self,
doc.completion_lens(),
)
});
match completion_lens {
Some(Some(lens)) => {
let offset = self.offset + self.input.len();
// TODO: will need to be adjusted to use visual line.
// Could just store the offset in doc.
let (line, col) = view.offset_to_line_col(offset);
doc.update(|doc| {
doc.set_completion_lens(lens, line, col);
});
}
// Unchanged
Some(None) => {}
None => {
clear_completion_lens(doc);
}
}
}
}
/// Clear the current completion lens. Only `update`s if there is a completion lens.
pub fn clear_completion_lens(doc: RwSignal<Document>) {
let has_completion = doc.with_untracked(|doc| doc.completion_lens().is_some());
if has_completion {
doc.update(|doc| {
doc.clear_completion_lens();
});
}
}
/// Get the text of the completion lens for the given completion item.
/// Returns `None` if the completion lens should be hidden.
/// Returns `Some(None)` if the completion lens should be shown, but not changed.
/// Returns `Some(Some(text))` if the completion lens should be shown and changed to the given text.
fn completion_lens_text(
rope_text: impl RopeText,
cursor_offset: usize,
completion: &CompletionData,
current_completion: Option<&str>,
) -> Option<Option<String>> {
let item = &completion.current_item()?.item;
let item: Cow<str> = if let Some(edit) = &item.text_edit {
// A text edit is used, because that is what will actually be inserted.
let text_format = item
.insert_text_format
.unwrap_or(InsertTextFormat::PLAIN_TEXT);
// We don't display insert and replace
let CompletionTextEdit::Edit(edit) = edit else {
return None;
};
// The completion offset can be different from the current cursor offset.
let completion_offset = completion.offset;
let start_offset = rope_text.prev_code_boundary(cursor_offset);
let edit_start = rope_text.offset_of_position(&edit.range.start);
// If the start of the edit isn't where the cursor currently is,
// and it is not at the start of the completion, then we ignore it.
// This captures most cases that we want, even if it skips over some
// displayable edits.
if start_offset != edit_start && completion_offset != edit_start {
return None;
}
match text_format {
InsertTextFormat::PLAIN_TEXT => {
// This is not entirely correct because it assumes that the position is
// `{start,end}_offset` when it may not necessarily be.
Cow::Borrowed(&edit.new_text)
}
InsertTextFormat::SNIPPET => {
// Parse the snippet. Bail if it's invalid.
let snippet = Snippet::from_str(&edit.new_text).ok()?;
let text = snippet.text();
Cow::Owned(text)
}
_ => {
// We don't know how to support this text format.
return None;
}
}
} else {
// There's no specific text edit, so we just use the label.
Cow::Borrowed(&item.label)
};
// We strip the prefix of the current input from the label.
// So that, for example, `p` with a completion of `println` only sets the lens text to `rintln`.
// If the text does not include a prefix in the expected position, then we do not display it.
let item = item.as_ref().strip_prefix(&completion.input)?;
// Get only the first line of text, because Lapce does not currently support
// multi-line phantom text.
let item = item.lines().next().unwrap_or(item);
if Some(item) == current_completion {
// If the item is the same as the current completion, then we don't display it.
Some(None)
} else {
Some(Some(item.to_string()))
}
} | random_line_split | |
completion.rs | use std::{borrow::Cow, path::PathBuf, str::FromStr, sync::Arc};
use floem::{
peniko::kurbo::Rect,
reactive::{ReadSignal, RwSignal, Scope},
};
use lapce_core::{buffer::rope_text::RopeText, movement::Movement};
use lapce_rpc::{plugin::PluginId, proxy::ProxyRpcHandler};
use lsp_types::{
CompletionItem, CompletionResponse, CompletionTextEdit, InsertTextFormat,
Position,
};
use nucleo::Utf32Str;
use crate::{
config::LapceConfig, doc::Document, editor::view_data::EditorViewData,
id::EditorId, snippet::Snippet,
};
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum CompletionStatus {
Inactive,
Started,
Done,
}
#[derive(Clone, PartialEq)]
pub struct ScoredCompletionItem {
pub item: CompletionItem,
pub plugin_id: PluginId,
pub score: u32,
pub label_score: u32,
pub indices: Vec<usize>,
}
#[derive(Clone)]
pub struct CompletionData {
pub status: CompletionStatus,
/// The current request id. This is used to discard old requests.
pub request_id: usize,
/// An input id that is used for keeping track of whether the input has changed.
pub input_id: usize,
// TODO: A `PathBuf` has the issue that the proxy may not have the same format.
// TODO(minor): It might be nice to not require a path. LSPs cannot operate on scratch buffers
// as of now, but they might be allowed in the future.
pub path: PathBuf,
/// The offset that the completion is/was started at. Used for positioning the completion elem
pub offset: usize,
/// The active completion index in the list of filtered items
pub active: RwSignal<usize>,
/// The current input that the user has typed which is being sent for consideration by the LSP
pub input: String,
/// `(Input, CompletionItems)`
pub input_items: im::HashMap<String, im::Vector<ScoredCompletionItem>>,
/// The filtered items that are being displayed to the user
pub filtered_items: im::Vector<ScoredCompletionItem>,
/// The size of the completion element.
/// This is used for positioning the element.
/// As well, it is needed for some movement commands like page up/down that need to know the
/// height to compute how far to move.
pub layout_rect: Rect,
/// The editor id that was most recently used to trigger a completion.
pub latest_editor_id: Option<EditorId>,
/// Matcher for filtering the completion items
matcher: RwSignal<nucleo::Matcher>,
config: ReadSignal<Arc<LapceConfig>>,
}
impl CompletionData {
pub fn new(cx: Scope, config: ReadSignal<Arc<LapceConfig>>) -> Self {
let active = cx.create_rw_signal(0);
Self {
status: CompletionStatus::Inactive,
request_id: 0,
input_id: 0,
path: PathBuf::new(),
offset: 0,
active,
input: "".to_string(),
input_items: im::HashMap::new(),
filtered_items: im::Vector::new(),
layout_rect: Rect::ZERO,
matcher: cx
.create_rw_signal(nucleo::Matcher::new(nucleo::Config::DEFAULT)),
latest_editor_id: None,
config,
}
}
/// Handle the response to a completion request.
pub fn receive(
&mut self,
request_id: usize,
input: &str,
resp: &CompletionResponse,
plugin_id: PluginId,
) {
// If we've been canceled or the request id is old, ignore the response.
if self.status == CompletionStatus::Inactive || self.request_id != request_id
{
return;
}
let items = match resp {
CompletionResponse::Array(items) => items,
// TODO: Possibly handle the 'is_incomplete' field on List.
CompletionResponse::List(list) => &list.items,
};
let items: im::Vector<ScoredCompletionItem> = items
.iter()
.map(|i| ScoredCompletionItem {
item: i.to_owned(),
plugin_id,
score: 0,
label_score: 0,
indices: Vec::new(),
})
.collect();
self.input_items.insert(input.to_string(), items);
self.filter_items();
}
/// Request for completion items wit the current request id.
pub fn request(
&mut self,
editor_id: EditorId,
proxy_rpc: &ProxyRpcHandler,
path: PathBuf,
input: String,
position: Position,
) {
self.latest_editor_id = Some(editor_id);
self.input_items.insert(input.clone(), im::Vector::new());
proxy_rpc.completion(self.request_id, path, input, position);
}
/// Close the completion, clearing all the data.
pub fn cancel(&mut self) {
if self.status == CompletionStatus::Inactive {
return;
}
self.status = CompletionStatus::Inactive;
self.input_id = 0;
self.latest_editor_id = None;
self.active.set(0);
self.input.clear();
self.input_items.clear();
self.filtered_items.clear();
}
pub fn update_input(&mut self, input: String) |
fn all_items(&self) -> im::Vector<ScoredCompletionItem> {
self.input_items
.get(&self.input)
.cloned()
.filter(|items| !items.is_empty())
.unwrap_or_else(move || {
self.input_items.get("").cloned().unwrap_or_default()
})
}
pub fn filter_items(&mut self) {
self.input_id += 1;
if self.input.is_empty() {
self.filtered_items = self.all_items();
return;
}
// Filter the items by the fuzzy matching with the input text.
let mut items: im::Vector<ScoredCompletionItem> = self
.matcher
.try_update(|matcher| {
let pattern = nucleo::pattern::Pattern::parse(
&self.input,
nucleo::pattern::CaseMatching::Ignore,
);
self.all_items()
.iter()
.filter_map(|i| {
let filter_text =
i.item.filter_text.as_ref().unwrap_or(&i.item.label);
let shift = i
.item
.label
.match_indices(filter_text)
.next()
.map(|(shift, _)| shift)
.unwrap_or(0);
let mut indices = Vec::new();
let mut filter_text_buf = Vec::new();
let filter_text =
Utf32Str::new(filter_text, &mut filter_text_buf);
if let Some(score) =
pattern.indices(filter_text, matcher, &mut indices)
{
if shift > 0 {
for idx in indices.iter_mut() {
*idx += shift as u32;
}
}
let mut item = i.clone();
item.score = score;
item.label_score = score;
item.indices =
indices.into_iter().map(|i| i as usize).collect();
let mut label_buf = Vec::new();
let label_text =
Utf32Str::new(&i.item.label, &mut label_buf);
if let Some(score) = pattern.score(label_text, matcher) {
item.label_score = score;
}
Some(item)
} else {
None
}
})
.collect()
})
.unwrap();
// Sort all the items by their score, then their label score, then their length.
items.sort_by(|a, b| {
b.score
.cmp(&a.score)
.then_with(|| b.label_score.cmp(&a.label_score))
.then_with(|| a.item.label.len().cmp(&b.item.label.len()))
});
self.filtered_items = items;
}
/// Move down in the list of items.
pub fn next(&mut self) {
let active = self.active.get_untracked();
let new =
Movement::Down.update_index(active, self.filtered_items.len(), 1, true);
self.active.set(new);
}
/// Move up in the list of items.
pub fn previous(&mut self) {
let active = self.active.get_untracked();
let new =
Movement::Up.update_index(active, self.filtered_items.len(), 1, true);
self.active.set(new);
}
/// The amount of items that can be displayed in the current layout.
fn display_count(&self) -> usize {
let config = self.config.get_untracked();
((self.layout_rect.size().height / config.editor.line_height() as f64)
.floor() as usize)
.saturating_sub(1)
}
/// Move to the next page of items.
pub fn next_page(&mut self) {
let count = self.display_count();
let active = self.active.get_untracked();
let new = Movement::Down.update_index(
active,
self.filtered_items.len(),
count,
false,
);
self.active.set(new);
}
/// Move to the previous page of items.
pub fn previous_page(&mut self) {
let count = self.display_count();
let active = self.active.get_untracked();
let new = Movement::Up.update_index(
active,
self.filtered_items.len(),
count,
false,
);
self.active.set(new);
}
/// The currently selected/active item.
pub fn current_item(&self) -> Option<&ScoredCompletionItem> {
self.filtered_items.get(self.active.get_untracked())
}
/// Update the completion lens of the document with the active completion item.
pub fn update_document_completion(
&self,
view: &EditorViewData,
cursor_offset: usize,
) {
let doc = view.doc;
if !doc.with_untracked(|doc| doc.content.is_file()) {
return;
}
let config = self.config.get_untracked();
if !config.editor.enable_completion_lens {
clear_completion_lens(doc);
return;
}
let completion_lens = doc.with_untracked(|doc| {
completion_lens_text(
view.rope_text(),
cursor_offset,
self,
doc.completion_lens(),
)
});
match completion_lens {
Some(Some(lens)) => {
let offset = self.offset + self.input.len();
// TODO: will need to be adjusted to use visual line.
// Could just store the offset in doc.
let (line, col) = view.offset_to_line_col(offset);
doc.update(|doc| {
doc.set_completion_lens(lens, line, col);
});
}
// Unchanged
Some(None) => {}
None => {
clear_completion_lens(doc);
}
}
}
}
/// Clear the current completion lens. Only `update`s if there is a completion lens.
pub fn clear_completion_lens(doc: RwSignal<Document>) {
let has_completion = doc.with_untracked(|doc| doc.completion_lens().is_some());
if has_completion {
doc.update(|doc| {
doc.clear_completion_lens();
});
}
}
/// Get the text of the completion lens for the given completion item.
/// Returns `None` if the completion lens should be hidden.
/// Returns `Some(None)` if the completion lens should be shown, but not changed.
/// Returns `Some(Some(text))` if the completion lens should be shown and changed to the given text.
fn completion_lens_text(
rope_text: impl RopeText,
cursor_offset: usize,
completion: &CompletionData,
current_completion: Option<&str>,
) -> Option<Option<String>> {
let item = &completion.current_item()?.item;
let item: Cow<str> = if let Some(edit) = &item.text_edit {
// A text edit is used, because that is what will actually be inserted.
let text_format = item
.insert_text_format
.unwrap_or(InsertTextFormat::PLAIN_TEXT);
// We don't display insert and replace
let CompletionTextEdit::Edit(edit) = edit else {
return None;
};
// The completion offset can be different from the current cursor offset.
let completion_offset = completion.offset;
let start_offset = rope_text.prev_code_boundary(cursor_offset);
let edit_start = rope_text.offset_of_position(&edit.range.start);
// If the start of the edit isn't where the cursor currently is,
// and it is not at the start of the completion, then we ignore it.
// This captures most cases that we want, even if it skips over some
// displayable edits.
if start_offset != edit_start && completion_offset != edit_start {
return None;
}
match text_format {
InsertTextFormat::PLAIN_TEXT => {
// This is not entirely correct because it assumes that the position is
// `{start,end}_offset` when it may not necessarily be.
Cow::Borrowed(&edit.new_text)
}
InsertTextFormat::SNIPPET => {
// Parse the snippet. Bail if it's invalid.
let snippet = Snippet::from_str(&edit.new_text).ok()?;
let text = snippet.text();
Cow::Owned(text)
}
_ => {
// We don't know how to support this text format.
return None;
}
}
} else {
// There's no specific text edit, so we just use the label.
Cow::Borrowed(&item.label)
};
// We strip the prefix of the current input from the label.
// So that, for example, `p` with a completion of `println` only sets the lens text to `rintln`.
// If the text does not include a prefix in the expected position, then we do not display it.
let item = item.as_ref().strip_prefix(&completion.input)?;
// Get only the first line of text, because Lapce does not currently support
// multi-line phantom text.
let item = item.lines().next().unwrap_or(item);
if Some(item) == current_completion {
// If the item is the same as the current completion, then we don't display it.
Some(None)
} else {
Some(Some(item.to_string()))
}
}
| {
if self.status == CompletionStatus::Inactive {
return;
}
self.input = input;
// TODO: If the user types a letter that continues the current active item, we should
// try keeping that item active. Possibly give this a setting.
// ex: `p` has `print!` and `println!` has options. If you select the second, then type
// `r` then it should stay on `println!` even as the overall filtering of the list changes.
self.active.set(0);
self.filter_items();
} | identifier_body |
feature_engineer.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger('code_submission')
import random
import numpy as np
import pandas as pd
from pandas import Series
import torch
import time
from torch_geometric.data import Data
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from torch_scatter import scatter_add
import subprocess | return data
pca=PCA(n_components=pca_threshold, svd_solver ='full')
data = pca.fit_transform(data)
return data
def _check_file_exist(file_path, flag_directed_graph):
if flag_directed_graph and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_d_U.npy')) and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_d_V.npy')):
return True
elif (not flag_directed_graph) and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_u_V.npy')):
return True
else:
return False
def check_monotony(x, tol=5):
dx = np.diff(x[1:-1])
return np.all(dx < tol) or np.all(dx > -tol)
def get_value_counts_with_moving_average(x, use_moving_average=False, n=3):
x_dict = dict(Series(x).value_counts())
x = [x_dict[i] for i in sorted(x_dict.keys())]
if use_moving_average:
ret = np.cumsum(x, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
print(ret[n-1:]/n)
return ret[n - 1:] / n
else:
return x
def check_continuous(x, tol=5):
if len(np.unique(x)) > len(x) * 0.5:
return True
x = get_value_counts_with_moving_average(x)
max_index = np.argmax(x)
min_index = np.argmax(-np.array(x))
if check_monotony(x, tol):
return True
elif check_monotony(x[:max_index+1], tol) and check_monotony(x[max_index:], tol):
return True
elif check_monotony(x[:min_index+1], tol) and check_monotony(x[min_index:], tol):
return True
else:
return False
def normalize(x):
norm_time = time.time()
tol = min(int(1e-3*x.shape[1]), 5)
normal_funs = ['l2', 'minmax', 'z-score']
normal_fun = normal_funs[0]
cont_feature_idx = [i for i in range(len(x)) if len(np.unique(x[i])) > 5 and check_continuous(x[i], tol)]
cate_feature_idx = [i for i in range(len(x)) if i not in cont_feature_idx]
logger.info('# continous features: {}, # categorical features: {}'.format(len(cont_feature_idx), len(cate_feature_idx)))
cate_feature = x[cate_feature_idx]
cont_feature = x[cont_feature_idx]
if len(cont_feature) > 0:
if normal_fun == 'l2':
norm = list(map(lambda y: np.linalg.norm(y,keepdims=True), cont_feature))
cont_feature = cont_feature/np.array(norm, dtype=np.float32)
elif normal_fun == 'min-max':
min_value = np.min(cont_feature, 1, keepdims=True)
max_value = np.max(cont_feature, 1, keepdims=True)
cont_feature = (cont_feature-min_value) / (max_value-min_value)
elif normal_fun == 'z-score':
mean_value = np.mean(cont_feature, 1, keepdims=True)
std_value = np.std(cont_feature, 1, keepdims=True)
cont_feature = (cont_feature-mean_value) / std_value
normalized_features = np.concatenate([cate_feature, cont_feature], axis=0)
logger.info('normlization time cost: {}'.format(time.time()-norm_time))
return normalized_features.transpose(1,0)
def get_neighbor_label_distribution(edges, y, n_class):
EPSILON = 1e-8
num_nodes = len(y)
distribution= np.zeros([num_nodes, n_class+1], dtype=np.float32)
edges = edges.numpy().transpose([1,0])
for edge in edges:
src_idx = edge[0]
dst_idx = edge[1]
distribution[src_idx][y[dst_idx]] += 1.0
# the last dimension is 'unknow' (the labels of test nodes)
norm_matrix = np.sum(distribution[:,:-1], axis=1, keepdims=True) + EPSILON
distribution = distribution[:,:-1] / norm_matrix
return distribution
def get_one_hot_label(y, n_class):
n_nodes = len(y)
# y is a tensor having shape [#nodes]
categorical_y = y.view(-1, 1)
categorical_y = categorical_y * (categorical_y < n_class).type(torch.int64)
one_hot_label = torch.zeros([n_nodes, n_class], dtype=torch.int32, device=categorical_y.device)
one_hot_label.scatter_(1, categorical_y, 1)
# mask test sample
one_hot_label = one_hot_label.float() * (categorical_y < n_class).float()
# return ndarray to fit to the following "np.concatnation"
return one_hot_label.numpy()
#num_nodes = len(y)
#one_hot_label = np.zeros([num_nodes, n_class], dtype=np.float32)
#for i in range(num_nodes):
# if y[i] < n_class:
# one_hot_label[i][y[i]] = 1.0
#return one_hot_label
def get_node_degree(edge_index, edge_weight, num_nodes):
row, col = edge_index
in_deg = scatter_add(edge_weight, col, dim_size=num_nodes).numpy()
out_deg = scatter_add(edge_weight, row, dim_size=num_nodes).numpy()
degree = np.concatenate([np.expand_dims(in_deg,-1), np.expand_dims(out_deg,-1)], axis=-1)
return degree
def get_node_degree_binary(edge_index, edge_weight, num_nodes):
row, col = edge_index
in_deg = scatter_add(edge_weight, col, dim_size=num_nodes)
in_deg_binary = torch.ones_like(in_deg)
in_deg_binary[torch.nonzero(in_deg).reshape(-1)] = 0.0
in_deg_binary = in_deg_binary.numpy()
out_deg = scatter_add(edge_weight, row, dim_size=num_nodes)
out_deg_binary = torch.ones_like(out_deg)
out_deg_binary[torch.nonzero(out_deg).reshape(-1)] = 0.0
out_deg_binary = out_deg_binary.numpy()
degree_binary = np.concatenate([np.expand_dims(in_deg_binary,-1), np.expand_dims(out_deg_binary,-1)], axis=-1)
return degree_binary
def run_STRAP(num_nodes, edges, weights, flag_directed_graph, flag_none_feature, time_budget, epsilon=1e6, dims=128):
file_path = os.path.dirname(__file__)
data_dir = os.path.join(file_path, 'NR_Dataset')
if not os.path.exists(data_dir):
os.makedirs(data_dir)
embed_dir = os.path.join(file_path, 'NR_EB')
if not os.path.exists(embed_dir):
os.makedirs(embed_dir)
edges = edges.numpy().transpose([1,0]).astype(np.int32)
num_edges = len(edges)
if num_edges < epsilon and num_nodes<1e5:
STRAP_epsilon = 1e-4
timeout = int(0.35*time_budget)
elif num_edges < 10*epsilon and num_nodes<1e5:
STRAP_epsilon = 5e-4
timeout = int(0.35*time_budget)
else:
STRAP_epsilon = 1e-3
timeout = int(0.35*time_budget)
np.save(os.path.join(data_dir,'STRAP.npy'), edges)
#run_commands = "./code_submission/temp_STRAP_FRPCA_U STRAP ./code_submission/NR_Dataset/ ./code_submission/NR_EB/ 0.5 12 0.0001 24"
STRAP_file = 'STRAP_FRPCA_D' if flag_directed_graph else 'STRAP_FRPCA_U'
try:
run_commands = ' '.join(['chmod','u+x',os.path.join(file_path,STRAP_file)])
cmd_return = subprocess.run(run_commands, shell=True, timeout=5)
#logger.info('chomod commands return: {}'.format(proc.returncode))
run_commands = ' '.join([os.path.join(file_path,STRAP_file),
'STRAP', data_dir+'/', embed_dir+'/',
'0.5 12', str(STRAP_epsilon), '8', str(dims), str(num_nodes)])
cmd_return = subprocess.run(run_commands.split(' '), shell=False, timeout=timeout)
flag_error = False
#logger.info('chomod commands return: {}'.format(proc.returncode))
except subprocess.TimeoutExpired as timeout_msg:
flag_error = True
logger.info('STRAP timeout! error msg: {}'.format(timeout_msg))
except Exception as err_msg:
flag_error = True
logger.info('STRAP failed with other errors! error msg: {}'.format(err_msg))
finally:
if not flag_error and _check_file_exist(file_path, flag_directed_graph):
if flag_directed_graph:
node_embed_u = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_d_U.npy'))
if np.isnan(node_embed_u).any():
node_embed_u[np.isnan(node_embed_u)] = 0.0
logger.info('find nan in node_embed_U')
node_embed_v = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_d_V.npy'))
if np.isnan(node_embed_v).any():
node_embed_v[np.isnan(node_embed_v)] = 0.0
logger.info('find nan in node_embed_V')
node_embed = np.concatenate([node_embed_u, node_embed_v], axis=1)
else:
node_embed = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_u_U.npy'))
if np.isnan(node_embed).any():
node_embed[np.isnan(node_embed)] = 0.0
logger.info('find nan in node_embed_U')
else:
logger.warn('Error: no such file!')
flag_error = True
node_embed = []
return flag_error, node_embed
def dim_reduction(x, use_normalizer=False):
#remove uninformative col
drop_col = [col for col in x.columns if x[col].var() == 0]
#all the features are uninformative except node_index
flag_none_feature = (len(drop_col) == len(x.columns)-1)
x = x.drop(drop_col,axis=1).to_numpy()
if not flag_none_feature and use_normalizer:
x = np.concatenate([x[:,0:1], normalize(x[:,1:].transpose(1,0))], axis=1)
return x, flag_none_feature
def feature_generation(x, y, n_class, edges, weights, flag_none_feature, flag_directed_graph, time_budget, use_label_distribution=False, use_node_degree=False, use_node_degree_binary=False, use_node_embed=True, use_one_hot_label=False):
added_features = list()
start_time = time.time()
num_nodes = x.shape[0]
if flag_none_feature and use_label_distribution:
label_distribution = get_neighbor_label_distribution(edges, y, n_class)
added_features.append(label_distribution)
logger.info('neighbor_label_distribution time cost: {}'.format(time.time() - start_time))
if use_node_degree:
node_degree = get_node_degree(edges, weights, num_nodes)
added_features.append(node_degree)
logger.info('degree time_cost: '.format(time.time() - start_time))
if use_node_degree_binary:
node_degree_binary = get_node_degree_binary(edges, weights, num_nodes)
added_features.append(node_degree_binary)
logger.info('degree_binary time_cost: {}'.format(time.time() - start_time))
if use_one_hot_label:
one_hot_label = get_one_hot_label(y, n_class)
added_features.append(one_hot_label)
logger.info('one_hot_label time_cost: '.format(time.time() - start_time))
if use_node_embed:
flag_error, node_embed = run_STRAP(num_nodes, edges, weights, flag_directed_graph, flag_none_feature, time_budget)
if not flag_error:
added_features.append(node_embed)
logger.info('node_embed time cost: {}'.format(time.time() - start_time))
return added_features | import os
def _pca_processing(data, pca_threshold=0.75):
if data.shape[1] == 0: | random_line_split |
feature_engineer.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger('code_submission')
import random
import numpy as np
import pandas as pd
from pandas import Series
import torch
import time
from torch_geometric.data import Data
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from torch_scatter import scatter_add
import subprocess
import os
def _pca_processing(data, pca_threshold=0.75):
if data.shape[1] == 0:
return data
pca=PCA(n_components=pca_threshold, svd_solver ='full')
data = pca.fit_transform(data)
return data
def _check_file_exist(file_path, flag_directed_graph):
if flag_directed_graph and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_d_U.npy')) and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_d_V.npy')):
return True
elif (not flag_directed_graph) and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_u_V.npy')):
return True
else:
return False
def check_monotony(x, tol=5):
dx = np.diff(x[1:-1])
return np.all(dx < tol) or np.all(dx > -tol)
def get_value_counts_with_moving_average(x, use_moving_average=False, n=3):
x_dict = dict(Series(x).value_counts())
x = [x_dict[i] for i in sorted(x_dict.keys())]
if use_moving_average:
ret = np.cumsum(x, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
print(ret[n-1:]/n)
return ret[n - 1:] / n
else:
return x
def check_continuous(x, tol=5):
if len(np.unique(x)) > len(x) * 0.5:
return True
x = get_value_counts_with_moving_average(x)
max_index = np.argmax(x)
min_index = np.argmax(-np.array(x))
if check_monotony(x, tol):
return True
elif check_monotony(x[:max_index+1], tol) and check_monotony(x[max_index:], tol):
return True
elif check_monotony(x[:min_index+1], tol) and check_monotony(x[min_index:], tol):
return True
else:
return False
def normalize(x):
norm_time = time.time()
tol = min(int(1e-3*x.shape[1]), 5)
normal_funs = ['l2', 'minmax', 'z-score']
normal_fun = normal_funs[0]
cont_feature_idx = [i for i in range(len(x)) if len(np.unique(x[i])) > 5 and check_continuous(x[i], tol)]
cate_feature_idx = [i for i in range(len(x)) if i not in cont_feature_idx]
logger.info('# continous features: {}, # categorical features: {}'.format(len(cont_feature_idx), len(cate_feature_idx)))
cate_feature = x[cate_feature_idx]
cont_feature = x[cont_feature_idx]
if len(cont_feature) > 0:
if normal_fun == 'l2':
norm = list(map(lambda y: np.linalg.norm(y,keepdims=True), cont_feature))
cont_feature = cont_feature/np.array(norm, dtype=np.float32)
elif normal_fun == 'min-max':
min_value = np.min(cont_feature, 1, keepdims=True)
max_value = np.max(cont_feature, 1, keepdims=True)
cont_feature = (cont_feature-min_value) / (max_value-min_value)
elif normal_fun == 'z-score':
mean_value = np.mean(cont_feature, 1, keepdims=True)
std_value = np.std(cont_feature, 1, keepdims=True)
cont_feature = (cont_feature-mean_value) / std_value
normalized_features = np.concatenate([cate_feature, cont_feature], axis=0)
logger.info('normlization time cost: {}'.format(time.time()-norm_time))
return normalized_features.transpose(1,0)
def get_neighbor_label_distribution(edges, y, n_class):
EPSILON = 1e-8
num_nodes = len(y)
distribution= np.zeros([num_nodes, n_class+1], dtype=np.float32)
edges = edges.numpy().transpose([1,0])
for edge in edges:
src_idx = edge[0]
dst_idx = edge[1]
distribution[src_idx][y[dst_idx]] += 1.0
# the last dimension is 'unknow' (the labels of test nodes)
norm_matrix = np.sum(distribution[:,:-1], axis=1, keepdims=True) + EPSILON
distribution = distribution[:,:-1] / norm_matrix
return distribution
def get_one_hot_label(y, n_class):
n_nodes = len(y)
# y is a tensor having shape [#nodes]
categorical_y = y.view(-1, 1)
categorical_y = categorical_y * (categorical_y < n_class).type(torch.int64)
one_hot_label = torch.zeros([n_nodes, n_class], dtype=torch.int32, device=categorical_y.device)
one_hot_label.scatter_(1, categorical_y, 1)
# mask test sample
one_hot_label = one_hot_label.float() * (categorical_y < n_class).float()
# return ndarray to fit to the following "np.concatnation"
return one_hot_label.numpy()
#num_nodes = len(y)
#one_hot_label = np.zeros([num_nodes, n_class], dtype=np.float32)
#for i in range(num_nodes):
# if y[i] < n_class:
# one_hot_label[i][y[i]] = 1.0
#return one_hot_label
def | (edge_index, edge_weight, num_nodes):
row, col = edge_index
in_deg = scatter_add(edge_weight, col, dim_size=num_nodes).numpy()
out_deg = scatter_add(edge_weight, row, dim_size=num_nodes).numpy()
degree = np.concatenate([np.expand_dims(in_deg,-1), np.expand_dims(out_deg,-1)], axis=-1)
return degree
def get_node_degree_binary(edge_index, edge_weight, num_nodes):
row, col = edge_index
in_deg = scatter_add(edge_weight, col, dim_size=num_nodes)
in_deg_binary = torch.ones_like(in_deg)
in_deg_binary[torch.nonzero(in_deg).reshape(-1)] = 0.0
in_deg_binary = in_deg_binary.numpy()
out_deg = scatter_add(edge_weight, row, dim_size=num_nodes)
out_deg_binary = torch.ones_like(out_deg)
out_deg_binary[torch.nonzero(out_deg).reshape(-1)] = 0.0
out_deg_binary = out_deg_binary.numpy()
degree_binary = np.concatenate([np.expand_dims(in_deg_binary,-1), np.expand_dims(out_deg_binary,-1)], axis=-1)
return degree_binary
def run_STRAP(num_nodes, edges, weights, flag_directed_graph, flag_none_feature, time_budget, epsilon=1e6, dims=128):
file_path = os.path.dirname(__file__)
data_dir = os.path.join(file_path, 'NR_Dataset')
if not os.path.exists(data_dir):
os.makedirs(data_dir)
embed_dir = os.path.join(file_path, 'NR_EB')
if not os.path.exists(embed_dir):
os.makedirs(embed_dir)
edges = edges.numpy().transpose([1,0]).astype(np.int32)
num_edges = len(edges)
if num_edges < epsilon and num_nodes<1e5:
STRAP_epsilon = 1e-4
timeout = int(0.35*time_budget)
elif num_edges < 10*epsilon and num_nodes<1e5:
STRAP_epsilon = 5e-4
timeout = int(0.35*time_budget)
else:
STRAP_epsilon = 1e-3
timeout = int(0.35*time_budget)
np.save(os.path.join(data_dir,'STRAP.npy'), edges)
#run_commands = "./code_submission/temp_STRAP_FRPCA_U STRAP ./code_submission/NR_Dataset/ ./code_submission/NR_EB/ 0.5 12 0.0001 24"
STRAP_file = 'STRAP_FRPCA_D' if flag_directed_graph else 'STRAP_FRPCA_U'
try:
run_commands = ' '.join(['chmod','u+x',os.path.join(file_path,STRAP_file)])
cmd_return = subprocess.run(run_commands, shell=True, timeout=5)
#logger.info('chomod commands return: {}'.format(proc.returncode))
run_commands = ' '.join([os.path.join(file_path,STRAP_file),
'STRAP', data_dir+'/', embed_dir+'/',
'0.5 12', str(STRAP_epsilon), '8', str(dims), str(num_nodes)])
cmd_return = subprocess.run(run_commands.split(' '), shell=False, timeout=timeout)
flag_error = False
#logger.info('chomod commands return: {}'.format(proc.returncode))
except subprocess.TimeoutExpired as timeout_msg:
flag_error = True
logger.info('STRAP timeout! error msg: {}'.format(timeout_msg))
except Exception as err_msg:
flag_error = True
logger.info('STRAP failed with other errors! error msg: {}'.format(err_msg))
finally:
if not flag_error and _check_file_exist(file_path, flag_directed_graph):
if flag_directed_graph:
node_embed_u = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_d_U.npy'))
if np.isnan(node_embed_u).any():
node_embed_u[np.isnan(node_embed_u)] = 0.0
logger.info('find nan in node_embed_U')
node_embed_v = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_d_V.npy'))
if np.isnan(node_embed_v).any():
node_embed_v[np.isnan(node_embed_v)] = 0.0
logger.info('find nan in node_embed_V')
node_embed = np.concatenate([node_embed_u, node_embed_v], axis=1)
else:
node_embed = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_u_U.npy'))
if np.isnan(node_embed).any():
node_embed[np.isnan(node_embed)] = 0.0
logger.info('find nan in node_embed_U')
else:
logger.warn('Error: no such file!')
flag_error = True
node_embed = []
return flag_error, node_embed
def dim_reduction(x, use_normalizer=False):
#remove uninformative col
drop_col = [col for col in x.columns if x[col].var() == 0]
#all the features are uninformative except node_index
flag_none_feature = (len(drop_col) == len(x.columns)-1)
x = x.drop(drop_col,axis=1).to_numpy()
if not flag_none_feature and use_normalizer:
x = np.concatenate([x[:,0:1], normalize(x[:,1:].transpose(1,0))], axis=1)
return x, flag_none_feature
def feature_generation(x, y, n_class, edges, weights, flag_none_feature, flag_directed_graph, time_budget, use_label_distribution=False, use_node_degree=False, use_node_degree_binary=False, use_node_embed=True, use_one_hot_label=False):
added_features = list()
start_time = time.time()
num_nodes = x.shape[0]
if flag_none_feature and use_label_distribution:
label_distribution = get_neighbor_label_distribution(edges, y, n_class)
added_features.append(label_distribution)
logger.info('neighbor_label_distribution time cost: {}'.format(time.time() - start_time))
if use_node_degree:
node_degree = get_node_degree(edges, weights, num_nodes)
added_features.append(node_degree)
logger.info('degree time_cost: '.format(time.time() - start_time))
if use_node_degree_binary:
node_degree_binary = get_node_degree_binary(edges, weights, num_nodes)
added_features.append(node_degree_binary)
logger.info('degree_binary time_cost: {}'.format(time.time() - start_time))
if use_one_hot_label:
one_hot_label = get_one_hot_label(y, n_class)
added_features.append(one_hot_label)
logger.info('one_hot_label time_cost: '.format(time.time() - start_time))
if use_node_embed:
flag_error, node_embed = run_STRAP(num_nodes, edges, weights, flag_directed_graph, flag_none_feature, time_budget)
if not flag_error:
added_features.append(node_embed)
logger.info('node_embed time cost: {}'.format(time.time() - start_time))
return added_features
| get_node_degree | identifier_name |
feature_engineer.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger('code_submission')
import random
import numpy as np
import pandas as pd
from pandas import Series
import torch
import time
from torch_geometric.data import Data
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from torch_scatter import scatter_add
import subprocess
import os
def _pca_processing(data, pca_threshold=0.75):
if data.shape[1] == 0:
return data
pca=PCA(n_components=pca_threshold, svd_solver ='full')
data = pca.fit_transform(data)
return data
def _check_file_exist(file_path, flag_directed_graph):
if flag_directed_graph and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_d_U.npy')) and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_d_V.npy')):
return True
elif (not flag_directed_graph) and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_u_V.npy')):
return True
else:
return False
def check_monotony(x, tol=5):
dx = np.diff(x[1:-1])
return np.all(dx < tol) or np.all(dx > -tol)
def get_value_counts_with_moving_average(x, use_moving_average=False, n=3):
x_dict = dict(Series(x).value_counts())
x = [x_dict[i] for i in sorted(x_dict.keys())]
if use_moving_average:
ret = np.cumsum(x, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
print(ret[n-1:]/n)
return ret[n - 1:] / n
else:
return x
def check_continuous(x, tol=5):
if len(np.unique(x)) > len(x) * 0.5:
return True
x = get_value_counts_with_moving_average(x)
max_index = np.argmax(x)
min_index = np.argmax(-np.array(x))
if check_monotony(x, tol):
return True
elif check_monotony(x[:max_index+1], tol) and check_monotony(x[max_index:], tol):
return True
elif check_monotony(x[:min_index+1], tol) and check_monotony(x[min_index:], tol):
return True
else:
return False
def normalize(x):
norm_time = time.time()
tol = min(int(1e-3*x.shape[1]), 5)
normal_funs = ['l2', 'minmax', 'z-score']
normal_fun = normal_funs[0]
cont_feature_idx = [i for i in range(len(x)) if len(np.unique(x[i])) > 5 and check_continuous(x[i], tol)]
cate_feature_idx = [i for i in range(len(x)) if i not in cont_feature_idx]
logger.info('# continous features: {}, # categorical features: {}'.format(len(cont_feature_idx), len(cate_feature_idx)))
cate_feature = x[cate_feature_idx]
cont_feature = x[cont_feature_idx]
if len(cont_feature) > 0:
if normal_fun == 'l2':
norm = list(map(lambda y: np.linalg.norm(y,keepdims=True), cont_feature))
cont_feature = cont_feature/np.array(norm, dtype=np.float32)
elif normal_fun == 'min-max':
min_value = np.min(cont_feature, 1, keepdims=True)
max_value = np.max(cont_feature, 1, keepdims=True)
cont_feature = (cont_feature-min_value) / (max_value-min_value)
elif normal_fun == 'z-score':
mean_value = np.mean(cont_feature, 1, keepdims=True)
std_value = np.std(cont_feature, 1, keepdims=True)
cont_feature = (cont_feature-mean_value) / std_value
normalized_features = np.concatenate([cate_feature, cont_feature], axis=0)
logger.info('normlization time cost: {}'.format(time.time()-norm_time))
return normalized_features.transpose(1,0)
def get_neighbor_label_distribution(edges, y, n_class):
EPSILON = 1e-8
num_nodes = len(y)
distribution= np.zeros([num_nodes, n_class+1], dtype=np.float32)
edges = edges.numpy().transpose([1,0])
for edge in edges:
src_idx = edge[0]
dst_idx = edge[1]
distribution[src_idx][y[dst_idx]] += 1.0
# the last dimension is 'unknow' (the labels of test nodes)
norm_matrix = np.sum(distribution[:,:-1], axis=1, keepdims=True) + EPSILON
distribution = distribution[:,:-1] / norm_matrix
return distribution
def get_one_hot_label(y, n_class):
n_nodes = len(y)
# y is a tensor having shape [#nodes]
categorical_y = y.view(-1, 1)
categorical_y = categorical_y * (categorical_y < n_class).type(torch.int64)
one_hot_label = torch.zeros([n_nodes, n_class], dtype=torch.int32, device=categorical_y.device)
one_hot_label.scatter_(1, categorical_y, 1)
# mask test sample
one_hot_label = one_hot_label.float() * (categorical_y < n_class).float()
# return ndarray to fit to the following "np.concatnation"
return one_hot_label.numpy()
#num_nodes = len(y)
#one_hot_label = np.zeros([num_nodes, n_class], dtype=np.float32)
#for i in range(num_nodes):
# if y[i] < n_class:
# one_hot_label[i][y[i]] = 1.0
#return one_hot_label
def get_node_degree(edge_index, edge_weight, num_nodes):
row, col = edge_index
in_deg = scatter_add(edge_weight, col, dim_size=num_nodes).numpy()
out_deg = scatter_add(edge_weight, row, dim_size=num_nodes).numpy()
degree = np.concatenate([np.expand_dims(in_deg,-1), np.expand_dims(out_deg,-1)], axis=-1)
return degree
def get_node_degree_binary(edge_index, edge_weight, num_nodes):
row, col = edge_index
in_deg = scatter_add(edge_weight, col, dim_size=num_nodes)
in_deg_binary = torch.ones_like(in_deg)
in_deg_binary[torch.nonzero(in_deg).reshape(-1)] = 0.0
in_deg_binary = in_deg_binary.numpy()
out_deg = scatter_add(edge_weight, row, dim_size=num_nodes)
out_deg_binary = torch.ones_like(out_deg)
out_deg_binary[torch.nonzero(out_deg).reshape(-1)] = 0.0
out_deg_binary = out_deg_binary.numpy()
degree_binary = np.concatenate([np.expand_dims(in_deg_binary,-1), np.expand_dims(out_deg_binary,-1)], axis=-1)
return degree_binary
def run_STRAP(num_nodes, edges, weights, flag_directed_graph, flag_none_feature, time_budget, epsilon=1e6, dims=128):
file_path = os.path.dirname(__file__)
data_dir = os.path.join(file_path, 'NR_Dataset')
if not os.path.exists(data_dir):
os.makedirs(data_dir)
embed_dir = os.path.join(file_path, 'NR_EB')
if not os.path.exists(embed_dir):
os.makedirs(embed_dir)
edges = edges.numpy().transpose([1,0]).astype(np.int32)
num_edges = len(edges)
if num_edges < epsilon and num_nodes<1e5:
STRAP_epsilon = 1e-4
timeout = int(0.35*time_budget)
elif num_edges < 10*epsilon and num_nodes<1e5:
STRAP_epsilon = 5e-4
timeout = int(0.35*time_budget)
else:
STRAP_epsilon = 1e-3
timeout = int(0.35*time_budget)
np.save(os.path.join(data_dir,'STRAP.npy'), edges)
#run_commands = "./code_submission/temp_STRAP_FRPCA_U STRAP ./code_submission/NR_Dataset/ ./code_submission/NR_EB/ 0.5 12 0.0001 24"
STRAP_file = 'STRAP_FRPCA_D' if flag_directed_graph else 'STRAP_FRPCA_U'
try:
run_commands = ' '.join(['chmod','u+x',os.path.join(file_path,STRAP_file)])
cmd_return = subprocess.run(run_commands, shell=True, timeout=5)
#logger.info('chomod commands return: {}'.format(proc.returncode))
run_commands = ' '.join([os.path.join(file_path,STRAP_file),
'STRAP', data_dir+'/', embed_dir+'/',
'0.5 12', str(STRAP_epsilon), '8', str(dims), str(num_nodes)])
cmd_return = subprocess.run(run_commands.split(' '), shell=False, timeout=timeout)
flag_error = False
#logger.info('chomod commands return: {}'.format(proc.returncode))
except subprocess.TimeoutExpired as timeout_msg:
flag_error = True
logger.info('STRAP timeout! error msg: {}'.format(timeout_msg))
except Exception as err_msg:
flag_error = True
logger.info('STRAP failed with other errors! error msg: {}'.format(err_msg))
finally:
if not flag_error and _check_file_exist(file_path, flag_directed_graph):
if flag_directed_graph:
node_embed_u = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_d_U.npy'))
if np.isnan(node_embed_u).any():
node_embed_u[np.isnan(node_embed_u)] = 0.0
logger.info('find nan in node_embed_U')
node_embed_v = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_d_V.npy'))
if np.isnan(node_embed_v).any():
|
node_embed = np.concatenate([node_embed_u, node_embed_v], axis=1)
else:
node_embed = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_u_U.npy'))
if np.isnan(node_embed).any():
node_embed[np.isnan(node_embed)] = 0.0
logger.info('find nan in node_embed_U')
else:
logger.warn('Error: no such file!')
flag_error = True
node_embed = []
return flag_error, node_embed
def dim_reduction(x, use_normalizer=False):
#remove uninformative col
drop_col = [col for col in x.columns if x[col].var() == 0]
#all the features are uninformative except node_index
flag_none_feature = (len(drop_col) == len(x.columns)-1)
x = x.drop(drop_col,axis=1).to_numpy()
if not flag_none_feature and use_normalizer:
x = np.concatenate([x[:,0:1], normalize(x[:,1:].transpose(1,0))], axis=1)
return x, flag_none_feature
def feature_generation(x, y, n_class, edges, weights, flag_none_feature, flag_directed_graph, time_budget, use_label_distribution=False, use_node_degree=False, use_node_degree_binary=False, use_node_embed=True, use_one_hot_label=False):
added_features = list()
start_time = time.time()
num_nodes = x.shape[0]
if flag_none_feature and use_label_distribution:
label_distribution = get_neighbor_label_distribution(edges, y, n_class)
added_features.append(label_distribution)
logger.info('neighbor_label_distribution time cost: {}'.format(time.time() - start_time))
if use_node_degree:
node_degree = get_node_degree(edges, weights, num_nodes)
added_features.append(node_degree)
logger.info('degree time_cost: '.format(time.time() - start_time))
if use_node_degree_binary:
node_degree_binary = get_node_degree_binary(edges, weights, num_nodes)
added_features.append(node_degree_binary)
logger.info('degree_binary time_cost: {}'.format(time.time() - start_time))
if use_one_hot_label:
one_hot_label = get_one_hot_label(y, n_class)
added_features.append(one_hot_label)
logger.info('one_hot_label time_cost: '.format(time.time() - start_time))
if use_node_embed:
flag_error, node_embed = run_STRAP(num_nodes, edges, weights, flag_directed_graph, flag_none_feature, time_budget)
if not flag_error:
added_features.append(node_embed)
logger.info('node_embed time cost: {}'.format(time.time() - start_time))
return added_features
| node_embed_v[np.isnan(node_embed_v)] = 0.0
logger.info('find nan in node_embed_V') | conditional_block |
feature_engineer.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
logger = logging.getLogger('code_submission')
import random
import numpy as np
import pandas as pd
from pandas import Series
import torch
import time
from torch_geometric.data import Data
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from torch_scatter import scatter_add
import subprocess
import os
def _pca_processing(data, pca_threshold=0.75):
if data.shape[1] == 0:
return data
pca=PCA(n_components=pca_threshold, svd_solver ='full')
data = pca.fit_transform(data)
return data
def _check_file_exist(file_path, flag_directed_graph):
if flag_directed_graph and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_d_U.npy')) and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_d_V.npy')):
return True
elif (not flag_directed_graph) and os.path.exists(os.path.join(file_path,'NR_EB/STRAP_strap_frpca_u_V.npy')):
return True
else:
return False
def check_monotony(x, tol=5):
dx = np.diff(x[1:-1])
return np.all(dx < tol) or np.all(dx > -tol)
def get_value_counts_with_moving_average(x, use_moving_average=False, n=3):
x_dict = dict(Series(x).value_counts())
x = [x_dict[i] for i in sorted(x_dict.keys())]
if use_moving_average:
ret = np.cumsum(x, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
print(ret[n-1:]/n)
return ret[n - 1:] / n
else:
return x
def check_continuous(x, tol=5):
|
def normalize(x):
norm_time = time.time()
tol = min(int(1e-3*x.shape[1]), 5)
normal_funs = ['l2', 'minmax', 'z-score']
normal_fun = normal_funs[0]
cont_feature_idx = [i for i in range(len(x)) if len(np.unique(x[i])) > 5 and check_continuous(x[i], tol)]
cate_feature_idx = [i for i in range(len(x)) if i not in cont_feature_idx]
logger.info('# continous features: {}, # categorical features: {}'.format(len(cont_feature_idx), len(cate_feature_idx)))
cate_feature = x[cate_feature_idx]
cont_feature = x[cont_feature_idx]
if len(cont_feature) > 0:
if normal_fun == 'l2':
norm = list(map(lambda y: np.linalg.norm(y,keepdims=True), cont_feature))
cont_feature = cont_feature/np.array(norm, dtype=np.float32)
elif normal_fun == 'min-max':
min_value = np.min(cont_feature, 1, keepdims=True)
max_value = np.max(cont_feature, 1, keepdims=True)
cont_feature = (cont_feature-min_value) / (max_value-min_value)
elif normal_fun == 'z-score':
mean_value = np.mean(cont_feature, 1, keepdims=True)
std_value = np.std(cont_feature, 1, keepdims=True)
cont_feature = (cont_feature-mean_value) / std_value
normalized_features = np.concatenate([cate_feature, cont_feature], axis=0)
logger.info('normlization time cost: {}'.format(time.time()-norm_time))
return normalized_features.transpose(1,0)
def get_neighbor_label_distribution(edges, y, n_class):
EPSILON = 1e-8
num_nodes = len(y)
distribution= np.zeros([num_nodes, n_class+1], dtype=np.float32)
edges = edges.numpy().transpose([1,0])
for edge in edges:
src_idx = edge[0]
dst_idx = edge[1]
distribution[src_idx][y[dst_idx]] += 1.0
# the last dimension is 'unknow' (the labels of test nodes)
norm_matrix = np.sum(distribution[:,:-1], axis=1, keepdims=True) + EPSILON
distribution = distribution[:,:-1] / norm_matrix
return distribution
def get_one_hot_label(y, n_class):
n_nodes = len(y)
# y is a tensor having shape [#nodes]
categorical_y = y.view(-1, 1)
categorical_y = categorical_y * (categorical_y < n_class).type(torch.int64)
one_hot_label = torch.zeros([n_nodes, n_class], dtype=torch.int32, device=categorical_y.device)
one_hot_label.scatter_(1, categorical_y, 1)
# mask test sample
one_hot_label = one_hot_label.float() * (categorical_y < n_class).float()
# return ndarray to fit to the following "np.concatnation"
return one_hot_label.numpy()
#num_nodes = len(y)
#one_hot_label = np.zeros([num_nodes, n_class], dtype=np.float32)
#for i in range(num_nodes):
# if y[i] < n_class:
# one_hot_label[i][y[i]] = 1.0
#return one_hot_label
def get_node_degree(edge_index, edge_weight, num_nodes):
row, col = edge_index
in_deg = scatter_add(edge_weight, col, dim_size=num_nodes).numpy()
out_deg = scatter_add(edge_weight, row, dim_size=num_nodes).numpy()
degree = np.concatenate([np.expand_dims(in_deg,-1), np.expand_dims(out_deg,-1)], axis=-1)
return degree
def get_node_degree_binary(edge_index, edge_weight, num_nodes):
row, col = edge_index
in_deg = scatter_add(edge_weight, col, dim_size=num_nodes)
in_deg_binary = torch.ones_like(in_deg)
in_deg_binary[torch.nonzero(in_deg).reshape(-1)] = 0.0
in_deg_binary = in_deg_binary.numpy()
out_deg = scatter_add(edge_weight, row, dim_size=num_nodes)
out_deg_binary = torch.ones_like(out_deg)
out_deg_binary[torch.nonzero(out_deg).reshape(-1)] = 0.0
out_deg_binary = out_deg_binary.numpy()
degree_binary = np.concatenate([np.expand_dims(in_deg_binary,-1), np.expand_dims(out_deg_binary,-1)], axis=-1)
return degree_binary
def run_STRAP(num_nodes, edges, weights, flag_directed_graph, flag_none_feature, time_budget, epsilon=1e6, dims=128):
file_path = os.path.dirname(__file__)
data_dir = os.path.join(file_path, 'NR_Dataset')
if not os.path.exists(data_dir):
os.makedirs(data_dir)
embed_dir = os.path.join(file_path, 'NR_EB')
if not os.path.exists(embed_dir):
os.makedirs(embed_dir)
edges = edges.numpy().transpose([1,0]).astype(np.int32)
num_edges = len(edges)
if num_edges < epsilon and num_nodes<1e5:
STRAP_epsilon = 1e-4
timeout = int(0.35*time_budget)
elif num_edges < 10*epsilon and num_nodes<1e5:
STRAP_epsilon = 5e-4
timeout = int(0.35*time_budget)
else:
STRAP_epsilon = 1e-3
timeout = int(0.35*time_budget)
np.save(os.path.join(data_dir,'STRAP.npy'), edges)
#run_commands = "./code_submission/temp_STRAP_FRPCA_U STRAP ./code_submission/NR_Dataset/ ./code_submission/NR_EB/ 0.5 12 0.0001 24"
STRAP_file = 'STRAP_FRPCA_D' if flag_directed_graph else 'STRAP_FRPCA_U'
try:
run_commands = ' '.join(['chmod','u+x',os.path.join(file_path,STRAP_file)])
cmd_return = subprocess.run(run_commands, shell=True, timeout=5)
#logger.info('chomod commands return: {}'.format(proc.returncode))
run_commands = ' '.join([os.path.join(file_path,STRAP_file),
'STRAP', data_dir+'/', embed_dir+'/',
'0.5 12', str(STRAP_epsilon), '8', str(dims), str(num_nodes)])
cmd_return = subprocess.run(run_commands.split(' '), shell=False, timeout=timeout)
flag_error = False
#logger.info('chomod commands return: {}'.format(proc.returncode))
except subprocess.TimeoutExpired as timeout_msg:
flag_error = True
logger.info('STRAP timeout! error msg: {}'.format(timeout_msg))
except Exception as err_msg:
flag_error = True
logger.info('STRAP failed with other errors! error msg: {}'.format(err_msg))
finally:
if not flag_error and _check_file_exist(file_path, flag_directed_graph):
if flag_directed_graph:
node_embed_u = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_d_U.npy'))
if np.isnan(node_embed_u).any():
node_embed_u[np.isnan(node_embed_u)] = 0.0
logger.info('find nan in node_embed_U')
node_embed_v = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_d_V.npy'))
if np.isnan(node_embed_v).any():
node_embed_v[np.isnan(node_embed_v)] = 0.0
logger.info('find nan in node_embed_V')
node_embed = np.concatenate([node_embed_u, node_embed_v], axis=1)
else:
node_embed = np.load(os.path.join(file_path, 'NR_EB/STRAP_strap_frpca_u_U.npy'))
if np.isnan(node_embed).any():
node_embed[np.isnan(node_embed)] = 0.0
logger.info('find nan in node_embed_U')
else:
logger.warn('Error: no such file!')
flag_error = True
node_embed = []
return flag_error, node_embed
def dim_reduction(x, use_normalizer=False):
#remove uninformative col
drop_col = [col for col in x.columns if x[col].var() == 0]
#all the features are uninformative except node_index
flag_none_feature = (len(drop_col) == len(x.columns)-1)
x = x.drop(drop_col,axis=1).to_numpy()
if not flag_none_feature and use_normalizer:
x = np.concatenate([x[:,0:1], normalize(x[:,1:].transpose(1,0))], axis=1)
return x, flag_none_feature
def feature_generation(x, y, n_class, edges, weights, flag_none_feature, flag_directed_graph, time_budget, use_label_distribution=False, use_node_degree=False, use_node_degree_binary=False, use_node_embed=True, use_one_hot_label=False):
added_features = list()
start_time = time.time()
num_nodes = x.shape[0]
if flag_none_feature and use_label_distribution:
label_distribution = get_neighbor_label_distribution(edges, y, n_class)
added_features.append(label_distribution)
logger.info('neighbor_label_distribution time cost: {}'.format(time.time() - start_time))
if use_node_degree:
node_degree = get_node_degree(edges, weights, num_nodes)
added_features.append(node_degree)
logger.info('degree time_cost: '.format(time.time() - start_time))
if use_node_degree_binary:
node_degree_binary = get_node_degree_binary(edges, weights, num_nodes)
added_features.append(node_degree_binary)
logger.info('degree_binary time_cost: {}'.format(time.time() - start_time))
if use_one_hot_label:
one_hot_label = get_one_hot_label(y, n_class)
added_features.append(one_hot_label)
logger.info('one_hot_label time_cost: '.format(time.time() - start_time))
if use_node_embed:
flag_error, node_embed = run_STRAP(num_nodes, edges, weights, flag_directed_graph, flag_none_feature, time_budget)
if not flag_error:
added_features.append(node_embed)
logger.info('node_embed time cost: {}'.format(time.time() - start_time))
return added_features
| if len(np.unique(x)) > len(x) * 0.5:
return True
x = get_value_counts_with_moving_average(x)
max_index = np.argmax(x)
min_index = np.argmax(-np.array(x))
if check_monotony(x, tol):
return True
elif check_monotony(x[:max_index+1], tol) and check_monotony(x[max_index:], tol):
return True
elif check_monotony(x[:min_index+1], tol) and check_monotony(x[min_index:], tol):
return True
else:
return False | identifier_body |
lib.rs | // lib.rs -- RUST wasm interface for Conways game of life
mod utils;
use quad_rand;
use js_sys;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
use web_sys::console;
// A macro to provide `println!(..)`-style syntax for `console.log` logging.
// allows use of log! macro ==> e.g.
// log!("cell[{}, {}] is initially {:?} and has {} neighbors",
// row, col, cell, neighbors);
// log!(" it becomes {:?}", next_cell);
macro_rules! log {
( $( $t:tt )* ) => {
console::log_1(&format!( $( $t )* ).into());
}
}
// Timer generic for using web_sys::console::time and timeEnd.
// Use new() constructor to call time and
// use drop(&mut self) to call timeEnd.
// So function wrapped with Timer will automatically be timed.
// Then let _timer = Timer::new("Universe::tick");
// will cause every call to tick() to be timed and logged on console
pub struct Timer<'a> {
name: &'a str,
}
impl<'a> Timer<'a> {
pub fn new(name: &'a str) -> Timer<'a> {
console::time_with_label(name);
Timer { name }
}
}
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
console::time_end_with_label(self.name);
}
}
// Define a cell for the 'Universe', each 1 byte
// use repr(u8) to ensure 1 byte unsigned values
//
// NOTE: Define Dead value as zero and alive as one allow simple summing
// to determine how many live cells.
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Cell {
Dead = 0,
Alive = 1
}
impl Cell {
fn toggle(&mut self) {
*self = match *self {
Cell::Dead => Cell::Alive,
Cell::Alive => Cell::Dead,
};
}
fn set_cell(&mut self, cell_state: Cell) {
//log!("set_cell ({:?})", cell_state);
*self = cell_state;
}
}
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum InitialPattern {
Complex1 = 0,
Random5050 = 1
}
// Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def)
// Give the width of the universe, each row of the universe is the next set
// of 'width' cells, starting with the first row from indexes 0:<width>
#[wasm_bindgen]
pub struct | {
width: u32, // width of each row
height: u32, // number of rows
cells: Vec<Cell>, // width*height cells, each one byte
prevcells: Vec<Cell>, // cells from previous tick
mousedown: bool // set when shift-click event, so that associated click ignored
}
// methods for Universe, but not exposed to JS
impl Universe
{
// get_index - Return 1D array index of Cell at position (row,column) in Universe
fn get_index(&self, row: u32, column: u32) -> usize
{
(row * self.width + column) as usize
}
// Count live neighbors of cell at (row, column)
fn live_neighbor_count(&self, row: u32, col: u32) -> u8
{
// avoid modulus, division slows us down as seen in profiling
let up = if row == 0 { self.height - 1 } else { row - 1 };
let down = if row == self.height - 1 { 0 } else { row + 1 };
let left = if col == 0 { self.width - 1 } else { col - 1 };
let right = if col == self.width - 1 { 0 } else { col + 1 };
let neighbors =
if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 };
neighbors
}
}
// standalone method, not part of Universe directly
fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> {
// expression generating Vec<Cell>
let cells = (0..width * height).map(|_i|
{
//if pattern == InitialPattern::Complex1 {
// // hardcode-pattern, depends on 8x8 definition
// if i % 2 == 0 || i % 7 == 0 {
// Cell::Alive
// } else {
// Cell::Dead
// }
// } else { // InitialPattern::Random5050
if quad_rand::gen_range(0, 20) == 0 {
Cell::Alive
} else {
Cell::Dead
}
// }
}).collect();
cells
}
fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> {
let count = cells.len();
let inverted_cells = (0..count).map(|i|
{
if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive }
}).collect();
inverted_cells
}
// Public methods, exposed to JS
#[wasm_bindgen]
impl Universe
{
pub fn width(&self) -> u32 {
self.width
}
pub fn height(&self) -> u32 {
self.height
}
// set_width -- set width of Universe, set all cells to Dead state
pub fn set_width(&mut self, width: u32) {
self.width = width;
self.cells =
(0..width * self.height)
.map(|_i| Cell::Dead).collect();
}
// Set the height of the Universe, set all cells to Dead state
pub fn set_height(&mut self, height: u32) {
self.height = height;
self.cells =
(0..self.width * height)
.map(|_i| Cell::Dead).collect();
}
pub fn get_cell_index(&self, row: u32, column: u32) -> u32
{
row * self.width + column
}
// return pointer to 1D array of byte Cell values to JS
// NOTE: *const Cell syntax
// => pointer to non-mutable array???
pub fn cells(&self) -> *const Cell {
self.cells.as_ptr()
}
pub fn prevcells(&self) -> *const Cell {
self.prevcells.as_ptr()
}
pub fn tick(&mut self)
{
let _timer = Timer::new("Universe::tick"); // times the method, timing in browser console
// NOTE: timing ended when _timer falls out of scope at end of method
let mut next = self.cells.clone(); // copy of current cells, modify ==> next state
self.prevcells = next.clone(); // previous cell values
// Determine next state of Universe by applying conways' 4 rules
for row in 0..self.height {
for col in 0..self.width {
let idx = self.get_index(row, col);
let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0)
let neighbors = self.live_neighbor_count(row, col);
let next_cell = match (cell, neighbors)
{
// Rule 1: any live cell with < 2 live neighbors dies, (loneliness)
(Cell::Alive, x) if x < 2 => Cell::Dead,
// Rule 2: any live cell with 2 to 3 live neighbors continues to live (stable)
(Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
// Rule 3: any live cell with > 3 live neighbors dies (overpopulation)
(Cell::Alive, x) if x > 3 => Cell::Dead,
// Rule 4: any dead cel with = 3 live neighbors comes alive (reproduction)
(Cell::Dead, 3) => Cell::Alive,
// Otherwise -- no change
(otherwise, _) => otherwise
};
next[idx] = next_cell;
}
}
self.cells = next; // next state for Universe determined
}
// toggle cell (row, column)
pub fn toggle_cell(&mut self, row: u32, column: u32) {
let idx = self.get_index(row, column);
self.cells[idx].toggle();
}
pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) {
let idx = self.get_index(row, column);
self.cells[idx].set_cell(valu);
}
// allow JS to determine if mousedown event occurring (shift-click)
pub fn is_mousedown(&self) -> bool {
return self.mousedown;
}
// allow JS to reset the mousedown value
pub fn set_mousedown_value(&mut self, valu: bool) {
self.mousedown = valu;
}
// Constructor, initialize the universe to hard-coded pattern
pub fn new() -> Universe
{
utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message
let now = js_sys::Date::now();
let now_date = js_sys::Date::new(&JsValue::from_f64(now));
let ms_u64: u64 = now_date.get_milliseconds() as u64;
quad_rand::srand(ms_u64); // u64
let width = 128; // was 64
let height = 128;
// Randomly decide whether to use Complex1 or Random5050
let _pattern: InitialPattern =
if quad_rand::gen_range(0, 2) == 0 {
InitialPattern::Complex1
} else {
InitialPattern::Random5050
};
let pattern = InitialPattern::Random5050;
let cells = generate_cells(width, height, pattern);
let prevcells = invert_cells(&cells);
let mousedown = false;
Universe
{
width,
height,
cells,
prevcells,
mousedown
}
}
pub fn reset_board(&mut self, pattern: InitialPattern) {
log!("reset_board() : {:?}", pattern);
let width = self.width();
let height = self.height();
self.prevcells = self.cells.clone(); // current grid, needed for correct redraw
self.cells = generate_cells(width, height, pattern);
}
}
// impl Universe block w/o wasm_bindgen attribute
// Needed for testing -- don't expose to our JS.
// Rust-generated WebAsm functions cannot return borrowed references.
// NOTE/SUGGEST: Try compiling the Rust-generated WebAsm with
// the wasm_bindgen attribute and examine errors.
// NOTE: get_cells returns borrowed reference &self.cells
impl Universe {
/// Get the dead and alive values of the entire universe.
pub fn get_cells(&self) -> &[Cell] {
&self.cells
}
/// Set specific cells in a universe to Alive, give slice of (row,col) Tuples.
pub fn set_cells(&mut self, cells: &[(u32, u32)]) {
for (row, col) in cells.iter().cloned() {
let idx = self.get_index(row, col);
self.cells[idx] = Cell::Alive;
// NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive
// claims immutable borrow on self.get_index call and
// mutable borrow later used here. (I don't follow personally.)
}
}
}
| Universe | identifier_name |
lib.rs | // lib.rs -- RUST wasm interface for Conways game of life
mod utils;
use quad_rand;
use js_sys;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
use web_sys::console;
// A macro to provide `println!(..)`-style syntax for `console.log` logging.
// allows use of log! macro ==> e.g.
// log!("cell[{}, {}] is initially {:?} and has {} neighbors",
// row, col, cell, neighbors);
// log!(" it becomes {:?}", next_cell);
macro_rules! log {
( $( $t:tt )* ) => {
console::log_1(&format!( $( $t )* ).into());
}
}
// Timer generic for using web_sys::console::time and timeEnd.
// Use new() constructor to call time and
// use drop(&mut self) to call timeEnd.
// So function wrapped with Timer will automatically be timed.
// Then let _timer = Timer::new("Universe::tick");
// will cause every call to tick() to be timed and logged on console
pub struct Timer<'a> {
name: &'a str,
}
impl<'a> Timer<'a> {
pub fn new(name: &'a str) -> Timer<'a> {
console::time_with_label(name);
Timer { name }
}
}
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
console::time_end_with_label(self.name);
}
}
// Define a cell for the 'Universe', each 1 byte
// use repr(u8) to ensure 1 byte unsigned values
//
// NOTE: Define Dead value as zero and alive as one allow simple summing
// to determine how many live cells.
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Cell {
Dead = 0,
Alive = 1
}
impl Cell {
fn toggle(&mut self) {
*self = match *self {
Cell::Dead => Cell::Alive,
Cell::Alive => Cell::Dead,
};
}
fn set_cell(&mut self, cell_state: Cell) {
//log!("set_cell ({:?})", cell_state);
*self = cell_state;
}
}
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum InitialPattern {
Complex1 = 0,
Random5050 = 1
}
// Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def)
// Give the width of the universe, each row of the universe is the next set
// of 'width' cells, starting with the first row from indexes 0:<width>
#[wasm_bindgen]
pub struct Universe {
width: u32, // width of each row
height: u32, // number of rows
cells: Vec<Cell>, // width*height cells, each one byte
prevcells: Vec<Cell>, // cells from previous tick
mousedown: bool // set when shift-click event, so that associated click ignored
}
// methods for Universe, but not exposed to JS
impl Universe
{
// get_index - Return 1D array index of Cell at position (row,column) in Universe
fn get_index(&self, row: u32, column: u32) -> usize
{
(row * self.width + column) as usize
}
// Count live neighbors of cell at (row, column)
fn live_neighbor_count(&self, row: u32, col: u32) -> u8
{
// avoid modulus, division slows us down as seen in profiling
let up = if row == 0 { self.height - 1 } else { row - 1 };
let down = if row == self.height - 1 { 0 } else { row + 1 };
let left = if col == 0 { self.width - 1 } else { col - 1 };
let right = if col == self.width - 1 { 0 } else { col + 1 };
let neighbors =
if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 };
neighbors
}
}
// standalone method, not part of Universe directly
fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> {
// expression generating Vec<Cell>
let cells = (0..width * height).map(|_i|
{
//if pattern == InitialPattern::Complex1 {
// // hardcode-pattern, depends on 8x8 definition
// if i % 2 == 0 || i % 7 == 0 {
// Cell::Alive
// } else {
// Cell::Dead
// }
// } else { // InitialPattern::Random5050
if quad_rand::gen_range(0, 20) == 0 {
Cell::Alive
} else {
Cell::Dead
}
// }
}).collect();
cells
}
fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> {
let count = cells.len();
let inverted_cells = (0..count).map(|i|
{
if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive }
}).collect();
inverted_cells
}
// Public methods, exposed to JS
#[wasm_bindgen]
impl Universe
{
pub fn width(&self) -> u32 {
self.width
}
pub fn height(&self) -> u32 {
self.height
}
// set_width -- set width of Universe, set all cells to Dead state
pub fn set_width(&mut self, width: u32) {
self.width = width;
self.cells =
(0..width * self.height)
.map(|_i| Cell::Dead).collect();
}
// Set the height of the Universe, set all cells to Dead state
pub fn set_height(&mut self, height: u32) {
self.height = height;
self.cells =
(0..self.width * height)
.map(|_i| Cell::Dead).collect();
}
pub fn get_cell_index(&self, row: u32, column: u32) -> u32
{
row * self.width + column
}
// return pointer to 1D array of byte Cell values to JS
// NOTE: *const Cell syntax
// => pointer to non-mutable array???
pub fn cells(&self) -> *const Cell {
self.cells.as_ptr()
}
pub fn prevcells(&self) -> *const Cell {
self.prevcells.as_ptr()
}
pub fn tick(&mut self)
{
let _timer = Timer::new("Universe::tick"); // times the method, timing in browser console
// NOTE: timing ended when _timer falls out of scope at end of method
let mut next = self.cells.clone(); // copy of current cells, modify ==> next state
self.prevcells = next.clone(); // previous cell values
// Determine next state of Universe by applying conways' 4 rules
for row in 0..self.height {
for col in 0..self.width {
let idx = self.get_index(row, col);
let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0)
let neighbors = self.live_neighbor_count(row, col);
let next_cell = match (cell, neighbors)
{
// Rule 1: any live cell with < 2 live neighbors dies, (loneliness)
(Cell::Alive, x) if x < 2 => Cell::Dead,
// Rule 2: any live cell with 2 to 3 live neighbors continues to live (stable)
(Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
// Rule 3: any live cell with > 3 live neighbors dies (overpopulation)
(Cell::Alive, x) if x > 3 => Cell::Dead,
// Rule 4: any dead cel with = 3 live neighbors comes alive (reproduction)
(Cell::Dead, 3) => Cell::Alive,
// Otherwise -- no change
(otherwise, _) => otherwise
};
next[idx] = next_cell;
}
}
self.cells = next; // next state for Universe determined
}
// toggle cell (row, column)
pub fn toggle_cell(&mut self, row: u32, column: u32) {
let idx = self.get_index(row, column);
self.cells[idx].toggle();
}
pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) {
let idx = self.get_index(row, column);
self.cells[idx].set_cell(valu);
}
// allow JS to determine if mousedown event occurring (shift-click)
pub fn is_mousedown(&self) -> bool {
return self.mousedown;
}
// allow JS to reset the mousedown value
pub fn set_mousedown_value(&mut self, valu: bool) {
self.mousedown = valu;
}
// Constructor, initialize the universe to hard-coded pattern
pub fn new() -> Universe
{
utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message
let now = js_sys::Date::now();
let now_date = js_sys::Date::new(&JsValue::from_f64(now));
let ms_u64: u64 = now_date.get_milliseconds() as u64;
quad_rand::srand(ms_u64); // u64
let width = 128; // was 64
let height = 128;
// Randomly decide whether to use Complex1 or Random5050
let _pattern: InitialPattern =
if quad_rand::gen_range(0, 2) == 0 {
InitialPattern::Complex1
} else {
InitialPattern::Random5050
};
let pattern = InitialPattern::Random5050;
let cells = generate_cells(width, height, pattern);
let prevcells = invert_cells(&cells);
let mousedown = false;
Universe
{
width,
height,
cells,
prevcells,
mousedown
}
}
pub fn reset_board(&mut self, pattern: InitialPattern) |
}
// impl Universe block w/o wasm_bindgen attribute
// Needed for testing -- don't expose to our JS.
// Rust-generated WebAsm functions cannot return borrowed references.
// NOTE/SUGGEST: Try compiling the Rust-generated WebAsm with
// the wasm_bindgen attribute and examine errors.
// NOTE: get_cells returns borrowed reference &self.cells
impl Universe {
/// Get the dead and alive values of the entire universe.
pub fn get_cells(&self) -> &[Cell] {
&self.cells
}
/// Set specific cells in a universe to Alive, give slice of (row,col) Tuples.
pub fn set_cells(&mut self, cells: &[(u32, u32)]) {
for (row, col) in cells.iter().cloned() {
let idx = self.get_index(row, col);
self.cells[idx] = Cell::Alive;
// NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive
// claims immutable borrow on self.get_index call and
// mutable borrow later used here. (I don't follow personally.)
}
}
}
| {
log!("reset_board() : {:?}", pattern);
let width = self.width();
let height = self.height();
self.prevcells = self.cells.clone(); // current grid, needed for correct redraw
self.cells = generate_cells(width, height, pattern);
} | identifier_body |
lib.rs | // lib.rs -- RUST wasm interface for Conways game of life
mod utils;
use quad_rand;
use js_sys;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
use web_sys::console;
// A macro to provide `println!(..)`-style syntax for `console.log` logging.
// allows use of log! macro ==> e.g.
// log!("cell[{}, {}] is initially {:?} and has {} neighbors",
// row, col, cell, neighbors);
// log!(" it becomes {:?}", next_cell);
macro_rules! log {
( $( $t:tt )* ) => {
console::log_1(&format!( $( $t )* ).into());
}
}
// Timer generic for using web_sys::console::time and timeEnd.
// Use new() constructor to call time and
// use drop(&mut self) to call timeEnd.
// So function wrapped with Timer will automatically be timed.
// Then let _timer = Timer::new("Universe::tick");
// will cause every call to tick() to be timed and logged on console
pub struct Timer<'a> {
name: &'a str,
}
impl<'a> Timer<'a> {
pub fn new(name: &'a str) -> Timer<'a> {
console::time_with_label(name);
Timer { name }
}
}
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
console::time_end_with_label(self.name);
}
}
// Define a cell for the 'Universe', each 1 byte
// use repr(u8) to ensure 1 byte unsigned values
//
// NOTE: Define Dead value as zero and alive as one allow simple summing
// to determine how many live cells.
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Cell {
Dead = 0,
Alive = 1
}
impl Cell {
fn toggle(&mut self) {
*self = match *self {
Cell::Dead => Cell::Alive,
Cell::Alive => Cell::Dead,
};
}
fn set_cell(&mut self, cell_state: Cell) {
//log!("set_cell ({:?})", cell_state);
*self = cell_state;
}
}
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum InitialPattern {
Complex1 = 0,
Random5050 = 1
}
// Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def)
// Give the width of the universe, each row of the universe is the next set
// of 'width' cells, starting with the first row from indexes 0:<width>
#[wasm_bindgen]
pub struct Universe {
width: u32, // width of each row
height: u32, // number of rows
cells: Vec<Cell>, // width*height cells, each one byte
prevcells: Vec<Cell>, // cells from previous tick
mousedown: bool // set when shift-click event, so that associated click ignored
}
// methods for Universe, but not exposed to JS
impl Universe
{
// get_index - Return 1D array index of Cell at position (row,column) in Universe
fn get_index(&self, row: u32, column: u32) -> usize
{
(row * self.width + column) as usize
}
// Count live neighbors of cell at (row, column)
fn live_neighbor_count(&self, row: u32, col: u32) -> u8
{
// avoid modulus, division slows us down as seen in profiling
let up = if row == 0 { self.height - 1 } else { row - 1 };
let down = if row == self.height - 1 { 0 } else { row + 1 };
let left = if col == 0 { self.width - 1 } else { col - 1 };
let right = if col == self.width - 1 { 0 } else { col + 1 };
let neighbors =
if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else |
+ if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 };
neighbors
}
}
// standalone method, not part of Universe directly
fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> {
// expression generating Vec<Cell>
let cells = (0..width * height).map(|_i|
{
//if pattern == InitialPattern::Complex1 {
// // hardcode-pattern, depends on 8x8 definition
// if i % 2 == 0 || i % 7 == 0 {
// Cell::Alive
// } else {
// Cell::Dead
// }
// } else { // InitialPattern::Random5050
if quad_rand::gen_range(0, 20) == 0 {
Cell::Alive
} else {
Cell::Dead
}
// }
}).collect();
cells
}
fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> {
let count = cells.len();
let inverted_cells = (0..count).map(|i|
{
if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive }
}).collect();
inverted_cells
}
// Public methods, exposed to JS
#[wasm_bindgen]
impl Universe
{
pub fn width(&self) -> u32 {
self.width
}
pub fn height(&self) -> u32 {
self.height
}
// set_width -- set width of Universe, set all cells to Dead state
pub fn set_width(&mut self, width: u32) {
self.width = width;
self.cells =
(0..width * self.height)
.map(|_i| Cell::Dead).collect();
}
// Set the height of the Universe, set all cells to Dead state
pub fn set_height(&mut self, height: u32) {
self.height = height;
self.cells =
(0..self.width * height)
.map(|_i| Cell::Dead).collect();
}
pub fn get_cell_index(&self, row: u32, column: u32) -> u32
{
row * self.width + column
}
// return pointer to 1D array of byte Cell values to JS
// NOTE: *const Cell syntax
// => pointer to non-mutable array???
pub fn cells(&self) -> *const Cell {
self.cells.as_ptr()
}
pub fn prevcells(&self) -> *const Cell {
self.prevcells.as_ptr()
}
pub fn tick(&mut self)
{
let _timer = Timer::new("Universe::tick"); // times the method, timing in browser console
// NOTE: timing ended when _timer falls out of scope at end of method
let mut next = self.cells.clone(); // copy of current cells, modify ==> next state
self.prevcells = next.clone(); // previous cell values
// Determine next state of Universe by applying conways' 4 rules
for row in 0..self.height {
for col in 0..self.width {
let idx = self.get_index(row, col);
let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0)
let neighbors = self.live_neighbor_count(row, col);
let next_cell = match (cell, neighbors)
{
// Rule 1: any live cell with < 2 live neighbors dies, (loneliness)
(Cell::Alive, x) if x < 2 => Cell::Dead,
// Rule 2: any live cell with 2 to 3 live neighbors continues to live (stable)
(Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
// Rule 3: any live cell with > 3 live neighbors dies (overpopulation)
(Cell::Alive, x) if x > 3 => Cell::Dead,
// Rule 4: any dead cel with = 3 live neighbors comes alive (reproduction)
(Cell::Dead, 3) => Cell::Alive,
// Otherwise -- no change
(otherwise, _) => otherwise
};
next[idx] = next_cell;
}
}
self.cells = next; // next state for Universe determined
}
// toggle cell (row, column)
pub fn toggle_cell(&mut self, row: u32, column: u32) {
let idx = self.get_index(row, column);
self.cells[idx].toggle();
}
pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) {
let idx = self.get_index(row, column);
self.cells[idx].set_cell(valu);
}
// allow JS to determine if mousedown event occurring (shift-click)
pub fn is_mousedown(&self) -> bool {
return self.mousedown;
}
// allow JS to reset the mousedown value
pub fn set_mousedown_value(&mut self, valu: bool) {
self.mousedown = valu;
}
// Constructor, initialize the universe to hard-coded pattern
pub fn new() -> Universe
{
utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message
let now = js_sys::Date::now();
let now_date = js_sys::Date::new(&JsValue::from_f64(now));
let ms_u64: u64 = now_date.get_milliseconds() as u64;
quad_rand::srand(ms_u64); // u64
let width = 128; // was 64
let height = 128;
// Randomly decide whether to use Complex1 or Random5050
let _pattern: InitialPattern =
if quad_rand::gen_range(0, 2) == 0 {
InitialPattern::Complex1
} else {
InitialPattern::Random5050
};
let pattern = InitialPattern::Random5050;
let cells = generate_cells(width, height, pattern);
let prevcells = invert_cells(&cells);
let mousedown = false;
Universe
{
width,
height,
cells,
prevcells,
mousedown
}
}
pub fn reset_board(&mut self, pattern: InitialPattern) {
log!("reset_board() : {:?}", pattern);
let width = self.width();
let height = self.height();
self.prevcells = self.cells.clone(); // current grid, needed for correct redraw
self.cells = generate_cells(width, height, pattern);
}
}
// impl Universe block w/o wasm_bindgen attribute
// Needed for testing -- don't expose to our JS.
// Rust-generated WebAsm functions cannot return borrowed references.
// NOTE/SUGGEST: Try compiling the Rust-generated WebAsm with
// the wasm_bindgen attribute and examine errors.
// NOTE: get_cells returns borrowed reference &self.cells
impl Universe {
/// Get the dead and alive values of the entire universe.
pub fn get_cells(&self) -> &[Cell] {
&self.cells
}
/// Set specific cells in a universe to Alive, give slice of (row,col) Tuples.
pub fn set_cells(&mut self, cells: &[(u32, u32)]) {
for (row, col) in cells.iter().cloned() {
let idx = self.get_index(row, col);
self.cells[idx] = Cell::Alive;
// NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive
// claims immutable borrow on self.get_index call and
// mutable borrow later used here. (I don't follow personally.)
}
}
}
| { 0 } | conditional_block |
lib.rs | // lib.rs -- RUST wasm interface for Conways game of life
mod utils;
use quad_rand;
use js_sys;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
use web_sys::console;
// A macro to provide `println!(..)`-style syntax for `console.log` logging.
// allows use of log! macro ==> e.g.
// log!("cell[{}, {}] is initially {:?} and has {} neighbors",
// row, col, cell, neighbors);
// log!(" it becomes {:?}", next_cell);
macro_rules! log {
( $( $t:tt )* ) => {
console::log_1(&format!( $( $t )* ).into());
}
}
// Timer generic for using web_sys::console::time and timeEnd.
// Use new() constructor to call time and
// use drop(&mut self) to call timeEnd.
// So function wrapped with Timer will automatically be timed.
// Then let _timer = Timer::new("Universe::tick");
// will cause every call to tick() to be timed and logged on console
pub struct Timer<'a> {
name: &'a str,
}
impl<'a> Timer<'a> {
pub fn new(name: &'a str) -> Timer<'a> {
console::time_with_label(name);
Timer { name }
}
}
impl<'a> Drop for Timer<'a> {
fn drop(&mut self) {
console::time_end_with_label(self.name);
}
}
// Define a cell for the 'Universe', each 1 byte
// use repr(u8) to ensure 1 byte unsigned values
//
// NOTE: Define Dead value as zero and alive as one allow simple summing
// to determine how many live cells.
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Cell {
Dead = 0,
Alive = 1
}
impl Cell {
fn toggle(&mut self) {
*self = match *self {
Cell::Dead => Cell::Alive,
Cell::Alive => Cell::Dead,
};
}
fn set_cell(&mut self, cell_state: Cell) {
//log!("set_cell ({:?})", cell_state);
*self = cell_state;
}
}
#[wasm_bindgen]
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum InitialPattern {
Complex1 = 0,
Random5050 = 1
}
// Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def)
// Give the width of the universe, each row of the universe is the next set
// of 'width' cells, starting with the first row from indexes 0:<width>
#[wasm_bindgen]
pub struct Universe {
width: u32, // width of each row
height: u32, // number of rows
cells: Vec<Cell>, // width*height cells, each one byte
prevcells: Vec<Cell>, // cells from previous tick
mousedown: bool // set when shift-click event, so that associated click ignored
}
// methods for Universe, but not exposed to JS
impl Universe
{
// get_index - Return 1D array index of Cell at position (row,column) in Universe
fn get_index(&self, row: u32, column: u32) -> usize
{
(row * self.width + column) as usize
}
// Count live neighbors of cell at (row, column)
fn live_neighbor_count(&self, row: u32, col: u32) -> u8
{
// avoid modulus, division slows us down as seen in profiling
let up = if row == 0 { self.height - 1 } else { row - 1 };
let down = if row == self.height - 1 { 0 } else { row + 1 };
let left = if col == 0 { self.width - 1 } else { col - 1 };
let right = if col == self.width - 1 { 0 } else { col + 1 };
let neighbors =
if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else { 0 }
+ if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 };
neighbors
}
}
// standalone method, not part of Universe directly
fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> {
// expression generating Vec<Cell>
let cells = (0..width * height).map(|_i|
{
//if pattern == InitialPattern::Complex1 {
// // hardcode-pattern, depends on 8x8 definition
// if i % 2 == 0 || i % 7 == 0 {
// Cell::Alive
// } else {
// Cell::Dead
// }
// } else { // InitialPattern::Random5050
if quad_rand::gen_range(0, 20) == 0 {
Cell::Alive
} else {
Cell::Dead
}
// }
}).collect();
cells
}
fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> {
let count = cells.len();
| }).collect();
inverted_cells
}
// Public methods, exposed to JS
#[wasm_bindgen]
impl Universe
{
pub fn width(&self) -> u32 {
self.width
}
pub fn height(&self) -> u32 {
self.height
}
// set_width -- set width of Universe, set all cells to Dead state
pub fn set_width(&mut self, width: u32) {
self.width = width;
self.cells =
(0..width * self.height)
.map(|_i| Cell::Dead).collect();
}
// Set the height of the Universe, set all cells to Dead state
pub fn set_height(&mut self, height: u32) {
self.height = height;
self.cells =
(0..self.width * height)
.map(|_i| Cell::Dead).collect();
}
pub fn get_cell_index(&self, row: u32, column: u32) -> u32
{
row * self.width + column
}
// return pointer to 1D array of byte Cell values to JS
// NOTE: *const Cell syntax
// => pointer to non-mutable array???
pub fn cells(&self) -> *const Cell {
self.cells.as_ptr()
}
pub fn prevcells(&self) -> *const Cell {
self.prevcells.as_ptr()
}
pub fn tick(&mut self)
{
let _timer = Timer::new("Universe::tick"); // times the method, timing in browser console
// NOTE: timing ended when _timer falls out of scope at end of method
let mut next = self.cells.clone(); // copy of current cells, modify ==> next state
self.prevcells = next.clone(); // previous cell values
// Determine next state of Universe by applying conways' 4 rules
for row in 0..self.height {
for col in 0..self.width {
let idx = self.get_index(row, col);
let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0)
let neighbors = self.live_neighbor_count(row, col);
let next_cell = match (cell, neighbors)
{
// Rule 1: any live cell with < 2 live neighbors dies, (loneliness)
(Cell::Alive, x) if x < 2 => Cell::Dead,
// Rule 2: any live cell with 2 to 3 live neighbors continues to live (stable)
(Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive,
// Rule 3: any live cell with > 3 live neighbors dies (overpopulation)
(Cell::Alive, x) if x > 3 => Cell::Dead,
// Rule 4: any dead cel with = 3 live neighbors comes alive (reproduction)
(Cell::Dead, 3) => Cell::Alive,
// Otherwise -- no change
(otherwise, _) => otherwise
};
next[idx] = next_cell;
}
}
self.cells = next; // next state for Universe determined
}
// toggle cell (row, column)
pub fn toggle_cell(&mut self, row: u32, column: u32) {
let idx = self.get_index(row, column);
self.cells[idx].toggle();
}
pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) {
let idx = self.get_index(row, column);
self.cells[idx].set_cell(valu);
}
// allow JS to determine if mousedown event occurring (shift-click)
pub fn is_mousedown(&self) -> bool {
return self.mousedown;
}
// allow JS to reset the mousedown value
pub fn set_mousedown_value(&mut self, valu: bool) {
self.mousedown = valu;
}
// Constructor, initialize the universe to hard-coded pattern
pub fn new() -> Universe
{
utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message
let now = js_sys::Date::now();
let now_date = js_sys::Date::new(&JsValue::from_f64(now));
let ms_u64: u64 = now_date.get_milliseconds() as u64;
quad_rand::srand(ms_u64); // u64
let width = 128; // was 64
let height = 128;
// Randomly decide whether to use Complex1 or Random5050
let _pattern: InitialPattern =
if quad_rand::gen_range(0, 2) == 0 {
InitialPattern::Complex1
} else {
InitialPattern::Random5050
};
let pattern = InitialPattern::Random5050;
let cells = generate_cells(width, height, pattern);
let prevcells = invert_cells(&cells);
let mousedown = false;
Universe
{
width,
height,
cells,
prevcells,
mousedown
}
}
pub fn reset_board(&mut self, pattern: InitialPattern) {
log!("reset_board() : {:?}", pattern);
let width = self.width();
let height = self.height();
self.prevcells = self.cells.clone(); // current grid, needed for correct redraw
self.cells = generate_cells(width, height, pattern);
}
}
// impl Universe block w/o wasm_bindgen attribute
// Needed for testing -- don't expose to our JS.
// Rust-generated WebAsm functions cannot return borrowed references.
// NOTE/SUGGEST: Try compiling the Rust-generated WebAsm with
// the wasm_bindgen attribute and examine errors.
// NOTE: get_cells returns borrowed reference &self.cells
impl Universe {
/// Get the dead and alive values of the entire universe.
pub fn get_cells(&self) -> &[Cell] {
&self.cells
}
/// Set specific cells in a universe to Alive, give slice of (row,col) Tuples.
pub fn set_cells(&mut self, cells: &[(u32, u32)]) {
for (row, col) in cells.iter().cloned() {
let idx = self.get_index(row, col);
self.cells[idx] = Cell::Alive;
// NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive
// claims immutable borrow on self.get_index call and
// mutable borrow later used here. (I don't follow personally.)
}
}
} | let inverted_cells = (0..count).map(|i|
{
if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive } | random_line_split |
mod.rs | //! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
//!
//! There are three phases to this analysis:
//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
//! and also build a control flow graph.
//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
//! the CFG and find the exact points where we know a value is definitely dropped.
//!
//! The end result is a data structure that maps the post-order index of each node in the HIR tree
//! to a set of values that are known to be dropped at that location. | use crate::FnCtxt;
use hir::def_id::DefId;
use hir::{Body, HirId, HirIdMap, Node};
use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::hir::map::Map;
use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
use rustc_middle::ty;
use std::collections::BTreeMap;
use std::fmt::Debug;
mod cfg_build;
mod cfg_propagate;
mod cfg_visualize;
mod record_consumed_borrow;
pub fn compute_drop_ranges<'a, 'tcx>(
fcx: &'a FnCtxt<'a, 'tcx>,
def_id: DefId,
body: &'tcx Body<'tcx>,
) -> DropRanges {
if fcx.sess().opts.unstable_opts.drop_tracking {
let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
let typeck_results = &fcx.typeck_results.borrow();
let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
&fcx,
typeck_results,
fcx.param_env,
consumed_borrowed_places,
body,
num_exprs,
);
drop_ranges.propagate_to_fixpoint();
debug!("borrowed_temporaries = {borrowed_temporaries:?}");
DropRanges {
tracked_value_map: drop_ranges.tracked_value_map,
nodes: drop_ranges.nodes,
borrowed_temporaries: Some(borrowed_temporaries),
}
} else {
// If drop range tracking is not enabled, skip all the analysis and produce an
// empty set of DropRanges.
DropRanges {
tracked_value_map: UnordMap::default(),
nodes: IndexVec::new(),
borrowed_temporaries: None,
}
}
}
/// Applies `f` to consumable node in the HIR subtree pointed to by `place`.
///
/// This includes the place itself, and if the place is a reference to a local
/// variable then `f` is also called on the HIR node for that variable as well.
///
/// For example, if `place` points to `foo()`, then `f` is called once for the
/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
/// be called both on the `ExprKind::Path` node that represents the expression
/// as well as the HirId of the local `x` itself.
fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
f(place);
let node = hir.find(place.hir_id());
if let Some(Node::Expr(expr)) = node {
match expr.kind {
hir::ExprKind::Path(hir::QPath::Resolved(
_,
hir::Path { res: hir::def::Res::Local(hir_id), .. },
)) => {
f(TrackedValue::Variable(*hir_id));
}
_ => (),
}
}
}
rustc_index::newtype_index! {
#[debug_format = "id({})"]
pub struct PostOrderId {}
}
rustc_index::newtype_index! {
#[debug_format = "hidx({})"]
pub struct TrackedValueIndex {}
}
/// Identifies a value whose drop state we need to track.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
enum TrackedValue {
/// Represents a named variable, such as a let binding, parameter, or upvar.
///
/// The HirId points to the variable's definition site.
Variable(HirId),
/// A value produced as a result of an expression.
///
/// The HirId points to the expression that returns this value.
Temporary(HirId),
}
impl Debug for TrackedValue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
ty::tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx {
write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
} else {
match self {
Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"),
Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"),
}
}
})
}
}
impl TrackedValue {
fn hir_id(&self) -> HirId {
match self {
TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
}
}
fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
match place_with_id.place.base {
PlaceBase::Rvalue | PlaceBase::StaticItem => {
TrackedValue::Temporary(place_with_id.hir_id)
}
PlaceBase::Local(hir_id)
| PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
TrackedValue::Variable(hir_id)
}
}
}
}
/// Represents a reason why we might not be able to convert a HirId or Place
/// into a tracked value.
#[derive(Debug)]
enum TrackedValueConversionError {
/// Place projects are not currently supported.
///
/// The reasoning around these is kind of subtle, so we choose to be more
/// conservative around these for now. There is no reason in theory we
/// cannot support these, we just have not implemented it yet.
PlaceProjectionsNotSupported,
}
impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
type Error = TrackedValueConversionError;
fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
if !place_with_id.place.projections.is_empty() {
debug!(
"TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
place_with_id
);
return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
}
Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
}
}
pub struct DropRanges {
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
nodes: IndexVec<PostOrderId, NodeInfo>,
borrowed_temporaries: Option<UnordSet<HirId>>,
}
impl DropRanges {
pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
self.tracked_value_map
.get(&TrackedValue::Temporary(hir_id))
.or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
.cloned()
.is_some_and(|tracked_value_id| {
self.expect_node(location.into()).drop_state.contains(tracked_value_id)
})
}
pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
}
/// Returns a reference to the NodeInfo for a node, panicking if it does not exist
fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
&self.nodes[id]
}
}
/// Tracks information needed to compute drop ranges.
struct DropRangesBuilder {
/// The core of DropRangesBuilder is a set of nodes, which each represent
/// one expression. We primarily refer to them by their index in a
/// post-order traversal of the HIR tree, since this is what
/// generator_interior uses to talk about yield positions.
///
/// This IndexVec keeps the relevant details for each node. See the
/// NodeInfo struct for more details, but this information includes things
/// such as the set of control-flow successors, which variables are dropped
/// or reinitialized, and whether each variable has been inferred to be
/// known-dropped or potentially reinitialized at each point.
nodes: IndexVec<PostOrderId, NodeInfo>,
/// We refer to values whose drop state we are tracking by the HirId of
/// where they are defined. Within a NodeInfo, however, we store the
/// drop-state in a bit vector indexed by a HirIdIndex
/// (see NodeInfo::drop_state). The hir_id_map field stores the mapping
/// from HirIds to the HirIdIndex that is used to represent that value in
/// bitvector.
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
/// When building the control flow graph, we don't always know the
/// post-order index of the target node at the point we encounter it.
/// For example, this happens with break and continue. In those cases,
/// we store a pair of the PostOrderId of the source and the HirId
/// of the target. Once we have gathered all of these edges, we make a
/// pass over the set of deferred edges (see process_deferred_edges in
/// cfg_build.rs), look up the PostOrderId for the target (since now the
/// post-order index for all nodes is known), and add missing control flow
/// edges.
deferred_edges: Vec<(PostOrderId, HirId)>,
/// This maps HirIds of expressions to their post-order index. It is
/// used in process_deferred_edges to correctly add back-edges.
post_order_map: HirIdMap<PostOrderId>,
}
impl Debug for DropRangesBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DropRanges")
.field("hir_id_map", &self.tracked_value_map)
.field("post_order_maps", &self.post_order_map)
.field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
.finish()
}
}
/// DropRanges keeps track of what values are definitely dropped at each point in the code.
///
/// Values of interest are defined by the hir_id of their place. Locations in code are identified
/// by their index in the post-order traversal. At its core, DropRanges maps
/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
/// dropped at the point of the node identified by post_order_id.
impl DropRangesBuilder {
/// Returns the number of values (hir_ids) that are tracked
fn num_values(&self) -> usize {
self.tracked_value_map.len()
}
fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
let size = self.num_values();
self.nodes.ensure_contains_elem(id, || NodeInfo::new(size))
}
fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
trace!("adding control edge from {:?} to {:?}", from, to);
self.node_mut(from).successors.push(to);
}
}
#[derive(Debug)]
struct NodeInfo {
/// IDs of nodes that can follow this one in the control flow
///
/// If the vec is empty, then control proceeds to the next node.
successors: Vec<PostOrderId>,
/// List of hir_ids that are dropped by this node.
drops: Vec<TrackedValueIndex>,
/// List of hir_ids that are reinitialized by this node.
reinits: Vec<TrackedValueIndex>,
/// Set of values that are definitely dropped at this point.
drop_state: BitSet<TrackedValueIndex>,
}
impl NodeInfo {
fn new(num_values: usize) -> Self {
Self {
successors: vec![],
drops: vec![],
reinits: vec![],
drop_state: BitSet::new_filled(num_values),
}
}
} |
use self::cfg_build::build_control_flow_graph;
use self::record_consumed_borrow::find_consumed_and_borrowed; | random_line_split |
mod.rs | //! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
//!
//! There are three phases to this analysis:
//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
//! and also build a control flow graph.
//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
//! the CFG and find the exact points where we know a value is definitely dropped.
//!
//! The end result is a data structure that maps the post-order index of each node in the HIR tree
//! to a set of values that are known to be dropped at that location.
use self::cfg_build::build_control_flow_graph;
use self::record_consumed_borrow::find_consumed_and_borrowed;
use crate::FnCtxt;
use hir::def_id::DefId;
use hir::{Body, HirId, HirIdMap, Node};
use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::hir::map::Map;
use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
use rustc_middle::ty;
use std::collections::BTreeMap;
use std::fmt::Debug;
mod cfg_build;
mod cfg_propagate;
mod cfg_visualize;
mod record_consumed_borrow;
pub fn compute_drop_ranges<'a, 'tcx>(
fcx: &'a FnCtxt<'a, 'tcx>,
def_id: DefId,
body: &'tcx Body<'tcx>,
) -> DropRanges {
if fcx.sess().opts.unstable_opts.drop_tracking {
let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
let typeck_results = &fcx.typeck_results.borrow();
let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
&fcx,
typeck_results,
fcx.param_env,
consumed_borrowed_places,
body,
num_exprs,
);
drop_ranges.propagate_to_fixpoint();
debug!("borrowed_temporaries = {borrowed_temporaries:?}");
DropRanges {
tracked_value_map: drop_ranges.tracked_value_map,
nodes: drop_ranges.nodes,
borrowed_temporaries: Some(borrowed_temporaries),
}
} else {
// If drop range tracking is not enabled, skip all the analysis and produce an
// empty set of DropRanges.
DropRanges {
tracked_value_map: UnordMap::default(),
nodes: IndexVec::new(),
borrowed_temporaries: None,
}
}
}
/// Applies `f` to consumable node in the HIR subtree pointed to by `place`.
///
/// This includes the place itself, and if the place is a reference to a local
/// variable then `f` is also called on the HIR node for that variable as well.
///
/// For example, if `place` points to `foo()`, then `f` is called once for the
/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
/// be called both on the `ExprKind::Path` node that represents the expression
/// as well as the HirId of the local `x` itself.
fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
f(place);
let node = hir.find(place.hir_id());
if let Some(Node::Expr(expr)) = node {
match expr.kind {
hir::ExprKind::Path(hir::QPath::Resolved(
_,
hir::Path { res: hir::def::Res::Local(hir_id), .. },
)) => {
f(TrackedValue::Variable(*hir_id));
}
_ => (),
}
}
}
rustc_index::newtype_index! {
#[debug_format = "id({})"]
pub struct PostOrderId {}
}
rustc_index::newtype_index! {
#[debug_format = "hidx({})"]
pub struct TrackedValueIndex {}
}
/// Identifies a value whose drop state we need to track.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
enum TrackedValue {
/// Represents a named variable, such as a let binding, parameter, or upvar.
///
/// The HirId points to the variable's definition site.
Variable(HirId),
/// A value produced as a result of an expression.
///
/// The HirId points to the expression that returns this value.
Temporary(HirId),
}
impl Debug for TrackedValue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
ty::tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx {
write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
} else {
match self {
Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"),
Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"),
}
}
})
}
}
impl TrackedValue {
fn hir_id(&self) -> HirId {
match self {
TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
}
}
fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
match place_with_id.place.base {
PlaceBase::Rvalue | PlaceBase::StaticItem => {
TrackedValue::Temporary(place_with_id.hir_id)
}
PlaceBase::Local(hir_id)
| PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
TrackedValue::Variable(hir_id)
}
}
}
}
/// Represents a reason why we might not be able to convert a HirId or Place
/// into a tracked value.
#[derive(Debug)]
enum TrackedValueConversionError {
/// Place projects are not currently supported.
///
/// The reasoning around these is kind of subtle, so we choose to be more
/// conservative around these for now. There is no reason in theory we
/// cannot support these, we just have not implemented it yet.
PlaceProjectionsNotSupported,
}
impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
type Error = TrackedValueConversionError;
fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
if !place_with_id.place.projections.is_empty() {
debug!(
"TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
place_with_id
);
return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
}
Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
}
}
pub struct DropRanges {
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
nodes: IndexVec<PostOrderId, NodeInfo>,
borrowed_temporaries: Option<UnordSet<HirId>>,
}
impl DropRanges {
pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
self.tracked_value_map
.get(&TrackedValue::Temporary(hir_id))
.or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
.cloned()
.is_some_and(|tracked_value_id| {
self.expect_node(location.into()).drop_state.contains(tracked_value_id)
})
}
pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
}
/// Returns a reference to the NodeInfo for a node, panicking if it does not exist
fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
&self.nodes[id]
}
}
/// Tracks information needed to compute drop ranges.
struct DropRangesBuilder {
/// The core of DropRangesBuilder is a set of nodes, which each represent
/// one expression. We primarily refer to them by their index in a
/// post-order traversal of the HIR tree, since this is what
/// generator_interior uses to talk about yield positions.
///
/// This IndexVec keeps the relevant details for each node. See the
/// NodeInfo struct for more details, but this information includes things
/// such as the set of control-flow successors, which variables are dropped
/// or reinitialized, and whether each variable has been inferred to be
/// known-dropped or potentially reinitialized at each point.
nodes: IndexVec<PostOrderId, NodeInfo>,
/// We refer to values whose drop state we are tracking by the HirId of
/// where they are defined. Within a NodeInfo, however, we store the
/// drop-state in a bit vector indexed by a HirIdIndex
/// (see NodeInfo::drop_state). The hir_id_map field stores the mapping
/// from HirIds to the HirIdIndex that is used to represent that value in
/// bitvector.
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
/// When building the control flow graph, we don't always know the
/// post-order index of the target node at the point we encounter it.
/// For example, this happens with break and continue. In those cases,
/// we store a pair of the PostOrderId of the source and the HirId
/// of the target. Once we have gathered all of these edges, we make a
/// pass over the set of deferred edges (see process_deferred_edges in
/// cfg_build.rs), look up the PostOrderId for the target (since now the
/// post-order index for all nodes is known), and add missing control flow
/// edges.
deferred_edges: Vec<(PostOrderId, HirId)>,
/// This maps HirIds of expressions to their post-order index. It is
/// used in process_deferred_edges to correctly add back-edges.
post_order_map: HirIdMap<PostOrderId>,
}
impl Debug for DropRangesBuilder {
fn | (&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DropRanges")
.field("hir_id_map", &self.tracked_value_map)
.field("post_order_maps", &self.post_order_map)
.field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
.finish()
}
}
/// DropRanges keeps track of what values are definitely dropped at each point in the code.
///
/// Values of interest are defined by the hir_id of their place. Locations in code are identified
/// by their index in the post-order traversal. At its core, DropRanges maps
/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
/// dropped at the point of the node identified by post_order_id.
impl DropRangesBuilder {
/// Returns the number of values (hir_ids) that are tracked
fn num_values(&self) -> usize {
self.tracked_value_map.len()
}
fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
let size = self.num_values();
self.nodes.ensure_contains_elem(id, || NodeInfo::new(size))
}
fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
trace!("adding control edge from {:?} to {:?}", from, to);
self.node_mut(from).successors.push(to);
}
}
#[derive(Debug)]
struct NodeInfo {
/// IDs of nodes that can follow this one in the control flow
///
/// If the vec is empty, then control proceeds to the next node.
successors: Vec<PostOrderId>,
/// List of hir_ids that are dropped by this node.
drops: Vec<TrackedValueIndex>,
/// List of hir_ids that are reinitialized by this node.
reinits: Vec<TrackedValueIndex>,
/// Set of values that are definitely dropped at this point.
drop_state: BitSet<TrackedValueIndex>,
}
impl NodeInfo {
fn new(num_values: usize) -> Self {
Self {
successors: vec![],
drops: vec![],
reinits: vec![],
drop_state: BitSet::new_filled(num_values),
}
}
}
| fmt | identifier_name |
mod.rs | //! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
//!
//! There are three phases to this analysis:
//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
//! and also build a control flow graph.
//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
//! the CFG and find the exact points where we know a value is definitely dropped.
//!
//! The end result is a data structure that maps the post-order index of each node in the HIR tree
//! to a set of values that are known to be dropped at that location.
use self::cfg_build::build_control_flow_graph;
use self::record_consumed_borrow::find_consumed_and_borrowed;
use crate::FnCtxt;
use hir::def_id::DefId;
use hir::{Body, HirId, HirIdMap, Node};
use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::hir::map::Map;
use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
use rustc_middle::ty;
use std::collections::BTreeMap;
use std::fmt::Debug;
mod cfg_build;
mod cfg_propagate;
mod cfg_visualize;
mod record_consumed_borrow;
pub fn compute_drop_ranges<'a, 'tcx>(
fcx: &'a FnCtxt<'a, 'tcx>,
def_id: DefId,
body: &'tcx Body<'tcx>,
) -> DropRanges {
if fcx.sess().opts.unstable_opts.drop_tracking {
let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
let typeck_results = &fcx.typeck_results.borrow();
let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
&fcx,
typeck_results,
fcx.param_env,
consumed_borrowed_places,
body,
num_exprs,
);
drop_ranges.propagate_to_fixpoint();
debug!("borrowed_temporaries = {borrowed_temporaries:?}");
DropRanges {
tracked_value_map: drop_ranges.tracked_value_map,
nodes: drop_ranges.nodes,
borrowed_temporaries: Some(borrowed_temporaries),
}
} else {
// If drop range tracking is not enabled, skip all the analysis and produce an
// empty set of DropRanges.
DropRanges {
tracked_value_map: UnordMap::default(),
nodes: IndexVec::new(),
borrowed_temporaries: None,
}
}
}
/// Applies `f` to consumable node in the HIR subtree pointed to by `place`.
///
/// This includes the place itself, and if the place is a reference to a local
/// variable then `f` is also called on the HIR node for that variable as well.
///
/// For example, if `place` points to `foo()`, then `f` is called once for the
/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
/// be called both on the `ExprKind::Path` node that represents the expression
/// as well as the HirId of the local `x` itself.
fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
f(place);
let node = hir.find(place.hir_id());
if let Some(Node::Expr(expr)) = node {
match expr.kind {
hir::ExprKind::Path(hir::QPath::Resolved(
_,
hir::Path { res: hir::def::Res::Local(hir_id), .. },
)) => {
f(TrackedValue::Variable(*hir_id));
}
_ => (),
}
}
}
rustc_index::newtype_index! {
#[debug_format = "id({})"]
pub struct PostOrderId {}
}
rustc_index::newtype_index! {
#[debug_format = "hidx({})"]
pub struct TrackedValueIndex {}
}
/// Identifies a value whose drop state we need to track.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
enum TrackedValue {
/// Represents a named variable, such as a let binding, parameter, or upvar.
///
/// The HirId points to the variable's definition site.
Variable(HirId),
/// A value produced as a result of an expression.
///
/// The HirId points to the expression that returns this value.
Temporary(HirId),
}
impl Debug for TrackedValue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
ty::tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx | else {
match self {
Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"),
Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"),
}
}
})
}
}
impl TrackedValue {
fn hir_id(&self) -> HirId {
match self {
TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
}
}
fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
match place_with_id.place.base {
PlaceBase::Rvalue | PlaceBase::StaticItem => {
TrackedValue::Temporary(place_with_id.hir_id)
}
PlaceBase::Local(hir_id)
| PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
TrackedValue::Variable(hir_id)
}
}
}
}
/// Represents a reason why we might not be able to convert a HirId or Place
/// into a tracked value.
#[derive(Debug)]
enum TrackedValueConversionError {
/// Place projects are not currently supported.
///
/// The reasoning around these is kind of subtle, so we choose to be more
/// conservative around these for now. There is no reason in theory we
/// cannot support these, we just have not implemented it yet.
PlaceProjectionsNotSupported,
}
impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
type Error = TrackedValueConversionError;
fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
if !place_with_id.place.projections.is_empty() {
debug!(
"TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
place_with_id
);
return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
}
Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
}
}
pub struct DropRanges {
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
nodes: IndexVec<PostOrderId, NodeInfo>,
borrowed_temporaries: Option<UnordSet<HirId>>,
}
impl DropRanges {
pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
self.tracked_value_map
.get(&TrackedValue::Temporary(hir_id))
.or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
.cloned()
.is_some_and(|tracked_value_id| {
self.expect_node(location.into()).drop_state.contains(tracked_value_id)
})
}
pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
}
/// Returns a reference to the NodeInfo for a node, panicking if it does not exist
fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
&self.nodes[id]
}
}
/// Tracks information needed to compute drop ranges.
struct DropRangesBuilder {
/// The core of DropRangesBuilder is a set of nodes, which each represent
/// one expression. We primarily refer to them by their index in a
/// post-order traversal of the HIR tree, since this is what
/// generator_interior uses to talk about yield positions.
///
/// This IndexVec keeps the relevant details for each node. See the
/// NodeInfo struct for more details, but this information includes things
/// such as the set of control-flow successors, which variables are dropped
/// or reinitialized, and whether each variable has been inferred to be
/// known-dropped or potentially reinitialized at each point.
nodes: IndexVec<PostOrderId, NodeInfo>,
/// We refer to values whose drop state we are tracking by the HirId of
/// where they are defined. Within a NodeInfo, however, we store the
/// drop-state in a bit vector indexed by a HirIdIndex
/// (see NodeInfo::drop_state). The hir_id_map field stores the mapping
/// from HirIds to the HirIdIndex that is used to represent that value in
/// bitvector.
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
/// When building the control flow graph, we don't always know the
/// post-order index of the target node at the point we encounter it.
/// For example, this happens with break and continue. In those cases,
/// we store a pair of the PostOrderId of the source and the HirId
/// of the target. Once we have gathered all of these edges, we make a
/// pass over the set of deferred edges (see process_deferred_edges in
/// cfg_build.rs), look up the PostOrderId for the target (since now the
/// post-order index for all nodes is known), and add missing control flow
/// edges.
deferred_edges: Vec<(PostOrderId, HirId)>,
/// This maps HirIds of expressions to their post-order index. It is
/// used in process_deferred_edges to correctly add back-edges.
post_order_map: HirIdMap<PostOrderId>,
}
impl Debug for DropRangesBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DropRanges")
.field("hir_id_map", &self.tracked_value_map)
.field("post_order_maps", &self.post_order_map)
.field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
.finish()
}
}
/// DropRanges keeps track of what values are definitely dropped at each point in the code.
///
/// Values of interest are defined by the hir_id of their place. Locations in code are identified
/// by their index in the post-order traversal. At its core, DropRanges maps
/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
/// dropped at the point of the node identified by post_order_id.
impl DropRangesBuilder {
/// Returns the number of values (hir_ids) that are tracked
fn num_values(&self) -> usize {
self.tracked_value_map.len()
}
fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
let size = self.num_values();
self.nodes.ensure_contains_elem(id, || NodeInfo::new(size))
}
fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
trace!("adding control edge from {:?} to {:?}", from, to);
self.node_mut(from).successors.push(to);
}
}
#[derive(Debug)]
struct NodeInfo {
/// IDs of nodes that can follow this one in the control flow
///
/// If the vec is empty, then control proceeds to the next node.
successors: Vec<PostOrderId>,
/// List of hir_ids that are dropped by this node.
drops: Vec<TrackedValueIndex>,
/// List of hir_ids that are reinitialized by this node.
reinits: Vec<TrackedValueIndex>,
/// Set of values that are definitely dropped at this point.
drop_state: BitSet<TrackedValueIndex>,
}
impl NodeInfo {
fn new(num_values: usize) -> Self {
Self {
successors: vec![],
drops: vec![],
reinits: vec![],
drop_state: BitSet::new_filled(num_values),
}
}
}
| {
write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
} | conditional_block |
mod.rs | //! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
//!
//! There are three phases to this analysis:
//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
//! and also build a control flow graph.
//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
//! the CFG and find the exact points where we know a value is definitely dropped.
//!
//! The end result is a data structure that maps the post-order index of each node in the HIR tree
//! to a set of values that are known to be dropped at that location.
use self::cfg_build::build_control_flow_graph;
use self::record_consumed_borrow::find_consumed_and_borrowed;
use crate::FnCtxt;
use hir::def_id::DefId;
use hir::{Body, HirId, HirIdMap, Node};
use rustc_data_structures::unord::{UnordMap, UnordSet};
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::hir::map::Map;
use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
use rustc_middle::ty;
use std::collections::BTreeMap;
use std::fmt::Debug;
mod cfg_build;
mod cfg_propagate;
mod cfg_visualize;
mod record_consumed_borrow;
pub fn compute_drop_ranges<'a, 'tcx>(
fcx: &'a FnCtxt<'a, 'tcx>,
def_id: DefId,
body: &'tcx Body<'tcx>,
) -> DropRanges {
if fcx.sess().opts.unstable_opts.drop_tracking {
let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
let typeck_results = &fcx.typeck_results.borrow();
let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
&fcx,
typeck_results,
fcx.param_env,
consumed_borrowed_places,
body,
num_exprs,
);
drop_ranges.propagate_to_fixpoint();
debug!("borrowed_temporaries = {borrowed_temporaries:?}");
DropRanges {
tracked_value_map: drop_ranges.tracked_value_map,
nodes: drop_ranges.nodes,
borrowed_temporaries: Some(borrowed_temporaries),
}
} else {
// If drop range tracking is not enabled, skip all the analysis and produce an
// empty set of DropRanges.
DropRanges {
tracked_value_map: UnordMap::default(),
nodes: IndexVec::new(),
borrowed_temporaries: None,
}
}
}
/// Applies `f` to consumable node in the HIR subtree pointed to by `place`.
///
/// This includes the place itself, and if the place is a reference to a local
/// variable then `f` is also called on the HIR node for that variable as well.
///
/// For example, if `place` points to `foo()`, then `f` is called once for the
/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
/// be called both on the `ExprKind::Path` node that represents the expression
/// as well as the HirId of the local `x` itself.
fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
f(place);
let node = hir.find(place.hir_id());
if let Some(Node::Expr(expr)) = node {
match expr.kind {
hir::ExprKind::Path(hir::QPath::Resolved(
_,
hir::Path { res: hir::def::Res::Local(hir_id), .. },
)) => {
f(TrackedValue::Variable(*hir_id));
}
_ => (),
}
}
}
rustc_index::newtype_index! {
#[debug_format = "id({})"]
pub struct PostOrderId {}
}
rustc_index::newtype_index! {
#[debug_format = "hidx({})"]
pub struct TrackedValueIndex {}
}
/// Identifies a value whose drop state we need to track.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
enum TrackedValue {
/// Represents a named variable, such as a let binding, parameter, or upvar.
///
/// The HirId points to the variable's definition site.
Variable(HirId),
/// A value produced as a result of an expression.
///
/// The HirId points to the expression that returns this value.
Temporary(HirId),
}
impl Debug for TrackedValue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
ty::tls::with_opt(|opt_tcx| {
if let Some(tcx) = opt_tcx {
write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
} else {
match self {
Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"),
Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"),
}
}
})
}
}
impl TrackedValue {
fn hir_id(&self) -> HirId {
match self {
TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
}
}
fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
match place_with_id.place.base {
PlaceBase::Rvalue | PlaceBase::StaticItem => {
TrackedValue::Temporary(place_with_id.hir_id)
}
PlaceBase::Local(hir_id)
| PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
TrackedValue::Variable(hir_id)
}
}
}
}
/// Represents a reason why we might not be able to convert a HirId or Place
/// into a tracked value.
#[derive(Debug)]
enum TrackedValueConversionError {
/// Place projects are not currently supported.
///
/// The reasoning around these is kind of subtle, so we choose to be more
/// conservative around these for now. There is no reason in theory we
/// cannot support these, we just have not implemented it yet.
PlaceProjectionsNotSupported,
}
impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
type Error = TrackedValueConversionError;
fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
if !place_with_id.place.projections.is_empty() {
debug!(
"TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
place_with_id
);
return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
}
Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
}
}
pub struct DropRanges {
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
nodes: IndexVec<PostOrderId, NodeInfo>,
borrowed_temporaries: Option<UnordSet<HirId>>,
}
impl DropRanges {
pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
self.tracked_value_map
.get(&TrackedValue::Temporary(hir_id))
.or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
.cloned()
.is_some_and(|tracked_value_id| {
self.expect_node(location.into()).drop_state.contains(tracked_value_id)
})
}
pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
}
/// Returns a reference to the NodeInfo for a node, panicking if it does not exist
fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
&self.nodes[id]
}
}
/// Tracks information needed to compute drop ranges.
struct DropRangesBuilder {
/// The core of DropRangesBuilder is a set of nodes, which each represent
/// one expression. We primarily refer to them by their index in a
/// post-order traversal of the HIR tree, since this is what
/// generator_interior uses to talk about yield positions.
///
/// This IndexVec keeps the relevant details for each node. See the
/// NodeInfo struct for more details, but this information includes things
/// such as the set of control-flow successors, which variables are dropped
/// or reinitialized, and whether each variable has been inferred to be
/// known-dropped or potentially reinitialized at each point.
nodes: IndexVec<PostOrderId, NodeInfo>,
/// We refer to values whose drop state we are tracking by the HirId of
/// where they are defined. Within a NodeInfo, however, we store the
/// drop-state in a bit vector indexed by a HirIdIndex
/// (see NodeInfo::drop_state). The hir_id_map field stores the mapping
/// from HirIds to the HirIdIndex that is used to represent that value in
/// bitvector.
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>,
/// When building the control flow graph, we don't always know the
/// post-order index of the target node at the point we encounter it.
/// For example, this happens with break and continue. In those cases,
/// we store a pair of the PostOrderId of the source and the HirId
/// of the target. Once we have gathered all of these edges, we make a
/// pass over the set of deferred edges (see process_deferred_edges in
/// cfg_build.rs), look up the PostOrderId for the target (since now the
/// post-order index for all nodes is known), and add missing control flow
/// edges.
deferred_edges: Vec<(PostOrderId, HirId)>,
/// This maps HirIds of expressions to their post-order index. It is
/// used in process_deferred_edges to correctly add back-edges.
post_order_map: HirIdMap<PostOrderId>,
}
impl Debug for DropRangesBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DropRanges")
.field("hir_id_map", &self.tracked_value_map)
.field("post_order_maps", &self.post_order_map)
.field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
.finish()
}
}
/// DropRanges keeps track of what values are definitely dropped at each point in the code.
///
/// Values of interest are defined by the hir_id of their place. Locations in code are identified
/// by their index in the post-order traversal. At its core, DropRanges maps
/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
/// dropped at the point of the node identified by post_order_id.
impl DropRangesBuilder {
/// Returns the number of values (hir_ids) that are tracked
fn num_values(&self) -> usize |
fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
let size = self.num_values();
self.nodes.ensure_contains_elem(id, || NodeInfo::new(size))
}
fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
trace!("adding control edge from {:?} to {:?}", from, to);
self.node_mut(from).successors.push(to);
}
}
#[derive(Debug)]
struct NodeInfo {
/// IDs of nodes that can follow this one in the control flow
///
/// If the vec is empty, then control proceeds to the next node.
successors: Vec<PostOrderId>,
/// List of hir_ids that are dropped by this node.
drops: Vec<TrackedValueIndex>,
/// List of hir_ids that are reinitialized by this node.
reinits: Vec<TrackedValueIndex>,
/// Set of values that are definitely dropped at this point.
drop_state: BitSet<TrackedValueIndex>,
}
impl NodeInfo {
fn new(num_values: usize) -> Self {
Self {
successors: vec![],
drops: vec![],
reinits: vec![],
drop_state: BitSet::new_filled(num_values),
}
}
}
| {
self.tracked_value_map.len()
} | identifier_body |
forward_chain_bandaid.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
#FIXME: This script is not packaged. That goes against team policy.
#FIXME: This script must be removed or packaged.
# Script added in violation of policy because:
# 1. we're in a hurry to get it rolled out because of a related, current outage
# 2. this script should go away within a few weeks when it gets obsoleted by
# https://github.com/openshift/origin/pull/13465
'''
This entire script is a band-aid that needs to be in place until
https://github.com/openshift/origin/pull/13465 is merged and
backported to 3.4 and 3.5 and the hotfix that contains it is installed
on all clusters.
This script will:
1. create the OPENSHIFT-OUTPUT-FILTERING chain in the filter table
(if it doesn't exist)
2. check to see if https://github.com/openshift/origin/pull/13465 is
doing its thing
3. if not, it will make sure that an equivalent rule to jump to
OPENSHIFT-OUTPUT-FILTERING is present in the FORWARD chain and
that it is the top rule.
For best effect, run it frequently from cron so that any reordering
can be quickly remedied.
'''
import os
import subprocess
import fcntl
import errno
import time
# the rule that must be the first thing in the FORWARD chain.
# The "! -s 0.0.0.1/32" is present only so that we can tell OUR_RULE from THEIR_RULE
OUR_RULE = '-A FORWARD ! -s 0.0.0.1/32 -i tun0 ! -o tun0 -j OPENSHIFT-OUTPUT-FILTERING'
# the rule that we're waiting for that will be added by the product. If we see this, our rule is no longer needed
THEIR_RULE = '-A FORWARD -i tun0 ! -o tun0 -j OPENSHIFT-OUTPUT-FILTERING'
# chain we're jumping to
JUMP_CHAIN_NAME = 'OPENSHIFT-OUTPUT-FILTERING'
# chain we're jumping from
SOURCE_CHAIN_NAME = 'FORWARD'
class TopRuleError(Exception):
'''All IpTablesChain methods throw this exception when errors occur'''
def __init__(self, msg, cmd, exit_code, output):
super(TopRuleError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
self.output = output
# pylint: disable=too-few-public-methods
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
class DummyContextMgr(object):
'''A dummy context manager that does nothing so that a 'with' can conditionally do nothing'''
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback_):
return False
# pylint: enable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
class TopRule(object):
'''A single rule that should be at the top of the chain'''
def __init__(self, table, source_chain, jump_chain, ver, top_rule, noop_rule):
'''Create the TopRule object to ensure that the rule is at the top of the chain'''
self.table = table
self.source_chain = source_chain
self.jump_chain = jump_chain
self.ver = ver
self.top_rule = top_rule
self.noop_rule = noop_rule
self.restore_has_locks = None # i.e., unknown
self.wait_takes_seconds = None # i.e., unknown
def _build_cmd(self, *args):
'''
Create an iptables or ip6tables command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
retval = ["/usr/sbin/%s" % cmd, '--table', self.table]
retval.append('--wait')
if self._check_wait_takes_seconds():
retval.append('600')
retval.extend(args)
return retval
def _build_restore_cmd(self, *args):
'''
Create an iptables-restore or ip6tables-restore command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
retval = ["/usr/sbin/%s" % cmd, '--noflush', '--table', self.table]
if self._check_restore_has_locks():
retval.extend(['--wait', '600'])
retval.extend(args)
return retval
def _check_wait_takes_seconds(self):
'''Determine whether iptables -w accepts an optional timeout'''
# some versions of iptables have --wait and -w, but don't allow a timeout to be specified
if self.wait_takes_seconds is None:
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
# try a harmless operation that allows us to see if iptables pukes on the 1
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--rename-chain', 'INPUT', 'INPUT']
try:
subprocess.check_output(to_run, stderr=subprocess.STDOUT)
# we don't expect to ever get here, but if we do, then I guess it takes seconds.
self.wait_takes_seconds = True
except subprocess.CalledProcessError as ex:
self.wait_takes_seconds = bool('File exists.' in ex.output)
return self.wait_takes_seconds
def _check_restore_has_locks(self):
'''Determine whether iptables-restore has locking built in.'''
# The new version will have --wait just like iptables thanks to this patch:
# http://patchwork.ozlabs.org/patch/739234/
# Until then we'll need to do our own locking. So, this code detects whether we need to do locking
if self.restore_has_locks is None:
with open(os.devnull, 'w') as devnull:
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--noflush']
try:
subprocess.check_call(to_run, stderr=devnull, stdout=devnull)
self.restore_has_locks = True
except subprocess.CalledProcessError:
self.restore_has_locks = False
return self.restore_has_locks
def jump_chain_exists(self):
'''Return True if the jump chain exists or False otherwise'''
try:
# this is definitely going to throw. We're after the error message.
subprocess.check_output(self._build_cmd('--rename-chain', self.jump_chain, self.jump_chain),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if 'File exists.' in ex.output:
return True
if 'No chain/target/match by that name.' in ex.output:
return False
raise TopRuleError(msg="Failed to determine if chain exists",
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def get(self):
'''Get all the rules of the chain'''
cmd = self._build_cmd('--list-rules', self.source_chain)
ipt = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate()
if ipt.returncode != 0:
raise TopRuleError(msg="Failed to get existing chain rules",
cmd=cmd, exit_code=ipt.returncode, output=err)
return list([entry for entry in out.split('\n') if entry and not entry.startswith('-N ')])
def set(self):
'''Set all the rules of the chain to match the passed in rules'''
existing_rules = self.get()
updated_rules = [rule for rule in existing_rules if rule != self.top_rule]
if self.noop_rule not in updated_rules:
# find position to insert it. either just before the first -A rule, or at the end if there aren't any
enumeration = (i for i, rule in enumerate(updated_rules) if rule.startswith('-A'))
pos = next(enumeration, len(updated_rules))
updated_rules.insert(pos, self.top_rule)
if existing_rules == updated_rules:
# nothing to do, everything already looks good. early return
return
in_data = "*%s\n" % self.table
if not self.jump_chain_exists():
# create the jump_chain
|
# assume that source_chain already exists
# flush the source_chain since we're about to recreate its rules
in_data += "-F %s\n" % self.source_chain
in_data += ("\n".join(updated_rules))+"\n"
in_data += "COMMIT\n"
cmd = self._build_restore_cmd()
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
with open('/run/xtables.lock', 'a+') if not self._check_restore_has_locks() else DummyContextMgr() as fdnum:
if not self._check_restore_has_locks():
# do the locking ourselves
start = time.time()
locked = False
while time.time() < start+600:
try:
# the lock will be released automatically when the with block goes out of scope
# and the file is closed.
fcntl.flock(fdnum, fcntl.LOCK_EX | fcntl.LOCK_NB)
locked = True
break
except IOError as ex:
if ex.errno != errno.EDEADLK:
raise TopRuleError(msg="Failed to acquire iptables lock", exit_code=1, cmd='', output='')
time.sleep(0.5)
if not locked:
raise TopRuleError(msg="Timed out trying to acquire iptables lock", exit_code=1, cmd='', output='')
ipt = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate(in_data)
if ipt.returncode != 0:
raise TopRuleError(msg="Failed to set chain rules",
cmd=cmd, exit_code=ipt.returncode, output=out+"\n"+err)
def main():
'''Band-aid script to ensure that the OpenShift filter rule is always at the top of the FORWARD chain'''
toprule = TopRule('filter', SOURCE_CHAIN_NAME, JUMP_CHAIN_NAME, 'ipv4', OUR_RULE, THEIR_RULE)
toprule.set()
if __name__ == '__main__':
main()
| in_data += ":%s - [0:0]\n" % self.jump_chain | conditional_block |
forward_chain_bandaid.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
#FIXME: This script is not packaged. That goes against team policy.
#FIXME: This script must be removed or packaged.
# Script added in violation of policy because:
# 1. we're in a hurry to get it rolled out because of a related, current outage
# 2. this script should go away within a few weeks when it gets obsoleted by
# https://github.com/openshift/origin/pull/13465
'''
This entire script is a band-aid that needs to be in place until
https://github.com/openshift/origin/pull/13465 is merged and
backported to 3.4 and 3.5 and the hotfix that contains it is installed
on all clusters.
This script will:
1. create the OPENSHIFT-OUTPUT-FILTERING chain in the filter table
(if it doesn't exist)
2. check to see if https://github.com/openshift/origin/pull/13465 is
doing its thing
3. if not, it will make sure that an equivalent rule to jump to
OPENSHIFT-OUTPUT-FILTERING is present in the FORWARD chain and
that it is the top rule.
For best effect, run it frequently from cron so that any reordering
can be quickly remedied.
'''
import os
import subprocess
import fcntl
import errno
import time
# the rule that must be the first thing in the FORWARD chain.
# The "! -s 0.0.0.1/32" is present only so that we can tell OUR_RULE from THEIR_RULE
OUR_RULE = '-A FORWARD ! -s 0.0.0.1/32 -i tun0 ! -o tun0 -j OPENSHIFT-OUTPUT-FILTERING'
# the rule that we're waiting for that will be added by the product. If we see this, our rule is no longer needed
THEIR_RULE = '-A FORWARD -i tun0 ! -o tun0 -j OPENSHIFT-OUTPUT-FILTERING'
# chain we're jumping to
JUMP_CHAIN_NAME = 'OPENSHIFT-OUTPUT-FILTERING'
# chain we're jumping from
SOURCE_CHAIN_NAME = 'FORWARD'
class TopRuleError(Exception):
'''All IpTablesChain methods throw this exception when errors occur'''
def __init__(self, msg, cmd, exit_code, output):
super(TopRuleError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
self.output = output
# pylint: disable=too-few-public-methods
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
class DummyContextMgr(object):
|
# pylint: enable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
class TopRule(object):
'''A single rule that should be at the top of the chain'''
def __init__(self, table, source_chain, jump_chain, ver, top_rule, noop_rule):
'''Create the TopRule object to ensure that the rule is at the top of the chain'''
self.table = table
self.source_chain = source_chain
self.jump_chain = jump_chain
self.ver = ver
self.top_rule = top_rule
self.noop_rule = noop_rule
self.restore_has_locks = None # i.e., unknown
self.wait_takes_seconds = None # i.e., unknown
def _build_cmd(self, *args):
'''
Create an iptables or ip6tables command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
retval = ["/usr/sbin/%s" % cmd, '--table', self.table]
retval.append('--wait')
if self._check_wait_takes_seconds():
retval.append('600')
retval.extend(args)
return retval
def _build_restore_cmd(self, *args):
'''
Create an iptables-restore or ip6tables-restore command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
retval = ["/usr/sbin/%s" % cmd, '--noflush', '--table', self.table]
if self._check_restore_has_locks():
retval.extend(['--wait', '600'])
retval.extend(args)
return retval
def _check_wait_takes_seconds(self):
'''Determine whether iptables -w accepts an optional timeout'''
# some versions of iptables have --wait and -w, but don't allow a timeout to be specified
if self.wait_takes_seconds is None:
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
# try a harmless operation that allows us to see if iptables pukes on the 1
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--rename-chain', 'INPUT', 'INPUT']
try:
subprocess.check_output(to_run, stderr=subprocess.STDOUT)
# we don't expect to ever get here, but if we do, then I guess it takes seconds.
self.wait_takes_seconds = True
except subprocess.CalledProcessError as ex:
self.wait_takes_seconds = bool('File exists.' in ex.output)
return self.wait_takes_seconds
def _check_restore_has_locks(self):
'''Determine whether iptables-restore has locking built in.'''
# The new version will have --wait just like iptables thanks to this patch:
# http://patchwork.ozlabs.org/patch/739234/
# Until then we'll need to do our own locking. So, this code detects whether we need to do locking
if self.restore_has_locks is None:
with open(os.devnull, 'w') as devnull:
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--noflush']
try:
subprocess.check_call(to_run, stderr=devnull, stdout=devnull)
self.restore_has_locks = True
except subprocess.CalledProcessError:
self.restore_has_locks = False
return self.restore_has_locks
def jump_chain_exists(self):
'''Return True if the jump chain exists or False otherwise'''
try:
# this is definitely going to throw. We're after the error message.
subprocess.check_output(self._build_cmd('--rename-chain', self.jump_chain, self.jump_chain),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if 'File exists.' in ex.output:
return True
if 'No chain/target/match by that name.' in ex.output:
return False
raise TopRuleError(msg="Failed to determine if chain exists",
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def get(self):
'''Get all the rules of the chain'''
cmd = self._build_cmd('--list-rules', self.source_chain)
ipt = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate()
if ipt.returncode != 0:
raise TopRuleError(msg="Failed to get existing chain rules",
cmd=cmd, exit_code=ipt.returncode, output=err)
return list([entry for entry in out.split('\n') if entry and not entry.startswith('-N ')])
def set(self):
'''Set all the rules of the chain to match the passed in rules'''
existing_rules = self.get()
updated_rules = [rule for rule in existing_rules if rule != self.top_rule]
if self.noop_rule not in updated_rules:
# find position to insert it. either just before the first -A rule, or at the end if there aren't any
enumeration = (i for i, rule in enumerate(updated_rules) if rule.startswith('-A'))
pos = next(enumeration, len(updated_rules))
updated_rules.insert(pos, self.top_rule)
if existing_rules == updated_rules:
# nothing to do, everything already looks good. early return
return
in_data = "*%s\n" % self.table
if not self.jump_chain_exists():
# create the jump_chain
in_data += ":%s - [0:0]\n" % self.jump_chain
# assume that source_chain already exists
# flush the source_chain since we're about to recreate its rules
in_data += "-F %s\n" % self.source_chain
in_data += ("\n".join(updated_rules))+"\n"
in_data += "COMMIT\n"
cmd = self._build_restore_cmd()
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
with open('/run/xtables.lock', 'a+') if not self._check_restore_has_locks() else DummyContextMgr() as fdnum:
if not self._check_restore_has_locks():
# do the locking ourselves
start = time.time()
locked = False
while time.time() < start+600:
try:
# the lock will be released automatically when the with block goes out of scope
# and the file is closed.
fcntl.flock(fdnum, fcntl.LOCK_EX | fcntl.LOCK_NB)
locked = True
break
except IOError as ex:
if ex.errno != errno.EDEADLK:
raise TopRuleError(msg="Failed to acquire iptables lock", exit_code=1, cmd='', output='')
time.sleep(0.5)
if not locked:
raise TopRuleError(msg="Timed out trying to acquire iptables lock", exit_code=1, cmd='', output='')
ipt = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate(in_data)
if ipt.returncode != 0:
raise TopRuleError(msg="Failed to set chain rules",
cmd=cmd, exit_code=ipt.returncode, output=out+"\n"+err)
def main():
'''Band-aid script to ensure that the OpenShift filter rule is always at the top of the FORWARD chain'''
toprule = TopRule('filter', SOURCE_CHAIN_NAME, JUMP_CHAIN_NAME, 'ipv4', OUR_RULE, THEIR_RULE)
toprule.set()
if __name__ == '__main__':
main()
| '''A dummy context manager that does nothing so that a 'with' can conditionally do nothing'''
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback_):
return False | identifier_body |
forward_chain_bandaid.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
#FIXME: This script is not packaged. That goes against team policy.
#FIXME: This script must be removed or packaged.
# Script added in violation of policy because:
# 1. we're in a hurry to get it rolled out because of a related, current outage | # 2. this script should go away within a few weeks when it gets obsoleted by
# https://github.com/openshift/origin/pull/13465
'''
This entire script is a band-aid that needs to be in place until
https://github.com/openshift/origin/pull/13465 is merged and
backported to 3.4 and 3.5 and the hotfix that contains it is installed
on all clusters.
This script will:
1. create the OPENSHIFT-OUTPUT-FILTERING chain in the filter table
(if it doesn't exist)
2. check to see if https://github.com/openshift/origin/pull/13465 is
doing its thing
3. if not, it will make sure that an equivalent rule to jump to
OPENSHIFT-OUTPUT-FILTERING is present in the FORWARD chain and
that it is the top rule.
For best effect, run it frequently from cron so that any reordering
can be quickly remedied.
'''
import os
import subprocess
import fcntl
import errno
import time
# the rule that must be the first thing in the FORWARD chain.
# The "! -s 0.0.0.1/32" is present only so that we can tell OUR_RULE from THEIR_RULE
OUR_RULE = '-A FORWARD ! -s 0.0.0.1/32 -i tun0 ! -o tun0 -j OPENSHIFT-OUTPUT-FILTERING'
# the rule that we're waiting for that will be added by the product. If we see this, our rule is no longer needed
THEIR_RULE = '-A FORWARD -i tun0 ! -o tun0 -j OPENSHIFT-OUTPUT-FILTERING'
# chain we're jumping to
JUMP_CHAIN_NAME = 'OPENSHIFT-OUTPUT-FILTERING'
# chain we're jumping from
SOURCE_CHAIN_NAME = 'FORWARD'
class TopRuleError(Exception):
'''All IpTablesChain methods throw this exception when errors occur'''
def __init__(self, msg, cmd, exit_code, output):
super(TopRuleError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
self.output = output
# pylint: disable=too-few-public-methods
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
class DummyContextMgr(object):
'''A dummy context manager that does nothing so that a 'with' can conditionally do nothing'''
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback_):
return False
# pylint: enable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
class TopRule(object):
'''A single rule that should be at the top of the chain'''
def __init__(self, table, source_chain, jump_chain, ver, top_rule, noop_rule):
'''Create the TopRule object to ensure that the rule is at the top of the chain'''
self.table = table
self.source_chain = source_chain
self.jump_chain = jump_chain
self.ver = ver
self.top_rule = top_rule
self.noop_rule = noop_rule
self.restore_has_locks = None # i.e., unknown
self.wait_takes_seconds = None # i.e., unknown
def _build_cmd(self, *args):
'''
Create an iptables or ip6tables command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
retval = ["/usr/sbin/%s" % cmd, '--table', self.table]
retval.append('--wait')
if self._check_wait_takes_seconds():
retval.append('600')
retval.extend(args)
return retval
def _build_restore_cmd(self, *args):
'''
Create an iptables-restore or ip6tables-restore command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
retval = ["/usr/sbin/%s" % cmd, '--noflush', '--table', self.table]
if self._check_restore_has_locks():
retval.extend(['--wait', '600'])
retval.extend(args)
return retval
def _check_wait_takes_seconds(self):
'''Determine whether iptables -w accepts an optional timeout'''
# some versions of iptables have --wait and -w, but don't allow a timeout to be specified
if self.wait_takes_seconds is None:
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
# try a harmless operation that allows us to see if iptables pukes on the 1
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--rename-chain', 'INPUT', 'INPUT']
try:
subprocess.check_output(to_run, stderr=subprocess.STDOUT)
# we don't expect to ever get here, but if we do, then I guess it takes seconds.
self.wait_takes_seconds = True
except subprocess.CalledProcessError as ex:
self.wait_takes_seconds = bool('File exists.' in ex.output)
return self.wait_takes_seconds
def _check_restore_has_locks(self):
'''Determine whether iptables-restore has locking built in.'''
# The new version will have --wait just like iptables thanks to this patch:
# http://patchwork.ozlabs.org/patch/739234/
# Until then we'll need to do our own locking. So, this code detects whether we need to do locking
if self.restore_has_locks is None:
with open(os.devnull, 'w') as devnull:
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--noflush']
try:
subprocess.check_call(to_run, stderr=devnull, stdout=devnull)
self.restore_has_locks = True
except subprocess.CalledProcessError:
self.restore_has_locks = False
return self.restore_has_locks
def jump_chain_exists(self):
'''Return True if the jump chain exists or False otherwise'''
try:
# this is definitely going to throw. We're after the error message.
subprocess.check_output(self._build_cmd('--rename-chain', self.jump_chain, self.jump_chain),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if 'File exists.' in ex.output:
return True
if 'No chain/target/match by that name.' in ex.output:
return False
raise TopRuleError(msg="Failed to determine if chain exists",
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def get(self):
'''Get all the rules of the chain'''
cmd = self._build_cmd('--list-rules', self.source_chain)
ipt = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate()
if ipt.returncode != 0:
raise TopRuleError(msg="Failed to get existing chain rules",
cmd=cmd, exit_code=ipt.returncode, output=err)
return list([entry for entry in out.split('\n') if entry and not entry.startswith('-N ')])
def set(self):
'''Set all the rules of the chain to match the passed in rules'''
existing_rules = self.get()
updated_rules = [rule for rule in existing_rules if rule != self.top_rule]
if self.noop_rule not in updated_rules:
# find position to insert it. either just before the first -A rule, or at the end if there aren't any
enumeration = (i for i, rule in enumerate(updated_rules) if rule.startswith('-A'))
pos = next(enumeration, len(updated_rules))
updated_rules.insert(pos, self.top_rule)
if existing_rules == updated_rules:
# nothing to do, everything already looks good. early return
return
in_data = "*%s\n" % self.table
if not self.jump_chain_exists():
# create the jump_chain
in_data += ":%s - [0:0]\n" % self.jump_chain
# assume that source_chain already exists
# flush the source_chain since we're about to recreate its rules
in_data += "-F %s\n" % self.source_chain
in_data += ("\n".join(updated_rules))+"\n"
in_data += "COMMIT\n"
cmd = self._build_restore_cmd()
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
with open('/run/xtables.lock', 'a+') if not self._check_restore_has_locks() else DummyContextMgr() as fdnum:
if not self._check_restore_has_locks():
# do the locking ourselves
start = time.time()
locked = False
while time.time() < start+600:
try:
# the lock will be released automatically when the with block goes out of scope
# and the file is closed.
fcntl.flock(fdnum, fcntl.LOCK_EX | fcntl.LOCK_NB)
locked = True
break
except IOError as ex:
if ex.errno != errno.EDEADLK:
raise TopRuleError(msg="Failed to acquire iptables lock", exit_code=1, cmd='', output='')
time.sleep(0.5)
if not locked:
raise TopRuleError(msg="Timed out trying to acquire iptables lock", exit_code=1, cmd='', output='')
ipt = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate(in_data)
if ipt.returncode != 0:
raise TopRuleError(msg="Failed to set chain rules",
cmd=cmd, exit_code=ipt.returncode, output=out+"\n"+err)
def main():
'''Band-aid script to ensure that the OpenShift filter rule is always at the top of the FORWARD chain'''
toprule = TopRule('filter', SOURCE_CHAIN_NAME, JUMP_CHAIN_NAME, 'ipv4', OUR_RULE, THEIR_RULE)
toprule.set()
if __name__ == '__main__':
main() | random_line_split | |
forward_chain_bandaid.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
#FIXME: This script is not packaged. That goes against team policy.
#FIXME: This script must be removed or packaged.
# Script added in violation of policy because:
# 1. we're in a hurry to get it rolled out because of a related, current outage
# 2. this script should go away within a few weeks when it gets obsoleted by
# https://github.com/openshift/origin/pull/13465
'''
This entire script is a band-aid that needs to be in place until
https://github.com/openshift/origin/pull/13465 is merged and
backported to 3.4 and 3.5 and the hotfix that contains it is installed
on all clusters.
This script will:
1. create the OPENSHIFT-OUTPUT-FILTERING chain in the filter table
(if it doesn't exist)
2. check to see if https://github.com/openshift/origin/pull/13465 is
doing its thing
3. if not, it will make sure that an equivalent rule to jump to
OPENSHIFT-OUTPUT-FILTERING is present in the FORWARD chain and
that it is the top rule.
For best effect, run it frequently from cron so that any reordering
can be quickly remedied.
'''
import os
import subprocess
import fcntl
import errno
import time
# the rule that must be the first thing in the FORWARD chain.
# The "! -s 0.0.0.1/32" is present only so that we can tell OUR_RULE from THEIR_RULE
OUR_RULE = '-A FORWARD ! -s 0.0.0.1/32 -i tun0 ! -o tun0 -j OPENSHIFT-OUTPUT-FILTERING'
# the rule that we're waiting for that will be added by the product. If we see this, our rule is no longer needed
THEIR_RULE = '-A FORWARD -i tun0 ! -o tun0 -j OPENSHIFT-OUTPUT-FILTERING'
# chain we're jumping to
JUMP_CHAIN_NAME = 'OPENSHIFT-OUTPUT-FILTERING'
# chain we're jumping from
SOURCE_CHAIN_NAME = 'FORWARD'
class TopRuleError(Exception):
'''All IpTablesChain methods throw this exception when errors occur'''
def __init__(self, msg, cmd, exit_code, output):
super(TopRuleError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
self.output = output
# pylint: disable=too-few-public-methods
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
class DummyContextMgr(object):
'''A dummy context manager that does nothing so that a 'with' can conditionally do nothing'''
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback_):
return False
# pylint: enable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
class TopRule(object):
'''A single rule that should be at the top of the chain'''
def __init__(self, table, source_chain, jump_chain, ver, top_rule, noop_rule):
'''Create the TopRule object to ensure that the rule is at the top of the chain'''
self.table = table
self.source_chain = source_chain
self.jump_chain = jump_chain
self.ver = ver
self.top_rule = top_rule
self.noop_rule = noop_rule
self.restore_has_locks = None # i.e., unknown
self.wait_takes_seconds = None # i.e., unknown
def _build_cmd(self, *args):
'''
Create an iptables or ip6tables command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
retval = ["/usr/sbin/%s" % cmd, '--table', self.table]
retval.append('--wait')
if self._check_wait_takes_seconds():
retval.append('600')
retval.extend(args)
return retval
def | (self, *args):
'''
Create an iptables-restore or ip6tables-restore command
Return a list of command args suitable for use with subprocess.*
'''
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
retval = ["/usr/sbin/%s" % cmd, '--noflush', '--table', self.table]
if self._check_restore_has_locks():
retval.extend(['--wait', '600'])
retval.extend(args)
return retval
def _check_wait_takes_seconds(self):
'''Determine whether iptables -w accepts an optional timeout'''
# some versions of iptables have --wait and -w, but don't allow a timeout to be specified
if self.wait_takes_seconds is None:
cmd = 'iptables' if self.ver == 'ipv4' else 'ip6tables'
# try a harmless operation that allows us to see if iptables pukes on the 1
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--rename-chain', 'INPUT', 'INPUT']
try:
subprocess.check_output(to_run, stderr=subprocess.STDOUT)
# we don't expect to ever get here, but if we do, then I guess it takes seconds.
self.wait_takes_seconds = True
except subprocess.CalledProcessError as ex:
self.wait_takes_seconds = bool('File exists.' in ex.output)
return self.wait_takes_seconds
def _check_restore_has_locks(self):
'''Determine whether iptables-restore has locking built in.'''
# The new version will have --wait just like iptables thanks to this patch:
# http://patchwork.ozlabs.org/patch/739234/
# Until then we'll need to do our own locking. So, this code detects whether we need to do locking
if self.restore_has_locks is None:
with open(os.devnull, 'w') as devnull:
cmd = 'iptables-restore' if self.ver == 'ipv4' else 'ip6tables-restore'
to_run = ["/usr/sbin/%s" % cmd, '--wait', '10', '--noflush']
try:
subprocess.check_call(to_run, stderr=devnull, stdout=devnull)
self.restore_has_locks = True
except subprocess.CalledProcessError:
self.restore_has_locks = False
return self.restore_has_locks
def jump_chain_exists(self):
'''Return True if the jump chain exists or False otherwise'''
try:
# this is definitely going to throw. We're after the error message.
subprocess.check_output(self._build_cmd('--rename-chain', self.jump_chain, self.jump_chain),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if 'File exists.' in ex.output:
return True
if 'No chain/target/match by that name.' in ex.output:
return False
raise TopRuleError(msg="Failed to determine if chain exists",
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def get(self):
'''Get all the rules of the chain'''
cmd = self._build_cmd('--list-rules', self.source_chain)
ipt = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate()
if ipt.returncode != 0:
raise TopRuleError(msg="Failed to get existing chain rules",
cmd=cmd, exit_code=ipt.returncode, output=err)
return list([entry for entry in out.split('\n') if entry and not entry.startswith('-N ')])
def set(self):
'''Set all the rules of the chain to match the passed in rules'''
existing_rules = self.get()
updated_rules = [rule for rule in existing_rules if rule != self.top_rule]
if self.noop_rule not in updated_rules:
# find position to insert it. either just before the first -A rule, or at the end if there aren't any
enumeration = (i for i, rule in enumerate(updated_rules) if rule.startswith('-A'))
pos = next(enumeration, len(updated_rules))
updated_rules.insert(pos, self.top_rule)
if existing_rules == updated_rules:
# nothing to do, everything already looks good. early return
return
in_data = "*%s\n" % self.table
if not self.jump_chain_exists():
# create the jump_chain
in_data += ":%s - [0:0]\n" % self.jump_chain
# assume that source_chain already exists
# flush the source_chain since we're about to recreate its rules
in_data += "-F %s\n" % self.source_chain
in_data += ("\n".join(updated_rules))+"\n"
in_data += "COMMIT\n"
cmd = self._build_restore_cmd()
# as seen on http://stackoverflow.com/questions/27803059/conditional-with-statement-in-python
with open('/run/xtables.lock', 'a+') if not self._check_restore_has_locks() else DummyContextMgr() as fdnum:
if not self._check_restore_has_locks():
# do the locking ourselves
start = time.time()
locked = False
while time.time() < start+600:
try:
# the lock will be released automatically when the with block goes out of scope
# and the file is closed.
fcntl.flock(fdnum, fcntl.LOCK_EX | fcntl.LOCK_NB)
locked = True
break
except IOError as ex:
if ex.errno != errno.EDEADLK:
raise TopRuleError(msg="Failed to acquire iptables lock", exit_code=1, cmd='', output='')
time.sleep(0.5)
if not locked:
raise TopRuleError(msg="Timed out trying to acquire iptables lock", exit_code=1, cmd='', output='')
ipt = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ipt.communicate(in_data)
if ipt.returncode != 0:
raise TopRuleError(msg="Failed to set chain rules",
cmd=cmd, exit_code=ipt.returncode, output=out+"\n"+err)
def main():
'''Band-aid script to ensure that the OpenShift filter rule is always at the top of the FORWARD chain'''
toprule = TopRule('filter', SOURCE_CHAIN_NAME, JUMP_CHAIN_NAME, 'ipv4', OUR_RULE, THEIR_RULE)
toprule.set()
if __name__ == '__main__':
main()
| _build_restore_cmd | identifier_name |
jobOptions_TileLasMon.py | #
# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
#
#*****************************************************************
#
# """topOptions file for Tile Laser Reconstruciton and Monitoring in Athena"""
# """This topOptions is intended to test the monitoring code"""
#=================================================================
from __future__ import print_function
MonitorOutput='Tile'
from AthenaCommon.Logging import logging
log = logging.getLogger( 'jobOptions_TileLasMon.py' )
from os import system, popen
def | (path, runinput):
run = str(runinput)
while len(run) < 7:
run = '0' + run
files = []
fullname = []
if path.startswith("/castor") :
for f in popen('nsls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
elif path.startswith("/eos") :
for f in popen('eos ls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
else:
for f in popen('ls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
for nn in range(len(files)):
temp = files[nn].split('\n')
fullname.append(path + '/' + temp[0])
return [fullname,run]
# include Flags jobOption
include( "TileMonitoring/TileRec_FlagOptions.py" )
## get a handle to the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from AthenaCommon import CfgMgr
toolSvc = CfgMgr.ToolSvc()
# set global flags
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo.set_Value_and_Lock('commis')
globalflags.DataSource.set_Value_and_Lock('data')
globalflags.InputFormat.set_Value_and_Lock('bytestream')
from AthenaCommon.BeamFlags import jobproperties
jobproperties.Beam.beamType.set_Value_and_Lock(beamType)
# reset everything which is not needed
from AthenaCommon.DetFlags import DetFlags
DetFlags.Calo_setOff() #Switched off to avoid geometry
DetFlags.ID_setOff()
DetFlags.Muon_setOff()
DetFlags.Truth_setOff()
DetFlags.LVL1_setOff()
DetFlags.digitize.all_setOff()
DetFlags.detdescr.ID_setOff()
DetFlags.detdescr.Muon_setOff()
DetFlags.detdescr.LAr_setOn()
DetFlags.detdescr.Tile_setOn()
DetFlags.readRDOBS.Tile_setOn()
if CheckDCS:
DetFlags.dcs.Tile_setOn()
else:
DetFlags.dcs.Tile_setOff()
DetFlags.Print()
from RecExConfig.RecFlags import rec
rec.doLArg = False
# set online flag if neeed
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
if athenaCommonFlags.isOnline() or doOnline or doStateless:
athenaCommonFlags.isOnline=True
log.info( 'athenaCommonFlags.isOnline = True : Online Mode' )
if doStateless:
athenaCommonFlags.isOnlineStateless=True
log.info( 'athenaCommonFlags.isOnlineStateless = True : Stateless Online Mode' )
#-----------------
# ByteSream Input
#-----------------
if not athenaCommonFlags.isOnline():
include( "ByteStreamCnvSvc/BSEventStorageEventSelector_jobOptions.py" )
include( "ByteStreamCnvSvcBase/BSAddProvSvc_RDO_jobOptions.py" )
if not 'InputDirectory' in dir():
InputDirectory="/castor/cern.ch/grid/atlas/t0/perm/DAQ"
if not 'RunNumber' in dir():
RunNumber=0
if not 'RunFromLocal' in dir():
if InputDirectory=="." or RunNumber<10:
RunFromLocal=True
else:
RunFromLocal=False
if not 'FileNameVec' in dir():
if not 'FileName' in dir():
tmp = FindFile(InputDirectory,RunNumber)
FileNameVec = tmp[0]
FormattedRunNumber = tmp[1]
else:
FileNameVec = [ InputDirectory+'/'+FileName ]
FormattedRunNumber = RunNumber
else:
FormattedRunNumber = RunNumber
svcMgr.EventSelector.SkipEvents = EvtMin
theApp.EvtMax = EvtMax
log.info( "InputDirectory is " + str(InputDirectory) )
log.info( "RunNumber is " + str(FormattedRunNumber) )
log.info( "FullFileName is " + str(FileNameVec) )
log.info( "Skip Events is " + str(EvtMin) )
log.info( "Max events is " + str(EvtMax) )
svcMgr.EventSelector.Input = FileNameVec
svcMgr.EventSelector.MaxBadEvents = MaxBadEvents
athenaCommonFlags.FilesInput = FileNameVec
projectName = FileNameVec[0].split('/').pop().split('.')[0]
log.info( "Project name is " + projectName )
rec.projectName = projectName
# init DetDescr
from AthenaCommon.GlobalFlags import jobproperties
if not 'DetDescrVersion' in dir():
DetDescrVersion = 'ATLAS-R2-2016-01-00-01'
jobproperties.Global.DetDescrVersion = DetDescrVersion
log.info( "DetDescrVersion = %s" % (jobproperties.Global.DetDescrVersion() ))
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from GeoModelSvc.GeoModelSvcConf import GeoModelSvc
GeoModelSvc = GeoModelSvc()
GeoModelSvc.IgnoreTagDifference = True
log.info( "GeoModelSvc.AtlasVersion = %s" % (GeoModelSvc.AtlasVersion) )
# Setup Db stuff
if TileUseCOOL:
from IOVDbSvc.CondDB import conddb
log.info( 'Tile COOL tag: ' + tileCOOLtag )
conddb.setGlobalTag(tileCOOLtag)
# setting option to build frag->ROB mapping at the begin of run
ByteStreamCnvSvc = Service( "ByteStreamCnvSvc" )
ByteStreamCnvSvc.ROD2ROBmap = [ "-1" ]
from TileRecUtils.TileDQstatusAlgDefault import TileDQstatusAlgDefault
TileDQstatusAlgDefault()
if not athenaCommonFlags.isOnline():
from LumiBlockComps.LuminosityCondAlgDefault import LuminosityCondAlgDefault
LuminosityCondAlgDefault()
TileRunType = 2 # laser run
doTileFit = True
TileCorrectTime = True
doTileOptATLAS = False
TileLasRun = True
TilePhysTiming = True
# load conditions data
include( "TileRec/TileDefaults_jobOptions.py" )
include( "TileConditions/TileConditions_jobOptions.py" )
# set reconstruction flags and reconstruct data
from TileRecUtils.TileRecFlags import jobproperties
jobproperties.TileRecFlags.calibrateEnergy.set_Value_and_Lock(False) #don't need pC in raw channels, keep ADC counts
jobproperties.TileRecFlags.noiseFilter.set_Value_and_Lock(1) #Enable noise filter tool
jobproperties.TileRecFlags.BestPhaseFromCOOL.set_Value_and_Lock(True) #Use best phase from COOL
jobproperties.TileRecFlags.doTileOverflowFit.set_Value_and_Lock(False)
include( "TileRec/TileRec_jobOptions.py" )
if not 'LaserUpdateFrequency' in dir():
LaserUpdateFrequency = 0
if not 'LaserResetAfterUpdate' in dir():
LaserResetAfterUpdate = False
if not 'LaserDoSummaryVsPMT' in dir():
LaserDoSummaryVsPMT = False
#----------------
# TileMonitoring
#----------------
topSequence += CfgMgr.AthenaMonManager( "TileLasMon"
, ManualRunLBSetup = True
, ManualDataTypeSetup = True
, Environment = "online"
, FileKey = MonitorOutput
, Run = RunNumber
, LumiBlock = 1)
#-------------------------------
# Tile raw channel time monitoring
#-------------------------------
TileLasRawChannelTimeMon = CfgMgr.TileRawChannelTimeMonTool ( name = "TileLasRawChannelTimeMon"
, histoPathBase = "/Tile/RawChannelTime"
, runType = TileRunType
, doOnline = athenaCommonFlags.isOnline()
, TimeCorrectionLBA = -15.18
, TimeCorrectionLBC = -15.37
, TimeCorrectionEBA = 47.65
, TimeCorrectionEBC = 47.42
, TileRawChannelContainer = "TileRawChannelFit")
topSequence.TileLasMon.AthenaMonTools += [ TileLasRawChannelTimeMon ]
print(TileLasRawChannelTimeMon)
#-------------------------------
# Tile DQFrag monitoring
#-------------------------------
TileLasDQFragMon = CfgMgr.TileDQFragMonTool( name = 'TileLasDQFragMon'
, OutputLevel = 3
, TileRawChannelContainerDSP = "TileRawChannelCnt"
, TileRawChannelContainerOffl = "TileRawChannelFit"
, TileDigitsContainer = "TileDigitsCnt"
, NegAmpHG = -200.
, NegAmpLG = -15.
, SkipMasked = True
, SkipGapCells = True
, doOnline = athenaCommonFlags.isOnline()
, doPlots = False
, CheckDCS = TileUseDCS
, histoPathBase = "/Tile/DMUErrors");
topSequence.TileLasMon.AthenaMonTools += [ TileLasDQFragMon ];
print(TileLasDQFragMon)
print(topSequence.TileLasMon)
import os
# -- use root histos --
# THistService for native root in Athena
if not athenaCommonFlags.isOnline() or storeHisto or athenaCommonFlags.isOnlineStateless():
#theApp.HistogramPersistency = "ROOT"
if not hasattr(svcMgr,"THistSvc"):
from GaudiSvc.GaudiSvcConf import THistSvc
svcMgr += THistSvc("THistSvc")
if os.path.exists(RootHistOutputFileName):
os.remove(RootHistOutputFileName)
svcMgr.THistSvc.Output = [MonitorOutput+" DATAFILE='"+RootHistOutputFileName+"' OPT='RECREATE'"]
else:
from TrigServices.TrigServicesConf import TrigMonTHistSvc
trigmonTHistSvc = TrigMonTHistSvc("THistSvc")
svcMgr += trigmonTHistSvc
#To read CTP RESULTS and DSP Raw Channels
if not hasattr( svcMgr, "ByteStreamAddressProviderSvc" ):
from ByteStreamCnvSvcBase.ByteStreamCnvSvcBaseConf import ByteStreamAddressProviderSvc
svcMgr += ByteStreamAddressProviderSvc()
svcMgr.ByteStreamAddressProviderSvc.TypeNames += [
"TileRawChannelContainer/TileRawChannelCnt",
"CTP_RDO/CTP_RDO",
"CTP_RIO/CTP_RIO",
]
svcMgr.MessageSvc.defaultLimit= MsgLinesLimit
svcMgr.MessageSvc.OutputLevel = OutputLevel
svcMgr.MessageSvc.Format = "% F%35W%S%7W%R%T %0W%M"
svcMgr.MessageSvc.useColors = useColors
#svcMgr.HistorySvc.OutputLevel = 3
theApp.EvtMax = EvtMax
from AthenaServices.AthenaServicesConf import AthenaEventLoopMgr
svcMgr += AthenaEventLoopMgr()
svcMgr.AthenaEventLoopMgr.EventPrintoutInterval = 100
if TileUseCOOL:
from DBReplicaSvc.DBReplicaSvcConf import DBReplicaSvc
svcMgr += DBReplicaSvc(UseCOOLSQLite=False)
topSequence.TileDQstatusAlg.TileBeamElemContainer = ""
| FindFile | identifier_name |
jobOptions_TileLasMon.py | #
# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
#
#*****************************************************************
#
# """topOptions file for Tile Laser Reconstruciton and Monitoring in Athena"""
# """This topOptions is intended to test the monitoring code"""
#=================================================================
from __future__ import print_function
MonitorOutput='Tile'
from AthenaCommon.Logging import logging
log = logging.getLogger( 'jobOptions_TileLasMon.py' )
from os import system, popen
def FindFile(path, runinput):
run = str(runinput)
while len(run) < 7:
run = '0' + run
files = []
fullname = []
if path.startswith("/castor") :
for f in popen('nsls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
elif path.startswith("/eos") :
for f in popen('eos ls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
else:
for f in popen('ls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
for nn in range(len(files)):
temp = files[nn].split('\n')
fullname.append(path + '/' + temp[0])
return [fullname,run]
# include Flags jobOption
include( "TileMonitoring/TileRec_FlagOptions.py" )
## get a handle to the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from AthenaCommon import CfgMgr
toolSvc = CfgMgr.ToolSvc()
# set global flags
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo.set_Value_and_Lock('commis')
globalflags.DataSource.set_Value_and_Lock('data')
globalflags.InputFormat.set_Value_and_Lock('bytestream')
from AthenaCommon.BeamFlags import jobproperties
jobproperties.Beam.beamType.set_Value_and_Lock(beamType)
# reset everything which is not needed
from AthenaCommon.DetFlags import DetFlags
DetFlags.Calo_setOff() #Switched off to avoid geometry
DetFlags.ID_setOff()
DetFlags.Muon_setOff()
DetFlags.Truth_setOff()
DetFlags.LVL1_setOff()
DetFlags.digitize.all_setOff()
DetFlags.detdescr.ID_setOff()
DetFlags.detdescr.Muon_setOff()
DetFlags.detdescr.LAr_setOn()
DetFlags.detdescr.Tile_setOn()
DetFlags.readRDOBS.Tile_setOn()
if CheckDCS:
DetFlags.dcs.Tile_setOn()
else:
DetFlags.dcs.Tile_setOff()
DetFlags.Print()
from RecExConfig.RecFlags import rec
rec.doLArg = False
# set online flag if neeed
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
if athenaCommonFlags.isOnline() or doOnline or doStateless:
athenaCommonFlags.isOnline=True
log.info( 'athenaCommonFlags.isOnline = True : Online Mode' )
if doStateless:
athenaCommonFlags.isOnlineStateless=True
log.info( 'athenaCommonFlags.isOnlineStateless = True : Stateless Online Mode' )
#-----------------
# ByteSream Input
#-----------------
if not athenaCommonFlags.isOnline():
include( "ByteStreamCnvSvc/BSEventStorageEventSelector_jobOptions.py" )
include( "ByteStreamCnvSvcBase/BSAddProvSvc_RDO_jobOptions.py" )
if not 'InputDirectory' in dir():
InputDirectory="/castor/cern.ch/grid/atlas/t0/perm/DAQ"
if not 'RunNumber' in dir():
RunNumber=0
if not 'RunFromLocal' in dir():
if InputDirectory=="." or RunNumber<10:
RunFromLocal=True
else:
RunFromLocal=False
if not 'FileNameVec' in dir():
if not 'FileName' in dir():
tmp = FindFile(InputDirectory,RunNumber)
FileNameVec = tmp[0]
FormattedRunNumber = tmp[1]
else:
FileNameVec = [ InputDirectory+'/'+FileName ]
FormattedRunNumber = RunNumber
else:
FormattedRunNumber = RunNumber
svcMgr.EventSelector.SkipEvents = EvtMin
theApp.EvtMax = EvtMax
log.info( "InputDirectory is " + str(InputDirectory) )
log.info( "RunNumber is " + str(FormattedRunNumber) )
log.info( "FullFileName is " + str(FileNameVec) )
log.info( "Skip Events is " + str(EvtMin) )
log.info( "Max events is " + str(EvtMax) )
svcMgr.EventSelector.Input = FileNameVec
svcMgr.EventSelector.MaxBadEvents = MaxBadEvents
athenaCommonFlags.FilesInput = FileNameVec
projectName = FileNameVec[0].split('/').pop().split('.')[0]
log.info( "Project name is " + projectName )
rec.projectName = projectName
# init DetDescr
from AthenaCommon.GlobalFlags import jobproperties
if not 'DetDescrVersion' in dir():
DetDescrVersion = 'ATLAS-R2-2016-01-00-01'
jobproperties.Global.DetDescrVersion = DetDescrVersion
log.info( "DetDescrVersion = %s" % (jobproperties.Global.DetDescrVersion() ))
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from GeoModelSvc.GeoModelSvcConf import GeoModelSvc
GeoModelSvc = GeoModelSvc()
GeoModelSvc.IgnoreTagDifference = True
log.info( "GeoModelSvc.AtlasVersion = %s" % (GeoModelSvc.AtlasVersion) )
# Setup Db stuff
if TileUseCOOL:
from IOVDbSvc.CondDB import conddb
log.info( 'Tile COOL tag: ' + tileCOOLtag )
conddb.setGlobalTag(tileCOOLtag)
# setting option to build frag->ROB mapping at the begin of run
ByteStreamCnvSvc = Service( "ByteStreamCnvSvc" )
ByteStreamCnvSvc.ROD2ROBmap = [ "-1" ]
from TileRecUtils.TileDQstatusAlgDefault import TileDQstatusAlgDefault
TileDQstatusAlgDefault()
if not athenaCommonFlags.isOnline():
from LumiBlockComps.LuminosityCondAlgDefault import LuminosityCondAlgDefault
LuminosityCondAlgDefault()
TileRunType = 2 # laser run
doTileFit = True
TileCorrectTime = True
doTileOptATLAS = False
TileLasRun = True
TilePhysTiming = True
# load conditions data
include( "TileRec/TileDefaults_jobOptions.py" )
include( "TileConditions/TileConditions_jobOptions.py" )
# set reconstruction flags and reconstruct data
from TileRecUtils.TileRecFlags import jobproperties
jobproperties.TileRecFlags.calibrateEnergy.set_Value_and_Lock(False) #don't need pC in raw channels, keep ADC counts
jobproperties.TileRecFlags.noiseFilter.set_Value_and_Lock(1) #Enable noise filter tool
jobproperties.TileRecFlags.BestPhaseFromCOOL.set_Value_and_Lock(True) #Use best phase from COOL
jobproperties.TileRecFlags.doTileOverflowFit.set_Value_and_Lock(False)
include( "TileRec/TileRec_jobOptions.py" )
if not 'LaserUpdateFrequency' in dir():
LaserUpdateFrequency = 0
if not 'LaserResetAfterUpdate' in dir():
LaserResetAfterUpdate = False
if not 'LaserDoSummaryVsPMT' in dir():
LaserDoSummaryVsPMT = False
#----------------
# TileMonitoring
#----------------
topSequence += CfgMgr.AthenaMonManager( "TileLasMon"
, ManualRunLBSetup = True
, ManualDataTypeSetup = True
, Environment = "online"
, FileKey = MonitorOutput
, Run = RunNumber
, LumiBlock = 1)
#-------------------------------
# Tile raw channel time monitoring
#-------------------------------
TileLasRawChannelTimeMon = CfgMgr.TileRawChannelTimeMonTool ( name = "TileLasRawChannelTimeMon"
, histoPathBase = "/Tile/RawChannelTime"
, runType = TileRunType
, doOnline = athenaCommonFlags.isOnline()
, TimeCorrectionLBA = -15.18
, TimeCorrectionLBC = -15.37
, TimeCorrectionEBA = 47.65
, TimeCorrectionEBC = 47.42
, TileRawChannelContainer = "TileRawChannelFit")
topSequence.TileLasMon.AthenaMonTools += [ TileLasRawChannelTimeMon ]
print(TileLasRawChannelTimeMon)
#-------------------------------
# Tile DQFrag monitoring
#-------------------------------
TileLasDQFragMon = CfgMgr.TileDQFragMonTool( name = 'TileLasDQFragMon'
, OutputLevel = 3
, TileRawChannelContainerDSP = "TileRawChannelCnt"
, TileRawChannelContainerOffl = "TileRawChannelFit"
, TileDigitsContainer = "TileDigitsCnt"
, NegAmpHG = -200.
, NegAmpLG = -15.
, SkipMasked = True
, SkipGapCells = True
, doOnline = athenaCommonFlags.isOnline()
, doPlots = False
, CheckDCS = TileUseDCS
, histoPathBase = "/Tile/DMUErrors"); | print(topSequence.TileLasMon)
import os
# -- use root histos --
# THistService for native root in Athena
if not athenaCommonFlags.isOnline() or storeHisto or athenaCommonFlags.isOnlineStateless():
#theApp.HistogramPersistency = "ROOT"
if not hasattr(svcMgr,"THistSvc"):
from GaudiSvc.GaudiSvcConf import THistSvc
svcMgr += THistSvc("THistSvc")
if os.path.exists(RootHistOutputFileName):
os.remove(RootHistOutputFileName)
svcMgr.THistSvc.Output = [MonitorOutput+" DATAFILE='"+RootHistOutputFileName+"' OPT='RECREATE'"]
else:
from TrigServices.TrigServicesConf import TrigMonTHistSvc
trigmonTHistSvc = TrigMonTHistSvc("THistSvc")
svcMgr += trigmonTHistSvc
#To read CTP RESULTS and DSP Raw Channels
if not hasattr( svcMgr, "ByteStreamAddressProviderSvc" ):
from ByteStreamCnvSvcBase.ByteStreamCnvSvcBaseConf import ByteStreamAddressProviderSvc
svcMgr += ByteStreamAddressProviderSvc()
svcMgr.ByteStreamAddressProviderSvc.TypeNames += [
"TileRawChannelContainer/TileRawChannelCnt",
"CTP_RDO/CTP_RDO",
"CTP_RIO/CTP_RIO",
]
svcMgr.MessageSvc.defaultLimit= MsgLinesLimit
svcMgr.MessageSvc.OutputLevel = OutputLevel
svcMgr.MessageSvc.Format = "% F%35W%S%7W%R%T %0W%M"
svcMgr.MessageSvc.useColors = useColors
#svcMgr.HistorySvc.OutputLevel = 3
theApp.EvtMax = EvtMax
from AthenaServices.AthenaServicesConf import AthenaEventLoopMgr
svcMgr += AthenaEventLoopMgr()
svcMgr.AthenaEventLoopMgr.EventPrintoutInterval = 100
if TileUseCOOL:
from DBReplicaSvc.DBReplicaSvcConf import DBReplicaSvc
svcMgr += DBReplicaSvc(UseCOOLSQLite=False)
topSequence.TileDQstatusAlg.TileBeamElemContainer = "" |
topSequence.TileLasMon.AthenaMonTools += [ TileLasDQFragMon ];
print(TileLasDQFragMon) | random_line_split |
jobOptions_TileLasMon.py | #
# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
#
#*****************************************************************
#
# """topOptions file for Tile Laser Reconstruciton and Monitoring in Athena"""
# """This topOptions is intended to test the monitoring code"""
#=================================================================
from __future__ import print_function
MonitorOutput='Tile'
from AthenaCommon.Logging import logging
log = logging.getLogger( 'jobOptions_TileLasMon.py' )
from os import system, popen
def FindFile(path, runinput):
run = str(runinput)
while len(run) < 7:
run = '0' + run
files = []
fullname = []
if path.startswith("/castor") :
for f in popen('nsls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
elif path.startswith("/eos") :
for f in popen('eos ls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
else:
for f in popen('ls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
for nn in range(len(files)):
temp = files[nn].split('\n')
fullname.append(path + '/' + temp[0])
return [fullname,run]
# include Flags jobOption
include( "TileMonitoring/TileRec_FlagOptions.py" )
## get a handle to the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from AthenaCommon import CfgMgr
toolSvc = CfgMgr.ToolSvc()
# set global flags
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo.set_Value_and_Lock('commis')
globalflags.DataSource.set_Value_and_Lock('data')
globalflags.InputFormat.set_Value_and_Lock('bytestream')
from AthenaCommon.BeamFlags import jobproperties
jobproperties.Beam.beamType.set_Value_and_Lock(beamType)
# reset everything which is not needed
from AthenaCommon.DetFlags import DetFlags
DetFlags.Calo_setOff() #Switched off to avoid geometry
DetFlags.ID_setOff()
DetFlags.Muon_setOff()
DetFlags.Truth_setOff()
DetFlags.LVL1_setOff()
DetFlags.digitize.all_setOff()
DetFlags.detdescr.ID_setOff()
DetFlags.detdescr.Muon_setOff()
DetFlags.detdescr.LAr_setOn()
DetFlags.detdescr.Tile_setOn()
DetFlags.readRDOBS.Tile_setOn()
if CheckDCS:
DetFlags.dcs.Tile_setOn()
else:
DetFlags.dcs.Tile_setOff()
DetFlags.Print()
from RecExConfig.RecFlags import rec
rec.doLArg = False
# set online flag if neeed
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
if athenaCommonFlags.isOnline() or doOnline or doStateless:
athenaCommonFlags.isOnline=True
log.info( 'athenaCommonFlags.isOnline = True : Online Mode' )
if doStateless:
athenaCommonFlags.isOnlineStateless=True
log.info( 'athenaCommonFlags.isOnlineStateless = True : Stateless Online Mode' )
#-----------------
# ByteSream Input
#-----------------
if not athenaCommonFlags.isOnline():
include( "ByteStreamCnvSvc/BSEventStorageEventSelector_jobOptions.py" )
include( "ByteStreamCnvSvcBase/BSAddProvSvc_RDO_jobOptions.py" )
if not 'InputDirectory' in dir():
InputDirectory="/castor/cern.ch/grid/atlas/t0/perm/DAQ"
if not 'RunNumber' in dir():
RunNumber=0
if not 'RunFromLocal' in dir():
|
if not 'FileNameVec' in dir():
if not 'FileName' in dir():
tmp = FindFile(InputDirectory,RunNumber)
FileNameVec = tmp[0]
FormattedRunNumber = tmp[1]
else:
FileNameVec = [ InputDirectory+'/'+FileName ]
FormattedRunNumber = RunNumber
else:
FormattedRunNumber = RunNumber
svcMgr.EventSelector.SkipEvents = EvtMin
theApp.EvtMax = EvtMax
log.info( "InputDirectory is " + str(InputDirectory) )
log.info( "RunNumber is " + str(FormattedRunNumber) )
log.info( "FullFileName is " + str(FileNameVec) )
log.info( "Skip Events is " + str(EvtMin) )
log.info( "Max events is " + str(EvtMax) )
svcMgr.EventSelector.Input = FileNameVec
svcMgr.EventSelector.MaxBadEvents = MaxBadEvents
athenaCommonFlags.FilesInput = FileNameVec
projectName = FileNameVec[0].split('/').pop().split('.')[0]
log.info( "Project name is " + projectName )
rec.projectName = projectName
# init DetDescr
from AthenaCommon.GlobalFlags import jobproperties
if not 'DetDescrVersion' in dir():
DetDescrVersion = 'ATLAS-R2-2016-01-00-01'
jobproperties.Global.DetDescrVersion = DetDescrVersion
log.info( "DetDescrVersion = %s" % (jobproperties.Global.DetDescrVersion() ))
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from GeoModelSvc.GeoModelSvcConf import GeoModelSvc
GeoModelSvc = GeoModelSvc()
GeoModelSvc.IgnoreTagDifference = True
log.info( "GeoModelSvc.AtlasVersion = %s" % (GeoModelSvc.AtlasVersion) )
# Setup Db stuff
if TileUseCOOL:
from IOVDbSvc.CondDB import conddb
log.info( 'Tile COOL tag: ' + tileCOOLtag )
conddb.setGlobalTag(tileCOOLtag)
# setting option to build frag->ROB mapping at the begin of run
ByteStreamCnvSvc = Service( "ByteStreamCnvSvc" )
ByteStreamCnvSvc.ROD2ROBmap = [ "-1" ]
from TileRecUtils.TileDQstatusAlgDefault import TileDQstatusAlgDefault
TileDQstatusAlgDefault()
if not athenaCommonFlags.isOnline():
from LumiBlockComps.LuminosityCondAlgDefault import LuminosityCondAlgDefault
LuminosityCondAlgDefault()
TileRunType = 2 # laser run
doTileFit = True
TileCorrectTime = True
doTileOptATLAS = False
TileLasRun = True
TilePhysTiming = True
# load conditions data
include( "TileRec/TileDefaults_jobOptions.py" )
include( "TileConditions/TileConditions_jobOptions.py" )
# set reconstruction flags and reconstruct data
from TileRecUtils.TileRecFlags import jobproperties
jobproperties.TileRecFlags.calibrateEnergy.set_Value_and_Lock(False) #don't need pC in raw channels, keep ADC counts
jobproperties.TileRecFlags.noiseFilter.set_Value_and_Lock(1) #Enable noise filter tool
jobproperties.TileRecFlags.BestPhaseFromCOOL.set_Value_and_Lock(True) #Use best phase from COOL
jobproperties.TileRecFlags.doTileOverflowFit.set_Value_and_Lock(False)
include( "TileRec/TileRec_jobOptions.py" )
if not 'LaserUpdateFrequency' in dir():
LaserUpdateFrequency = 0
if not 'LaserResetAfterUpdate' in dir():
LaserResetAfterUpdate = False
if not 'LaserDoSummaryVsPMT' in dir():
LaserDoSummaryVsPMT = False
#----------------
# TileMonitoring
#----------------
topSequence += CfgMgr.AthenaMonManager( "TileLasMon"
, ManualRunLBSetup = True
, ManualDataTypeSetup = True
, Environment = "online"
, FileKey = MonitorOutput
, Run = RunNumber
, LumiBlock = 1)
#-------------------------------
# Tile raw channel time monitoring
#-------------------------------
TileLasRawChannelTimeMon = CfgMgr.TileRawChannelTimeMonTool ( name = "TileLasRawChannelTimeMon"
, histoPathBase = "/Tile/RawChannelTime"
, runType = TileRunType
, doOnline = athenaCommonFlags.isOnline()
, TimeCorrectionLBA = -15.18
, TimeCorrectionLBC = -15.37
, TimeCorrectionEBA = 47.65
, TimeCorrectionEBC = 47.42
, TileRawChannelContainer = "TileRawChannelFit")
topSequence.TileLasMon.AthenaMonTools += [ TileLasRawChannelTimeMon ]
print(TileLasRawChannelTimeMon)
#-------------------------------
# Tile DQFrag monitoring
#-------------------------------
TileLasDQFragMon = CfgMgr.TileDQFragMonTool( name = 'TileLasDQFragMon'
, OutputLevel = 3
, TileRawChannelContainerDSP = "TileRawChannelCnt"
, TileRawChannelContainerOffl = "TileRawChannelFit"
, TileDigitsContainer = "TileDigitsCnt"
, NegAmpHG = -200.
, NegAmpLG = -15.
, SkipMasked = True
, SkipGapCells = True
, doOnline = athenaCommonFlags.isOnline()
, doPlots = False
, CheckDCS = TileUseDCS
, histoPathBase = "/Tile/DMUErrors");
topSequence.TileLasMon.AthenaMonTools += [ TileLasDQFragMon ];
print(TileLasDQFragMon)
print(topSequence.TileLasMon)
import os
# -- use root histos --
# THistService for native root in Athena
if not athenaCommonFlags.isOnline() or storeHisto or athenaCommonFlags.isOnlineStateless():
#theApp.HistogramPersistency = "ROOT"
if not hasattr(svcMgr,"THistSvc"):
from GaudiSvc.GaudiSvcConf import THistSvc
svcMgr += THistSvc("THistSvc")
if os.path.exists(RootHistOutputFileName):
os.remove(RootHistOutputFileName)
svcMgr.THistSvc.Output = [MonitorOutput+" DATAFILE='"+RootHistOutputFileName+"' OPT='RECREATE'"]
else:
from TrigServices.TrigServicesConf import TrigMonTHistSvc
trigmonTHistSvc = TrigMonTHistSvc("THistSvc")
svcMgr += trigmonTHistSvc
#To read CTP RESULTS and DSP Raw Channels
if not hasattr( svcMgr, "ByteStreamAddressProviderSvc" ):
from ByteStreamCnvSvcBase.ByteStreamCnvSvcBaseConf import ByteStreamAddressProviderSvc
svcMgr += ByteStreamAddressProviderSvc()
svcMgr.ByteStreamAddressProviderSvc.TypeNames += [
"TileRawChannelContainer/TileRawChannelCnt",
"CTP_RDO/CTP_RDO",
"CTP_RIO/CTP_RIO",
]
svcMgr.MessageSvc.defaultLimit= MsgLinesLimit
svcMgr.MessageSvc.OutputLevel = OutputLevel
svcMgr.MessageSvc.Format = "% F%35W%S%7W%R%T %0W%M"
svcMgr.MessageSvc.useColors = useColors
#svcMgr.HistorySvc.OutputLevel = 3
theApp.EvtMax = EvtMax
from AthenaServices.AthenaServicesConf import AthenaEventLoopMgr
svcMgr += AthenaEventLoopMgr()
svcMgr.AthenaEventLoopMgr.EventPrintoutInterval = 100
if TileUseCOOL:
from DBReplicaSvc.DBReplicaSvcConf import DBReplicaSvc
svcMgr += DBReplicaSvc(UseCOOLSQLite=False)
topSequence.TileDQstatusAlg.TileBeamElemContainer = ""
| if InputDirectory=="." or RunNumber<10:
RunFromLocal=True
else:
RunFromLocal=False | conditional_block |
jobOptions_TileLasMon.py | #
# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
#
#*****************************************************************
#
# """topOptions file for Tile Laser Reconstruciton and Monitoring in Athena"""
# """This topOptions is intended to test the monitoring code"""
#=================================================================
from __future__ import print_function
MonitorOutput='Tile'
from AthenaCommon.Logging import logging
log = logging.getLogger( 'jobOptions_TileLasMon.py' )
from os import system, popen
def FindFile(path, runinput):
|
# include Flags jobOption
include( "TileMonitoring/TileRec_FlagOptions.py" )
## get a handle to the default top-level algorithm sequence
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
# Get a handle to the ServiceManager
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
from AthenaCommon import CfgMgr
toolSvc = CfgMgr.ToolSvc()
# set global flags
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo.set_Value_and_Lock('commis')
globalflags.DataSource.set_Value_and_Lock('data')
globalflags.InputFormat.set_Value_and_Lock('bytestream')
from AthenaCommon.BeamFlags import jobproperties
jobproperties.Beam.beamType.set_Value_and_Lock(beamType)
# reset everything which is not needed
from AthenaCommon.DetFlags import DetFlags
DetFlags.Calo_setOff() #Switched off to avoid geometry
DetFlags.ID_setOff()
DetFlags.Muon_setOff()
DetFlags.Truth_setOff()
DetFlags.LVL1_setOff()
DetFlags.digitize.all_setOff()
DetFlags.detdescr.ID_setOff()
DetFlags.detdescr.Muon_setOff()
DetFlags.detdescr.LAr_setOn()
DetFlags.detdescr.Tile_setOn()
DetFlags.readRDOBS.Tile_setOn()
if CheckDCS:
DetFlags.dcs.Tile_setOn()
else:
DetFlags.dcs.Tile_setOff()
DetFlags.Print()
from RecExConfig.RecFlags import rec
rec.doLArg = False
# set online flag if neeed
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
if athenaCommonFlags.isOnline() or doOnline or doStateless:
athenaCommonFlags.isOnline=True
log.info( 'athenaCommonFlags.isOnline = True : Online Mode' )
if doStateless:
athenaCommonFlags.isOnlineStateless=True
log.info( 'athenaCommonFlags.isOnlineStateless = True : Stateless Online Mode' )
#-----------------
# ByteSream Input
#-----------------
if not athenaCommonFlags.isOnline():
include( "ByteStreamCnvSvc/BSEventStorageEventSelector_jobOptions.py" )
include( "ByteStreamCnvSvcBase/BSAddProvSvc_RDO_jobOptions.py" )
if not 'InputDirectory' in dir():
InputDirectory="/castor/cern.ch/grid/atlas/t0/perm/DAQ"
if not 'RunNumber' in dir():
RunNumber=0
if not 'RunFromLocal' in dir():
if InputDirectory=="." or RunNumber<10:
RunFromLocal=True
else:
RunFromLocal=False
if not 'FileNameVec' in dir():
if not 'FileName' in dir():
tmp = FindFile(InputDirectory,RunNumber)
FileNameVec = tmp[0]
FormattedRunNumber = tmp[1]
else:
FileNameVec = [ InputDirectory+'/'+FileName ]
FormattedRunNumber = RunNumber
else:
FormattedRunNumber = RunNumber
svcMgr.EventSelector.SkipEvents = EvtMin
theApp.EvtMax = EvtMax
log.info( "InputDirectory is " + str(InputDirectory) )
log.info( "RunNumber is " + str(FormattedRunNumber) )
log.info( "FullFileName is " + str(FileNameVec) )
log.info( "Skip Events is " + str(EvtMin) )
log.info( "Max events is " + str(EvtMax) )
svcMgr.EventSelector.Input = FileNameVec
svcMgr.EventSelector.MaxBadEvents = MaxBadEvents
athenaCommonFlags.FilesInput = FileNameVec
projectName = FileNameVec[0].split('/').pop().split('.')[0]
log.info( "Project name is " + projectName )
rec.projectName = projectName
# init DetDescr
from AthenaCommon.GlobalFlags import jobproperties
if not 'DetDescrVersion' in dir():
DetDescrVersion = 'ATLAS-R2-2016-01-00-01'
jobproperties.Global.DetDescrVersion = DetDescrVersion
log.info( "DetDescrVersion = %s" % (jobproperties.Global.DetDescrVersion() ))
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from GeoModelSvc.GeoModelSvcConf import GeoModelSvc
GeoModelSvc = GeoModelSvc()
GeoModelSvc.IgnoreTagDifference = True
log.info( "GeoModelSvc.AtlasVersion = %s" % (GeoModelSvc.AtlasVersion) )
# Setup Db stuff
if TileUseCOOL:
from IOVDbSvc.CondDB import conddb
log.info( 'Tile COOL tag: ' + tileCOOLtag )
conddb.setGlobalTag(tileCOOLtag)
# setting option to build frag->ROB mapping at the begin of run
ByteStreamCnvSvc = Service( "ByteStreamCnvSvc" )
ByteStreamCnvSvc.ROD2ROBmap = [ "-1" ]
from TileRecUtils.TileDQstatusAlgDefault import TileDQstatusAlgDefault
TileDQstatusAlgDefault()
if not athenaCommonFlags.isOnline():
from LumiBlockComps.LuminosityCondAlgDefault import LuminosityCondAlgDefault
LuminosityCondAlgDefault()
TileRunType = 2 # laser run
doTileFit = True
TileCorrectTime = True
doTileOptATLAS = False
TileLasRun = True
TilePhysTiming = True
# load conditions data
include( "TileRec/TileDefaults_jobOptions.py" )
include( "TileConditions/TileConditions_jobOptions.py" )
# set reconstruction flags and reconstruct data
from TileRecUtils.TileRecFlags import jobproperties
jobproperties.TileRecFlags.calibrateEnergy.set_Value_and_Lock(False) #don't need pC in raw channels, keep ADC counts
jobproperties.TileRecFlags.noiseFilter.set_Value_and_Lock(1) #Enable noise filter tool
jobproperties.TileRecFlags.BestPhaseFromCOOL.set_Value_and_Lock(True) #Use best phase from COOL
jobproperties.TileRecFlags.doTileOverflowFit.set_Value_and_Lock(False)
include( "TileRec/TileRec_jobOptions.py" )
if not 'LaserUpdateFrequency' in dir():
LaserUpdateFrequency = 0
if not 'LaserResetAfterUpdate' in dir():
LaserResetAfterUpdate = False
if not 'LaserDoSummaryVsPMT' in dir():
LaserDoSummaryVsPMT = False
#----------------
# TileMonitoring
#----------------
topSequence += CfgMgr.AthenaMonManager( "TileLasMon"
, ManualRunLBSetup = True
, ManualDataTypeSetup = True
, Environment = "online"
, FileKey = MonitorOutput
, Run = RunNumber
, LumiBlock = 1)
#-------------------------------
# Tile raw channel time monitoring
#-------------------------------
TileLasRawChannelTimeMon = CfgMgr.TileRawChannelTimeMonTool ( name = "TileLasRawChannelTimeMon"
, histoPathBase = "/Tile/RawChannelTime"
, runType = TileRunType
, doOnline = athenaCommonFlags.isOnline()
, TimeCorrectionLBA = -15.18
, TimeCorrectionLBC = -15.37
, TimeCorrectionEBA = 47.65
, TimeCorrectionEBC = 47.42
, TileRawChannelContainer = "TileRawChannelFit")
topSequence.TileLasMon.AthenaMonTools += [ TileLasRawChannelTimeMon ]
print(TileLasRawChannelTimeMon)
#-------------------------------
# Tile DQFrag monitoring
#-------------------------------
TileLasDQFragMon = CfgMgr.TileDQFragMonTool( name = 'TileLasDQFragMon'
, OutputLevel = 3
, TileRawChannelContainerDSP = "TileRawChannelCnt"
, TileRawChannelContainerOffl = "TileRawChannelFit"
, TileDigitsContainer = "TileDigitsCnt"
, NegAmpHG = -200.
, NegAmpLG = -15.
, SkipMasked = True
, SkipGapCells = True
, doOnline = athenaCommonFlags.isOnline()
, doPlots = False
, CheckDCS = TileUseDCS
, histoPathBase = "/Tile/DMUErrors");
topSequence.TileLasMon.AthenaMonTools += [ TileLasDQFragMon ];
print(TileLasDQFragMon)
print(topSequence.TileLasMon)
import os
# -- use root histos --
# THistService for native root in Athena
if not athenaCommonFlags.isOnline() or storeHisto or athenaCommonFlags.isOnlineStateless():
#theApp.HistogramPersistency = "ROOT"
if not hasattr(svcMgr,"THistSvc"):
from GaudiSvc.GaudiSvcConf import THistSvc
svcMgr += THistSvc("THistSvc")
if os.path.exists(RootHistOutputFileName):
os.remove(RootHistOutputFileName)
svcMgr.THistSvc.Output = [MonitorOutput+" DATAFILE='"+RootHistOutputFileName+"' OPT='RECREATE'"]
else:
from TrigServices.TrigServicesConf import TrigMonTHistSvc
trigmonTHistSvc = TrigMonTHistSvc("THistSvc")
svcMgr += trigmonTHistSvc
#To read CTP RESULTS and DSP Raw Channels
if not hasattr( svcMgr, "ByteStreamAddressProviderSvc" ):
from ByteStreamCnvSvcBase.ByteStreamCnvSvcBaseConf import ByteStreamAddressProviderSvc
svcMgr += ByteStreamAddressProviderSvc()
svcMgr.ByteStreamAddressProviderSvc.TypeNames += [
"TileRawChannelContainer/TileRawChannelCnt",
"CTP_RDO/CTP_RDO",
"CTP_RIO/CTP_RIO",
]
svcMgr.MessageSvc.defaultLimit= MsgLinesLimit
svcMgr.MessageSvc.OutputLevel = OutputLevel
svcMgr.MessageSvc.Format = "% F%35W%S%7W%R%T %0W%M"
svcMgr.MessageSvc.useColors = useColors
#svcMgr.HistorySvc.OutputLevel = 3
theApp.EvtMax = EvtMax
from AthenaServices.AthenaServicesConf import AthenaEventLoopMgr
svcMgr += AthenaEventLoopMgr()
svcMgr.AthenaEventLoopMgr.EventPrintoutInterval = 100
if TileUseCOOL:
from DBReplicaSvc.DBReplicaSvcConf import DBReplicaSvc
svcMgr += DBReplicaSvc(UseCOOLSQLite=False)
topSequence.TileDQstatusAlg.TileBeamElemContainer = ""
| run = str(runinput)
while len(run) < 7:
run = '0' + run
files = []
fullname = []
if path.startswith("/castor") :
for f in popen('nsls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
elif path.startswith("/eos") :
for f in popen('eos ls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
else:
for f in popen('ls %(path)s | grep %(run)s' % {'path': path, 'run':run }):
files.append(f)
for nn in range(len(files)):
temp = files[nn].split('\n')
fullname.append(path + '/' + temp[0])
return [fullname,run] | identifier_body |
trainer.py | import logging
import os.path as osp
import queue
import sys
import threading
import time
from collections import OrderedDict
import torch
from det3d import torchie
from . import hooks
from .checkpoint import load_checkpoint, save_checkpoint
from .hooks import (
CheckpointHook,
Hook,
IterTimerHook,
LrUpdaterHook,
OptimizerHook,
lr_updater,
)
from .log_buffer import LogBuffer
from .priority import get_priority
from .utils import (
all_gather,
get_dist_info,
get_host_info,
get_time_str,
obj_from_dict,
synchronize,
)
def example_to_device(example, device, non_blocking=False) -> dict:
example_torch = {}
float_names = ["voxels", "bev_map"]
for k, v in example.items():
if k in ["anchors", "anchors_mask", "reg_targets", "reg_weights", "labels", "hm",
"anno_box", "ind", "mask", 'cat', 'points']:
example_torch[k] = [res.to(device, non_blocking=non_blocking) for res in v]
elif k in [
"voxels",
"bev_map",
"coordinates",
"num_points",
"num_voxels",
"cyv_voxels",
"cyv_num_voxels",
"cyv_coordinates",
"cyv_num_points",
"gt_boxes_and_cls"
]:
example_torch[k] = v.to(device, non_blocking=non_blocking)
elif k == "calib":
calib = {}
for k1, v1 in v.items():
calib[k1] = v1.to(device, non_blocking=non_blocking)
example_torch[k] = calib
else:
example_torch[k] = v
return example_torch
def parse_second_losses(losses):
log_vars = OrderedDict()
loss = sum(losses["loss"])
for loss_name, loss_value in losses.items():
if loss_name == "loc_loss_elem":
log_vars[loss_name] = [[i.item() for i in j] for j in loss_value]
else:
log_vars[loss_name] = [i.item() for i in loss_value]
return loss, log_vars
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, max_prefetch=1):
threading.Thread.__init__(self)
self.queue = queue.Queue(max_prefetch)
self.generator = generator
self.daemon = True
self.start()
def run(self):
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
# Python 3 compatibility
def __next__(self):
return self.next()
def __iter__(self):
return self
class Prefetcher(object):
def __init__(self, dataloader):
self.loader = iter(dataloader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_input = next(self.loader)
except StopIteration:
self.next_input = None
return
with torch.cuda.stream(self.stream):
self.next_input = example_to_device(
self.next_input, torch.cuda.current_device(), non_blocking=False
)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
self.preload()
return input
class Trainer(object):
""" A training helper for PyTorch
Args:
model:
batch_processor:
optimizer:
workdir:
log_level:
logger:
"""
def __init__(
self,
model,
batch_processor,
optimizer=None,
lr_scheduler=None,
work_dir=None,
log_level=logging.INFO,
logger=None,
**kwargs,
):
assert callable(batch_processor)
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.batch_processor = batch_processor
# Create work_dir
if torchie.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
torchie.mkdir_or_exist(self.work_dir)
elif work_dir is None:
self.work_dir = None
else:
raise TypeError("'work_dir' must be a str or None")
# Get model name from the model class
if hasattr(self.model, "module"):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
self._rank, self._world_size = get_dist_info()
self.timestamp = get_time_str()
if logger is None:
self.logger = self.init_logger(work_dir, log_level)
else:
self.logger = logger
self.log_buffer = LogBuffer()
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
"""str: Name of the model, usually the module class name."""
return self._model_name
@property
def rank(self):
"""int: Rank of current process. (distributed training)"""
return self._rank
@property
def world_size(self):
"""int: Number of processes participating in the job.
(distributed training)"""
return self._world_size
@property
def hooks(self):
"""list[:obj:`Hook`]: A list of registered hooks."""
return self._hooks
@property
def epoch(self):
"""int: Current epoch."""
return self._epoch
@property
def iter(self):
"""int: Current iteration."""
return self._iter
@property
def inner_iter(self):
"""int: Iteration in an epoch."""
return self._inner_iter
@property
def max_epochs(self):
"""int: Maximum training epochs."""
return self._max_epochs
@property
def max_iters(self):
"""int: Maximum training iterations."""
return self._max_iters
def init_optimizer(self, optimizer):
"""Init the optimizer
Args:
optimizer (dict or :obj:`~torch.optim.Optimizer`)
Returns:
:obj:`~torch.optim.Optimizer`
Examples:
>>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
>>> type(runner.init_optimizer(optimizer))
<class 'torch.optim.sgd.SGD`>
"""
if isinstance(optimizer, dict):
optimizer = obj_from_dict(
optimizer, torch.optim, dict(params=self.model.parameters())
)
elif not isinstance(optimizer, torch.optim.Optimizer):
raise TypeError(
"optimizer must be either an Optimizer object or a dict, "
"but got {}".format(type(optimizer))
)
return optimizer
def _add_file_handler(self, logger, filename=None, mode="w", level=logging.INFO):
# TODO: move this method out of runner
file_handler = logging.FileHandler(filename, mode)
file_handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
file_handler.setLevel(level)
logger.addHandler(file_handler)
return logger
def init_logger(self, log_dir=None, level=logging.INFO):
"""Init the logger.
Args:
Returns:
:obj:`~logging.Logger`: Python logger.
"""
logging.basicConfig(
format="%(asctime)s - %(levelname)s - % (message)s", level=level
)
logger = logging.getLogger(__name__)
if log_dir and self.rank == 0:
filename = "{}.log".format(self.timestamp)
log_file = osp.join(log_dir, filename)
self._add_file_handler(logger, log_file, level=level)
return logger
def current_lr(self):
if self.optimizer is None:
raise RuntimeError("lr is not applicable because optimizer does not exist.")
return [group["lr"] for group in self.optimizer.param_groups]
def register_hook(self, hook, priority="NORMAL"):
"""Register a hook into the hook list.
Args:
hook (:obj:`Hook`)
priority (int or str or :obj:`Priority`)
"""
assert isinstance(hook, Hook)
if hasattr(hook, "priority"):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
# Insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook)
def build_hook(self, args, hook_type=None):
if isinstance(args, Hook):
return args
elif isinstance(args, dict):
assert issubclass(hook_type, Hook)
return hook_type(**args)
else:
raise TypeError(
"'args' must be either a Hook object"
" or dict, not {}".format(type(args))
)
def call_hook(self, fn_name):
for hook in self._hooks:
getattr(hook, fn_name)(self)
def load_checkpoint(self, filename, map_location="cpu", strict=False):
self.logger.info("load checkpoint from %s", filename)
return load_checkpoint(self.model, filename, map_location, strict, self.logger)
def save_checkpoint(
self, out_dir, filename_tmpl="epoch_{}.pth", save_optimizer=True, meta=None
):
if meta is None:
meta = dict(epoch=self.epoch + 1, iter=self.iter)
else:
meta.update(epoch=self.epoch + 1, iter=self.iter)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
linkpath = osp.join(out_dir, "latest.pth")
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
# Use relative symlink
torchie.symlink(filename, linkpath)
def batch_processor_inline(self, model, data, train_mode, **kwargs):
if "local_rank" in kwargs:
device = torch.device(kwargs["local_rank"])
else:
device = None
# data = example_convert_to_torch(data, device=device)
example = example_to_device(
data, torch.cuda.current_device(), non_blocking=False
)
self.call_hook("after_data_to_device")
if train_mode:
|
else:
return model(example, return_loss=False)
def train(self, data_loader, epoch, **kwargs):
self.model.train()
self.mode = "train"
self.data_loader = data_loader
self.length = len(data_loader)
self._max_iters = self._max_epochs * self.length
self.call_hook("before_train_epoch")
base_step = epoch * self.length
# prefetcher = Prefetcher(data_loader)
# for data_batch in BackgroundGenerator(data_loader, max_prefetch=3):
for i, data_batch in enumerate(data_loader):
global_step = base_step + i
if self.lr_scheduler is not None:
#print(global_step)
self.lr_scheduler.step(global_step)
self._inner_iter = i
self.call_hook("before_train_iter")
# outputs = self.batch_processor(self.model,
# data_batch,
# train_mode=True,
# **kwargs)
outputs = self.batch_processor_inline(
self.model, data_batch, train_mode=True, **kwargs
)
if not isinstance(outputs, dict):
raise TypeError("batch_processor() must return a dict")
if "log_vars" in outputs:
self.log_buffer.update(outputs["log_vars"], outputs["num_samples"])
self.outputs = outputs
self.call_hook("after_train_iter")
self._iter += 1
self.call_hook("after_train_epoch")
self._epoch += 1
def val(self, data_loader, **kwargs):
self.model.eval()
self.mode = "val"
self.data_loader = data_loader
self.call_hook("before_val_epoch")
self.logger.info(f"work dir: {self.work_dir}")
if self.rank == 0:
prog_bar = torchie.ProgressBar(len(data_loader.dataset))
detections = {}
cpu_device = torch.device("cpu")
for i, data_batch in enumerate(data_loader):
self._inner_iter = i
self.call_hook("before_val_iter")
with torch.no_grad():
outputs = self.batch_processor(
self.model, data_batch, train_mode=False, **kwargs
)
for output in outputs:
token = output["metadata"]["token"]
for k, v in output.items():
if k not in [
"metadata",
]:
output[k] = v.to(cpu_device)
detections.update(
{token: output,}
)
if self.rank == 0:
for _ in range(self.world_size):
prog_bar.update()
synchronize()
all_predictions = all_gather(detections)
if self.rank != 0:
return
predictions = {}
for p in all_predictions:
predictions.update(p)
# torch.save(predictions, "final_predictions_debug.pkl")
# TODO fix evaluation module
result_dict, _ = self.data_loader.dataset.evaluation(
predictions, output_dir=self.work_dir
)
self.logger.info("\n")
for k, v in result_dict["results"].items():
self.logger.info(f"Evaluation {k}: {v}")
self.call_hook("after_val_epoch")
def resume(self, checkpoint, resume_optimizer=True, map_location="default"):
if map_location == "default":
checkpoint = self.load_checkpoint(
checkpoint , map_location='cuda:{}'.format(torch.cuda.current_device()) # TODO: FIX THIS!!
)
else:
checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
self._epoch = checkpoint["meta"]["epoch"]
self._iter = checkpoint["meta"]["iter"]
if "optimizer" in checkpoint and resume_optimizer:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.logger.info("resumed epoch %d, iter %d", self.epoch, self.iter)
def run(self, data_loaders, workflow, max_epochs, **kwargs):
""" Start running.
Args:
data_loaders (list[:obj:`DataLoader`])
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs.
max_epochs (int)
"""
assert isinstance(data_loaders, list)
assert torchie.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
self._max_epochs = max_epochs
work_dir = self.work_dir if self.work_dir is not None else "NONE"
self.logger.info(
"Start running, host: %s, work_dir: %s", get_host_info(), work_dir
)
self.logger.info("workflow: %s, max: %d epochs", workflow, max_epochs)
self.call_hook("before_run")
while self.epoch < max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str):
if not hasattr(self, mode):
raise ValueError(
"Trainer has no method named '{}' to run an epoch".format(
mode
)
)
epoch_runner = getattr(self, mode)
elif callable(mode):
epoch_runner = mode
else:
raise TypeError(
"mode in workflow must be a str or "
"callable function not '{}'".format(type(mode))
)
for _ in range(epochs):
if mode == "train" and self.epoch >= max_epochs:
return
elif mode == "val":
epoch_runner(data_loaders[i], **kwargs)
else:
epoch_runner(data_loaders[i], self.epoch, **kwargs)
# time.sleep(1)
self.call_hook("after_run")
def register_lr_hooks(self, lr_config):
if isinstance(lr_config, LrUpdaterHook):
self.register_hook(lr_config)
elif isinstance(lr_config, dict):
assert "policy" in lr_config
hook_name = lr_config["policy"].title() + "LrUpdaterHook"
if not hasattr(lr_updater, hook_name):
raise ValueError('"{}" does not exist'.format(hook_name))
hook_cls = getattr(lr_updater, hook_name)
self.register_hook(hook_cls(**lr_config))
else:
raise TypeError(
"'lr_config' must be eigher a LrUpdaterHook object"
" or dict, not '{}'".format(type(lr_config))
)
def register_logger_hooks(self, log_config):
log_interval = log_config["interval"]
for info in log_config["hooks"]:
logger_hook = obj_from_dict(
info, hooks, default_args=dict(interval=log_interval)
)
self.register_hook(logger_hook, priority="VERY_LOW")
def register_training_hooks(
self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None
):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- OptimizerStepperHook
- CheckpointSaverHook
- IterTimerHook
- LoggerHook(s)
"""
if optimizer_config is None:
optimizer_config = {}
if checkpoint_config is None:
checkpoint_config = {}
if lr_config is not None:
assert self.lr_scheduler is None
self.register_lr_hooks(lr_config)
self.register_hook(self.build_hook(optimizer_config, OptimizerHook))
self.register_hook(self.build_hook(checkpoint_config, CheckpointHook))
self.register_hook(IterTimerHook())
if log_config is not None:
self.register_logger_hooks(log_config)
| losses = model(example, return_loss=True)
self.call_hook("after_forward")
loss, log_vars = parse_second_losses(losses)
del losses
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=-1 # TODO: FIX THIS
)
self.call_hook("after_parse_loss")
return outputs | conditional_block |
trainer.py | import logging
import os.path as osp
import queue
import sys
import threading
import time
from collections import OrderedDict
import torch
from det3d import torchie
from . import hooks
from .checkpoint import load_checkpoint, save_checkpoint
from .hooks import (
CheckpointHook,
Hook,
IterTimerHook,
LrUpdaterHook,
OptimizerHook,
lr_updater,
)
from .log_buffer import LogBuffer
from .priority import get_priority
from .utils import (
all_gather,
get_dist_info,
get_host_info,
get_time_str,
obj_from_dict,
synchronize,
)
def example_to_device(example, device, non_blocking=False) -> dict:
example_torch = {}
float_names = ["voxels", "bev_map"]
for k, v in example.items():
if k in ["anchors", "anchors_mask", "reg_targets", "reg_weights", "labels", "hm",
"anno_box", "ind", "mask", 'cat', 'points']:
example_torch[k] = [res.to(device, non_blocking=non_blocking) for res in v]
elif k in [
"voxels",
"bev_map",
"coordinates",
"num_points",
"num_voxels",
"cyv_voxels",
"cyv_num_voxels",
"cyv_coordinates",
"cyv_num_points",
"gt_boxes_and_cls"
]:
example_torch[k] = v.to(device, non_blocking=non_blocking)
elif k == "calib":
calib = {}
for k1, v1 in v.items():
calib[k1] = v1.to(device, non_blocking=non_blocking)
example_torch[k] = calib
else:
example_torch[k] = v
return example_torch
def parse_second_losses(losses):
log_vars = OrderedDict()
loss = sum(losses["loss"])
for loss_name, loss_value in losses.items():
if loss_name == "loc_loss_elem":
log_vars[loss_name] = [[i.item() for i in j] for j in loss_value]
else:
log_vars[loss_name] = [i.item() for i in loss_value]
return loss, log_vars
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, max_prefetch=1):
threading.Thread.__init__(self)
self.queue = queue.Queue(max_prefetch)
self.generator = generator
self.daemon = True
self.start()
def run(self):
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
# Python 3 compatibility
def __next__(self):
return self.next()
def __iter__(self):
return self
class Prefetcher(object):
def __init__(self, dataloader):
self.loader = iter(dataloader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_input = next(self.loader)
except StopIteration:
self.next_input = None
return
with torch.cuda.stream(self.stream):
self.next_input = example_to_device(
self.next_input, torch.cuda.current_device(), non_blocking=False
)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
self.preload()
return input
class Trainer(object):
""" A training helper for PyTorch
Args:
model:
batch_processor:
optimizer:
workdir:
log_level:
logger:
"""
def __init__( | self,
model,
batch_processor,
optimizer=None,
lr_scheduler=None,
work_dir=None,
log_level=logging.INFO,
logger=None,
**kwargs,
):
assert callable(batch_processor)
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.batch_processor = batch_processor
# Create work_dir
if torchie.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
torchie.mkdir_or_exist(self.work_dir)
elif work_dir is None:
self.work_dir = None
else:
raise TypeError("'work_dir' must be a str or None")
# Get model name from the model class
if hasattr(self.model, "module"):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
self._rank, self._world_size = get_dist_info()
self.timestamp = get_time_str()
if logger is None:
self.logger = self.init_logger(work_dir, log_level)
else:
self.logger = logger
self.log_buffer = LogBuffer()
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
"""str: Name of the model, usually the module class name."""
return self._model_name
@property
def rank(self):
"""int: Rank of current process. (distributed training)"""
return self._rank
@property
def world_size(self):
"""int: Number of processes participating in the job.
(distributed training)"""
return self._world_size
@property
def hooks(self):
"""list[:obj:`Hook`]: A list of registered hooks."""
return self._hooks
@property
def epoch(self):
"""int: Current epoch."""
return self._epoch
@property
def iter(self):
"""int: Current iteration."""
return self._iter
@property
def inner_iter(self):
"""int: Iteration in an epoch."""
return self._inner_iter
@property
def max_epochs(self):
"""int: Maximum training epochs."""
return self._max_epochs
@property
def max_iters(self):
"""int: Maximum training iterations."""
return self._max_iters
def init_optimizer(self, optimizer):
"""Init the optimizer
Args:
optimizer (dict or :obj:`~torch.optim.Optimizer`)
Returns:
:obj:`~torch.optim.Optimizer`
Examples:
>>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
>>> type(runner.init_optimizer(optimizer))
<class 'torch.optim.sgd.SGD`>
"""
if isinstance(optimizer, dict):
optimizer = obj_from_dict(
optimizer, torch.optim, dict(params=self.model.parameters())
)
elif not isinstance(optimizer, torch.optim.Optimizer):
raise TypeError(
"optimizer must be either an Optimizer object or a dict, "
"but got {}".format(type(optimizer))
)
return optimizer
def _add_file_handler(self, logger, filename=None, mode="w", level=logging.INFO):
# TODO: move this method out of runner
file_handler = logging.FileHandler(filename, mode)
file_handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
file_handler.setLevel(level)
logger.addHandler(file_handler)
return logger
def init_logger(self, log_dir=None, level=logging.INFO):
"""Init the logger.
Args:
Returns:
:obj:`~logging.Logger`: Python logger.
"""
logging.basicConfig(
format="%(asctime)s - %(levelname)s - % (message)s", level=level
)
logger = logging.getLogger(__name__)
if log_dir and self.rank == 0:
filename = "{}.log".format(self.timestamp)
log_file = osp.join(log_dir, filename)
self._add_file_handler(logger, log_file, level=level)
return logger
def current_lr(self):
if self.optimizer is None:
raise RuntimeError("lr is not applicable because optimizer does not exist.")
return [group["lr"] for group in self.optimizer.param_groups]
def register_hook(self, hook, priority="NORMAL"):
"""Register a hook into the hook list.
Args:
hook (:obj:`Hook`)
priority (int or str or :obj:`Priority`)
"""
assert isinstance(hook, Hook)
if hasattr(hook, "priority"):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
# Insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook)
def build_hook(self, args, hook_type=None):
if isinstance(args, Hook):
return args
elif isinstance(args, dict):
assert issubclass(hook_type, Hook)
return hook_type(**args)
else:
raise TypeError(
"'args' must be either a Hook object"
" or dict, not {}".format(type(args))
)
def call_hook(self, fn_name):
for hook in self._hooks:
getattr(hook, fn_name)(self)
def load_checkpoint(self, filename, map_location="cpu", strict=False):
self.logger.info("load checkpoint from %s", filename)
return load_checkpoint(self.model, filename, map_location, strict, self.logger)
def save_checkpoint(
self, out_dir, filename_tmpl="epoch_{}.pth", save_optimizer=True, meta=None
):
if meta is None:
meta = dict(epoch=self.epoch + 1, iter=self.iter)
else:
meta.update(epoch=self.epoch + 1, iter=self.iter)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
linkpath = osp.join(out_dir, "latest.pth")
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
# Use relative symlink
torchie.symlink(filename, linkpath)
def batch_processor_inline(self, model, data, train_mode, **kwargs):
if "local_rank" in kwargs:
device = torch.device(kwargs["local_rank"])
else:
device = None
# data = example_convert_to_torch(data, device=device)
example = example_to_device(
data, torch.cuda.current_device(), non_blocking=False
)
self.call_hook("after_data_to_device")
if train_mode:
losses = model(example, return_loss=True)
self.call_hook("after_forward")
loss, log_vars = parse_second_losses(losses)
del losses
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=-1 # TODO: FIX THIS
)
self.call_hook("after_parse_loss")
return outputs
else:
return model(example, return_loss=False)
def train(self, data_loader, epoch, **kwargs):
self.model.train()
self.mode = "train"
self.data_loader = data_loader
self.length = len(data_loader)
self._max_iters = self._max_epochs * self.length
self.call_hook("before_train_epoch")
base_step = epoch * self.length
# prefetcher = Prefetcher(data_loader)
# for data_batch in BackgroundGenerator(data_loader, max_prefetch=3):
for i, data_batch in enumerate(data_loader):
global_step = base_step + i
if self.lr_scheduler is not None:
#print(global_step)
self.lr_scheduler.step(global_step)
self._inner_iter = i
self.call_hook("before_train_iter")
# outputs = self.batch_processor(self.model,
# data_batch,
# train_mode=True,
# **kwargs)
outputs = self.batch_processor_inline(
self.model, data_batch, train_mode=True, **kwargs
)
if not isinstance(outputs, dict):
raise TypeError("batch_processor() must return a dict")
if "log_vars" in outputs:
self.log_buffer.update(outputs["log_vars"], outputs["num_samples"])
self.outputs = outputs
self.call_hook("after_train_iter")
self._iter += 1
self.call_hook("after_train_epoch")
self._epoch += 1
def val(self, data_loader, **kwargs):
self.model.eval()
self.mode = "val"
self.data_loader = data_loader
self.call_hook("before_val_epoch")
self.logger.info(f"work dir: {self.work_dir}")
if self.rank == 0:
prog_bar = torchie.ProgressBar(len(data_loader.dataset))
detections = {}
cpu_device = torch.device("cpu")
for i, data_batch in enumerate(data_loader):
self._inner_iter = i
self.call_hook("before_val_iter")
with torch.no_grad():
outputs = self.batch_processor(
self.model, data_batch, train_mode=False, **kwargs
)
for output in outputs:
token = output["metadata"]["token"]
for k, v in output.items():
if k not in [
"metadata",
]:
output[k] = v.to(cpu_device)
detections.update(
{token: output,}
)
if self.rank == 0:
for _ in range(self.world_size):
prog_bar.update()
synchronize()
all_predictions = all_gather(detections)
if self.rank != 0:
return
predictions = {}
for p in all_predictions:
predictions.update(p)
# torch.save(predictions, "final_predictions_debug.pkl")
# TODO fix evaluation module
result_dict, _ = self.data_loader.dataset.evaluation(
predictions, output_dir=self.work_dir
)
self.logger.info("\n")
for k, v in result_dict["results"].items():
self.logger.info(f"Evaluation {k}: {v}")
self.call_hook("after_val_epoch")
def resume(self, checkpoint, resume_optimizer=True, map_location="default"):
if map_location == "default":
checkpoint = self.load_checkpoint(
checkpoint , map_location='cuda:{}'.format(torch.cuda.current_device()) # TODO: FIX THIS!!
)
else:
checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
self._epoch = checkpoint["meta"]["epoch"]
self._iter = checkpoint["meta"]["iter"]
if "optimizer" in checkpoint and resume_optimizer:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.logger.info("resumed epoch %d, iter %d", self.epoch, self.iter)
def run(self, data_loaders, workflow, max_epochs, **kwargs):
""" Start running.
Args:
data_loaders (list[:obj:`DataLoader`])
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs.
max_epochs (int)
"""
assert isinstance(data_loaders, list)
assert torchie.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
self._max_epochs = max_epochs
work_dir = self.work_dir if self.work_dir is not None else "NONE"
self.logger.info(
"Start running, host: %s, work_dir: %s", get_host_info(), work_dir
)
self.logger.info("workflow: %s, max: %d epochs", workflow, max_epochs)
self.call_hook("before_run")
while self.epoch < max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str):
if not hasattr(self, mode):
raise ValueError(
"Trainer has no method named '{}' to run an epoch".format(
mode
)
)
epoch_runner = getattr(self, mode)
elif callable(mode):
epoch_runner = mode
else:
raise TypeError(
"mode in workflow must be a str or "
"callable function not '{}'".format(type(mode))
)
for _ in range(epochs):
if mode == "train" and self.epoch >= max_epochs:
return
elif mode == "val":
epoch_runner(data_loaders[i], **kwargs)
else:
epoch_runner(data_loaders[i], self.epoch, **kwargs)
# time.sleep(1)
self.call_hook("after_run")
def register_lr_hooks(self, lr_config):
if isinstance(lr_config, LrUpdaterHook):
self.register_hook(lr_config)
elif isinstance(lr_config, dict):
assert "policy" in lr_config
hook_name = lr_config["policy"].title() + "LrUpdaterHook"
if not hasattr(lr_updater, hook_name):
raise ValueError('"{}" does not exist'.format(hook_name))
hook_cls = getattr(lr_updater, hook_name)
self.register_hook(hook_cls(**lr_config))
else:
raise TypeError(
"'lr_config' must be eigher a LrUpdaterHook object"
" or dict, not '{}'".format(type(lr_config))
)
def register_logger_hooks(self, log_config):
log_interval = log_config["interval"]
for info in log_config["hooks"]:
logger_hook = obj_from_dict(
info, hooks, default_args=dict(interval=log_interval)
)
self.register_hook(logger_hook, priority="VERY_LOW")
def register_training_hooks(
self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None
):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- OptimizerStepperHook
- CheckpointSaverHook
- IterTimerHook
- LoggerHook(s)
"""
if optimizer_config is None:
optimizer_config = {}
if checkpoint_config is None:
checkpoint_config = {}
if lr_config is not None:
assert self.lr_scheduler is None
self.register_lr_hooks(lr_config)
self.register_hook(self.build_hook(optimizer_config, OptimizerHook))
self.register_hook(self.build_hook(checkpoint_config, CheckpointHook))
self.register_hook(IterTimerHook())
if log_config is not None:
self.register_logger_hooks(log_config) | random_line_split | |
trainer.py | import logging
import os.path as osp
import queue
import sys
import threading
import time
from collections import OrderedDict
import torch
from det3d import torchie
from . import hooks
from .checkpoint import load_checkpoint, save_checkpoint
from .hooks import (
CheckpointHook,
Hook,
IterTimerHook,
LrUpdaterHook,
OptimizerHook,
lr_updater,
)
from .log_buffer import LogBuffer
from .priority import get_priority
from .utils import (
all_gather,
get_dist_info,
get_host_info,
get_time_str,
obj_from_dict,
synchronize,
)
def example_to_device(example, device, non_blocking=False) -> dict:
example_torch = {}
float_names = ["voxels", "bev_map"]
for k, v in example.items():
if k in ["anchors", "anchors_mask", "reg_targets", "reg_weights", "labels", "hm",
"anno_box", "ind", "mask", 'cat', 'points']:
example_torch[k] = [res.to(device, non_blocking=non_blocking) for res in v]
elif k in [
"voxels",
"bev_map",
"coordinates",
"num_points",
"num_voxels",
"cyv_voxels",
"cyv_num_voxels",
"cyv_coordinates",
"cyv_num_points",
"gt_boxes_and_cls"
]:
example_torch[k] = v.to(device, non_blocking=non_blocking)
elif k == "calib":
calib = {}
for k1, v1 in v.items():
calib[k1] = v1.to(device, non_blocking=non_blocking)
example_torch[k] = calib
else:
example_torch[k] = v
return example_torch
def parse_second_losses(losses):
log_vars = OrderedDict()
loss = sum(losses["loss"])
for loss_name, loss_value in losses.items():
if loss_name == "loc_loss_elem":
log_vars[loss_name] = [[i.item() for i in j] for j in loss_value]
else:
log_vars[loss_name] = [i.item() for i in loss_value]
return loss, log_vars
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, max_prefetch=1):
threading.Thread.__init__(self)
self.queue = queue.Queue(max_prefetch)
self.generator = generator
self.daemon = True
self.start()
def run(self):
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
# Python 3 compatibility
def __next__(self):
return self.next()
def __iter__(self):
return self
class Prefetcher(object):
def __init__(self, dataloader):
self.loader = iter(dataloader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_input = next(self.loader)
except StopIteration:
self.next_input = None
return
with torch.cuda.stream(self.stream):
self.next_input = example_to_device(
self.next_input, torch.cuda.current_device(), non_blocking=False
)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
self.preload()
return input
class Trainer(object):
""" A training helper for PyTorch
Args:
model:
batch_processor:
optimizer:
workdir:
log_level:
logger:
"""
def __init__(
self,
model,
batch_processor,
optimizer=None,
lr_scheduler=None,
work_dir=None,
log_level=logging.INFO,
logger=None,
**kwargs,
):
assert callable(batch_processor)
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.batch_processor = batch_processor
# Create work_dir
if torchie.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
torchie.mkdir_or_exist(self.work_dir)
elif work_dir is None:
self.work_dir = None
else:
raise TypeError("'work_dir' must be a str or None")
# Get model name from the model class
if hasattr(self.model, "module"):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
self._rank, self._world_size = get_dist_info()
self.timestamp = get_time_str()
if logger is None:
self.logger = self.init_logger(work_dir, log_level)
else:
self.logger = logger
self.log_buffer = LogBuffer()
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
"""str: Name of the model, usually the module class name."""
return self._model_name
@property
def rank(self):
"""int: Rank of current process. (distributed training)"""
return self._rank
@property
def world_size(self):
"""int: Number of processes participating in the job.
(distributed training)"""
return self._world_size
@property
def hooks(self):
"""list[:obj:`Hook`]: A list of registered hooks."""
return self._hooks
@property
def epoch(self):
"""int: Current epoch."""
return self._epoch
@property
def iter(self):
"""int: Current iteration."""
return self._iter
@property
def inner_iter(self):
"""int: Iteration in an epoch."""
return self._inner_iter
@property
def max_epochs(self):
"""int: Maximum training epochs."""
return self._max_epochs
@property
def max_iters(self):
"""int: Maximum training iterations."""
return self._max_iters
def | (self, optimizer):
"""Init the optimizer
Args:
optimizer (dict or :obj:`~torch.optim.Optimizer`)
Returns:
:obj:`~torch.optim.Optimizer`
Examples:
>>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
>>> type(runner.init_optimizer(optimizer))
<class 'torch.optim.sgd.SGD`>
"""
if isinstance(optimizer, dict):
optimizer = obj_from_dict(
optimizer, torch.optim, dict(params=self.model.parameters())
)
elif not isinstance(optimizer, torch.optim.Optimizer):
raise TypeError(
"optimizer must be either an Optimizer object or a dict, "
"but got {}".format(type(optimizer))
)
return optimizer
def _add_file_handler(self, logger, filename=None, mode="w", level=logging.INFO):
# TODO: move this method out of runner
file_handler = logging.FileHandler(filename, mode)
file_handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
file_handler.setLevel(level)
logger.addHandler(file_handler)
return logger
def init_logger(self, log_dir=None, level=logging.INFO):
"""Init the logger.
Args:
Returns:
:obj:`~logging.Logger`: Python logger.
"""
logging.basicConfig(
format="%(asctime)s - %(levelname)s - % (message)s", level=level
)
logger = logging.getLogger(__name__)
if log_dir and self.rank == 0:
filename = "{}.log".format(self.timestamp)
log_file = osp.join(log_dir, filename)
self._add_file_handler(logger, log_file, level=level)
return logger
def current_lr(self):
if self.optimizer is None:
raise RuntimeError("lr is not applicable because optimizer does not exist.")
return [group["lr"] for group in self.optimizer.param_groups]
def register_hook(self, hook, priority="NORMAL"):
"""Register a hook into the hook list.
Args:
hook (:obj:`Hook`)
priority (int or str or :obj:`Priority`)
"""
assert isinstance(hook, Hook)
if hasattr(hook, "priority"):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
# Insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook)
def build_hook(self, args, hook_type=None):
if isinstance(args, Hook):
return args
elif isinstance(args, dict):
assert issubclass(hook_type, Hook)
return hook_type(**args)
else:
raise TypeError(
"'args' must be either a Hook object"
" or dict, not {}".format(type(args))
)
def call_hook(self, fn_name):
for hook in self._hooks:
getattr(hook, fn_name)(self)
def load_checkpoint(self, filename, map_location="cpu", strict=False):
self.logger.info("load checkpoint from %s", filename)
return load_checkpoint(self.model, filename, map_location, strict, self.logger)
def save_checkpoint(
self, out_dir, filename_tmpl="epoch_{}.pth", save_optimizer=True, meta=None
):
if meta is None:
meta = dict(epoch=self.epoch + 1, iter=self.iter)
else:
meta.update(epoch=self.epoch + 1, iter=self.iter)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
linkpath = osp.join(out_dir, "latest.pth")
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
# Use relative symlink
torchie.symlink(filename, linkpath)
def batch_processor_inline(self, model, data, train_mode, **kwargs):
if "local_rank" in kwargs:
device = torch.device(kwargs["local_rank"])
else:
device = None
# data = example_convert_to_torch(data, device=device)
example = example_to_device(
data, torch.cuda.current_device(), non_blocking=False
)
self.call_hook("after_data_to_device")
if train_mode:
losses = model(example, return_loss=True)
self.call_hook("after_forward")
loss, log_vars = parse_second_losses(losses)
del losses
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=-1 # TODO: FIX THIS
)
self.call_hook("after_parse_loss")
return outputs
else:
return model(example, return_loss=False)
def train(self, data_loader, epoch, **kwargs):
self.model.train()
self.mode = "train"
self.data_loader = data_loader
self.length = len(data_loader)
self._max_iters = self._max_epochs * self.length
self.call_hook("before_train_epoch")
base_step = epoch * self.length
# prefetcher = Prefetcher(data_loader)
# for data_batch in BackgroundGenerator(data_loader, max_prefetch=3):
for i, data_batch in enumerate(data_loader):
global_step = base_step + i
if self.lr_scheduler is not None:
#print(global_step)
self.lr_scheduler.step(global_step)
self._inner_iter = i
self.call_hook("before_train_iter")
# outputs = self.batch_processor(self.model,
# data_batch,
# train_mode=True,
# **kwargs)
outputs = self.batch_processor_inline(
self.model, data_batch, train_mode=True, **kwargs
)
if not isinstance(outputs, dict):
raise TypeError("batch_processor() must return a dict")
if "log_vars" in outputs:
self.log_buffer.update(outputs["log_vars"], outputs["num_samples"])
self.outputs = outputs
self.call_hook("after_train_iter")
self._iter += 1
self.call_hook("after_train_epoch")
self._epoch += 1
def val(self, data_loader, **kwargs):
self.model.eval()
self.mode = "val"
self.data_loader = data_loader
self.call_hook("before_val_epoch")
self.logger.info(f"work dir: {self.work_dir}")
if self.rank == 0:
prog_bar = torchie.ProgressBar(len(data_loader.dataset))
detections = {}
cpu_device = torch.device("cpu")
for i, data_batch in enumerate(data_loader):
self._inner_iter = i
self.call_hook("before_val_iter")
with torch.no_grad():
outputs = self.batch_processor(
self.model, data_batch, train_mode=False, **kwargs
)
for output in outputs:
token = output["metadata"]["token"]
for k, v in output.items():
if k not in [
"metadata",
]:
output[k] = v.to(cpu_device)
detections.update(
{token: output,}
)
if self.rank == 0:
for _ in range(self.world_size):
prog_bar.update()
synchronize()
all_predictions = all_gather(detections)
if self.rank != 0:
return
predictions = {}
for p in all_predictions:
predictions.update(p)
# torch.save(predictions, "final_predictions_debug.pkl")
# TODO fix evaluation module
result_dict, _ = self.data_loader.dataset.evaluation(
predictions, output_dir=self.work_dir
)
self.logger.info("\n")
for k, v in result_dict["results"].items():
self.logger.info(f"Evaluation {k}: {v}")
self.call_hook("after_val_epoch")
def resume(self, checkpoint, resume_optimizer=True, map_location="default"):
if map_location == "default":
checkpoint = self.load_checkpoint(
checkpoint , map_location='cuda:{}'.format(torch.cuda.current_device()) # TODO: FIX THIS!!
)
else:
checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
self._epoch = checkpoint["meta"]["epoch"]
self._iter = checkpoint["meta"]["iter"]
if "optimizer" in checkpoint and resume_optimizer:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.logger.info("resumed epoch %d, iter %d", self.epoch, self.iter)
def run(self, data_loaders, workflow, max_epochs, **kwargs):
""" Start running.
Args:
data_loaders (list[:obj:`DataLoader`])
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs.
max_epochs (int)
"""
assert isinstance(data_loaders, list)
assert torchie.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
self._max_epochs = max_epochs
work_dir = self.work_dir if self.work_dir is not None else "NONE"
self.logger.info(
"Start running, host: %s, work_dir: %s", get_host_info(), work_dir
)
self.logger.info("workflow: %s, max: %d epochs", workflow, max_epochs)
self.call_hook("before_run")
while self.epoch < max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str):
if not hasattr(self, mode):
raise ValueError(
"Trainer has no method named '{}' to run an epoch".format(
mode
)
)
epoch_runner = getattr(self, mode)
elif callable(mode):
epoch_runner = mode
else:
raise TypeError(
"mode in workflow must be a str or "
"callable function not '{}'".format(type(mode))
)
for _ in range(epochs):
if mode == "train" and self.epoch >= max_epochs:
return
elif mode == "val":
epoch_runner(data_loaders[i], **kwargs)
else:
epoch_runner(data_loaders[i], self.epoch, **kwargs)
# time.sleep(1)
self.call_hook("after_run")
def register_lr_hooks(self, lr_config):
if isinstance(lr_config, LrUpdaterHook):
self.register_hook(lr_config)
elif isinstance(lr_config, dict):
assert "policy" in lr_config
hook_name = lr_config["policy"].title() + "LrUpdaterHook"
if not hasattr(lr_updater, hook_name):
raise ValueError('"{}" does not exist'.format(hook_name))
hook_cls = getattr(lr_updater, hook_name)
self.register_hook(hook_cls(**lr_config))
else:
raise TypeError(
"'lr_config' must be eigher a LrUpdaterHook object"
" or dict, not '{}'".format(type(lr_config))
)
def register_logger_hooks(self, log_config):
log_interval = log_config["interval"]
for info in log_config["hooks"]:
logger_hook = obj_from_dict(
info, hooks, default_args=dict(interval=log_interval)
)
self.register_hook(logger_hook, priority="VERY_LOW")
def register_training_hooks(
self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None
):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- OptimizerStepperHook
- CheckpointSaverHook
- IterTimerHook
- LoggerHook(s)
"""
if optimizer_config is None:
optimizer_config = {}
if checkpoint_config is None:
checkpoint_config = {}
if lr_config is not None:
assert self.lr_scheduler is None
self.register_lr_hooks(lr_config)
self.register_hook(self.build_hook(optimizer_config, OptimizerHook))
self.register_hook(self.build_hook(checkpoint_config, CheckpointHook))
self.register_hook(IterTimerHook())
if log_config is not None:
self.register_logger_hooks(log_config)
| init_optimizer | identifier_name |
trainer.py | import logging
import os.path as osp
import queue
import sys
import threading
import time
from collections import OrderedDict
import torch
from det3d import torchie
from . import hooks
from .checkpoint import load_checkpoint, save_checkpoint
from .hooks import (
CheckpointHook,
Hook,
IterTimerHook,
LrUpdaterHook,
OptimizerHook,
lr_updater,
)
from .log_buffer import LogBuffer
from .priority import get_priority
from .utils import (
all_gather,
get_dist_info,
get_host_info,
get_time_str,
obj_from_dict,
synchronize,
)
def example_to_device(example, device, non_blocking=False) -> dict:
example_torch = {}
float_names = ["voxels", "bev_map"]
for k, v in example.items():
if k in ["anchors", "anchors_mask", "reg_targets", "reg_weights", "labels", "hm",
"anno_box", "ind", "mask", 'cat', 'points']:
example_torch[k] = [res.to(device, non_blocking=non_blocking) for res in v]
elif k in [
"voxels",
"bev_map",
"coordinates",
"num_points",
"num_voxels",
"cyv_voxels",
"cyv_num_voxels",
"cyv_coordinates",
"cyv_num_points",
"gt_boxes_and_cls"
]:
example_torch[k] = v.to(device, non_blocking=non_blocking)
elif k == "calib":
calib = {}
for k1, v1 in v.items():
calib[k1] = v1.to(device, non_blocking=non_blocking)
example_torch[k] = calib
else:
example_torch[k] = v
return example_torch
def parse_second_losses(losses):
log_vars = OrderedDict()
loss = sum(losses["loss"])
for loss_name, loss_value in losses.items():
if loss_name == "loc_loss_elem":
log_vars[loss_name] = [[i.item() for i in j] for j in loss_value]
else:
log_vars[loss_name] = [i.item() for i in loss_value]
return loss, log_vars
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, max_prefetch=1):
threading.Thread.__init__(self)
self.queue = queue.Queue(max_prefetch)
self.generator = generator
self.daemon = True
self.start()
def run(self):
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
# Python 3 compatibility
def __next__(self):
return self.next()
def __iter__(self):
return self
class Prefetcher(object):
def __init__(self, dataloader):
self.loader = iter(dataloader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
try:
self.next_input = next(self.loader)
except StopIteration:
self.next_input = None
return
with torch.cuda.stream(self.stream):
self.next_input = example_to_device(
self.next_input, torch.cuda.current_device(), non_blocking=False
)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
self.preload()
return input
class Trainer(object):
| """ A training helper for PyTorch
Args:
model:
batch_processor:
optimizer:
workdir:
log_level:
logger:
"""
def __init__(
self,
model,
batch_processor,
optimizer=None,
lr_scheduler=None,
work_dir=None,
log_level=logging.INFO,
logger=None,
**kwargs,
):
assert callable(batch_processor)
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.batch_processor = batch_processor
# Create work_dir
if torchie.is_str(work_dir):
self.work_dir = osp.abspath(work_dir)
torchie.mkdir_or_exist(self.work_dir)
elif work_dir is None:
self.work_dir = None
else:
raise TypeError("'work_dir' must be a str or None")
# Get model name from the model class
if hasattr(self.model, "module"):
self._model_name = self.model.module.__class__.__name__
else:
self._model_name = self.model.__class__.__name__
self._rank, self._world_size = get_dist_info()
self.timestamp = get_time_str()
if logger is None:
self.logger = self.init_logger(work_dir, log_level)
else:
self.logger = logger
self.log_buffer = LogBuffer()
self.mode = None
self._hooks = []
self._epoch = 0
self._iter = 0
self._inner_iter = 0
self._max_epochs = 0
self._max_iters = 0
@property
def model_name(self):
"""str: Name of the model, usually the module class name."""
return self._model_name
@property
def rank(self):
"""int: Rank of current process. (distributed training)"""
return self._rank
@property
def world_size(self):
"""int: Number of processes participating in the job.
(distributed training)"""
return self._world_size
@property
def hooks(self):
"""list[:obj:`Hook`]: A list of registered hooks."""
return self._hooks
@property
def epoch(self):
"""int: Current epoch."""
return self._epoch
@property
def iter(self):
"""int: Current iteration."""
return self._iter
@property
def inner_iter(self):
"""int: Iteration in an epoch."""
return self._inner_iter
@property
def max_epochs(self):
"""int: Maximum training epochs."""
return self._max_epochs
@property
def max_iters(self):
"""int: Maximum training iterations."""
return self._max_iters
def init_optimizer(self, optimizer):
"""Init the optimizer
Args:
optimizer (dict or :obj:`~torch.optim.Optimizer`)
Returns:
:obj:`~torch.optim.Optimizer`
Examples:
>>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
>>> type(runner.init_optimizer(optimizer))
<class 'torch.optim.sgd.SGD`>
"""
if isinstance(optimizer, dict):
optimizer = obj_from_dict(
optimizer, torch.optim, dict(params=self.model.parameters())
)
elif not isinstance(optimizer, torch.optim.Optimizer):
raise TypeError(
"optimizer must be either an Optimizer object or a dict, "
"but got {}".format(type(optimizer))
)
return optimizer
def _add_file_handler(self, logger, filename=None, mode="w", level=logging.INFO):
# TODO: move this method out of runner
file_handler = logging.FileHandler(filename, mode)
file_handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
file_handler.setLevel(level)
logger.addHandler(file_handler)
return logger
def init_logger(self, log_dir=None, level=logging.INFO):
"""Init the logger.
Args:
Returns:
:obj:`~logging.Logger`: Python logger.
"""
logging.basicConfig(
format="%(asctime)s - %(levelname)s - % (message)s", level=level
)
logger = logging.getLogger(__name__)
if log_dir and self.rank == 0:
filename = "{}.log".format(self.timestamp)
log_file = osp.join(log_dir, filename)
self._add_file_handler(logger, log_file, level=level)
return logger
def current_lr(self):
if self.optimizer is None:
raise RuntimeError("lr is not applicable because optimizer does not exist.")
return [group["lr"] for group in self.optimizer.param_groups]
def register_hook(self, hook, priority="NORMAL"):
"""Register a hook into the hook list.
Args:
hook (:obj:`Hook`)
priority (int or str or :obj:`Priority`)
"""
assert isinstance(hook, Hook)
if hasattr(hook, "priority"):
raise ValueError('"priority" is a reserved attribute for hooks')
priority = get_priority(priority)
hook.priority = priority
# Insert the hook to a sorted list
inserted = False
for i in range(len(self._hooks) - 1, -1, -1):
if priority >= self._hooks[i].priority:
self._hooks.insert(i + 1, hook)
inserted = True
break
if not inserted:
self._hooks.insert(0, hook)
def build_hook(self, args, hook_type=None):
if isinstance(args, Hook):
return args
elif isinstance(args, dict):
assert issubclass(hook_type, Hook)
return hook_type(**args)
else:
raise TypeError(
"'args' must be either a Hook object"
" or dict, not {}".format(type(args))
)
def call_hook(self, fn_name):
for hook in self._hooks:
getattr(hook, fn_name)(self)
def load_checkpoint(self, filename, map_location="cpu", strict=False):
self.logger.info("load checkpoint from %s", filename)
return load_checkpoint(self.model, filename, map_location, strict, self.logger)
def save_checkpoint(
self, out_dir, filename_tmpl="epoch_{}.pth", save_optimizer=True, meta=None
):
if meta is None:
meta = dict(epoch=self.epoch + 1, iter=self.iter)
else:
meta.update(epoch=self.epoch + 1, iter=self.iter)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
linkpath = osp.join(out_dir, "latest.pth")
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
# Use relative symlink
torchie.symlink(filename, linkpath)
def batch_processor_inline(self, model, data, train_mode, **kwargs):
if "local_rank" in kwargs:
device = torch.device(kwargs["local_rank"])
else:
device = None
# data = example_convert_to_torch(data, device=device)
example = example_to_device(
data, torch.cuda.current_device(), non_blocking=False
)
self.call_hook("after_data_to_device")
if train_mode:
losses = model(example, return_loss=True)
self.call_hook("after_forward")
loss, log_vars = parse_second_losses(losses)
del losses
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=-1 # TODO: FIX THIS
)
self.call_hook("after_parse_loss")
return outputs
else:
return model(example, return_loss=False)
def train(self, data_loader, epoch, **kwargs):
self.model.train()
self.mode = "train"
self.data_loader = data_loader
self.length = len(data_loader)
self._max_iters = self._max_epochs * self.length
self.call_hook("before_train_epoch")
base_step = epoch * self.length
# prefetcher = Prefetcher(data_loader)
# for data_batch in BackgroundGenerator(data_loader, max_prefetch=3):
for i, data_batch in enumerate(data_loader):
global_step = base_step + i
if self.lr_scheduler is not None:
#print(global_step)
self.lr_scheduler.step(global_step)
self._inner_iter = i
self.call_hook("before_train_iter")
# outputs = self.batch_processor(self.model,
# data_batch,
# train_mode=True,
# **kwargs)
outputs = self.batch_processor_inline(
self.model, data_batch, train_mode=True, **kwargs
)
if not isinstance(outputs, dict):
raise TypeError("batch_processor() must return a dict")
if "log_vars" in outputs:
self.log_buffer.update(outputs["log_vars"], outputs["num_samples"])
self.outputs = outputs
self.call_hook("after_train_iter")
self._iter += 1
self.call_hook("after_train_epoch")
self._epoch += 1
def val(self, data_loader, **kwargs):
self.model.eval()
self.mode = "val"
self.data_loader = data_loader
self.call_hook("before_val_epoch")
self.logger.info(f"work dir: {self.work_dir}")
if self.rank == 0:
prog_bar = torchie.ProgressBar(len(data_loader.dataset))
detections = {}
cpu_device = torch.device("cpu")
for i, data_batch in enumerate(data_loader):
self._inner_iter = i
self.call_hook("before_val_iter")
with torch.no_grad():
outputs = self.batch_processor(
self.model, data_batch, train_mode=False, **kwargs
)
for output in outputs:
token = output["metadata"]["token"]
for k, v in output.items():
if k not in [
"metadata",
]:
output[k] = v.to(cpu_device)
detections.update(
{token: output,}
)
if self.rank == 0:
for _ in range(self.world_size):
prog_bar.update()
synchronize()
all_predictions = all_gather(detections)
if self.rank != 0:
return
predictions = {}
for p in all_predictions:
predictions.update(p)
# torch.save(predictions, "final_predictions_debug.pkl")
# TODO fix evaluation module
result_dict, _ = self.data_loader.dataset.evaluation(
predictions, output_dir=self.work_dir
)
self.logger.info("\n")
for k, v in result_dict["results"].items():
self.logger.info(f"Evaluation {k}: {v}")
self.call_hook("after_val_epoch")
def resume(self, checkpoint, resume_optimizer=True, map_location="default"):
if map_location == "default":
checkpoint = self.load_checkpoint(
checkpoint , map_location='cuda:{}'.format(torch.cuda.current_device()) # TODO: FIX THIS!!
)
else:
checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
self._epoch = checkpoint["meta"]["epoch"]
self._iter = checkpoint["meta"]["iter"]
if "optimizer" in checkpoint and resume_optimizer:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.logger.info("resumed epoch %d, iter %d", self.epoch, self.iter)
def run(self, data_loaders, workflow, max_epochs, **kwargs):
""" Start running.
Args:
data_loaders (list[:obj:`DataLoader`])
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs.
max_epochs (int)
"""
assert isinstance(data_loaders, list)
assert torchie.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
self._max_epochs = max_epochs
work_dir = self.work_dir if self.work_dir is not None else "NONE"
self.logger.info(
"Start running, host: %s, work_dir: %s", get_host_info(), work_dir
)
self.logger.info("workflow: %s, max: %d epochs", workflow, max_epochs)
self.call_hook("before_run")
while self.epoch < max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str):
if not hasattr(self, mode):
raise ValueError(
"Trainer has no method named '{}' to run an epoch".format(
mode
)
)
epoch_runner = getattr(self, mode)
elif callable(mode):
epoch_runner = mode
else:
raise TypeError(
"mode in workflow must be a str or "
"callable function not '{}'".format(type(mode))
)
for _ in range(epochs):
if mode == "train" and self.epoch >= max_epochs:
return
elif mode == "val":
epoch_runner(data_loaders[i], **kwargs)
else:
epoch_runner(data_loaders[i], self.epoch, **kwargs)
# time.sleep(1)
self.call_hook("after_run")
def register_lr_hooks(self, lr_config):
if isinstance(lr_config, LrUpdaterHook):
self.register_hook(lr_config)
elif isinstance(lr_config, dict):
assert "policy" in lr_config
hook_name = lr_config["policy"].title() + "LrUpdaterHook"
if not hasattr(lr_updater, hook_name):
raise ValueError('"{}" does not exist'.format(hook_name))
hook_cls = getattr(lr_updater, hook_name)
self.register_hook(hook_cls(**lr_config))
else:
raise TypeError(
"'lr_config' must be eigher a LrUpdaterHook object"
" or dict, not '{}'".format(type(lr_config))
)
def register_logger_hooks(self, log_config):
log_interval = log_config["interval"]
for info in log_config["hooks"]:
logger_hook = obj_from_dict(
info, hooks, default_args=dict(interval=log_interval)
)
self.register_hook(logger_hook, priority="VERY_LOW")
def register_training_hooks(
self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None
):
"""Register default hooks for training.
Default hooks include:
- LrUpdaterHook
- OptimizerStepperHook
- CheckpointSaverHook
- IterTimerHook
- LoggerHook(s)
"""
if optimizer_config is None:
optimizer_config = {}
if checkpoint_config is None:
checkpoint_config = {}
if lr_config is not None:
assert self.lr_scheduler is None
self.register_lr_hooks(lr_config)
self.register_hook(self.build_hook(optimizer_config, OptimizerHook))
self.register_hook(self.build_hook(checkpoint_config, CheckpointHook))
self.register_hook(IterTimerHook())
if log_config is not None:
self.register_logger_hooks(log_config) | identifier_body | |
path_planning.py | #!/usr/bin/python2
# -*- coding: UTF-8 -*-
# coding: utf-8
#!/usr/bin/env python
'''
发布轨迹信息
path.x; path.y; c_speed;
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from cubic_spline import Spline2D
from polynomials import QuarticPolynomial, QuinticPolynomial
import time
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32
from std_msgs.msg import Int32
from geometry_msgs.msg import Point
from nav_msgs.msg import Path
from local_planner.msg import localPath
from geometry_msgs.msg import PoseStamped, Quaternion
import tf
from CAN_driver.msg import Motor_Feedback
from GNSS_driver.msg import GNSS_CAN
import sys
# 参数
MAX_SPEED = 30.0 # 最大速度 [m/s]
MAX_ACCEL = 50.0 # 最大加速度 [m/ss]
MAX_CURVATURE = 30.0 # 最大曲率 [1/m]
MAX_ROAD_WIDTH = 10.0 # 最大道路宽度 [m]
D_ROAD_W = 2.0 # 路宽采样间隔 [m]
DT = 0.3 # Delta T[s]
MAXT = 6.0 # 最大预测时间 [m]
MINT = 4.0 # 最小预测时间 [m]
TARGET_SPEED = 15.0/3.6 # 目标速度 [m/s] 即纵向速度保持
D_T_S = 10.0/3.6 # 目标opo][]o][o][\o][o][o速度采样间隔 [m/s]
N_S_SAMPLE = 0.1 # 目标速度采样数量
ROBOT_RADIUS = 2.3 # 车辆半径 [m]
THRESH_DIST=0.01
# 损失函数权重
KJ = 0.8
KT = 0.1
KD = 20.0
KLAT = 0.8
KLON = 0.2
show_animation = True
Gob_x = []
Gob_y = []
#规划失败标志 1 决策层需要
PathFail_flag = 0
class FrenetPath:
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
# generate path to each offset goal
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
# 采样,并对每一个目标配置生成轨迹
# Lateral motion planning
for Ti in np.arange(MINT, MAXT, DT):
fp = FrenetPath()
# 计算出关于目标配置di,Ti的横向多项式
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
# 纵向速度规划 (速度保持)
# Loongitudinal motion planning (Velocity keeping)
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
###########################################################
#高速时的损失函数
###########################################################
Jp = sum(np.power(tfp.d_ddd, 2)) # square of jerk
Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# square of diff from target speed
ds = (TARGET_SPEED - tfp.s_d[-1])**2
# 横向的损失函数
tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2
# 纵向的损失函数
tfp.cv = KJ * Js + KT * Ti + KD * ds
# 总的损失函数为d 和 s方向的损失函数乘对应的系数相加
#########################################################
#低速时的损失函数
#########################################################
# # 低速时的损失函数
# ltfp = copy.deepcopy(tfp)
# ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]
# Jp_s = sum(np.power(ltfp.d_sss, 2)) # square of jerk
# Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# # S = s1 - s0
# dS = tfp.s[-1] - s0
# #横向的损失函数
# tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2
# #纵向的损失函数
# tfp.cv = KJ * Js + KT * Ti + KD * ds
tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
def calc_global_paths(fplist, csp):
for fp in fplist:
# calc global positions
for i in range(len(fp.s)):
ix, iy = csp.calc_position(fp.s[i])
if ix is None:
break
iyaw = csp.calc_yaw(fp.s[i])
di = fp.d[i]
fx = ix + di * math.cos(iyaw + math.pi / 2.0)
fy = iy + di * math.sin(iyaw + math.pi / 2.0)
fp.x.append(fx)
fp.y.append(fy)
# calc yaw and ds
for i in range(len(fp.x) - 1):
dx = fp.x[i + 1] - fp.x[i]
dy = fp.y[i + 1] - fp.y[i]
fp.yaw.append(math.atan2(dy, dx))
fp.ds.append(math.sqrt(dx**2 + dy**2))
fp.yaw.append(fp.yaw[-1])
fp.ds.append(fp.ds[-1])
# calc curvature
for i in range(len(fp.yaw) - 1):
fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
return fplist
def check_collision(fp, ob):
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0])**2 + (iy - ob[i, 1])**2)
for (ix, iy) in zip(fp.x, fp.y)]
collision = any([di <= ROBOT_RADIUS**2 for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
"""
check path above max speed, max a, does collision or not
"""
okind = []
for i in range(len(fplist)):
if any([v > MAX_SPEED for v in fplist[i].s_d]): # Max speed check
continue
elif any([abs(a) > MAX_ACCEL for a in fplist[i].s_dd]): # Max accel check
continue
elif any([abs(c) > MAX_CURVATURE for c in fplist[i].c]): # Max curvature check
continue
elif not check_collision(fplist[i], ob):
continue
okind.append(i)
return [fplist[i] for i in okind]
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
ob = np.array(ob)
fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
fplist = calc_global_paths(fplist, csp)
fplist = check_paths(fplist, ob)
# find minimum cost path
mincost = float("inf")
bestpath = None
for fp in fplist:
if mincost >= fp.cf:
mincost = fp.cf
bestpath = fp
return bestpath
def generate_road_widle(x,y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
road_left_ix = ix + MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_iy = iy + MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_ix = ix - MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_iy = iy - MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_x.append(road_left_ix)
road_left_y.append(road_left_iy)
road_right_x.append(road_right_ix)
road_right_y.append(road_right_iy)
return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1) #0.1
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
#######################################################################################
def load_global_path():
global zero_cord_x,zero_cord_y
bet = 0.1
blank = [] #buffer
white = [] #buffer
yellow = [] #buffer
GPS_x = [] #所采集预描点的x
GPS_y = [] #所采集预描点的x
#读取预描点
nums, ber = np.loadtxt("/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt", dtype=str, delimiter=',', unpack=True)
for i in range(len(nums)):
if not nums[i] in blank: #去除重复点
#blank.append(nums[i])
yellow.append(float(nums[i]))
white.append(float(ber[i]))
bx = yellow[0] #起始点坐标
by = white[0]
for i in range(len(yellow)):
dx = yellow[i] - bx
dy = white[i] - by
dis = math.sqrt(dx ** 2 + dy ** 2)
if dis > bet: #选取大于设定的距离的点
GPS_x.append(yellow[i]) #使cx,cy中点均满足要求
GPS_y.append(white[i])
bx = yellow[i]
by = white[i]
GPS_x = np.array(GPS_x) #将列表转换成数组
GPS_y = np.array(GPS_y)
#print("cx:",cx) | GPS_y = GPS_y - zero_cord_y
plt.plot(GPS_x,GPS_y, "-r", label="GPS point ")
plt.plot()
plt.show()
return GPS_x, GPS_y
class Info(object):
def __init__(self):
self.CurrGPS_lat = float(-1)
self.CurrGPS_lon = float(-1)
self.CurrentVelocity = float(-1)
self.Target_Velocity = float(-1)
self.ImuYaw = float(-1)
self.Target_Theta = float(-1)
#self.CommandMessage = Car_Input()
self.gob = np.array([])
self.ob = np.array([])
self.gobx = np.array([])
self.goby = np.array([])
# Subscribers
rospy.Subscriber("coordinate", Point, self.FeedbackCallbackObs)
sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU,queue_size = 10) #订阅GPS数据
rospy.Subscriber("Motor_Feedback_mssage", Motor_Feedback,self.RVcallback,queue_size = 10)
def FeedbackCallbackGPSIMU(self, msg):
self.CurrGPS_lat = msg.latitude
self.CurrGPS_lon = msg.longitude
self.ImuYaw = (90-msg.course_angle)*np.pi/180
#print(self.CurrGPS_lat,self.CurrGPS_lon,self.ImuYaw)
def FeedbackCallbackObs(self, msg):
global Gob_x
global Gob_y
self.gobx = msg.x
self.goby = msg.y
#print("msg.x","msg.y", msg.x, msg.y)
Gob_x.append(self.gobx)
Gob_y.append(self.goby)
#print("Gob_x","Gob_y", Gob_x, Gob_y)
#np.append(self.gobx,5)
#np.append(self.goby,5)
self.gob = np.column_stack((Gob_x, Gob_y))
#print(self.gobx,self.goby)
#print(self.gob)
def RVcallback(self,msg):
self.CurrentVelocity = msg.Base_Vehspd
#print("*"*50)
#print("rv:",rv)
#rospy.loginfo('I heard: %s', data.data)
def init(self):
return self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx, self.goby, self.gob, self.CurrentVelocity
def talker(self,Target_Velocity, path_record):
self.rate = rospy.Rate(100) # 10hz
self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象
# 定义发布器 path_pub 发布 trajectory
self.path_pub = rospy.Publisher('trajectory', localPath, queue_size = 50) #定义Publisher对象
self.pub_Velocity.publish(Target_Velocity)
# 发布路径
self.path_pub.publish(path_record)
#self.rate.sleep()
# def talker(self,Target_Velocity,Target_Theta):
# self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象
# self.pub_Steering = rospy.Publisher('Car_Steering', Float32, queue_size = 10)
# self.rate = rospy.Rate(100) # 10hz
# self.pub_Velocity.publish(Target_Velocity)
# self.pub_Steering.publish(Target_Theta)
# self.rate.sleep()
#######################################################################################
def get_transalation(curr_gps_lat,curr_gps_lon):
curr_posy=(float(curr_gps_lon)-zero_cord_y)
curr_posx=(float(curr_gps_lat)-zero_cord_x)
#print("curr_posy,curr_posx=",curr_posy,curr_posx)
return curr_posx, curr_posy
def get_transformation(pt,curr_yaw,T):
c, s = np.cos(curr_yaw), np.sin(curr_yaw)
R = (np.array(((c,-s), (s, c))))
pt=pt.dot(R)+T
return pt
def get_arc_length(tx,ty,st):
arc_length=0
for x in range(1,st):
arc_length=arc_length+(np.hypot((tx[x-1]-tx[x]),(ty[x-1]-ty[x])))
return arc_length
def get_lateral_dist(tx,ty,curr_posx,curr_posy):
dist=[]
for x in range(0,len(tx)-1):
dist.append(np.hypot((float(curr_posx)-tx[x]),(float(curr_posy)-ty[x])))
lat_dist=min(dist)
st=dist.index(min(dist))
theta1=math.atan2((ty[st]-ty[st-1]),(tx[st]-tx[st-1]))
theta2=math.atan2((curr_posy-ty[st-1]),(curr_posx-tx[st-1]))
if lat_dist<THRESH_DIST:
lat_dist=0
curr_posx=tx[st]
curr_posy=ty[st]
if theta2<theta1:
lat_dist=-lat_dist
# print(lat_dist)
return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
#print("*"*50)
#print("current=",current)
#print("target - current",target - current)
a = 1.0 * (target - current)
return a
def main():
ptx = []
pty = []
ptx, pty = load_global_path()
tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
#print(csp)
road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)
#当前车速及加速度
c_speed = 5.0/3.6
c_acc = 1.0
c_d_dd = 0
c_d_d = 0
area = 25.0 # animation area length [m]
start = time.time()
rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)
my_node = Info()
while not rospy.is_shutdown():
CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()
#print("gob",gob)
ob = []
if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):
#print(CurrGPS_lat,CurrGPS_lon,ImuYaw, curr_posx, curr_posy)
#print(gobx,goby,gob)
#path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
#s0 = path.s[1]
#c_d = path.d[1]
#c_d_d = path.d_d[1]
#c_d_dd = path.d_dd[1]
#c_speed = path.s_d[1]
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
T = [curr_posx, curr_posy]
curr_yaw = ImuYaw #+ math.pi / 2
if (len(gob) == 0):
ob = [[-20, -20]]
else:
ob = gob
ob_len = len(ob)-1
for x in xrange(0, ob_len):
#print("ob_transformation",ob)
ob = np.array(ob)
#ob[x, :] = .2 * ob[x, :]
ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
#print("ob_transformation",ob)
#############################################################
# c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)
#spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
#curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
try:
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
s0 = get_arc_length(tx, ty, spt)
path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
c_speed = path.s_d[1]
#c_d_d = c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print("Goal")
c_speed = 0.0
break
if show_animation:
plt.cla()
plt.plot(tx, ty, "-.k")
plt.plot(road_left_x, road_left_y, "-k")
plt.plot(road_right_x, road_right_y, "-k")
plt.plot(ob[:, 0], ob[:, 1], "ob")
plt.plot(path.x[1:], path.y[1:], "-or")
plt.plot(path.x[1], path.y[1], "vc")
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc="r", ec="k", head_width=0.5, head_length=1.0)
plt.title("v[km/h]:" + str(c_speed)[0:4])
plt.xlabel(u'x/m', fontsize=14) # 设置x轴,并设定字号大小
plt.ylabel(u'y/m', fontsize=14) # 设置y轴,并设定字号大小
plt.pause(0.0001)
####################规划成功###############
###########################################
PathFail_flag = 0
###########################################
except:
###############规划失败################
PathFail_flag = 1
print("Don't find optimal path")
################对障碍物堆栈清空############
############################################
############################################
global Gob_x
global Gob_y
Gob_x*=0
Gob_y*=0
############################################
############################################
###############################################################################
try:
'''
acc = proportional_control(6, CurrentVelocity)
temp1=path.yaw[1] `
temp2=curr_yaw
if temp1<0:
temp1=6.28+temp1
if temp2<0:
temp2=6.28+temp2
val = temp1-temp2
if val > 3.14:
val = val - 6.28
if val < -3.14:
val = val + 6.28
val = math.degrees(val)
if val > 50:
val = 50
if val < -50:
val = -50
my_node.talker(acc,val)
'''
path_record = localPath()
# 配置路径
for i in range(len(path.x[1:])):
#print("path_x",path.x[i])
path_record.path_x.append(path.x[i])
path_record.path_y.append(path.y[i])
# 路径数量限制
if len(path_record.path_x) > 10000:
path_record.path_x.pop(0)
path_record.path_y.pop(0)
# 发布路径`
my_node.talker(c_speed, path_record)
except:
print("local path send fail")
pass
#my_node.talker(c_speed, path.x[1:], path.y[1:])
#except:
# pass
print("Finish")
end = time.time()
#print("total time: ", end - start)
if show_animation:
plt.grid(True)
plt.show()
if __name__ == "__main__":
main() | #print("cy:",cy)
zero_cord_x = GPS_x[0]
zero_cord_y = GPS_y[0]
GPS_x = GPS_x - zero_cord_x | random_line_split |
path_planning.py | #!/usr/bin/python2
# -*- coding: UTF-8 -*-
# coding: utf-8
#!/usr/bin/env python
'''
发布轨迹信息
path.x; path.y; c_speed;
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from cubic_spline import Spline2D
from polynomials import QuarticPolynomial, QuinticPolynomial
import time
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32
from std_msgs.msg import Int32
from geometry_msgs.msg import Point
from nav_msgs.msg import Path
from local_planner.msg import localPath
from geometry_msgs.msg import PoseStamped, Quaternion
import tf
from CAN_driver.msg import Motor_Feedback
from GNSS_driver.msg import GNSS_CAN
import sys
# 参数
MAX_SPEED = 30.0 # 最大速度 [m/s]
MAX_ACCEL = 50.0 # 最大加速度 [m/ss]
MAX_CURVATURE = 30.0 # 最大曲率 [1/m]
MAX_ROAD_WIDTH = 10.0 # 最大道路宽度 [m]
D_ROAD_W = 2.0 # 路宽采样间隔 [m]
DT = 0.3 # Delta T[s]
MAXT = 6.0 # 最大预测时间 [m]
MINT = 4.0 # 最小预测时间 [m]
TARGET_SPEED = 15.0/3.6 # 目标速度 [m/s] 即纵向速度保持
D_T_S = 10.0/3.6 # 目标opo][]o][o][\o][o][o速度采样间隔 [m/s]
N_S_SAMPLE = 0.1 # 目标速度采样数量
ROBOT_RADIUS = 2.3 # 车辆半径 [m]
THRESH_DIST=0.01
# 损失函数权重
KJ = 0.8
KT = 0.1
KD = 20.0
KLAT = 0.8
KLON = 0.2
show_animation = True
Gob_x = []
Gob_y = []
#规划失败标志 1 决策层需要
PathFail_flag = 0
class FrenetPath:
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
# generate path to each offset goal
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
# 采样,并对每一个目标配置生成轨迹
# Lateral motion planning
for Ti in np.arange(MINT, MAXT, DT):
fp = FrenetPath()
# 计算出关于目标配置di,Ti的横向多项式
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
# 纵向速度规划 (速度保持)
# Loongitudinal motion planning (Velocity keeping)
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
###########################################################
#高速时的损失函数
###########################################################
Jp = sum(np.power(tfp.d_ddd, 2)) # square of jerk
Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# square of diff from target speed
ds = (TARGET_SPEED - tfp.s_d[-1])**2
# 横向的损失函数
tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2
# 纵向的损失函数
tfp.cv = KJ * Js + KT * Ti + KD * ds
# 总的损失函数为d 和 s方向的损失函数乘对应的系数相加
#########################################################
#低速时的损失函数
#########################################################
# # 低速时的损失函数
# ltfp = copy.deepcopy(tfp)
# ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]
# Jp_s = sum(np.power(ltfp.d_sss, 2)) # square of jerk
# Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# # S = s1 - s0
# dS = tfp.s[-1] - s0
# #横向的损失函数
# tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2
# #纵向的损失函数
# tfp.cv = KJ * Js + KT * Ti + KD * ds
tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
def calc_global_paths(fplist, csp):
for fp in fplist:
# calc global positions
for i in range(len(fp.s)):
ix, iy = csp.calc_position(fp.s[i])
if ix is None:
break
iyaw = csp.calc_yaw(fp.s[i])
di = fp.d[i]
fx = ix + di * math.cos(iyaw + math.pi / 2.0)
fy = iy + di * math.sin(iyaw + math.pi / 2.0)
fp.x.append(fx)
fp.y.append(fy)
# calc yaw and ds
for i in range(len(fp.x) - 1):
dx = fp.x[i + 1] - fp.x[i]
dy = fp.y[i + 1] - fp.y[i]
fp.yaw.append(math.atan2(dy, dx))
fp.ds.append(math.sqrt(dx**2 + dy**2))
fp.yaw.append(fp.yaw[-1])
fp.ds.append(fp.ds[-1])
# calc curvature
for i in range(len(fp.yaw) - 1):
fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
return fplist
def check_collision(fp, ob):
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0])**2 + (iy - ob[i, 1])**2)
for (ix, iy) in zip(fp.x, fp.y)]
collision = any([di <= ROBOT_RADIUS**2 for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
"""
check path above max speed, max a, does collision or not
"""
okind = []
for i in range(len(fplist)):
if any([v > MAX_SPEED for v in fplist[i].s_d]): # Max speed check
continue
elif any([abs(a) > MAX_ACCEL for a in fplist[i].s_dd]): # Max accel check
continue
elif any([abs(c) > MAX_CURVATURE for c in fplist[i].c]): # Max curvature check
continue
elif not check_collision(fplist[i], ob):
continue
okind.append(i)
return [fplist[i] for i in okind]
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
ob = np.array(ob)
fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
fplist = calc_global_paths(fplist, csp)
fplist = check_paths(fplist, ob)
# find minimum cost path
mincost = float("inf")
bestpath = None
for fp in fplist:
if mincost >= fp.cf:
mincost = fp.cf
bestpath = fp
return bestpath
def generate_road_widle(x,y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
road_left_ix = ix + MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_iy = iy + MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_ix = ix - MAX_ROAD_WIDTH/2 * math | ##############################
def load_global_path():
global zero_cord_x,zero_cord_y
bet = 0.1
blank = [] #buffer
white = [] #buffer
yellow = [] #buffer
GPS_x = [] #所采集预描点的x
GPS_y = [] #所采集预描点的x
#读取预描点
nums, ber = np.loadtxt("/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt", dtype=str, delimiter=',', unpack=True)
for i in range(len(nums)):
if not nums[i] in blank: #去除重复点
#blank.append(nums[i])
yellow.append(float(nums[i]))
white.append(float(ber[i]))
bx = yellow[0] #起始点坐标
by = white[0]
for i in range(len(yellow)):
dx = yellow[i] - bx
dy = white[i] - by
dis = math.sqrt(dx ** 2 + dy ** 2)
if dis > bet: #选取大于设定的距离的点
GPS_x.append(yellow[i]) #使cx,cy中点均满足要求
GPS_y.append(white[i])
bx = yellow[i]
by = white[i]
GPS_x = np.array(GPS_x) #将列表转换成数组
GPS_y = np.array(GPS_y)
#print("cx:",cx)
#print("cy:",cy)
zero_cord_x = GPS_x[0]
zero_cord_y = GPS_y[0]
GPS_x = GPS_x - zero_cord_x
GPS_y = GPS_y - zero_cord_y
plt.plot(GPS_x,GPS_y, "-r", label="GPS point ")
plt.plot()
plt.show()
return GPS_x, GPS_y
class Info(object):
def __init__(self):
self.CurrGPS_lat = float(-1)
self.CurrGPS_lon = float(-1)
self.CurrentVelocity = float(-1)
self.Target_Velocity = float(-1)
self.ImuYaw = float(-1)
self.Target_Theta = float(-1)
#self.CommandMessage = Car_Input()
self.gob = np.array([])
self.ob = np.array([])
self.gobx = np.array([])
self.goby = np.array([])
# Subscribers
rospy.Subscriber("coordinate", Point, self.FeedbackCallbackObs)
sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU,queue_size = 10) #订阅GPS数据
rospy.Subscriber("Motor_Feedback_mssage", Motor_Feedback,self.RVcallback,queue_size = 10)
def FeedbackCallbackGPSIMU(self, msg):
self.CurrGPS_lat = msg.latitude
self.CurrGPS_lon = msg.longitude
self.ImuYaw = (90-msg.course_angle)*np.pi/180
#print(self.CurrGPS_lat,self.CurrGPS_lon,self.ImuYaw)
def FeedbackCallbackObs(self, msg):
global Gob_x
global Gob_y
self.gobx = msg.x
self.goby = msg.y
#print("msg.x","msg.y", msg.x, msg.y)
Gob_x.append(self.gobx)
Gob_y.append(self.goby)
#print("Gob_x","Gob_y", Gob_x, Gob_y)
#np.append(self.gobx,5)
#np.append(self.goby,5)
self.gob = np.column_stack((Gob_x, Gob_y))
#print(self.gobx,self.goby)
#print(self.gob)
def RVcallback(self,msg):
self.CurrentVelocity = msg.Base_Vehspd
#print("*"*50)
#print("rv:",rv)
#rospy.loginfo('I heard: %s', data.data)
def init(self):
return self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx, self.goby, self.gob, self.CurrentVelocity
def talker(self,Target_Velocity, path_record):
self.rate = rospy.Rate(100) # 10hz
self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象
# 定义发布器 path_pub 发布 trajectory
self.path_pub = rospy.Publisher('trajectory', localPath, queue_size = 50) #定义Publisher对象
self.pub_Velocity.publish(Target_Velocity)
# 发布路径
self.path_pub.publish(path_record)
#self.rate.sleep()
# def talker(self,Target_Velocity,Target_Theta):
# self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象
# self.pub_Steering = rospy.Publisher('Car_Steering', Float32, queue_size = 10)
# self.rate = rospy.Rate(100) # 10hz
# self.pub_Velocity.publish(Target_Velocity)
# self.pub_Steering.publish(Target_Theta)
# self.rate.sleep()
#######################################################################################
def get_transalation(curr_gps_lat,curr_gps_lon):
curr_posy=(float(curr_gps_lon)-zero_cord_y)
curr_posx=(float(curr_gps_lat)-zero_cord_x)
#print("curr_posy,curr_posx=",curr_posy,curr_posx)
return curr_posx, curr_posy
def get_transformation(pt,curr_yaw,T):
c, s = np.cos(curr_yaw), np.sin(curr_yaw)
R = (np.array(((c,-s), (s, c))))
pt=pt.dot(R)+T
return pt
def get_arc_length(tx,ty,st):
arc_length=0
for x in range(1,st):
arc_length=arc_length+(np.hypot((tx[x-1]-tx[x]),(ty[x-1]-ty[x])))
return arc_length
def get_lateral_dist(tx,ty,curr_posx,curr_posy):
dist=[]
for x in range(0,len(tx)-1):
dist.append(np.hypot((float(curr_posx)-tx[x]),(float(curr_posy)-ty[x])))
lat_dist=min(dist)
st=dist.index(min(dist))
theta1=math.atan2((ty[st]-ty[st-1]),(tx[st]-tx[st-1]))
theta2=math.atan2((curr_posy-ty[st-1]),(curr_posx-tx[st-1]))
if lat_dist<THRESH_DIST:
lat_dist=0
curr_posx=tx[st]
curr_posy=ty[st]
if theta2<theta1:
lat_dist=-lat_dist
# print(lat_dist)
return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
#print("*"*50)
#print("current=",current)
#print("target - current",target - current)
a = 1.0 * (target - current)
return a
def main():
ptx = []
pty = []
ptx, pty = load_global_path()
tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
#print(csp)
road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)
#当前车速及加速度
c_speed = 5.0/3.6
c_acc = 1.0
c_d_dd = 0
c_d_d = 0
area = 25.0 # animation area length [m]
start = time.time()
rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)
my_node = Info()
while not rospy.is_shutdown():
CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()
#print("gob",gob)
ob = []
if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):
#print(CurrGPS_lat,CurrGPS_lon,ImuYaw, curr_posx, curr_posy)
#print(gobx,goby,gob)
#path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
#s0 = path.s[1]
#c_d = path.d[1]
#c_d_d = path.d_d[1]
#c_d_dd = path.d_dd[1]
#c_speed = path.s_d[1]
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
T = [curr_posx, curr_posy]
curr_yaw = ImuYaw #+ math.pi / 2
if (len(gob) == 0):
ob = [[-20, -20]]
else:
ob = gob
ob_len = len(ob)-1
for x in xrange(0, ob_len):
#print("ob_transformation",ob)
ob = np.array(ob)
#ob[x, :] = .2 * ob[x, :]
ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
#print("ob_transformation",ob)
#############################################################
# c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)
#spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
#curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
try:
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
s0 = get_arc_length(tx, ty, spt)
path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
c_speed = path.s_d[1]
#c_d_d = c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print("Goal")
c_speed = 0.0
break
if show_animation:
plt.cla()
plt.plot(tx, ty, "-.k")
plt.plot(road_left_x, road_left_y, "-k")
plt.plot(road_right_x, road_right_y, "-k")
plt.plot(ob[:, 0], ob[:, 1], "ob")
plt.plot(path.x[1:], path.y[1:], "-or")
plt.plot(path.x[1], path.y[1], "vc")
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc="r", ec="k", head_width=0.5, head_length=1.0)
plt.title("v[km/h]:" + str(c_speed)[0:4])
plt.xlabel(u'x/m', fontsize=14) # 设置x轴,并设定字号大小
plt.ylabel(u'y/m', fontsize=14) # 设置y轴,并设定字号大小
plt.pause(0.0001)
####################规划成功###############
###########################################
PathFail_flag = 0
###########################################
except:
###############规划失败################
PathFail_flag = 1
print("Don't find optimal path")
################对障碍物堆栈清空############
############################################
############################################
global Gob_x
global Gob_y
Gob_x*=0
Gob_y*=0
############################################
############################################
###############################################################################
try:
'''
acc = proportional_control(6, CurrentVelocity)
temp1=path.yaw[1] `
temp2=curr_yaw
if temp1<0:
temp1=6.28+temp1
if temp2<0:
temp2=6.28+temp2
val = temp1-temp2
if val > 3.14:
val = val - 6.28
if val < -3.14:
val = val + 6.28
val = math.degrees(val)
if val > 50:
val = 50
if val < -50:
val = -50
my_node.talker(acc,val)
'''
path_record = localPath()
# 配置路径
for i in range(len(path.x[1:])):
#print("path_x",path.x[i])
path_record.path_x.append(path.x[i])
path_record.path_y.append(path.y[i])
# 路径数量限制
if len(path_record.path_x) > 10000:
path_record.path_x.pop(0)
path_record.path_y.pop(0)
# 发布路径`
my_node.talker(c_speed, path_record)
except:
print("local path send fail")
pass
#my_node.talker(c_speed, path.x[1:], path.y[1:])
#except:
# pass
print("Finish")
end = time.time()
#print("total time: ", end - start)
if show_animation:
plt.grid(True)
plt.show()
if __name__ == "__main__":
main()
| .cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_iy = iy - MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_x.append(road_left_ix)
road_left_y.append(road_left_iy)
road_right_x.append(road_right_ix)
road_right_y.append(road_right_iy)
return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1) #0.1
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
######################################################### | identifier_body |
path_planning.py | #!/usr/bin/python2
# -*- coding: UTF-8 -*-
# coding: utf-8
#!/usr/bin/env python
'''
发布轨迹信息
path.x; path.y; c_speed;
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from cubic_spline import Spline2D
from polynomials import QuarticPolynomial, QuinticPolynomial
import time
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32
from std_msgs.msg import Int32
from geometry_msgs.msg import Point
from nav_msgs.msg import Path
from local_planner.msg import localPath
from geometry_msgs.msg import PoseStamped, Quaternion
import tf
from CAN_driver.msg import Motor_Feedback
from GNSS_driver.msg import GNSS_CAN
import sys
# 参数
MAX_SPEED = 30.0 # 最大速度 [m/s]
MAX_ACCEL = 50.0 # 最大加速度 [m/ss]
MAX_CURVATURE = 30.0 # 最大曲率 [1/m]
MAX_ROAD_WIDTH = 10.0 # 最大道路宽度 [m]
D_ROAD_W = 2.0 # 路宽采样间隔 [m]
DT = 0.3 # Delta T[s]
MAXT = 6.0 # 最大预测时间 [m]
MINT = 4.0 # 最小预测时间 [m]
TARGET_SPEED = 15.0/3.6 # 目标速度 [m/s] 即纵向速度保持
D_T_S = 10.0/3.6 # 目标opo][]o][o][\o][o][o速度采样间隔 [m/s]
N_S_SAMPLE = 0.1 # 目标速度采样数量
ROBOT_RADIUS = 2.3 # 车辆半径 [m]
THRESH_DIST=0.01
# 损失函数权重
KJ = 0.8
KT = 0.1
KD = 20.0
KLAT = 0.8
KLON = 0.2
show_animation = True
Gob_x = []
Gob_y = []
#规划失败标志 1 决策层需要
PathFail_flag = 0
class FrenetPath:
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
# generate path to each offset goal
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
# 采样,并对每一个目标配置生成轨迹
# Lateral motion planning
for Ti in np.arange(MINT, MAXT, DT):
fp = FrenetPath()
# 计算出关于目标配置di,Ti的横向多项式
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
# 纵向速度规划 (速度保持)
# Loongitudinal motion planning (Velocity keeping)
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
###########################################################
#高速时的损失函数
###########################################################
Jp = sum(np.power(tfp.d_ddd, 2)) # square of jerk
Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# square of diff from target speed
ds = (TARGET_SPEED - tfp.s_d[-1])**2
# 横向的损失函数
tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2
# 纵向的损失函数
tfp.cv = KJ * Js + KT * Ti + KD * ds
# 总的损失函数为d 和 s方向的损失函数乘对应的系数相加
#########################################################
#低速时的损失函数
#########################################################
# # 低速时的损失函数
# ltfp = copy.deepcopy(tfp)
# ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]
# Jp_s = sum(np.power(ltfp.d_sss, 2)) # square of jerk
# Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# # S = s1 - s0
# dS = tfp.s[-1] - s0
# #横向的损失函数
# tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2
# #纵向的损失函数
# tfp.cv = KJ * Js + KT * Ti + KD * ds
tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
def calc_global_paths(fplist, csp):
for fp in fplist:
# calc global positions
for i in range(len(fp.s)):
ix, iy = csp.calc_position(fp.s[i])
if ix is None:
break
iyaw = csp.calc_yaw(fp.s[i])
di = fp.d[i]
fx = ix + di * math.cos(iyaw + math.pi / 2.0)
fy = iy + di * math.sin(iyaw + math.pi / 2.0)
fp.x.append(fx)
fp.y.append(fy)
# calc yaw and ds
for i in range(len(fp.x) - 1):
dx = fp.x[i + 1] - fp.x[i]
dy = fp.y[i + 1] - fp.y[i]
fp.yaw.append(math.atan2(dy, dx))
fp.ds.append(math.sqrt(dx**2 + dy**2))
fp.yaw.append(fp.yaw[-1])
fp.ds.append(fp.ds[-1])
# calc curvature
for i in range(len(fp.yaw) - 1):
fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
return fplist
def check_collision(fp, ob):
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0])**2 + (iy - ob[i, 1])**2)
for (ix, iy) in zip(fp.x, fp.y)]
collision = any([di <= ROBOT_RADIUS**2 for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
"""
check path above max speed, max a, does collision or not
"""
okind = []
for i in range(len(fplist)):
if any([v > MAX_SPEED for v in fplist[i].s_d]): # Max speed check
continue
elif any([abs(a) > MAX_ACCEL for a in fplist[i].s_dd]): # Max accel check
continue
elif any([abs(c) > MAX_CURVATURE for c in fplist[i].c]): # Max curvature check
continue
elif not check_collision(fplist[i], ob):
continue
okind.append(i)
return [fplist[i] for i in okind]
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
ob = np.array(ob)
fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
fplist = calc_global_paths(fplist, csp)
fplist = check_paths(fplist, ob)
# find minimum cost path
mincost = float("inf")
bestpath = None
for fp in fplist:
if mincost >= fp.cf:
mincost = fp.cf
bestpath = fp
return bestpath
def generate_road_widle(x,y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
road_left_ix = ix + MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_iy = iy + MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_ix = ix - MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_iy = iy - MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_x.append(road_left_ix)
road_left_y.append(road_left_iy)
road_right_x.append(road_right_ix)
road_right_y.append(road_right_iy)
return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1) #0.1
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
#######################################################################################
def load_global_path():
global zero_cord_x,zero_cord_y
bet = 0.1
blank = [] #buffer
white = [] #buffer
yellow = [] #buffer
GPS_x = [] #所采集预描点的x
GPS_y = [] #所采集预描点的x
#读取预描点
nums, ber = np.loadtxt("/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt", dtype=str, delimiter=',', unpack=True)
for i in range(len(nums)):
if not nums[i] in blank: #去除重复点
#blank.append(nums[i])
yellow.append(float(nums[i]))
white.append(float(ber[i]))
bx = yellow[0] #起始点坐标
by = white[0]
for i in range(len(yellow)):
dx = yellow[i] - bx
dy = white[i] - by
dis = math.sqrt(dx ** 2 + dy ** 2)
if dis > bet: #选取大于设定的距离的点
GPS_x.append(yellow[i]) #使cx,cy中点均满足要求
GPS_y.append(white[i])
bx = yellow[i]
by = white[i]
GPS_x = np.array(GPS_x) #将列表转换成数组
GPS_y = np.array(GPS_y)
#print("cx:",cx)
#print("cy:",cy)
zero_cord_x = GPS_x[0]
zero_cord_y = GPS_y[0]
GPS_x = GPS_x - zero_cord_x
GPS_y = GPS_y - zero_cord_y
plt.plot(GPS_x,GPS_y, "-r", label="GPS point ")
plt.plot()
plt.show()
return GPS_x, GPS_y
class Info(object):
def __init__(self):
self.CurrGPS_lat = float(-1)
self.CurrGPS_lon = float(-1)
self.CurrentVelocity = float(-1)
self.Target_Velocity = float(-1)
self.ImuYaw = float(-1)
self.Target_Theta = float(-1)
#self.CommandMessage = Car_Input()
self.gob = np.array([])
self.ob = np.array([])
self.gobx = np.array([])
self.goby = np.array([])
# Subscribers
rospy.Subscriber("coordinate", Point, self.FeedbackCallbackObs)
sub = rospy.Subs | gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU,queue_size = 10) #订阅GPS数据
rospy.Subscriber("Motor_Feedback_mssage", Motor_Feedback,self.RVcallback,queue_size = 10)
def FeedbackCallbackGPSIMU(self, msg):
self.CurrGPS_lat = msg.latitude
self.CurrGPS_lon = msg.longitude
self.ImuYaw = (90-msg.course_angle)*np.pi/180
#print(self.CurrGPS_lat,self.CurrGPS_lon,self.ImuYaw)
def FeedbackCallbackObs(self, msg):
global Gob_x
global Gob_y
self.gobx = msg.x
self.goby = msg.y
#print("msg.x","msg.y", msg.x, msg.y)
Gob_x.append(self.gobx)
Gob_y.append(self.goby)
#print("Gob_x","Gob_y", Gob_x, Gob_y)
#np.append(self.gobx,5)
#np.append(self.goby,5)
self.gob = np.column_stack((Gob_x, Gob_y))
#print(self.gobx,self.goby)
#print(self.gob)
def RVcallback(self,msg):
self.CurrentVelocity = msg.Base_Vehspd
#print("*"*50)
#print("rv:",rv)
#rospy.loginfo('I heard: %s', data.data)
def init(self):
return self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx, self.goby, self.gob, self.CurrentVelocity
def talker(self,Target_Velocity, path_record):
self.rate = rospy.Rate(100) # 10hz
self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象
# 定义发布器 path_pub 发布 trajectory
self.path_pub = rospy.Publisher('trajectory', localPath, queue_size = 50) #定义Publisher对象
self.pub_Velocity.publish(Target_Velocity)
# 发布路径
self.path_pub.publish(path_record)
#self.rate.sleep()
# def talker(self,Target_Velocity,Target_Theta):
# self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象
# self.pub_Steering = rospy.Publisher('Car_Steering', Float32, queue_size = 10)
# self.rate = rospy.Rate(100) # 10hz
# self.pub_Velocity.publish(Target_Velocity)
# self.pub_Steering.publish(Target_Theta)
# self.rate.sleep()
#######################################################################################
def get_transalation(curr_gps_lat,curr_gps_lon):
curr_posy=(float(curr_gps_lon)-zero_cord_y)
curr_posx=(float(curr_gps_lat)-zero_cord_x)
#print("curr_posy,curr_posx=",curr_posy,curr_posx)
return curr_posx, curr_posy
def get_transformation(pt,curr_yaw,T):
c, s = np.cos(curr_yaw), np.sin(curr_yaw)
R = (np.array(((c,-s), (s, c))))
pt=pt.dot(R)+T
return pt
def get_arc_length(tx,ty,st):
arc_length=0
for x in range(1,st):
arc_length=arc_length+(np.hypot((tx[x-1]-tx[x]),(ty[x-1]-ty[x])))
return arc_length
def get_lateral_dist(tx,ty,curr_posx,curr_posy):
dist=[]
for x in range(0,len(tx)-1):
dist.append(np.hypot((float(curr_posx)-tx[x]),(float(curr_posy)-ty[x])))
lat_dist=min(dist)
st=dist.index(min(dist))
theta1=math.atan2((ty[st]-ty[st-1]),(tx[st]-tx[st-1]))
theta2=math.atan2((curr_posy-ty[st-1]),(curr_posx-tx[st-1]))
if lat_dist<THRESH_DIST:
lat_dist=0
curr_posx=tx[st]
curr_posy=ty[st]
if theta2<theta1:
lat_dist=-lat_dist
# print(lat_dist)
return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
#print("*"*50)
#print("current=",current)
#print("target - current",target - current)
a = 1.0 * (target - current)
return a
def main():
ptx = []
pty = []
ptx, pty = load_global_path()
tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
#print(csp)
road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)
#当前车速及加速度
c_speed = 5.0/3.6
c_acc = 1.0
c_d_dd = 0
c_d_d = 0
area = 25.0 # animation area length [m]
start = time.time()
rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)
my_node = Info()
while not rospy.is_shutdown():
CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()
#print("gob",gob)
ob = []
if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):
#print(CurrGPS_lat,CurrGPS_lon,ImuYaw, curr_posx, curr_posy)
#print(gobx,goby,gob)
#path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
#s0 = path.s[1]
#c_d = path.d[1]
#c_d_d = path.d_d[1]
#c_d_dd = path.d_dd[1]
#c_speed = path.s_d[1]
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
T = [curr_posx, curr_posy]
curr_yaw = ImuYaw #+ math.pi / 2
if (len(gob) == 0):
ob = [[-20, -20]]
else:
ob = gob
ob_len = len(ob)-1
for x in xrange(0, ob_len):
#print("ob_transformation",ob)
ob = np.array(ob)
#ob[x, :] = .2 * ob[x, :]
ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
#print("ob_transformation",ob)
#############################################################
# c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)
#spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
#curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
try:
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
s0 = get_arc_length(tx, ty, spt)
path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
c_speed = path.s_d[1]
#c_d_d = c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print("Goal")
c_speed = 0.0
break
if show_animation:
plt.cla()
plt.plot(tx, ty, "-.k")
plt.plot(road_left_x, road_left_y, "-k")
plt.plot(road_right_x, road_right_y, "-k")
plt.plot(ob[:, 0], ob[:, 1], "ob")
plt.plot(path.x[1:], path.y[1:], "-or")
plt.plot(path.x[1], path.y[1], "vc")
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc="r", ec="k", head_width=0.5, head_length=1.0)
plt.title("v[km/h]:" + str(c_speed)[0:4])
plt.xlabel(u'x/m', fontsize=14) # 设置x轴,并设定字号大小
plt.ylabel(u'y/m', fontsize=14) # 设置y轴,并设定字号大小
plt.pause(0.0001)
####################规划成功###############
###########################################
PathFail_flag = 0
###########################################
except:
###############规划失败################
PathFail_flag = 1
print("Don't find optimal path")
################对障碍物堆栈清空############
############################################
############################################
global Gob_x
global Gob_y
Gob_x*=0
Gob_y*=0
############################################
############################################
###############################################################################
try:
'''
acc = proportional_control(6, CurrentVelocity)
temp1=path.yaw[1] `
temp2=curr_yaw
if temp1<0:
temp1=6.28+temp1
if temp2<0:
temp2=6.28+temp2
val = temp1-temp2
if val > 3.14:
val = val - 6.28
if val < -3.14:
val = val + 6.28
val = math.degrees(val)
if val > 50:
val = 50
if val < -50:
val = -50
my_node.talker(acc,val)
'''
path_record = localPath()
# 配置路径
for i in range(len(path.x[1:])):
#print("path_x",path.x[i])
path_record.path_x.append(path.x[i])
path_record.path_y.append(path.y[i])
# 路径数量限制
if len(path_record.path_x) > 10000:
path_record.path_x.pop(0)
path_record.path_y.pop(0)
# 发布路径`
my_node.talker(c_speed, path_record)
except:
print("local path send fail")
pass
#my_node.talker(c_speed, path.x[1:], path.y[1:])
#except:
# pass
print("Finish")
end = time.time()
#print("total time: ", end - start)
if show_animation:
plt.grid(True)
plt.show()
if __name__ == "__main__":
main()
| criber(' | identifier_name |
path_planning.py | #!/usr/bin/python2
# -*- coding: UTF-8 -*-
# coding: utf-8
#!/usr/bin/env python
'''
发布轨迹信息
path.x; path.y; c_speed;
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from cubic_spline import Spline2D
from polynomials import QuarticPolynomial, QuinticPolynomial
import time
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32
from std_msgs.msg import Int32
from geometry_msgs.msg import Point
from nav_msgs.msg import Path
from local_planner.msg import localPath
from geometry_msgs.msg import PoseStamped, Quaternion
import tf
from CAN_driver.msg import Motor_Feedback
from GNSS_driver.msg import GNSS_CAN
import sys
# 参数
MAX_SPEED = 30.0 # 最大速度 [m/s]
MAX_ACCEL = 50.0 # 最大加速度 [m/ss]
MAX_CURVATURE = 30.0 # 最大曲率 [1/m]
MAX_ROAD_WIDTH = 10.0 # 最大道路宽度 [m]
D_ROAD_W = 2.0 # 路宽采样间隔 [m]
DT = 0.3 # Delta T[s]
MAXT = 6.0 # 最大预测时间 [m]
MINT = 4.0 # 最小预测时间 [m]
TARGET_SPEED = 15.0/3.6 # 目标速度 [m/s] 即纵向速度保持
D_T_S = 10.0/3.6 # 目标opo][]o][o][\o][o][o速度采样间隔 [m/s]
N_S_SAMPLE = 0.1 # 目标速度采样数量
ROBOT_RADIUS = 2.3 # 车辆半径 [m]
THRESH_DIST=0.01
# 损失函数权重
KJ = 0.8
KT = 0.1
KD = 20.0
KLAT = 0.8
KLON = 0.2
show_animation = True
Gob_x = []
Gob_y = []
#规划失败标志 1 决策层需要
PathFail_flag = 0
class FrenetPath:
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
# generate path to each offset goal
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
# 采样,并对每一个目标配置生成轨迹
# Lateral motion planning
for Ti in np.arange(MINT, MAXT, DT):
fp = FrenetPath()
# 计算出关于目标配置di,Ti的横向多项式
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
# 纵向速度规划 (速度保持)
# Loongitudinal motion planning (Velocity keeping)
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
###########################################################
#高速时的损失函数
###########################################################
Jp = sum(np.power(tfp.d_ddd, 2)) # square of jerk
Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# square of diff from target speed
ds = (TARGET_SPEED - tfp.s_d[-1])**2
# 横向的损失函数
tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2
# 纵向的损失函数
tfp.cv = KJ * Js + KT * Ti + KD * ds
# 总的损失函数为d 和 s方向的损失函数乘对应的系数相加
#########################################################
#低速时的损失函数
#########################################################
# # 低速时的损失函数
# ltfp = copy.deepcopy(tfp)
# ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]
# Jp_s = sum(np.power(ltfp.d_sss, 2)) # square of jerk
# Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk
# # S = s1 - s0
# dS = tfp.s[-1] - s0
# #横向的损失函数
# tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2
# #纵向的损失函数
# tfp.cv = KJ * Js + KT * Ti + KD * ds
tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
def calc_global_paths(fplist, csp):
for fp in fplist:
# calc global positions
for i in range(len(fp.s)):
ix, iy = csp.calc_position(fp.s[i])
if ix is None:
break
iyaw = csp.calc_yaw(fp.s[i])
di = fp.d[i]
fx = ix + di * math.cos(iyaw + math.pi / 2.0)
fy = iy + di * math.sin(iyaw + math.pi / 2.0)
fp.x.append(fx)
fp.y.append(fy)
# calc yaw and ds
for i in range(len(fp.x) - 1):
dx = fp.x[i + 1] - fp.x[i]
dy = fp.y[i + 1] - fp.y[i]
fp.yaw.append(math.atan2(dy, dx))
fp.ds.append(math.sqrt(dx**2 + dy**2))
fp.yaw.append(fp.yaw[-1])
fp.ds.append(fp.ds[-1])
# calc curvature
for i in range(len(fp.yaw) - 1):
fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
return fplist
def check_collision(fp, ob):
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0])**2 + (iy - ob[i, 1])**2)
for (ix, iy) in zip(fp.x, fp.y)]
collision = any([di <= ROBOT_RADIUS**2 for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
"""
check path above max speed, max a, does collision or not
"""
okind = []
for i in range(len(fplist)):
if any([v > MAX_SPEED for v in fplist[i].s_d]): # Max speed check
continue
elif any([abs(a) > MAX_ACCEL for a in fplist[i].s_dd]): # Max accel check
continue
elif any([abs(c) > MAX_CURVATURE for c in fplist[i].c]): # Max curvature check
continue
elif not check_collision(fplist[i], ob):
continue
okind.append(i)
return [fplist[i] for i in okind]
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
ob = np.array(ob)
fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
fplist = calc_global_paths(fplist, csp)
fplist = check_paths(fplist, ob)
# find minimum cost path
mincost = float("inf")
bestpath = None
for fp in fplist:
if mincost >= fp.cf:
mincost = fp.cf
bestpath = fp
return bestpath
def generate_road_widle(x,y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
road_left_ix = ix + MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_iy = iy + MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_ix = ix - MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
road_right_iy = iy - MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
road_left_x.append(road_left_ix)
road_left_y.append(road_left_iy)
road_right_x.append(road_right_ix)
road_right_y.append(road_right_iy)
return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1) #0.1
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
#######################################################################################
def load_global_path():
global zero_cord_x,zero_cord_y
bet = 0.1
blank = [] | #buffer
GPS_x = [] #所采集预描点的x
GPS_y = [] #所采集预描点的x
#读取预描点
nums, ber = np.loadtxt("/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt", dtype=str, delimiter=',', unpack=True)
for i in range(len(nums)):
if not nums[i] in blank: #去除重复点
#blank.append(nums[i])
yellow.append(float(nums[i]))
white.append(float(ber[i]))
bx = yellow[0] #起始点坐标
by = white[0]
for i in range(len(yellow)):
dx = yellow[i] - bx
dy = white[i] - by
dis = math.sqrt(dx ** 2 + dy ** 2)
if dis > bet: #选取大于设定的距离的点
GPS_x.append(yellow[i]) #使cx,cy中点均满足要求
GPS_y.append(white[i])
bx = yellow[i]
by = white[i]
GPS_x = np.array(GPS_x) #将列表转换成数组
GPS_y = np.array(GPS_y)
#print("cx:",cx)
#print("cy:",cy)
zero_cord_x = GPS_x[0]
zero_cord_y = GPS_y[0]
GPS_x = GPS_x - zero_cord_x
GPS_y = GPS_y - zero_cord_y
plt.plot(GPS_x,GPS_y, "-r", label="GPS point ")
plt.plot()
plt.show()
return GPS_x, GPS_y
class Info(object):
def __init__(self):
self.CurrGPS_lat = float(-1)
self.CurrGPS_lon = float(-1)
self.CurrentVelocity = float(-1)
self.Target_Velocity = float(-1)
self.ImuYaw = float(-1)
self.Target_Theta = float(-1)
#self.CommandMessage = Car_Input()
self.gob = np.array([])
self.ob = np.array([])
self.gobx = np.array([])
self.goby = np.array([])
# Subscribers
rospy.Subscriber("coordinate", Point, self.FeedbackCallbackObs)
sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU,queue_size = 10) #订阅GPS数据
rospy.Subscriber("Motor_Feedback_mssage", Motor_Feedback,self.RVcallback,queue_size = 10)
def FeedbackCallbackGPSIMU(self, msg):
self.CurrGPS_lat = msg.latitude
self.CurrGPS_lon = msg.longitude
self.ImuYaw = (90-msg.course_angle)*np.pi/180
#print(self.CurrGPS_lat,self.CurrGPS_lon,self.ImuYaw)
def FeedbackCallbackObs(self, msg):
global Gob_x
global Gob_y
self.gobx = msg.x
self.goby = msg.y
#print("msg.x","msg.y", msg.x, msg.y)
Gob_x.append(self.gobx)
Gob_y.append(self.goby)
#print("Gob_x","Gob_y", Gob_x, Gob_y)
#np.append(self.gobx,5)
#np.append(self.goby,5)
self.gob = np.column_stack((Gob_x, Gob_y))
#print(self.gobx,self.goby)
#print(self.gob)
def RVcallback(self,msg):
self.CurrentVelocity = msg.Base_Vehspd
#print("*"*50)
#print("rv:",rv)
#rospy.loginfo('I heard: %s', data.data)
def init(self):
return self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx, self.goby, self.gob, self.CurrentVelocity
def talker(self,Target_Velocity, path_record):
self.rate = rospy.Rate(100) # 10hz
self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象
# 定义发布器 path_pub 发布 trajectory
self.path_pub = rospy.Publisher('trajectory', localPath, queue_size = 50) #定义Publisher对象
self.pub_Velocity.publish(Target_Velocity)
# 发布路径
self.path_pub.publish(path_record)
#self.rate.sleep()
# def talker(self,Target_Velocity,Target_Theta):
# self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象
# self.pub_Steering = rospy.Publisher('Car_Steering', Float32, queue_size = 10)
# self.rate = rospy.Rate(100) # 10hz
# self.pub_Velocity.publish(Target_Velocity)
# self.pub_Steering.publish(Target_Theta)
# self.rate.sleep()
#######################################################################################
def get_transalation(curr_gps_lat,curr_gps_lon):
curr_posy=(float(curr_gps_lon)-zero_cord_y)
curr_posx=(float(curr_gps_lat)-zero_cord_x)
#print("curr_posy,curr_posx=",curr_posy,curr_posx)
return curr_posx, curr_posy
def get_transformation(pt,curr_yaw,T):
c, s = np.cos(curr_yaw), np.sin(curr_yaw)
R = (np.array(((c,-s), (s, c))))
pt=pt.dot(R)+T
return pt
def get_arc_length(tx,ty,st):
arc_length=0
for x in range(1,st):
arc_length=arc_length+(np.hypot((tx[x-1]-tx[x]),(ty[x-1]-ty[x])))
return arc_length
def get_lateral_dist(tx,ty,curr_posx,curr_posy):
dist=[]
for x in range(0,len(tx)-1):
dist.append(np.hypot((float(curr_posx)-tx[x]),(float(curr_posy)-ty[x])))
lat_dist=min(dist)
st=dist.index(min(dist))
theta1=math.atan2((ty[st]-ty[st-1]),(tx[st]-tx[st-1]))
theta2=math.atan2((curr_posy-ty[st-1]),(curr_posx-tx[st-1]))
if lat_dist<THRESH_DIST:
lat_dist=0
curr_posx=tx[st]
curr_posy=ty[st]
if theta2<theta1:
lat_dist=-lat_dist
# print(lat_dist)
return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
#print("*"*50)
#print("current=",current)
#print("target - current",target - current)
a = 1.0 * (target - current)
return a
def main():
ptx = []
pty = []
ptx, pty = load_global_path()
tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
#print(csp)
road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)
#当前车速及加速度
c_speed = 5.0/3.6
c_acc = 1.0
c_d_dd = 0
c_d_d = 0
area = 25.0 # animation area length [m]
start = time.time()
rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)
my_node = Info()
while not rospy.is_shutdown():
CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()
#print("gob",gob)
ob = []
if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):
#print(CurrGPS_lat,CurrGPS_lon,ImuYaw, curr_posx, curr_posy)
#print(gobx,goby,gob)
#path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
#s0 = path.s[1]
#c_d = path.d[1]
#c_d_d = path.d_d[1]
#c_d_dd = path.d_dd[1]
#c_speed = path.s_d[1]
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
T = [curr_posx, curr_posy]
curr_yaw = ImuYaw #+ math.pi / 2
if (len(gob) == 0):
ob = [[-20, -20]]
else:
ob = gob
ob_len = len(ob)-1
for x in xrange(0, ob_len):
#print("ob_transformation",ob)
ob = np.array(ob)
#ob[x, :] = .2 * ob[x, :]
ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
#print("ob_transformation",ob)
#############################################################
# c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)
#spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
#curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
try:
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
s0 = get_arc_length(tx, ty, spt)
path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
c_speed = path.s_d[1]
#c_d_d = c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print("Goal")
c_speed = 0.0
break
if show_animation:
plt.cla()
plt.plot(tx, ty, "-.k")
plt.plot(road_left_x, road_left_y, "-k")
plt.plot(road_right_x, road_right_y, "-k")
plt.plot(ob[:, 0], ob[:, 1], "ob")
plt.plot(path.x[1:], path.y[1:], "-or")
plt.plot(path.x[1], path.y[1], "vc")
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc="r", ec="k", head_width=0.5, head_length=1.0)
plt.title("v[km/h]:" + str(c_speed)[0:4])
plt.xlabel(u'x/m', fontsize=14) # 设置x轴,并设定字号大小
plt.ylabel(u'y/m', fontsize=14) # 设置y轴,并设定字号大小
plt.pause(0.0001)
####################规划成功###############
###########################################
PathFail_flag = 0
###########################################
except:
###############规划失败################
PathFail_flag = 1
print("Don't find optimal path")
################对障碍物堆栈清空############
############################################
############################################
global Gob_x
global Gob_y
Gob_x*=0
Gob_y*=0
############################################
############################################
###############################################################################
try:
'''
acc = proportional_control(6, CurrentVelocity)
temp1=path.yaw[1] `
temp2=curr_yaw
if temp1<0:
temp1=6.28+temp1
if temp2<0:
temp2=6.28+temp2
val = temp1-temp2
if val > 3.14:
val = val - 6.28
if val < -3.14:
val = val + 6.28
val = math.degrees(val)
if val > 50:
val = 50
if val < -50:
val = -50
my_node.talker(acc,val)
'''
path_record = localPath()
# 配置路径
for i in range(len(path.x[1:])):
#print("path_x",path.x[i])
path_record.path_x.append(path.x[i])
path_record.path_y.append(path.y[i])
# 路径数量限制
if len(path_record.path_x) > 10000:
path_record.path_x.pop(0)
path_record.path_y.pop(0)
# 发布路径`
my_node.talker(c_speed, path_record)
except:
print("local path send fail")
pass
#my_node.talker(c_speed, path.x[1:], path.y[1:])
#except:
# pass
print("Finish")
end = time.time()
#print("total time: ", end - start)
if show_animation:
plt.grid(True)
plt.show()
if __name__ == "__main__":
main()
| #buffer
white = [] #buffer
yellow = [] | conditional_block |
pyNN_target_sim.py | # -*- coding: utf-8 -*-
"""
Building and simulating spiking neural networks using
`pyNN <http://neuralensemble.org/docs/PyNN/>`_.
@author: rbodo
"""
import os
import sys
import time
import warnings
import numpy as np
from six.moves import cPickle
from snntoolbox.utils.utils import confirm_overwrite, is_module_installed
from snntoolbox.simulation.utils import AbstractSNN, get_shape_from_label
from snntoolbox.bin.utils import config_string_to_set_of_strings
class SNN(AbstractSNN):
"""Class to hold the compiled spiking neural network.
Represents the compiled spiking neural network, ready for testing in a
spiking simulator.
Attributes
----------
layers: list[pyNN.Population]
Each entry represents a layer, i.e. a population of neurons, in form of
pyNN ``Population`` objects.
connections: list[pyNN.Projection]
pyNN ``Projection`` objects representing the connections between
individual layers.
cellparams: dict
Neuron cell parameters determining properties of the spiking neurons in
pyNN simulators.
"""
def __init__(self, config, queue=None):
AbstractSNN.__init__(self, config, queue)
self.layers = []
self.connections = []
self.cellparams = {key: config.getfloat('cell', key) for key in
config_string_to_set_of_strings(config.get(
'restrictions', 'cellparams_pyNN'))}
if 'i_offset' in self.cellparams.keys():
print("SNN toolbox WARNING: The cell parameter 'i_offset' is "
"reserved for the biases and should not be set globally.")
self.cellparams.pop('i_offset')
self.change_padding = False
@property
def is_parallelizable(self):
return False
def add_input_layer(self, input_shape):
celltype = self.sim.SpikeSourcePoisson() if self._poisson_input \
else self.sim.SpikeSourceArray()
self.layers.append(self.sim.Population(
np.prod(input_shape[1:], dtype=np.int).item(), celltype,
label='InputLayer'))
def add_layer(self, layer):
# This implementation of ZeroPadding layers assumes symmetric single
# padding ((1, 1), (1, 1)).
# Todo: Generalize for asymmetric padding or arbitrary size.
if 'ZeroPadding' in layer.__class__.__name__:
# noinspection PyUnresolvedReferences
padding = layer.padding
if set(padding).issubset((1, (1, 1))):
self.change_padding = True
return
else:
raise NotImplementedError(
"Border_mode {} not supported.".format(padding))
# Latest Keras versions need special permutation after Flatten layers.
if 'Flatten' in layer.__class__.__name__ and \
self.config.get('input', 'model_lib') == 'keras':
self.flatten_shapes.append(
(layer.name, get_shape_from_label(self.layers[-1].label)))
return
self.layers.append(self.sim.Population(
np.prod(layer.output_shape[1:], dtype=np.int).item(),
self.sim.IF_curr_exp, self.cellparams, label=layer.name))
self.layers[-1].initialize(v=self.layers[-1].get('v_rest'))
def build_dense(self, layer):
"""
Parameters
----------
layer : keras.layers.Dense
Returns
-------
"""
if layer.activation.__name__ == 'softmax':
warnings.warn("Activation 'softmax' not implemented. Using 'relu' "
"activation instead.", RuntimeWarning)
weights, biases = layer.get_weights()
self.set_biases(np.array(biases, 'float64'))
delay = self.config.getfloat('cell', 'delay')
connections = []
if len(self.flatten_shapes) == 1:
print("Swapping data_format of Flatten layer.")
flatten_name, shape = self.flatten_shapes.pop()
if self.data_format == 'channels_last':
y_in, x_in, f_in = shape
else:
f_in, y_in, x_in = shape
for i in range(weights.shape[0]): # Input neurons
# Sweep across channel axis of feature map. Assumes that each
# consecutive input neuron lies in a different channel. This is
# the case for channels_last, but not for channels_first.
f = i % f_in
# Sweep across height of feature map. Increase y by one if all
# rows along the channel axis were seen.
y = i // (f_in * x_in)
# Sweep across width of feature map.
x = (i // f_in) % x_in
new_i = f * x_in * y_in + x_in * y + x
for j in range(weights.shape[1]): # Output neurons
connections.append((new_i, j, weights[i, j], delay))
elif len(self.flatten_shapes) > 1:
raise RuntimeWarning("Not all Flatten layers have been consumed.")
else:
for i in range(weights.shape[0]):
for j in range(weights.shape[1]):
connections.append((i, j, weights[i, j], delay))
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def build_convolution(self, layer):
from snntoolbox.simulation.utils import build_convolution
# If the parsed model contains a ZeroPadding layer, we need to tell the
# Conv layer about it here, because ZeroPadding layers are removed when
# building the pyNN model.
if self.change_padding:
if layer.padding == 'valid':
self.change_padding = False
layer.padding = 'ZeroPadding'
else:
raise NotImplementedError(
"Border_mode {} in combination with ZeroPadding is not "
"supported.".format(layer.padding))
delay = self.config.getfloat('cell', 'delay')
transpose_kernel = \
self.config.get('simulation', 'keras_backend') == 'tensorflow'
connections, biases = build_convolution(layer, delay, transpose_kernel)
self.set_biases(biases)
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def build_pooling(self, layer):
from snntoolbox.simulation.utils import build_pooling
delay = self.config.getfloat('cell', 'delay')
connections = build_pooling(layer, delay)
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def compile(self):
pass
def simulate(self, **kwargs):
data = kwargs[str('x_b_l')]
if self.data_format == 'channels_last' and data.ndim == 4:
data = np.moveaxis(data, 3, 1)
x_flat = np.ravel(data)
if self._poisson_input:
self.layers[0].set(rate=list(x_flat / self.rescale_fac * 1000))
elif self._is_aedat_input:
raise NotImplementedError
else:
spike_times = []
for amplitude in x_flat:
st = np.linspace(0, self._duration,
int(self._duration * amplitude))
spike_times.append(st)
self.layers[0].set(spike_times=spike_times)
if is_module_installed('pynn_object_serialisation'):
from pynn_object_serialisation.functions import intercept_simulator
current_time = time.strftime("_%H%M%S_%d%m%Y")
intercept_simulator(self.sim, "snn_toolbox_pynn_" + current_time)
self.sim.run(self._duration - self._dt,
callbacks=[MyProgressBar(self._dt, self._duration)])
print("\nCollecting results...")
output_b_l_t = self.get_recorded_vars(self.layers)
return output_b_l_t
def reset(self, sample_idx):
mod = self.config.getint('simulation', 'reset_between_nth_sample')
mod = mod if mod else sample_idx + 1
if sample_idx % mod == 0:
print("Resetting simulator...")
self.sim.reset()
print("Done.")
def end_sim(self):
self.sim.end()
def save(self, path, filename):
print("Saving model to {}...".format(path))
self.save_assembly(path, filename)
self.save_connections(path)
self.save_biases(path)
print("Done.\n")
def load(self, path, filename):
self.layers = self.load_assembly(path, filename)
for i in range(len(self.layers) - 1):
filepath = os.path.join(path, self.layers[i + 1].label)
assert os.path.isfile(filepath), \
"Connections were not found at specified location."
self.sim.Projection(self.layers[i], self.layers[i + 1],
self.sim.FromFileConnector(filepath))
self.layers[i + 1].set(**self.cellparams)
self.layers[i + 1].initialize(v=self.layers[i + 1].get('v_rest'))
# Biases should be already be loaded from the assembly file.
# Otherwise do this:
# filepath = os.path.join(path, self.layers[i + 1].label+'_biases')
# biases = np.loadtxt(filepath)
# self.layers[i + 1].set(i_offset=biases*self._dt/1e2)
def init_cells(self):
vars_to_record = self.get_vars_to_record()
if 'spikes' in vars_to_record:
self.layers[0].record([str('spikes')]) # Input layer has no 'v'
for layer in self.layers[1:]:
layer.record(vars_to_record)
# The spikes of the last layer are recorded by default because they
# contain the networks output (classification guess).
if 'spikes' not in vars_to_record:
vars_to_record.append(str('spikes'))
self.layers[-1].record(vars_to_record)
def set_biases(self, biases):
"""Set biases.
Notes
-----
This assumes no leak.
"""
if not np.any(biases):
return
v_rest = self.config.getfloat('cell', 'v_rest')
v_thresh = self.config.getfloat('cell', 'v_thresh')
cm = self.config.getfloat('cell', 'cm')
i_offset = biases * cm * (v_thresh - v_rest) / self._duration
self.layers[-1].set(i_offset=i_offset)
def get_vars_to_record(self):
"""Get variables to record during simulation.
Returns
-------
vars_to_record: list[str]
The names of variables to record during simulation.
"""
vars_to_record = []
if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
'hist_spikerates_activations'} & self._plot_keys) \
or 'spiketrains_n_b_l_t' in self._log_keys:
vars_to_record.append(str('spikes'))
if 'mem_n_b_l_t' in self._log_keys or 'v_mem' in self._plot_keys:
vars_to_record.append(str('v'))
return vars_to_record
def get_spiketrains(self, **kwargs):
j = self._spiketrains_container_counter
if self.spiketrains_n_b_l_t is None \
or j >= len(self.spiketrains_n_b_l_t):
return None
shape = self.spiketrains_n_b_l_t[j][0].shape
# Outer for-loop that calls this function starts with
# 'monitor_index' = 0, but this is reserved for the input and handled
# by `get_spiketrains_input()`.
i = len(self.layers) - 1 if kwargs[str('monitor_index')] == -1 else \
kwargs[str('monitor_index')] + 1
spiketrains_flat = self.layers[i].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_input(self):
shape = list(self.parsed_model.input_shape) + [self._num_timesteps]
spiketrains_flat = self.layers[0].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_output(self):
shape = [self.batch_size, self.num_classes, self._num_timesteps]
spiketrains_flat = self.layers[-1].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_vmem(self, **kwargs):
vs = kwargs[str('layer')].get_data().segments[-1].analogsignals
if len(vs) > 0:
return np.array([np.swapaxes(v, 0, 1) for v in vs])
def save_assembly(self, path, filename):
"""Write layers of neural network to disk.
The size, structure, labels of all the population of an assembly are
stored in a dictionary such that one can load them again using the
`load_assembly` function.
The term "assembly" refers to pyNN internal nomenclature, where
``Assembly`` is a collection of layers (``Populations``), which in turn
consist of a number of neurons (``cells``).
Parameters
----------
path: str
Path to directory where to save layers.
filename: str
Name of file to write layers to.
"""
filepath = os.path.join(path, filename)
if not (self.config.getboolean('output', 'overwrite') or
confirm_overwrite(filepath)):
return
print("Saving assembly...")
s = {}
labels = []
variables = ['size', 'structure', 'label']
for population in self.layers:
labels.append(population.label)
data = {}
for variable in variables:
if hasattr(population, variable):
data[variable] = getattr(population, variable)
if hasattr(population.celltype, 'describe'):
data['celltype'] = population.celltype.describe()
if population.label != 'InputLayer':
data['i_offset'] = population.get('i_offset')
s[population.label] = data
s['labels'] = labels # List of population labels describing the net.
s['variables'] = variables # List of variable names.
s['size'] = len(self.layers) # Number of populations in assembly.
cPickle.dump(s, open(filepath, 'wb'), -1)
def save_connections(self, path):
"""Write parameters of a neural network to disk.
The parameters between two layers are saved in a text file.
They can then be used to connect pyNN populations e.g. with
``sim.Projection(layer1, layer2, sim.FromListConnector(filename))``,
where ``sim`` is a simulator supported by pyNN, e.g. Brian, NEURON, or
NEST.
Parameters
----------
path: str
Path to directory where connections are saved.
Return
------
Text files containing the layer connections. Each file is named
after the layer it connects to, e.g. ``layer2.txt`` if connecting
layer1 to layer2.
"""
print("Saving connections...")
# Iterate over layers to save each projection in a separate txt file.
for projection in self.connections:
filepath = os.path.join(path, projection.label.partition('→')[-1])
if self.config.getboolean('output', 'overwrite') or \
confirm_overwrite(filepath):
projection.save('connections', filepath)
def save_biases(self, path):
"""Write biases of a neural network to disk.
Parameters
----------
path: str
Path to directory where connections are saved.
"""
print("Saving biases...")
for layer in self.layers:
filepath = os.path.join(path, layer.label + '_biases')
if self.config.getboolean('output', 'overwrite') or \
confirm_overwrite(filepath):
if 'Input' in layer.label:
continue
try:
biases = layer.get('i_offset')
except KeyError:
continue
if np.isscalar(biases):
continue
np.savetxt(filepath, biases)
def lo | elf, path, filename):
"""Load the populations in an assembly.
Loads the populations in an assembly that was saved with the
`save_assembly` function.
The term "assembly" refers to pyNN internal nomenclature, where
``Assembly`` is a collection of layers (``Populations``), which in turn
consist of a number of neurons (``cells``).
Parameters
----------
path: str
Path to directory where to load model from.
filename: str
Name of file to load model from.
Returns
-------
layers: list[pyNN.Population]
List of pyNN ``Population`` objects.
"""
filepath = os.path.join(path, filename)
assert os.path.isfile(filepath), \
"Spiking neuron layers were not found at specified location."
if sys.version_info < (3,):
s = cPickle.load(open(filepath, 'rb'))
else:
s = cPickle.load(open(filepath, 'rb'), encoding='bytes')
# Iterate over populations in assembly
layers = []
for label in s['labels']:
celltype = getattr(self.sim, s[label]['celltype'])
population = self.sim.Population(s[label]['size'], celltype,
celltype.default_parameters,
structure=s[label]['structure'],
label=label)
# Set the rest of the specified variables, if any.
for variable in s['variables']:
if getattr(population, variable, None) is None:
setattr(population, variable, s[label][variable])
if label != 'InputLayer':
population.set(i_offset=s[label]['i_offset'])
layers.append(population)
return layers
def set_spiketrain_stats_input(self):
AbstractSNN.set_spiketrain_stats_input(self)
class MyProgressBar(object):
"""
A callback which draws a progress bar in the terminal.
"""
def __init__(self, interval, t_stop):
self.interval = interval
self.t_stop = t_stop
from pyNN.utility import ProgressBar
self.pb = ProgressBar(width=int(t_stop / interval), char=".")
def __call__(self, t):
self.pb(t / self.t_stop)
return t + self.interval
| ad_assembly(s | identifier_name |
pyNN_target_sim.py | # -*- coding: utf-8 -*-
"""
Building and simulating spiking neural networks using
`pyNN <http://neuralensemble.org/docs/PyNN/>`_.
@author: rbodo
"""
import os
import sys
import time
import warnings
import numpy as np
from six.moves import cPickle
from snntoolbox.utils.utils import confirm_overwrite, is_module_installed
from snntoolbox.simulation.utils import AbstractSNN, get_shape_from_label
from snntoolbox.bin.utils import config_string_to_set_of_strings
class SNN(AbstractSNN):
"""Class to hold the compiled spiking neural network.
Represents the compiled spiking neural network, ready for testing in a
spiking simulator.
Attributes
----------
layers: list[pyNN.Population]
Each entry represents a layer, i.e. a population of neurons, in form of
pyNN ``Population`` objects.
connections: list[pyNN.Projection]
pyNN ``Projection`` objects representing the connections between
individual layers.
cellparams: dict
Neuron cell parameters determining properties of the spiking neurons in
pyNN simulators.
"""
def __init__(self, config, queue=None):
AbstractSNN.__init__(self, config, queue)
self.layers = []
self.connections = []
self.cellparams = {key: config.getfloat('cell', key) for key in
config_string_to_set_of_strings(config.get(
'restrictions', 'cellparams_pyNN'))}
if 'i_offset' in self.cellparams.keys():
print("SNN toolbox WARNING: The cell parameter 'i_offset' is "
"reserved for the biases and should not be set globally.")
self.cellparams.pop('i_offset')
self.change_padding = False
@property
def is_parallelizable(self):
return False
def add_input_layer(self, input_shape):
celltype = self.sim.SpikeSourcePoisson() if self._poisson_input \
else self.sim.SpikeSourceArray()
self.layers.append(self.sim.Population(
np.prod(input_shape[1:], dtype=np.int).item(), celltype,
label='InputLayer'))
def add_layer(self, layer):
# This implementation of ZeroPadding layers assumes symmetric single
# padding ((1, 1), (1, 1)).
# Todo: Generalize for asymmetric padding or arbitrary size.
if 'ZeroPadding' in layer.__class__.__name__:
# noinspection PyUnresolvedReferences
padding = layer.padding
if set(padding).issubset((1, (1, 1))):
self.change_padding = True
return
else:
raise NotImplementedError(
"Border_mode {} not supported.".format(padding))
# Latest Keras versions need special permutation after Flatten layers.
if 'Flatten' in layer.__class__.__name__ and \
self.config.get('input', 'model_lib') == 'keras':
self.flatten_shapes.append(
(layer.name, get_shape_from_label(self.layers[-1].label)))
return
self.layers.append(self.sim.Population(
np.prod(layer.output_shape[1:], dtype=np.int).item(),
self.sim.IF_curr_exp, self.cellparams, label=layer.name))
self.layers[-1].initialize(v=self.layers[-1].get('v_rest'))
def build_dense(self, layer):
"""
Parameters
----------
layer : keras.layers.Dense
Returns
-------
"""
if layer.activation.__name__ == 'softmax':
warnings.warn("Activation 'softmax' not implemented. Using 'relu' "
"activation instead.", RuntimeWarning)
weights, biases = layer.get_weights()
self.set_biases(np.array(biases, 'float64'))
delay = self.config.getfloat('cell', 'delay')
connections = []
if len(self.flatten_shapes) == 1:
print("Swapping data_format of Flatten layer.")
flatten_name, shape = self.flatten_shapes.pop()
if self.data_format == 'channels_last':
y_in, x_in, f_in = shape
else:
f_in, y_in, x_in = shape
for i in range(weights.shape[0]): # Input neurons
# Sweep across channel axis of feature map. Assumes that each
# consecutive input neuron lies in a different channel. This is
# the case for channels_last, but not for channels_first.
f = i % f_in
# Sweep across height of feature map. Increase y by one if all
# rows along the channel axis were seen.
y = i // (f_in * x_in)
# Sweep across width of feature map.
x = (i // f_in) % x_in
new_i = f * x_in * y_in + x_in * y + x
for j in range(weights.shape[1]): # Output neurons
connections.append((new_i, j, weights[i, j], delay))
elif len(self.flatten_shapes) > 1:
raise RuntimeWarning("Not all Flatten layers have been consumed.")
else:
for i in range(weights.shape[0]):
for j in range(weights.shape[1]):
connections.append((i, j, weights[i, j], delay))
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def build_convolution(self, layer):
from snntoolbox.simulation.utils import build_convolution
# If the parsed model contains a ZeroPadding layer, we need to tell the
# Conv layer about it here, because ZeroPadding layers are removed when
# building the pyNN model.
if self.change_padding:
if layer.padding == 'valid':
self.change_padding = False
layer.padding = 'ZeroPadding'
else:
raise NotImplementedError(
"Border_mode {} in combination with ZeroPadding is not "
"supported.".format(layer.padding))
delay = self.config.getfloat('cell', 'delay')
transpose_kernel = \
self.config.get('simulation', 'keras_backend') == 'tensorflow'
connections, biases = build_convolution(layer, delay, transpose_kernel)
self.set_biases(biases)
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def build_pooling(self, layer):
from snntoolbox.simulation.utils import build_pooling
delay = self.config.getfloat('cell', 'delay')
connections = build_pooling(layer, delay)
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def compile(self):
pass
def simulate(self, **kwargs):
data = kwargs[str('x_b_l')]
if self.data_format == 'channels_last' and data.ndim == 4:
data = np.moveaxis(data, 3, 1)
x_flat = np.ravel(data)
if self._poisson_input:
self.layers[0].set(rate=list(x_flat / self.rescale_fac * 1000))
elif self._is_aedat_input:
raise NotImplementedError
else:
spike_times = []
for amplitude in x_flat:
st = np.linspace(0, self._duration,
int(self._duration * amplitude))
spike_times.append(st)
self.layers[0].set(spike_times=spike_times)
if is_module_installed('pynn_object_serialisation'):
from pynn_object_serialisation.functions import intercept_simulator
current_time = time.strftime("_%H%M%S_%d%m%Y")
intercept_simulator(self.sim, "snn_toolbox_pynn_" + current_time)
self.sim.run(self._duration - self._dt,
callbacks=[MyProgressBar(self._dt, self._duration)])
print("\nCollecting results...")
output_b_l_t = self.get_recorded_vars(self.layers)
return output_b_l_t
def reset(self, sample_idx):
mod = self.config.getint('simulation', 'reset_between_nth_sample')
mod = mod if mod else sample_idx + 1
if sample_idx % mod == 0:
print("Resetting simulator...")
self.sim.reset()
print("Done.")
def end_sim(self):
self.sim.end()
def save(self, path, filename):
print("Saving model to {}...".format(path))
self.save_assembly(path, filename)
self.save_connections(path)
self.save_biases(path)
print("Done.\n")
def load(self, path, filename):
self.layers = self.load_assembly(path, filename)
for i in range(len(self.layers) - 1):
filepath = os.path.join(path, self.layers[i + 1].label)
assert os.path.isfile(filepath), \
"Connections were not found at specified location."
self.sim.Projection(self.layers[i], self.layers[i + 1],
self.sim.FromFileConnector(filepath))
self.layers[i + 1].set(**self.cellparams)
self.layers[i + 1].initialize(v=self.layers[i + 1].get('v_rest'))
# Biases should be already be loaded from the assembly file.
# Otherwise do this:
# filepath = os.path.join(path, self.layers[i + 1].label+'_biases')
# biases = np.loadtxt(filepath)
# self.layers[i + 1].set(i_offset=biases*self._dt/1e2)
def init_cells(self):
vars_to_record = self.get_vars_to_record()
if 'spikes' in vars_to_record:
self.layers[0].record([str('spikes')]) # Input layer has no 'v'
for layer in self.layers[1:]:
layer.record(vars_to_record)
# The spikes of the last layer are recorded by default because they
# contain the networks output (classification guess).
if 'spikes' not in vars_to_record:
vars_to_record.append(str('spikes'))
self.layers[-1].record(vars_to_record)
def set_biases(self, biases):
"""Set biases.
Notes
-----
This assumes no leak.
"""
if not np.any(biases):
return
v_rest = self.config.getfloat('cell', 'v_rest')
v_thresh = self.config.getfloat('cell', 'v_thresh')
cm = self.config.getfloat('cell', 'cm')
i_offset = biases * cm * (v_thresh - v_rest) / self._duration
self.layers[-1].set(i_offset=i_offset)
def get_vars_to_record(self):
"""Get variables to record during simulation.
Returns
-------
vars_to_record: list[str]
The names of variables to record during simulation.
"""
vars_to_record = []
if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
'hist_spikerates_activations'} & self._plot_keys) \
or 'spiketrains_n_b_l_t' in self._log_keys:
vars_to_record.append(str('spikes'))
if 'mem_n_b_l_t' in self._log_keys or 'v_mem' in self._plot_keys:
vars_to_record.append(str('v'))
return vars_to_record
def get_spiketrains(self, **kwargs):
j = self._spiketrains_container_counter
if self.spiketrains_n_b_l_t is None \
or j >= len(self.spiketrains_n_b_l_t):
return None
shape = self.spiketrains_n_b_l_t[j][0].shape
# Outer for-loop that calls this function starts with
# 'monitor_index' = 0, but this is reserved for the input and handled
# by `get_spiketrains_input()`.
i = len(self.layers) - 1 if kwargs[str('monitor_index')] == -1 else \
kwargs[str('monitor_index')] + 1
spiketrains_flat = self.layers[i].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_input(self):
shape = list(self.parsed_model.input_shape) + [self._num_timesteps]
spiketrains_flat = self.layers[0].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_output(self):
shape = [self.batch_size, self.num_classes, self._num_timesteps]
spiketrains_flat = self.layers[-1].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_vmem(self, **kwargs):
vs = kwargs[str('layer')].get_data().segments[-1].analogsignals
if len(vs) > 0:
return np.array([np.swapaxes(v, 0, 1) for v in vs])
def save_assembly(self, path, filename):
|
def save_connections(self, path):
"""Write parameters of a neural network to disk.
The parameters between two layers are saved in a text file.
They can then be used to connect pyNN populations e.g. with
``sim.Projection(layer1, layer2, sim.FromListConnector(filename))``,
where ``sim`` is a simulator supported by pyNN, e.g. Brian, NEURON, or
NEST.
Parameters
----------
path: str
Path to directory where connections are saved.
Return
------
Text files containing the layer connections. Each file is named
after the layer it connects to, e.g. ``layer2.txt`` if connecting
layer1 to layer2.
"""
print("Saving connections...")
# Iterate over layers to save each projection in a separate txt file.
for projection in self.connections:
filepath = os.path.join(path, projection.label.partition('→')[-1])
if self.config.getboolean('output', 'overwrite') or \
confirm_overwrite(filepath):
projection.save('connections', filepath)
def save_biases(self, path):
"""Write biases of a neural network to disk.
Parameters
----------
path: str
Path to directory where connections are saved.
"""
print("Saving biases...")
for layer in self.layers:
filepath = os.path.join(path, layer.label + '_biases')
if self.config.getboolean('output', 'overwrite') or \
confirm_overwrite(filepath):
if 'Input' in layer.label:
continue
try:
biases = layer.get('i_offset')
except KeyError:
continue
if np.isscalar(biases):
continue
np.savetxt(filepath, biases)
def load_assembly(self, path, filename):
"""Load the populations in an assembly.
Loads the populations in an assembly that was saved with the
`save_assembly` function.
The term "assembly" refers to pyNN internal nomenclature, where
``Assembly`` is a collection of layers (``Populations``), which in turn
consist of a number of neurons (``cells``).
Parameters
----------
path: str
Path to directory where to load model from.
filename: str
Name of file to load model from.
Returns
-------
layers: list[pyNN.Population]
List of pyNN ``Population`` objects.
"""
filepath = os.path.join(path, filename)
assert os.path.isfile(filepath), \
"Spiking neuron layers were not found at specified location."
if sys.version_info < (3,):
s = cPickle.load(open(filepath, 'rb'))
else:
s = cPickle.load(open(filepath, 'rb'), encoding='bytes')
# Iterate over populations in assembly
layers = []
for label in s['labels']:
celltype = getattr(self.sim, s[label]['celltype'])
population = self.sim.Population(s[label]['size'], celltype,
celltype.default_parameters,
structure=s[label]['structure'],
label=label)
# Set the rest of the specified variables, if any.
for variable in s['variables']:
if getattr(population, variable, None) is None:
setattr(population, variable, s[label][variable])
if label != 'InputLayer':
population.set(i_offset=s[label]['i_offset'])
layers.append(population)
return layers
def set_spiketrain_stats_input(self):
AbstractSNN.set_spiketrain_stats_input(self)
class MyProgressBar(object):
"""
A callback which draws a progress bar in the terminal.
"""
def __init__(self, interval, t_stop):
self.interval = interval
self.t_stop = t_stop
from pyNN.utility import ProgressBar
self.pb = ProgressBar(width=int(t_stop / interval), char=".")
def __call__(self, t):
self.pb(t / self.t_stop)
return t + self.interval
| """Write layers of neural network to disk.
The size, structure, labels of all the population of an assembly are
stored in a dictionary such that one can load them again using the
`load_assembly` function.
The term "assembly" refers to pyNN internal nomenclature, where
``Assembly`` is a collection of layers (``Populations``), which in turn
consist of a number of neurons (``cells``).
Parameters
----------
path: str
Path to directory where to save layers.
filename: str
Name of file to write layers to.
"""
filepath = os.path.join(path, filename)
if not (self.config.getboolean('output', 'overwrite') or
confirm_overwrite(filepath)):
return
print("Saving assembly...")
s = {}
labels = []
variables = ['size', 'structure', 'label']
for population in self.layers:
labels.append(population.label)
data = {}
for variable in variables:
if hasattr(population, variable):
data[variable] = getattr(population, variable)
if hasattr(population.celltype, 'describe'):
data['celltype'] = population.celltype.describe()
if population.label != 'InputLayer':
data['i_offset'] = population.get('i_offset')
s[population.label] = data
s['labels'] = labels # List of population labels describing the net.
s['variables'] = variables # List of variable names.
s['size'] = len(self.layers) # Number of populations in assembly.
cPickle.dump(s, open(filepath, 'wb'), -1) | identifier_body |
pyNN_target_sim.py | # -*- coding: utf-8 -*-
"""
Building and simulating spiking neural networks using
`pyNN <http://neuralensemble.org/docs/PyNN/>`_.
@author: rbodo
"""
import os
import sys
import time
import warnings
import numpy as np
from six.moves import cPickle
from snntoolbox.utils.utils import confirm_overwrite, is_module_installed
from snntoolbox.simulation.utils import AbstractSNN, get_shape_from_label
from snntoolbox.bin.utils import config_string_to_set_of_strings
class SNN(AbstractSNN):
"""Class to hold the compiled spiking neural network.
Represents the compiled spiking neural network, ready for testing in a
spiking simulator.
Attributes
----------
layers: list[pyNN.Population]
Each entry represents a layer, i.e. a population of neurons, in form of
pyNN ``Population`` objects.
connections: list[pyNN.Projection]
pyNN ``Projection`` objects representing the connections between
individual layers.
cellparams: dict
Neuron cell parameters determining properties of the spiking neurons in
pyNN simulators.
"""
def __init__(self, config, queue=None):
AbstractSNN.__init__(self, config, queue)
self.layers = []
self.connections = []
self.cellparams = {key: config.getfloat('cell', key) for key in
config_string_to_set_of_strings(config.get(
'restrictions', 'cellparams_pyNN'))}
if 'i_offset' in self.cellparams.keys():
print("SNN toolbox WARNING: The cell parameter 'i_offset' is "
"reserved for the biases and should not be set globally.")
self.cellparams.pop('i_offset')
self.change_padding = False
@property
def is_parallelizable(self):
return False
def add_input_layer(self, input_shape):
celltype = self.sim.SpikeSourcePoisson() if self._poisson_input \
else self.sim.SpikeSourceArray()
self.layers.append(self.sim.Population(
np.prod(input_shape[1:], dtype=np.int).item(), celltype,
label='InputLayer'))
def add_layer(self, layer):
# This implementation of ZeroPadding layers assumes symmetric single
# padding ((1, 1), (1, 1)).
# Todo: Generalize for asymmetric padding or arbitrary size.
if 'ZeroPadding' in layer.__class__.__name__:
# noinspection PyUnresolvedReferences
padding = layer.padding
if set(padding).issubset((1, (1, 1))):
self.change_padding = True
return
else:
raise NotImplementedError(
"Border_mode {} not supported.".format(padding))
# Latest Keras versions need special permutation after Flatten layers.
if 'Flatten' in layer.__class__.__name__ and \
self.config.get('input', 'model_lib') == 'keras':
self.flatten_shapes.append(
(layer.name, get_shape_from_label(self.layers[-1].label)))
return
self.layers.append(self.sim.Population(
np.prod(layer.output_shape[1:], dtype=np.int).item(),
self.sim.IF_curr_exp, self.cellparams, label=layer.name))
self.layers[-1].initialize(v=self.layers[-1].get('v_rest'))
def build_dense(self, layer):
"""
Parameters
----------
layer : keras.layers.Dense
Returns
-------
"""
if layer.activation.__name__ == 'softmax':
warnings.warn("Activation 'softmax' not implemented. Using 'relu' "
"activation instead.", RuntimeWarning)
weights, biases = layer.get_weights()
self.set_biases(np.array(biases, 'float64'))
delay = self.config.getfloat('cell', 'delay')
connections = []
if len(self.flatten_shapes) == 1:
print("Swapping data_format of Flatten layer.")
flatten_name, shape = self.flatten_shapes.pop()
if self.data_format == 'channels_last':
y_in, x_in, f_in = shape
else:
f_in, y_in, x_in = shape
for i in range(weights.shape[0]): # Input neurons
# Sweep across channel axis of feature map. Assumes that each
# consecutive input neuron lies in a different channel. This is
# the case for channels_last, but not for channels_first.
f = i % f_in
# Sweep across height of feature map. Increase y by one if all
# rows along the channel axis were seen.
y = i // (f_in * x_in)
# Sweep across width of feature map.
x = (i // f_in) % x_in
new_i = f * x_in * y_in + x_in * y + x
for j in range(weights.shape[1]): # Output neurons
connections.append((new_i, j, weights[i, j], delay))
elif len(self.flatten_shapes) > 1:
raise RuntimeWarning("Not all Flatten layers have been consumed.")
else:
for i in range(weights.shape[0]):
for j in range(weights.shape[1]):
connections.append((i, j, weights[i, j], delay))
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def build_convolution(self, layer):
from snntoolbox.simulation.utils import build_convolution
# If the parsed model contains a ZeroPadding layer, we need to tell the
# Conv layer about it here, because ZeroPadding layers are removed when
# building the pyNN model.
if self.change_padding:
if layer.padding == 'valid':
self.change_padding = False
layer.padding = 'ZeroPadding'
else:
raise NotImplementedError(
"Border_mode {} in combination with ZeroPadding is not "
"supported.".format(layer.padding))
delay = self.config.getfloat('cell', 'delay')
transpose_kernel = \
self.config.get('simulation', 'keras_backend') == 'tensorflow'
connections, biases = build_convolution(layer, delay, transpose_kernel)
self.set_biases(biases)
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def build_pooling(self, layer):
from snntoolbox.simulation.utils import build_pooling
delay = self.config.getfloat('cell', 'delay')
connections = build_pooling(layer, delay)
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def compile(self):
pass
def simulate(self, **kwargs):
data = kwargs[str('x_b_l')]
if self.data_format == 'channels_last' and data.ndim == 4:
data = np.moveaxis(data, 3, 1)
x_flat = np.ravel(data)
if self._poisson_input:
self.layers[0].set(rate=list(x_flat / self.rescale_fac * 1000))
elif self._is_aedat_input:
raise NotImplementedError
else:
spike_times = []
for amplitude in x_flat:
st = np.linspace(0, self._duration,
int(self._duration * amplitude))
spike_times.append(st)
self.layers[0].set(spike_times=spike_times)
if is_module_installed('pynn_object_serialisation'):
from pynn_object_serialisation.functions import intercept_simulator
current_time = time.strftime("_%H%M%S_%d%m%Y")
intercept_simulator(self.sim, "snn_toolbox_pynn_" + current_time)
self.sim.run(self._duration - self._dt,
callbacks=[MyProgressBar(self._dt, self._duration)])
print("\nCollecting results...")
output_b_l_t = self.get_recorded_vars(self.layers)
return output_b_l_t
def reset(self, sample_idx):
mod = self.config.getint('simulation', 'reset_between_nth_sample')
mod = mod if mod else sample_idx + 1
if sample_idx % mod == 0:
|
def end_sim(self):
self.sim.end()
def save(self, path, filename):
print("Saving model to {}...".format(path))
self.save_assembly(path, filename)
self.save_connections(path)
self.save_biases(path)
print("Done.\n")
def load(self, path, filename):
self.layers = self.load_assembly(path, filename)
for i in range(len(self.layers) - 1):
filepath = os.path.join(path, self.layers[i + 1].label)
assert os.path.isfile(filepath), \
"Connections were not found at specified location."
self.sim.Projection(self.layers[i], self.layers[i + 1],
self.sim.FromFileConnector(filepath))
self.layers[i + 1].set(**self.cellparams)
self.layers[i + 1].initialize(v=self.layers[i + 1].get('v_rest'))
# Biases should be already be loaded from the assembly file.
# Otherwise do this:
# filepath = os.path.join(path, self.layers[i + 1].label+'_biases')
# biases = np.loadtxt(filepath)
# self.layers[i + 1].set(i_offset=biases*self._dt/1e2)
def init_cells(self):
vars_to_record = self.get_vars_to_record()
if 'spikes' in vars_to_record:
self.layers[0].record([str('spikes')]) # Input layer has no 'v'
for layer in self.layers[1:]:
layer.record(vars_to_record)
# The spikes of the last layer are recorded by default because they
# contain the networks output (classification guess).
if 'spikes' not in vars_to_record:
vars_to_record.append(str('spikes'))
self.layers[-1].record(vars_to_record)
def set_biases(self, biases):
"""Set biases.
Notes
-----
This assumes no leak.
"""
if not np.any(biases):
return
v_rest = self.config.getfloat('cell', 'v_rest')
v_thresh = self.config.getfloat('cell', 'v_thresh')
cm = self.config.getfloat('cell', 'cm')
i_offset = biases * cm * (v_thresh - v_rest) / self._duration
self.layers[-1].set(i_offset=i_offset)
def get_vars_to_record(self):
"""Get variables to record during simulation.
Returns
-------
vars_to_record: list[str]
The names of variables to record during simulation.
"""
vars_to_record = []
if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
'hist_spikerates_activations'} & self._plot_keys) \
or 'spiketrains_n_b_l_t' in self._log_keys:
vars_to_record.append(str('spikes'))
if 'mem_n_b_l_t' in self._log_keys or 'v_mem' in self._plot_keys:
vars_to_record.append(str('v'))
return vars_to_record
def get_spiketrains(self, **kwargs):
j = self._spiketrains_container_counter
if self.spiketrains_n_b_l_t is None \
or j >= len(self.spiketrains_n_b_l_t):
return None
shape = self.spiketrains_n_b_l_t[j][0].shape
# Outer for-loop that calls this function starts with
# 'monitor_index' = 0, but this is reserved for the input and handled
# by `get_spiketrains_input()`.
i = len(self.layers) - 1 if kwargs[str('monitor_index')] == -1 else \
kwargs[str('monitor_index')] + 1
spiketrains_flat = self.layers[i].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_input(self):
shape = list(self.parsed_model.input_shape) + [self._num_timesteps]
spiketrains_flat = self.layers[0].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_output(self):
shape = [self.batch_size, self.num_classes, self._num_timesteps]
spiketrains_flat = self.layers[-1].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_vmem(self, **kwargs):
vs = kwargs[str('layer')].get_data().segments[-1].analogsignals
if len(vs) > 0:
return np.array([np.swapaxes(v, 0, 1) for v in vs])
def save_assembly(self, path, filename):
"""Write layers of neural network to disk.
The size, structure, labels of all the population of an assembly are
stored in a dictionary such that one can load them again using the
`load_assembly` function.
The term "assembly" refers to pyNN internal nomenclature, where
``Assembly`` is a collection of layers (``Populations``), which in turn
consist of a number of neurons (``cells``).
Parameters
----------
path: str
Path to directory where to save layers.
filename: str
Name of file to write layers to.
"""
filepath = os.path.join(path, filename)
if not (self.config.getboolean('output', 'overwrite') or
confirm_overwrite(filepath)):
return
print("Saving assembly...")
s = {}
labels = []
variables = ['size', 'structure', 'label']
for population in self.layers:
labels.append(population.label)
data = {}
for variable in variables:
if hasattr(population, variable):
data[variable] = getattr(population, variable)
if hasattr(population.celltype, 'describe'):
data['celltype'] = population.celltype.describe()
if population.label != 'InputLayer':
data['i_offset'] = population.get('i_offset')
s[population.label] = data
s['labels'] = labels # List of population labels describing the net.
s['variables'] = variables # List of variable names.
s['size'] = len(self.layers) # Number of populations in assembly.
cPickle.dump(s, open(filepath, 'wb'), -1)
def save_connections(self, path):
"""Write parameters of a neural network to disk.
The parameters between two layers are saved in a text file.
They can then be used to connect pyNN populations e.g. with
``sim.Projection(layer1, layer2, sim.FromListConnector(filename))``,
where ``sim`` is a simulator supported by pyNN, e.g. Brian, NEURON, or
NEST.
Parameters
----------
path: str
Path to directory where connections are saved.
Return
------
Text files containing the layer connections. Each file is named
after the layer it connects to, e.g. ``layer2.txt`` if connecting
layer1 to layer2.
"""
print("Saving connections...")
# Iterate over layers to save each projection in a separate txt file.
for projection in self.connections:
filepath = os.path.join(path, projection.label.partition('→')[-1])
if self.config.getboolean('output', 'overwrite') or \
confirm_overwrite(filepath):
projection.save('connections', filepath)
def save_biases(self, path):
"""Write biases of a neural network to disk.
Parameters
----------
path: str
Path to directory where connections are saved.
"""
print("Saving biases...")
for layer in self.layers:
filepath = os.path.join(path, layer.label + '_biases')
if self.config.getboolean('output', 'overwrite') or \
confirm_overwrite(filepath):
if 'Input' in layer.label:
continue
try:
biases = layer.get('i_offset')
except KeyError:
continue
if np.isscalar(biases):
continue
np.savetxt(filepath, biases)
def load_assembly(self, path, filename):
"""Load the populations in an assembly.
Loads the populations in an assembly that was saved with the
`save_assembly` function.
The term "assembly" refers to pyNN internal nomenclature, where
``Assembly`` is a collection of layers (``Populations``), which in turn
consist of a number of neurons (``cells``).
Parameters
----------
path: str
Path to directory where to load model from.
filename: str
Name of file to load model from.
Returns
-------
layers: list[pyNN.Population]
List of pyNN ``Population`` objects.
"""
filepath = os.path.join(path, filename)
assert os.path.isfile(filepath), \
"Spiking neuron layers were not found at specified location."
if sys.version_info < (3,):
s = cPickle.load(open(filepath, 'rb'))
else:
s = cPickle.load(open(filepath, 'rb'), encoding='bytes')
# Iterate over populations in assembly
layers = []
for label in s['labels']:
celltype = getattr(self.sim, s[label]['celltype'])
population = self.sim.Population(s[label]['size'], celltype,
celltype.default_parameters,
structure=s[label]['structure'],
label=label)
# Set the rest of the specified variables, if any.
for variable in s['variables']:
if getattr(population, variable, None) is None:
setattr(population, variable, s[label][variable])
if label != 'InputLayer':
population.set(i_offset=s[label]['i_offset'])
layers.append(population)
return layers
def set_spiketrain_stats_input(self):
AbstractSNN.set_spiketrain_stats_input(self)
class MyProgressBar(object):
"""
A callback which draws a progress bar in the terminal.
"""
def __init__(self, interval, t_stop):
self.interval = interval
self.t_stop = t_stop
from pyNN.utility import ProgressBar
self.pb = ProgressBar(width=int(t_stop / interval), char=".")
def __call__(self, t):
self.pb(t / self.t_stop)
return t + self.interval
| print("Resetting simulator...")
self.sim.reset()
print("Done.") | conditional_block |
pyNN_target_sim.py | # -*- coding: utf-8 -*-
"""
Building and simulating spiking neural networks using
`pyNN <http://neuralensemble.org/docs/PyNN/>`_.
@author: rbodo
"""
import os
import sys
import time
import warnings
import numpy as np
from six.moves import cPickle
from snntoolbox.utils.utils import confirm_overwrite, is_module_installed
from snntoolbox.simulation.utils import AbstractSNN, get_shape_from_label
from snntoolbox.bin.utils import config_string_to_set_of_strings
class SNN(AbstractSNN):
"""Class to hold the compiled spiking neural network.
Represents the compiled spiking neural network, ready for testing in a
spiking simulator.
Attributes
----------
layers: list[pyNN.Population]
Each entry represents a layer, i.e. a population of neurons, in form of
pyNN ``Population`` objects.
connections: list[pyNN.Projection]
pyNN ``Projection`` objects representing the connections between
individual layers.
cellparams: dict
Neuron cell parameters determining properties of the spiking neurons in
pyNN simulators.
"""
def __init__(self, config, queue=None):
AbstractSNN.__init__(self, config, queue)
self.layers = []
self.connections = []
self.cellparams = {key: config.getfloat('cell', key) for key in
config_string_to_set_of_strings(config.get(
'restrictions', 'cellparams_pyNN'))}
if 'i_offset' in self.cellparams.keys():
print("SNN toolbox WARNING: The cell parameter 'i_offset' is "
"reserved for the biases and should not be set globally.")
self.cellparams.pop('i_offset')
self.change_padding = False
@property
def is_parallelizable(self):
return False
def add_input_layer(self, input_shape):
celltype = self.sim.SpikeSourcePoisson() if self._poisson_input \
else self.sim.SpikeSourceArray()
self.layers.append(self.sim.Population(
np.prod(input_shape[1:], dtype=np.int).item(), celltype,
label='InputLayer'))
def add_layer(self, layer):
# This implementation of ZeroPadding layers assumes symmetric single
# padding ((1, 1), (1, 1)).
# Todo: Generalize for asymmetric padding or arbitrary size.
if 'ZeroPadding' in layer.__class__.__name__:
# noinspection PyUnresolvedReferences
padding = layer.padding
if set(padding).issubset((1, (1, 1))):
self.change_padding = True
return
else:
raise NotImplementedError(
"Border_mode {} not supported.".format(padding))
# Latest Keras versions need special permutation after Flatten layers.
if 'Flatten' in layer.__class__.__name__ and \
self.config.get('input', 'model_lib') == 'keras':
self.flatten_shapes.append(
(layer.name, get_shape_from_label(self.layers[-1].label)))
return
self.layers.append(self.sim.Population(
np.prod(layer.output_shape[1:], dtype=np.int).item(),
self.sim.IF_curr_exp, self.cellparams, label=layer.name))
self.layers[-1].initialize(v=self.layers[-1].get('v_rest'))
def build_dense(self, layer):
"""
Parameters
----------
layer : keras.layers.Dense
Returns
-------
"""
if layer.activation.__name__ == 'softmax':
warnings.warn("Activation 'softmax' not implemented. Using 'relu' "
"activation instead.", RuntimeWarning)
weights, biases = layer.get_weights()
self.set_biases(np.array(biases, 'float64'))
delay = self.config.getfloat('cell', 'delay')
connections = []
if len(self.flatten_shapes) == 1:
print("Swapping data_format of Flatten layer.")
flatten_name, shape = self.flatten_shapes.pop()
if self.data_format == 'channels_last':
y_in, x_in, f_in = shape
else:
f_in, y_in, x_in = shape
for i in range(weights.shape[0]): # Input neurons
# Sweep across channel axis of feature map. Assumes that each
# consecutive input neuron lies in a different channel. This is
# the case for channels_last, but not for channels_first.
f = i % f_in
# Sweep across height of feature map. Increase y by one if all
# rows along the channel axis were seen.
y = i // (f_in * x_in)
# Sweep across width of feature map.
x = (i // f_in) % x_in
new_i = f * x_in * y_in + x_in * y + x
for j in range(weights.shape[1]): # Output neurons
connections.append((new_i, j, weights[i, j], delay))
elif len(self.flatten_shapes) > 1:
raise RuntimeWarning("Not all Flatten layers have been consumed.")
else:
for i in range(weights.shape[0]):
for j in range(weights.shape[1]):
connections.append((i, j, weights[i, j], delay))
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def build_convolution(self, layer):
from snntoolbox.simulation.utils import build_convolution
# If the parsed model contains a ZeroPadding layer, we need to tell the
# Conv layer about it here, because ZeroPadding layers are removed when
# building the pyNN model.
if self.change_padding:
if layer.padding == 'valid':
self.change_padding = False
layer.padding = 'ZeroPadding'
else:
raise NotImplementedError(
"Border_mode {} in combination with ZeroPadding is not "
"supported.".format(layer.padding))
delay = self.config.getfloat('cell', 'delay')
transpose_kernel = \
self.config.get('simulation', 'keras_backend') == 'tensorflow'
connections, biases = build_convolution(layer, delay, transpose_kernel)
self.set_biases(biases)
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def build_pooling(self, layer):
from snntoolbox.simulation.utils import build_pooling
delay = self.config.getfloat('cell', 'delay')
connections = build_pooling(layer, delay)
if self.config.getboolean('tools', 'simulate'):
self.connections.append(self.sim.Projection(
self.layers[-2], self.layers[-1],
self.sim.FromListConnector(connections, ['weight', 'delay'])))
def compile(self):
pass
def simulate(self, **kwargs):
data = kwargs[str('x_b_l')]
if self.data_format == 'channels_last' and data.ndim == 4:
data = np.moveaxis(data, 3, 1)
x_flat = np.ravel(data)
if self._poisson_input:
self.layers[0].set(rate=list(x_flat / self.rescale_fac * 1000))
elif self._is_aedat_input:
raise NotImplementedError
else:
spike_times = []
for amplitude in x_flat:
st = np.linspace(0, self._duration,
int(self._duration * amplitude))
spike_times.append(st)
self.layers[0].set(spike_times=spike_times)
if is_module_installed('pynn_object_serialisation'):
from pynn_object_serialisation.functions import intercept_simulator
current_time = time.strftime("_%H%M%S_%d%m%Y")
intercept_simulator(self.sim, "snn_toolbox_pynn_" + current_time)
self.sim.run(self._duration - self._dt,
callbacks=[MyProgressBar(self._dt, self._duration)])
print("\nCollecting results...")
output_b_l_t = self.get_recorded_vars(self.layers)
return output_b_l_t
def reset(self, sample_idx):
mod = self.config.getint('simulation', 'reset_between_nth_sample')
mod = mod if mod else sample_idx + 1
if sample_idx % mod == 0:
print("Resetting simulator...")
self.sim.reset()
print("Done.")
def end_sim(self):
self.sim.end()
def save(self, path, filename):
print("Saving model to {}...".format(path))
self.save_assembly(path, filename)
self.save_connections(path)
self.save_biases(path)
print("Done.\n")
def load(self, path, filename):
self.layers = self.load_assembly(path, filename)
for i in range(len(self.layers) - 1):
filepath = os.path.join(path, self.layers[i + 1].label)
assert os.path.isfile(filepath), \
"Connections were not found at specified location."
self.sim.Projection(self.layers[i], self.layers[i + 1],
self.sim.FromFileConnector(filepath))
self.layers[i + 1].set(**self.cellparams)
self.layers[i + 1].initialize(v=self.layers[i + 1].get('v_rest'))
# Biases should be already be loaded from the assembly file.
# Otherwise do this:
# filepath = os.path.join(path, self.layers[i + 1].label+'_biases')
# biases = np.loadtxt(filepath)
# self.layers[i + 1].set(i_offset=biases*self._dt/1e2)
def init_cells(self):
vars_to_record = self.get_vars_to_record()
if 'spikes' in vars_to_record:
self.layers[0].record([str('spikes')]) # Input layer has no 'v'
for layer in self.layers[1:]:
layer.record(vars_to_record)
# The spikes of the last layer are recorded by default because they
# contain the networks output (classification guess).
if 'spikes' not in vars_to_record:
vars_to_record.append(str('spikes'))
self.layers[-1].record(vars_to_record)
def set_biases(self, biases):
"""Set biases.
Notes
-----
This assumes no leak.
"""
if not np.any(biases):
return
v_rest = self.config.getfloat('cell', 'v_rest')
v_thresh = self.config.getfloat('cell', 'v_thresh')
cm = self.config.getfloat('cell', 'cm')
i_offset = biases * cm * (v_thresh - v_rest) / self._duration
self.layers[-1].set(i_offset=i_offset)
def get_vars_to_record(self):
"""Get variables to record during simulation.
Returns
-------
vars_to_record: list[str]
The names of variables to record during simulation.
"""
vars_to_record = []
if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
'hist_spikerates_activations'} & self._plot_keys) \
or 'spiketrains_n_b_l_t' in self._log_keys:
vars_to_record.append(str('spikes'))
if 'mem_n_b_l_t' in self._log_keys or 'v_mem' in self._plot_keys:
vars_to_record.append(str('v'))
return vars_to_record
def get_spiketrains(self, **kwargs):
j = self._spiketrains_container_counter
if self.spiketrains_n_b_l_t is None \
or j >= len(self.spiketrains_n_b_l_t):
return None
shape = self.spiketrains_n_b_l_t[j][0].shape
# Outer for-loop that calls this function starts with
# 'monitor_index' = 0, but this is reserved for the input and handled
# by `get_spiketrains_input()`.
i = len(self.layers) - 1 if kwargs[str('monitor_index')] == -1 else \
kwargs[str('monitor_index')] + 1
spiketrains_flat = self.layers[i].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_input(self):
shape = list(self.parsed_model.input_shape) + [self._num_timesteps]
spiketrains_flat = self.layers[0].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_spiketrains_output(self):
shape = [self.batch_size, self.num_classes, self._num_timesteps]
spiketrains_flat = self.layers[-1].get_data().segments[-1].spiketrains
spiketrains_b_l_t = self.reshape_flattened_spiketrains(
spiketrains_flat, shape)
return spiketrains_b_l_t
def get_vmem(self, **kwargs):
vs = kwargs[str('layer')].get_data().segments[-1].analogsignals
if len(vs) > 0:
return np.array([np.swapaxes(v, 0, 1) for v in vs])
def save_assembly(self, path, filename):
"""Write layers of neural network to disk.
The size, structure, labels of all the population of an assembly are
stored in a dictionary such that one can load them again using the
`load_assembly` function.
The term "assembly" refers to pyNN internal nomenclature, where
``Assembly`` is a collection of layers (``Populations``), which in turn
consist of a number of neurons (``cells``).
Parameters
----------
path: str
Path to directory where to save layers.
filename: str
Name of file to write layers to.
"""
filepath = os.path.join(path, filename)
if not (self.config.getboolean('output', 'overwrite') or
confirm_overwrite(filepath)):
return
print("Saving assembly...")
s = {}
labels = []
variables = ['size', 'structure', 'label']
for population in self.layers:
labels.append(population.label)
data = {}
for variable in variables:
if hasattr(population, variable):
data[variable] = getattr(population, variable)
if hasattr(population.celltype, 'describe'):
data['celltype'] = population.celltype.describe()
if population.label != 'InputLayer':
data['i_offset'] = population.get('i_offset')
s[population.label] = data
s['labels'] = labels # List of population labels describing the net.
s['variables'] = variables # List of variable names.
s['size'] = len(self.layers) # Number of populations in assembly.
cPickle.dump(s, open(filepath, 'wb'), -1)
def save_connections(self, path):
"""Write parameters of a neural network to disk.
The parameters between two layers are saved in a text file.
They can then be used to connect pyNN populations e.g. with
``sim.Projection(layer1, layer2, sim.FromListConnector(filename))``,
where ``sim`` is a simulator supported by pyNN, e.g. Brian, NEURON, or | ----------
path: str
Path to directory where connections are saved.
Return
------
Text files containing the layer connections. Each file is named
after the layer it connects to, e.g. ``layer2.txt`` if connecting
layer1 to layer2.
"""
print("Saving connections...")
# Iterate over layers to save each projection in a separate txt file.
for projection in self.connections:
filepath = os.path.join(path, projection.label.partition('→')[-1])
if self.config.getboolean('output', 'overwrite') or \
confirm_overwrite(filepath):
projection.save('connections', filepath)
def save_biases(self, path):
"""Write biases of a neural network to disk.
Parameters
----------
path: str
Path to directory where connections are saved.
"""
print("Saving biases...")
for layer in self.layers:
filepath = os.path.join(path, layer.label + '_biases')
if self.config.getboolean('output', 'overwrite') or \
confirm_overwrite(filepath):
if 'Input' in layer.label:
continue
try:
biases = layer.get('i_offset')
except KeyError:
continue
if np.isscalar(biases):
continue
np.savetxt(filepath, biases)
def load_assembly(self, path, filename):
"""Load the populations in an assembly.
Loads the populations in an assembly that was saved with the
`save_assembly` function.
The term "assembly" refers to pyNN internal nomenclature, where
``Assembly`` is a collection of layers (``Populations``), which in turn
consist of a number of neurons (``cells``).
Parameters
----------
path: str
Path to directory where to load model from.
filename: str
Name of file to load model from.
Returns
-------
layers: list[pyNN.Population]
List of pyNN ``Population`` objects.
"""
filepath = os.path.join(path, filename)
assert os.path.isfile(filepath), \
"Spiking neuron layers were not found at specified location."
if sys.version_info < (3,):
s = cPickle.load(open(filepath, 'rb'))
else:
s = cPickle.load(open(filepath, 'rb'), encoding='bytes')
# Iterate over populations in assembly
layers = []
for label in s['labels']:
celltype = getattr(self.sim, s[label]['celltype'])
population = self.sim.Population(s[label]['size'], celltype,
celltype.default_parameters,
structure=s[label]['structure'],
label=label)
# Set the rest of the specified variables, if any.
for variable in s['variables']:
if getattr(population, variable, None) is None:
setattr(population, variable, s[label][variable])
if label != 'InputLayer':
population.set(i_offset=s[label]['i_offset'])
layers.append(population)
return layers
def set_spiketrain_stats_input(self):
AbstractSNN.set_spiketrain_stats_input(self)
class MyProgressBar(object):
"""
A callback which draws a progress bar in the terminal.
"""
def __init__(self, interval, t_stop):
self.interval = interval
self.t_stop = t_stop
from pyNN.utility import ProgressBar
self.pb = ProgressBar(width=int(t_stop / interval), char=".")
def __call__(self, t):
self.pb(t / self.t_stop)
return t + self.interval | NEST.
Parameters | random_line_split |
models.py | # DeerTrees (Django App)
# By Natasha L.
# www.lupinia.net | github.com/lupinia
#
# =================
# Models
# =================
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.html import strip_tags
from django.utils.text import slugify
from datetime import timedelta
from mptt.models import MPTTModel, TreeForeignKey
from awi_utils.utils import format_html, summarize
from awi_access.models import access_control
def viewtype_options():
blocks_map = settings.DEERTREES_BLOCK_MAP
viewtypes = []
for map_name, map in blocks_map.iteritems():
if map.get('meta',{}).get('option_name',False) and map.get('meta',{}).get('selectable',True):
viewtypes.append((map_name, map.get('meta',{}).get('option_name',map_name),))
return viewtypes
class category(MPTTModel, access_control):
CONTENT_SUMMARY_CHOICES = (
('misc', 'Miscellaneous'),
('image', 'Images/Photos'),
('page', 'Writing'),
('link', 'External Links'),
)
title = models.CharField(max_length=60)
slug = models.SlugField()
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
summary = models.CharField(max_length=255, null=True, blank=True)
desc = models.TextField(null=True, blank=True, verbose_name='description body text')
view_type = models.CharField(choices=viewtype_options(), max_length=15, default='default', help_text='Determines the placement of content when this category is displayed.')
sitemap_include = models.BooleanField(default=True, verbose_name='include in sitemap', help_text='Check this box to include this category in sitemap views.')
trash = models.BooleanField(default=False, db_index=True, verbose_name='recycle bin', help_text='System field: Indicates whether this category is the "recycle bin" for deleted items.')
background_tag = models.ForeignKey('sunset.background_tag', null=True, blank=True, on_delete=models.SET_NULL, help_text='Set this to indicate the preferred background image themes for this category.')
icon = models.ForeignKey('sunset.image_asset', null=True, blank=True, on_delete=models.SET_NULL, help_text='System field: Image asset used as a thumbnail for this category.')
icon_manual = models.BooleanField(default=False, db_index=True, help_text='System field: Indicates whether the Icon field was set manually; if so, it will not be replaced by the automatic thumbnail assignment script.')
content_summary = models.CharField(max_length=20, default='misc', choices=CONTENT_SUMMARY_CHOICES, help_text='System field: Stores the main content type for this category, used to display an icon when no image asset is selected. Will be set by the automatic thumbnail assignment script.')
always_fresh = models.BooleanField(default=False, blank=True, help_text='If checked, the "old content" note will not be added to older content in this category. Useful for things like policy directories.')
cached_url = models.CharField(max_length=255, null=True, blank=True, unique=True, help_text='System field: Full unique slug for this category, including all parents.')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('category', kwargs={'cached_url':self.cached_url,})
@property
def content_summary_choices_simplified(self):
choices_simplified = []
for choice in self.CONTENT_SUMMARY_CHOICES:
choices_simplified.append(choice[0])
return choices_simplified
def set_content_summary(self, summary='misc'):
if summary != self.content_summary:
choices_simplified = self.content_summary_choices_simplified
if summary in choices_simplified:
self.content_summary = summary
self.save()
else:
if self.content_summary != 'misc':
self.content_summary = 'misc'
self.save()
return self.content_summary
@property
def body_html(self):
if self.desc:
return format_html(self.desc)
else:
return format_html(self.summary)
@property
def icon_url(self):
if self.icon:
return self.icon.get_url()
elif self.mature:
return "%simages/icons/mature128.png" % settings.STATIC_URL
else:
return "%simages/icons/default-category-%s-128.png" % (settings.STATIC_URL, self.content_summary)
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, summary=self.summary, length=length, prefer_long=True)
else:
return summarize(body=self.desc, summary=self.summary, length=length)
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
def save(self, *args, **kwargs):
if self.parent:
self.cached_url = '%s/%s' % (self.parent.cached_url, self.slug)
else:
self.cached_url = self.slug
super(category, self).save(*args, **kwargs)
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
canview = self.can_view(request)
if not canview[0]:
return canview
else:
return super(category, self).can_edit(request, perm_check='deertrees.change_category')
return (False,'')
class MPTTMeta:
order_insertion_by = ['title']
class Meta:
verbose_name_plural = 'categories'
class tag(models.Model):
title = models.CharField(max_length=200,null=True,blank=True)
slug = models.SlugField(max_length=200,unique=True)
desc = models.TextField(null=True,blank=True)
view_type = models.CharField(choices=viewtype_options(), max_length=15, default='default', help_text='Determines the placement of content when this tag is displayed.')
sitemap_include = models.BooleanField(default=True, verbose_name='include in sitemap', help_text='Check this box to include this tag in sitemap views.')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
@property
def display_title(self):
if self.title:
return self.title
else:
return self.slug
def __unicode__(self):
return self.display_title
def get_absolute_url(self):
return reverse('tag', kwargs={'slug':self.slug,})
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
return (request.user.has_perm('deertrees.change_tag'), 'access_perms')
return (False,'')
@property
def synonym_list(self):
sluglist = []
if self.title:
sluglist.append(self.slug)
synonyms = self.synonyms.all().values_list('slug', flat=True)
if synonyms:
sluglist += list(synonyms)
return sluglist
@property
def body_html(self):
return format_html(self.desc)
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, length=length, prefer_long=True, fallback='')
else:
return summarize(body=self.desc, length=length, fallback='')
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
class Meta:
ordering = ['slug',]
class tag_synonym(models.Model):
parent = models.ForeignKey(tag, on_delete=models.CASCADE, related_name='synonyms')
slug = models.SlugField(max_length=200,unique=True)
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
def get_absolute_url(self):
return reverse('tag', kwargs={'slug':self.parent.slug,})
def __unicode__(self):
return self.slug
class Meta:
ordering = ['slug',]
class external_link_type(models.Model):
name = models.CharField(max_length=200, verbose_name='site name')
label = models.CharField(max_length=200, verbose_name='link label')
icon = models.ImageField(upload_to='linkicons_ext', null=True, blank=True)
url_format = models.CharField(max_length=250, blank=True, null=True, verbose_name='URL format', help_text='Use <id> to create a placeholder for remote_id on links of this type.')
featured = models.BooleanField(db_index=True, blank=True, default=False)
public = models.BooleanField(db_index=True, blank=True, default=True)
notes = models.TextField(null=True, blank=True)
sites = models.ManyToManyField(Site, db_index=True, help_text='Sites/domains on which this item will appear.')
def __unicode__(self):
return self.name
@property
def icon_url(self):
if self.icon:
return "%s%s" % (settings.MEDIA_URL,self.icon.name)
else:
return "%simages/icons/default-link-32.png" % settings.STATIC_URL
class Meta:
verbose_name = 'external platform'
class external_link(models.Model):
link_type = models.ForeignKey(external_link_type, on_delete=models.CASCADE, related_name='links', verbose_name='platform')
parent = models.ForeignKey('leaf', on_delete=models.CASCADE, related_name='external_links')
full_url = models.URLField(max_length=500, blank=True, null=True, verbose_name='URL')
remote_id = models.CharField(max_length=250, blank=True, null=True, verbose_name='remote object ID')
label_override = models.CharField(max_length=250, blank=True, null=True, verbose_name='label override')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
published = models.BooleanField(db_index=True, blank=True, default=False)
automated = models.BooleanField(db_index=True, blank=True, default=False)
notes = models.TextField(null=True, blank=True)
def __unicode__(self):
return '%s: %s' % (self.link_type.name, unicode(self.parent))
def get_absolute_url(self):
return self.url
@property
def url(self):
if self.full_url:
return self.full_url
elif self.remote_id and self.link_type.url_format:
return self.link_type.url_format.replace('<id>', self.remote_id)
else:
return ''
@property
def label(self):
if self.label_override:
return self.label_override
else:
return self.link_type.label
def clean(self):
if not self.full_url and not self.remote_id:
raise ValidationError('Either a full URL or a remote ID are required.')
if not self.link_type.url_format and not self.full_url:
raise ValidationError('A full URL is required for this link type')
return super(external_link,self).clean()
class Meta:
verbose_name = 'external platform link'
ordering = ['-link_type__featured']
# This model has been modified for the Awi website, and requires the Awi Access app
# This is a single categorized node; everything else that belongs to a category should extend this class
class leaf(access_control):
TIMEDISP_OPTIONS = (('post','Posted'),('mod','Modified'))
author_override = models.CharField(max_length=100, null=True, blank=True, help_text="If this was written by a guest author, enter their name here. Enter 'none' to hide the author info from display (only use this for things like system directories and site policies where authorship is irrelevant).")
cat = models.ForeignKey(category, null=True, blank=True, on_delete=models.PROTECT, verbose_name='category', related_name='leaves')
tags = models.ManyToManyField(tag, blank=True, related_name='leaves')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created', help_text='Set this to a future date to schedule it.')
timedisp = models.CharField(max_length=10, choices=TIMEDISP_OPTIONS, default='post', verbose_name='preferred timestamp', help_text='Determines which timestamp (modified, or created) will be publicly displayed. The other option will only be visible to users who can edit this item.')
type = models.CharField(max_length=20, default='unknown', db_index=True, help_text='System field: Indicates which model this leaf is.')
def __unicode__(self):
return '%s: %d' % (self.type.capitalize(), self.pk)
# An extension of get_absolute_url() to include the domain
def get_complete_url(self, request=None):
if request:
domain = request.get_host()
else:
primary_site = self.sites.all().order_by('pk').first()
if not primary_site:
primary_site = Site.objects.get(pk=settings.SITE_ID)
domain = primary_site.domain
if 'www' not in domain:
domain = 'www.%s' % domain
return 'https://%s%s' % (domain, self.get_absolute_url())
def save(self, *args, **kwargs):
if not self.pk:
self.type = self.__class__.__name__
super(leaf, self).save(*args, **kwargs)
def scheduled(self):
if self.published and self.timestamp_post > timezone.now():
return True
else:
return False
def can_view(self, request=False):
if not request:
return (False,'access_norequest')
public_check = self.is_public()
if public_check[0]:
return (True, '')
else:
canview = super(leaf, self).can_view(request)
if canview[0] and self.scheduled() and (self.owner != request.user or not request.user.has_perm('deertrees.change_leaf')):
canview = (False,'access_404') # If it's scheduled, and we don't have elevated privileges, it doesn't exist.
return canview
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
canview = self.can_view(request)
if not canview[0]:
return canview
else:
return super(leaf, self).can_edit(request, perm_check='deertrees.change_leaf')
return (False,'')
def is_public(self):
public, restrictions = super(leaf, self).is_public()
if self.scheduled():
public = False
restrictions.append('Scheduled future postdate')
return (public, restrictions)
# Helper method for extracting a reason for non-public status that's easier to work with programmaticly
@property
def restriction(self):
cur_restriction = super(leaf, self).restriction
if self.scheduled() and not self.is_public()[0]:
return 'scheduled'
else:
return cur_restriction
def tag_item(self, taglist):
return_data = {'skipped':[], 'added':[], 'created':[]}
if ', ' in taglist:
new_tags = taglist.split(', ')
elif ',' in taglist:
new_tags = taglist.split(',')
else:
new_tags = [taglist,]
old_tags = {}
cur_tags = self.tags.all()
if cur_tags:
for old_tag in cur_tags:
old_tags[old_tag.slug] = old_tag
new_tag_objs = []
for new_tag in new_tags:
if old_tags.get(new_tag, False):
return_data['skipped'].append(new_tag)
else:
new_tag = slugify(new_tag)
new_tag_obj = tag.objects.get_or_create(slug=new_tag)
new_tag_objs.append(new_tag_obj[0])
if new_tag_obj[1]:
return_data['created'].append(new_tag)
else:
return_data['added'].append(new_tag)
if new_tag_objs:
self.tags.add(*new_tag_objs)
return return_data
def display_times(self):
return_times = [{},{}]
if self.timedisp == 'post':
return_mod = 1
return_post = 0
else:
return_mod = 0
return_post = 1
return_times[return_post]['timestamp'] = self.timestamp_post
return_times[return_mod]['timestamp'] = self.timestamp_mod
return_times[return_post]['label'] = 'Posted'
return_times[return_mod]['label'] = 'Updated'
return return_times
def get_links(self, request=False):
link_query = self.external_links.select_related('link_type')
if request:
if self.can_edit(request)[0]:
return link_query.all()
elif request.user.is_authenticated():
return link_query.filter(link_type__sites__id=settings.SITE_ID, published=True)
return link_query.filter(link_type__sites__id=settings.SITE_ID, published=True, link_type__public=True)
@property
def timestamp(self):
if self.timedisp == 'post':
return self.timestamp_post
else:
return self.timestamp_mod
@property
def rss_description(self):
return 'No Description'
@property
def is_recent(self):
if self.timestamp > (timezone.now() - timedelta(days=30*6)):
return True
else:
return False
@property
def admin_owned(self):
if self.owner.pk == settings.SITE_OWNER_ACCOUNT_ID and not self.author_override:
|
else:
return False
@property
def is_old(self):
if self.cat.always_fresh or not self.admin_owned:
return False
else:
if self.timestamp_mod < (timezone.now() - timedelta(days=365*10)):
return True
elif (self.timestamp_post < (timezone.now() - timedelta(days=365*10))) and (self.timestamp_mod < (timezone.now() - timedelta(days=365*2))):
return True
else:
return False
@property
def author(self):
if self.author_override:
if self.author_override.lower() == 'none':
return ''
else:
return self.author_override
else:
if self.owner.get_full_name():
return self.owner.get_full_name()
else:
return self.owner.get_username()
@property
def tags_list(self):
return self.tags.all().values_list('slug', flat=True)
# Create a leaf that links to something else that isn't part of this category system.
# Handy for things like third-party apps, or self-contained apps with their own organizational structure.
class special_feature(leaf):
url = models.CharField(max_length=60, unique=True, verbose_name='URL', help_text='Similar to a Slug field, but can accept any character, to make it easier to link to non-DeerTrees URLs.')
url_reverse = models.CharField(max_length=250, null=True, blank=True, help_text='Enter the keyword used by Django to look up this special feature in urls.py.')
title = models.CharField(max_length=60)
desc = models.CharField(max_length=255, null=True, blank=True, verbose_name='Description')
def get_absolute_url(self):
return '%s%s' % (reverse('category', kwargs={'cached_url':self.cat.cached_url,}), self.url)
def __unicode__(self):
return self.title
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, length=length, prefer_long=True)
else:
return summarize(body=self.desc, length=length)
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
class Meta:
verbose_name = 'special feature'
| return True | conditional_block |
models.py | # DeerTrees (Django App)
# By Natasha L.
# www.lupinia.net | github.com/lupinia
#
# =================
# Models
# =================
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.html import strip_tags
from django.utils.text import slugify
from datetime import timedelta
from mptt.models import MPTTModel, TreeForeignKey
from awi_utils.utils import format_html, summarize
from awi_access.models import access_control
def viewtype_options():
blocks_map = settings.DEERTREES_BLOCK_MAP
viewtypes = []
for map_name, map in blocks_map.iteritems():
if map.get('meta',{}).get('option_name',False) and map.get('meta',{}).get('selectable',True):
viewtypes.append((map_name, map.get('meta',{}).get('option_name',map_name),))
return viewtypes
class category(MPTTModel, access_control):
CONTENT_SUMMARY_CHOICES = (
('misc', 'Miscellaneous'),
('image', 'Images/Photos'),
('page', 'Writing'),
('link', 'External Links'),
)
title = models.CharField(max_length=60)
slug = models.SlugField()
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
summary = models.CharField(max_length=255, null=True, blank=True)
desc = models.TextField(null=True, blank=True, verbose_name='description body text')
view_type = models.CharField(choices=viewtype_options(), max_length=15, default='default', help_text='Determines the placement of content when this category is displayed.')
sitemap_include = models.BooleanField(default=True, verbose_name='include in sitemap', help_text='Check this box to include this category in sitemap views.')
trash = models.BooleanField(default=False, db_index=True, verbose_name='recycle bin', help_text='System field: Indicates whether this category is the "recycle bin" for deleted items.')
background_tag = models.ForeignKey('sunset.background_tag', null=True, blank=True, on_delete=models.SET_NULL, help_text='Set this to indicate the preferred background image themes for this category.')
icon = models.ForeignKey('sunset.image_asset', null=True, blank=True, on_delete=models.SET_NULL, help_text='System field: Image asset used as a thumbnail for this category.')
icon_manual = models.BooleanField(default=False, db_index=True, help_text='System field: Indicates whether the Icon field was set manually; if so, it will not be replaced by the automatic thumbnail assignment script.')
content_summary = models.CharField(max_length=20, default='misc', choices=CONTENT_SUMMARY_CHOICES, help_text='System field: Stores the main content type for this category, used to display an icon when no image asset is selected. Will be set by the automatic thumbnail assignment script.')
always_fresh = models.BooleanField(default=False, blank=True, help_text='If checked, the "old content" note will not be added to older content in this category. Useful for things like policy directories.')
cached_url = models.CharField(max_length=255, null=True, blank=True, unique=True, help_text='System field: Full unique slug for this category, including all parents.')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('category', kwargs={'cached_url':self.cached_url,})
@property
def content_summary_choices_simplified(self):
choices_simplified = []
for choice in self.CONTENT_SUMMARY_CHOICES:
choices_simplified.append(choice[0])
return choices_simplified
def set_content_summary(self, summary='misc'):
if summary != self.content_summary:
choices_simplified = self.content_summary_choices_simplified
if summary in choices_simplified:
self.content_summary = summary
self.save()
else:
if self.content_summary != 'misc':
self.content_summary = 'misc'
self.save()
return self.content_summary
@property
def body_html(self):
if self.desc:
return format_html(self.desc)
else:
return format_html(self.summary)
@property
def icon_url(self):
if self.icon:
return self.icon.get_url()
elif self.mature:
return "%simages/icons/mature128.png" % settings.STATIC_URL
else:
return "%simages/icons/default-category-%s-128.png" % (settings.STATIC_URL, self.content_summary)
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, summary=self.summary, length=length, prefer_long=True)
else:
return summarize(body=self.desc, summary=self.summary, length=length)
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self): |
def save(self, *args, **kwargs):
if self.parent:
self.cached_url = '%s/%s' % (self.parent.cached_url, self.slug)
else:
self.cached_url = self.slug
super(category, self).save(*args, **kwargs)
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
canview = self.can_view(request)
if not canview[0]:
return canview
else:
return super(category, self).can_edit(request, perm_check='deertrees.change_category')
return (False,'')
class MPTTMeta:
order_insertion_by = ['title']
class Meta:
verbose_name_plural = 'categories'
class tag(models.Model):
title = models.CharField(max_length=200,null=True,blank=True)
slug = models.SlugField(max_length=200,unique=True)
desc = models.TextField(null=True,blank=True)
view_type = models.CharField(choices=viewtype_options(), max_length=15, default='default', help_text='Determines the placement of content when this tag is displayed.')
sitemap_include = models.BooleanField(default=True, verbose_name='include in sitemap', help_text='Check this box to include this tag in sitemap views.')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
@property
def display_title(self):
if self.title:
return self.title
else:
return self.slug
def __unicode__(self):
return self.display_title
def get_absolute_url(self):
return reverse('tag', kwargs={'slug':self.slug,})
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
return (request.user.has_perm('deertrees.change_tag'), 'access_perms')
return (False,'')
@property
def synonym_list(self):
sluglist = []
if self.title:
sluglist.append(self.slug)
synonyms = self.synonyms.all().values_list('slug', flat=True)
if synonyms:
sluglist += list(synonyms)
return sluglist
@property
def body_html(self):
return format_html(self.desc)
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, length=length, prefer_long=True, fallback='')
else:
return summarize(body=self.desc, length=length, fallback='')
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
class Meta:
ordering = ['slug',]
class tag_synonym(models.Model):
parent = models.ForeignKey(tag, on_delete=models.CASCADE, related_name='synonyms')
slug = models.SlugField(max_length=200,unique=True)
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
def get_absolute_url(self):
return reverse('tag', kwargs={'slug':self.parent.slug,})
def __unicode__(self):
return self.slug
class Meta:
ordering = ['slug',]
class external_link_type(models.Model):
name = models.CharField(max_length=200, verbose_name='site name')
label = models.CharField(max_length=200, verbose_name='link label')
icon = models.ImageField(upload_to='linkicons_ext', null=True, blank=True)
url_format = models.CharField(max_length=250, blank=True, null=True, verbose_name='URL format', help_text='Use <id> to create a placeholder for remote_id on links of this type.')
featured = models.BooleanField(db_index=True, blank=True, default=False)
public = models.BooleanField(db_index=True, blank=True, default=True)
notes = models.TextField(null=True, blank=True)
sites = models.ManyToManyField(Site, db_index=True, help_text='Sites/domains on which this item will appear.')
def __unicode__(self):
return self.name
@property
def icon_url(self):
if self.icon:
return "%s%s" % (settings.MEDIA_URL,self.icon.name)
else:
return "%simages/icons/default-link-32.png" % settings.STATIC_URL
class Meta:
verbose_name = 'external platform'
class external_link(models.Model):
link_type = models.ForeignKey(external_link_type, on_delete=models.CASCADE, related_name='links', verbose_name='platform')
parent = models.ForeignKey('leaf', on_delete=models.CASCADE, related_name='external_links')
full_url = models.URLField(max_length=500, blank=True, null=True, verbose_name='URL')
remote_id = models.CharField(max_length=250, blank=True, null=True, verbose_name='remote object ID')
label_override = models.CharField(max_length=250, blank=True, null=True, verbose_name='label override')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
published = models.BooleanField(db_index=True, blank=True, default=False)
automated = models.BooleanField(db_index=True, blank=True, default=False)
notes = models.TextField(null=True, blank=True)
def __unicode__(self):
return '%s: %s' % (self.link_type.name, unicode(self.parent))
def get_absolute_url(self):
return self.url
@property
def url(self):
if self.full_url:
return self.full_url
elif self.remote_id and self.link_type.url_format:
return self.link_type.url_format.replace('<id>', self.remote_id)
else:
return ''
@property
def label(self):
if self.label_override:
return self.label_override
else:
return self.link_type.label
def clean(self):
if not self.full_url and not self.remote_id:
raise ValidationError('Either a full URL or a remote ID are required.')
if not self.link_type.url_format and not self.full_url:
raise ValidationError('A full URL is required for this link type')
return super(external_link,self).clean()
class Meta:
verbose_name = 'external platform link'
ordering = ['-link_type__featured']
# This model has been modified for the Awi website, and requires the Awi Access app
# This is a single categorized node; everything else that belongs to a category should extend this class
class leaf(access_control):
TIMEDISP_OPTIONS = (('post','Posted'),('mod','Modified'))
author_override = models.CharField(max_length=100, null=True, blank=True, help_text="If this was written by a guest author, enter their name here. Enter 'none' to hide the author info from display (only use this for things like system directories and site policies where authorship is irrelevant).")
cat = models.ForeignKey(category, null=True, blank=True, on_delete=models.PROTECT, verbose_name='category', related_name='leaves')
tags = models.ManyToManyField(tag, blank=True, related_name='leaves')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created', help_text='Set this to a future date to schedule it.')
timedisp = models.CharField(max_length=10, choices=TIMEDISP_OPTIONS, default='post', verbose_name='preferred timestamp', help_text='Determines which timestamp (modified, or created) will be publicly displayed. The other option will only be visible to users who can edit this item.')
type = models.CharField(max_length=20, default='unknown', db_index=True, help_text='System field: Indicates which model this leaf is.')
def __unicode__(self):
return '%s: %d' % (self.type.capitalize(), self.pk)
# An extension of get_absolute_url() to include the domain
def get_complete_url(self, request=None):
if request:
domain = request.get_host()
else:
primary_site = self.sites.all().order_by('pk').first()
if not primary_site:
primary_site = Site.objects.get(pk=settings.SITE_ID)
domain = primary_site.domain
if 'www' not in domain:
domain = 'www.%s' % domain
return 'https://%s%s' % (domain, self.get_absolute_url())
def save(self, *args, **kwargs):
if not self.pk:
self.type = self.__class__.__name__
super(leaf, self).save(*args, **kwargs)
def scheduled(self):
if self.published and self.timestamp_post > timezone.now():
return True
else:
return False
def can_view(self, request=False):
if not request:
return (False,'access_norequest')
public_check = self.is_public()
if public_check[0]:
return (True, '')
else:
canview = super(leaf, self).can_view(request)
if canview[0] and self.scheduled() and (self.owner != request.user or not request.user.has_perm('deertrees.change_leaf')):
canview = (False,'access_404') # If it's scheduled, and we don't have elevated privileges, it doesn't exist.
return canview
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
canview = self.can_view(request)
if not canview[0]:
return canview
else:
return super(leaf, self).can_edit(request, perm_check='deertrees.change_leaf')
return (False,'')
def is_public(self):
public, restrictions = super(leaf, self).is_public()
if self.scheduled():
public = False
restrictions.append('Scheduled future postdate')
return (public, restrictions)
# Helper method for extracting a reason for non-public status that's easier to work with programmaticly
@property
def restriction(self):
cur_restriction = super(leaf, self).restriction
if self.scheduled() and not self.is_public()[0]:
return 'scheduled'
else:
return cur_restriction
def tag_item(self, taglist):
return_data = {'skipped':[], 'added':[], 'created':[]}
if ', ' in taglist:
new_tags = taglist.split(', ')
elif ',' in taglist:
new_tags = taglist.split(',')
else:
new_tags = [taglist,]
old_tags = {}
cur_tags = self.tags.all()
if cur_tags:
for old_tag in cur_tags:
old_tags[old_tag.slug] = old_tag
new_tag_objs = []
for new_tag in new_tags:
if old_tags.get(new_tag, False):
return_data['skipped'].append(new_tag)
else:
new_tag = slugify(new_tag)
new_tag_obj = tag.objects.get_or_create(slug=new_tag)
new_tag_objs.append(new_tag_obj[0])
if new_tag_obj[1]:
return_data['created'].append(new_tag)
else:
return_data['added'].append(new_tag)
if new_tag_objs:
self.tags.add(*new_tag_objs)
return return_data
def display_times(self):
return_times = [{},{}]
if self.timedisp == 'post':
return_mod = 1
return_post = 0
else:
return_mod = 0
return_post = 1
return_times[return_post]['timestamp'] = self.timestamp_post
return_times[return_mod]['timestamp'] = self.timestamp_mod
return_times[return_post]['label'] = 'Posted'
return_times[return_mod]['label'] = 'Updated'
return return_times
def get_links(self, request=False):
link_query = self.external_links.select_related('link_type')
if request:
if self.can_edit(request)[0]:
return link_query.all()
elif request.user.is_authenticated():
return link_query.filter(link_type__sites__id=settings.SITE_ID, published=True)
return link_query.filter(link_type__sites__id=settings.SITE_ID, published=True, link_type__public=True)
@property
def timestamp(self):
if self.timedisp == 'post':
return self.timestamp_post
else:
return self.timestamp_mod
@property
def rss_description(self):
return 'No Description'
@property
def is_recent(self):
if self.timestamp > (timezone.now() - timedelta(days=30*6)):
return True
else:
return False
@property
def admin_owned(self):
if self.owner.pk == settings.SITE_OWNER_ACCOUNT_ID and not self.author_override:
return True
else:
return False
@property
def is_old(self):
if self.cat.always_fresh or not self.admin_owned:
return False
else:
if self.timestamp_mod < (timezone.now() - timedelta(days=365*10)):
return True
elif (self.timestamp_post < (timezone.now() - timedelta(days=365*10))) and (self.timestamp_mod < (timezone.now() - timedelta(days=365*2))):
return True
else:
return False
@property
def author(self):
if self.author_override:
if self.author_override.lower() == 'none':
return ''
else:
return self.author_override
else:
if self.owner.get_full_name():
return self.owner.get_full_name()
else:
return self.owner.get_username()
@property
def tags_list(self):
return self.tags.all().values_list('slug', flat=True)
# Create a leaf that links to something else that isn't part of this category system.
# Handy for things like third-party apps, or self-contained apps with their own organizational structure.
class special_feature(leaf):
url = models.CharField(max_length=60, unique=True, verbose_name='URL', help_text='Similar to a Slug field, but can accept any character, to make it easier to link to non-DeerTrees URLs.')
url_reverse = models.CharField(max_length=250, null=True, blank=True, help_text='Enter the keyword used by Django to look up this special feature in urls.py.')
title = models.CharField(max_length=60)
desc = models.CharField(max_length=255, null=True, blank=True, verbose_name='Description')
def get_absolute_url(self):
return '%s%s' % (reverse('category', kwargs={'cached_url':self.cat.cached_url,}), self.url)
def __unicode__(self):
return self.title
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, length=length, prefer_long=True)
else:
return summarize(body=self.desc, length=length)
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
class Meta:
verbose_name = 'special feature' | return self.summary_short | random_line_split |
models.py | # DeerTrees (Django App)
# By Natasha L.
# www.lupinia.net | github.com/lupinia
#
# =================
# Models
# =================
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.html import strip_tags
from django.utils.text import slugify
from datetime import timedelta
from mptt.models import MPTTModel, TreeForeignKey
from awi_utils.utils import format_html, summarize
from awi_access.models import access_control
def viewtype_options():
blocks_map = settings.DEERTREES_BLOCK_MAP
viewtypes = []
for map_name, map in blocks_map.iteritems():
if map.get('meta',{}).get('option_name',False) and map.get('meta',{}).get('selectable',True):
viewtypes.append((map_name, map.get('meta',{}).get('option_name',map_name),))
return viewtypes
class category(MPTTModel, access_control):
CONTENT_SUMMARY_CHOICES = (
('misc', 'Miscellaneous'),
('image', 'Images/Photos'),
('page', 'Writing'),
('link', 'External Links'),
)
title = models.CharField(max_length=60)
slug = models.SlugField()
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
summary = models.CharField(max_length=255, null=True, blank=True)
desc = models.TextField(null=True, blank=True, verbose_name='description body text')
view_type = models.CharField(choices=viewtype_options(), max_length=15, default='default', help_text='Determines the placement of content when this category is displayed.')
sitemap_include = models.BooleanField(default=True, verbose_name='include in sitemap', help_text='Check this box to include this category in sitemap views.')
trash = models.BooleanField(default=False, db_index=True, verbose_name='recycle bin', help_text='System field: Indicates whether this category is the "recycle bin" for deleted items.')
background_tag = models.ForeignKey('sunset.background_tag', null=True, blank=True, on_delete=models.SET_NULL, help_text='Set this to indicate the preferred background image themes for this category.')
icon = models.ForeignKey('sunset.image_asset', null=True, blank=True, on_delete=models.SET_NULL, help_text='System field: Image asset used as a thumbnail for this category.')
icon_manual = models.BooleanField(default=False, db_index=True, help_text='System field: Indicates whether the Icon field was set manually; if so, it will not be replaced by the automatic thumbnail assignment script.')
content_summary = models.CharField(max_length=20, default='misc', choices=CONTENT_SUMMARY_CHOICES, help_text='System field: Stores the main content type for this category, used to display an icon when no image asset is selected. Will be set by the automatic thumbnail assignment script.')
always_fresh = models.BooleanField(default=False, blank=True, help_text='If checked, the "old content" note will not be added to older content in this category. Useful for things like policy directories.')
cached_url = models.CharField(max_length=255, null=True, blank=True, unique=True, help_text='System field: Full unique slug for this category, including all parents.')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('category', kwargs={'cached_url':self.cached_url,})
@property
def content_summary_choices_simplified(self):
choices_simplified = []
for choice in self.CONTENT_SUMMARY_CHOICES:
choices_simplified.append(choice[0])
return choices_simplified
def set_content_summary(self, summary='misc'):
if summary != self.content_summary:
choices_simplified = self.content_summary_choices_simplified
if summary in choices_simplified:
self.content_summary = summary
self.save()
else:
if self.content_summary != 'misc':
self.content_summary = 'misc'
self.save()
return self.content_summary
@property
def body_html(self):
if self.desc:
return format_html(self.desc)
else:
return format_html(self.summary)
@property
def icon_url(self):
if self.icon:
return self.icon.get_url()
elif self.mature:
return "%simages/icons/mature128.png" % settings.STATIC_URL
else:
return "%simages/icons/default-category-%s-128.png" % (settings.STATIC_URL, self.content_summary)
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, summary=self.summary, length=length, prefer_long=True)
else:
return summarize(body=self.desc, summary=self.summary, length=length)
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
def save(self, *args, **kwargs):
if self.parent:
self.cached_url = '%s/%s' % (self.parent.cached_url, self.slug)
else:
self.cached_url = self.slug
super(category, self).save(*args, **kwargs)
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
canview = self.can_view(request)
if not canview[0]:
return canview
else:
return super(category, self).can_edit(request, perm_check='deertrees.change_category')
return (False,'')
class MPTTMeta:
order_insertion_by = ['title']
class Meta:
verbose_name_plural = 'categories'
class tag(models.Model):
title = models.CharField(max_length=200,null=True,blank=True)
slug = models.SlugField(max_length=200,unique=True)
desc = models.TextField(null=True,blank=True)
view_type = models.CharField(choices=viewtype_options(), max_length=15, default='default', help_text='Determines the placement of content when this tag is displayed.')
sitemap_include = models.BooleanField(default=True, verbose_name='include in sitemap', help_text='Check this box to include this tag in sitemap views.')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
@property
def display_title(self):
if self.title:
return self.title
else:
return self.slug
def __unicode__(self):
return self.display_title
def get_absolute_url(self):
return reverse('tag', kwargs={'slug':self.slug,})
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
return (request.user.has_perm('deertrees.change_tag'), 'access_perms')
return (False,'')
@property
def synonym_list(self):
sluglist = []
if self.title:
sluglist.append(self.slug)
synonyms = self.synonyms.all().values_list('slug', flat=True)
if synonyms:
sluglist += list(synonyms)
return sluglist
@property
def body_html(self):
return format_html(self.desc)
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, length=length, prefer_long=True, fallback='')
else:
return summarize(body=self.desc, length=length, fallback='')
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
class Meta:
ordering = ['slug',]
class tag_synonym(models.Model):
parent = models.ForeignKey(tag, on_delete=models.CASCADE, related_name='synonyms')
slug = models.SlugField(max_length=200,unique=True)
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
def get_absolute_url(self):
return reverse('tag', kwargs={'slug':self.parent.slug,})
def __unicode__(self):
return self.slug
class Meta:
ordering = ['slug',]
class external_link_type(models.Model):
name = models.CharField(max_length=200, verbose_name='site name')
label = models.CharField(max_length=200, verbose_name='link label')
icon = models.ImageField(upload_to='linkicons_ext', null=True, blank=True)
url_format = models.CharField(max_length=250, blank=True, null=True, verbose_name='URL format', help_text='Use <id> to create a placeholder for remote_id on links of this type.')
featured = models.BooleanField(db_index=True, blank=True, default=False)
public = models.BooleanField(db_index=True, blank=True, default=True)
notes = models.TextField(null=True, blank=True)
sites = models.ManyToManyField(Site, db_index=True, help_text='Sites/domains on which this item will appear.')
def __unicode__(self):
return self.name
@property
def icon_url(self):
if self.icon:
return "%s%s" % (settings.MEDIA_URL,self.icon.name)
else:
return "%simages/icons/default-link-32.png" % settings.STATIC_URL
class Meta:
verbose_name = 'external platform'
class external_link(models.Model):
link_type = models.ForeignKey(external_link_type, on_delete=models.CASCADE, related_name='links', verbose_name='platform')
parent = models.ForeignKey('leaf', on_delete=models.CASCADE, related_name='external_links')
full_url = models.URLField(max_length=500, blank=True, null=True, verbose_name='URL')
remote_id = models.CharField(max_length=250, blank=True, null=True, verbose_name='remote object ID')
label_override = models.CharField(max_length=250, blank=True, null=True, verbose_name='label override')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
published = models.BooleanField(db_index=True, blank=True, default=False)
automated = models.BooleanField(db_index=True, blank=True, default=False)
notes = models.TextField(null=True, blank=True)
def __unicode__(self):
return '%s: %s' % (self.link_type.name, unicode(self.parent))
def get_absolute_url(self):
return self.url
@property
def url(self):
if self.full_url:
return self.full_url
elif self.remote_id and self.link_type.url_format:
return self.link_type.url_format.replace('<id>', self.remote_id)
else:
return ''
@property
def label(self):
if self.label_override:
return self.label_override
else:
return self.link_type.label
def clean(self):
if not self.full_url and not self.remote_id:
raise ValidationError('Either a full URL or a remote ID are required.')
if not self.link_type.url_format and not self.full_url:
raise ValidationError('A full URL is required for this link type')
return super(external_link,self).clean()
class Meta:
verbose_name = 'external platform link'
ordering = ['-link_type__featured']
# This model has been modified for the Awi website, and requires the Awi Access app
# This is a single categorized node; everything else that belongs to a category should extend this class
class leaf(access_control):
TIMEDISP_OPTIONS = (('post','Posted'),('mod','Modified'))
author_override = models.CharField(max_length=100, null=True, blank=True, help_text="If this was written by a guest author, enter their name here. Enter 'none' to hide the author info from display (only use this for things like system directories and site policies where authorship is irrelevant).")
cat = models.ForeignKey(category, null=True, blank=True, on_delete=models.PROTECT, verbose_name='category', related_name='leaves')
tags = models.ManyToManyField(tag, blank=True, related_name='leaves')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created', help_text='Set this to a future date to schedule it.')
timedisp = models.CharField(max_length=10, choices=TIMEDISP_OPTIONS, default='post', verbose_name='preferred timestamp', help_text='Determines which timestamp (modified, or created) will be publicly displayed. The other option will only be visible to users who can edit this item.')
type = models.CharField(max_length=20, default='unknown', db_index=True, help_text='System field: Indicates which model this leaf is.')
def __unicode__(self):
return '%s: %d' % (self.type.capitalize(), self.pk)
# An extension of get_absolute_url() to include the domain
def get_complete_url(self, request=None):
if request:
domain = request.get_host()
else:
primary_site = self.sites.all().order_by('pk').first()
if not primary_site:
primary_site = Site.objects.get(pk=settings.SITE_ID)
domain = primary_site.domain
if 'www' not in domain:
domain = 'www.%s' % domain
return 'https://%s%s' % (domain, self.get_absolute_url())
def save(self, *args, **kwargs):
if not self.pk:
self.type = self.__class__.__name__
super(leaf, self).save(*args, **kwargs)
def scheduled(self):
if self.published and self.timestamp_post > timezone.now():
return True
else:
return False
def can_view(self, request=False):
if not request:
return (False,'access_norequest')
public_check = self.is_public()
if public_check[0]:
return (True, '')
else:
canview = super(leaf, self).can_view(request)
if canview[0] and self.scheduled() and (self.owner != request.user or not request.user.has_perm('deertrees.change_leaf')):
canview = (False,'access_404') # If it's scheduled, and we don't have elevated privileges, it doesn't exist.
return canview
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
canview = self.can_view(request)
if not canview[0]:
return canview
else:
return super(leaf, self).can_edit(request, perm_check='deertrees.change_leaf')
return (False,'')
def is_public(self):
public, restrictions = super(leaf, self).is_public()
if self.scheduled():
public = False
restrictions.append('Scheduled future postdate')
return (public, restrictions)
# Helper method for extracting a reason for non-public status that's easier to work with programmaticly
@property
def restriction(self):
cur_restriction = super(leaf, self).restriction
if self.scheduled() and not self.is_public()[0]:
return 'scheduled'
else:
return cur_restriction
def tag_item(self, taglist):
return_data = {'skipped':[], 'added':[], 'created':[]}
if ', ' in taglist:
new_tags = taglist.split(', ')
elif ',' in taglist:
new_tags = taglist.split(',')
else:
new_tags = [taglist,]
old_tags = {}
cur_tags = self.tags.all()
if cur_tags:
for old_tag in cur_tags:
old_tags[old_tag.slug] = old_tag
new_tag_objs = []
for new_tag in new_tags:
if old_tags.get(new_tag, False):
return_data['skipped'].append(new_tag)
else:
new_tag = slugify(new_tag)
new_tag_obj = tag.objects.get_or_create(slug=new_tag)
new_tag_objs.append(new_tag_obj[0])
if new_tag_obj[1]:
return_data['created'].append(new_tag)
else:
return_data['added'].append(new_tag)
if new_tag_objs:
self.tags.add(*new_tag_objs)
return return_data
def display_times(self):
return_times = [{},{}]
if self.timedisp == 'post':
return_mod = 1
return_post = 0
else:
return_mod = 0
return_post = 1
return_times[return_post]['timestamp'] = self.timestamp_post
return_times[return_mod]['timestamp'] = self.timestamp_mod
return_times[return_post]['label'] = 'Posted'
return_times[return_mod]['label'] = 'Updated'
return return_times
def get_links(self, request=False):
link_query = self.external_links.select_related('link_type')
if request:
if self.can_edit(request)[0]:
return link_query.all()
elif request.user.is_authenticated():
return link_query.filter(link_type__sites__id=settings.SITE_ID, published=True)
return link_query.filter(link_type__sites__id=settings.SITE_ID, published=True, link_type__public=True)
@property
def timestamp(self):
if self.timedisp == 'post':
return self.timestamp_post
else:
return self.timestamp_mod
@property
def rss_description(self):
return 'No Description'
@property
def is_recent(self):
if self.timestamp > (timezone.now() - timedelta(days=30*6)):
return True
else:
return False
@property
def admin_owned(self):
if self.owner.pk == settings.SITE_OWNER_ACCOUNT_ID and not self.author_override:
return True
else:
return False
@property
def is_old(self):
if self.cat.always_fresh or not self.admin_owned:
return False
else:
if self.timestamp_mod < (timezone.now() - timedelta(days=365*10)):
return True
elif (self.timestamp_post < (timezone.now() - timedelta(days=365*10))) and (self.timestamp_mod < (timezone.now() - timedelta(days=365*2))):
return True
else:
return False
@property
def author(self):
if self.author_override:
if self.author_override.lower() == 'none':
return ''
else:
return self.author_override
else:
if self.owner.get_full_name():
return self.owner.get_full_name()
else:
return self.owner.get_username()
@property
def tags_list(self):
return self.tags.all().values_list('slug', flat=True)
# Create a leaf that links to something else that isn't part of this category system.
# Handy for things like third-party apps, or self-contained apps with their own organizational structure.
class special_feature(leaf):
url = models.CharField(max_length=60, unique=True, verbose_name='URL', help_text='Similar to a Slug field, but can accept any character, to make it easier to link to non-DeerTrees URLs.')
url_reverse = models.CharField(max_length=250, null=True, blank=True, help_text='Enter the keyword used by Django to look up this special feature in urls.py.')
title = models.CharField(max_length=60)
desc = models.CharField(max_length=255, null=True, blank=True, verbose_name='Description')
def | (self):
return '%s%s' % (reverse('category', kwargs={'cached_url':self.cat.cached_url,}), self.url)
def __unicode__(self):
return self.title
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, length=length, prefer_long=True)
else:
return summarize(body=self.desc, length=length)
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
class Meta:
verbose_name = 'special feature'
| get_absolute_url | identifier_name |
models.py | # DeerTrees (Django App)
# By Natasha L.
# www.lupinia.net | github.com/lupinia
#
# =================
# Models
# =================
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.html import strip_tags
from django.utils.text import slugify
from datetime import timedelta
from mptt.models import MPTTModel, TreeForeignKey
from awi_utils.utils import format_html, summarize
from awi_access.models import access_control
def viewtype_options():
blocks_map = settings.DEERTREES_BLOCK_MAP
viewtypes = []
for map_name, map in blocks_map.iteritems():
if map.get('meta',{}).get('option_name',False) and map.get('meta',{}).get('selectable',True):
viewtypes.append((map_name, map.get('meta',{}).get('option_name',map_name),))
return viewtypes
class category(MPTTModel, access_control):
CONTENT_SUMMARY_CHOICES = (
('misc', 'Miscellaneous'),
('image', 'Images/Photos'),
('page', 'Writing'),
('link', 'External Links'),
)
title = models.CharField(max_length=60)
slug = models.SlugField()
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
summary = models.CharField(max_length=255, null=True, blank=True)
desc = models.TextField(null=True, blank=True, verbose_name='description body text')
view_type = models.CharField(choices=viewtype_options(), max_length=15, default='default', help_text='Determines the placement of content when this category is displayed.')
sitemap_include = models.BooleanField(default=True, verbose_name='include in sitemap', help_text='Check this box to include this category in sitemap views.')
trash = models.BooleanField(default=False, db_index=True, verbose_name='recycle bin', help_text='System field: Indicates whether this category is the "recycle bin" for deleted items.')
background_tag = models.ForeignKey('sunset.background_tag', null=True, blank=True, on_delete=models.SET_NULL, help_text='Set this to indicate the preferred background image themes for this category.')
icon = models.ForeignKey('sunset.image_asset', null=True, blank=True, on_delete=models.SET_NULL, help_text='System field: Image asset used as a thumbnail for this category.')
icon_manual = models.BooleanField(default=False, db_index=True, help_text='System field: Indicates whether the Icon field was set manually; if so, it will not be replaced by the automatic thumbnail assignment script.')
content_summary = models.CharField(max_length=20, default='misc', choices=CONTENT_SUMMARY_CHOICES, help_text='System field: Stores the main content type for this category, used to display an icon when no image asset is selected. Will be set by the automatic thumbnail assignment script.')
always_fresh = models.BooleanField(default=False, blank=True, help_text='If checked, the "old content" note will not be added to older content in this category. Useful for things like policy directories.')
cached_url = models.CharField(max_length=255, null=True, blank=True, unique=True, help_text='System field: Full unique slug for this category, including all parents.')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('category', kwargs={'cached_url':self.cached_url,})
@property
def content_summary_choices_simplified(self):
choices_simplified = []
for choice in self.CONTENT_SUMMARY_CHOICES:
choices_simplified.append(choice[0])
return choices_simplified
def set_content_summary(self, summary='misc'):
if summary != self.content_summary:
choices_simplified = self.content_summary_choices_simplified
if summary in choices_simplified:
self.content_summary = summary
self.save()
else:
if self.content_summary != 'misc':
self.content_summary = 'misc'
self.save()
return self.content_summary
@property
def body_html(self):
if self.desc:
return format_html(self.desc)
else:
return format_html(self.summary)
@property
def icon_url(self):
if self.icon:
return self.icon.get_url()
elif self.mature:
return "%simages/icons/mature128.png" % settings.STATIC_URL
else:
return "%simages/icons/default-category-%s-128.png" % (settings.STATIC_URL, self.content_summary)
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, summary=self.summary, length=length, prefer_long=True)
else:
return summarize(body=self.desc, summary=self.summary, length=length)
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
def save(self, *args, **kwargs):
if self.parent:
self.cached_url = '%s/%s' % (self.parent.cached_url, self.slug)
else:
self.cached_url = self.slug
super(category, self).save(*args, **kwargs)
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
canview = self.can_view(request)
if not canview[0]:
return canview
else:
return super(category, self).can_edit(request, perm_check='deertrees.change_category')
return (False,'')
class MPTTMeta:
order_insertion_by = ['title']
class Meta:
verbose_name_plural = 'categories'
class tag(models.Model):
title = models.CharField(max_length=200,null=True,blank=True)
slug = models.SlugField(max_length=200,unique=True)
desc = models.TextField(null=True,blank=True)
view_type = models.CharField(choices=viewtype_options(), max_length=15, default='default', help_text='Determines the placement of content when this tag is displayed.')
sitemap_include = models.BooleanField(default=True, verbose_name='include in sitemap', help_text='Check this box to include this tag in sitemap views.')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
@property
def display_title(self):
if self.title:
return self.title
else:
return self.slug
def __unicode__(self):
return self.display_title
def get_absolute_url(self):
return reverse('tag', kwargs={'slug':self.slug,})
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
return (request.user.has_perm('deertrees.change_tag'), 'access_perms')
return (False,'')
@property
def synonym_list(self):
sluglist = []
if self.title:
sluglist.append(self.slug)
synonyms = self.synonyms.all().values_list('slug', flat=True)
if synonyms:
sluglist += list(synonyms)
return sluglist
@property
def body_html(self):
return format_html(self.desc)
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, length=length, prefer_long=True, fallback='')
else:
return summarize(body=self.desc, length=length, fallback='')
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
|
# ALIAS
@property
def rss_description(self):
return self.summary_short
class Meta:
ordering = ['slug',]
class tag_synonym(models.Model):
parent = models.ForeignKey(tag, on_delete=models.CASCADE, related_name='synonyms')
slug = models.SlugField(max_length=200,unique=True)
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
def get_absolute_url(self):
return reverse('tag', kwargs={'slug':self.parent.slug,})
def __unicode__(self):
return self.slug
class Meta:
ordering = ['slug',]
class external_link_type(models.Model):
name = models.CharField(max_length=200, verbose_name='site name')
label = models.CharField(max_length=200, verbose_name='link label')
icon = models.ImageField(upload_to='linkicons_ext', null=True, blank=True)
url_format = models.CharField(max_length=250, blank=True, null=True, verbose_name='URL format', help_text='Use <id> to create a placeholder for remote_id on links of this type.')
featured = models.BooleanField(db_index=True, blank=True, default=False)
public = models.BooleanField(db_index=True, blank=True, default=True)
notes = models.TextField(null=True, blank=True)
sites = models.ManyToManyField(Site, db_index=True, help_text='Sites/domains on which this item will appear.')
def __unicode__(self):
return self.name
@property
def icon_url(self):
if self.icon:
return "%s%s" % (settings.MEDIA_URL,self.icon.name)
else:
return "%simages/icons/default-link-32.png" % settings.STATIC_URL
class Meta:
verbose_name = 'external platform'
class external_link(models.Model):
link_type = models.ForeignKey(external_link_type, on_delete=models.CASCADE, related_name='links', verbose_name='platform')
parent = models.ForeignKey('leaf', on_delete=models.CASCADE, related_name='external_links')
full_url = models.URLField(max_length=500, blank=True, null=True, verbose_name='URL')
remote_id = models.CharField(max_length=250, blank=True, null=True, verbose_name='remote object ID')
label_override = models.CharField(max_length=250, blank=True, null=True, verbose_name='label override')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created')
published = models.BooleanField(db_index=True, blank=True, default=False)
automated = models.BooleanField(db_index=True, blank=True, default=False)
notes = models.TextField(null=True, blank=True)
def __unicode__(self):
return '%s: %s' % (self.link_type.name, unicode(self.parent))
def get_absolute_url(self):
return self.url
@property
def url(self):
if self.full_url:
return self.full_url
elif self.remote_id and self.link_type.url_format:
return self.link_type.url_format.replace('<id>', self.remote_id)
else:
return ''
@property
def label(self):
if self.label_override:
return self.label_override
else:
return self.link_type.label
def clean(self):
if not self.full_url and not self.remote_id:
raise ValidationError('Either a full URL or a remote ID are required.')
if not self.link_type.url_format and not self.full_url:
raise ValidationError('A full URL is required for this link type')
return super(external_link,self).clean()
class Meta:
verbose_name = 'external platform link'
ordering = ['-link_type__featured']
# This model has been modified for the Awi website, and requires the Awi Access app
# This is a single categorized node; everything else that belongs to a category should extend this class
class leaf(access_control):
TIMEDISP_OPTIONS = (('post','Posted'),('mod','Modified'))
author_override = models.CharField(max_length=100, null=True, blank=True, help_text="If this was written by a guest author, enter their name here. Enter 'none' to hide the author info from display (only use this for things like system directories and site policies where authorship is irrelevant).")
cat = models.ForeignKey(category, null=True, blank=True, on_delete=models.PROTECT, verbose_name='category', related_name='leaves')
tags = models.ManyToManyField(tag, blank=True, related_name='leaves')
timestamp_mod = models.DateTimeField(auto_now=True, db_index=True, verbose_name='date/time modified')
timestamp_post = models.DateTimeField(default=timezone.now, db_index=True, verbose_name='date/time created', help_text='Set this to a future date to schedule it.')
timedisp = models.CharField(max_length=10, choices=TIMEDISP_OPTIONS, default='post', verbose_name='preferred timestamp', help_text='Determines which timestamp (modified, or created) will be publicly displayed. The other option will only be visible to users who can edit this item.')
type = models.CharField(max_length=20, default='unknown', db_index=True, help_text='System field: Indicates which model this leaf is.')
def __unicode__(self):
return '%s: %d' % (self.type.capitalize(), self.pk)
# An extension of get_absolute_url() to include the domain
def get_complete_url(self, request=None):
if request:
domain = request.get_host()
else:
primary_site = self.sites.all().order_by('pk').first()
if not primary_site:
primary_site = Site.objects.get(pk=settings.SITE_ID)
domain = primary_site.domain
if 'www' not in domain:
domain = 'www.%s' % domain
return 'https://%s%s' % (domain, self.get_absolute_url())
def save(self, *args, **kwargs):
if not self.pk:
self.type = self.__class__.__name__
super(leaf, self).save(*args, **kwargs)
def scheduled(self):
if self.published and self.timestamp_post > timezone.now():
return True
else:
return False
def can_view(self, request=False):
if not request:
return (False,'access_norequest')
public_check = self.is_public()
if public_check[0]:
return (True, '')
else:
canview = super(leaf, self).can_view(request)
if canview[0] and self.scheduled() and (self.owner != request.user or not request.user.has_perm('deertrees.change_leaf')):
canview = (False,'access_404') # If it's scheduled, and we don't have elevated privileges, it doesn't exist.
return canview
def can_edit(self, request=False):
if not request:
return (False,'access_norequest')
else:
canview = self.can_view(request)
if not canview[0]:
return canview
else:
return super(leaf, self).can_edit(request, perm_check='deertrees.change_leaf')
return (False,'')
def is_public(self):
public, restrictions = super(leaf, self).is_public()
if self.scheduled():
public = False
restrictions.append('Scheduled future postdate')
return (public, restrictions)
# Helper method for extracting a reason for non-public status that's easier to work with programmaticly
@property
def restriction(self):
cur_restriction = super(leaf, self).restriction
if self.scheduled() and not self.is_public()[0]:
return 'scheduled'
else:
return cur_restriction
def tag_item(self, taglist):
return_data = {'skipped':[], 'added':[], 'created':[]}
if ', ' in taglist:
new_tags = taglist.split(', ')
elif ',' in taglist:
new_tags = taglist.split(',')
else:
new_tags = [taglist,]
old_tags = {}
cur_tags = self.tags.all()
if cur_tags:
for old_tag in cur_tags:
old_tags[old_tag.slug] = old_tag
new_tag_objs = []
for new_tag in new_tags:
if old_tags.get(new_tag, False):
return_data['skipped'].append(new_tag)
else:
new_tag = slugify(new_tag)
new_tag_obj = tag.objects.get_or_create(slug=new_tag)
new_tag_objs.append(new_tag_obj[0])
if new_tag_obj[1]:
return_data['created'].append(new_tag)
else:
return_data['added'].append(new_tag)
if new_tag_objs:
self.tags.add(*new_tag_objs)
return return_data
def display_times(self):
return_times = [{},{}]
if self.timedisp == 'post':
return_mod = 1
return_post = 0
else:
return_mod = 0
return_post = 1
return_times[return_post]['timestamp'] = self.timestamp_post
return_times[return_mod]['timestamp'] = self.timestamp_mod
return_times[return_post]['label'] = 'Posted'
return_times[return_mod]['label'] = 'Updated'
return return_times
def get_links(self, request=False):
link_query = self.external_links.select_related('link_type')
if request:
if self.can_edit(request)[0]:
return link_query.all()
elif request.user.is_authenticated():
return link_query.filter(link_type__sites__id=settings.SITE_ID, published=True)
return link_query.filter(link_type__sites__id=settings.SITE_ID, published=True, link_type__public=True)
@property
def timestamp(self):
if self.timedisp == 'post':
return self.timestamp_post
else:
return self.timestamp_mod
@property
def rss_description(self):
return 'No Description'
@property
def is_recent(self):
if self.timestamp > (timezone.now() - timedelta(days=30*6)):
return True
else:
return False
@property
def admin_owned(self):
if self.owner.pk == settings.SITE_OWNER_ACCOUNT_ID and not self.author_override:
return True
else:
return False
@property
def is_old(self):
if self.cat.always_fresh or not self.admin_owned:
return False
else:
if self.timestamp_mod < (timezone.now() - timedelta(days=365*10)):
return True
elif (self.timestamp_post < (timezone.now() - timedelta(days=365*10))) and (self.timestamp_mod < (timezone.now() - timedelta(days=365*2))):
return True
else:
return False
@property
def author(self):
if self.author_override:
if self.author_override.lower() == 'none':
return ''
else:
return self.author_override
else:
if self.owner.get_full_name():
return self.owner.get_full_name()
else:
return self.owner.get_username()
@property
def tags_list(self):
return self.tags.all().values_list('slug', flat=True)
# Create a leaf that links to something else that isn't part of this category system.
# Handy for things like third-party apps, or self-contained apps with their own organizational structure.
class special_feature(leaf):
url = models.CharField(max_length=60, unique=True, verbose_name='URL', help_text='Similar to a Slug field, but can accept any character, to make it easier to link to non-DeerTrees URLs.')
url_reverse = models.CharField(max_length=250, null=True, blank=True, help_text='Enter the keyword used by Django to look up this special feature in urls.py.')
title = models.CharField(max_length=60)
desc = models.CharField(max_length=255, null=True, blank=True, verbose_name='Description')
def get_absolute_url(self):
return '%s%s' % (reverse('category', kwargs={'cached_url':self.cat.cached_url,}), self.url)
def __unicode__(self):
return self.title
def get_summary(self,length=255):
if length > 255:
return summarize(body=self.desc, length=length, prefer_long=True)
else:
return summarize(body=self.desc, length=length)
@property
def summary_short(self):
return self.get_summary()
@property
def summary_long(self):
return self.get_summary(512)
# ALIAS
@property
def rss_description(self):
return self.summary_short
class Meta:
verbose_name = 'special feature'
| return self.get_summary(512) | identifier_body |
util.py | # Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is
# licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import hashlib
import json
import logging
import tarfile
import tempfile
import typing
import deprecated
import ccc.oci
import ci.util
import oci
import oci.client as oc
import oci.convert as oconv
import oci.model as om
import tarutil
logger = logging.getLogger(__name__)
def image_exists(image_reference: str):
oci_client = ccc.oci.oci_client()
return bool(oci_client.head_manifest(image_reference=image_reference, absent_ok=True))
def filter_image(
source_ref:str,
target_ref:str,
remove_files: typing.Sequence[str]=(),
oci_client: oc.Client=None,
):
if not oci_client:
oci_client = ccc.oci.oci_client()
# shortcut in case there are no filtering-rules
if not remove_files:
return oci.replicate_artifact(
src_image_reference=source_ref,
tgt_image_reference=target_ref,
oci_client=oci_client,
)
manifest = oci_client.manifest(image_reference=source_ref)
cp_cfg_blob = True
if isinstance(manifest, om.OciImageManifestV1):
logger.info(f'converting v1-manifest -> v2 {source_ref=} {target_ref=}')
manifest, cfg_blob = oconv.v1_manifest_to_v2(
manifest=manifest,
oci_client=oci_client,
tgt_image_ref=target_ref,
)
cp_cfg_blob = False # we synthesise new cfg - thus we cannot cp from src
elif not isinstance(manifest, om.OciImageManifest):
raise NotImplementedError(manifest)
# allow / ignore leading '/'
remove_files = [p.lstrip('/') for p in remove_files]
def tarmember_filter(tar_info: tarfile.TarInfo):
stripped_name = tar_info.name.lstrip('./')
if stripped_name in remove_files:
logger.debug(f'rm: {tar_info.name=}')
return False # rm member
return True # keep member
# prepare copy of layers to avoid modification while iterating
layers_copy = manifest.layers.copy()
for layer in manifest.layers:
layer_hash = hashlib.sha256()
leng = 0
# unfortunately, GCR (our most important oci-registry) does not support chunked uploads,
# so we have to resort to writing the streaming result into a local tempfile to be able
# to calculate digest-hash prior to upload to tgt; XXX: we might use streaming
# when interacting w/ oci-registries that support chunked-uploads
with tempfile.TemporaryFile() as f:
src_tar_stream = oci_client.blob(
image_reference=source_ref,
digest=layer.digest,
stream=True,
).iter_content(chunk_size=tarfile.BLOCKSIZE)
src_tar_fobj = tarutil._FilelikeProxy(generator=src_tar_stream)
filtered_stream = tarutil.filtered_tarfile_generator(
src_tf=tarfile.open(fileobj=src_tar_fobj, mode='r|*'),
filter_func=tarmember_filter,
)
for chunk in filtered_stream:
layer_hash.update(chunk)
leng += len(chunk)
f.write(chunk)
f.seek(0)
oci_client.put_blob(
image_reference=target_ref,
digest=(layer_digest := 'sha256:' + layer_hash.hexdigest()),
octets_count=leng,
data=f,
)
# update copy of layers-list with new layer
new_layer = dataclasses.replace(layer, digest=layer_digest, size=leng)
layers_copy[layers_copy.index(layer)] = new_layer
# switch layers in manifest to announce changes w/ manifest-upload
manifest.layers = layers_copy
# need to patch cfg-object, in case layer-digests changed
if cp_cfg_blob:
cfg_blob = oci_client.blob(
image_reference=source_ref,
digest=manifest.config.digest,
stream=False,
).json() # cfg-blobs are small - no point in streaming
if not 'rootfs' in cfg_blob:
raise ValueError('expected attr `rootfs` not present on cfg-blob')
else:
cfg_blob = json.loads(cfg_blob)
cfg_blob['rootfs'] = {
'diff_ids': [
layer.digest for layer in manifest.layers
],
'type': 'layers',
}
cfg_blob = json.dumps(cfg_blob).encode('utf-8')
cfg_digest = f'sha256:{hashlib.sha256(cfg_blob).hexdigest()}'
cfg_leng = len(cfg_blob)
oci_client.put_blob(
image_reference=target_ref,
digest=cfg_digest,
octets_count=cfg_leng,
data=cfg_blob,
)
manifest.config = dataclasses.replace(manifest.config, digest=cfg_digest, size=cfg_leng)
manifest_raw = json.dumps(dataclasses.asdict(manifest)).encode('utf-8')
oci_client.put_manifest(image_reference=target_ref, manifest=manifest_raw)
@deprecated.deprecated
def filter_container_image(
image_file,
out_file,
remove_entries,
):
ci.util.existing_file(image_file)
if not remove_entries:
raise ValueError('remove_entries must not be empty')
# allow absolute paths
remove_entries = [e.lstrip('/') for e in remove_entries]
with tarfile.open(image_file) as tf:
manifest = json.load(tf.extractfile('manifest.json'))
if not len(manifest) == 1:
raise NotImplementedError()
manifest = manifest[0]
cfg_name = manifest['Config']
with tarfile.open(image_file, 'r') as in_tf, tarfile.open(out_file, 'w') as out_tf:
_filter_files(
manifest=manifest,
cfg_name=cfg_name,
in_tarfile=in_tf,
out_tarfile=out_tf,
remove_entries=set(remove_entries),
)
@deprecated.deprecated
def _filter_files(
manifest,
cfg_name,
in_tarfile: tarfile.TarFile,
out_tarfile: tarfile.TarFile,
remove_entries,
):
|
def _filter_single_tar(
in_file: tarfile.TarFile,
remove_entries,
):
temp_fh = tempfile.TemporaryFile()
temptar = tarfile.TarFile(fileobj=temp_fh, mode='w')
for tar_info in in_file:
if not tar_info.isfile():
temptar.addfile(tar_info)
continue
if tar_info.name.lstrip('./') in remove_entries:
logging.debug(f'purging entry: {tar_info.name}')
continue
# copy entry
entry = in_file.extractfile(tar_info)
temptar.addfile(tar_info, fileobj=entry)
size = temp_fh.tell()
temp_fh.flush()
temp_fh.seek(0)
return temp_fh, size
| layer_paths = set(manifest['Layers'])
changed_layer_hashes = [] # [(old, new),]
# copy everything that does not need to be patched
for tar_info in in_tarfile:
if not tar_info.isfile():
out_tarfile.addfile(tar_info)
continue
# cfg needs to be rewritten - so do not cp
if tar_info.name in (cfg_name, 'manifest.json'):
continue
fileobj = in_tarfile.extractfile(tar_info)
if tar_info.name not in layer_paths:
out_tarfile.addfile(tar_info, fileobj=fileobj)
continue
# assumption: layers are always tarfiles
# check if we need to patch
layer_tar = tarfile.open(fileobj=fileobj)
# normalise paths
layer_tar_paths = {
path.lstrip('./') for path in layer_tar.getnames()
}
have_match = bool(layer_tar_paths & remove_entries)
fileobj.seek(0)
if not have_match:
out_tarfile.addfile(tar_info, fileobj=fileobj)
else:
old_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while fileobj.peek():
old_hash.update(fileobj.read(2048))
fileobj.seek(0)
patched_tar, size = _filter_single_tar(
in_file=layer_tar,
remove_entries=remove_entries,
)
# patch tar_info to reduced size
tar_info.size = size
new_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while patched_tar.peek():
new_hash.update(patched_tar.read(2048))
patched_tar.seek(0)
out_tarfile.addfile(tar_info, fileobj=patched_tar)
logging.debug(f'patched: {tar_info.name}')
changed_layer_hashes.append((old_hash.hexdigest(), new_hash.hexdigest()))
# update cfg
cfg = json.load(in_tarfile.extractfile(cfg_name))
root_fs = cfg['rootfs']
if not root_fs['type'] == 'layers':
raise NotImplementedError()
# XXX hard-code hash algorithm (assume all entries are prefixed w/ sha256)
diff_ids = root_fs['diff_ids']
for old_hash, new_hash in changed_layer_hashes:
idx = diff_ids.index('sha256:' + old_hash)
diff_ids[idx] = 'sha256:' + new_hash
# hash cfg again (as its name is derived from its hash)
cfg_raw = json.dumps(cfg)
cfg_hash = hashlib.sha256(cfg_raw.encode('utf-8')).hexdigest()
cfg_name = cfg_hash + '.json'
# add cfg to resulting archive
# unfortunately, tarfile requires us to use a tempfile :-(
with tempfile.TemporaryFile() as tmp_fh:
tmp_fh.write(cfg_raw.encode('utf-8'))
cfg_size = tmp_fh.tell()
tmp_fh.seek(0)
cfg_info = tarfile.TarInfo(name=cfg_name)
cfg_info.type = tarfile.REGTYPE
cfg_info.size = cfg_size
out_tarfile.addfile(cfg_info, fileobj=tmp_fh)
# now new finally need to patch the manifest
manifest['Config'] = cfg_name
# wrap it in a list again
manifest = [manifest]
with tempfile.TemporaryFile() as fh:
manifest_raw = json.dumps(manifest)
fh.write(manifest_raw.encode('utf-8'))
size = fh.tell()
fh.seek(0)
manifest_info = tarfile.TarInfo(name='manifest.json')
manifest_info.type = tarfile.REGTYPE
manifest_info.size = size
out_tarfile.addfile(manifest_info, fh) | identifier_body |
util.py | # Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is
# licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import hashlib
import json
import logging
import tarfile
import tempfile
import typing
import deprecated
import ccc.oci
import ci.util
import oci
import oci.client as oc
import oci.convert as oconv
import oci.model as om
import tarutil
logger = logging.getLogger(__name__)
def image_exists(image_reference: str):
oci_client = ccc.oci.oci_client()
return bool(oci_client.head_manifest(image_reference=image_reference, absent_ok=True))
def filter_image(
source_ref:str,
target_ref:str,
remove_files: typing.Sequence[str]=(),
oci_client: oc.Client=None,
):
if not oci_client:
oci_client = ccc.oci.oci_client()
# shortcut in case there are no filtering-rules
if not remove_files:
return oci.replicate_artifact(
src_image_reference=source_ref,
tgt_image_reference=target_ref,
oci_client=oci_client,
)
manifest = oci_client.manifest(image_reference=source_ref)
cp_cfg_blob = True
if isinstance(manifest, om.OciImageManifestV1):
logger.info(f'converting v1-manifest -> v2 {source_ref=} {target_ref=}')
manifest, cfg_blob = oconv.v1_manifest_to_v2(
manifest=manifest,
oci_client=oci_client,
tgt_image_ref=target_ref,
)
cp_cfg_blob = False # we synthesise new cfg - thus we cannot cp from src
elif not isinstance(manifest, om.OciImageManifest):
raise NotImplementedError(manifest)
# allow / ignore leading '/'
remove_files = [p.lstrip('/') for p in remove_files]
def tarmember_filter(tar_info: tarfile.TarInfo):
stripped_name = tar_info.name.lstrip('./')
if stripped_name in remove_files:
logger.debug(f'rm: {tar_info.name=}')
return False # rm member
return True # keep member
# prepare copy of layers to avoid modification while iterating
layers_copy = manifest.layers.copy()
for layer in manifest.layers:
layer_hash = hashlib.sha256()
leng = 0
# unfortunately, GCR (our most important oci-registry) does not support chunked uploads,
# so we have to resort to writing the streaming result into a local tempfile to be able
# to calculate digest-hash prior to upload to tgt; XXX: we might use streaming
# when interacting w/ oci-registries that support chunked-uploads
with tempfile.TemporaryFile() as f:
src_tar_stream = oci_client.blob(
image_reference=source_ref,
digest=layer.digest,
stream=True,
).iter_content(chunk_size=tarfile.BLOCKSIZE)
src_tar_fobj = tarutil._FilelikeProxy(generator=src_tar_stream)
filtered_stream = tarutil.filtered_tarfile_generator(
src_tf=tarfile.open(fileobj=src_tar_fobj, mode='r|*'),
filter_func=tarmember_filter,
)
for chunk in filtered_stream:
layer_hash.update(chunk)
leng += len(chunk)
f.write(chunk)
f.seek(0)
oci_client.put_blob(
image_reference=target_ref,
digest=(layer_digest := 'sha256:' + layer_hash.hexdigest()),
octets_count=leng,
data=f,
)
# update copy of layers-list with new layer
new_layer = dataclasses.replace(layer, digest=layer_digest, size=leng)
layers_copy[layers_copy.index(layer)] = new_layer
# switch layers in manifest to announce changes w/ manifest-upload
manifest.layers = layers_copy
# need to patch cfg-object, in case layer-digests changed
if cp_cfg_blob:
cfg_blob = oci_client.blob(
image_reference=source_ref,
digest=manifest.config.digest,
stream=False,
).json() # cfg-blobs are small - no point in streaming
if not 'rootfs' in cfg_blob:
raise ValueError('expected attr `rootfs` not present on cfg-blob')
else:
cfg_blob = json.loads(cfg_blob)
cfg_blob['rootfs'] = {
'diff_ids': [
layer.digest for layer in manifest.layers
],
'type': 'layers',
}
cfg_blob = json.dumps(cfg_blob).encode('utf-8')
cfg_digest = f'sha256:{hashlib.sha256(cfg_blob).hexdigest()}'
cfg_leng = len(cfg_blob)
oci_client.put_blob(
image_reference=target_ref,
digest=cfg_digest,
octets_count=cfg_leng,
data=cfg_blob,
)
manifest.config = dataclasses.replace(manifest.config, digest=cfg_digest, size=cfg_leng)
manifest_raw = json.dumps(dataclasses.asdict(manifest)).encode('utf-8')
oci_client.put_manifest(image_reference=target_ref, manifest=manifest_raw)
@deprecated.deprecated
def | (
image_file,
out_file,
remove_entries,
):
ci.util.existing_file(image_file)
if not remove_entries:
raise ValueError('remove_entries must not be empty')
# allow absolute paths
remove_entries = [e.lstrip('/') for e in remove_entries]
with tarfile.open(image_file) as tf:
manifest = json.load(tf.extractfile('manifest.json'))
if not len(manifest) == 1:
raise NotImplementedError()
manifest = manifest[0]
cfg_name = manifest['Config']
with tarfile.open(image_file, 'r') as in_tf, tarfile.open(out_file, 'w') as out_tf:
_filter_files(
manifest=manifest,
cfg_name=cfg_name,
in_tarfile=in_tf,
out_tarfile=out_tf,
remove_entries=set(remove_entries),
)
@deprecated.deprecated
def _filter_files(
manifest,
cfg_name,
in_tarfile: tarfile.TarFile,
out_tarfile: tarfile.TarFile,
remove_entries,
):
layer_paths = set(manifest['Layers'])
changed_layer_hashes = [] # [(old, new),]
# copy everything that does not need to be patched
for tar_info in in_tarfile:
if not tar_info.isfile():
out_tarfile.addfile(tar_info)
continue
# cfg needs to be rewritten - so do not cp
if tar_info.name in (cfg_name, 'manifest.json'):
continue
fileobj = in_tarfile.extractfile(tar_info)
if tar_info.name not in layer_paths:
out_tarfile.addfile(tar_info, fileobj=fileobj)
continue
# assumption: layers are always tarfiles
# check if we need to patch
layer_tar = tarfile.open(fileobj=fileobj)
# normalise paths
layer_tar_paths = {
path.lstrip('./') for path in layer_tar.getnames()
}
have_match = bool(layer_tar_paths & remove_entries)
fileobj.seek(0)
if not have_match:
out_tarfile.addfile(tar_info, fileobj=fileobj)
else:
old_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while fileobj.peek():
old_hash.update(fileobj.read(2048))
fileobj.seek(0)
patched_tar, size = _filter_single_tar(
in_file=layer_tar,
remove_entries=remove_entries,
)
# patch tar_info to reduced size
tar_info.size = size
new_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while patched_tar.peek():
new_hash.update(patched_tar.read(2048))
patched_tar.seek(0)
out_tarfile.addfile(tar_info, fileobj=patched_tar)
logging.debug(f'patched: {tar_info.name}')
changed_layer_hashes.append((old_hash.hexdigest(), new_hash.hexdigest()))
# update cfg
cfg = json.load(in_tarfile.extractfile(cfg_name))
root_fs = cfg['rootfs']
if not root_fs['type'] == 'layers':
raise NotImplementedError()
# XXX hard-code hash algorithm (assume all entries are prefixed w/ sha256)
diff_ids = root_fs['diff_ids']
for old_hash, new_hash in changed_layer_hashes:
idx = diff_ids.index('sha256:' + old_hash)
diff_ids[idx] = 'sha256:' + new_hash
# hash cfg again (as its name is derived from its hash)
cfg_raw = json.dumps(cfg)
cfg_hash = hashlib.sha256(cfg_raw.encode('utf-8')).hexdigest()
cfg_name = cfg_hash + '.json'
# add cfg to resulting archive
# unfortunately, tarfile requires us to use a tempfile :-(
with tempfile.TemporaryFile() as tmp_fh:
tmp_fh.write(cfg_raw.encode('utf-8'))
cfg_size = tmp_fh.tell()
tmp_fh.seek(0)
cfg_info = tarfile.TarInfo(name=cfg_name)
cfg_info.type = tarfile.REGTYPE
cfg_info.size = cfg_size
out_tarfile.addfile(cfg_info, fileobj=tmp_fh)
# now new finally need to patch the manifest
manifest['Config'] = cfg_name
# wrap it in a list again
manifest = [manifest]
with tempfile.TemporaryFile() as fh:
manifest_raw = json.dumps(manifest)
fh.write(manifest_raw.encode('utf-8'))
size = fh.tell()
fh.seek(0)
manifest_info = tarfile.TarInfo(name='manifest.json')
manifest_info.type = tarfile.REGTYPE
manifest_info.size = size
out_tarfile.addfile(manifest_info, fh)
def _filter_single_tar(
in_file: tarfile.TarFile,
remove_entries,
):
temp_fh = tempfile.TemporaryFile()
temptar = tarfile.TarFile(fileobj=temp_fh, mode='w')
for tar_info in in_file:
if not tar_info.isfile():
temptar.addfile(tar_info)
continue
if tar_info.name.lstrip('./') in remove_entries:
logging.debug(f'purging entry: {tar_info.name}')
continue
# copy entry
entry = in_file.extractfile(tar_info)
temptar.addfile(tar_info, fileobj=entry)
size = temp_fh.tell()
temp_fh.flush()
temp_fh.seek(0)
return temp_fh, size
| filter_container_image | identifier_name |
util.py | # Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is
# licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import hashlib
import json
import logging
import tarfile
import tempfile
import typing
import deprecated
import ccc.oci
import ci.util
import oci
import oci.client as oc
import oci.convert as oconv
import oci.model as om
import tarutil
logger = logging.getLogger(__name__)
def image_exists(image_reference: str):
oci_client = ccc.oci.oci_client()
return bool(oci_client.head_manifest(image_reference=image_reference, absent_ok=True))
def filter_image(
source_ref:str,
target_ref:str,
remove_files: typing.Sequence[str]=(),
oci_client: oc.Client=None,
):
if not oci_client:
oci_client = ccc.oci.oci_client()
# shortcut in case there are no filtering-rules
if not remove_files:
return oci.replicate_artifact(
src_image_reference=source_ref,
tgt_image_reference=target_ref,
oci_client=oci_client,
)
manifest = oci_client.manifest(image_reference=source_ref)
cp_cfg_blob = True
if isinstance(manifest, om.OciImageManifestV1):
logger.info(f'converting v1-manifest -> v2 {source_ref=} {target_ref=}')
manifest, cfg_blob = oconv.v1_manifest_to_v2(
manifest=manifest,
oci_client=oci_client,
tgt_image_ref=target_ref,
)
cp_cfg_blob = False # we synthesise new cfg - thus we cannot cp from src
elif not isinstance(manifest, om.OciImageManifest):
raise NotImplementedError(manifest)
# allow / ignore leading '/'
remove_files = [p.lstrip('/') for p in remove_files]
def tarmember_filter(tar_info: tarfile.TarInfo):
stripped_name = tar_info.name.lstrip('./')
if stripped_name in remove_files:
logger.debug(f'rm: {tar_info.name=}')
return False # rm member
return True # keep member
# prepare copy of layers to avoid modification while iterating
layers_copy = manifest.layers.copy()
for layer in manifest.layers:
layer_hash = hashlib.sha256()
leng = 0
# unfortunately, GCR (our most important oci-registry) does not support chunked uploads,
# so we have to resort to writing the streaming result into a local tempfile to be able
# to calculate digest-hash prior to upload to tgt; XXX: we might use streaming
# when interacting w/ oci-registries that support chunked-uploads
with tempfile.TemporaryFile() as f:
src_tar_stream = oci_client.blob(
image_reference=source_ref,
digest=layer.digest,
stream=True,
).iter_content(chunk_size=tarfile.BLOCKSIZE)
src_tar_fobj = tarutil._FilelikeProxy(generator=src_tar_stream)
filtered_stream = tarutil.filtered_tarfile_generator(
src_tf=tarfile.open(fileobj=src_tar_fobj, mode='r|*'),
filter_func=tarmember_filter,
)
for chunk in filtered_stream:
layer_hash.update(chunk)
leng += len(chunk) | image_reference=target_ref,
digest=(layer_digest := 'sha256:' + layer_hash.hexdigest()),
octets_count=leng,
data=f,
)
# update copy of layers-list with new layer
new_layer = dataclasses.replace(layer, digest=layer_digest, size=leng)
layers_copy[layers_copy.index(layer)] = new_layer
# switch layers in manifest to announce changes w/ manifest-upload
manifest.layers = layers_copy
# need to patch cfg-object, in case layer-digests changed
if cp_cfg_blob:
cfg_blob = oci_client.blob(
image_reference=source_ref,
digest=manifest.config.digest,
stream=False,
).json() # cfg-blobs are small - no point in streaming
if not 'rootfs' in cfg_blob:
raise ValueError('expected attr `rootfs` not present on cfg-blob')
else:
cfg_blob = json.loads(cfg_blob)
cfg_blob['rootfs'] = {
'diff_ids': [
layer.digest for layer in manifest.layers
],
'type': 'layers',
}
cfg_blob = json.dumps(cfg_blob).encode('utf-8')
cfg_digest = f'sha256:{hashlib.sha256(cfg_blob).hexdigest()}'
cfg_leng = len(cfg_blob)
oci_client.put_blob(
image_reference=target_ref,
digest=cfg_digest,
octets_count=cfg_leng,
data=cfg_blob,
)
manifest.config = dataclasses.replace(manifest.config, digest=cfg_digest, size=cfg_leng)
manifest_raw = json.dumps(dataclasses.asdict(manifest)).encode('utf-8')
oci_client.put_manifest(image_reference=target_ref, manifest=manifest_raw)
@deprecated.deprecated
def filter_container_image(
image_file,
out_file,
remove_entries,
):
ci.util.existing_file(image_file)
if not remove_entries:
raise ValueError('remove_entries must not be empty')
# allow absolute paths
remove_entries = [e.lstrip('/') for e in remove_entries]
with tarfile.open(image_file) as tf:
manifest = json.load(tf.extractfile('manifest.json'))
if not len(manifest) == 1:
raise NotImplementedError()
manifest = manifest[0]
cfg_name = manifest['Config']
with tarfile.open(image_file, 'r') as in_tf, tarfile.open(out_file, 'w') as out_tf:
_filter_files(
manifest=manifest,
cfg_name=cfg_name,
in_tarfile=in_tf,
out_tarfile=out_tf,
remove_entries=set(remove_entries),
)
@deprecated.deprecated
def _filter_files(
manifest,
cfg_name,
in_tarfile: tarfile.TarFile,
out_tarfile: tarfile.TarFile,
remove_entries,
):
layer_paths = set(manifest['Layers'])
changed_layer_hashes = [] # [(old, new),]
# copy everything that does not need to be patched
for tar_info in in_tarfile:
if not tar_info.isfile():
out_tarfile.addfile(tar_info)
continue
# cfg needs to be rewritten - so do not cp
if tar_info.name in (cfg_name, 'manifest.json'):
continue
fileobj = in_tarfile.extractfile(tar_info)
if tar_info.name not in layer_paths:
out_tarfile.addfile(tar_info, fileobj=fileobj)
continue
# assumption: layers are always tarfiles
# check if we need to patch
layer_tar = tarfile.open(fileobj=fileobj)
# normalise paths
layer_tar_paths = {
path.lstrip('./') for path in layer_tar.getnames()
}
have_match = bool(layer_tar_paths & remove_entries)
fileobj.seek(0)
if not have_match:
out_tarfile.addfile(tar_info, fileobj=fileobj)
else:
old_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while fileobj.peek():
old_hash.update(fileobj.read(2048))
fileobj.seek(0)
patched_tar, size = _filter_single_tar(
in_file=layer_tar,
remove_entries=remove_entries,
)
# patch tar_info to reduced size
tar_info.size = size
new_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while patched_tar.peek():
new_hash.update(patched_tar.read(2048))
patched_tar.seek(0)
out_tarfile.addfile(tar_info, fileobj=patched_tar)
logging.debug(f'patched: {tar_info.name}')
changed_layer_hashes.append((old_hash.hexdigest(), new_hash.hexdigest()))
# update cfg
cfg = json.load(in_tarfile.extractfile(cfg_name))
root_fs = cfg['rootfs']
if not root_fs['type'] == 'layers':
raise NotImplementedError()
# XXX hard-code hash algorithm (assume all entries are prefixed w/ sha256)
diff_ids = root_fs['diff_ids']
for old_hash, new_hash in changed_layer_hashes:
idx = diff_ids.index('sha256:' + old_hash)
diff_ids[idx] = 'sha256:' + new_hash
# hash cfg again (as its name is derived from its hash)
cfg_raw = json.dumps(cfg)
cfg_hash = hashlib.sha256(cfg_raw.encode('utf-8')).hexdigest()
cfg_name = cfg_hash + '.json'
# add cfg to resulting archive
# unfortunately, tarfile requires us to use a tempfile :-(
with tempfile.TemporaryFile() as tmp_fh:
tmp_fh.write(cfg_raw.encode('utf-8'))
cfg_size = tmp_fh.tell()
tmp_fh.seek(0)
cfg_info = tarfile.TarInfo(name=cfg_name)
cfg_info.type = tarfile.REGTYPE
cfg_info.size = cfg_size
out_tarfile.addfile(cfg_info, fileobj=tmp_fh)
# now new finally need to patch the manifest
manifest['Config'] = cfg_name
# wrap it in a list again
manifest = [manifest]
with tempfile.TemporaryFile() as fh:
manifest_raw = json.dumps(manifest)
fh.write(manifest_raw.encode('utf-8'))
size = fh.tell()
fh.seek(0)
manifest_info = tarfile.TarInfo(name='manifest.json')
manifest_info.type = tarfile.REGTYPE
manifest_info.size = size
out_tarfile.addfile(manifest_info, fh)
def _filter_single_tar(
in_file: tarfile.TarFile,
remove_entries,
):
temp_fh = tempfile.TemporaryFile()
temptar = tarfile.TarFile(fileobj=temp_fh, mode='w')
for tar_info in in_file:
if not tar_info.isfile():
temptar.addfile(tar_info)
continue
if tar_info.name.lstrip('./') in remove_entries:
logging.debug(f'purging entry: {tar_info.name}')
continue
# copy entry
entry = in_file.extractfile(tar_info)
temptar.addfile(tar_info, fileobj=entry)
size = temp_fh.tell()
temp_fh.flush()
temp_fh.seek(0)
return temp_fh, size | f.write(chunk)
f.seek(0)
oci_client.put_blob( | random_line_split |
util.py | # Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is
# licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import hashlib
import json
import logging
import tarfile
import tempfile
import typing
import deprecated
import ccc.oci
import ci.util
import oci
import oci.client as oc
import oci.convert as oconv
import oci.model as om
import tarutil
logger = logging.getLogger(__name__)
def image_exists(image_reference: str):
oci_client = ccc.oci.oci_client()
return bool(oci_client.head_manifest(image_reference=image_reference, absent_ok=True))
def filter_image(
source_ref:str,
target_ref:str,
remove_files: typing.Sequence[str]=(),
oci_client: oc.Client=None,
):
if not oci_client:
oci_client = ccc.oci.oci_client()
# shortcut in case there are no filtering-rules
if not remove_files:
return oci.replicate_artifact(
src_image_reference=source_ref,
tgt_image_reference=target_ref,
oci_client=oci_client,
)
manifest = oci_client.manifest(image_reference=source_ref)
cp_cfg_blob = True
if isinstance(manifest, om.OciImageManifestV1):
logger.info(f'converting v1-manifest -> v2 {source_ref=} {target_ref=}')
manifest, cfg_blob = oconv.v1_manifest_to_v2(
manifest=manifest,
oci_client=oci_client,
tgt_image_ref=target_ref,
)
cp_cfg_blob = False # we synthesise new cfg - thus we cannot cp from src
elif not isinstance(manifest, om.OciImageManifest):
raise NotImplementedError(manifest)
# allow / ignore leading '/'
remove_files = [p.lstrip('/') for p in remove_files]
def tarmember_filter(tar_info: tarfile.TarInfo):
stripped_name = tar_info.name.lstrip('./')
if stripped_name in remove_files:
logger.debug(f'rm: {tar_info.name=}')
return False # rm member
return True # keep member
# prepare copy of layers to avoid modification while iterating
layers_copy = manifest.layers.copy()
for layer in manifest.layers:
layer_hash = hashlib.sha256()
leng = 0
# unfortunately, GCR (our most important oci-registry) does not support chunked uploads,
# so we have to resort to writing the streaming result into a local tempfile to be able
# to calculate digest-hash prior to upload to tgt; XXX: we might use streaming
# when interacting w/ oci-registries that support chunked-uploads
with tempfile.TemporaryFile() as f:
src_tar_stream = oci_client.blob(
image_reference=source_ref,
digest=layer.digest,
stream=True,
).iter_content(chunk_size=tarfile.BLOCKSIZE)
src_tar_fobj = tarutil._FilelikeProxy(generator=src_tar_stream)
filtered_stream = tarutil.filtered_tarfile_generator(
src_tf=tarfile.open(fileobj=src_tar_fobj, mode='r|*'),
filter_func=tarmember_filter,
)
for chunk in filtered_stream:
layer_hash.update(chunk)
leng += len(chunk)
f.write(chunk)
f.seek(0)
oci_client.put_blob(
image_reference=target_ref,
digest=(layer_digest := 'sha256:' + layer_hash.hexdigest()),
octets_count=leng,
data=f,
)
# update copy of layers-list with new layer
new_layer = dataclasses.replace(layer, digest=layer_digest, size=leng)
layers_copy[layers_copy.index(layer)] = new_layer
# switch layers in manifest to announce changes w/ manifest-upload
manifest.layers = layers_copy
# need to patch cfg-object, in case layer-digests changed
if cp_cfg_blob:
cfg_blob = oci_client.blob(
image_reference=source_ref,
digest=manifest.config.digest,
stream=False,
).json() # cfg-blobs are small - no point in streaming
if not 'rootfs' in cfg_blob:
raise ValueError('expected attr `rootfs` not present on cfg-blob')
else:
cfg_blob = json.loads(cfg_blob)
cfg_blob['rootfs'] = {
'diff_ids': [
layer.digest for layer in manifest.layers
],
'type': 'layers',
}
cfg_blob = json.dumps(cfg_blob).encode('utf-8')
cfg_digest = f'sha256:{hashlib.sha256(cfg_blob).hexdigest()}'
cfg_leng = len(cfg_blob)
oci_client.put_blob(
image_reference=target_ref,
digest=cfg_digest,
octets_count=cfg_leng,
data=cfg_blob,
)
manifest.config = dataclasses.replace(manifest.config, digest=cfg_digest, size=cfg_leng)
manifest_raw = json.dumps(dataclasses.asdict(manifest)).encode('utf-8')
oci_client.put_manifest(image_reference=target_ref, manifest=manifest_raw)
@deprecated.deprecated
def filter_container_image(
image_file,
out_file,
remove_entries,
):
ci.util.existing_file(image_file)
if not remove_entries:
raise ValueError('remove_entries must not be empty')
# allow absolute paths
remove_entries = [e.lstrip('/') for e in remove_entries]
with tarfile.open(image_file) as tf:
manifest = json.load(tf.extractfile('manifest.json'))
if not len(manifest) == 1:
raise NotImplementedError()
manifest = manifest[0]
cfg_name = manifest['Config']
with tarfile.open(image_file, 'r') as in_tf, tarfile.open(out_file, 'w') as out_tf:
_filter_files(
manifest=manifest,
cfg_name=cfg_name,
in_tarfile=in_tf,
out_tarfile=out_tf,
remove_entries=set(remove_entries),
)
@deprecated.deprecated
def _filter_files(
manifest,
cfg_name,
in_tarfile: tarfile.TarFile,
out_tarfile: tarfile.TarFile,
remove_entries,
):
layer_paths = set(manifest['Layers'])
changed_layer_hashes = [] # [(old, new),]
# copy everything that does not need to be patched
for tar_info in in_tarfile:
if not tar_info.isfile():
out_tarfile.addfile(tar_info)
continue
# cfg needs to be rewritten - so do not cp
if tar_info.name in (cfg_name, 'manifest.json'):
continue
fileobj = in_tarfile.extractfile(tar_info)
if tar_info.name not in layer_paths:
out_tarfile.addfile(tar_info, fileobj=fileobj)
continue
# assumption: layers are always tarfiles
# check if we need to patch
layer_tar = tarfile.open(fileobj=fileobj)
# normalise paths
layer_tar_paths = {
path.lstrip('./') for path in layer_tar.getnames()
}
have_match = bool(layer_tar_paths & remove_entries)
fileobj.seek(0)
if not have_match:
out_tarfile.addfile(tar_info, fileobj=fileobj)
else:
old_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while fileobj.peek():
old_hash.update(fileobj.read(2048))
fileobj.seek(0)
patched_tar, size = _filter_single_tar(
in_file=layer_tar,
remove_entries=remove_entries,
)
# patch tar_info to reduced size
tar_info.size = size
new_hash = hashlib.sha256() # XXX hard-code hash algorithm for now
while patched_tar.peek():
new_hash.update(patched_tar.read(2048))
patched_tar.seek(0)
out_tarfile.addfile(tar_info, fileobj=patched_tar)
logging.debug(f'patched: {tar_info.name}')
changed_layer_hashes.append((old_hash.hexdigest(), new_hash.hexdigest()))
# update cfg
cfg = json.load(in_tarfile.extractfile(cfg_name))
root_fs = cfg['rootfs']
if not root_fs['type'] == 'layers':
raise NotImplementedError()
# XXX hard-code hash algorithm (assume all entries are prefixed w/ sha256)
diff_ids = root_fs['diff_ids']
for old_hash, new_hash in changed_layer_hashes:
idx = diff_ids.index('sha256:' + old_hash)
diff_ids[idx] = 'sha256:' + new_hash
# hash cfg again (as its name is derived from its hash)
cfg_raw = json.dumps(cfg)
cfg_hash = hashlib.sha256(cfg_raw.encode('utf-8')).hexdigest()
cfg_name = cfg_hash + '.json'
# add cfg to resulting archive
# unfortunately, tarfile requires us to use a tempfile :-(
with tempfile.TemporaryFile() as tmp_fh:
tmp_fh.write(cfg_raw.encode('utf-8'))
cfg_size = tmp_fh.tell()
tmp_fh.seek(0)
cfg_info = tarfile.TarInfo(name=cfg_name)
cfg_info.type = tarfile.REGTYPE
cfg_info.size = cfg_size
out_tarfile.addfile(cfg_info, fileobj=tmp_fh)
# now new finally need to patch the manifest
manifest['Config'] = cfg_name
# wrap it in a list again
manifest = [manifest]
with tempfile.TemporaryFile() as fh:
manifest_raw = json.dumps(manifest)
fh.write(manifest_raw.encode('utf-8'))
size = fh.tell()
fh.seek(0)
manifest_info = tarfile.TarInfo(name='manifest.json')
manifest_info.type = tarfile.REGTYPE
manifest_info.size = size
out_tarfile.addfile(manifest_info, fh)
def _filter_single_tar(
in_file: tarfile.TarFile,
remove_entries,
):
temp_fh = tempfile.TemporaryFile()
temptar = tarfile.TarFile(fileobj=temp_fh, mode='w')
for tar_info in in_file:
if not tar_info.isfile():
|
if tar_info.name.lstrip('./') in remove_entries:
logging.debug(f'purging entry: {tar_info.name}')
continue
# copy entry
entry = in_file.extractfile(tar_info)
temptar.addfile(tar_info, fileobj=entry)
size = temp_fh.tell()
temp_fh.flush()
temp_fh.seek(0)
return temp_fh, size
| temptar.addfile(tar_info)
continue | conditional_block |
kibana.py | # -*- coding: utf-8 -*-
"""Kibana module."""
import pandas
import requests
import json
from . import util
class Kibana(object):
def __init__(self, host='localhost', port=5601, protocol='http',
verify_certs=True, **kwargs):
self._host = host
self._port = port
self._protocol = protocol
self._verify_certs = verify_certs
self._defaultIndexPatternUID = None
self._defaultSearchUID = None
def kibanaUrl(self, path=""):
# TODO maybe URLEncode path?
if path and path[0] != '/':
# TODO Warn about missing initial '/'?
path = '/'+path
return f"{self._protocol}://{self._host}:{self._port}{path}"
def alive(self, verbose=True):
resp = requests.head(self.kibanaUrl())
return resp.status_code == 200
def show_kibana(self, how=None, *args, **kwargs):
if how is None:
how = 'jupyter' if util.__IS_JUPYTER else 'webbrowser'
# TODO can we figure out "non-interactive" to put how='print' then?
how = how if isinstance(how, list) else [how]
url = self.kibanaUrl(*args, **kwargs)
if 'print' in how:
print(f"Open: {url}")
if 'webbrowser' in how:
import webbrowser
webbrowser.open(url)
if 'jupyter' in how or 'ipython' in how:
from IPython.core.display import HTML
return HTML(self._repr_html_())
def __repr__(self):
return f"Kibana on {self.kibanaUrl()}"
def _repr_html_(self):
return f"Kibana on <a href='{self.kibanaUrl()}'>{self.kibanaUrl()}</a>"
def getKibanaSavedObjects(self, type='index-pattern', search=None, fields=None):
type = '&type=' + type if type else ''
search = '&search=' + search if search else ''
fields = '&fields=' + fields if fields else ''
resp = requests.get(self.kibanaUrl(f'/api/saved_objects/_find?{type}{search}{fields}'))
resp.raise_for_status()
result = resp.json()['saved_objects']
return result
def postKibanaSavedObject(self, type, attributes, id=None):
body = { "attributes": attributes }
id = "/"+id if id else ""
result = requests.post(self.kibanaUrl(f'/api/saved_objects/{type}{id}?overwrite=true'), headers={"kbn-xsrf": "true"}, json=body)
result.raise_for_status()
# return result.json()
return result.json()['id'], result.json()
def deleteKibanaSavedObject(self, type, uid):
u = self.kibanaUrl(f'/api/saved_objects/{type}/{uid}')
resp = requests.delete(u, headers={"kbn-xsrf": "true"})
resp.raise_for_status
print(resp.json())
return resp.json()
def truncateKibanaSavedObjects(self, types=['dashboard','visualization','search','index-pattern'], search=None):
for t in types:
if search is not None and t=='index-pattern_________':
continue
objs = self.getKibanaSavedObjects(type=t, fields='name', search=search)
print(f'deleting {len(objs)} objects of type {t}...')
for i in objs:
# print(i['id'])
self.deleteKibanaSavedObject(t, i['id'])
print('finished deleting')
def getKibanaConfig(self, name=None, onlyLastSetValue=True, defaultValue=None):
assert onlyLastSetValue
config = self.getKibanaSavedObjects('config')
# TODO need to implement for onlyLastSetValue=False as well or warn if multiple values?
result = dict()
for i in config:
c = i['attributes']
result.update(c)
if name is not None:
if name in result:
return result[name]
else:
return defaultValue
return result
def addKibanaConfig(self, name, value, addToList=False, id=None):
assert not addToList
attributes = { name: value }
res = self.postKibanaSavedObject('config', attributes, id=id)
return res
def addKibanaIndexPattern(self, indexPattern, timeField=None, setDefaultIndexPattern=True, overwrite=False):
if not overwrite:
# TODO handle in postKibana URL parameter?
self.getKibanaSavedObjects('index-pattern', indexPattern)
for i in self.getKibanaSavedObjects('index-pattern',indexPattern,'title'):
if i['attributes']['title'] == indexPattern:
return
attributes = {
"title": indexPattern,
}
if timeField is not None:
attributes["timeFieldName"] = timeField
uid, result = self.postKibanaSavedObject('index-pattern', attributes)
if setDefaultIndexPattern:
self._defaultIndexPatternUID = uid
return uid, result
def addKibanaSearch(self, title, columns, description=None, sort=None, setDefaultSearch=True, indexPatternUID=None):
if indexPatternUID is None:
indexPatternUID = self._defaultIndexPatternUID
searchSourceJSON = {
"index": indexPatternUID,
# "highlightAll": True,
# "version": True,
"query":{"query":"","language":"kuery"},
"filter":[]
}
attributes = {
"title": title, 'columns': columns,
'kibanaSavedObjectMeta': {'searchSourceJSON': json.dumps(searchSourceJSON)}
}
if description is not None:
attributes['description'] = description
if sort is not None:
attributes['sort'] = sort
uid, res = self.postKibanaSavedObject(type='search', attributes=attributes)
if setDefaultSearch:
self._defaultSearchUID = uid
return uid, res
def addVisualization(self, title, viz, indexPatternUID=None):
if indexPatternUID is None:
indexPatternUID = self._defaultIndexPatternUID
# visState = {
# 'aggs':[
# {'id': '1', 'schema':'metric', 'type': 'count'},
# {
# 'id': '2', 'schema':'segment', 'type': 'terms',
# 'params': {'field': field, 'size': size, 'order': 'desc', 'orderBy': '1', }
# }, | # },
# 'title': title,
# 'type': visType,
# }
assert isinstance(viz, Visualization)
visState = viz.visState(title)
searchSourceJSON = {
"index":indexPatternUID,
"filter":[],
"query":{"language":"kuery","query":""}
}
uid, res = self.postKibanaSavedObject('visualization', attributes={
'title': title, 'visState': json.dumps(visState), 'uiStateJSON': '{"vis":{"legendOpen":false}}',
'kibanaSavedObjectMeta': {'searchSourceJSON': json.dumps(searchSourceJSON)}
})
return uid, res
def addDashboard(self, title, searchUID, visUIDs, timeFrom=None, timeTo=None, nVisCols=3, visW=16, visH=16, searchW=48, searchH=16):
panels = [{
'panelIndex': '1',
'gridData': {'x': 0, 'y': 0, 'w': searchW, 'h': searchH, 'i': '1'},
'version': '6.3.2',
'type': 'search',
'id': searchUID,
'embeddableConfig': {}
}]
for i, v in enumerate(visUIDs):
ix, iy = i % nVisCols, i // nVisCols
x, y = ix*visW , searchH + iy*visH
# print(ix,iy, x,y)
iStr = str(i+2)
panels.append({
'panelIndex': iStr,
'gridData': {'x': x, 'y': y, 'w': visW, 'h': visH, 'i': iStr},
'version': '6.3.2',
'type': 'visualization',
'id': v,
'embeddableConfig': {}
})
attributes = {
'title': title,
# 'hits': 0,
'description': '',
'panelsJSON': json.dumps(panels),
'optionsJSON': '{"darkTheme":false,"useMargins":true,"hidePanelTitles":false}',
# 'version': 1,
# 'refreshInterval': {'display': 'Off', 'pause': False, 'value': 0},
'kibanaSavedObjectMeta': {'searchSourceJSON': '{"query":{"query":"","language":"kuery"},"filter":[],"highlightAll":true,"version":true}'}
}
if timeFrom is not None and timeTo is not None:
attributes['timeRestore'] = True
attributes['timeTo'] = str(timeTo)
attributes['timeFrom'] = str(timeFrom)
uid, res = self.postKibanaSavedObject('dashboard',attributes)
return uid, res
def setup_kibana(self, index, timeField=None, searchCols=[], visCols=None, dashboard=True, timeFrom=None, timeTo=None, sets=True):
print(f'{index}: adding index-pattern')
ipUID, _ipRes = self.addKibanaIndexPattern(index, timeField, overwrite=True)
if self.getKibanaConfig('defaultIndex') is None:
# BUG the following is not really setting the defaultIndex as the Kibana UI see it...
print(f'{index}: setting default index-pattern')
self.addKibanaConfig('defaultIndex', ipUID)
print(f'{index}: adding search')
seUID, _seRes = self.addKibanaSearch(index+"-search", searchCols)
visUIDs = []
for i in visCols:
if isinstance(i, str):
i = HorizontalBar(i)
print(f'{index}: adding visualisation for {i.field}')
uid, _res = self.addVisualization(f'[{index}] {i.field}', i)
visUIDs.append(uid)
if dashboard:
print(f'{index}: adding dashboard')
daUID, _daRes = self.addDashboard(f'[{index}] Dashboard', seUID, visUIDs, timeFrom=timeFrom, timeTo=timeTo)
if sets:
print(f'{index}: setting time defaults')
self.set_kibana_timeDefaults(timeFrom, timeTo)
return {'index-patterh': ipUID, 'search': seUID, 'visualization': visUIDs, 'dashboard': daUID}
def set_kibana_timeDefaults(self, timeFrom="now-15m", timeTo='now', mode='quick'):
'''
For accepted formats see https://www.elastic.co/guide/en/elasticsearch/reference/6.7/common-options.html#date-math
'''
# more configs on https://www.elastic.co/guide/en/kibana/current/advanced-options.html
# maybe also set the timepicker:quickRanges key to a list of interesting time ranges.
# value is a JSON, but as string.
# btw we need to escape the outer {} in the f'...' string
timeFrom = timeFrom or 'now-15m'
timeTo = timeTo or 'now'
value = { "from": str(timeFrom), "to": str(timeTo), "mode": "{mode}" }
uid, res = self.addKibanaConfig("timepickerts", json.dumps(value))
return uid, res
def set_kibana_timeQuickRange(self, display, timeFrom, timeTo, section=3, id=None):
'''
For accepted formats see https://www.elastic.co/guide/en/elasticsearch/reference/6.7/common-options.html#date-math
'''
if id is None:
result = requests.get(self.kibanaUrl('api/status'))
result.raise_for_status()
id = result.json()['version']['number']
timeFrom = timeFrom or 'now-15m'
timeTo = timeTo or 'now'
value = [{"from": str(timeFrom), "to": str(timeTo), "display": display, "section": section}]
uid, res = self.addKibanaConfig("timepicker:quickRanges", json.dumps(value), id=id)
return uid, res
def show_kibana_jupyter(self, height=500):
# see e.g. https://github.com/tensorflow/tensorboard/blob/d9092143511cb04e4bfc904820305f1be45c67b3/tensorboard/notebook.py
from IPython.display import IFrame
url = self.kibanaUrl()
iframe = IFrame(src=url, height=500, width="100%")
return iframe
class Visualization(object):
def __init__(self, field, agg='count'):
self.field = field
self.agg = agg
self.visType = None
def visState(self, title):
visState = {
'aggs':[
{'id': '1', 'schema':'metric', 'type': self.agg},
self.agg2(),
],
'params': {
'type': self.visType
},
'title': title,
'type': self.visType,
}
return visState
def agg2(self):
raise NotImplementedError()
class HorizontalBar(Visualization):
def __init__(self, field, size=20):
super(HorizontalBar, self).__init__(field)
self.visType = 'horizontal_bar'
self.size = size
def agg2(self):
return {
'id': '2', 'schema':'segment', 'type': 'terms',
'params': {'field': self.field, 'size': self.size, 'order': 'desc', 'orderBy': '1', }
}
class TagCloud(HorizontalBar):
def __init__(self, field, size=20):
super(TagCloud, self).__init__(field, size)
self.visType = 'tagcloud'
self.params = {
"scale": "linear",
"orientation": "single",
"minFontSize": 18,
"maxFontSize": 72,
"showLabel": True
}
class Histogram(Visualization):
def __init__(self, field, interval):
super(Histogram, self).__init__(field)
self.visType = 'histogram'
self.interval = interval
def agg2(self):
return {
"id": "2",
"enabled": True,
"type": "histogram",
"schema": "segment",
"params": {
"field": self.field,
"interval": self.interval,
"extended_bounds": {}
}
}
class DateHistogram(Visualization):
def __init__(self, field):
super(DateHistogram, self).__init__(field)
self.visType = 'histogram'
def agg2(self):
return {
"id": "2",
"enabled": True,
"type": "date_histogram",
"schema": "segment",
"params": {
"field": self.field,
"interval": "auto",
"customInterval": "2h",
"min_doc_count": 1,
"extended_bounds": {}
}
} | # ],
# 'params': {
# 'type': 'histogram' | random_line_split |
kibana.py | # -*- coding: utf-8 -*-
"""Kibana module."""
import pandas
import requests
import json
from . import util
class Kibana(object):
def __init__(self, host='localhost', port=5601, protocol='http',
verify_certs=True, **kwargs):
self._host = host
self._port = port
self._protocol = protocol
self._verify_certs = verify_certs
self._defaultIndexPatternUID = None
self._defaultSearchUID = None
def kibanaUrl(self, path=""):
# TODO maybe URLEncode path?
if path and path[0] != '/':
# TODO Warn about missing initial '/'?
path = '/'+path
return f"{self._protocol}://{self._host}:{self._port}{path}"
def alive(self, verbose=True):
resp = requests.head(self.kibanaUrl())
return resp.status_code == 200
def show_kibana(self, how=None, *args, **kwargs):
if how is None:
how = 'jupyter' if util.__IS_JUPYTER else 'webbrowser'
# TODO can we figure out "non-interactive" to put how='print' then?
how = how if isinstance(how, list) else [how]
url = self.kibanaUrl(*args, **kwargs)
if 'print' in how:
print(f"Open: {url}")
if 'webbrowser' in how:
import webbrowser
webbrowser.open(url)
if 'jupyter' in how or 'ipython' in how:
from IPython.core.display import HTML
return HTML(self._repr_html_())
def __repr__(self):
return f"Kibana on {self.kibanaUrl()}"
def _repr_html_(self):
return f"Kibana on <a href='{self.kibanaUrl()}'>{self.kibanaUrl()}</a>"
def getKibanaSavedObjects(self, type='index-pattern', search=None, fields=None):
type = '&type=' + type if type else ''
search = '&search=' + search if search else ''
fields = '&fields=' + fields if fields else ''
resp = requests.get(self.kibanaUrl(f'/api/saved_objects/_find?{type}{search}{fields}'))
resp.raise_for_status()
result = resp.json()['saved_objects']
return result
def postKibanaSavedObject(self, type, attributes, id=None):
body = { "attributes": attributes }
id = "/"+id if id else ""
result = requests.post(self.kibanaUrl(f'/api/saved_objects/{type}{id}?overwrite=true'), headers={"kbn-xsrf": "true"}, json=body)
result.raise_for_status()
# return result.json()
return result.json()['id'], result.json()
def deleteKibanaSavedObject(self, type, uid):
u = self.kibanaUrl(f'/api/saved_objects/{type}/{uid}')
resp = requests.delete(u, headers={"kbn-xsrf": "true"})
resp.raise_for_status
print(resp.json())
return resp.json()
def truncateKibanaSavedObjects(self, types=['dashboard','visualization','search','index-pattern'], search=None):
for t in types:
if search is not None and t=='index-pattern_________':
continue
objs = self.getKibanaSavedObjects(type=t, fields='name', search=search)
print(f'deleting {len(objs)} objects of type {t}...')
for i in objs:
# print(i['id'])
self.deleteKibanaSavedObject(t, i['id'])
print('finished deleting')
def getKibanaConfig(self, name=None, onlyLastSetValue=True, defaultValue=None):
assert onlyLastSetValue
config = self.getKibanaSavedObjects('config')
# TODO need to implement for onlyLastSetValue=False as well or warn if multiple values?
result = dict()
for i in config:
c = i['attributes']
result.update(c)
if name is not None:
if name in result:
return result[name]
else:
return defaultValue
return result
def addKibanaConfig(self, name, value, addToList=False, id=None):
assert not addToList
attributes = { name: value }
res = self.postKibanaSavedObject('config', attributes, id=id)
return res
def addKibanaIndexPattern(self, indexPattern, timeField=None, setDefaultIndexPattern=True, overwrite=False):
if not overwrite:
# TODO handle in postKibana URL parameter?
self.getKibanaSavedObjects('index-pattern', indexPattern)
for i in self.getKibanaSavedObjects('index-pattern',indexPattern,'title'):
if i['attributes']['title'] == indexPattern:
return
attributes = {
"title": indexPattern,
}
if timeField is not None:
attributes["timeFieldName"] = timeField
uid, result = self.postKibanaSavedObject('index-pattern', attributes)
if setDefaultIndexPattern:
self._defaultIndexPatternUID = uid
return uid, result
def addKibanaSearch(self, title, columns, description=None, sort=None, setDefaultSearch=True, indexPatternUID=None):
if indexPatternUID is None:
indexPatternUID = self._defaultIndexPatternUID
searchSourceJSON = {
"index": indexPatternUID,
# "highlightAll": True,
# "version": True,
"query":{"query":"","language":"kuery"},
"filter":[]
}
attributes = {
"title": title, 'columns': columns,
'kibanaSavedObjectMeta': {'searchSourceJSON': json.dumps(searchSourceJSON)}
}
if description is not None:
attributes['description'] = description
if sort is not None:
attributes['sort'] = sort
uid, res = self.postKibanaSavedObject(type='search', attributes=attributes)
if setDefaultSearch:
self._defaultSearchUID = uid
return uid, res
def addVisualization(self, title, viz, indexPatternUID=None):
if indexPatternUID is None:
indexPatternUID = self._defaultIndexPatternUID
# visState = {
# 'aggs':[
# {'id': '1', 'schema':'metric', 'type': 'count'},
# {
# 'id': '2', 'schema':'segment', 'type': 'terms',
# 'params': {'field': field, 'size': size, 'order': 'desc', 'orderBy': '1', }
# },
# ],
# 'params': {
# 'type': 'histogram'
# },
# 'title': title,
# 'type': visType,
# }
assert isinstance(viz, Visualization)
visState = viz.visState(title)
searchSourceJSON = {
"index":indexPatternUID,
"filter":[],
"query":{"language":"kuery","query":""}
}
uid, res = self.postKibanaSavedObject('visualization', attributes={
'title': title, 'visState': json.dumps(visState), 'uiStateJSON': '{"vis":{"legendOpen":false}}',
'kibanaSavedObjectMeta': {'searchSourceJSON': json.dumps(searchSourceJSON)}
})
return uid, res
def addDashboard(self, title, searchUID, visUIDs, timeFrom=None, timeTo=None, nVisCols=3, visW=16, visH=16, searchW=48, searchH=16):
panels = [{
'panelIndex': '1',
'gridData': {'x': 0, 'y': 0, 'w': searchW, 'h': searchH, 'i': '1'},
'version': '6.3.2',
'type': 'search',
'id': searchUID,
'embeddableConfig': {}
}]
for i, v in enumerate(visUIDs):
ix, iy = i % nVisCols, i // nVisCols
x, y = ix*visW , searchH + iy*visH
# print(ix,iy, x,y)
iStr = str(i+2)
panels.append({
'panelIndex': iStr,
'gridData': {'x': x, 'y': y, 'w': visW, 'h': visH, 'i': iStr},
'version': '6.3.2',
'type': 'visualization',
'id': v,
'embeddableConfig': {}
})
attributes = {
'title': title,
# 'hits': 0,
'description': '',
'panelsJSON': json.dumps(panels),
'optionsJSON': '{"darkTheme":false,"useMargins":true,"hidePanelTitles":false}',
# 'version': 1,
# 'refreshInterval': {'display': 'Off', 'pause': False, 'value': 0},
'kibanaSavedObjectMeta': {'searchSourceJSON': '{"query":{"query":"","language":"kuery"},"filter":[],"highlightAll":true,"version":true}'}
}
if timeFrom is not None and timeTo is not None:
attributes['timeRestore'] = True
attributes['timeTo'] = str(timeTo)
attributes['timeFrom'] = str(timeFrom)
uid, res = self.postKibanaSavedObject('dashboard',attributes)
return uid, res
def setup_kibana(self, index, timeField=None, searchCols=[], visCols=None, dashboard=True, timeFrom=None, timeTo=None, sets=True):
print(f'{index}: adding index-pattern')
ipUID, _ipRes = self.addKibanaIndexPattern(index, timeField, overwrite=True)
if self.getKibanaConfig('defaultIndex') is None:
# BUG the following is not really setting the defaultIndex as the Kibana UI see it...
print(f'{index}: setting default index-pattern')
self.addKibanaConfig('defaultIndex', ipUID)
print(f'{index}: adding search')
seUID, _seRes = self.addKibanaSearch(index+"-search", searchCols)
visUIDs = []
for i in visCols:
if isinstance(i, str):
i = HorizontalBar(i)
print(f'{index}: adding visualisation for {i.field}')
uid, _res = self.addVisualization(f'[{index}] {i.field}', i)
visUIDs.append(uid)
if dashboard:
print(f'{index}: adding dashboard')
daUID, _daRes = self.addDashboard(f'[{index}] Dashboard', seUID, visUIDs, timeFrom=timeFrom, timeTo=timeTo)
if sets:
print(f'{index}: setting time defaults')
self.set_kibana_timeDefaults(timeFrom, timeTo)
return {'index-patterh': ipUID, 'search': seUID, 'visualization': visUIDs, 'dashboard': daUID}
def set_kibana_timeDefaults(self, timeFrom="now-15m", timeTo='now', mode='quick'):
'''
For accepted formats see https://www.elastic.co/guide/en/elasticsearch/reference/6.7/common-options.html#date-math
'''
# more configs on https://www.elastic.co/guide/en/kibana/current/advanced-options.html
# maybe also set the timepicker:quickRanges key to a list of interesting time ranges.
# value is a JSON, but as string.
# btw we need to escape the outer {} in the f'...' string
timeFrom = timeFrom or 'now-15m'
timeTo = timeTo or 'now'
value = { "from": str(timeFrom), "to": str(timeTo), "mode": "{mode}" }
uid, res = self.addKibanaConfig("timepickerts", json.dumps(value))
return uid, res
def set_kibana_timeQuickRange(self, display, timeFrom, timeTo, section=3, id=None):
'''
For accepted formats see https://www.elastic.co/guide/en/elasticsearch/reference/6.7/common-options.html#date-math
'''
if id is None:
result = requests.get(self.kibanaUrl('api/status'))
result.raise_for_status()
id = result.json()['version']['number']
timeFrom = timeFrom or 'now-15m'
timeTo = timeTo or 'now'
value = [{"from": str(timeFrom), "to": str(timeTo), "display": display, "section": section}]
uid, res = self.addKibanaConfig("timepicker:quickRanges", json.dumps(value), id=id)
return uid, res
def show_kibana_jupyter(self, height=500):
# see e.g. https://github.com/tensorflow/tensorboard/blob/d9092143511cb04e4bfc904820305f1be45c67b3/tensorboard/notebook.py
from IPython.display import IFrame
url = self.kibanaUrl()
iframe = IFrame(src=url, height=500, width="100%")
return iframe
class Visualization(object):
def __init__(self, field, agg='count'):
self.field = field
self.agg = agg
self.visType = None
def visState(self, title):
visState = {
'aggs':[
{'id': '1', 'schema':'metric', 'type': self.agg},
self.agg2(),
],
'params': {
'type': self.visType
},
'title': title,
'type': self.visType,
}
return visState
def agg2(self):
raise NotImplementedError()
class HorizontalBar(Visualization):
def __init__(self, field, size=20):
super(HorizontalBar, self).__init__(field)
self.visType = 'horizontal_bar'
self.size = size
def agg2(self):
return {
'id': '2', 'schema':'segment', 'type': 'terms',
'params': {'field': self.field, 'size': self.size, 'order': 'desc', 'orderBy': '1', }
}
class TagCloud(HorizontalBar):
def __init__(self, field, size=20):
super(TagCloud, self).__init__(field, size)
self.visType = 'tagcloud'
self.params = {
"scale": "linear",
"orientation": "single",
"minFontSize": 18,
"maxFontSize": 72,
"showLabel": True
}
class Histogram(Visualization):
def __init__(self, field, interval):
|
def agg2(self):
return {
"id": "2",
"enabled": True,
"type": "histogram",
"schema": "segment",
"params": {
"field": self.field,
"interval": self.interval,
"extended_bounds": {}
}
}
class DateHistogram(Visualization):
def __init__(self, field):
super(DateHistogram, self).__init__(field)
self.visType = 'histogram'
def agg2(self):
return {
"id": "2",
"enabled": True,
"type": "date_histogram",
"schema": "segment",
"params": {
"field": self.field,
"interval": "auto",
"customInterval": "2h",
"min_doc_count": 1,
"extended_bounds": {}
}
}
| super(Histogram, self).__init__(field)
self.visType = 'histogram'
self.interval = interval | identifier_body |
kibana.py | # -*- coding: utf-8 -*-
"""Kibana module."""
import pandas
import requests
import json
from . import util
class Kibana(object):
def __init__(self, host='localhost', port=5601, protocol='http',
verify_certs=True, **kwargs):
self._host = host
self._port = port
self._protocol = protocol
self._verify_certs = verify_certs
self._defaultIndexPatternUID = None
self._defaultSearchUID = None
def kibanaUrl(self, path=""):
# TODO maybe URLEncode path?
if path and path[0] != '/':
# TODO Warn about missing initial '/'?
path = '/'+path
return f"{self._protocol}://{self._host}:{self._port}{path}"
def alive(self, verbose=True):
resp = requests.head(self.kibanaUrl())
return resp.status_code == 200
def show_kibana(self, how=None, *args, **kwargs):
if how is None:
|
how = how if isinstance(how, list) else [how]
url = self.kibanaUrl(*args, **kwargs)
if 'print' in how:
print(f"Open: {url}")
if 'webbrowser' in how:
import webbrowser
webbrowser.open(url)
if 'jupyter' in how or 'ipython' in how:
from IPython.core.display import HTML
return HTML(self._repr_html_())
def __repr__(self):
return f"Kibana on {self.kibanaUrl()}"
def _repr_html_(self):
return f"Kibana on <a href='{self.kibanaUrl()}'>{self.kibanaUrl()}</a>"
def getKibanaSavedObjects(self, type='index-pattern', search=None, fields=None):
type = '&type=' + type if type else ''
search = '&search=' + search if search else ''
fields = '&fields=' + fields if fields else ''
resp = requests.get(self.kibanaUrl(f'/api/saved_objects/_find?{type}{search}{fields}'))
resp.raise_for_status()
result = resp.json()['saved_objects']
return result
def postKibanaSavedObject(self, type, attributes, id=None):
body = { "attributes": attributes }
id = "/"+id if id else ""
result = requests.post(self.kibanaUrl(f'/api/saved_objects/{type}{id}?overwrite=true'), headers={"kbn-xsrf": "true"}, json=body)
result.raise_for_status()
# return result.json()
return result.json()['id'], result.json()
def deleteKibanaSavedObject(self, type, uid):
u = self.kibanaUrl(f'/api/saved_objects/{type}/{uid}')
resp = requests.delete(u, headers={"kbn-xsrf": "true"})
resp.raise_for_status
print(resp.json())
return resp.json()
def truncateKibanaSavedObjects(self, types=['dashboard','visualization','search','index-pattern'], search=None):
for t in types:
if search is not None and t=='index-pattern_________':
continue
objs = self.getKibanaSavedObjects(type=t, fields='name', search=search)
print(f'deleting {len(objs)} objects of type {t}...')
for i in objs:
# print(i['id'])
self.deleteKibanaSavedObject(t, i['id'])
print('finished deleting')
def getKibanaConfig(self, name=None, onlyLastSetValue=True, defaultValue=None):
assert onlyLastSetValue
config = self.getKibanaSavedObjects('config')
# TODO need to implement for onlyLastSetValue=False as well or warn if multiple values?
result = dict()
for i in config:
c = i['attributes']
result.update(c)
if name is not None:
if name in result:
return result[name]
else:
return defaultValue
return result
def addKibanaConfig(self, name, value, addToList=False, id=None):
assert not addToList
attributes = { name: value }
res = self.postKibanaSavedObject('config', attributes, id=id)
return res
def addKibanaIndexPattern(self, indexPattern, timeField=None, setDefaultIndexPattern=True, overwrite=False):
if not overwrite:
# TODO handle in postKibana URL parameter?
self.getKibanaSavedObjects('index-pattern', indexPattern)
for i in self.getKibanaSavedObjects('index-pattern',indexPattern,'title'):
if i['attributes']['title'] == indexPattern:
return
attributes = {
"title": indexPattern,
}
if timeField is not None:
attributes["timeFieldName"] = timeField
uid, result = self.postKibanaSavedObject('index-pattern', attributes)
if setDefaultIndexPattern:
self._defaultIndexPatternUID = uid
return uid, result
def addKibanaSearch(self, title, columns, description=None, sort=None, setDefaultSearch=True, indexPatternUID=None):
if indexPatternUID is None:
indexPatternUID = self._defaultIndexPatternUID
searchSourceJSON = {
"index": indexPatternUID,
# "highlightAll": True,
# "version": True,
"query":{"query":"","language":"kuery"},
"filter":[]
}
attributes = {
"title": title, 'columns': columns,
'kibanaSavedObjectMeta': {'searchSourceJSON': json.dumps(searchSourceJSON)}
}
if description is not None:
attributes['description'] = description
if sort is not None:
attributes['sort'] = sort
uid, res = self.postKibanaSavedObject(type='search', attributes=attributes)
if setDefaultSearch:
self._defaultSearchUID = uid
return uid, res
def addVisualization(self, title, viz, indexPatternUID=None):
if indexPatternUID is None:
indexPatternUID = self._defaultIndexPatternUID
# visState = {
# 'aggs':[
# {'id': '1', 'schema':'metric', 'type': 'count'},
# {
# 'id': '2', 'schema':'segment', 'type': 'terms',
# 'params': {'field': field, 'size': size, 'order': 'desc', 'orderBy': '1', }
# },
# ],
# 'params': {
# 'type': 'histogram'
# },
# 'title': title,
# 'type': visType,
# }
assert isinstance(viz, Visualization)
visState = viz.visState(title)
searchSourceJSON = {
"index":indexPatternUID,
"filter":[],
"query":{"language":"kuery","query":""}
}
uid, res = self.postKibanaSavedObject('visualization', attributes={
'title': title, 'visState': json.dumps(visState), 'uiStateJSON': '{"vis":{"legendOpen":false}}',
'kibanaSavedObjectMeta': {'searchSourceJSON': json.dumps(searchSourceJSON)}
})
return uid, res
def addDashboard(self, title, searchUID, visUIDs, timeFrom=None, timeTo=None, nVisCols=3, visW=16, visH=16, searchW=48, searchH=16):
panels = [{
'panelIndex': '1',
'gridData': {'x': 0, 'y': 0, 'w': searchW, 'h': searchH, 'i': '1'},
'version': '6.3.2',
'type': 'search',
'id': searchUID,
'embeddableConfig': {}
}]
for i, v in enumerate(visUIDs):
ix, iy = i % nVisCols, i // nVisCols
x, y = ix*visW , searchH + iy*visH
# print(ix,iy, x,y)
iStr = str(i+2)
panels.append({
'panelIndex': iStr,
'gridData': {'x': x, 'y': y, 'w': visW, 'h': visH, 'i': iStr},
'version': '6.3.2',
'type': 'visualization',
'id': v,
'embeddableConfig': {}
})
attributes = {
'title': title,
# 'hits': 0,
'description': '',
'panelsJSON': json.dumps(panels),
'optionsJSON': '{"darkTheme":false,"useMargins":true,"hidePanelTitles":false}',
# 'version': 1,
# 'refreshInterval': {'display': 'Off', 'pause': False, 'value': 0},
'kibanaSavedObjectMeta': {'searchSourceJSON': '{"query":{"query":"","language":"kuery"},"filter":[],"highlightAll":true,"version":true}'}
}
if timeFrom is not None and timeTo is not None:
attributes['timeRestore'] = True
attributes['timeTo'] = str(timeTo)
attributes['timeFrom'] = str(timeFrom)
uid, res = self.postKibanaSavedObject('dashboard',attributes)
return uid, res
def setup_kibana(self, index, timeField=None, searchCols=[], visCols=None, dashboard=True, timeFrom=None, timeTo=None, sets=True):
print(f'{index}: adding index-pattern')
ipUID, _ipRes = self.addKibanaIndexPattern(index, timeField, overwrite=True)
if self.getKibanaConfig('defaultIndex') is None:
# BUG the following is not really setting the defaultIndex as the Kibana UI see it...
print(f'{index}: setting default index-pattern')
self.addKibanaConfig('defaultIndex', ipUID)
print(f'{index}: adding search')
seUID, _seRes = self.addKibanaSearch(index+"-search", searchCols)
visUIDs = []
for i in visCols:
if isinstance(i, str):
i = HorizontalBar(i)
print(f'{index}: adding visualisation for {i.field}')
uid, _res = self.addVisualization(f'[{index}] {i.field}', i)
visUIDs.append(uid)
if dashboard:
print(f'{index}: adding dashboard')
daUID, _daRes = self.addDashboard(f'[{index}] Dashboard', seUID, visUIDs, timeFrom=timeFrom, timeTo=timeTo)
if sets:
print(f'{index}: setting time defaults')
self.set_kibana_timeDefaults(timeFrom, timeTo)
return {'index-patterh': ipUID, 'search': seUID, 'visualization': visUIDs, 'dashboard': daUID}
def set_kibana_timeDefaults(self, timeFrom="now-15m", timeTo='now', mode='quick'):
'''
For accepted formats see https://www.elastic.co/guide/en/elasticsearch/reference/6.7/common-options.html#date-math
'''
# more configs on https://www.elastic.co/guide/en/kibana/current/advanced-options.html
# maybe also set the timepicker:quickRanges key to a list of interesting time ranges.
# value is a JSON, but as string.
# btw we need to escape the outer {} in the f'...' string
timeFrom = timeFrom or 'now-15m'
timeTo = timeTo or 'now'
value = { "from": str(timeFrom), "to": str(timeTo), "mode": "{mode}" }
uid, res = self.addKibanaConfig("timepickerts", json.dumps(value))
return uid, res
def set_kibana_timeQuickRange(self, display, timeFrom, timeTo, section=3, id=None):
'''
For accepted formats see https://www.elastic.co/guide/en/elasticsearch/reference/6.7/common-options.html#date-math
'''
if id is None:
result = requests.get(self.kibanaUrl('api/status'))
result.raise_for_status()
id = result.json()['version']['number']
timeFrom = timeFrom or 'now-15m'
timeTo = timeTo or 'now'
value = [{"from": str(timeFrom), "to": str(timeTo), "display": display, "section": section}]
uid, res = self.addKibanaConfig("timepicker:quickRanges", json.dumps(value), id=id)
return uid, res
def show_kibana_jupyter(self, height=500):
# see e.g. https://github.com/tensorflow/tensorboard/blob/d9092143511cb04e4bfc904820305f1be45c67b3/tensorboard/notebook.py
from IPython.display import IFrame
url = self.kibanaUrl()
iframe = IFrame(src=url, height=500, width="100%")
return iframe
class Visualization(object):
def __init__(self, field, agg='count'):
self.field = field
self.agg = agg
self.visType = None
def visState(self, title):
visState = {
'aggs':[
{'id': '1', 'schema':'metric', 'type': self.agg},
self.agg2(),
],
'params': {
'type': self.visType
},
'title': title,
'type': self.visType,
}
return visState
def agg2(self):
raise NotImplementedError()
class HorizontalBar(Visualization):
def __init__(self, field, size=20):
super(HorizontalBar, self).__init__(field)
self.visType = 'horizontal_bar'
self.size = size
def agg2(self):
return {
'id': '2', 'schema':'segment', 'type': 'terms',
'params': {'field': self.field, 'size': self.size, 'order': 'desc', 'orderBy': '1', }
}
class TagCloud(HorizontalBar):
def __init__(self, field, size=20):
super(TagCloud, self).__init__(field, size)
self.visType = 'tagcloud'
self.params = {
"scale": "linear",
"orientation": "single",
"minFontSize": 18,
"maxFontSize": 72,
"showLabel": True
}
class Histogram(Visualization):
def __init__(self, field, interval):
super(Histogram, self).__init__(field)
self.visType = 'histogram'
self.interval = interval
def agg2(self):
return {
"id": "2",
"enabled": True,
"type": "histogram",
"schema": "segment",
"params": {
"field": self.field,
"interval": self.interval,
"extended_bounds": {}
}
}
class DateHistogram(Visualization):
def __init__(self, field):
super(DateHistogram, self).__init__(field)
self.visType = 'histogram'
def agg2(self):
return {
"id": "2",
"enabled": True,
"type": "date_histogram",
"schema": "segment",
"params": {
"field": self.field,
"interval": "auto",
"customInterval": "2h",
"min_doc_count": 1,
"extended_bounds": {}
}
}
| how = 'jupyter' if util.__IS_JUPYTER else 'webbrowser'
# TODO can we figure out "non-interactive" to put how='print' then? | conditional_block |
kibana.py | # -*- coding: utf-8 -*-
"""Kibana module."""
import pandas
import requests
import json
from . import util
class Kibana(object):
def __init__(self, host='localhost', port=5601, protocol='http',
verify_certs=True, **kwargs):
self._host = host
self._port = port
self._protocol = protocol
self._verify_certs = verify_certs
self._defaultIndexPatternUID = None
self._defaultSearchUID = None
def kibanaUrl(self, path=""):
# TODO maybe URLEncode path?
if path and path[0] != '/':
# TODO Warn about missing initial '/'?
path = '/'+path
return f"{self._protocol}://{self._host}:{self._port}{path}"
def alive(self, verbose=True):
resp = requests.head(self.kibanaUrl())
return resp.status_code == 200
def show_kibana(self, how=None, *args, **kwargs):
if how is None:
how = 'jupyter' if util.__IS_JUPYTER else 'webbrowser'
# TODO can we figure out "non-interactive" to put how='print' then?
how = how if isinstance(how, list) else [how]
url = self.kibanaUrl(*args, **kwargs)
if 'print' in how:
print(f"Open: {url}")
if 'webbrowser' in how:
import webbrowser
webbrowser.open(url)
if 'jupyter' in how or 'ipython' in how:
from IPython.core.display import HTML
return HTML(self._repr_html_())
def __repr__(self):
return f"Kibana on {self.kibanaUrl()}"
def _repr_html_(self):
return f"Kibana on <a href='{self.kibanaUrl()}'>{self.kibanaUrl()}</a>"
def getKibanaSavedObjects(self, type='index-pattern', search=None, fields=None):
type = '&type=' + type if type else ''
search = '&search=' + search if search else ''
fields = '&fields=' + fields if fields else ''
resp = requests.get(self.kibanaUrl(f'/api/saved_objects/_find?{type}{search}{fields}'))
resp.raise_for_status()
result = resp.json()['saved_objects']
return result
def postKibanaSavedObject(self, type, attributes, id=None):
body = { "attributes": attributes }
id = "/"+id if id else ""
result = requests.post(self.kibanaUrl(f'/api/saved_objects/{type}{id}?overwrite=true'), headers={"kbn-xsrf": "true"}, json=body)
result.raise_for_status()
# return result.json()
return result.json()['id'], result.json()
def deleteKibanaSavedObject(self, type, uid):
u = self.kibanaUrl(f'/api/saved_objects/{type}/{uid}')
resp = requests.delete(u, headers={"kbn-xsrf": "true"})
resp.raise_for_status
print(resp.json())
return resp.json()
def truncateKibanaSavedObjects(self, types=['dashboard','visualization','search','index-pattern'], search=None):
for t in types:
if search is not None and t=='index-pattern_________':
continue
objs = self.getKibanaSavedObjects(type=t, fields='name', search=search)
print(f'deleting {len(objs)} objects of type {t}...')
for i in objs:
# print(i['id'])
self.deleteKibanaSavedObject(t, i['id'])
print('finished deleting')
def getKibanaConfig(self, name=None, onlyLastSetValue=True, defaultValue=None):
assert onlyLastSetValue
config = self.getKibanaSavedObjects('config')
# TODO need to implement for onlyLastSetValue=False as well or warn if multiple values?
result = dict()
for i in config:
c = i['attributes']
result.update(c)
if name is not None:
if name in result:
return result[name]
else:
return defaultValue
return result
def addKibanaConfig(self, name, value, addToList=False, id=None):
assert not addToList
attributes = { name: value }
res = self.postKibanaSavedObject('config', attributes, id=id)
return res
def addKibanaIndexPattern(self, indexPattern, timeField=None, setDefaultIndexPattern=True, overwrite=False):
if not overwrite:
# TODO handle in postKibana URL parameter?
self.getKibanaSavedObjects('index-pattern', indexPattern)
for i in self.getKibanaSavedObjects('index-pattern',indexPattern,'title'):
if i['attributes']['title'] == indexPattern:
return
attributes = {
"title": indexPattern,
}
if timeField is not None:
attributes["timeFieldName"] = timeField
uid, result = self.postKibanaSavedObject('index-pattern', attributes)
if setDefaultIndexPattern:
self._defaultIndexPatternUID = uid
return uid, result
def addKibanaSearch(self, title, columns, description=None, sort=None, setDefaultSearch=True, indexPatternUID=None):
if indexPatternUID is None:
indexPatternUID = self._defaultIndexPatternUID
searchSourceJSON = {
"index": indexPatternUID,
# "highlightAll": True,
# "version": True,
"query":{"query":"","language":"kuery"},
"filter":[]
}
attributes = {
"title": title, 'columns': columns,
'kibanaSavedObjectMeta': {'searchSourceJSON': json.dumps(searchSourceJSON)}
}
if description is not None:
attributes['description'] = description
if sort is not None:
attributes['sort'] = sort
uid, res = self.postKibanaSavedObject(type='search', attributes=attributes)
if setDefaultSearch:
self._defaultSearchUID = uid
return uid, res
def addVisualization(self, title, viz, indexPatternUID=None):
if indexPatternUID is None:
indexPatternUID = self._defaultIndexPatternUID
# visState = {
# 'aggs':[
# {'id': '1', 'schema':'metric', 'type': 'count'},
# {
# 'id': '2', 'schema':'segment', 'type': 'terms',
# 'params': {'field': field, 'size': size, 'order': 'desc', 'orderBy': '1', }
# },
# ],
# 'params': {
# 'type': 'histogram'
# },
# 'title': title,
# 'type': visType,
# }
assert isinstance(viz, Visualization)
visState = viz.visState(title)
searchSourceJSON = {
"index":indexPatternUID,
"filter":[],
"query":{"language":"kuery","query":""}
}
uid, res = self.postKibanaSavedObject('visualization', attributes={
'title': title, 'visState': json.dumps(visState), 'uiStateJSON': '{"vis":{"legendOpen":false}}',
'kibanaSavedObjectMeta': {'searchSourceJSON': json.dumps(searchSourceJSON)}
})
return uid, res
def addDashboard(self, title, searchUID, visUIDs, timeFrom=None, timeTo=None, nVisCols=3, visW=16, visH=16, searchW=48, searchH=16):
panels = [{
'panelIndex': '1',
'gridData': {'x': 0, 'y': 0, 'w': searchW, 'h': searchH, 'i': '1'},
'version': '6.3.2',
'type': 'search',
'id': searchUID,
'embeddableConfig': {}
}]
for i, v in enumerate(visUIDs):
ix, iy = i % nVisCols, i // nVisCols
x, y = ix*visW , searchH + iy*visH
# print(ix,iy, x,y)
iStr = str(i+2)
panels.append({
'panelIndex': iStr,
'gridData': {'x': x, 'y': y, 'w': visW, 'h': visH, 'i': iStr},
'version': '6.3.2',
'type': 'visualization',
'id': v,
'embeddableConfig': {}
})
attributes = {
'title': title,
# 'hits': 0,
'description': '',
'panelsJSON': json.dumps(panels),
'optionsJSON': '{"darkTheme":false,"useMargins":true,"hidePanelTitles":false}',
# 'version': 1,
# 'refreshInterval': {'display': 'Off', 'pause': False, 'value': 0},
'kibanaSavedObjectMeta': {'searchSourceJSON': '{"query":{"query":"","language":"kuery"},"filter":[],"highlightAll":true,"version":true}'}
}
if timeFrom is not None and timeTo is not None:
attributes['timeRestore'] = True
attributes['timeTo'] = str(timeTo)
attributes['timeFrom'] = str(timeFrom)
uid, res = self.postKibanaSavedObject('dashboard',attributes)
return uid, res
def setup_kibana(self, index, timeField=None, searchCols=[], visCols=None, dashboard=True, timeFrom=None, timeTo=None, sets=True):
print(f'{index}: adding index-pattern')
ipUID, _ipRes = self.addKibanaIndexPattern(index, timeField, overwrite=True)
if self.getKibanaConfig('defaultIndex') is None:
# BUG the following is not really setting the defaultIndex as the Kibana UI see it...
print(f'{index}: setting default index-pattern')
self.addKibanaConfig('defaultIndex', ipUID)
print(f'{index}: adding search')
seUID, _seRes = self.addKibanaSearch(index+"-search", searchCols)
visUIDs = []
for i in visCols:
if isinstance(i, str):
i = HorizontalBar(i)
print(f'{index}: adding visualisation for {i.field}')
uid, _res = self.addVisualization(f'[{index}] {i.field}', i)
visUIDs.append(uid)
if dashboard:
print(f'{index}: adding dashboard')
daUID, _daRes = self.addDashboard(f'[{index}] Dashboard', seUID, visUIDs, timeFrom=timeFrom, timeTo=timeTo)
if sets:
print(f'{index}: setting time defaults')
self.set_kibana_timeDefaults(timeFrom, timeTo)
return {'index-patterh': ipUID, 'search': seUID, 'visualization': visUIDs, 'dashboard': daUID}
def set_kibana_timeDefaults(self, timeFrom="now-15m", timeTo='now', mode='quick'):
'''
For accepted formats see https://www.elastic.co/guide/en/elasticsearch/reference/6.7/common-options.html#date-math
'''
# more configs on https://www.elastic.co/guide/en/kibana/current/advanced-options.html
# maybe also set the timepicker:quickRanges key to a list of interesting time ranges.
# value is a JSON, but as string.
# btw we need to escape the outer {} in the f'...' string
timeFrom = timeFrom or 'now-15m'
timeTo = timeTo or 'now'
value = { "from": str(timeFrom), "to": str(timeTo), "mode": "{mode}" }
uid, res = self.addKibanaConfig("timepickerts", json.dumps(value))
return uid, res
def set_kibana_timeQuickRange(self, display, timeFrom, timeTo, section=3, id=None):
'''
For accepted formats see https://www.elastic.co/guide/en/elasticsearch/reference/6.7/common-options.html#date-math
'''
if id is None:
result = requests.get(self.kibanaUrl('api/status'))
result.raise_for_status()
id = result.json()['version']['number']
timeFrom = timeFrom or 'now-15m'
timeTo = timeTo or 'now'
value = [{"from": str(timeFrom), "to": str(timeTo), "display": display, "section": section}]
uid, res = self.addKibanaConfig("timepicker:quickRanges", json.dumps(value), id=id)
return uid, res
def show_kibana_jupyter(self, height=500):
# see e.g. https://github.com/tensorflow/tensorboard/blob/d9092143511cb04e4bfc904820305f1be45c67b3/tensorboard/notebook.py
from IPython.display import IFrame
url = self.kibanaUrl()
iframe = IFrame(src=url, height=500, width="100%")
return iframe
class Visualization(object):
def __init__(self, field, agg='count'):
self.field = field
self.agg = agg
self.visType = None
def visState(self, title):
visState = {
'aggs':[
{'id': '1', 'schema':'metric', 'type': self.agg},
self.agg2(),
],
'params': {
'type': self.visType
},
'title': title,
'type': self.visType,
}
return visState
def agg2(self):
raise NotImplementedError()
class HorizontalBar(Visualization):
def __init__(self, field, size=20):
super(HorizontalBar, self).__init__(field)
self.visType = 'horizontal_bar'
self.size = size
def agg2(self):
return {
'id': '2', 'schema':'segment', 'type': 'terms',
'params': {'field': self.field, 'size': self.size, 'order': 'desc', 'orderBy': '1', }
}
class | (HorizontalBar):
def __init__(self, field, size=20):
super(TagCloud, self).__init__(field, size)
self.visType = 'tagcloud'
self.params = {
"scale": "linear",
"orientation": "single",
"minFontSize": 18,
"maxFontSize": 72,
"showLabel": True
}
class Histogram(Visualization):
def __init__(self, field, interval):
super(Histogram, self).__init__(field)
self.visType = 'histogram'
self.interval = interval
def agg2(self):
return {
"id": "2",
"enabled": True,
"type": "histogram",
"schema": "segment",
"params": {
"field": self.field,
"interval": self.interval,
"extended_bounds": {}
}
}
class DateHistogram(Visualization):
def __init__(self, field):
super(DateHistogram, self).__init__(field)
self.visType = 'histogram'
def agg2(self):
return {
"id": "2",
"enabled": True,
"type": "date_histogram",
"schema": "segment",
"params": {
"field": self.field,
"interval": "auto",
"customInterval": "2h",
"min_doc_count": 1,
"extended_bounds": {}
}
}
| TagCloud | identifier_name |
mod.rs | //! Code to compute example inputs given a backtrace.
use crate::grammar::repr::*;
use crate::message::builder::InlineBuilder;
use crate::message::Content;
use crate::style::Style;
use crate::tls::Tls;
use ascii_canvas::AsciiView;
use std::{
cmp::Ordering,
fmt::{Debug, Error, Formatter},
};
#[cfg(test)]
mod test;
/// An "example" input and the way it was derived. This can be
/// serialized into useful text. For example, it might represent
/// something like this:
///
/// ```
/// Looking at
/// |
/// v
/// Ty "->" Ty "->" Ty
/// | | |
/// +-Ty-----+ |
/// | |
/// +-Ty-------------+
/// ```
///
/// The top-line is the `symbols` vector. The groupings below are
/// stored in the `reductions` vector, in order from smallest to
/// largest (they are always properly nested). The `cursor` field
/// indicates the current lookahead token.
///
/// The `symbols` vector is actually `Option<Symbol>` to account
/// for empty reductions:
///
/// ```
/// A B
/// | | | |
/// | +-Y-+ |
/// +-Z-----+
/// ```
///
/// The "empty space" between A and B would be represented as `None`.
#[derive(Clone, Debug)]
pub struct Example {
pub symbols: Vec<ExampleSymbol>,
pub cursor: usize,
pub reductions: Vec<Reduction>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ExampleSymbol {
Symbol(Symbol),
Epsilon,
}
#[derive(Copy, Clone, Default)]
pub struct ExampleStyles {
pub before_cursor: Style,
pub on_cursor: Style,
pub after_cursor: Style,
}
#[derive(Clone, Debug)]
pub struct Reduction {
pub start: usize,
pub end: usize,
pub nonterminal: NonterminalString,
}
impl Example {
/// Length of each symbol. Each will need *at least* that amount
/// of space. :) Measure in characters, under the assumption of a
/// mono-spaced font. Also add a final `0` marker which will serve
/// as the end position.
fn lengths(&self) -> Vec<usize> {
self.symbols
.iter()
.map(|s| match *s {
ExampleSymbol::Symbol(ref s) => format!("{}", s).chars().count(),
ExampleSymbol::Epsilon => 1, // display as " "
})
.chain(Some(0))
.collect()
}
/// Extract a prefix of the list of symbols from this `Example`
/// and make a styled list of them, like:
///
/// Ty "->" Ty -> "Ty"
pub fn to_symbol_list(&self, length: usize, styles: ExampleStyles) -> Box<dyn Content> {
let mut builder = InlineBuilder::new().begin_spaced();
for (index, symbol) in self.symbols[..length].iter().enumerate() {
let style = match index.cmp(&self.cursor) {
Ordering::Less => styles.before_cursor,
Ordering::Equal => match *symbol {
ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor,
ExampleSymbol::Symbol(Symbol::Nonterminal(_)) => styles.after_cursor,
ExampleSymbol::Epsilon => styles.after_cursor,
},
Ordering::Greater => styles.after_cursor,
};
if let ExampleSymbol::Symbol(ref s) = symbol {
builder = builder.push(s.clone()).styled(style);
}
}
builder.end().indented().end()
}
/// Render the example into a styled diagram suitable for
/// embedding in an error message.
pub fn into_picture(self, styles: ExampleStyles) -> Box<dyn Content> |
fn starting_positions(&self, lengths: &[usize]) -> Vec<usize> {
lengths
.iter()
.scan(0, |counter, &len| {
let start = *counter;
// Leave space for "NT " (if "NT" is the name
// of the nonterminal).
*counter = start + len + 1;
Some(start)
})
.collect()
}
/// Start index where each symbol in the example should appear,
/// measured in characters. These are spaced to leave enough room
/// for the reductions below.
fn positions(&self, lengths: &[usize]) -> Vec<usize> {
// Initially, position each symbol with one space in between,
// like:
//
// X Y Z
let mut positions = self.starting_positions(lengths);
// Adjust spacing to account for the nonterminal labels
// we will have to add. It will display
// like this:
//
// A1 B2 C3 D4 E5 F6
// | |
// +-Label---+
//
// But if the label is long we may have to adjust the spacing
// of the covered items (here, we changed them to two spaces,
// except the first gap, which got 3 spaces):
//
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
for &Reduction {
start,
end,
ref nonterminal,
} in &self.reductions
{
let nt_len = format!("{}", nonterminal).chars().count();
// Number of symbols we are reducing. This should always
// be non-zero because even in the case of a \epsilon
// rule, we ought to be have a `None` entry in the symbol array.
let num_syms = end - start;
assert!(num_syms > 0);
// Let's use the expansion from above as our running example.
// We start out with positions like this:
//
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
//
// But we want LongLabel to end at D4. No good.
// Start of first symbol to be reduced. Here, 0.
//
// A1 B2 C3 D4
// ^ here
let start_position = positions[start];
// End of last symbol to be reduced. Here, 11.
//
// A1 B2 C3 D4 E5
// ^ positions[end]
// ^ here -- positions[end] - 1
let end_position = positions[end] - 1;
// We need space to draw `+-Label-+` between
// start_position and end_position.
let required_len = nt_len + 4; // here, 15
let actual_len = end_position - start_position; // here, 10
if required_len < actual_len {
continue; // Got enough space, all set.
}
// Have to add `difference` characters altogether.
let difference = required_len - actual_len; // here, 4
// Increment over everything that is not part of this nonterminal.
// In the example above, that is E5 and F6.
shift(&mut positions[end..], difference);
if num_syms > 1 {
// If there is just one symbol being reduced here,
// then we have shifted over the things that follow
// it, and we are done. This would be a case like:
//
// X Y Z
// | |
// +-Label-+
//
// (which maybe ought to be rendered slightly
// differently).
//
// But if there are multiple symbols, we're not quite
// done, because there would be an unsightly gap:
//
// (gaps)
// | | |
// v v v
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
//
// we'd like to make things line up, so we have to
// distribute that extra space internally by
// increasing the "gaps" (marked above) as evenly as
// possible (basically, full justification).
//
// We do this by dividing up the spaces evenly and
// then taking the remainder `N` and distributing 1
// extra to the first N.
let num_gaps = num_syms - 1; // number of gaps we can adjust. Here, 3.
let amount = difference / num_gaps; // what to add to each gap. Here, 1.
let extra = difference % num_gaps; // the remainder. Here, 1.
// For the first `extra` symbols, give them amount + 1
// extra space. After that, just amount. (O(n^2). Sue me.)
for i in 0..extra {
shift(&mut positions[start + 1 + i..end], amount + 1);
}
for i in extra..num_gaps {
shift(&mut positions[start + 1 + i..end], amount);
}
}
}
positions
}
#[cfg(test)]
pub fn paint_unstyled(&self) -> Vec<::ascii_canvas::Row> {
let this = self.clone();
let content = this.into_picture(ExampleStyles::default());
let min_width = content.min_width();
let canvas = content.emit_to_canvas(min_width);
canvas.to_strings()
}
fn paint_on(&self, styles: &ExampleStyles, positions: &[usize], view: &mut dyn AsciiView) {
// Draw the brackets for each reduction:
for (index, reduction) in self.reductions.iter().enumerate() {
let start_column = positions[reduction.start];
let end_column = positions[reduction.end] - 1;
let row = 1 + index;
view.draw_vertical_line(0..row + 1, start_column);
view.draw_vertical_line(0..row + 1, end_column - 1);
view.draw_horizontal_line(row, start_column..end_column);
}
// Write the labels for each reduction. Do this after the
// brackets so that ascii canvas can convert `|` to `+`
// without interfering with the text (in case of weird overlap).
let session = Tls::session();
for (index, reduction) in self.reductions.iter().enumerate() {
let column = positions[reduction.start] + 2;
let row = 1 + index;
view.write_chars(
row,
column,
reduction.nonterminal.to_string().chars(),
session.nonterminal_symbol,
);
}
// Write the labels on top:
// A1 B2 C3 D4 E5 F6
self.paint_symbols_on(&self.symbols, positions, styles, view);
}
fn paint_symbols_on(
&self,
symbols: &[ExampleSymbol],
positions: &[usize],
styles: &ExampleStyles,
view: &mut dyn AsciiView,
) {
let session = Tls::session();
for (index, ex_symbol) in symbols.iter().enumerate() {
let style = match index.cmp(&self.cursor) {
Ordering::Less => styles.before_cursor,
Ordering::Equal => {
// Only display actual terminals in the "on-cursor"
// font, because it might be misleading to show a
// nonterminal that way. Really it'd be nice to expand
// so that the cursor is always a terminal.
match *ex_symbol {
ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor,
_ => styles.after_cursor,
}
}
Ordering::Greater => styles.after_cursor,
};
let column = positions[index];
match *ex_symbol {
ExampleSymbol::Symbol(Symbol::Terminal(ref term)) => {
view.write_chars(
0,
column,
term.to_string().chars(),
style.with(session.terminal_symbol),
);
}
ExampleSymbol::Symbol(Symbol::Nonterminal(ref nt)) => {
view.write_chars(
0,
column,
nt.to_string().chars(),
style.with(session.nonterminal_symbol),
);
}
ExampleSymbol::Epsilon => {}
}
}
}
}
struct ExamplePicture {
example: Example,
positions: Vec<usize>,
styles: ExampleStyles,
}
impl Content for ExamplePicture {
fn min_width(&self) -> usize {
*self.positions.last().unwrap()
}
fn emit(&self, view: &mut dyn AsciiView) {
self.example.paint_on(&self.styles, &self.positions, view);
}
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<dyn Content>>) {
wrap_items.push(self);
}
}
impl Debug for ExamplePicture {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
Debug::fmt(&self.example, fmt)
}
}
fn shift(positions: &mut [usize], amount: usize) {
for position in positions {
*position += amount;
}
}
impl ExampleStyles {
pub fn ambig() -> Self {
let session = Tls::session();
ExampleStyles {
before_cursor: session.ambig_symbols,
on_cursor: session.ambig_symbols,
after_cursor: session.ambig_symbols,
}
}
pub fn new() -> Self {
let session = Tls::session();
ExampleStyles {
before_cursor: session.observed_symbols,
on_cursor: session.cursor_symbol,
after_cursor: session.unobserved_symbols,
}
}
}
| {
let lengths = self.lengths();
let positions = self.positions(&lengths);
InlineBuilder::new()
.push(Box::new(ExamplePicture {
example: self,
positions,
styles,
}))
.indented()
.end()
} | identifier_body |
mod.rs | //! Code to compute example inputs given a backtrace.
use crate::grammar::repr::*;
use crate::message::builder::InlineBuilder;
use crate::message::Content;
use crate::style::Style;
use crate::tls::Tls;
use ascii_canvas::AsciiView;
use std::{
cmp::Ordering,
fmt::{Debug, Error, Formatter},
};
#[cfg(test)]
mod test;
/// An "example" input and the way it was derived. This can be
/// serialized into useful text. For example, it might represent
/// something like this:
///
/// ```
/// Looking at
/// |
/// v
/// Ty "->" Ty "->" Ty
/// | | |
/// +-Ty-----+ |
/// | |
/// +-Ty-------------+
/// ```
///
/// The top-line is the `symbols` vector. The groupings below are
/// stored in the `reductions` vector, in order from smallest to
/// largest (they are always properly nested). The `cursor` field
/// indicates the current lookahead token.
///
/// The `symbols` vector is actually `Option<Symbol>` to account
/// for empty reductions:
///
/// ```
/// A B
/// | | | |
/// | +-Y-+ |
/// +-Z-----+
/// ```
///
/// The "empty space" between A and B would be represented as `None`.
#[derive(Clone, Debug)]
pub struct Example {
pub symbols: Vec<ExampleSymbol>,
pub cursor: usize,
pub reductions: Vec<Reduction>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ExampleSymbol {
Symbol(Symbol),
Epsilon,
}
#[derive(Copy, Clone, Default)]
pub struct ExampleStyles {
pub before_cursor: Style,
pub on_cursor: Style,
pub after_cursor: Style,
}
#[derive(Clone, Debug)]
pub struct Reduction {
pub start: usize,
pub end: usize,
pub nonterminal: NonterminalString,
}
impl Example {
/// Length of each symbol. Each will need *at least* that amount
/// of space. :) Measure in characters, under the assumption of a
/// mono-spaced font. Also add a final `0` marker which will serve
/// as the end position.
fn lengths(&self) -> Vec<usize> {
self.symbols
.iter()
.map(|s| match *s {
ExampleSymbol::Symbol(ref s) => format!("{}", s).chars().count(),
ExampleSymbol::Epsilon => 1, // display as " "
})
.chain(Some(0))
.collect()
}
/// Extract a prefix of the list of symbols from this `Example`
/// and make a styled list of them, like:
///
/// Ty "->" Ty -> "Ty"
pub fn to_symbol_list(&self, length: usize, styles: ExampleStyles) -> Box<dyn Content> {
let mut builder = InlineBuilder::new().begin_spaced();
for (index, symbol) in self.symbols[..length].iter().enumerate() {
let style = match index.cmp(&self.cursor) {
Ordering::Less => styles.before_cursor,
Ordering::Equal => match *symbol {
ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor,
ExampleSymbol::Symbol(Symbol::Nonterminal(_)) => styles.after_cursor,
ExampleSymbol::Epsilon => styles.after_cursor,
},
Ordering::Greater => styles.after_cursor,
};
if let ExampleSymbol::Symbol(ref s) = symbol {
builder = builder.push(s.clone()).styled(style);
}
}
builder.end().indented().end()
}
/// Render the example into a styled diagram suitable for
/// embedding in an error message.
pub fn into_picture(self, styles: ExampleStyles) -> Box<dyn Content> {
let lengths = self.lengths();
let positions = self.positions(&lengths);
InlineBuilder::new()
.push(Box::new(ExamplePicture {
example: self,
positions,
styles,
}))
.indented()
.end()
}
fn starting_positions(&self, lengths: &[usize]) -> Vec<usize> {
lengths
.iter()
.scan(0, |counter, &len| {
let start = *counter;
// Leave space for "NT " (if "NT" is the name
// of the nonterminal).
*counter = start + len + 1;
Some(start)
})
.collect()
}
/// Start index where each symbol in the example should appear,
/// measured in characters. These are spaced to leave enough room
/// for the reductions below.
fn positions(&self, lengths: &[usize]) -> Vec<usize> {
// Initially, position each symbol with one space in between,
// like:
//
// X Y Z
let mut positions = self.starting_positions(lengths);
// Adjust spacing to account for the nonterminal labels
// we will have to add. It will display
// like this:
//
// A1 B2 C3 D4 E5 F6
// | |
// +-Label---+
//
// But if the label is long we may have to adjust the spacing
// of the covered items (here, we changed them to two spaces,
// except the first gap, which got 3 spaces):
//
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
for &Reduction {
start,
end,
ref nonterminal,
} in &self.reductions
{
let nt_len = format!("{}", nonterminal).chars().count();
// Number of symbols we are reducing. This should always
// be non-zero because even in the case of a \epsilon
// rule, we ought to be have a `None` entry in the symbol array.
let num_syms = end - start;
assert!(num_syms > 0);
// Let's use the expansion from above as our running example.
// We start out with positions like this:
//
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
//
// But we want LongLabel to end at D4. No good.
// Start of first symbol to be reduced. Here, 0.
//
// A1 B2 C3 D4
// ^ here
let start_position = positions[start];
// End of last symbol to be reduced. Here, 11.
//
// A1 B2 C3 D4 E5
// ^ positions[end]
// ^ here -- positions[end] - 1
let end_position = positions[end] - 1;
// We need space to draw `+-Label-+` between
// start_position and end_position.
let required_len = nt_len + 4; // here, 15
let actual_len = end_position - start_position; // here, 10
if required_len < actual_len {
continue; // Got enough space, all set.
}
// Have to add `difference` characters altogether.
let difference = required_len - actual_len; // here, 4
// Increment over everything that is not part of this nonterminal.
// In the example above, that is E5 and F6.
shift(&mut positions[end..], difference);
if num_syms > 1 {
// If there is just one symbol being reduced here,
// then we have shifted over the things that follow
// it, and we are done. This would be a case like:
//
// X Y Z
// | |
// +-Label-+
//
// (which maybe ought to be rendered slightly
// differently).
//
// But if there are multiple symbols, we're not quite
// done, because there would be an unsightly gap:
//
// (gaps)
// | | |
// v v v
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
//
// we'd like to make things line up, so we have to
// distribute that extra space internally by
// increasing the "gaps" (marked above) as evenly as
// possible (basically, full justification).
//
// We do this by dividing up the spaces evenly and
// then taking the remainder `N` and distributing 1
// extra to the first N.
let num_gaps = num_syms - 1; // number of gaps we can adjust. Here, 3.
let amount = difference / num_gaps; // what to add to each gap. Here, 1.
let extra = difference % num_gaps; // the remainder. Here, 1.
// For the first `extra` symbols, give them amount + 1
// extra space. After that, just amount. (O(n^2). Sue me.)
for i in 0..extra {
shift(&mut positions[start + 1 + i..end], amount + 1);
}
for i in extra..num_gaps {
shift(&mut positions[start + 1 + i..end], amount);
}
}
}
positions
}
#[cfg(test)]
pub fn | (&self) -> Vec<::ascii_canvas::Row> {
let this = self.clone();
let content = this.into_picture(ExampleStyles::default());
let min_width = content.min_width();
let canvas = content.emit_to_canvas(min_width);
canvas.to_strings()
}
fn paint_on(&self, styles: &ExampleStyles, positions: &[usize], view: &mut dyn AsciiView) {
// Draw the brackets for each reduction:
for (index, reduction) in self.reductions.iter().enumerate() {
let start_column = positions[reduction.start];
let end_column = positions[reduction.end] - 1;
let row = 1 + index;
view.draw_vertical_line(0..row + 1, start_column);
view.draw_vertical_line(0..row + 1, end_column - 1);
view.draw_horizontal_line(row, start_column..end_column);
}
// Write the labels for each reduction. Do this after the
// brackets so that ascii canvas can convert `|` to `+`
// without interfering with the text (in case of weird overlap).
let session = Tls::session();
for (index, reduction) in self.reductions.iter().enumerate() {
let column = positions[reduction.start] + 2;
let row = 1 + index;
view.write_chars(
row,
column,
reduction.nonterminal.to_string().chars(),
session.nonterminal_symbol,
);
}
// Write the labels on top:
// A1 B2 C3 D4 E5 F6
self.paint_symbols_on(&self.symbols, positions, styles, view);
}
fn paint_symbols_on(
&self,
symbols: &[ExampleSymbol],
positions: &[usize],
styles: &ExampleStyles,
view: &mut dyn AsciiView,
) {
let session = Tls::session();
for (index, ex_symbol) in symbols.iter().enumerate() {
let style = match index.cmp(&self.cursor) {
Ordering::Less => styles.before_cursor,
Ordering::Equal => {
// Only display actual terminals in the "on-cursor"
// font, because it might be misleading to show a
// nonterminal that way. Really it'd be nice to expand
// so that the cursor is always a terminal.
match *ex_symbol {
ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor,
_ => styles.after_cursor,
}
}
Ordering::Greater => styles.after_cursor,
};
let column = positions[index];
match *ex_symbol {
ExampleSymbol::Symbol(Symbol::Terminal(ref term)) => {
view.write_chars(
0,
column,
term.to_string().chars(),
style.with(session.terminal_symbol),
);
}
ExampleSymbol::Symbol(Symbol::Nonterminal(ref nt)) => {
view.write_chars(
0,
column,
nt.to_string().chars(),
style.with(session.nonterminal_symbol),
);
}
ExampleSymbol::Epsilon => {}
}
}
}
}
struct ExamplePicture {
example: Example,
positions: Vec<usize>,
styles: ExampleStyles,
}
impl Content for ExamplePicture {
fn min_width(&self) -> usize {
*self.positions.last().unwrap()
}
fn emit(&self, view: &mut dyn AsciiView) {
self.example.paint_on(&self.styles, &self.positions, view);
}
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<dyn Content>>) {
wrap_items.push(self);
}
}
impl Debug for ExamplePicture {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
Debug::fmt(&self.example, fmt)
}
}
fn shift(positions: &mut [usize], amount: usize) {
for position in positions {
*position += amount;
}
}
impl ExampleStyles {
pub fn ambig() -> Self {
let session = Tls::session();
ExampleStyles {
before_cursor: session.ambig_symbols,
on_cursor: session.ambig_symbols,
after_cursor: session.ambig_symbols,
}
}
pub fn new() -> Self {
let session = Tls::session();
ExampleStyles {
before_cursor: session.observed_symbols,
on_cursor: session.cursor_symbol,
after_cursor: session.unobserved_symbols,
}
}
}
| paint_unstyled | identifier_name |
mod.rs | //! Code to compute example inputs given a backtrace.
use crate::grammar::repr::*;
use crate::message::builder::InlineBuilder;
use crate::message::Content;
use crate::style::Style;
use crate::tls::Tls;
use ascii_canvas::AsciiView;
use std::{
cmp::Ordering,
fmt::{Debug, Error, Formatter},
};
#[cfg(test)]
mod test;
/// An "example" input and the way it was derived. This can be
/// serialized into useful text. For example, it might represent
/// something like this:
///
/// ```
/// Looking at
/// |
/// v
/// Ty "->" Ty "->" Ty
/// | | |
/// +-Ty-----+ |
/// | |
/// +-Ty-------------+ | ///
/// The top-line is the `symbols` vector. The groupings below are
/// stored in the `reductions` vector, in order from smallest to
/// largest (they are always properly nested). The `cursor` field
/// indicates the current lookahead token.
///
/// The `symbols` vector is actually `Option<Symbol>` to account
/// for empty reductions:
///
/// ```
/// A B
/// | | | |
/// | +-Y-+ |
/// +-Z-----+
/// ```
///
/// The "empty space" between A and B would be represented as `None`.
#[derive(Clone, Debug)]
pub struct Example {
pub symbols: Vec<ExampleSymbol>,
pub cursor: usize,
pub reductions: Vec<Reduction>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ExampleSymbol {
Symbol(Symbol),
Epsilon,
}
#[derive(Copy, Clone, Default)]
pub struct ExampleStyles {
pub before_cursor: Style,
pub on_cursor: Style,
pub after_cursor: Style,
}
#[derive(Clone, Debug)]
pub struct Reduction {
pub start: usize,
pub end: usize,
pub nonterminal: NonterminalString,
}
impl Example {
/// Length of each symbol. Each will need *at least* that amount
/// of space. :) Measure in characters, under the assumption of a
/// mono-spaced font. Also add a final `0` marker which will serve
/// as the end position.
fn lengths(&self) -> Vec<usize> {
self.symbols
.iter()
.map(|s| match *s {
ExampleSymbol::Symbol(ref s) => format!("{}", s).chars().count(),
ExampleSymbol::Epsilon => 1, // display as " "
})
.chain(Some(0))
.collect()
}
/// Extract a prefix of the list of symbols from this `Example`
/// and make a styled list of them, like:
///
/// Ty "->" Ty -> "Ty"
pub fn to_symbol_list(&self, length: usize, styles: ExampleStyles) -> Box<dyn Content> {
let mut builder = InlineBuilder::new().begin_spaced();
for (index, symbol) in self.symbols[..length].iter().enumerate() {
let style = match index.cmp(&self.cursor) {
Ordering::Less => styles.before_cursor,
Ordering::Equal => match *symbol {
ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor,
ExampleSymbol::Symbol(Symbol::Nonterminal(_)) => styles.after_cursor,
ExampleSymbol::Epsilon => styles.after_cursor,
},
Ordering::Greater => styles.after_cursor,
};
if let ExampleSymbol::Symbol(ref s) = symbol {
builder = builder.push(s.clone()).styled(style);
}
}
builder.end().indented().end()
}
/// Render the example into a styled diagram suitable for
/// embedding in an error message.
pub fn into_picture(self, styles: ExampleStyles) -> Box<dyn Content> {
let lengths = self.lengths();
let positions = self.positions(&lengths);
InlineBuilder::new()
.push(Box::new(ExamplePicture {
example: self,
positions,
styles,
}))
.indented()
.end()
}
fn starting_positions(&self, lengths: &[usize]) -> Vec<usize> {
lengths
.iter()
.scan(0, |counter, &len| {
let start = *counter;
// Leave space for "NT " (if "NT" is the name
// of the nonterminal).
*counter = start + len + 1;
Some(start)
})
.collect()
}
/// Start index where each symbol in the example should appear,
/// measured in characters. These are spaced to leave enough room
/// for the reductions below.
fn positions(&self, lengths: &[usize]) -> Vec<usize> {
// Initially, position each symbol with one space in between,
// like:
//
// X Y Z
let mut positions = self.starting_positions(lengths);
// Adjust spacing to account for the nonterminal labels
// we will have to add. It will display
// like this:
//
// A1 B2 C3 D4 E5 F6
// | |
// +-Label---+
//
// But if the label is long we may have to adjust the spacing
// of the covered items (here, we changed them to two spaces,
// except the first gap, which got 3 spaces):
//
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
for &Reduction {
start,
end,
ref nonterminal,
} in &self.reductions
{
let nt_len = format!("{}", nonterminal).chars().count();
// Number of symbols we are reducing. This should always
// be non-zero because even in the case of a \epsilon
// rule, we ought to be have a `None` entry in the symbol array.
let num_syms = end - start;
assert!(num_syms > 0);
// Let's use the expansion from above as our running example.
// We start out with positions like this:
//
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
//
// But we want LongLabel to end at D4. No good.
// Start of first symbol to be reduced. Here, 0.
//
// A1 B2 C3 D4
// ^ here
let start_position = positions[start];
// End of last symbol to be reduced. Here, 11.
//
// A1 B2 C3 D4 E5
// ^ positions[end]
// ^ here -- positions[end] - 1
let end_position = positions[end] - 1;
// We need space to draw `+-Label-+` between
// start_position and end_position.
let required_len = nt_len + 4; // here, 15
let actual_len = end_position - start_position; // here, 10
if required_len < actual_len {
continue; // Got enough space, all set.
}
// Have to add `difference` characters altogether.
let difference = required_len - actual_len; // here, 4
// Increment over everything that is not part of this nonterminal.
// In the example above, that is E5 and F6.
shift(&mut positions[end..], difference);
if num_syms > 1 {
// If there is just one symbol being reduced here,
// then we have shifted over the things that follow
// it, and we are done. This would be a case like:
//
// X Y Z
// | |
// +-Label-+
//
// (which maybe ought to be rendered slightly
// differently).
//
// But if there are multiple symbols, we're not quite
// done, because there would be an unsightly gap:
//
// (gaps)
// | | |
// v v v
// A1 B2 C3 D4 E5 F6
// | |
// +-LongLabel22-+
//
// we'd like to make things line up, so we have to
// distribute that extra space internally by
// increasing the "gaps" (marked above) as evenly as
// possible (basically, full justification).
//
// We do this by dividing up the spaces evenly and
// then taking the remainder `N` and distributing 1
// extra to the first N.
let num_gaps = num_syms - 1; // number of gaps we can adjust. Here, 3.
let amount = difference / num_gaps; // what to add to each gap. Here, 1.
let extra = difference % num_gaps; // the remainder. Here, 1.
// For the first `extra` symbols, give them amount + 1
// extra space. After that, just amount. (O(n^2). Sue me.)
for i in 0..extra {
shift(&mut positions[start + 1 + i..end], amount + 1);
}
for i in extra..num_gaps {
shift(&mut positions[start + 1 + i..end], amount);
}
}
}
positions
}
#[cfg(test)]
pub fn paint_unstyled(&self) -> Vec<::ascii_canvas::Row> {
let this = self.clone();
let content = this.into_picture(ExampleStyles::default());
let min_width = content.min_width();
let canvas = content.emit_to_canvas(min_width);
canvas.to_strings()
}
fn paint_on(&self, styles: &ExampleStyles, positions: &[usize], view: &mut dyn AsciiView) {
// Draw the brackets for each reduction:
for (index, reduction) in self.reductions.iter().enumerate() {
let start_column = positions[reduction.start];
let end_column = positions[reduction.end] - 1;
let row = 1 + index;
view.draw_vertical_line(0..row + 1, start_column);
view.draw_vertical_line(0..row + 1, end_column - 1);
view.draw_horizontal_line(row, start_column..end_column);
}
// Write the labels for each reduction. Do this after the
// brackets so that ascii canvas can convert `|` to `+`
// without interfering with the text (in case of weird overlap).
let session = Tls::session();
for (index, reduction) in self.reductions.iter().enumerate() {
let column = positions[reduction.start] + 2;
let row = 1 + index;
view.write_chars(
row,
column,
reduction.nonterminal.to_string().chars(),
session.nonterminal_symbol,
);
}
// Write the labels on top:
// A1 B2 C3 D4 E5 F6
self.paint_symbols_on(&self.symbols, positions, styles, view);
}
fn paint_symbols_on(
&self,
symbols: &[ExampleSymbol],
positions: &[usize],
styles: &ExampleStyles,
view: &mut dyn AsciiView,
) {
let session = Tls::session();
for (index, ex_symbol) in symbols.iter().enumerate() {
let style = match index.cmp(&self.cursor) {
Ordering::Less => styles.before_cursor,
Ordering::Equal => {
// Only display actual terminals in the "on-cursor"
// font, because it might be misleading to show a
// nonterminal that way. Really it'd be nice to expand
// so that the cursor is always a terminal.
match *ex_symbol {
ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor,
_ => styles.after_cursor,
}
}
Ordering::Greater => styles.after_cursor,
};
let column = positions[index];
match *ex_symbol {
ExampleSymbol::Symbol(Symbol::Terminal(ref term)) => {
view.write_chars(
0,
column,
term.to_string().chars(),
style.with(session.terminal_symbol),
);
}
ExampleSymbol::Symbol(Symbol::Nonterminal(ref nt)) => {
view.write_chars(
0,
column,
nt.to_string().chars(),
style.with(session.nonterminal_symbol),
);
}
ExampleSymbol::Epsilon => {}
}
}
}
}
struct ExamplePicture {
example: Example,
positions: Vec<usize>,
styles: ExampleStyles,
}
impl Content for ExamplePicture {
fn min_width(&self) -> usize {
*self.positions.last().unwrap()
}
fn emit(&self, view: &mut dyn AsciiView) {
self.example.paint_on(&self.styles, &self.positions, view);
}
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<dyn Content>>) {
wrap_items.push(self);
}
}
impl Debug for ExamplePicture {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
Debug::fmt(&self.example, fmt)
}
}
fn shift(positions: &mut [usize], amount: usize) {
for position in positions {
*position += amount;
}
}
impl ExampleStyles {
pub fn ambig() -> Self {
let session = Tls::session();
ExampleStyles {
before_cursor: session.ambig_symbols,
on_cursor: session.ambig_symbols,
after_cursor: session.ambig_symbols,
}
}
pub fn new() -> Self {
let session = Tls::session();
ExampleStyles {
before_cursor: session.observed_symbols,
on_cursor: session.cursor_symbol,
after_cursor: session.unobserved_symbols,
}
}
} | /// ``` | random_line_split |
install.go | // Copyright 2021 Ross Light
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/spf13/cobra"
"go.starlark.net/starlark"
"go4.org/xdgdir"
"zombiezen.com/go/biome"
"zombiezen.com/go/biome/downloader"
"zombiezen.com/go/biome/internal/extract"
"zombiezen.com/go/sqlite/sqlitex"
)
type installCommand struct {
biomeID string
script string
version string
}
func newInstallCommand() *cobra.Command {
c := new(installCommand)
cmd := &cobra.Command{
Use: "install [options] SCRIPT VERSION",
DisableFlagsInUseLine: true,
Short: "run an installer script",
Args: cobra.ExactArgs(2),
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
c.script = args[0]
c.version = args[1]
return c.run(cmd.Context())
},
}
cmd.Flags().StringVarP(&c.biomeID, "biome", "b", "", "biome to run inside")
return cmd
}
func (c *installCommand) run(ctx context.Context) (err error) {
db, err := openDB(ctx)
if err != nil {
return err
}
defer db.Close()
endFn, err := sqlitex.ImmediateTransaction(db)
if err != nil {
return err
}
defer endFn(&err)
rec, err := findBiome(db, c.biomeID)
if err != nil {
return err
}
bio, err := rec.setupWithoutEnv(ctx, db)
if err != nil {
return err
}
thread := &starlark.Thread{}
thread.SetLocal(threadContextKey, ctx)
script, err := os.Open(c.script)
if err != nil {
return err
}
defer script.Close()
predeclared := starlark.StringDict{
"Environment": starlark.NewBuiltin("Environment", builtinEnvironmentCtor),
}
globals, err := starlark.ExecFile(thread, c.script, script, predeclared)
if err != nil {
return err
}
installFuncValue := globals["install"]
if installFuncValue == nil {
return fmt.Errorf("no install function found")
}
installFunc, ok := installFuncValue.(*starlark.Function)
if !ok {
return fmt.Errorf("`install` is declared as %s instead of function", installFuncValue.Type())
}
if !installFunc.HasKwargs() {
//lint:ignore ST1005 referencing Environment constructor
return fmt.Errorf("install function does not permit extra keyword arguments. " +
"Please add `**kwargs` to the end of install's parameters for forward compatibility.")
}
cachePath := xdgdir.Cache.Path()
if cachePath == "" {
return fmt.Errorf("%v not set", xdgdir.Cache)
}
myDownloader := downloader.New(filepath.Join(cachePath, cacheSubdirName, "downloads"))
installReturnValue, err := starlark.Call(
thread,
installFunc,
starlark.Tuple{biomeValue(bio), starlark.String(c.version)},
[]starlark.Tuple{
{starlark.String("downloader"), downloaderValue(myDownloader)},
},
)
if err != nil {
return err
}
installReturn, ok := installReturnValue.(*envValue)
if !ok {
return fmt.Errorf("`install` returned a %s instead of Environment", installReturnValue.Type())
}
installEnv, err := installReturn.toEnvironment()
if err != nil {
return fmt.Errorf("install return value: %w", err)
}
if err := writeBiomeEnvironment(db, rec.id, rec.env.Merge(installEnv)); err != nil {
return err
}
return nil
}
const threadContextKey = "zombiezen.com/go/biome.Context"
func threadContext(t *starlark.Thread) context.Context {
ctx, _ := t.Local(threadContextKey).(context.Context)
if ctx == nil {
ctx = context.Background()
}
return ctx
}
type envValue struct {
vars *starlark.Dict
prependPath *starlark.List
appendPath *starlark.List
}
func builtinEnvironmentCtor(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
ev := new(envValue)
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"vars?", &ev.vars,
"prepend_path?", &ev.prependPath,
"append_path?", &ev.appendPath,
)
if err != nil {
return nil, err
}
if ev.vars == nil {
ev.vars = new(starlark.Dict)
}
if ev.prependPath == nil {
ev.prependPath = new(starlark.List)
}
if ev.appendPath == nil {
ev.appendPath = new(starlark.List)
}
return ev, nil
}
func (ev *envValue) String() string {
return fmt.Sprintf("Environment(vars=%v, prepend_path=%v, append_path=%v)",
ev.vars, ev.prependPath, ev.appendPath)
}
func (ev *envValue) Type() string {
return "Environment"
}
func (ev *envValue) Freeze() {
ev.vars.Freeze()
ev.prependPath.Freeze()
ev.appendPath.Freeze()
}
func (ev *envValue) Truth() starlark.Bool {
return ev.vars.Len() > 0 || ev.prependPath.Len() > 0 || ev.appendPath.Len() > 0
}
func (ev *envValue) Hash() (uint32, error) {
//lint:ignore ST1005 referencing Environment constructor
return 0, fmt.Errorf("Environment not hashable")
}
func (ev *envValue) Attr(name string) (starlark.Value, error) {
switch name {
case "vars":
return ev.vars, nil
case "prepend_path":
return ev.prependPath, nil
case "append_path":
return ev.appendPath, nil
default:
return nil, nil
}
}
func (ev *envValue) AttrNames() []string {
return []string{
"append_path",
"prepend_path",
"vars",
}
}
func (ev *envValue) toEnvironment() (biome.Environment, error) {
var e biome.Environment
if n := ev.vars.Len(); n > 0 {
e.Vars = make(map[string]string, n)
for _, kv := range ev.vars.Items() {
k, ok := starlark.AsString(kv[0])
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.vars key %v", kv[0])
}
v, ok := starlark.AsString(kv[1])
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.vars value %v for key %q", kv[1], k)
}
e.Vars[k] = v
}
}
for i, n := 0, ev.appendPath.Len(); i < n; i++ {
pv := ev.appendPath.Index(i)
p, ok := starlark.AsString(pv)
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.appendPath[%d] value %v", i, pv)
}
e.AppendPath = append(e.AppendPath, p)
}
for i, n := 0, ev.prependPath.Len(); i < n; i++ {
pv := ev.prependPath.Index(i)
p, ok := starlark.AsString(pv)
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.prependPath[%d] value %v", i, pv)
}
e.PrependPath = append(e.PrependPath, p)
}
return e, nil
}
type biomeWrapper struct {
biome biome.Biome
attrs starlark.StringDict
}
func biomeValue(bio biome.Biome) *biomeWrapper {
bw := &biomeWrapper{biome: bio}
bw.attrs = starlark.StringDict{
"os": starlark.String(bio.Describe().OS),
"arch": starlark.String(bio.Describe().Arch),
"run": starlark.NewBuiltin("run", bw.runBuiltin),
"dirs": newDirsModule(bio.Dirs()),
"path": newPathModule(bio),
}
return bw
}
func (*biomeWrapper) Type() string { return "biome" }
func (*biomeWrapper) Freeze() {}
func (*biomeWrapper) Truth() starlark.Bool { return starlark.True }
func (*biomeWrapper) Hash() (uint32, error) { return 0, fmt.Errorf("biome not hashable") }
func (*biomeWrapper) String() string { return "<biome>" }
func (bw *biomeWrapper) Attr(name string) (starlark.Value, error) {
return bw.attrs[name], nil
}
func (bw *biomeWrapper) AttrNames() []string {
return sortedStringDictKeys(bw.attrs)
}
func (bw *biomeWrapper) runBuiltin(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
ctx := threadContext(thread)
var argv *starlark.List
invocation := &biome.Invocation{
Stdout: os.Stderr,
Stderr: os.Stderr,
}
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"argv", &argv,
"dir??", &invocation.Dir,
)
if err != nil {
return nil, err
}
invocation.Argv = make([]string, 0, argv.Len())
for i := 0; i < argv.Len(); i++ {
arg, ok := starlark.AsString(argv.Index(i))
if !ok {
return nil, fmt.Errorf("run: could not convert argv[%d] to string", i)
}
invocation.Argv = append(invocation.Argv, arg)
}
if err := bw.biome.Run(ctx, invocation); err != nil {
return nil, err
}
return starlark.None, nil
}
func newDirsModule(dirs *biome.Dirs) *module {
return &module{
name: "dirs",
attrs: starlark.StringDict{
"work": starlark.String(dirs.Work),
"home": starlark.String(dirs.Home),
"tools": starlark.String(dirs.Tools),
},
}
}
func newPathModule(bio biome.Biome) *module {
return &module{
name: "path",
attrs: starlark.StringDict{
"join": starlark.NewBuiltin("path.join", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(kwargs) != 0 {
return nil, fmt.Errorf("%s: keyword arguments not allowed", fn.Name())
}
stringArgs := make([]string, 0, args.Len())
for i := 0; i < args.Len(); i++ {
arg, ok := starlark.AsString(args.Index(i))
if !ok {
return nil, fmt.Errorf("%s: could not convert arg[%d] to string", fn.Name(), i)
}
stringArgs = append(stringArgs, arg)
}
return starlark.String(biome.JoinPath(bio.Describe(), stringArgs...)), nil
}),
"exists": starlark.NewBuiltin("path.exists", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var path string
if err := starlark.UnpackArgs(fn.Name(), args, kwargs, "path", &path); err != nil {
return nil, err
}
_, err := biome.EvalSymlinks(threadContext(thread), bio, path)
return starlark.Bool(err == nil), nil
}),
},
}
}
func downloaderValue(d *downloader.Downloader) *module {
return &module{
name: "downloader",
attrs: starlark.StringDict{
"extract": starlark.NewBuiltin("extract", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
opts := &extract.Options{
Downloader: d,
Output: os.Stderr,
}
var bw *biomeWrapper
mode := "tarbomb"
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"biome", &bw,
"dst_dir", &opts.DestinationDir,
"url", &opts.URL,
"mode?", &mode,
)
if err != nil {
return nil, err
}
opts.Biome = bw.biome
switch mode {
case "tarbomb":
opts.ExtractMode = extract.Tarbomb
case "strip":
opts.ExtractMode = extract.StripTopDirectory
default:
return nil, fmt.Errorf("%s: invalid mode %q", fn.Name(), mode)
}
if err := extract.Extract(threadContext(thread), opts); err != nil {
return nil, err
}
return starlark.None, nil
}),
},
}
}
var _ starlark.HasAttrs = (*module)(nil)
type module struct {
name string
attrs starlark.StringDict
}
func (*module) Type() string { return "module" }
func (*module) Freeze() {}
func (*module) Truth() starlark.Bool { return starlark.True }
func (*module) Hash() (uint32, error) { return 0, fmt.Errorf("module not hashable") }
func (mod *module) String() string { return "<module '" + mod.name + "'>" }
func (mod *module) Attr(name string) (starlark.Value, error) {
return mod.attrs[name], nil
}
func (mod *module) AttrNames() []string |
func sortedStringDictKeys(d starlark.StringDict) []string {
keys := make([]string, 0, len(d))
for k := range d {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
| {
return sortedStringDictKeys(mod.attrs)
} | identifier_body |
install.go | // Copyright 2021 Ross Light
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/spf13/cobra"
"go.starlark.net/starlark"
"go4.org/xdgdir"
"zombiezen.com/go/biome"
"zombiezen.com/go/biome/downloader"
"zombiezen.com/go/biome/internal/extract"
"zombiezen.com/go/sqlite/sqlitex"
)
type installCommand struct {
biomeID string
script string
version string
}
func newInstallCommand() *cobra.Command {
c := new(installCommand)
cmd := &cobra.Command{
Use: "install [options] SCRIPT VERSION",
DisableFlagsInUseLine: true,
Short: "run an installer script",
Args: cobra.ExactArgs(2),
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
c.script = args[0]
c.version = args[1]
return c.run(cmd.Context())
},
}
cmd.Flags().StringVarP(&c.biomeID, "biome", "b", "", "biome to run inside")
return cmd
}
func (c *installCommand) run(ctx context.Context) (err error) {
db, err := openDB(ctx)
if err != nil {
return err
}
defer db.Close()
endFn, err := sqlitex.ImmediateTransaction(db)
if err != nil {
return err
}
defer endFn(&err)
rec, err := findBiome(db, c.biomeID)
if err != nil {
return err
}
bio, err := rec.setupWithoutEnv(ctx, db)
if err != nil {
return err
}
thread := &starlark.Thread{}
thread.SetLocal(threadContextKey, ctx)
script, err := os.Open(c.script)
if err != nil {
return err
}
defer script.Close()
predeclared := starlark.StringDict{
"Environment": starlark.NewBuiltin("Environment", builtinEnvironmentCtor),
}
globals, err := starlark.ExecFile(thread, c.script, script, predeclared)
if err != nil {
return err
}
installFuncValue := globals["install"]
if installFuncValue == nil {
return fmt.Errorf("no install function found")
}
installFunc, ok := installFuncValue.(*starlark.Function)
if !ok {
return fmt.Errorf("`install` is declared as %s instead of function", installFuncValue.Type())
}
if !installFunc.HasKwargs() {
//lint:ignore ST1005 referencing Environment constructor
return fmt.Errorf("install function does not permit extra keyword arguments. " +
"Please add `**kwargs` to the end of install's parameters for forward compatibility.")
}
cachePath := xdgdir.Cache.Path()
if cachePath == "" {
return fmt.Errorf("%v not set", xdgdir.Cache)
}
myDownloader := downloader.New(filepath.Join(cachePath, cacheSubdirName, "downloads"))
installReturnValue, err := starlark.Call(
thread,
installFunc,
starlark.Tuple{biomeValue(bio), starlark.String(c.version)},
[]starlark.Tuple{
{starlark.String("downloader"), downloaderValue(myDownloader)},
},
)
if err != nil {
return err
}
installReturn, ok := installReturnValue.(*envValue)
if !ok {
return fmt.Errorf("`install` returned a %s instead of Environment", installReturnValue.Type())
}
installEnv, err := installReturn.toEnvironment()
if err != nil { | return err
}
return nil
}
const threadContextKey = "zombiezen.com/go/biome.Context"
func threadContext(t *starlark.Thread) context.Context {
ctx, _ := t.Local(threadContextKey).(context.Context)
if ctx == nil {
ctx = context.Background()
}
return ctx
}
type envValue struct {
vars *starlark.Dict
prependPath *starlark.List
appendPath *starlark.List
}
func builtinEnvironmentCtor(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
ev := new(envValue)
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"vars?", &ev.vars,
"prepend_path?", &ev.prependPath,
"append_path?", &ev.appendPath,
)
if err != nil {
return nil, err
}
if ev.vars == nil {
ev.vars = new(starlark.Dict)
}
if ev.prependPath == nil {
ev.prependPath = new(starlark.List)
}
if ev.appendPath == nil {
ev.appendPath = new(starlark.List)
}
return ev, nil
}
func (ev *envValue) String() string {
return fmt.Sprintf("Environment(vars=%v, prepend_path=%v, append_path=%v)",
ev.vars, ev.prependPath, ev.appendPath)
}
func (ev *envValue) Type() string {
return "Environment"
}
func (ev *envValue) Freeze() {
ev.vars.Freeze()
ev.prependPath.Freeze()
ev.appendPath.Freeze()
}
func (ev *envValue) Truth() starlark.Bool {
return ev.vars.Len() > 0 || ev.prependPath.Len() > 0 || ev.appendPath.Len() > 0
}
func (ev *envValue) Hash() (uint32, error) {
//lint:ignore ST1005 referencing Environment constructor
return 0, fmt.Errorf("Environment not hashable")
}
func (ev *envValue) Attr(name string) (starlark.Value, error) {
switch name {
case "vars":
return ev.vars, nil
case "prepend_path":
return ev.prependPath, nil
case "append_path":
return ev.appendPath, nil
default:
return nil, nil
}
}
func (ev *envValue) AttrNames() []string {
return []string{
"append_path",
"prepend_path",
"vars",
}
}
func (ev *envValue) toEnvironment() (biome.Environment, error) {
var e biome.Environment
if n := ev.vars.Len(); n > 0 {
e.Vars = make(map[string]string, n)
for _, kv := range ev.vars.Items() {
k, ok := starlark.AsString(kv[0])
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.vars key %v", kv[0])
}
v, ok := starlark.AsString(kv[1])
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.vars value %v for key %q", kv[1], k)
}
e.Vars[k] = v
}
}
for i, n := 0, ev.appendPath.Len(); i < n; i++ {
pv := ev.appendPath.Index(i)
p, ok := starlark.AsString(pv)
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.appendPath[%d] value %v", i, pv)
}
e.AppendPath = append(e.AppendPath, p)
}
for i, n := 0, ev.prependPath.Len(); i < n; i++ {
pv := ev.prependPath.Index(i)
p, ok := starlark.AsString(pv)
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.prependPath[%d] value %v", i, pv)
}
e.PrependPath = append(e.PrependPath, p)
}
return e, nil
}
type biomeWrapper struct {
biome biome.Biome
attrs starlark.StringDict
}
func biomeValue(bio biome.Biome) *biomeWrapper {
bw := &biomeWrapper{biome: bio}
bw.attrs = starlark.StringDict{
"os": starlark.String(bio.Describe().OS),
"arch": starlark.String(bio.Describe().Arch),
"run": starlark.NewBuiltin("run", bw.runBuiltin),
"dirs": newDirsModule(bio.Dirs()),
"path": newPathModule(bio),
}
return bw
}
func (*biomeWrapper) Type() string { return "biome" }
func (*biomeWrapper) Freeze() {}
func (*biomeWrapper) Truth() starlark.Bool { return starlark.True }
func (*biomeWrapper) Hash() (uint32, error) { return 0, fmt.Errorf("biome not hashable") }
func (*biomeWrapper) String() string { return "<biome>" }
func (bw *biomeWrapper) Attr(name string) (starlark.Value, error) {
return bw.attrs[name], nil
}
func (bw *biomeWrapper) AttrNames() []string {
return sortedStringDictKeys(bw.attrs)
}
func (bw *biomeWrapper) runBuiltin(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
ctx := threadContext(thread)
var argv *starlark.List
invocation := &biome.Invocation{
Stdout: os.Stderr,
Stderr: os.Stderr,
}
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"argv", &argv,
"dir??", &invocation.Dir,
)
if err != nil {
return nil, err
}
invocation.Argv = make([]string, 0, argv.Len())
for i := 0; i < argv.Len(); i++ {
arg, ok := starlark.AsString(argv.Index(i))
if !ok {
return nil, fmt.Errorf("run: could not convert argv[%d] to string", i)
}
invocation.Argv = append(invocation.Argv, arg)
}
if err := bw.biome.Run(ctx, invocation); err != nil {
return nil, err
}
return starlark.None, nil
}
func newDirsModule(dirs *biome.Dirs) *module {
return &module{
name: "dirs",
attrs: starlark.StringDict{
"work": starlark.String(dirs.Work),
"home": starlark.String(dirs.Home),
"tools": starlark.String(dirs.Tools),
},
}
}
func newPathModule(bio biome.Biome) *module {
return &module{
name: "path",
attrs: starlark.StringDict{
"join": starlark.NewBuiltin("path.join", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(kwargs) != 0 {
return nil, fmt.Errorf("%s: keyword arguments not allowed", fn.Name())
}
stringArgs := make([]string, 0, args.Len())
for i := 0; i < args.Len(); i++ {
arg, ok := starlark.AsString(args.Index(i))
if !ok {
return nil, fmt.Errorf("%s: could not convert arg[%d] to string", fn.Name(), i)
}
stringArgs = append(stringArgs, arg)
}
return starlark.String(biome.JoinPath(bio.Describe(), stringArgs...)), nil
}),
"exists": starlark.NewBuiltin("path.exists", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var path string
if err := starlark.UnpackArgs(fn.Name(), args, kwargs, "path", &path); err != nil {
return nil, err
}
_, err := biome.EvalSymlinks(threadContext(thread), bio, path)
return starlark.Bool(err == nil), nil
}),
},
}
}
func downloaderValue(d *downloader.Downloader) *module {
return &module{
name: "downloader",
attrs: starlark.StringDict{
"extract": starlark.NewBuiltin("extract", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
opts := &extract.Options{
Downloader: d,
Output: os.Stderr,
}
var bw *biomeWrapper
mode := "tarbomb"
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"biome", &bw,
"dst_dir", &opts.DestinationDir,
"url", &opts.URL,
"mode?", &mode,
)
if err != nil {
return nil, err
}
opts.Biome = bw.biome
switch mode {
case "tarbomb":
opts.ExtractMode = extract.Tarbomb
case "strip":
opts.ExtractMode = extract.StripTopDirectory
default:
return nil, fmt.Errorf("%s: invalid mode %q", fn.Name(), mode)
}
if err := extract.Extract(threadContext(thread), opts); err != nil {
return nil, err
}
return starlark.None, nil
}),
},
}
}
var _ starlark.HasAttrs = (*module)(nil)
type module struct {
name string
attrs starlark.StringDict
}
func (*module) Type() string { return "module" }
func (*module) Freeze() {}
func (*module) Truth() starlark.Bool { return starlark.True }
func (*module) Hash() (uint32, error) { return 0, fmt.Errorf("module not hashable") }
func (mod *module) String() string { return "<module '" + mod.name + "'>" }
func (mod *module) Attr(name string) (starlark.Value, error) {
return mod.attrs[name], nil
}
func (mod *module) AttrNames() []string {
return sortedStringDictKeys(mod.attrs)
}
func sortedStringDictKeys(d starlark.StringDict) []string {
keys := make([]string, 0, len(d))
for k := range d {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
} | return fmt.Errorf("install return value: %w", err)
}
if err := writeBiomeEnvironment(db, rec.id, rec.env.Merge(installEnv)); err != nil { | random_line_split |
install.go | // Copyright 2021 Ross Light
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/spf13/cobra"
"go.starlark.net/starlark"
"go4.org/xdgdir"
"zombiezen.com/go/biome"
"zombiezen.com/go/biome/downloader"
"zombiezen.com/go/biome/internal/extract"
"zombiezen.com/go/sqlite/sqlitex"
)
type installCommand struct {
biomeID string
script string
version string
}
func newInstallCommand() *cobra.Command {
c := new(installCommand)
cmd := &cobra.Command{
Use: "install [options] SCRIPT VERSION",
DisableFlagsInUseLine: true,
Short: "run an installer script",
Args: cobra.ExactArgs(2),
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
c.script = args[0]
c.version = args[1]
return c.run(cmd.Context())
},
}
cmd.Flags().StringVarP(&c.biomeID, "biome", "b", "", "biome to run inside")
return cmd
}
func (c *installCommand) run(ctx context.Context) (err error) {
db, err := openDB(ctx)
if err != nil {
return err
}
defer db.Close()
endFn, err := sqlitex.ImmediateTransaction(db)
if err != nil {
return err
}
defer endFn(&err)
rec, err := findBiome(db, c.biomeID)
if err != nil {
return err
}
bio, err := rec.setupWithoutEnv(ctx, db)
if err != nil {
return err
}
thread := &starlark.Thread{}
thread.SetLocal(threadContextKey, ctx)
script, err := os.Open(c.script)
if err != nil {
return err
}
defer script.Close()
predeclared := starlark.StringDict{
"Environment": starlark.NewBuiltin("Environment", builtinEnvironmentCtor),
}
globals, err := starlark.ExecFile(thread, c.script, script, predeclared)
if err != nil {
return err
}
installFuncValue := globals["install"]
if installFuncValue == nil {
return fmt.Errorf("no install function found")
}
installFunc, ok := installFuncValue.(*starlark.Function)
if !ok {
return fmt.Errorf("`install` is declared as %s instead of function", installFuncValue.Type())
}
if !installFunc.HasKwargs() {
//lint:ignore ST1005 referencing Environment constructor
return fmt.Errorf("install function does not permit extra keyword arguments. " +
"Please add `**kwargs` to the end of install's parameters for forward compatibility.")
}
cachePath := xdgdir.Cache.Path()
if cachePath == "" {
return fmt.Errorf("%v not set", xdgdir.Cache)
}
myDownloader := downloader.New(filepath.Join(cachePath, cacheSubdirName, "downloads"))
installReturnValue, err := starlark.Call(
thread,
installFunc,
starlark.Tuple{biomeValue(bio), starlark.String(c.version)},
[]starlark.Tuple{
{starlark.String("downloader"), downloaderValue(myDownloader)},
},
)
if err != nil {
return err
}
installReturn, ok := installReturnValue.(*envValue)
if !ok {
return fmt.Errorf("`install` returned a %s instead of Environment", installReturnValue.Type())
}
installEnv, err := installReturn.toEnvironment()
if err != nil {
return fmt.Errorf("install return value: %w", err)
}
if err := writeBiomeEnvironment(db, rec.id, rec.env.Merge(installEnv)); err != nil {
return err
}
return nil
}
const threadContextKey = "zombiezen.com/go/biome.Context"
func threadContext(t *starlark.Thread) context.Context {
ctx, _ := t.Local(threadContextKey).(context.Context)
if ctx == nil {
ctx = context.Background()
}
return ctx
}
type envValue struct {
vars *starlark.Dict
prependPath *starlark.List
appendPath *starlark.List
}
func builtinEnvironmentCtor(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
ev := new(envValue)
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"vars?", &ev.vars,
"prepend_path?", &ev.prependPath,
"append_path?", &ev.appendPath,
)
if err != nil {
return nil, err
}
if ev.vars == nil {
ev.vars = new(starlark.Dict)
}
if ev.prependPath == nil {
ev.prependPath = new(starlark.List)
}
if ev.appendPath == nil {
ev.appendPath = new(starlark.List)
}
return ev, nil
}
func (ev *envValue) String() string {
return fmt.Sprintf("Environment(vars=%v, prepend_path=%v, append_path=%v)",
ev.vars, ev.prependPath, ev.appendPath)
}
func (ev *envValue) Type() string {
return "Environment"
}
func (ev *envValue) Freeze() {
ev.vars.Freeze()
ev.prependPath.Freeze()
ev.appendPath.Freeze()
}
func (ev *envValue) Truth() starlark.Bool {
return ev.vars.Len() > 0 || ev.prependPath.Len() > 0 || ev.appendPath.Len() > 0
}
func (ev *envValue) Hash() (uint32, error) {
//lint:ignore ST1005 referencing Environment constructor
return 0, fmt.Errorf("Environment not hashable")
}
func (ev *envValue) Attr(name string) (starlark.Value, error) {
switch name {
case "vars":
return ev.vars, nil
case "prepend_path":
return ev.prependPath, nil
case "append_path":
return ev.appendPath, nil
default:
return nil, nil
}
}
func (ev *envValue) AttrNames() []string {
return []string{
"append_path",
"prepend_path",
"vars",
}
}
func (ev *envValue) toEnvironment() (biome.Environment, error) {
var e biome.Environment
if n := ev.vars.Len(); n > 0 {
e.Vars = make(map[string]string, n)
for _, kv := range ev.vars.Items() {
k, ok := starlark.AsString(kv[0])
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.vars key %v", kv[0])
}
v, ok := starlark.AsString(kv[1])
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.vars value %v for key %q", kv[1], k)
}
e.Vars[k] = v
}
}
for i, n := 0, ev.appendPath.Len(); i < n; i++ {
pv := ev.appendPath.Index(i)
p, ok := starlark.AsString(pv)
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.appendPath[%d] value %v", i, pv)
}
e.AppendPath = append(e.AppendPath, p)
}
for i, n := 0, ev.prependPath.Len(); i < n; i++ {
pv := ev.prependPath.Index(i)
p, ok := starlark.AsString(pv)
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.prependPath[%d] value %v", i, pv)
}
e.PrependPath = append(e.PrependPath, p)
}
return e, nil
}
type biomeWrapper struct {
biome biome.Biome
attrs starlark.StringDict
}
func biomeValue(bio biome.Biome) *biomeWrapper {
bw := &biomeWrapper{biome: bio}
bw.attrs = starlark.StringDict{
"os": starlark.String(bio.Describe().OS),
"arch": starlark.String(bio.Describe().Arch),
"run": starlark.NewBuiltin("run", bw.runBuiltin),
"dirs": newDirsModule(bio.Dirs()),
"path": newPathModule(bio),
}
return bw
}
func (*biomeWrapper) Type() string { return "biome" }
func (*biomeWrapper) Freeze() {}
func (*biomeWrapper) Truth() starlark.Bool { return starlark.True }
func (*biomeWrapper) Hash() (uint32, error) { return 0, fmt.Errorf("biome not hashable") }
func (*biomeWrapper) String() string { return "<biome>" }
func (bw *biomeWrapper) Attr(name string) (starlark.Value, error) {
return bw.attrs[name], nil
}
func (bw *biomeWrapper) AttrNames() []string {
return sortedStringDictKeys(bw.attrs)
}
func (bw *biomeWrapper) runBuiltin(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
ctx := threadContext(thread)
var argv *starlark.List
invocation := &biome.Invocation{
Stdout: os.Stderr,
Stderr: os.Stderr,
}
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"argv", &argv,
"dir??", &invocation.Dir,
)
if err != nil {
return nil, err
}
invocation.Argv = make([]string, 0, argv.Len())
for i := 0; i < argv.Len(); i++ {
arg, ok := starlark.AsString(argv.Index(i))
if !ok {
return nil, fmt.Errorf("run: could not convert argv[%d] to string", i)
}
invocation.Argv = append(invocation.Argv, arg)
}
if err := bw.biome.Run(ctx, invocation); err != nil {
return nil, err
}
return starlark.None, nil
}
func newDirsModule(dirs *biome.Dirs) *module {
return &module{
name: "dirs",
attrs: starlark.StringDict{
"work": starlark.String(dirs.Work),
"home": starlark.String(dirs.Home),
"tools": starlark.String(dirs.Tools),
},
}
}
func newPathModule(bio biome.Biome) *module {
return &module{
name: "path",
attrs: starlark.StringDict{
"join": starlark.NewBuiltin("path.join", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(kwargs) != 0 {
return nil, fmt.Errorf("%s: keyword arguments not allowed", fn.Name())
}
stringArgs := make([]string, 0, args.Len())
for i := 0; i < args.Len(); i++ |
return starlark.String(biome.JoinPath(bio.Describe(), stringArgs...)), nil
}),
"exists": starlark.NewBuiltin("path.exists", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var path string
if err := starlark.UnpackArgs(fn.Name(), args, kwargs, "path", &path); err != nil {
return nil, err
}
_, err := biome.EvalSymlinks(threadContext(thread), bio, path)
return starlark.Bool(err == nil), nil
}),
},
}
}
func downloaderValue(d *downloader.Downloader) *module {
return &module{
name: "downloader",
attrs: starlark.StringDict{
"extract": starlark.NewBuiltin("extract", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
opts := &extract.Options{
Downloader: d,
Output: os.Stderr,
}
var bw *biomeWrapper
mode := "tarbomb"
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"biome", &bw,
"dst_dir", &opts.DestinationDir,
"url", &opts.URL,
"mode?", &mode,
)
if err != nil {
return nil, err
}
opts.Biome = bw.biome
switch mode {
case "tarbomb":
opts.ExtractMode = extract.Tarbomb
case "strip":
opts.ExtractMode = extract.StripTopDirectory
default:
return nil, fmt.Errorf("%s: invalid mode %q", fn.Name(), mode)
}
if err := extract.Extract(threadContext(thread), opts); err != nil {
return nil, err
}
return starlark.None, nil
}),
},
}
}
var _ starlark.HasAttrs = (*module)(nil)
type module struct {
name string
attrs starlark.StringDict
}
func (*module) Type() string { return "module" }
func (*module) Freeze() {}
func (*module) Truth() starlark.Bool { return starlark.True }
func (*module) Hash() (uint32, error) { return 0, fmt.Errorf("module not hashable") }
func (mod *module) String() string { return "<module '" + mod.name + "'>" }
func (mod *module) Attr(name string) (starlark.Value, error) {
return mod.attrs[name], nil
}
func (mod *module) AttrNames() []string {
return sortedStringDictKeys(mod.attrs)
}
func sortedStringDictKeys(d starlark.StringDict) []string {
keys := make([]string, 0, len(d))
for k := range d {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
| {
arg, ok := starlark.AsString(args.Index(i))
if !ok {
return nil, fmt.Errorf("%s: could not convert arg[%d] to string", fn.Name(), i)
}
stringArgs = append(stringArgs, arg)
} | conditional_block |
install.go | // Copyright 2021 Ross Light
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/spf13/cobra"
"go.starlark.net/starlark"
"go4.org/xdgdir"
"zombiezen.com/go/biome"
"zombiezen.com/go/biome/downloader"
"zombiezen.com/go/biome/internal/extract"
"zombiezen.com/go/sqlite/sqlitex"
)
type installCommand struct {
biomeID string
script string
version string
}
func newInstallCommand() *cobra.Command {
c := new(installCommand)
cmd := &cobra.Command{
Use: "install [options] SCRIPT VERSION",
DisableFlagsInUseLine: true,
Short: "run an installer script",
Args: cobra.ExactArgs(2),
SilenceErrors: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
c.script = args[0]
c.version = args[1]
return c.run(cmd.Context())
},
}
cmd.Flags().StringVarP(&c.biomeID, "biome", "b", "", "biome to run inside")
return cmd
}
func (c *installCommand) run(ctx context.Context) (err error) {
db, err := openDB(ctx)
if err != nil {
return err
}
defer db.Close()
endFn, err := sqlitex.ImmediateTransaction(db)
if err != nil {
return err
}
defer endFn(&err)
rec, err := findBiome(db, c.biomeID)
if err != nil {
return err
}
bio, err := rec.setupWithoutEnv(ctx, db)
if err != nil {
return err
}
thread := &starlark.Thread{}
thread.SetLocal(threadContextKey, ctx)
script, err := os.Open(c.script)
if err != nil {
return err
}
defer script.Close()
predeclared := starlark.StringDict{
"Environment": starlark.NewBuiltin("Environment", builtinEnvironmentCtor),
}
globals, err := starlark.ExecFile(thread, c.script, script, predeclared)
if err != nil {
return err
}
installFuncValue := globals["install"]
if installFuncValue == nil {
return fmt.Errorf("no install function found")
}
installFunc, ok := installFuncValue.(*starlark.Function)
if !ok {
return fmt.Errorf("`install` is declared as %s instead of function", installFuncValue.Type())
}
if !installFunc.HasKwargs() {
//lint:ignore ST1005 referencing Environment constructor
return fmt.Errorf("install function does not permit extra keyword arguments. " +
"Please add `**kwargs` to the end of install's parameters for forward compatibility.")
}
cachePath := xdgdir.Cache.Path()
if cachePath == "" {
return fmt.Errorf("%v not set", xdgdir.Cache)
}
myDownloader := downloader.New(filepath.Join(cachePath, cacheSubdirName, "downloads"))
installReturnValue, err := starlark.Call(
thread,
installFunc,
starlark.Tuple{biomeValue(bio), starlark.String(c.version)},
[]starlark.Tuple{
{starlark.String("downloader"), downloaderValue(myDownloader)},
},
)
if err != nil {
return err
}
installReturn, ok := installReturnValue.(*envValue)
if !ok {
return fmt.Errorf("`install` returned a %s instead of Environment", installReturnValue.Type())
}
installEnv, err := installReturn.toEnvironment()
if err != nil {
return fmt.Errorf("install return value: %w", err)
}
if err := writeBiomeEnvironment(db, rec.id, rec.env.Merge(installEnv)); err != nil {
return err
}
return nil
}
const threadContextKey = "zombiezen.com/go/biome.Context"
func threadContext(t *starlark.Thread) context.Context {
ctx, _ := t.Local(threadContextKey).(context.Context)
if ctx == nil {
ctx = context.Background()
}
return ctx
}
type envValue struct {
vars *starlark.Dict
prependPath *starlark.List
appendPath *starlark.List
}
func builtinEnvironmentCtor(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
ev := new(envValue)
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"vars?", &ev.vars,
"prepend_path?", &ev.prependPath,
"append_path?", &ev.appendPath,
)
if err != nil {
return nil, err
}
if ev.vars == nil {
ev.vars = new(starlark.Dict)
}
if ev.prependPath == nil {
ev.prependPath = new(starlark.List)
}
if ev.appendPath == nil {
ev.appendPath = new(starlark.List)
}
return ev, nil
}
func (ev *envValue) String() string {
return fmt.Sprintf("Environment(vars=%v, prepend_path=%v, append_path=%v)",
ev.vars, ev.prependPath, ev.appendPath)
}
func (ev *envValue) Type() string {
return "Environment"
}
func (ev *envValue) Freeze() {
ev.vars.Freeze()
ev.prependPath.Freeze()
ev.appendPath.Freeze()
}
func (ev *envValue) Truth() starlark.Bool {
return ev.vars.Len() > 0 || ev.prependPath.Len() > 0 || ev.appendPath.Len() > 0
}
func (ev *envValue) Hash() (uint32, error) {
//lint:ignore ST1005 referencing Environment constructor
return 0, fmt.Errorf("Environment not hashable")
}
func (ev *envValue) Attr(name string) (starlark.Value, error) {
switch name {
case "vars":
return ev.vars, nil
case "prepend_path":
return ev.prependPath, nil
case "append_path":
return ev.appendPath, nil
default:
return nil, nil
}
}
func (ev *envValue) AttrNames() []string {
return []string{
"append_path",
"prepend_path",
"vars",
}
}
func (ev *envValue) toEnvironment() (biome.Environment, error) {
var e biome.Environment
if n := ev.vars.Len(); n > 0 {
e.Vars = make(map[string]string, n)
for _, kv := range ev.vars.Items() {
k, ok := starlark.AsString(kv[0])
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.vars key %v", kv[0])
}
v, ok := starlark.AsString(kv[1])
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.vars value %v for key %q", kv[1], k)
}
e.Vars[k] = v
}
}
for i, n := 0, ev.appendPath.Len(); i < n; i++ {
pv := ev.appendPath.Index(i)
p, ok := starlark.AsString(pv)
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.appendPath[%d] value %v", i, pv)
}
e.AppendPath = append(e.AppendPath, p)
}
for i, n := 0, ev.prependPath.Len(); i < n; i++ {
pv := ev.prependPath.Index(i)
p, ok := starlark.AsString(pv)
if !ok {
return biome.Environment{}, fmt.Errorf("invalid Environment.prependPath[%d] value %v", i, pv)
}
e.PrependPath = append(e.PrependPath, p)
}
return e, nil
}
type biomeWrapper struct {
biome biome.Biome
attrs starlark.StringDict
}
func biomeValue(bio biome.Biome) *biomeWrapper {
bw := &biomeWrapper{biome: bio}
bw.attrs = starlark.StringDict{
"os": starlark.String(bio.Describe().OS),
"arch": starlark.String(bio.Describe().Arch),
"run": starlark.NewBuiltin("run", bw.runBuiltin),
"dirs": newDirsModule(bio.Dirs()),
"path": newPathModule(bio),
}
return bw
}
func (*biomeWrapper) Type() string { return "biome" }
func (*biomeWrapper) Freeze() {}
func (*biomeWrapper) Truth() starlark.Bool { return starlark.True }
func (*biomeWrapper) Hash() (uint32, error) { return 0, fmt.Errorf("biome not hashable") }
func (*biomeWrapper) String() string { return "<biome>" }
func (bw *biomeWrapper) Attr(name string) (starlark.Value, error) {
return bw.attrs[name], nil
}
func (bw *biomeWrapper) AttrNames() []string {
return sortedStringDictKeys(bw.attrs)
}
func (bw *biomeWrapper) runBuiltin(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
ctx := threadContext(thread)
var argv *starlark.List
invocation := &biome.Invocation{
Stdout: os.Stderr,
Stderr: os.Stderr,
}
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"argv", &argv,
"dir??", &invocation.Dir,
)
if err != nil {
return nil, err
}
invocation.Argv = make([]string, 0, argv.Len())
for i := 0; i < argv.Len(); i++ {
arg, ok := starlark.AsString(argv.Index(i))
if !ok {
return nil, fmt.Errorf("run: could not convert argv[%d] to string", i)
}
invocation.Argv = append(invocation.Argv, arg)
}
if err := bw.biome.Run(ctx, invocation); err != nil {
return nil, err
}
return starlark.None, nil
}
func newDirsModule(dirs *biome.Dirs) *module {
return &module{
name: "dirs",
attrs: starlark.StringDict{
"work": starlark.String(dirs.Work),
"home": starlark.String(dirs.Home),
"tools": starlark.String(dirs.Tools),
},
}
}
func newPathModule(bio biome.Biome) *module {
return &module{
name: "path",
attrs: starlark.StringDict{
"join": starlark.NewBuiltin("path.join", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
if len(kwargs) != 0 {
return nil, fmt.Errorf("%s: keyword arguments not allowed", fn.Name())
}
stringArgs := make([]string, 0, args.Len())
for i := 0; i < args.Len(); i++ {
arg, ok := starlark.AsString(args.Index(i))
if !ok {
return nil, fmt.Errorf("%s: could not convert arg[%d] to string", fn.Name(), i)
}
stringArgs = append(stringArgs, arg)
}
return starlark.String(biome.JoinPath(bio.Describe(), stringArgs...)), nil
}),
"exists": starlark.NewBuiltin("path.exists", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
var path string
if err := starlark.UnpackArgs(fn.Name(), args, kwargs, "path", &path); err != nil {
return nil, err
}
_, err := biome.EvalSymlinks(threadContext(thread), bio, path)
return starlark.Bool(err == nil), nil
}),
},
}
}
func downloaderValue(d *downloader.Downloader) *module {
return &module{
name: "downloader",
attrs: starlark.StringDict{
"extract": starlark.NewBuiltin("extract", func(thread *starlark.Thread, fn *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) {
opts := &extract.Options{
Downloader: d,
Output: os.Stderr,
}
var bw *biomeWrapper
mode := "tarbomb"
err := starlark.UnpackArgs(fn.Name(), args, kwargs,
"biome", &bw,
"dst_dir", &opts.DestinationDir,
"url", &opts.URL,
"mode?", &mode,
)
if err != nil {
return nil, err
}
opts.Biome = bw.biome
switch mode {
case "tarbomb":
opts.ExtractMode = extract.Tarbomb
case "strip":
opts.ExtractMode = extract.StripTopDirectory
default:
return nil, fmt.Errorf("%s: invalid mode %q", fn.Name(), mode)
}
if err := extract.Extract(threadContext(thread), opts); err != nil {
return nil, err
}
return starlark.None, nil
}),
},
}
}
var _ starlark.HasAttrs = (*module)(nil)
type module struct {
name string
attrs starlark.StringDict
}
func (*module) Type() string { return "module" }
func (*module) Freeze() {}
func (*module) Truth() starlark.Bool { return starlark.True }
func (*module) Hash() (uint32, error) { return 0, fmt.Errorf("module not hashable") }
func (mod *module) String() string { return "<module '" + mod.name + "'>" }
func (mod *module) Attr(name string) (starlark.Value, error) {
return mod.attrs[name], nil
}
func (mod *module) | () []string {
return sortedStringDictKeys(mod.attrs)
}
func sortedStringDictKeys(d starlark.StringDict) []string {
keys := make([]string, 0, len(d))
for k := range d {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
| AttrNames | identifier_name |
mod.rs | /* File_config_functionalities_pmz */
// conditions:
/*
exec trait: [alias:(file/any)] [operations] [path/name] [parameters/--options]
Read:Content _ none
Write:Content _ String : if use multi line content ->
check len()::Enum -> i32|&str
Update:Content _ String
Delete:Content _ none
Some file operations need parameters and some don't;
*/
mod parameters;
pub use super::interface::{self, components, printer, template_engine, text_processing};
pub use super::utility::{self, ErrorHandler::FileError};
use parameters::filter_param;
use printer::TermCfg;
use std::{
fs::File,
io::prelude::*,
io::{self},
time::Duration,
};
use template_engine::TemplateBuilder;
use template_engine::TemplateEngine;
type Params = [Vec<String>; 2];
use std::collections::hash_map::HashMap;
#[derive(Debug, PartialOrd, PartialEq)]
pub struct Fileconfig {
name: String,
access_at: Duration,
query: String,
parameters: Params,
// content:Option<String>,
content: String,
path: String,
}
impl Fileconfig {
pub fn new(param: &str, timestamp: fn() -> Duration) -> Result<Fileconfig, &'static str> {
let mut command_chunk = Vec::new();
for res in param.trim().split_whitespace() {
command_chunk.push(res.to_owned());
}
if command_chunk.len() < 3 {
return Err("Insufficient parameters to run file operations!");
}
let capture = |index: usize| command_chunk.get(index).unwrap().to_owned();
let mut vc: [Vec<String>; 2] = [Vec::new(), Vec::new()];
if command_chunk.len() > 3 {
let v_param = command_chunk[3..command_chunk.len()].to_owned();
let p_vec = v_param.into_iter().map(|p_str| String::from(p_str));
// let tup = (p_reg,quote_word);
//^"[a-zA-Z-\s]+"
let throw_reg_panic =
|regex_err: regex::Error| panic!("Verification Errors! : {}", regex_err);
//^<\w++>$
let p_reg = regex::Regex::new(r"^\--+[a-zA-Z]+").unwrap_or_else(|x| throw_reg_panic(x));
let quote_word = regex::Regex::new(r#"(["'])((\\{2})*|(.*?[^\\](\\{2})*))"#)
.unwrap_or_else(|x| throw_reg_panic(x));
let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap();
p_vec.for_each(|x| {
if match_inside_brac.is_match(&x) || quote_word.is_match(&x) {
vc[0].push(x);
} else if p_reg.is_match(&x) {
vc[1].push(x);
}
})
}
let result = Fileconfig {
name: capture(2),
query: capture(1),
path: capture(2),
access_at: timestamp(),
parameters: vc,
content: String::from("None"),
};
Ok(result)
}
fn parse_quotation(&self, param: &Vec<String>) -> Vec<String> {
let quoted = |st: &str| st.starts_with("\"") && st.ends_with("\"");
param
.into_iter()
.filter(|st| quoted(st))
.map(|quote_par| {
text_processing::CrumpCluster::break_chunk("e_par)
.delete(0, Some(1))
.delete(quote_par.len() - 1, Some(quote_par.len()))
.merge_crump()
})
.collect::<Vec<String>>()
}
fn parse_bracket(&self, param: &Vec<String>) -> Vec<String> {
let match_brack: &[_] = &['[', ']', '\"'];
param
.iter()
// .filter(|general_param| match_inside_brac.is_match(general_param))
.flat_map(|bk_par| {
let split_brack = bk_par
.trim_matches(match_brack)
.split_whitespace()
.map(|f| f.to_string())
.collect::<Vec<String>>();
return split_brack;
})
.collect::<Vec<String>>()
// .filter(|bracketed|);
}
pub fn run(&self) -> Result<(), FileError> {
let init_ptr = TermCfg::new()
.set_attr(console::Attribute::Bold)
.set_attr(console::Attribute::Italic);
let print = init_ptr.gen_print(Some(console::Color::Blue));
let mut print_ln = init_ptr.gen_println(Some(console::Color::Blue));
let mut err_collector: Vec<FileError> = Vec::new();
let display_txt = |txt: &str| -> template_engine::Template {
let mut tmp_engine = template_engine::TemplateFactory::init()
.parse_in_template(txt)
.create_movable()
.collect();
let template = tmp_engine.padding(vec![1, 6, 6, 3]);
template.to_owned()
};
match self.query.as_str() {
"update" => {
// self.write(params[0], params[1].parse::<i32>().unwrap());
println!("what is your ct?");
let elim_quote = self.parse_bracket(&self.parameters[0]);
self.update(&elim_quote[1], elim_quote[0].clone().as_str());
}
"search" => {
let unquote = self.parse_bracket(&self.parameters[0]);
print_ln(&format!("<->statistics of word {:?}<->", unquote))?;
let mut p = init_ptr.gen_println(Some(console::Color::Blue));
for quoted in unquote {
let quoted = filter_param(&self.parameters[1], "ed);
let filtered = filter_param(&self.parameters[1], "ed);
match self.search(&filtered) {
Ok(found_map) => {
print!("Highligted-Text: \n");
let full_content = self.read().unwrap();
let total_line = found_map.len();
let mut key_holder = Vec::new();
found_map.iter().for_each(|(key, _)| key_holder.push(key));
let mut count = 0;
let mut crumps = full_content
.lines()
.into_iter()
.enumerate()
.map(|(idx, x)| {
(idx as i64, text_processing::CrumpCluster::break_chunk(x))
})
.collect::<Vec<(i64, text_processing::CrumpCluster)>>();
while count < found_map.len() {
// each_indx.iter().for_each(|x|)
crumps.iter_mut().for_each(|(loc, crump)| {
if loc == key_holder[count] {
let locations = found_map.get(loc).unwrap();
locations.into_iter().for_each(|(start, end)| {
crump.delete(*start, Some(*end));
crump.insert(
*start,
&format!("--->\"{}\"<---", quoted.clone().trim(),)
.trim(),
);
});
}
});
count += 1;
}
let fully_merged = crumps
.iter()
.map(|(_, crump)| {
let merged = crump.merge_crump();
return merged;
})
.collect::<String>();
// display_txt(&fully_merged, "+/");
if total_line <= 1 {
p(&"No word found in the text!")?;
} else {
display_txt(&fully_merged)
.border("+", components::BorderWeight::Bold)
.center_box()
.display();
p(&format!(
"->Number of line that contain word /{}/: {}",
quoted, total_line
))?;
p(&format!(
"Total number of words /{}/ {}",
quoted,
count_found_map(found_map)
))?;
}
}
Err(file_err) => err_collector.push(file_err),
}
}
}
"read" => {
let result = self.read();
print_ln("Reading contains : ")?;
match result {
Ok(txt) => {
display_txt(&filter_param(&self.parameters[1], &txt))
.border("+", components::BorderWeight::Bold)
.center_box()
.display();
}
Err(file_err) => {
err_collector.push(file_err);
}
}
}
_ => err_collector.push(FileError::new().set_message("Invalid operation!")),
}
if err_collector.len() > 0 {
Err(err_collector.into_iter().next().unwrap())
} else {
Ok(())
}
}
}
type OriResult<T> = Result<T, FileError>;
/*positions : [{ Number of line to modify / word to replace / newdoc }]*/
pub trait TextPos {
fn modify(&self, content: String, new_str: &str) -> Vec<String>;
}
// [x1,x2,"string"]
// replace all word within that target across all content
impl TextPos for &str {
fn modify(&self, content: String, new_str: &str) -> Vec<String> {
if self.contains(" ") {
let multi_tar = self.split_whitespace().collect::<Vec<&str>>();
let emp = multi_tar
.iter()
.map(|x| {
let xt = content.replace(*x, new_str);
if xt != content {
return xt;
} else {
"None".to_string()
}
})
.filter(|x| *x != "None".to_string())
.collect::<Vec<String>>();
// println!("special emp {:#?}",emp);
return emp;
} else {
let mut result: Vec<String> = Vec::new();
result.push(content.replace(self, new_str));
return result;
}
}
}
pub trait Operation {
fn read(&self) -> OriResult<String>;
fn update<T>(&self, new_content: &str, target: T)
where
T: TextPos;
fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError>;
}
fn checkempty(result: &str) -> OriResult<String> {
if result.is_empty() {
let empty_err = FileError::new().set_message("The Folder is Empty inside");
Err(empty_err)
} else {
Ok(result.trim().to_string())
}
}
impl Operation for Fileconfig {
fn read(&self) -> OriResult<String> {
let file = File::open(&self.path)?;
let mut buffer = io::BufReader::new(file);
let mut result = String::new();
buffer.read_to_string(&mut result)?;
checkempty(&result)
}
// use for string only
fn update<T: TextPos>(&self, new_content: &str, target: T) {
/* if target is multiple start spit out different result to different file! */
let existed_content = self.read().expect("Cannot open that file");
let mutation = target.modify(existed_content.to_string(), new_content);
println!("muttip {:?}", mutation);
let mut count = 0;
for n in mutation {
let new_path = format!("output -- {} [{}]", self.path, count);
let mut newfile = File::create(new_path).unwrap();
newfile.write_all(n.as_bytes()).unwrap();
count += 1;
}
}
// regex for search: ^"[a-zA-Z-\s]+"
fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError> {
let mut err_clt = String::new();
// let found_map = Vec::new();
let mut found_map: HashMap<i64, Vec<(usize, usize)>> = HashMap::new();
if self.parameters.is_empty() {
err_clt.push_str("No params!")
}
let mut content = String::new();
match self.read() {
Ok(ct) => content.push_str(&ct),
Err(read_error) => err_clt.push_str(&read_error.message),
}
let mut count: i64 = 0;
let mut line_found = Vec::new();
for (line_num, line) in content.lines().enumerate() {
let each_line = line.trim();
let word_group = each_line.split_whitespace().collect::<Vec<&str>>();
let reg = regex::Regex::new(&format!(r"{}", target)).unwrap();
let mut indx_vec = Vec::new();
for found in reg.find_iter(line) {
let key_indx = (found.start(), found.end());
indx_vec.push(key_indx);
}
if word_group.len() >= 1 && word_group.into_iter().any(|word| word.contains(target)) {
line_found.push(line_num);
found_map.insert(line_num as i64, indx_vec);
count += 1;
}
}
if err_clt.len() > 0 {
let bruh = FileError::new().set_message(&err_clt.clone());
return Err(bruh);
} else {
return Ok(found_map);
}
/**/
}
}
impl Clone for Fileconfig {
fn clone(&self) -> Self {
return Fileconfig {
name: self.name.clone(),
access_at: self.access_at,
query: self.query.clone(),
parameters: self.parameters.clone(),
// content:Option<String>,
content: self.content.clone(),
path: self.path.clone(),
};
}
}
fn count_found_map(hsm: HashMap<i64, Vec<(usize, usize)>>) -> usize {
let mut count: usize = 0;
for (_, hs) in hsm {
hs.iter().for_each(|_| count += 1);
}
return count;
}
#[test]
fn test() {
let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap();
let test = "[Apple sauce bananan ba;;;a]"; | println!(
"test {:?} ",
(match_inside_brac.is_match(test), test.trim_matches(x))
);
} | println!("t {}", test);
let x: &[_] = &['[', ']']; | random_line_split |
mod.rs | /* File_config_functionalities_pmz */
// conditions:
/*
exec trait: [alias:(file/any)] [operations] [path/name] [parameters/--options]
Read:Content _ none
Write:Content _ String : if use multi line content ->
check len()::Enum -> i32|&str
Update:Content _ String
Delete:Content _ none
Some file operations need parameters and some don't;
*/
mod parameters;
pub use super::interface::{self, components, printer, template_engine, text_processing};
pub use super::utility::{self, ErrorHandler::FileError};
use parameters::filter_param;
use printer::TermCfg;
use std::{
fs::File,
io::prelude::*,
io::{self},
time::Duration,
};
use template_engine::TemplateBuilder;
use template_engine::TemplateEngine;
type Params = [Vec<String>; 2];
use std::collections::hash_map::HashMap;
#[derive(Debug, PartialOrd, PartialEq)]
pub struct Fileconfig {
name: String,
access_at: Duration,
query: String,
parameters: Params,
// content:Option<String>,
content: String,
path: String,
}
impl Fileconfig {
pub fn new(param: &str, timestamp: fn() -> Duration) -> Result<Fileconfig, &'static str> {
let mut command_chunk = Vec::new();
for res in param.trim().split_whitespace() {
command_chunk.push(res.to_owned());
}
if command_chunk.len() < 3 {
return Err("Insufficient parameters to run file operations!");
}
let capture = |index: usize| command_chunk.get(index).unwrap().to_owned();
let mut vc: [Vec<String>; 2] = [Vec::new(), Vec::new()];
if command_chunk.len() > 3 {
let v_param = command_chunk[3..command_chunk.len()].to_owned();
let p_vec = v_param.into_iter().map(|p_str| String::from(p_str));
// let tup = (p_reg,quote_word);
//^"[a-zA-Z-\s]+"
let throw_reg_panic =
|regex_err: regex::Error| panic!("Verification Errors! : {}", regex_err);
//^<\w++>$
let p_reg = regex::Regex::new(r"^\--+[a-zA-Z]+").unwrap_or_else(|x| throw_reg_panic(x));
let quote_word = regex::Regex::new(r#"(["'])((\\{2})*|(.*?[^\\](\\{2})*))"#)
.unwrap_or_else(|x| throw_reg_panic(x));
let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap();
p_vec.for_each(|x| {
if match_inside_brac.is_match(&x) || quote_word.is_match(&x) {
vc[0].push(x);
} else if p_reg.is_match(&x) {
vc[1].push(x);
}
})
}
let result = Fileconfig {
name: capture(2),
query: capture(1),
path: capture(2),
access_at: timestamp(),
parameters: vc,
content: String::from("None"),
};
Ok(result)
}
fn parse_quotation(&self, param: &Vec<String>) -> Vec<String> {
let quoted = |st: &str| st.starts_with("\"") && st.ends_with("\"");
param
.into_iter()
.filter(|st| quoted(st))
.map(|quote_par| {
text_processing::CrumpCluster::break_chunk("e_par)
.delete(0, Some(1))
.delete(quote_par.len() - 1, Some(quote_par.len()))
.merge_crump()
})
.collect::<Vec<String>>()
}
fn parse_bracket(&self, param: &Vec<String>) -> Vec<String> {
let match_brack: &[_] = &['[', ']', '\"'];
param
.iter()
// .filter(|general_param| match_inside_brac.is_match(general_param))
.flat_map(|bk_par| {
let split_brack = bk_par
.trim_matches(match_brack)
.split_whitespace()
.map(|f| f.to_string())
.collect::<Vec<String>>();
return split_brack;
})
.collect::<Vec<String>>()
// .filter(|bracketed|);
}
pub fn run(&self) -> Result<(), FileError> {
let init_ptr = TermCfg::new()
.set_attr(console::Attribute::Bold)
.set_attr(console::Attribute::Italic);
let print = init_ptr.gen_print(Some(console::Color::Blue));
let mut print_ln = init_ptr.gen_println(Some(console::Color::Blue));
let mut err_collector: Vec<FileError> = Vec::new();
let display_txt = |txt: &str| -> template_engine::Template {
let mut tmp_engine = template_engine::TemplateFactory::init()
.parse_in_template(txt)
.create_movable()
.collect();
let template = tmp_engine.padding(vec![1, 6, 6, 3]);
template.to_owned()
};
match self.query.as_str() {
"update" => {
// self.write(params[0], params[1].parse::<i32>().unwrap());
println!("what is your ct?");
let elim_quote = self.parse_bracket(&self.parameters[0]);
self.update(&elim_quote[1], elim_quote[0].clone().as_str());
}
"search" => {
let unquote = self.parse_bracket(&self.parameters[0]);
print_ln(&format!("<->statistics of word {:?}<->", unquote))?;
let mut p = init_ptr.gen_println(Some(console::Color::Blue));
for quoted in unquote {
let quoted = filter_param(&self.parameters[1], "ed);
let filtered = filter_param(&self.parameters[1], "ed);
match self.search(&filtered) {
Ok(found_map) => {
print!("Highligted-Text: \n");
let full_content = self.read().unwrap();
let total_line = found_map.len();
let mut key_holder = Vec::new();
found_map.iter().for_each(|(key, _)| key_holder.push(key));
let mut count = 0;
let mut crumps = full_content
.lines()
.into_iter()
.enumerate()
.map(|(idx, x)| {
(idx as i64, text_processing::CrumpCluster::break_chunk(x))
})
.collect::<Vec<(i64, text_processing::CrumpCluster)>>();
while count < found_map.len() {
// each_indx.iter().for_each(|x|)
crumps.iter_mut().for_each(|(loc, crump)| {
if loc == key_holder[count] {
let locations = found_map.get(loc).unwrap();
locations.into_iter().for_each(|(start, end)| {
crump.delete(*start, Some(*end));
crump.insert(
*start,
&format!("--->\"{}\"<---", quoted.clone().trim(),)
.trim(),
);
});
}
});
count += 1;
}
let fully_merged = crumps
.iter()
.map(|(_, crump)| {
let merged = crump.merge_crump();
return merged;
})
.collect::<String>();
// display_txt(&fully_merged, "+/");
if total_line <= 1 {
p(&"No word found in the text!")?;
} else {
display_txt(&fully_merged)
.border("+", components::BorderWeight::Bold)
.center_box()
.display();
p(&format!(
"->Number of line that contain word /{}/: {}",
quoted, total_line
))?;
p(&format!(
"Total number of words /{}/ {}",
quoted,
count_found_map(found_map)
))?;
}
}
Err(file_err) => err_collector.push(file_err),
}
}
}
"read" => {
let result = self.read();
print_ln("Reading contains : ")?;
match result {
Ok(txt) => {
display_txt(&filter_param(&self.parameters[1], &txt))
.border("+", components::BorderWeight::Bold)
.center_box()
.display();
}
Err(file_err) => {
err_collector.push(file_err);
}
}
}
_ => err_collector.push(FileError::new().set_message("Invalid operation!")),
}
if err_collector.len() > 0 {
Err(err_collector.into_iter().next().unwrap())
} else {
Ok(())
}
}
}
type OriResult<T> = Result<T, FileError>;
/*positions : [{ Number of line to modify / word to replace / newdoc }]*/
pub trait TextPos {
fn modify(&self, content: String, new_str: &str) -> Vec<String>;
}
// [x1,x2,"string"]
// replace all word within that target across all content
impl TextPos for &str {
fn modify(&self, content: String, new_str: &str) -> Vec<String> {
if self.contains(" ") {
let multi_tar = self.split_whitespace().collect::<Vec<&str>>();
let emp = multi_tar
.iter()
.map(|x| {
let xt = content.replace(*x, new_str);
if xt != content {
return xt;
} else {
"None".to_string()
}
})
.filter(|x| *x != "None".to_string())
.collect::<Vec<String>>();
// println!("special emp {:#?}",emp);
return emp;
} else {
let mut result: Vec<String> = Vec::new();
result.push(content.replace(self, new_str));
return result;
}
}
}
pub trait Operation {
fn read(&self) -> OriResult<String>;
fn update<T>(&self, new_content: &str, target: T)
where
T: TextPos;
fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError>;
}
fn checkempty(result: &str) -> OriResult<String> {
if result.is_empty() {
let empty_err = FileError::new().set_message("The Folder is Empty inside");
Err(empty_err)
} else {
Ok(result.trim().to_string())
}
}
impl Operation for Fileconfig {
fn read(&self) -> OriResult<String> {
let file = File::open(&self.path)?;
let mut buffer = io::BufReader::new(file);
let mut result = String::new();
buffer.read_to_string(&mut result)?;
checkempty(&result)
}
// use for string only
fn update<T: TextPos>(&self, new_content: &str, target: T) {
/* if target is multiple start spit out different result to different file! */
let existed_content = self.read().expect("Cannot open that file");
let mutation = target.modify(existed_content.to_string(), new_content);
println!("muttip {:?}", mutation);
let mut count = 0;
for n in mutation {
let new_path = format!("output -- {} [{}]", self.path, count);
let mut newfile = File::create(new_path).unwrap();
newfile.write_all(n.as_bytes()).unwrap();
count += 1;
}
}
// regex for search: ^"[a-zA-Z-\s]+"
fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError> {
let mut err_clt = String::new();
// let found_map = Vec::new();
let mut found_map: HashMap<i64, Vec<(usize, usize)>> = HashMap::new();
if self.parameters.is_empty() {
err_clt.push_str("No params!")
}
let mut content = String::new();
match self.read() {
Ok(ct) => content.push_str(&ct),
Err(read_error) => err_clt.push_str(&read_error.message),
}
let mut count: i64 = 0;
let mut line_found = Vec::new();
for (line_num, line) in content.lines().enumerate() {
let each_line = line.trim();
let word_group = each_line.split_whitespace().collect::<Vec<&str>>();
let reg = regex::Regex::new(&format!(r"{}", target)).unwrap();
let mut indx_vec = Vec::new();
for found in reg.find_iter(line) {
let key_indx = (found.start(), found.end());
indx_vec.push(key_indx);
}
if word_group.len() >= 1 && word_group.into_iter().any(|word| word.contains(target)) {
line_found.push(line_num);
found_map.insert(line_num as i64, indx_vec);
count += 1;
}
}
if err_clt.len() > 0 {
let bruh = FileError::new().set_message(&err_clt.clone());
return Err(bruh);
} else {
return Ok(found_map);
}
/**/
}
}
impl Clone for Fileconfig {
fn clone(&self) -> Self {
return Fileconfig {
name: self.name.clone(),
access_at: self.access_at,
query: self.query.clone(),
parameters: self.parameters.clone(),
// content:Option<String>,
content: self.content.clone(),
path: self.path.clone(),
};
}
}
fn count_found_map(hsm: HashMap<i64, Vec<(usize, usize)>>) -> usize {
let mut count: usize = 0;
for (_, hs) in hsm {
hs.iter().for_each(|_| count += 1);
}
return count;
}
#[test]
fn test() | {
let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap();
let test = "[Apple sauce bananan ba;;;a]";
println!("t {}", test);
let x: &[_] = &['[', ']'];
println!(
"test {:?} ",
(match_inside_brac.is_match(test), test.trim_matches(x))
);
} | identifier_body | |
mod.rs | /* File_config_functionalities_pmz */
// conditions:
/*
exec trait: [alias:(file/any)] [operations] [path/name] [parameters/--options]
Read:Content _ none
Write:Content _ String : if use multi line content ->
check len()::Enum -> i32|&str
Update:Content _ String
Delete:Content _ none
Some file operations need parameters and some don't;
*/
mod parameters;
pub use super::interface::{self, components, printer, template_engine, text_processing};
pub use super::utility::{self, ErrorHandler::FileError};
use parameters::filter_param;
use printer::TermCfg;
use std::{
fs::File,
io::prelude::*,
io::{self},
time::Duration,
};
use template_engine::TemplateBuilder;
use template_engine::TemplateEngine;
type Params = [Vec<String>; 2];
use std::collections::hash_map::HashMap;
#[derive(Debug, PartialOrd, PartialEq)]
pub struct Fileconfig {
name: String,
access_at: Duration,
query: String,
parameters: Params,
// content:Option<String>,
content: String,
path: String,
}
impl Fileconfig {
pub fn new(param: &str, timestamp: fn() -> Duration) -> Result<Fileconfig, &'static str> {
let mut command_chunk = Vec::new();
for res in param.trim().split_whitespace() {
command_chunk.push(res.to_owned());
}
if command_chunk.len() < 3 {
return Err("Insufficient parameters to run file operations!");
}
let capture = |index: usize| command_chunk.get(index).unwrap().to_owned();
let mut vc: [Vec<String>; 2] = [Vec::new(), Vec::new()];
if command_chunk.len() > 3 {
let v_param = command_chunk[3..command_chunk.len()].to_owned();
let p_vec = v_param.into_iter().map(|p_str| String::from(p_str));
// let tup = (p_reg,quote_word);
//^"[a-zA-Z-\s]+"
let throw_reg_panic =
|regex_err: regex::Error| panic!("Verification Errors! : {}", regex_err);
//^<\w++>$
let p_reg = regex::Regex::new(r"^\--+[a-zA-Z]+").unwrap_or_else(|x| throw_reg_panic(x));
let quote_word = regex::Regex::new(r#"(["'])((\\{2})*|(.*?[^\\](\\{2})*))"#)
.unwrap_or_else(|x| throw_reg_panic(x));
let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap();
p_vec.for_each(|x| {
if match_inside_brac.is_match(&x) || quote_word.is_match(&x) {
vc[0].push(x);
} else if p_reg.is_match(&x) {
vc[1].push(x);
}
})
}
let result = Fileconfig {
name: capture(2),
query: capture(1),
path: capture(2),
access_at: timestamp(),
parameters: vc,
content: String::from("None"),
};
Ok(result)
}
fn parse_quotation(&self, param: &Vec<String>) -> Vec<String> {
let quoted = |st: &str| st.starts_with("\"") && st.ends_with("\"");
param
.into_iter()
.filter(|st| quoted(st))
.map(|quote_par| {
text_processing::CrumpCluster::break_chunk("e_par)
.delete(0, Some(1))
.delete(quote_par.len() - 1, Some(quote_par.len()))
.merge_crump()
})
.collect::<Vec<String>>()
}
fn parse_bracket(&self, param: &Vec<String>) -> Vec<String> {
let match_brack: &[_] = &['[', ']', '\"'];
param
.iter()
// .filter(|general_param| match_inside_brac.is_match(general_param))
.flat_map(|bk_par| {
let split_brack = bk_par
.trim_matches(match_brack)
.split_whitespace()
.map(|f| f.to_string())
.collect::<Vec<String>>();
return split_brack;
})
.collect::<Vec<String>>()
// .filter(|bracketed|);
}
pub fn run(&self) -> Result<(), FileError> {
let init_ptr = TermCfg::new()
.set_attr(console::Attribute::Bold)
.set_attr(console::Attribute::Italic);
let print = init_ptr.gen_print(Some(console::Color::Blue));
let mut print_ln = init_ptr.gen_println(Some(console::Color::Blue));
let mut err_collector: Vec<FileError> = Vec::new();
let display_txt = |txt: &str| -> template_engine::Template {
let mut tmp_engine = template_engine::TemplateFactory::init()
.parse_in_template(txt)
.create_movable()
.collect();
let template = tmp_engine.padding(vec![1, 6, 6, 3]);
template.to_owned()
};
match self.query.as_str() {
"update" => {
// self.write(params[0], params[1].parse::<i32>().unwrap());
println!("what is your ct?");
let elim_quote = self.parse_bracket(&self.parameters[0]);
self.update(&elim_quote[1], elim_quote[0].clone().as_str());
}
"search" => {
let unquote = self.parse_bracket(&self.parameters[0]);
print_ln(&format!("<->statistics of word {:?}<->", unquote))?;
let mut p = init_ptr.gen_println(Some(console::Color::Blue));
for quoted in unquote {
let quoted = filter_param(&self.parameters[1], "ed);
let filtered = filter_param(&self.parameters[1], "ed);
match self.search(&filtered) {
Ok(found_map) => {
print!("Highligted-Text: \n");
let full_content = self.read().unwrap();
let total_line = found_map.len();
let mut key_holder = Vec::new();
found_map.iter().for_each(|(key, _)| key_holder.push(key));
let mut count = 0;
let mut crumps = full_content
.lines()
.into_iter()
.enumerate()
.map(|(idx, x)| {
(idx as i64, text_processing::CrumpCluster::break_chunk(x))
})
.collect::<Vec<(i64, text_processing::CrumpCluster)>>();
while count < found_map.len() {
// each_indx.iter().for_each(|x|)
crumps.iter_mut().for_each(|(loc, crump)| {
if loc == key_holder[count] {
let locations = found_map.get(loc).unwrap();
locations.into_iter().for_each(|(start, end)| {
crump.delete(*start, Some(*end));
crump.insert(
*start,
&format!("--->\"{}\"<---", quoted.clone().trim(),)
.trim(),
);
});
}
});
count += 1;
}
let fully_merged = crumps
.iter()
.map(|(_, crump)| {
let merged = crump.merge_crump();
return merged;
})
.collect::<String>();
// display_txt(&fully_merged, "+/");
if total_line <= 1 {
p(&"No word found in the text!")?;
} else {
display_txt(&fully_merged)
.border("+", components::BorderWeight::Bold)
.center_box()
.display();
p(&format!(
"->Number of line that contain word /{}/: {}",
quoted, total_line
))?;
p(&format!(
"Total number of words /{}/ {}",
quoted,
count_found_map(found_map)
))?;
}
}
Err(file_err) => err_collector.push(file_err),
}
}
}
"read" => {
let result = self.read();
print_ln("Reading contains : ")?;
match result {
Ok(txt) => {
display_txt(&filter_param(&self.parameters[1], &txt))
.border("+", components::BorderWeight::Bold)
.center_box()
.display();
}
Err(file_err) => {
err_collector.push(file_err);
}
}
}
_ => err_collector.push(FileError::new().set_message("Invalid operation!")),
}
if err_collector.len() > 0 {
Err(err_collector.into_iter().next().unwrap())
} else {
Ok(())
}
}
}
type OriResult<T> = Result<T, FileError>;
/*positions : [{ Number of line to modify / word to replace / newdoc }]*/
pub trait TextPos {
fn modify(&self, content: String, new_str: &str) -> Vec<String>;
}
// [x1,x2,"string"]
// replace all word within that target across all content
impl TextPos for &str {
fn modify(&self, content: String, new_str: &str) -> Vec<String> {
if self.contains(" ") {
let multi_tar = self.split_whitespace().collect::<Vec<&str>>();
let emp = multi_tar
.iter()
.map(|x| {
let xt = content.replace(*x, new_str);
if xt != content {
return xt;
} else {
"None".to_string()
}
})
.filter(|x| *x != "None".to_string())
.collect::<Vec<String>>();
// println!("special emp {:#?}",emp);
return emp;
} else {
let mut result: Vec<String> = Vec::new();
result.push(content.replace(self, new_str));
return result;
}
}
}
pub trait Operation {
fn read(&self) -> OriResult<String>;
fn update<T>(&self, new_content: &str, target: T)
where
T: TextPos;
fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError>;
}
fn | (result: &str) -> OriResult<String> {
if result.is_empty() {
let empty_err = FileError::new().set_message("The Folder is Empty inside");
Err(empty_err)
} else {
Ok(result.trim().to_string())
}
}
impl Operation for Fileconfig {
fn read(&self) -> OriResult<String> {
let file = File::open(&self.path)?;
let mut buffer = io::BufReader::new(file);
let mut result = String::new();
buffer.read_to_string(&mut result)?;
checkempty(&result)
}
// use for string only
fn update<T: TextPos>(&self, new_content: &str, target: T) {
/* if target is multiple start spit out different result to different file! */
let existed_content = self.read().expect("Cannot open that file");
let mutation = target.modify(existed_content.to_string(), new_content);
println!("muttip {:?}", mutation);
let mut count = 0;
for n in mutation {
let new_path = format!("output -- {} [{}]", self.path, count);
let mut newfile = File::create(new_path).unwrap();
newfile.write_all(n.as_bytes()).unwrap();
count += 1;
}
}
// regex for search: ^"[a-zA-Z-\s]+"
fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError> {
let mut err_clt = String::new();
// let found_map = Vec::new();
let mut found_map: HashMap<i64, Vec<(usize, usize)>> = HashMap::new();
if self.parameters.is_empty() {
err_clt.push_str("No params!")
}
let mut content = String::new();
match self.read() {
Ok(ct) => content.push_str(&ct),
Err(read_error) => err_clt.push_str(&read_error.message),
}
let mut count: i64 = 0;
let mut line_found = Vec::new();
for (line_num, line) in content.lines().enumerate() {
let each_line = line.trim();
let word_group = each_line.split_whitespace().collect::<Vec<&str>>();
let reg = regex::Regex::new(&format!(r"{}", target)).unwrap();
let mut indx_vec = Vec::new();
for found in reg.find_iter(line) {
let key_indx = (found.start(), found.end());
indx_vec.push(key_indx);
}
if word_group.len() >= 1 && word_group.into_iter().any(|word| word.contains(target)) {
line_found.push(line_num);
found_map.insert(line_num as i64, indx_vec);
count += 1;
}
}
if err_clt.len() > 0 {
let bruh = FileError::new().set_message(&err_clt.clone());
return Err(bruh);
} else {
return Ok(found_map);
}
/**/
}
}
impl Clone for Fileconfig {
fn clone(&self) -> Self {
return Fileconfig {
name: self.name.clone(),
access_at: self.access_at,
query: self.query.clone(),
parameters: self.parameters.clone(),
// content:Option<String>,
content: self.content.clone(),
path: self.path.clone(),
};
}
}
fn count_found_map(hsm: HashMap<i64, Vec<(usize, usize)>>) -> usize {
let mut count: usize = 0;
for (_, hs) in hsm {
hs.iter().for_each(|_| count += 1);
}
return count;
}
#[test]
fn test() {
let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap();
let test = "[Apple sauce bananan ba;;;a]";
println!("t {}", test);
let x: &[_] = &['[', ']'];
println!(
"test {:?} ",
(match_inside_brac.is_match(test), test.trim_matches(x))
);
}
| checkempty | identifier_name |
sketch.js | //playerSprite
let pharahSprite;
//crosshair
let crosshairSprite;
//player
let pharah;
//enemies!!!!!
let enemy1sprite;
let enemy2sprite;
let enemy3sprite;
//enemy array
let enemyArr;
//enemyImage array
let images;
let enemyNum;
let bulletSprite;
//gamestate, 0 = menu, 1 = gameplay
let state;
//boring initializer variables
let score;
let b;
let flip;
let health;
let ammo;
let rangeData;
let shotSound;
function updateRange(clickedRange) {
// grab the range data as an integer
rangeData = int(clickedRange.value);
}
//preload assets
function preload()
{
pharahSprite = loadImage("images/pharahSprite_small.png");
bg = loadImage("images/bg.png");
crosshairSprite = loadImage("images/crosshair.png");
bulletSprite = loadImage("images/bullet.png");
soundFormats("mp3");
shotSound = loadSound("sounds/exp.mp3");
//enemies, potentially variable with more effort lol
enemy1sprite = loadImage("images/enemy1.png");
enemy2sprite = loadImage("images/enemy2.png");
enemy3sprite = loadImage("images/enemy3.png");
}
function setup()
{
//hot diggity this is big
createCanvas(1920,1200);
state = 0;
//GAME STATE
score = 0;
flip = false;
//make all the bad guys
let enemy1 = new BadBoi(150, height-238-enemy1sprite.height, enemy1sprite, .03);
let enemy2 = new BadBoi(400, height-238-enemy2sprite.height, enemy2sprite, .01);
let enemy3 = new BadBoi(150, height-238-enemy3sprite.height, enemy3sprite, .02);
let enemy4 = new BadBoi(150, height-238-enemy1sprite.height, enemy1sprite, .03);
let enemy5 = new BadBoi(400, height-238-enemy2sprite.height, enemy2sprite, .01);
let enemy6 = new BadBoi(150, height-238-enemy3sprite.height, enemy3sprite, .02);
//this hurts, but idk a better way rn
enemyArr = [enemy1, enemy2, enemy3, enemy4, enemy5, enemy6];
}
function draw()
{
//opening menu
if (state === 0)
{
textSize(72);
background(bg);
text("Pharah", 800, 100);
textSize(60);
text("Press (1) - EASY", 700, 200);
text("Press (2) - NORMAL", 700, 300);
text("Press (3) - HARD", 700, 400);
text("CONTROLS", 800, 500);
textSize(32);
text("Left / Right Arrow, or A / D", 700, 550);
text("Press SPACEBAR to use Fuel", 700, 600);
text("Click to shoot!", 700, 650);
//EASY
if (keyIsDown(49))
{
ammo = 12;
health = 1500;
state = 1;
pharah = new Pharah(health, ammo);
}
//NORMAL
else if (keyIsDown(50))
{
ammo = 8;
health = 1000;
state = 1;
pharah = new Pharah(health, ammo);
}
//HARD
else if (keyIsDown(51))
{
ammo = 4;
health = 750;
state = 1;
pharah = new Pharah(health, ammo);
}
}
//GAMEPLAY LOOP
else if (state === 1)
{
noCursor();
background(bg);
//get pharah lookin cool and movin nicely
pharah.display();
pharah.move();
//let's spawnEnemies
spawnEnemies(enemyArr, enemyArr.length);
//oh and let pharah aim
pharah.aim();
//oh god. This is how I got the bullet to finally shoot
//it feels very hacky, and based on our lecture today, I assume we
//will find a more intuitive way to do this later on in the course. But I create a bullet in mousePressed()
//so to avoid null pointer errors theres a "flip" boolean to make sure b exists (the bullet)
if (flip)
{
b.display();
b.move();
for (let i = 0; i < enemyArr.length; i++)
{
//check if there's a hit, if there is, add score, if the score is game-ending, then END IT
if(enemyArr[i].detectHit(b.bulletX, b.bulletY)) | {
score += 1;
if (score >= 6)
{
state = 2;
}
}
//Pharah takes damage if she touches people
if(pharah.detectHit(enemyArr[i].xPos, enemyArr[i].yPos))
{
pharah.health -= 10;
}
}
}
}
//standard gameover screen.
else if (state === 2)
{
background(255);
textSize(72)
text("GAMEOVER", 10, 200);
if(keyIsDown(82))
{
state = 1;
}
}
}
//again, the hackiest bullet ever
function mousePressed()
{
if (pharah.ammoCapacity <= 0)
{
return false;
}
else {
flip = true;
shotSound.play();
pharah.ammoCapacity -= 1;
//yeah, so here I create the bullet so it can be drawn. flip the bool, etc.
b = new Bullet(pharah, mouseX, mouseY, .02);
return false;
}
}
//just a helper function when I thought it would be cute to do so
function spawnEnemies(enemyArr, num)
{
for (let i = 0; i < num; i++)
{
enemyArr[i].display();
enemyArr[i].move();
}
}
//Alright, here we go
class Pharah
{
//instantiate all her important stuff
constructor(health, ammoCapacity)
{
this.xPos = 250;
this.yPos = 250;
this.sprite = pharahSprite;
this.crosshair = crosshairSprite;
this.accel = 0.1;
this.xSpeed = 0;
this.ySpeed = 0;
this.gravity = 0.03;
this.xLimit = 5;
this.aimX = mouseX;
this.aimY = mouseY;
this.bulletX = this.sprite.width + (this.sprite.width / 2);
this.bulletY = this.sprite.height + (this.sprite.height / 2);
this.bulletSpeed = .02;
this.ammoCapacity = ammoCapacity;
this.fuelCapacity = rangeData;;
this.fuel = rangeData;
this.currentTime = 0;
this.jumpSize = 50;
this.jumpSpeed = .02;
this.health = 1000;
}
//basic collision taken from KAPP notes
detectHit(x, y)
{
if(dist(x, y, this.xPos, this.yPos) < 50)
{
return true;
}
return false;
}
//set the mouseX and mouseY offsets so the crosshair looks more centered
aim()
{
this.aimX = mouseX - (this.sprite.width / 2 - 33);
this.aimY = mouseY - ((this.sprite.height / 2) + 10);
image(this.crosshair, this.aimX, this.aimY);
}
//display
display()
{
//kill her!!!
if (this.health < 0)
{
state = 2;
}
//normal image
image(this.sprite, this.xPos, this.yPos);
//included text, felt easier here
textSize(32);
if (this.fuel < 0)
{
text("Fuel: 0", 10, 40);
}
else
{
text("Fuel: " + this.fuel, 10, 40);
}
text("Ammo: " + this.ammoCapacity, 10, 70);
text("Score: " + score, 10, 105);
text("Health: " + this.health, 10, 200);
// text("Name: " + name, 1500, 70);
if (this.ammoCapacity <= 0)
{
textSize(72);
text("Press R to reload.", (width/2)-200, 100);
}
}
//not implemented, unfortuantely. Couldn't get it to work. Want to go back to it
jump()
{
let yDistance = this.yPos - this.jumpSize;
this.yPos -= this.jumpSpeed * yDistance;
}
// this will move our character
move()
{
//contain logic within borders
if (this.xPos + this.sprite.width > width)
{
this.xSpeed = 0;
this.collided = true;
this.xPos = width - this.sprite.width
}
if (this.xPos < 0)
{
this.xSpeed = 0;
this.collided = true;
this.xPos = 0;
}
if (this.yPos > height-238-this.sprite.height)
{
this.ySpeed = 0;
this.collided = true;
this.yPos = height-238 - this.sprite.height;
}
if (this.yPos < 0)
{
this.ySpeed = 0;
this.collided = true;
this.yPos = 0;
}
//kapp notes helpful as always
// move left?
if (keyIsDown(LEFT_ARROW) || keyIsDown(65))
{
// subtract from character's xSpeed
this.xSpeed -= this.accel;
this.left = true;
this.right = false;
}
// move right?
if (keyIsDown(RIGHT_ARROW) || keyIsDown(68))
{
// add to character's xSpeed
this.xSpeed += this.accel;
this.right = true;
this.left = false;
}
//reload, basic
if (keyIsDown(82))
{
this.ammoCapacity = 8;
}
// fuel!!
if (keyIsDown(32))
{
if (this.fuel > 0)
{
//if you still have fuel, then use it
this.ySpeed -= this.accel;
}
if (this.fuel > -250)
{
//250 is the threshold under 0 to simulate a "delay" since idk how millis() works
this.fuel -= 15;
}
}
//look at this sad commented out failure of a feature... maybe one day
/*
if (keyCode == SHIFT)
{
if (cooldown)
{
let yDistance = this.yPos - this.jumpSize;
this.yPos -= this.jumpSpeed * yDistance;
jumping = true;
}
}*/
//this I felt I wanted to do so that the left and right speeds
//would naturally slow down over time. Felt unnatural otherwise.
if (this.right)
{
this.xSpeed -= this.gravity;
if (this.xSpeed < 0)
{
this.right = false;
this.left = true;
}
}
else if (this.left)
{
this.xSpeed += this.gravity;
if (this.xSpeed > 0)
{
this.right = true;
this.left = false;
}
}
//the standard movement. Add gravity, speed to position
this.ySpeed += this.gravity;
this.xPos += this.xSpeed;
this.yPos += this.ySpeed;
//gradually grow your fuel overtime. A counter would've been great but I couldn't learn it in time...
//no pun intended.
if (this.fuel < this.fuelCapacity)
{
this.fuel += 5;
}
// speed limit! prevent the user from moving too fast
this.xSpeed = constrain(this.xSpeed, -this.xLimit, this.xLimit);
this.ySpeed = constrain(this.ySpeed, -25, 25);
}
}
class Bullet
{
//all standard stuffs
constructor(owner, x, y, speed)
{
this.bulletX = owner.xPos;
this.bulletY = owner.yPos;
this.bulletXSpeed = 0;
this.bulletYSpeed = 0;
this.destX = mouseX;
this.destY = mouseY;
this.sprite = bulletSprite;
this.bulletSpeed = speed;
//x2 = mousex, x1 = pharahX
this.xDist = this.destX - owner.xPos;
this.yDist = this.destY - owner.yPos;
}
//simple display function (hallelujah)
display()
{
image(this.sprite, this.bulletX, this.bulletY);
}
//OK...
//This idea came to me in the shower (literally) so it might be dumb
//But I thought if I ahd the X,Y of when the mouse was pressed, and I had the current
//x,y of the image, I could calculate the slope of the line
//connecting those two points, which is just the ratio of y pixels over x pixels
//and add to the x y vectors based on that slope. I ended up having
//to have four special cases for each quadrant as referenced by the console.logs
//There has to be a better way for this, but this is the best I could do
//and it's still not perfect (I haven't yet, but I assume at some point I could accidentally
//divide by 0 for example)
move()
{
let slope = this.yDist / this.xDist;
//y is negative, x is positive
if (this.yDist < 0 && this.xDist > 0)
{
slope = this.xDist / this.yDist;
console.log("-Y, X");
//FINALLY WORKING
this.bulletXSpeed -= slope;
this.bulletYSpeed -= 1;
}
//y is positive, x is positive
if (this.yDist > 0 && this.xDist > 0)
{
console.log("Y, X");
//GOOD, WORKING
this.bulletXSpeed += 1;
this.bulletYSpeed += slope;
}
//y is negative, x is negative
if (this.yDist < 0 && this.xDist < 0)
{
console.log("-Y, -X");
//GOOD, WORKING
this.bulletXSpeed -= 1;
this.bulletYSpeed -= slope;
}
//y is positive, x is negative
if (this.yDist > 0 && this.xDist < 0)
{
slope = this.xDist / this.yDist;
//GOOD, WORKING
console.log("Y, -X");
this.bulletXSpeed += slope;
this.bulletYSpeed += 1;
}
//then just move regularly
this.bulletX += this.bulletXSpeed;
this.bulletY += this.bulletYSpeed;
}
}
//ENEMIES! The guys that gave me the least amount of trouble
class BadBoi
{
//build me a badboi
constructor(x, y, image, speed)
{
this.xPos = x;
this.yPos = y;
this.sprite = image;
this.xDest = random(10, width-10);
this.speed = speed;
this.destroyed = false;
}
//Kapp notes >>
detectHit(x, y)
{
if (dist(x,y, this.xPos, this.yPos) < 50)
{
this.destroyed = true;
return true;
}
return false;
}
//this was the easiest way I could "delete" them,
//send them way offscreen and turn off their image...
//again, probably not the best way to do this.
display()
{
if (!this.destroyed)
{
image(this.sprite, this.xPos, this.yPos);
}
else
{
this.xPos = -999;
this.yPos = -999;
}
}
//move! Use the kapp notes movement from Zelda to randomly choose positions
//and get closer to them by the "Speed" which was instantiated as I think .02 which
//was what we used in the notes (move 2% fo the distance etc.)
move()
{
if (abs(this.xPos - this.xDest) < 100)
{
this.xDest = random(10, width - 10);
}
let xDist = this.xDest - this.xPos;
this.xPos += this.speed * xDist;
}
} | random_line_split | |
sketch.js | //playerSprite
let pharahSprite;
//crosshair
let crosshairSprite;
//player
let pharah;
//enemies!!!!!
let enemy1sprite;
let enemy2sprite;
let enemy3sprite;
//enemy array
let enemyArr;
//enemyImage array
let images;
let enemyNum;
let bulletSprite;
//gamestate, 0 = menu, 1 = gameplay
let state;
//boring initializer variables
let score;
let b;
let flip;
let health;
let ammo;
let rangeData;
let shotSound;
function updateRange(clickedRange) |
//preload assets
function preload()
{
pharahSprite = loadImage("images/pharahSprite_small.png");
bg = loadImage("images/bg.png");
crosshairSprite = loadImage("images/crosshair.png");
bulletSprite = loadImage("images/bullet.png");
soundFormats("mp3");
shotSound = loadSound("sounds/exp.mp3");
//enemies, potentially variable with more effort lol
enemy1sprite = loadImage("images/enemy1.png");
enemy2sprite = loadImage("images/enemy2.png");
enemy3sprite = loadImage("images/enemy3.png");
}
function setup()
{
//hot diggity this is big
createCanvas(1920,1200);
state = 0;
//GAME STATE
score = 0;
flip = false;
//make all the bad guys
let enemy1 = new BadBoi(150, height-238-enemy1sprite.height, enemy1sprite, .03);
let enemy2 = new BadBoi(400, height-238-enemy2sprite.height, enemy2sprite, .01);
let enemy3 = new BadBoi(150, height-238-enemy3sprite.height, enemy3sprite, .02);
let enemy4 = new BadBoi(150, height-238-enemy1sprite.height, enemy1sprite, .03);
let enemy5 = new BadBoi(400, height-238-enemy2sprite.height, enemy2sprite, .01);
let enemy6 = new BadBoi(150, height-238-enemy3sprite.height, enemy3sprite, .02);
//this hurts, but idk a better way rn
enemyArr = [enemy1, enemy2, enemy3, enemy4, enemy5, enemy6];
}
function draw()
{
//opening menu
if (state === 0)
{
textSize(72);
background(bg);
text("Pharah", 800, 100);
textSize(60);
text("Press (1) - EASY", 700, 200);
text("Press (2) - NORMAL", 700, 300);
text("Press (3) - HARD", 700, 400);
text("CONTROLS", 800, 500);
textSize(32);
text("Left / Right Arrow, or A / D", 700, 550);
text("Press SPACEBAR to use Fuel", 700, 600);
text("Click to shoot!", 700, 650);
//EASY
if (keyIsDown(49))
{
ammo = 12;
health = 1500;
state = 1;
pharah = new Pharah(health, ammo);
}
//NORMAL
else if (keyIsDown(50))
{
ammo = 8;
health = 1000;
state = 1;
pharah = new Pharah(health, ammo);
}
//HARD
else if (keyIsDown(51))
{
ammo = 4;
health = 750;
state = 1;
pharah = new Pharah(health, ammo);
}
}
//GAMEPLAY LOOP
else if (state === 1)
{
noCursor();
background(bg);
//get pharah lookin cool and movin nicely
pharah.display();
pharah.move();
//let's spawnEnemies
spawnEnemies(enemyArr, enemyArr.length);
//oh and let pharah aim
pharah.aim();
//oh god. This is how I got the bullet to finally shoot
//it feels very hacky, and based on our lecture today, I assume we
//will find a more intuitive way to do this later on in the course. But I create a bullet in mousePressed()
//so to avoid null pointer errors theres a "flip" boolean to make sure b exists (the bullet)
if (flip)
{
b.display();
b.move();
for (let i = 0; i < enemyArr.length; i++)
{
//check if there's a hit, if there is, add score, if the score is game-ending, then END IT
if(enemyArr[i].detectHit(b.bulletX, b.bulletY))
{
score += 1;
if (score >= 6)
{
state = 2;
}
}
//Pharah takes damage if she touches people
if(pharah.detectHit(enemyArr[i].xPos, enemyArr[i].yPos))
{
pharah.health -= 10;
}
}
}
}
//standard gameover screen.
else if (state === 2)
{
background(255);
textSize(72)
text("GAMEOVER", 10, 200);
if(keyIsDown(82))
{
state = 1;
}
}
}
//again, the hackiest bullet ever
function mousePressed()
{
if (pharah.ammoCapacity <= 0)
{
return false;
}
else {
flip = true;
shotSound.play();
pharah.ammoCapacity -= 1;
//yeah, so here I create the bullet so it can be drawn. flip the bool, etc.
b = new Bullet(pharah, mouseX, mouseY, .02);
return false;
}
}
//just a helper function when I thought it would be cute to do so
function spawnEnemies(enemyArr, num)
{
for (let i = 0; i < num; i++)
{
enemyArr[i].display();
enemyArr[i].move();
}
}
//Alright, here we go
class Pharah
{
//instantiate all her important stuff
constructor(health, ammoCapacity)
{
this.xPos = 250;
this.yPos = 250;
this.sprite = pharahSprite;
this.crosshair = crosshairSprite;
this.accel = 0.1;
this.xSpeed = 0;
this.ySpeed = 0;
this.gravity = 0.03;
this.xLimit = 5;
this.aimX = mouseX;
this.aimY = mouseY;
this.bulletX = this.sprite.width + (this.sprite.width / 2);
this.bulletY = this.sprite.height + (this.sprite.height / 2);
this.bulletSpeed = .02;
this.ammoCapacity = ammoCapacity;
this.fuelCapacity = rangeData;;
this.fuel = rangeData;
this.currentTime = 0;
this.jumpSize = 50;
this.jumpSpeed = .02;
this.health = 1000;
}
//basic collision taken from KAPP notes
detectHit(x, y)
{
if(dist(x, y, this.xPos, this.yPos) < 50)
{
return true;
}
return false;
}
//set the mouseX and mouseY offsets so the crosshair looks more centered
aim()
{
this.aimX = mouseX - (this.sprite.width / 2 - 33);
this.aimY = mouseY - ((this.sprite.height / 2) + 10);
image(this.crosshair, this.aimX, this.aimY);
}
//display
display()
{
//kill her!!!
if (this.health < 0)
{
state = 2;
}
//normal image
image(this.sprite, this.xPos, this.yPos);
//included text, felt easier here
textSize(32);
if (this.fuel < 0)
{
text("Fuel: 0", 10, 40);
}
else
{
text("Fuel: " + this.fuel, 10, 40);
}
text("Ammo: " + this.ammoCapacity, 10, 70);
text("Score: " + score, 10, 105);
text("Health: " + this.health, 10, 200);
// text("Name: " + name, 1500, 70);
if (this.ammoCapacity <= 0)
{
textSize(72);
text("Press R to reload.", (width/2)-200, 100);
}
}
//not implemented, unfortuantely. Couldn't get it to work. Want to go back to it
jump()
{
let yDistance = this.yPos - this.jumpSize;
this.yPos -= this.jumpSpeed * yDistance;
}
// this will move our character
move()
{
//contain logic within borders
if (this.xPos + this.sprite.width > width)
{
this.xSpeed = 0;
this.collided = true;
this.xPos = width - this.sprite.width
}
if (this.xPos < 0)
{
this.xSpeed = 0;
this.collided = true;
this.xPos = 0;
}
if (this.yPos > height-238-this.sprite.height)
{
this.ySpeed = 0;
this.collided = true;
this.yPos = height-238 - this.sprite.height;
}
if (this.yPos < 0)
{
this.ySpeed = 0;
this.collided = true;
this.yPos = 0;
}
//kapp notes helpful as always
// move left?
if (keyIsDown(LEFT_ARROW) || keyIsDown(65))
{
// subtract from character's xSpeed
this.xSpeed -= this.accel;
this.left = true;
this.right = false;
}
// move right?
if (keyIsDown(RIGHT_ARROW) || keyIsDown(68))
{
// add to character's xSpeed
this.xSpeed += this.accel;
this.right = true;
this.left = false;
}
//reload, basic
if (keyIsDown(82))
{
this.ammoCapacity = 8;
}
// fuel!!
if (keyIsDown(32))
{
if (this.fuel > 0)
{
//if you still have fuel, then use it
this.ySpeed -= this.accel;
}
if (this.fuel > -250)
{
//250 is the threshold under 0 to simulate a "delay" since idk how millis() works
this.fuel -= 15;
}
}
//look at this sad commented out failure of a feature... maybe one day
/*
if (keyCode == SHIFT)
{
if (cooldown)
{
let yDistance = this.yPos - this.jumpSize;
this.yPos -= this.jumpSpeed * yDistance;
jumping = true;
}
}*/
//this I felt I wanted to do so that the left and right speeds
//would naturally slow down over time. Felt unnatural otherwise.
if (this.right)
{
this.xSpeed -= this.gravity;
if (this.xSpeed < 0)
{
this.right = false;
this.left = true;
}
}
else if (this.left)
{
this.xSpeed += this.gravity;
if (this.xSpeed > 0)
{
this.right = true;
this.left = false;
}
}
//the standard movement. Add gravity, speed to position
this.ySpeed += this.gravity;
this.xPos += this.xSpeed;
this.yPos += this.ySpeed;
//gradually grow your fuel overtime. A counter would've been great but I couldn't learn it in time...
//no pun intended.
if (this.fuel < this.fuelCapacity)
{
this.fuel += 5;
}
// speed limit! prevent the user from moving too fast
this.xSpeed = constrain(this.xSpeed, -this.xLimit, this.xLimit);
this.ySpeed = constrain(this.ySpeed, -25, 25);
}
}
class Bullet
{
//all standard stuffs
constructor(owner, x, y, speed)
{
this.bulletX = owner.xPos;
this.bulletY = owner.yPos;
this.bulletXSpeed = 0;
this.bulletYSpeed = 0;
this.destX = mouseX;
this.destY = mouseY;
this.sprite = bulletSprite;
this.bulletSpeed = speed;
//x2 = mousex, x1 = pharahX
this.xDist = this.destX - owner.xPos;
this.yDist = this.destY - owner.yPos;
}
//simple display function (hallelujah)
display()
{
image(this.sprite, this.bulletX, this.bulletY);
}
//OK...
//This idea came to me in the shower (literally) so it might be dumb
//But I thought if I ahd the X,Y of when the mouse was pressed, and I had the current
//x,y of the image, I could calculate the slope of the line
//connecting those two points, which is just the ratio of y pixels over x pixels
//and add to the x y vectors based on that slope. I ended up having
//to have four special cases for each quadrant as referenced by the console.logs
//There has to be a better way for this, but this is the best I could do
//and it's still not perfect (I haven't yet, but I assume at some point I could accidentally
//divide by 0 for example)
move()
{
let slope = this.yDist / this.xDist;
//y is negative, x is positive
if (this.yDist < 0 && this.xDist > 0)
{
slope = this.xDist / this.yDist;
console.log("-Y, X");
//FINALLY WORKING
this.bulletXSpeed -= slope;
this.bulletYSpeed -= 1;
}
//y is positive, x is positive
if (this.yDist > 0 && this.xDist > 0)
{
console.log("Y, X");
//GOOD, WORKING
this.bulletXSpeed += 1;
this.bulletYSpeed += slope;
}
//y is negative, x is negative
if (this.yDist < 0 && this.xDist < 0)
{
console.log("-Y, -X");
//GOOD, WORKING
this.bulletXSpeed -= 1;
this.bulletYSpeed -= slope;
}
//y is positive, x is negative
if (this.yDist > 0 && this.xDist < 0)
{
slope = this.xDist / this.yDist;
//GOOD, WORKING
console.log("Y, -X");
this.bulletXSpeed += slope;
this.bulletYSpeed += 1;
}
//then just move regularly
this.bulletX += this.bulletXSpeed;
this.bulletY += this.bulletYSpeed;
}
}
//ENEMIES! The guys that gave me the least amount of trouble
class BadBoi
{
//build me a badboi
constructor(x, y, image, speed)
{
this.xPos = x;
this.yPos = y;
this.sprite = image;
this.xDest = random(10, width-10);
this.speed = speed;
this.destroyed = false;
}
//Kapp notes >>
detectHit(x, y)
{
if (dist(x,y, this.xPos, this.yPos) < 50)
{
this.destroyed = true;
return true;
}
return false;
}
//this was the easiest way I could "delete" them,
//send them way offscreen and turn off their image...
//again, probably not the best way to do this.
display()
{
if (!this.destroyed)
{
image(this.sprite, this.xPos, this.yPos);
}
else
{
this.xPos = -999;
this.yPos = -999;
}
}
//move! Use the kapp notes movement from Zelda to randomly choose positions
//and get closer to them by the "Speed" which was instantiated as I think .02 which
//was what we used in the notes (move 2% fo the distance etc.)
move()
{
if (abs(this.xPos - this.xDest) < 100)
{
this.xDest = random(10, width - 10);
}
let xDist = this.xDest - this.xPos;
this.xPos += this.speed * xDist;
}
}
| {
// grab the range data as an integer
rangeData = int(clickedRange.value);
} | identifier_body |
sketch.js | //playerSprite
let pharahSprite;
//crosshair
let crosshairSprite;
//player
let pharah;
//enemies!!!!!
let enemy1sprite;
let enemy2sprite;
let enemy3sprite;
//enemy array
let enemyArr;
//enemyImage array
let images;
let enemyNum;
let bulletSprite;
//gamestate, 0 = menu, 1 = gameplay
let state;
//boring initializer variables
let score;
let b;
let flip;
let health;
let ammo;
let rangeData;
let shotSound;
function updateRange(clickedRange) {
// grab the range data as an integer
rangeData = int(clickedRange.value);
}
//preload assets
function preload()
{
pharahSprite = loadImage("images/pharahSprite_small.png");
bg = loadImage("images/bg.png");
crosshairSprite = loadImage("images/crosshair.png");
bulletSprite = loadImage("images/bullet.png");
soundFormats("mp3");
shotSound = loadSound("sounds/exp.mp3");
//enemies, potentially variable with more effort lol
enemy1sprite = loadImage("images/enemy1.png");
enemy2sprite = loadImage("images/enemy2.png");
enemy3sprite = loadImage("images/enemy3.png");
}
function setup()
{
//hot diggity this is big
createCanvas(1920,1200);
state = 0;
//GAME STATE
score = 0;
flip = false;
//make all the bad guys
let enemy1 = new BadBoi(150, height-238-enemy1sprite.height, enemy1sprite, .03);
let enemy2 = new BadBoi(400, height-238-enemy2sprite.height, enemy2sprite, .01);
let enemy3 = new BadBoi(150, height-238-enemy3sprite.height, enemy3sprite, .02);
let enemy4 = new BadBoi(150, height-238-enemy1sprite.height, enemy1sprite, .03);
let enemy5 = new BadBoi(400, height-238-enemy2sprite.height, enemy2sprite, .01);
let enemy6 = new BadBoi(150, height-238-enemy3sprite.height, enemy3sprite, .02);
//this hurts, but idk a better way rn
enemyArr = [enemy1, enemy2, enemy3, enemy4, enemy5, enemy6];
}
function draw()
{
//opening menu
if (state === 0)
{
textSize(72);
background(bg);
text("Pharah", 800, 100);
textSize(60);
text("Press (1) - EASY", 700, 200);
text("Press (2) - NORMAL", 700, 300);
text("Press (3) - HARD", 700, 400);
text("CONTROLS", 800, 500);
textSize(32);
text("Left / Right Arrow, or A / D", 700, 550);
text("Press SPACEBAR to use Fuel", 700, 600);
text("Click to shoot!", 700, 650);
//EASY
if (keyIsDown(49))
{
ammo = 12;
health = 1500;
state = 1;
pharah = new Pharah(health, ammo);
}
//NORMAL
else if (keyIsDown(50))
{
ammo = 8;
health = 1000;
state = 1;
pharah = new Pharah(health, ammo);
}
//HARD
else if (keyIsDown(51))
{
ammo = 4;
health = 750;
state = 1;
pharah = new Pharah(health, ammo);
}
}
//GAMEPLAY LOOP
else if (state === 1)
{
noCursor();
background(bg);
//get pharah lookin cool and movin nicely
pharah.display();
pharah.move();
//let's spawnEnemies
spawnEnemies(enemyArr, enemyArr.length);
//oh and let pharah aim
pharah.aim();
//oh god. This is how I got the bullet to finally shoot
//it feels very hacky, and based on our lecture today, I assume we
//will find a more intuitive way to do this later on in the course. But I create a bullet in mousePressed()
//so to avoid null pointer errors theres a "flip" boolean to make sure b exists (the bullet)
if (flip)
{
b.display();
b.move();
for (let i = 0; i < enemyArr.length; i++)
{
//check if there's a hit, if there is, add score, if the score is game-ending, then END IT
if(enemyArr[i].detectHit(b.bulletX, b.bulletY))
{
score += 1;
if (score >= 6)
{
state = 2;
}
}
//Pharah takes damage if she touches people
if(pharah.detectHit(enemyArr[i].xPos, enemyArr[i].yPos))
{
pharah.health -= 10;
}
}
}
}
//standard gameover screen.
else if (state === 2)
{
background(255);
textSize(72)
text("GAMEOVER", 10, 200);
if(keyIsDown(82))
{
state = 1;
}
}
}
//again, the hackiest bullet ever
function mousePressed()
{
if (pharah.ammoCapacity <= 0)
{
return false;
}
else {
flip = true;
shotSound.play();
pharah.ammoCapacity -= 1;
//yeah, so here I create the bullet so it can be drawn. flip the bool, etc.
b = new Bullet(pharah, mouseX, mouseY, .02);
return false;
}
}
//just a helper function when I thought it would be cute to do so
function spawnEnemies(enemyArr, num)
{
for (let i = 0; i < num; i++)
{
enemyArr[i].display();
enemyArr[i].move();
}
}
//Alright, here we go
class Pharah
{
//instantiate all her important stuff
constructor(health, ammoCapacity)
{
this.xPos = 250;
this.yPos = 250;
this.sprite = pharahSprite;
this.crosshair = crosshairSprite;
this.accel = 0.1;
this.xSpeed = 0;
this.ySpeed = 0;
this.gravity = 0.03;
this.xLimit = 5;
this.aimX = mouseX;
this.aimY = mouseY;
this.bulletX = this.sprite.width + (this.sprite.width / 2);
this.bulletY = this.sprite.height + (this.sprite.height / 2);
this.bulletSpeed = .02;
this.ammoCapacity = ammoCapacity;
this.fuelCapacity = rangeData;;
this.fuel = rangeData;
this.currentTime = 0;
this.jumpSize = 50;
this.jumpSpeed = .02;
this.health = 1000;
}
//basic collision taken from KAPP notes
detectHit(x, y)
{
if(dist(x, y, this.xPos, this.yPos) < 50)
{
return true;
}
return false;
}
//set the mouseX and mouseY offsets so the crosshair looks more centered
aim()
{
this.aimX = mouseX - (this.sprite.width / 2 - 33);
this.aimY = mouseY - ((this.sprite.height / 2) + 10);
image(this.crosshair, this.aimX, this.aimY);
}
//display
display()
{
//kill her!!!
if (this.health < 0)
{
state = 2;
}
//normal image
image(this.sprite, this.xPos, this.yPos);
//included text, felt easier here
textSize(32);
if (this.fuel < 0)
{
text("Fuel: 0", 10, 40);
}
else
{
text("Fuel: " + this.fuel, 10, 40);
}
text("Ammo: " + this.ammoCapacity, 10, 70);
text("Score: " + score, 10, 105);
text("Health: " + this.health, 10, 200);
// text("Name: " + name, 1500, 70);
if (this.ammoCapacity <= 0)
{
textSize(72);
text("Press R to reload.", (width/2)-200, 100);
}
}
//not implemented, unfortuantely. Couldn't get it to work. Want to go back to it
jump()
{
let yDistance = this.yPos - this.jumpSize;
this.yPos -= this.jumpSpeed * yDistance;
}
// this will move our character
| ()
{
//contain logic within borders
if (this.xPos + this.sprite.width > width)
{
this.xSpeed = 0;
this.collided = true;
this.xPos = width - this.sprite.width
}
if (this.xPos < 0)
{
this.xSpeed = 0;
this.collided = true;
this.xPos = 0;
}
if (this.yPos > height-238-this.sprite.height)
{
this.ySpeed = 0;
this.collided = true;
this.yPos = height-238 - this.sprite.height;
}
if (this.yPos < 0)
{
this.ySpeed = 0;
this.collided = true;
this.yPos = 0;
}
//kapp notes helpful as always
// move left?
if (keyIsDown(LEFT_ARROW) || keyIsDown(65))
{
// subtract from character's xSpeed
this.xSpeed -= this.accel;
this.left = true;
this.right = false;
}
// move right?
if (keyIsDown(RIGHT_ARROW) || keyIsDown(68))
{
// add to character's xSpeed
this.xSpeed += this.accel;
this.right = true;
this.left = false;
}
//reload, basic
if (keyIsDown(82))
{
this.ammoCapacity = 8;
}
// fuel!!
if (keyIsDown(32))
{
if (this.fuel > 0)
{
//if you still have fuel, then use it
this.ySpeed -= this.accel;
}
if (this.fuel > -250)
{
//250 is the threshold under 0 to simulate a "delay" since idk how millis() works
this.fuel -= 15;
}
}
//look at this sad commented out failure of a feature... maybe one day
/*
if (keyCode == SHIFT)
{
if (cooldown)
{
let yDistance = this.yPos - this.jumpSize;
this.yPos -= this.jumpSpeed * yDistance;
jumping = true;
}
}*/
//this I felt I wanted to do so that the left and right speeds
//would naturally slow down over time. Felt unnatural otherwise.
if (this.right)
{
this.xSpeed -= this.gravity;
if (this.xSpeed < 0)
{
this.right = false;
this.left = true;
}
}
else if (this.left)
{
this.xSpeed += this.gravity;
if (this.xSpeed > 0)
{
this.right = true;
this.left = false;
}
}
//the standard movement. Add gravity, speed to position
this.ySpeed += this.gravity;
this.xPos += this.xSpeed;
this.yPos += this.ySpeed;
//gradually grow your fuel overtime. A counter would've been great but I couldn't learn it in time...
//no pun intended.
if (this.fuel < this.fuelCapacity)
{
this.fuel += 5;
}
// speed limit! prevent the user from moving too fast
this.xSpeed = constrain(this.xSpeed, -this.xLimit, this.xLimit);
this.ySpeed = constrain(this.ySpeed, -25, 25);
}
}
class Bullet
{
//all standard stuffs
constructor(owner, x, y, speed)
{
this.bulletX = owner.xPos;
this.bulletY = owner.yPos;
this.bulletXSpeed = 0;
this.bulletYSpeed = 0;
this.destX = mouseX;
this.destY = mouseY;
this.sprite = bulletSprite;
this.bulletSpeed = speed;
//x2 = mousex, x1 = pharahX
this.xDist = this.destX - owner.xPos;
this.yDist = this.destY - owner.yPos;
}
//simple display function (hallelujah)
display()
{
image(this.sprite, this.bulletX, this.bulletY);
}
//OK...
//This idea came to me in the shower (literally) so it might be dumb
//But I thought if I ahd the X,Y of when the mouse was pressed, and I had the current
//x,y of the image, I could calculate the slope of the line
//connecting those two points, which is just the ratio of y pixels over x pixels
//and add to the x y vectors based on that slope. I ended up having
//to have four special cases for each quadrant as referenced by the console.logs
//There has to be a better way for this, but this is the best I could do
//and it's still not perfect (I haven't yet, but I assume at some point I could accidentally
//divide by 0 for example)
move()
{
let slope = this.yDist / this.xDist;
//y is negative, x is positive
if (this.yDist < 0 && this.xDist > 0)
{
slope = this.xDist / this.yDist;
console.log("-Y, X");
//FINALLY WORKING
this.bulletXSpeed -= slope;
this.bulletYSpeed -= 1;
}
//y is positive, x is positive
if (this.yDist > 0 && this.xDist > 0)
{
console.log("Y, X");
//GOOD, WORKING
this.bulletXSpeed += 1;
this.bulletYSpeed += slope;
}
//y is negative, x is negative
if (this.yDist < 0 && this.xDist < 0)
{
console.log("-Y, -X");
//GOOD, WORKING
this.bulletXSpeed -= 1;
this.bulletYSpeed -= slope;
}
//y is positive, x is negative
if (this.yDist > 0 && this.xDist < 0)
{
slope = this.xDist / this.yDist;
//GOOD, WORKING
console.log("Y, -X");
this.bulletXSpeed += slope;
this.bulletYSpeed += 1;
}
//then just move regularly
this.bulletX += this.bulletXSpeed;
this.bulletY += this.bulletYSpeed;
}
}
//ENEMIES! The guys that gave me the least amount of trouble
class BadBoi
{
//build me a badboi
constructor(x, y, image, speed)
{
this.xPos = x;
this.yPos = y;
this.sprite = image;
this.xDest = random(10, width-10);
this.speed = speed;
this.destroyed = false;
}
//Kapp notes >>
detectHit(x, y)
{
if (dist(x,y, this.xPos, this.yPos) < 50)
{
this.destroyed = true;
return true;
}
return false;
}
//this was the easiest way I could "delete" them,
//send them way offscreen and turn off their image...
//again, probably not the best way to do this.
display()
{
if (!this.destroyed)
{
image(this.sprite, this.xPos, this.yPos);
}
else
{
this.xPos = -999;
this.yPos = -999;
}
}
//move! Use the kapp notes movement from Zelda to randomly choose positions
//and get closer to them by the "Speed" which was instantiated as I think .02 which
//was what we used in the notes (move 2% fo the distance etc.)
move()
{
if (abs(this.xPos - this.xDest) < 100)
{
this.xDest = random(10, width - 10);
}
let xDist = this.xDest - this.xPos;
this.xPos += this.speed * xDist;
}
}
| move | identifier_name |
sketch.js | //playerSprite
let pharahSprite;
//crosshair
let crosshairSprite;
//player
let pharah;
//enemies!!!!!
let enemy1sprite;
let enemy2sprite;
let enemy3sprite;
//enemy array
let enemyArr;
//enemyImage array
let images;
let enemyNum;
let bulletSprite;
//gamestate, 0 = menu, 1 = gameplay
let state;
//boring initializer variables
let score;
let b;
let flip;
let health;
let ammo;
let rangeData;
let shotSound;
function updateRange(clickedRange) {
// grab the range data as an integer
rangeData = int(clickedRange.value);
}
//preload assets
function preload()
{
pharahSprite = loadImage("images/pharahSprite_small.png");
bg = loadImage("images/bg.png");
crosshairSprite = loadImage("images/crosshair.png");
bulletSprite = loadImage("images/bullet.png");
soundFormats("mp3");
shotSound = loadSound("sounds/exp.mp3");
//enemies, potentially variable with more effort lol
enemy1sprite = loadImage("images/enemy1.png");
enemy2sprite = loadImage("images/enemy2.png");
enemy3sprite = loadImage("images/enemy3.png");
}
function setup()
{
//hot diggity this is big
createCanvas(1920,1200);
state = 0;
//GAME STATE
score = 0;
flip = false;
//make all the bad guys
let enemy1 = new BadBoi(150, height-238-enemy1sprite.height, enemy1sprite, .03);
let enemy2 = new BadBoi(400, height-238-enemy2sprite.height, enemy2sprite, .01);
let enemy3 = new BadBoi(150, height-238-enemy3sprite.height, enemy3sprite, .02);
let enemy4 = new BadBoi(150, height-238-enemy1sprite.height, enemy1sprite, .03);
let enemy5 = new BadBoi(400, height-238-enemy2sprite.height, enemy2sprite, .01);
let enemy6 = new BadBoi(150, height-238-enemy3sprite.height, enemy3sprite, .02);
//this hurts, but idk a better way rn
enemyArr = [enemy1, enemy2, enemy3, enemy4, enemy5, enemy6];
}
function draw()
{
//opening menu
if (state === 0)
{
textSize(72);
background(bg);
text("Pharah", 800, 100);
textSize(60);
text("Press (1) - EASY", 700, 200);
text("Press (2) - NORMAL", 700, 300);
text("Press (3) - HARD", 700, 400);
text("CONTROLS", 800, 500);
textSize(32);
text("Left / Right Arrow, or A / D", 700, 550);
text("Press SPACEBAR to use Fuel", 700, 600);
text("Click to shoot!", 700, 650);
//EASY
if (keyIsDown(49))
{
ammo = 12;
health = 1500;
state = 1;
pharah = new Pharah(health, ammo);
}
//NORMAL
else if (keyIsDown(50))
{
ammo = 8;
health = 1000;
state = 1;
pharah = new Pharah(health, ammo);
}
//HARD
else if (keyIsDown(51))
{
ammo = 4;
health = 750;
state = 1;
pharah = new Pharah(health, ammo);
}
}
//GAMEPLAY LOOP
else if (state === 1)
{
noCursor();
background(bg);
//get pharah lookin cool and movin nicely
pharah.display();
pharah.move();
//let's spawnEnemies
spawnEnemies(enemyArr, enemyArr.length);
//oh and let pharah aim
pharah.aim();
//oh god. This is how I got the bullet to finally shoot
//it feels very hacky, and based on our lecture today, I assume we
//will find a more intuitive way to do this later on in the course. But I create a bullet in mousePressed()
//so to avoid null pointer errors theres a "flip" boolean to make sure b exists (the bullet)
if (flip)
{
b.display();
b.move();
for (let i = 0; i < enemyArr.length; i++)
{
//check if there's a hit, if there is, add score, if the score is game-ending, then END IT
if(enemyArr[i].detectHit(b.bulletX, b.bulletY))
{
score += 1;
if (score >= 6)
{
state = 2;
}
}
//Pharah takes damage if she touches people
if(pharah.detectHit(enemyArr[i].xPos, enemyArr[i].yPos))
{
pharah.health -= 10;
}
}
}
}
//standard gameover screen.
else if (state === 2)
{
background(255);
textSize(72)
text("GAMEOVER", 10, 200);
if(keyIsDown(82))
{
state = 1;
}
}
}
//again, the hackiest bullet ever
function mousePressed()
{
if (pharah.ammoCapacity <= 0)
{
return false;
}
else {
flip = true;
shotSound.play();
pharah.ammoCapacity -= 1;
//yeah, so here I create the bullet so it can be drawn. flip the bool, etc.
b = new Bullet(pharah, mouseX, mouseY, .02);
return false;
}
}
//just a helper function when I thought it would be cute to do so
function spawnEnemies(enemyArr, num)
{
for (let i = 0; i < num; i++)
{
enemyArr[i].display();
enemyArr[i].move();
}
}
//Alright, here we go
class Pharah
{
//instantiate all her important stuff
constructor(health, ammoCapacity)
{
this.xPos = 250;
this.yPos = 250;
this.sprite = pharahSprite;
this.crosshair = crosshairSprite;
this.accel = 0.1;
this.xSpeed = 0;
this.ySpeed = 0;
this.gravity = 0.03;
this.xLimit = 5;
this.aimX = mouseX;
this.aimY = mouseY;
this.bulletX = this.sprite.width + (this.sprite.width / 2);
this.bulletY = this.sprite.height + (this.sprite.height / 2);
this.bulletSpeed = .02;
this.ammoCapacity = ammoCapacity;
this.fuelCapacity = rangeData;;
this.fuel = rangeData;
this.currentTime = 0;
this.jumpSize = 50;
this.jumpSpeed = .02;
this.health = 1000;
}
//basic collision taken from KAPP notes
detectHit(x, y)
{
if(dist(x, y, this.xPos, this.yPos) < 50)
{
return true;
}
return false;
}
//set the mouseX and mouseY offsets so the crosshair looks more centered
aim()
{
this.aimX = mouseX - (this.sprite.width / 2 - 33);
this.aimY = mouseY - ((this.sprite.height / 2) + 10);
image(this.crosshair, this.aimX, this.aimY);
}
//display
display()
{
//kill her!!!
if (this.health < 0)
{
state = 2;
}
//normal image
image(this.sprite, this.xPos, this.yPos);
//included text, felt easier here
textSize(32);
if (this.fuel < 0)
{
text("Fuel: 0", 10, 40);
}
else
{
text("Fuel: " + this.fuel, 10, 40);
}
text("Ammo: " + this.ammoCapacity, 10, 70);
text("Score: " + score, 10, 105);
text("Health: " + this.health, 10, 200);
// text("Name: " + name, 1500, 70);
if (this.ammoCapacity <= 0)
{
textSize(72);
text("Press R to reload.", (width/2)-200, 100);
}
}
//not implemented, unfortuantely. Couldn't get it to work. Want to go back to it
jump()
{
let yDistance = this.yPos - this.jumpSize;
this.yPos -= this.jumpSpeed * yDistance;
}
// this will move our character
move()
{
//contain logic within borders
if (this.xPos + this.sprite.width > width)
{
this.xSpeed = 0;
this.collided = true;
this.xPos = width - this.sprite.width
}
if (this.xPos < 0)
{
this.xSpeed = 0;
this.collided = true;
this.xPos = 0;
}
if (this.yPos > height-238-this.sprite.height)
{
this.ySpeed = 0;
this.collided = true;
this.yPos = height-238 - this.sprite.height;
}
if (this.yPos < 0)
{
this.ySpeed = 0;
this.collided = true;
this.yPos = 0;
}
//kapp notes helpful as always
// move left?
if (keyIsDown(LEFT_ARROW) || keyIsDown(65))
{
// subtract from character's xSpeed
this.xSpeed -= this.accel;
this.left = true;
this.right = false;
}
// move right?
if (keyIsDown(RIGHT_ARROW) || keyIsDown(68))
{
// add to character's xSpeed
this.xSpeed += this.accel;
this.right = true;
this.left = false;
}
//reload, basic
if (keyIsDown(82))
{
this.ammoCapacity = 8;
}
// fuel!!
if (keyIsDown(32))
{
if (this.fuel > 0)
{
//if you still have fuel, then use it
this.ySpeed -= this.accel;
}
if (this.fuel > -250)
{
//250 is the threshold under 0 to simulate a "delay" since idk how millis() works
this.fuel -= 15;
}
}
//look at this sad commented out failure of a feature... maybe one day
/*
if (keyCode == SHIFT)
{
if (cooldown)
{
let yDistance = this.yPos - this.jumpSize;
this.yPos -= this.jumpSpeed * yDistance;
jumping = true;
}
}*/
//this I felt I wanted to do so that the left and right speeds
//would naturally slow down over time. Felt unnatural otherwise.
if (this.right)
|
else if (this.left)
{
this.xSpeed += this.gravity;
if (this.xSpeed > 0)
{
this.right = true;
this.left = false;
}
}
//the standard movement. Add gravity, speed to position
this.ySpeed += this.gravity;
this.xPos += this.xSpeed;
this.yPos += this.ySpeed;
//gradually grow your fuel overtime. A counter would've been great but I couldn't learn it in time...
//no pun intended.
if (this.fuel < this.fuelCapacity)
{
this.fuel += 5;
}
// speed limit! prevent the user from moving too fast
this.xSpeed = constrain(this.xSpeed, -this.xLimit, this.xLimit);
this.ySpeed = constrain(this.ySpeed, -25, 25);
}
}
class Bullet
{
//all standard stuffs
constructor(owner, x, y, speed)
{
this.bulletX = owner.xPos;
this.bulletY = owner.yPos;
this.bulletXSpeed = 0;
this.bulletYSpeed = 0;
this.destX = mouseX;
this.destY = mouseY;
this.sprite = bulletSprite;
this.bulletSpeed = speed;
//x2 = mousex, x1 = pharahX
this.xDist = this.destX - owner.xPos;
this.yDist = this.destY - owner.yPos;
}
//simple display function (hallelujah)
display()
{
image(this.sprite, this.bulletX, this.bulletY);
}
//OK...
//This idea came to me in the shower (literally) so it might be dumb
//But I thought if I ahd the X,Y of when the mouse was pressed, and I had the current
//x,y of the image, I could calculate the slope of the line
//connecting those two points, which is just the ratio of y pixels over x pixels
//and add to the x y vectors based on that slope. I ended up having
//to have four special cases for each quadrant as referenced by the console.logs
//There has to be a better way for this, but this is the best I could do
//and it's still not perfect (I haven't yet, but I assume at some point I could accidentally
//divide by 0 for example)
move()
{
let slope = this.yDist / this.xDist;
//y is negative, x is positive
if (this.yDist < 0 && this.xDist > 0)
{
slope = this.xDist / this.yDist;
console.log("-Y, X");
//FINALLY WORKING
this.bulletXSpeed -= slope;
this.bulletYSpeed -= 1;
}
//y is positive, x is positive
if (this.yDist > 0 && this.xDist > 0)
{
console.log("Y, X");
//GOOD, WORKING
this.bulletXSpeed += 1;
this.bulletYSpeed += slope;
}
//y is negative, x is negative
if (this.yDist < 0 && this.xDist < 0)
{
console.log("-Y, -X");
//GOOD, WORKING
this.bulletXSpeed -= 1;
this.bulletYSpeed -= slope;
}
//y is positive, x is negative
if (this.yDist > 0 && this.xDist < 0)
{
slope = this.xDist / this.yDist;
//GOOD, WORKING
console.log("Y, -X");
this.bulletXSpeed += slope;
this.bulletYSpeed += 1;
}
//then just move regularly
this.bulletX += this.bulletXSpeed;
this.bulletY += this.bulletYSpeed;
}
}
//ENEMIES! The guys that gave me the least amount of trouble
class BadBoi
{
//build me a badboi
constructor(x, y, image, speed)
{
this.xPos = x;
this.yPos = y;
this.sprite = image;
this.xDest = random(10, width-10);
this.speed = speed;
this.destroyed = false;
}
//Kapp notes >>
detectHit(x, y)
{
if (dist(x,y, this.xPos, this.yPos) < 50)
{
this.destroyed = true;
return true;
}
return false;
}
//this was the easiest way I could "delete" them,
//send them way offscreen and turn off their image...
//again, probably not the best way to do this.
display()
{
if (!this.destroyed)
{
image(this.sprite, this.xPos, this.yPos);
}
else
{
this.xPos = -999;
this.yPos = -999;
}
}
//move! Use the kapp notes movement from Zelda to randomly choose positions
//and get closer to them by the "Speed" which was instantiated as I think .02 which
//was what we used in the notes (move 2% fo the distance etc.)
move()
{
if (abs(this.xPos - this.xDest) < 100)
{
this.xDest = random(10, width - 10);
}
let xDist = this.xDest - this.xPos;
this.xPos += this.speed * xDist;
}
}
| {
this.xSpeed -= this.gravity;
if (this.xSpeed < 0)
{
this.right = false;
this.left = true;
}
} | conditional_block |
FactorPropertyController.js | Ext.define('Factor.controller.panel.FactorPropertyController', {
extend: 'Factor.controller.Base',
stores: [
"FactorPropertyStore",
"FactorDataStore"
],
models: [
"FactorPropertyModel"
],
views: [
// 'window.FactorWindow'
],
controllers:[
'window.FactorWindowController'
],
refs : [{
selector: '#factorWindow',
ref: 'factorWindow'
},{
selector: '#factorGrid',
ref: 'factorGrid'
},{
selector: '#factorPropertyGrid',
ref: 'factorPropertyGrid'
},{
selector: '#factorProperty',
ref: 'factorProperty'
},{
selector: '#factorDataGrid',
ref: 'factorDataGrid'
}],
init: function() {
this.control({
'#factorPropertyGrid' : {
afterrender: this.onPanelRendered,
select : this.selectRow,
beforeitemclick : this.rowClick,
containerdblclick:this.containerdblclick,
containerclick:this.containerclick
}
});
},
containerclick:function(){
// alert('containerclick');
document.getElementById('factorGrid_header').style.removeProperty('background-color');
document.getElementById('factorGrid_header').style.removeProperty('background-image');
document.getElementById('factorPropertyGrid_header').style.setProperty('background-image','none');
document.getElementById('factorPropertyGrid_header').style.setProperty('background-color','rgb(117, 177, 218)');
document.getElementById('factorDataGrid_header').style.removeProperty('background-color');
document.getElementById('factorDataGrid_header').style.removeProperty('background-image');
document.getElementById('relationGrid_header').style.removeProperty('background-color');
document.getElementById('relationGrid_header').style.removeProperty('background-image');
document.getElementById('relationPropertyGrid_header').style.removeProperty('background-color');
document.getElementById('relationPropertyGrid_header').style.removeProperty('background-image');
document.getElementById('relationDataGrid_header').style.removeProperty('background-color');
document.getElementById('relationDataGrid_header').style.removeProperty('background-image');
},
onPanelRendered : function(){
/*this.getFactorPropertyGrid().columns[7].hide();
this.getFactorPropertyGrid().columns[8].hide();*/
},
rowClick : function(row, record, item, index, e){
this.containerclick();
var valueLabelUuid= record.get('valueLabelUuid');
setFactorPropertyInputGrid(valueLabelUuid,record.get('value'));
propertyGridActivity();
setGlobleVariate('FactorProperty',record.get('uuid'),record.get('defineUuid'));
this.getFactorProperty().setSource({
// "defineUuid":e.selected.getAt(0).get('uuid'),
"name":record.get('name'),
"format": record.get('format'),
"width": record.get('width'),
"decimalWidth": record.get('decimalWidth'),
"valueLabelUuid": record.get('valueLabelUuid')
});
Factor.App.getPanelFactorPropertyGridControllerController().getFactorProperty().plugins[0].startEdit(0, 2);
Factor.App.getPanelFactorPropertyGridControllerController().getFactorProperty().setTitle('正在编辑【因子属性表】');
},
selectRow : function(e, obj ,row){
if(0 == e.selected.length){
return;
}
this.getFactorProperty().setSource({
// "defineUuid":e.selected.getAt(0).get('uuid'),
"name": e.selected.getAt(0).get('name'),
"format": e.selected.getAt(0).get('format'),
"width": e.selected.getAt(0).get('width'),
"decimalWidth": e.selected.getAt(0).get('decimalWidth'),
"valueLabelUuid": e.selected.getAt(0).get('valueLabelUuid')
});
setGlobleVariate('Factor',e.selected.getAt(0).get('uuid'),e.selected.getAt(0).get('defineUuid'));
if(''===e.selected.getAt(0).get('queryUuid')||null==e.selected.getAt(0).get('queryUuid')){
Ext.getCmp('dataFilterForm').down('#queryUuid').setValue("");
Ext.getCmp('dataFilterForm').down('#dsKeyword').setValue("");
Ext.getCmp('dataFilterForm').down('#dbKeyword').setValue("");
Ext.getCmp('dataFilterForm').down('#tableKeyword').setValue("");
Ext.getCmp('dataFilterForm').down('#valueKeyword').setValue("");
Ext.getCmp('dataFilterForm').down('#resultField').setValue("");
var columns = [Ext.create('Ext.grid.RowNumberer'),{header : 'uuid',hideable : false,hidden : true,dataIndex : 'uuid',width : 60,editor : {xtype : 'textfield'}},
{header : 'defineUuid',hideable : false,hidden : true,dataIndex : 'defineUuid',width : 60,editor : {xtype : 'textfield'}},
{header : 'defineName',hideable : false,hidden : true,dataIndex : 'defineName',width : 60,editor : {xtype : 'textfield'}},
{header : 'type',hideable : false,hidden : true,dataIndex : 'type',width : 60,editor : {xtype : 'textfield'}},
{ header: '值',dataIndex: 'value',width: 120,align:'center'},
{ header: '标签',dataIndex: 'label',width: 120,align:'center'}
];
this.getFactorDataStoreStore().load({
params:{
tblUuid : T_PARAM_TABLE,
defineUuid:e.selected.getAt(0).get('valueLabelUuid')
}
});
Factor.App.getPanelFactorDataControllerController().getFactorDataGrid().reconfigure( this.getFactorDataStoreStore(), columns);
}else{
Factor.App.getPanelFactorDataControllerController().setFactorData(e.selected.getAt(0).get('queryUuid'),e.selected.getAt(0).get('valueKeyword'));
}
this.getFactorDataStoreStore().sort({
property: 'seq',
direction: 'ASC'
});
},
beforeEdit : function(){
var me = Factor.App.getController("panel.FactorPropertyController");
var selectModel = me.getFactorPropertyGrid().selModel;
var row = selectModel.selectionStart.index;
me.selectRow = row;
},
editFinish : function(component){
var me = Factor.App.getController("panel.FactorPropertyController");
var record = me.getFactorPropertyGrid().getStore().getAt(me.selectRow);
var factorUuid = record.get('factor2Uuid');
Ext.Ajax.request({
url: '../work-platform/updateValue.do',
params: {
factorUuid : factorUuid,
value : component.lastValue
},
success: function(response, options) {
}
});
},
containerdblclick:function(){
var NAME_PROPERTY='新建因子属性';
var factorProDefineName='';
factorProDefineName= SEQ.seq(NAME_PROPERTY, function(newName){
var s=Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getStore().findExact('name', newName);
return s!=-1?true:false;
});
var factorDefineRecord={
dataType:1,
decimalWidth:0,
defineUuid:'',
typeShow:'数值(N)',
valueLabelUuid:'',
name:factorProDefineName,
width:8,
createLevel:2,
uuid:'',
format:COLUMNTYPE_DECIMAL};
// parent信息
var parentUuid;
var factorSelectModel = this.getFactorGrid().selModel;
if( 0 < factorSelectModel.selected.length){
parentUuid = factorSelectModel.selected.items[0].get('uuid');
// http request 保存
Ext.Ajax.request({
url: '../work-platform/saveFactor.do',
params: {
addRecord : Ext.encode(factorDefineRecord),
parentUuid : parentUuid,
parentType :1
},
success: function(response, options) {
var result = Ext.JSON.decode(response.responseText);
if(result.duplicate){
Ext.Msg.alert('','名称重复');
return;
}
Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getStore().load({
params:{
condition : Ext.encode({
parentUuid : parentUuid,
parentType :1
})
}
});
Factor.App.getTreeFactorTreeControllerController().getFactorTreeStoreStore().load();
var sm=Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getSelectionModel( );
sm.select(0);
sm.select(Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getStore().getCount()-1);
}
});
}
},
deleteFatorProperty:function(){
var me = Factor.App.getController('panel.FactorPropertyController');
var factorSelectModel = me.getFactorPropertyGrid().selModel;
if(0 == factorSelectModel.selected.length){
return ;
}
/*var row = factorSelectModel.selectionStart.index;
var record = me.getFactorPropertyGrid().getStore().getAt(row);
*/
var deleteJson=[];
var Store= me.getFactorPropertyGrid().getStore();
var sm =me.getFactorPropertyGrid().getSelectionModel();
for(var i=0;i<sm.getSelection().length;i++){ | Ext.MessageBox.alert('提示', '您选中的属性包含固有属性,因子固有属性不能删除!');
return;
}
}
Ext.MessageBox.confirm('确认', '确定要删除选中因子属性?', function(btn){
if('yes'===btn){
Store.remove(sm.getSelection());
if (Store.getCount() > 0) {
sm.select(0);
}
var delstores= Store.getRemovedRecords();
if(delstores.length>0){
for(var i=0;i<delstores.length;i++){
var record = delstores[i];
deleteJson.push(record.data);
}
}
Ext.Ajax.request({
url: '../work-platform/deleteFactorInstances.do',
params: {
deleteFactorInstances:Ext.encode(deleteJson)
},
success: function(response, options) {
var factorSelectModel = me.getFactorGrid().selModel;
if(0 < factorSelectModel.selected.length){
parentUuid = factorSelectModel.selected.items[0].get('uuid');
}
Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getStore().load({
params:{
condition : Ext.encode({
parentUuid : parentUuid,
parentType :1
})
}
});
}
});
}
});
}
}); | if("1"===(sm.getSelection()[i].get('prototype')+"")){ | random_line_split |
FactorPropertyController.js | Ext.define('Factor.controller.panel.FactorPropertyController', {
extend: 'Factor.controller.Base',
stores: [
"FactorPropertyStore",
"FactorDataStore"
],
models: [
"FactorPropertyModel"
],
views: [
// 'window.FactorWindow'
],
controllers:[
'window.FactorWindowController'
],
refs : [{
selector: '#factorWindow',
ref: 'factorWindow'
},{
selector: '#factorGrid',
ref: 'factorGrid'
},{
selector: '#factorPropertyGrid',
ref: 'factorPropertyGrid'
},{
selector: '#factorProperty',
ref: 'factorProperty'
},{
selector: '#factorDataGrid',
ref: 'factorDataGrid'
}],
init: function() {
this.control({
'#factorPropertyGrid' : {
afterrender: this.onPanelRendered,
select : this.selectRow,
beforeitemclick : this.rowClick,
containerdblclick:this.containerdblclick,
containerclick:this.containerclick
}
});
},
containerclick:function(){
// alert('containerclick');
document.getElementById('factorGrid_header').style.removeProperty('background-color');
document.getElementById('factorGrid_header').style.removeProperty('background-image');
document.getElementById('factorPropertyGrid_header').style.setProperty('background-image','none');
document.getElementById('factorPropertyGrid_header').style.setProperty('background-color','rgb(117, 177, 218)');
document.getElementById('factorDataGrid_header').style.removeProperty('background-color');
document.getElementById('factorDataGrid_header').style.removeProperty('background-image');
document.getElementById('relationGrid_header').style.removeProperty('background-color');
document.getElementById('relationGrid_header').style.removeProperty('background-image');
document.getElementById('relationPropertyGrid_header').style.removeProperty('background-color');
document.getElementById('relationPropertyGrid_header').style.removeProperty('background-image');
document.getElementById('relationDataGrid_header').style.removeProperty('background-color');
document.getElementById('relationDataGrid_header').style.removeProperty('background-image');
},
onPanelRendered : function(){
/*this.getFactorPropertyGrid().columns[7].hide();
this.getFactorPropertyGrid().columns[8].hide();*/
},
rowClick : function(row, record, item, index, e){
this.containerclick();
var valueLabelUuid= record.get('valueLabelUuid');
setFactorPropertyInputGrid(valueLabelUuid,record.get('value'));
propertyGridActivity();
setGlobleVariate('FactorProperty',record.get('uuid'),record.get('defineUuid'));
this.getFactorProperty().setSource({
// "defineUuid":e.selected.getAt(0).get('uuid'),
"name":record.get('name'),
"format": record.get('format'),
"width": record.get('width'),
"decimalWidth": record.get('decimalWidth'),
"valueLabelUuid": record.get('valueLabelUuid')
});
Factor.App.getPanelFactorPropertyGridControllerController().getFactorProperty().plugins[0].startEdit(0, 2);
Factor.App.getPanelFactorPropertyGridControllerController().getFactorProperty().setTitle('正在编辑【因子属性表】');
},
selectRow : function(e, obj ,row){
if(0 == e.selected.length){
return;
}
this.getFactorProperty().setSource({
// "defineUuid":e.selected.getAt(0).get('uuid'),
"name": e.selected.getAt(0).get('name'),
"format": e.selected.getAt(0).get('format'),
"width": e.selected.getAt(0).get('width'),
"decimalWidth": e.selected.getAt(0).get('decimalWidth'),
"valueLabelUuid": e.selected.getAt(0).get('valueLabelUuid')
});
setGlobleVariate('Factor',e.selected.getAt(0).get('uuid'),e.selected.getAt(0).get('defineUuid'));
if(''===e.selected.getAt(0).get('queryUuid')||null==e.selected.getAt(0).get('queryUuid')){
Ext.getCmp('dataFilterForm').down('#queryUuid').setValue("");
Ext.getCmp('dataFilterForm').down('#dsKeyword').setValue("");
Ext.getCmp('dataFilterForm').down('#dbKeyword').setValue("");
Ext.getCmp('dataFilterForm').down('#tableKeyword').setValue("");
Ext.getCmp('dataFilterForm').down('#valueKeyword').setValue("");
Ext.getCmp('dataFilterForm').down('#resultField').setValue("");
var columns = [Ext.create('Ext.grid.RowNumberer'),{header : 'uuid',hideable : false,hidden : true,dataIndex : 'uuid',width : 60,editor : {xtype : 'textfield'}},
{header : 'defineUuid',hideable : false,hidden : true,dataIndex : 'defineUuid',width : 60,editor : {xtype : 'textfield'}},
{header : 'defineName',hideable : false,hidden : true,dataIndex : 'defineName',width : 60,editor : {xtype : 'textfield'}},
{header : 'type',hideable : false,hidden : true,dataIndex : 'type',width : 60,editor : {xtype : 'textfield'}},
{ header: '值',dataIndex: 'value',width: 120,align:'center'},
{ header: '标签',dataIndex: 'label',width: 120,align:'center'}
];
this.getFactorDataStoreStore().load({
params:{
tblUuid : T_PARAM_TABLE,
defineUuid:e.selected.getAt(0).get('valueLabelUuid')
}
});
Factor.App.getPanelFactorDataControllerController().getFactorDataGrid().reconfigure( this.getFactorDataStoreStore(), columns);
}else{
Factor.App.getPanelFactorDataControllerController().setFactorData(e.selected.getAt(0).get('queryUuid'),e.selected.getAt(0).get('valueKeyword'));
}
this.getFactorDataStoreStore().sort({
property: 'seq',
direction: 'ASC'
});
},
beforeEdit : function(){
var me = Factor.App.getController("panel.FactorPropertyController");
var selectModel = me.getFactorPropertyGrid().selModel;
var row = selectModel.selectionStart.index;
me.selectRow = row;
},
editFinish : function(component){
var me = Factor.App.getController("panel.FactorPropertyController");
var record = me.getFactorPropertyGrid().getStore().getAt(me.selectRow);
var factorUuid = record.get('factor2Uuid');
Ext.Ajax.request({
url: '../work-platform/updateValue.do',
params: {
factorUuid : factorUuid,
value : component.lastValue
},
success: function(response, options) {
}
});
},
containerdblclick:function(){
var NAME_PROPERTY='新建因子属性';
var factorProDefineName='';
factorProDefineName= SEQ.seq(NAME_PROPERTY, function(newName){
var s=Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getStore().findExact('name', newName);
return s!=-1?true:false;
});
var factorDefineRecord={
dataType:1,
decimalWidth:0,
defineUuid:'',
typeShow:'数值(N)',
valueLabelUuid:'',
name:factorProDefineName,
width:8,
createLevel:2,
uuid:'',
format:COLUMNTYPE_DECIMAL};
// parent信息
var parentUuid;
var factorSelectModel = this.getFactorGrid().selModel;
if( 0 < factorSelectModel.selected.length){
parentUuid = factorSelectModel.selected.items[0].get('uuid');
// http request 保存
Ext.Ajax.request({
url: '../work-platform/saveFactor.do',
params: {
addRecord : Ext.encode(factorDefineRecord),
parentUuid : parentUuid,
parentType :1
},
success: function(response, options) {
var result = Ext.JSON.decode(response.responseText);
if(result.duplicate){
Ext.Msg.alert('','名称重复');
return;
}
Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getStore().load({
params:{
condition : Ext.encode({
parentUuid : parentUuid,
parentType :1
})
}
});
Factor.App.getTreeFactorTreeControllerController().getFactorTreeStoreStore().load();
var sm=Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getSelectionModel( );
sm.select(0);
sm.select(Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getStore().getCount()-1);
}
});
}
},
deleteFatorProperty:function(){
var me = Factor.App.getController('panel.FactorPropertyController');
var factorSelectModel = me.getFactorPropertyGrid().selModel;
if(0 == factorSelectModel.selected.length){
return ;
}
/*var row = factorSelectModel.selectionStart.index;
var record = me.getFactorPropertyGrid().getStore().getAt(row);
*/
var deleteJson=[];
var Store= me.getFactorPropertyGrid().getStore();
var sm =me.getFactorPropertyGrid().getSelectionModel();
for(var i=0;i<sm.getSelection().length;i++){
if("1"===(sm.getSelection()[i].get('prototype')+"")){
Ext.MessageBox.alert('提示', '您选中的属性包含固有属性,因子固有属性不能删除!');
return;
}
}
Ext.MessageBox.confirm('确认', '确定要删除选中因子属性?', function(btn){
if('yes'===btn){
Store.remove(sm.getSelection());
if (Store.getCount() > 0) {
sm.select(0);
}
var delstores= Store.getRemovedRecords();
if(delstores.length>0){
for(var i=0;i<delstores.length;i++){
var record = delstores[i];
deleteJson.push(record.data);
}
| (deleteJson)
},
success: function(response, options) {
var factorSelectModel = me.getFactorGrid().selModel;
if(0 < factorSelectModel.selected.length){
parentUuid = factorSelectModel.selected.items[0].get('uuid');
}
Factor.App.getPanelFactorPropertyControllerController().getFactorPropertyGrid().getStore().load({
params:{
condition : Ext.encode({
parentUuid : parentUuid,
parentType :1
})
}
});
}
});
}
});
}
}); | }
Ext.Ajax.request({
url: '../work-platform/deleteFactorInstances.do',
params: {
deleteFactorInstances:Ext.encode | conditional_block |
train_ibp.py | import numpy as np
import keras.backend as K
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint, \
ReduceLROnPlateau, TensorBoard
from keras.datasets import cifar10, mnist, fashion_mnist
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from models_ibp import SmallCNN, MediumCNN, LargeCNN, LargeCNN_2, \
ScheduleHyperParamCallback, ConstantSchedule, \
InterpolateSchedule, ibp_loss
import math
import argparse
from pathlib import Path
from datetime import datetime
import json
#######################
# Parse configuration #
#######################
parser = argparse.ArgumentParser()
def add_bool_arg(parser, name, default=True):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--" + name, dest=name, action="store_true")
group.add_argument("--no_" + name, dest=name, action="store_false")
parser.set_defaults(**{name:default})
parser.add_argument("model_name", choices=["SmallCNN", "MediumCNN", "LargeCNN", "LargeCNN_2"])
parser.add_argument("dataset", choices=["MNIST", "CIFAR10", "FASHION_MNIST"])
parser.add_argument("eval_epsilon", type=float)
parser.add_argument("train_epsilon", type=float)
# Model config
parser.add_argument("--num_classes", type=int, default=10)
parser.add_argument("--load_weights_from", type=Path)
add_bool_arg(parser, "elide_final_layer", default=False)
# Training
add_bool_arg(parser, "augmentation", default=False)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--initial_epoch", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--lr_schedule", type=str)
parser.add_argument("--k_warmup", type=int, default=0)
parser.add_argument("--k_rampup", type=int, default=20)
parser.add_argument("--epsilon_warmup", type=int, default=0)
parser.add_argument("--epsilon_rampup", type=int, default=20)
parser.add_argument("--min_k", type=float, default=0.5)
parser.add_argument("--validation_size", type=int, default=5000)
parser.add_argument("--set_gpu", type=int)
# Callbacks
add_bool_arg(parser, "early_stop")
parser.add_argument("--early_stop_patience", type=int, default=30)
add_bool_arg(parser, "lr_reduce")
parser.add_argument("--lr_reduce_patience", type=int, default=10)
parser.add_argument("--lr_reduce_factor", type=float, default=math.sqrt(0.1))
parser.add_argument("--lr_reduce_min", type=float, default=1e-6)
config = parser.parse_args()
######################
# Initialise dataset #
######################
if config.dataset == "CIFAR10":
(x_train, y_train), _ = cifar10.load_data()
elif config.dataset == "MNIST":
(x_train, y_train), _ = mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
elif config.dataset == "FASHION_MNIST":
(x_train, y_train), _ = fashion_mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
else:
raise ValueError("Unrecognised dataset")
# Leave aside a validation set
x_valid = x_train[-config.validation_size:].astype("float32") / 255
y_valid = to_categorical(y_train[-config.validation_size:], num_classes=10)
x_train = x_train[:-config.validation_size].astype("float32") / 255
y_train = to_categorical(y_train[:-config.validation_size], num_classes=10)
# Input image dimensions
input_shape = x_train.shape[1:]
####################
# Initialise model #
####################
# Restrict GPU memory usage
if config.set_gpu is not None:
conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
conf.gpu_options.visible_device_list = str(config.set_gpu)
sess = tf.Session(config=conf)
set_session(sess)
del config.set_gpu
eps_train_var = K.variable(config.train_epsilon)
eps = K.in_train_phase(K.stop_gradient(eps_train_var), K.constant(config.eval_epsilon))
k_train_var = K.variable(1)
k = K.in_train_phase(K.stop_gradient(k_train_var), K.constant(config.min_k))
if config.augmentation:
mean, std = x_train.mean(axis=(0, 1, 2)), x_train.std(axis=(0, 1, 2)) + 1e-6
x_train = (x_train - mean) / std
x_valid = (x_valid - mean) / std
print("Normalising channels with values", mean, std)
else:
mean, std = None, None
if config.model_name == "SmallCNN":
model = SmallCNN(input_shape=input_shape)
elif config.model_name == "MediumCNN":
model = MediumCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN":
model = LargeCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN_2":
model = LargeCNN_2(input_shape=input_shape)
else:
raise ValueError("Unrecognised model")
def loss(y_true, y_pred):
return ibp_loss(y_true, y_pred, model, eps, k, mean=mean, std=std, elision=config.elide_final_layer)
def | (y_true, y_pred):
return model.robust_accuracy
if config.load_weights_from is not None:
model.load_weights(config.load_weights_from)
metrics = ["accuracy", robust_acc]
model.compile(loss=loss, optimizer=Adam(lr=config.lr), metrics=metrics)
model.summary()
##################
# Setup training #
##################
# Prepare model model saving directory
model_type = config.model_name
elision = "elide" if config.elide_final_layer else "no_elide"
model_name = "IBP_%s_%s_train_%.3f_eval_%.3f_%s" % (config.dataset, model_type, config.train_epsilon, config.eval_epsilon, elision)
if not config.load_weights_from:
save_dir = Path("saved_models") / model_name / datetime.now().strftime("%b%d_%H-%M-%S")
if not save_dir.exists():
save_dir.mkdir(parents=True)
else:
save_dir = config.load_weights_from.parent
file_path = save_dir / "weights_{epoch:03d}_{val_robust_acc:.3f}.h5"
# Save config to json
with open(str(save_dir / ("config_%d.json" % config.initial_epoch)), "w") as fp:
json.dump(vars(config), fp, sort_keys=True, indent=4)
# Set up training callbacks
checkpoint = ModelCheckpoint(filepath=str(file_path),
monitor="val_robust_acc",
period=10,
verbose=1)
tensor_board = TensorBoard(log_dir=save_dir,
histogram_freq=0,
batch_size=config.batch_size,
write_graph=True,
write_grads=False,
write_images=False,
update_freq=5000)
tensor_board.samples_seen = config.initial_epoch * len(x_train)
tensor_board.samples_seen_at_last_write = config.initial_epoch * len(x_train)
callbacks = [checkpoint, tensor_board]
if config.lr_schedule is not None:
chunks = config.lr_schedule.split(",")
schedule = [(float(lr), int(epoch)) for (lr, epoch) in [c.split("@") for c in chunks]]
def scheduler(epoch, current_lr):
lr = config.lr
for (rate, e) in schedule:
if epoch >= e:
lr = rate
else:
break
return lr
callbacks.insert(0, LearningRateScheduler(scheduler, verbose=1))
if config.lr_reduce:
callbacks.insert(0, ReduceLROnPlateau(monitor="val_loss",
factor=config.lr_reduce_factor,
cooldown=0,
patience=config.lr_reduce_patience,
min_lr=config.lr_reduce_min,
verbose=1))
if config.early_stop:
callbacks.insert(0, EarlyStopping(monitor="val_loss",
patience=config.early_stop_patience,
verbose=1))
if config.epsilon_rampup > 0:
start = config.epsilon_warmup * len(x_train)
end = start + config.epsilon_rampup * len(x_train)
eps_schedule = InterpolateSchedule(0, config.train_epsilon, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="epsilon",
variable=eps_train_var,
schedule=eps_schedule,
update_every=1000,
verbose=0))
if config.k_rampup > 0:
start = config.k_warmup * len(x_train)
end = start + config.k_rampup * len(x_train)
k_schedule = InterpolateSchedule(1, config.min_k, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="k",
variable=k_train_var,
schedule=k_schedule,
update_every=1000,
verbose=0))
# Run training, with or without data augmentation.
if not config.augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
validation_data=(x_valid, y_valid),
epochs=config.epochs,
initial_epoch=config.initial_epoch,
batch_size=config.batch_size,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
shift = 4 if config.dataset == "CIFAR10" else 2
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# randomly rotate images in the range (deg 0 to 30)
# rotation_range=30,
# randomly shift images horizontally
width_shift_range=shift,
# randomly shift images vertically
height_shift_range=shift,
# set mode for filling points outside the input boundaries
fill_mode="constant" if config.dataset == "CIFAR10" else "nearest",
cval=0,
# randomly flip images
horizontal_flip=True)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=config.batch_size),
validation_data=(x_valid, y_valid), steps_per_epoch=(len(x_train) / config.batch_size),
epochs=config.epochs, initial_epoch=config.initial_epoch,
verbose=1, workers=4, callbacks=callbacks)
| robust_acc | identifier_name |
train_ibp.py | import numpy as np
import keras.backend as K
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint, \
ReduceLROnPlateau, TensorBoard
from keras.datasets import cifar10, mnist, fashion_mnist
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from models_ibp import SmallCNN, MediumCNN, LargeCNN, LargeCNN_2, \
ScheduleHyperParamCallback, ConstantSchedule, \
InterpolateSchedule, ibp_loss
import math
import argparse
from pathlib import Path
from datetime import datetime
import json
#######################
# Parse configuration #
#######################
parser = argparse.ArgumentParser()
def add_bool_arg(parser, name, default=True):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--" + name, dest=name, action="store_true")
group.add_argument("--no_" + name, dest=name, action="store_false")
parser.set_defaults(**{name:default})
parser.add_argument("model_name", choices=["SmallCNN", "MediumCNN", "LargeCNN", "LargeCNN_2"])
parser.add_argument("dataset", choices=["MNIST", "CIFAR10", "FASHION_MNIST"])
parser.add_argument("eval_epsilon", type=float)
parser.add_argument("train_epsilon", type=float)
# Model config
parser.add_argument("--num_classes", type=int, default=10)
parser.add_argument("--load_weights_from", type=Path)
add_bool_arg(parser, "elide_final_layer", default=False)
# Training
add_bool_arg(parser, "augmentation", default=False)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--initial_epoch", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--lr_schedule", type=str)
parser.add_argument("--k_warmup", type=int, default=0)
parser.add_argument("--k_rampup", type=int, default=20)
parser.add_argument("--epsilon_warmup", type=int, default=0)
parser.add_argument("--epsilon_rampup", type=int, default=20)
parser.add_argument("--min_k", type=float, default=0.5)
parser.add_argument("--validation_size", type=int, default=5000)
parser.add_argument("--set_gpu", type=int)
# Callbacks
add_bool_arg(parser, "early_stop")
parser.add_argument("--early_stop_patience", type=int, default=30)
add_bool_arg(parser, "lr_reduce")
parser.add_argument("--lr_reduce_patience", type=int, default=10)
parser.add_argument("--lr_reduce_factor", type=float, default=math.sqrt(0.1))
parser.add_argument("--lr_reduce_min", type=float, default=1e-6)
config = parser.parse_args()
######################
# Initialise dataset #
######################
if config.dataset == "CIFAR10":
(x_train, y_train), _ = cifar10.load_data()
elif config.dataset == "MNIST":
(x_train, y_train), _ = mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
elif config.dataset == "FASHION_MNIST":
(x_train, y_train), _ = fashion_mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
else:
raise ValueError("Unrecognised dataset")
# Leave aside a validation set
x_valid = x_train[-config.validation_size:].astype("float32") / 255
y_valid = to_categorical(y_train[-config.validation_size:], num_classes=10)
x_train = x_train[:-config.validation_size].astype("float32") / 255
y_train = to_categorical(y_train[:-config.validation_size], num_classes=10)
# Input image dimensions
input_shape = x_train.shape[1:]
####################
# Initialise model #
####################
# Restrict GPU memory usage
if config.set_gpu is not None:
conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
conf.gpu_options.visible_device_list = str(config.set_gpu)
sess = tf.Session(config=conf)
set_session(sess)
del config.set_gpu
eps_train_var = K.variable(config.train_epsilon)
eps = K.in_train_phase(K.stop_gradient(eps_train_var), K.constant(config.eval_epsilon))
k_train_var = K.variable(1)
k = K.in_train_phase(K.stop_gradient(k_train_var), K.constant(config.min_k))
if config.augmentation:
|
else:
mean, std = None, None
if config.model_name == "SmallCNN":
model = SmallCNN(input_shape=input_shape)
elif config.model_name == "MediumCNN":
model = MediumCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN":
model = LargeCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN_2":
model = LargeCNN_2(input_shape=input_shape)
else:
raise ValueError("Unrecognised model")
def loss(y_true, y_pred):
return ibp_loss(y_true, y_pred, model, eps, k, mean=mean, std=std, elision=config.elide_final_layer)
def robust_acc(y_true, y_pred):
return model.robust_accuracy
if config.load_weights_from is not None:
model.load_weights(config.load_weights_from)
metrics = ["accuracy", robust_acc]
model.compile(loss=loss, optimizer=Adam(lr=config.lr), metrics=metrics)
model.summary()
##################
# Setup training #
##################
# Prepare model model saving directory
model_type = config.model_name
elision = "elide" if config.elide_final_layer else "no_elide"
model_name = "IBP_%s_%s_train_%.3f_eval_%.3f_%s" % (config.dataset, model_type, config.train_epsilon, config.eval_epsilon, elision)
if not config.load_weights_from:
save_dir = Path("saved_models") / model_name / datetime.now().strftime("%b%d_%H-%M-%S")
if not save_dir.exists():
save_dir.mkdir(parents=True)
else:
save_dir = config.load_weights_from.parent
file_path = save_dir / "weights_{epoch:03d}_{val_robust_acc:.3f}.h5"
# Save config to json
with open(str(save_dir / ("config_%d.json" % config.initial_epoch)), "w") as fp:
json.dump(vars(config), fp, sort_keys=True, indent=4)
# Set up training callbacks
checkpoint = ModelCheckpoint(filepath=str(file_path),
monitor="val_robust_acc",
period=10,
verbose=1)
tensor_board = TensorBoard(log_dir=save_dir,
histogram_freq=0,
batch_size=config.batch_size,
write_graph=True,
write_grads=False,
write_images=False,
update_freq=5000)
tensor_board.samples_seen = config.initial_epoch * len(x_train)
tensor_board.samples_seen_at_last_write = config.initial_epoch * len(x_train)
callbacks = [checkpoint, tensor_board]
if config.lr_schedule is not None:
chunks = config.lr_schedule.split(",")
schedule = [(float(lr), int(epoch)) for (lr, epoch) in [c.split("@") for c in chunks]]
def scheduler(epoch, current_lr):
lr = config.lr
for (rate, e) in schedule:
if epoch >= e:
lr = rate
else:
break
return lr
callbacks.insert(0, LearningRateScheduler(scheduler, verbose=1))
if config.lr_reduce:
callbacks.insert(0, ReduceLROnPlateau(monitor="val_loss",
factor=config.lr_reduce_factor,
cooldown=0,
patience=config.lr_reduce_patience,
min_lr=config.lr_reduce_min,
verbose=1))
if config.early_stop:
callbacks.insert(0, EarlyStopping(monitor="val_loss",
patience=config.early_stop_patience,
verbose=1))
if config.epsilon_rampup > 0:
start = config.epsilon_warmup * len(x_train)
end = start + config.epsilon_rampup * len(x_train)
eps_schedule = InterpolateSchedule(0, config.train_epsilon, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="epsilon",
variable=eps_train_var,
schedule=eps_schedule,
update_every=1000,
verbose=0))
if config.k_rampup > 0:
start = config.k_warmup * len(x_train)
end = start + config.k_rampup * len(x_train)
k_schedule = InterpolateSchedule(1, config.min_k, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="k",
variable=k_train_var,
schedule=k_schedule,
update_every=1000,
verbose=0))
# Run training, with or without data augmentation.
if not config.augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
validation_data=(x_valid, y_valid),
epochs=config.epochs,
initial_epoch=config.initial_epoch,
batch_size=config.batch_size,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
shift = 4 if config.dataset == "CIFAR10" else 2
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# randomly rotate images in the range (deg 0 to 30)
# rotation_range=30,
# randomly shift images horizontally
width_shift_range=shift,
# randomly shift images vertically
height_shift_range=shift,
# set mode for filling points outside the input boundaries
fill_mode="constant" if config.dataset == "CIFAR10" else "nearest",
cval=0,
# randomly flip images
horizontal_flip=True)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=config.batch_size),
validation_data=(x_valid, y_valid), steps_per_epoch=(len(x_train) / config.batch_size),
epochs=config.epochs, initial_epoch=config.initial_epoch,
verbose=1, workers=4, callbacks=callbacks)
| mean, std = x_train.mean(axis=(0, 1, 2)), x_train.std(axis=(0, 1, 2)) + 1e-6
x_train = (x_train - mean) / std
x_valid = (x_valid - mean) / std
print("Normalising channels with values", mean, std) | conditional_block |
train_ibp.py | import numpy as np
import keras.backend as K
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint, \
ReduceLROnPlateau, TensorBoard
from keras.datasets import cifar10, mnist, fashion_mnist
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from models_ibp import SmallCNN, MediumCNN, LargeCNN, LargeCNN_2, \
ScheduleHyperParamCallback, ConstantSchedule, \
InterpolateSchedule, ibp_loss
import math
import argparse
from pathlib import Path
from datetime import datetime
import json
#######################
# Parse configuration #
#######################
parser = argparse.ArgumentParser()
def add_bool_arg(parser, name, default=True):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--" + name, dest=name, action="store_true")
group.add_argument("--no_" + name, dest=name, action="store_false")
parser.set_defaults(**{name:default})
parser.add_argument("model_name", choices=["SmallCNN", "MediumCNN", "LargeCNN", "LargeCNN_2"])
parser.add_argument("dataset", choices=["MNIST", "CIFAR10", "FASHION_MNIST"])
parser.add_argument("eval_epsilon", type=float)
parser.add_argument("train_epsilon", type=float)
# Model config
parser.add_argument("--num_classes", type=int, default=10)
parser.add_argument("--load_weights_from", type=Path)
add_bool_arg(parser, "elide_final_layer", default=False)
# Training
add_bool_arg(parser, "augmentation", default=False)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--initial_epoch", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--lr_schedule", type=str)
parser.add_argument("--k_warmup", type=int, default=0)
parser.add_argument("--k_rampup", type=int, default=20)
parser.add_argument("--epsilon_warmup", type=int, default=0)
parser.add_argument("--epsilon_rampup", type=int, default=20)
parser.add_argument("--min_k", type=float, default=0.5)
parser.add_argument("--validation_size", type=int, default=5000)
parser.add_argument("--set_gpu", type=int)
# Callbacks
add_bool_arg(parser, "early_stop")
parser.add_argument("--early_stop_patience", type=int, default=30)
add_bool_arg(parser, "lr_reduce")
parser.add_argument("--lr_reduce_patience", type=int, default=10)
parser.add_argument("--lr_reduce_factor", type=float, default=math.sqrt(0.1))
parser.add_argument("--lr_reduce_min", type=float, default=1e-6)
config = parser.parse_args()
######################
# Initialise dataset #
######################
if config.dataset == "CIFAR10":
(x_train, y_train), _ = cifar10.load_data()
elif config.dataset == "MNIST":
(x_train, y_train), _ = mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
elif config.dataset == "FASHION_MNIST":
(x_train, y_train), _ = fashion_mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
else:
raise ValueError("Unrecognised dataset")
# Leave aside a validation set
x_valid = x_train[-config.validation_size:].astype("float32") / 255
y_valid = to_categorical(y_train[-config.validation_size:], num_classes=10)
x_train = x_train[:-config.validation_size].astype("float32") / 255
y_train = to_categorical(y_train[:-config.validation_size], num_classes=10)
# Input image dimensions
input_shape = x_train.shape[1:]
####################
# Initialise model #
####################
# Restrict GPU memory usage
if config.set_gpu is not None:
conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
conf.gpu_options.visible_device_list = str(config.set_gpu)
sess = tf.Session(config=conf)
set_session(sess)
del config.set_gpu
eps_train_var = K.variable(config.train_epsilon)
eps = K.in_train_phase(K.stop_gradient(eps_train_var), K.constant(config.eval_epsilon))
k_train_var = K.variable(1)
k = K.in_train_phase(K.stop_gradient(k_train_var), K.constant(config.min_k))
if config.augmentation:
mean, std = x_train.mean(axis=(0, 1, 2)), x_train.std(axis=(0, 1, 2)) + 1e-6
x_train = (x_train - mean) / std
x_valid = (x_valid - mean) / std
print("Normalising channels with values", mean, std)
else:
mean, std = None, None
if config.model_name == "SmallCNN":
model = SmallCNN(input_shape=input_shape)
elif config.model_name == "MediumCNN":
model = MediumCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN":
model = LargeCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN_2":
model = LargeCNN_2(input_shape=input_shape)
else:
raise ValueError("Unrecognised model")
def loss(y_true, y_pred):
return ibp_loss(y_true, y_pred, model, eps, k, mean=mean, std=std, elision=config.elide_final_layer)
def robust_acc(y_true, y_pred):
|
if config.load_weights_from is not None:
model.load_weights(config.load_weights_from)
metrics = ["accuracy", robust_acc]
model.compile(loss=loss, optimizer=Adam(lr=config.lr), metrics=metrics)
model.summary()
##################
# Setup training #
##################
# Prepare model model saving directory
model_type = config.model_name
elision = "elide" if config.elide_final_layer else "no_elide"
model_name = "IBP_%s_%s_train_%.3f_eval_%.3f_%s" % (config.dataset, model_type, config.train_epsilon, config.eval_epsilon, elision)
if not config.load_weights_from:
save_dir = Path("saved_models") / model_name / datetime.now().strftime("%b%d_%H-%M-%S")
if not save_dir.exists():
save_dir.mkdir(parents=True)
else:
save_dir = config.load_weights_from.parent
file_path = save_dir / "weights_{epoch:03d}_{val_robust_acc:.3f}.h5"
# Save config to json
with open(str(save_dir / ("config_%d.json" % config.initial_epoch)), "w") as fp:
json.dump(vars(config), fp, sort_keys=True, indent=4)
# Set up training callbacks
checkpoint = ModelCheckpoint(filepath=str(file_path),
monitor="val_robust_acc",
period=10,
verbose=1)
tensor_board = TensorBoard(log_dir=save_dir,
histogram_freq=0,
batch_size=config.batch_size,
write_graph=True,
write_grads=False,
write_images=False,
update_freq=5000)
tensor_board.samples_seen = config.initial_epoch * len(x_train)
tensor_board.samples_seen_at_last_write = config.initial_epoch * len(x_train)
callbacks = [checkpoint, tensor_board]
if config.lr_schedule is not None:
chunks = config.lr_schedule.split(",")
schedule = [(float(lr), int(epoch)) for (lr, epoch) in [c.split("@") for c in chunks]]
def scheduler(epoch, current_lr):
lr = config.lr
for (rate, e) in schedule:
if epoch >= e:
lr = rate
else:
break
return lr
callbacks.insert(0, LearningRateScheduler(scheduler, verbose=1))
if config.lr_reduce:
callbacks.insert(0, ReduceLROnPlateau(monitor="val_loss",
factor=config.lr_reduce_factor,
cooldown=0,
patience=config.lr_reduce_patience,
min_lr=config.lr_reduce_min,
verbose=1))
if config.early_stop:
callbacks.insert(0, EarlyStopping(monitor="val_loss",
patience=config.early_stop_patience,
verbose=1))
if config.epsilon_rampup > 0:
start = config.epsilon_warmup * len(x_train)
end = start + config.epsilon_rampup * len(x_train)
eps_schedule = InterpolateSchedule(0, config.train_epsilon, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="epsilon",
variable=eps_train_var,
schedule=eps_schedule,
update_every=1000,
verbose=0))
if config.k_rampup > 0:
start = config.k_warmup * len(x_train)
end = start + config.k_rampup * len(x_train)
k_schedule = InterpolateSchedule(1, config.min_k, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="k",
variable=k_train_var,
schedule=k_schedule,
update_every=1000,
verbose=0))
# Run training, with or without data augmentation.
if not config.augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
validation_data=(x_valid, y_valid),
epochs=config.epochs,
initial_epoch=config.initial_epoch,
batch_size=config.batch_size,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
shift = 4 if config.dataset == "CIFAR10" else 2
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# randomly rotate images in the range (deg 0 to 30)
# rotation_range=30,
# randomly shift images horizontally
width_shift_range=shift,
# randomly shift images vertically
height_shift_range=shift,
# set mode for filling points outside the input boundaries
fill_mode="constant" if config.dataset == "CIFAR10" else "nearest",
cval=0,
# randomly flip images
horizontal_flip=True)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=config.batch_size),
validation_data=(x_valid, y_valid), steps_per_epoch=(len(x_train) / config.batch_size),
epochs=config.epochs, initial_epoch=config.initial_epoch,
verbose=1, workers=4, callbacks=callbacks)
| return model.robust_accuracy | identifier_body |
train_ibp.py | import numpy as np
import keras.backend as K
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint, \
ReduceLROnPlateau, TensorBoard
from keras.datasets import cifar10, mnist, fashion_mnist
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from models_ibp import SmallCNN, MediumCNN, LargeCNN, LargeCNN_2, \
ScheduleHyperParamCallback, ConstantSchedule, \
InterpolateSchedule, ibp_loss
import math
import argparse
from pathlib import Path
from datetime import datetime
import json
#######################
# Parse configuration #
#######################
parser = argparse.ArgumentParser()
def add_bool_arg(parser, name, default=True):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--" + name, dest=name, action="store_true")
group.add_argument("--no_" + name, dest=name, action="store_false")
parser.set_defaults(**{name:default})
parser.add_argument("model_name", choices=["SmallCNN", "MediumCNN", "LargeCNN", "LargeCNN_2"])
parser.add_argument("dataset", choices=["MNIST", "CIFAR10", "FASHION_MNIST"])
parser.add_argument("eval_epsilon", type=float)
parser.add_argument("train_epsilon", type=float)
# Model config
parser.add_argument("--num_classes", type=int, default=10)
parser.add_argument("--load_weights_from", type=Path)
add_bool_arg(parser, "elide_final_layer", default=False)
# Training
add_bool_arg(parser, "augmentation", default=False)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--initial_epoch", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--lr_schedule", type=str)
parser.add_argument("--k_warmup", type=int, default=0)
parser.add_argument("--k_rampup", type=int, default=20)
parser.add_argument("--epsilon_warmup", type=int, default=0)
parser.add_argument("--epsilon_rampup", type=int, default=20)
parser.add_argument("--min_k", type=float, default=0.5)
parser.add_argument("--validation_size", type=int, default=5000)
parser.add_argument("--set_gpu", type=int)
# Callbacks
add_bool_arg(parser, "early_stop")
parser.add_argument("--early_stop_patience", type=int, default=30)
add_bool_arg(parser, "lr_reduce")
parser.add_argument("--lr_reduce_patience", type=int, default=10)
parser.add_argument("--lr_reduce_factor", type=float, default=math.sqrt(0.1))
parser.add_argument("--lr_reduce_min", type=float, default=1e-6)
config = parser.parse_args()
######################
# Initialise dataset #
######################
if config.dataset == "CIFAR10":
(x_train, y_train), _ = cifar10.load_data()
elif config.dataset == "MNIST":
(x_train, y_train), _ = mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
elif config.dataset == "FASHION_MNIST":
(x_train, y_train), _ = fashion_mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
else:
raise ValueError("Unrecognised dataset")
# Leave aside a validation set
x_valid = x_train[-config.validation_size:].astype("float32") / 255
y_valid = to_categorical(y_train[-config.validation_size:], num_classes=10)
x_train = x_train[:-config.validation_size].astype("float32") / 255
y_train = to_categorical(y_train[:-config.validation_size], num_classes=10)
# Input image dimensions
input_shape = x_train.shape[1:]
####################
# Initialise model #
####################
# Restrict GPU memory usage
if config.set_gpu is not None:
conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
conf.gpu_options.visible_device_list = str(config.set_gpu)
sess = tf.Session(config=conf)
set_session(sess)
del config.set_gpu
eps_train_var = K.variable(config.train_epsilon)
eps = K.in_train_phase(K.stop_gradient(eps_train_var), K.constant(config.eval_epsilon))
k_train_var = K.variable(1)
k = K.in_train_phase(K.stop_gradient(k_train_var), K.constant(config.min_k))
if config.augmentation:
mean, std = x_train.mean(axis=(0, 1, 2)), x_train.std(axis=(0, 1, 2)) + 1e-6
x_train = (x_train - mean) / std
x_valid = (x_valid - mean) / std
print("Normalising channels with values", mean, std)
else:
mean, std = None, None
if config.model_name == "SmallCNN":
model = SmallCNN(input_shape=input_shape)
elif config.model_name == "MediumCNN":
model = MediumCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN":
model = LargeCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN_2":
model = LargeCNN_2(input_shape=input_shape)
else:
raise ValueError("Unrecognised model")
def loss(y_true, y_pred):
return ibp_loss(y_true, y_pred, model, eps, k, mean=mean, std=std, elision=config.elide_final_layer)
def robust_acc(y_true, y_pred):
return model.robust_accuracy
if config.load_weights_from is not None:
model.load_weights(config.load_weights_from)
metrics = ["accuracy", robust_acc]
model.compile(loss=loss, optimizer=Adam(lr=config.lr), metrics=metrics)
model.summary()
##################
# Setup training #
##################
# Prepare model model saving directory
model_type = config.model_name
elision = "elide" if config.elide_final_layer else "no_elide"
model_name = "IBP_%s_%s_train_%.3f_eval_%.3f_%s" % (config.dataset, model_type, config.train_epsilon, config.eval_epsilon, elision)
if not config.load_weights_from:
save_dir = Path("saved_models") / model_name / datetime.now().strftime("%b%d_%H-%M-%S")
if not save_dir.exists():
save_dir.mkdir(parents=True)
else:
save_dir = config.load_weights_from.parent
file_path = save_dir / "weights_{epoch:03d}_{val_robust_acc:.3f}.h5"
# Save config to json
with open(str(save_dir / ("config_%d.json" % config.initial_epoch)), "w") as fp:
json.dump(vars(config), fp, sort_keys=True, indent=4)
# Set up training callbacks
checkpoint = ModelCheckpoint(filepath=str(file_path),
monitor="val_robust_acc",
period=10,
verbose=1)
tensor_board = TensorBoard(log_dir=save_dir,
histogram_freq=0,
batch_size=config.batch_size,
write_graph=True,
write_grads=False,
write_images=False,
update_freq=5000)
tensor_board.samples_seen = config.initial_epoch * len(x_train)
tensor_board.samples_seen_at_last_write = config.initial_epoch * len(x_train)
callbacks = [checkpoint, tensor_board]
if config.lr_schedule is not None:
chunks = config.lr_schedule.split(",")
schedule = [(float(lr), int(epoch)) for (lr, epoch) in [c.split("@") for c in chunks]]
def scheduler(epoch, current_lr):
lr = config.lr
for (rate, e) in schedule:
if epoch >= e:
lr = rate
else:
break
return lr
callbacks.insert(0, LearningRateScheduler(scheduler, verbose=1))
if config.lr_reduce:
callbacks.insert(0, ReduceLROnPlateau(monitor="val_loss",
factor=config.lr_reduce_factor,
cooldown=0,
patience=config.lr_reduce_patience,
min_lr=config.lr_reduce_min,
verbose=1))
if config.early_stop:
callbacks.insert(0, EarlyStopping(monitor="val_loss",
patience=config.early_stop_patience,
verbose=1))
if config.epsilon_rampup > 0:
start = config.epsilon_warmup * len(x_train)
end = start + config.epsilon_rampup * len(x_train)
eps_schedule = InterpolateSchedule(0, config.train_epsilon, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="epsilon",
variable=eps_train_var,
schedule=eps_schedule,
update_every=1000,
verbose=0))
if config.k_rampup > 0:
start = config.k_warmup * len(x_train)
end = start + config.k_rampup * len(x_train)
k_schedule = InterpolateSchedule(1, config.min_k, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="k",
variable=k_train_var,
schedule=k_schedule,
update_every=1000,
verbose=0))
# Run training, with or without data augmentation.
if not config.augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
validation_data=(x_valid, y_valid),
epochs=config.epochs,
initial_epoch=config.initial_epoch, | batch_size=config.batch_size,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
shift = 4 if config.dataset == "CIFAR10" else 2
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# randomly rotate images in the range (deg 0 to 30)
# rotation_range=30,
# randomly shift images horizontally
width_shift_range=shift,
# randomly shift images vertically
height_shift_range=shift,
# set mode for filling points outside the input boundaries
fill_mode="constant" if config.dataset == "CIFAR10" else "nearest",
cval=0,
# randomly flip images
horizontal_flip=True)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=config.batch_size),
validation_data=(x_valid, y_valid), steps_per_epoch=(len(x_train) / config.batch_size),
epochs=config.epochs, initial_epoch=config.initial_epoch,
verbose=1, workers=4, callbacks=callbacks) | random_line_split | |
main.rs | use crossterm::{
cursor,
event::{self, DisableMouseCapture, EnableMouseCapture, Event},
queue,
style::{self, Color::Rgb, Colors, Print, SetColors},
terminal,
};
use serde::{Deserialize, Serialize};
use std::{
cmp::min,
collections::HashMap,
env, fs,
io::{self, Write},
iter,
process::exit,
};
use unicode_width::UnicodeWidthChar;
mod view;
use view::{Page, Toc, View};
mod epub;
fn wrap(text: &str, max_cols: usize) -> Vec<(usize, usize)> {
let mut lines = Vec::new();
// bytes
let mut start = 0;
let mut end = 0;
// cols after the break
let mut after = 0;
// cols of unbroken line
let mut cols = 0;
// are we breaking on whitespace?
let mut space = false;
// should probably use unicode_segmentation grapheme_indices
for (i, c) in text.char_indices() {
// https://github.com/unicode-rs/unicode-width/issues/6
let char_cols = c.width().unwrap_or(0);
cols += char_cols;
match c {
'\n' => {
after = 0;
end = i;
space = true;
cols = max_cols + 1;
}
' ' => {
after = 0;
end = i;
space = true;
}
'-' | '—' if cols <= max_cols => {
after = 0;
end = i + c.len_utf8();
space = false;
}
_ => after += char_cols,
}
if cols > max_cols {
// break a single long word
if cols == after {
after = char_cols;
end = i;
space = false;
}
lines.push((start, end));
start = end;
if space {
start += 1;
}
cols = after;
}
}
lines
}
struct Se |
dir: Direction,
skip: bool,
}
#[derive(Clone)]
enum Direction {
Next,
Prev,
}
pub struct Bk<'a> {
quit: bool,
chapters: Vec<epub::Chapter>,
// position in the book
chapter: usize,
line: usize,
mark: HashMap<char, (usize, usize)>,
links: HashMap<String, (usize, usize)>,
// layout
colors: Colors,
cols: u16,
rows: usize,
max_width: u16,
// view state
view: &'a dyn View,
cursor: usize,
dir: Direction,
meta: Vec<String>,
query: String,
}
impl Bk<'_> {
fn new(epub: epub::Epub, args: Props) -> Self {
let (cols, rows) = terminal::size().unwrap();
let width = min(cols, args.width) as usize;
let meta = wrap(&epub.meta, width)
.into_iter()
.map(|(a, b)| String::from(&epub.meta[a..b]))
.collect();
let mut chapters = epub.chapters;
for c in &mut chapters {
c.lines = wrap(&c.text, width);
if c.title.chars().count() > width {
c.title = c
.title
.chars()
.take(width - 1)
.chain(std::iter::once('…'))
.collect();
}
}
let mut bk = Bk {
quit: false,
chapters,
chapter: 0,
line: 0,
mark: HashMap::new(),
links: epub.links,
colors: args.colors,
cols,
rows: rows as usize,
max_width: args.width,
view: if args.toc { &Toc } else { &Page },
cursor: 0,
dir: Direction::Next,
meta,
query: String::new(),
};
bk.jump_byte(args.chapter, args.byte);
bk.mark('\'');
bk
}
fn run(&mut self) -> io::Result<()> {
let mut stdout = io::stdout();
queue!(
stdout,
terminal::EnterAlternateScreen,
cursor::Hide,
EnableMouseCapture,
)?;
terminal::enable_raw_mode()?;
let mut render = |bk: &Bk| {
queue!(
stdout,
Print(style::Attribute::Reset),
SetColors(bk.colors),
terminal::Clear(terminal::ClearType::All),
)
.unwrap();
for (i, line) in bk.view.render(bk).iter().enumerate() {
queue!(stdout, cursor::MoveTo(bk.pad(), i as u16), Print(line)).unwrap();
}
queue!(stdout, cursor::MoveTo(bk.pad(), bk.cursor as u16)).unwrap();
stdout.flush().unwrap();
};
render(self);
loop {
match event::read()? {
Event::Key(e) => self.view.on_key(self, e.code),
Event::Mouse(e) => {
// XXX idk seems lame
if e.kind == event::MouseEventKind::Moved {
continue;
}
self.view.on_mouse(self, e);
}
Event::Resize(cols, rows) => {
self.rows = rows as usize;
if cols != self.cols {
self.cols = cols;
let width = min(cols, self.max_width) as usize;
for c in &mut self.chapters {
c.lines = wrap(&c.text, width);
}
}
self.view.on_resize(self);
// XXX marks aren't updated
}
}
if self.quit {
break;
}
render(self);
}
queue!(
stdout,
terminal::LeaveAlternateScreen,
cursor::Show,
DisableMouseCapture
)?;
terminal::disable_raw_mode()
}
fn jump(&mut self, (c, l): (usize, usize)) {
self.mark('\'');
self.chapter = c;
self.line = l;
}
fn jump_byte(&mut self, c: usize, byte: usize) {
self.chapter = c;
self.line = match self.chapters[c]
.lines
.binary_search_by_key(&byte, |&(a, _)| a)
{
Ok(n) => n,
Err(n) => n - 1,
}
}
fn jump_reset(&mut self) {
let &(c, l) = self.mark.get(&'\'').unwrap();
self.chapter = c;
self.line = l;
}
fn mark(&mut self, c: char) {
self.mark.insert(c, (self.chapter, self.line));
}
fn pad(&self) -> u16 {
self.cols.saturating_sub(self.max_width) / 2
}
fn search(&mut self, args: SearchArgs) -> bool {
let (start, end) = self.chapters[self.chapter].lines[self.line];
match args.dir {
Direction::Next => {
let byte = if args.skip { end } else { start };
let head = (self.chapter, byte);
let tail = (self.chapter + 1..self.chapters.len() - 1).map(|n| (n, 0));
for (c, byte) in iter::once(head).chain(tail) {
if let Some(index) = self.chapters[c].text[byte..].find(&self.query) {
self.jump_byte(c, index + byte);
return true;
}
}
false
}
Direction::Prev => {
let byte = if args.skip { start } else { end };
let head = (self.chapter, byte);
let tail = (0..self.chapter)
.rev()
.map(|c| (c, self.chapters[c].text.len()));
for (c, byte) in iter::once(head).chain(tail) {
if let Some(index) = self.chapters[c].text[..byte].rfind(&self.query) {
self.jump_byte(c, index);
return true;
}
}
false
}
}
}
}
#[derive(argh::FromArgs)]
/// read a book
struct Args {
#[argh(positional)]
path: Option<String>,
/// background color (eg 282a36)
#[argh(option)]
bg: Option<String>,
/// foreground color (eg f8f8f2)
#[argh(option)]
fg: Option<String>,
/// print metadata and exit
#[argh(switch, short = 'm')]
meta: bool,
/// start with table of contents open
#[argh(switch, short = 't')]
toc: bool,
/// characters per line
#[argh(option, short = 'w', default = "75")]
width: u16,
}
struct Props {
colors: Colors,
chapter: usize,
byte: usize,
width: u16,
toc: bool,
}
#[derive(Default, Deserialize, Serialize)]
struct Save {
last: String,
files: HashMap<String, (usize, usize)>,
}
struct State {
save: Save,
save_path: String,
path: String,
meta: bool,
bk: Props,
}
fn init() -> Result<State, Box<dyn std::error::Error>> {
let save_path = if cfg!(windows) {
format!("{}\\bk", env::var("APPDATA")?)
} else {
format!("{}/.local/share/bk", env::var("HOME")?)
};
// XXX will silently create a new default save if ron errors but path arg works.
// revisit if/when stabilizing. ez file format upgrades
let save: io::Result<Save> = fs::read_to_string(&save_path).and_then(|s| {
ron::from_str(&s)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid save file"))
});
let args: Args = argh::from_env();
let path = match args.path {
Some(p) => Some(fs::canonicalize(p)?.to_str().unwrap().to_string()),
None => None,
};
let (path, save, chapter, byte) = match (save, path) {
(Err(e), None) => return Err(Box::new(e)),
(Err(_), Some(p)) => (p, Save::default(), 0, 0),
(Ok(s), None) => {
let &(chapter, byte) = s.files.get(&s.last).unwrap();
(s.last.clone(), s, chapter, byte)
}
(Ok(s), Some(p)) => {
if s.files.contains_key(&p) {
let &(chapter, byte) = s.files.get(&p).unwrap();
(p, s, chapter, byte)
} else {
(p, s, 0, 0)
}
}
};
// XXX oh god what
let fg = args
.fg
.map(|s| Rgb {
r: u8::from_str_radix(&s[0..2], 16).unwrap(),
g: u8::from_str_radix(&s[2..4], 16).unwrap(),
b: u8::from_str_radix(&s[4..6], 16).unwrap(),
})
.unwrap_or(style::Color::Reset);
let bg = args
.bg
.map(|s| Rgb {
r: u8::from_str_radix(&s[0..2], 16).unwrap(),
g: u8::from_str_radix(&s[2..4], 16).unwrap(),
b: u8::from_str_radix(&s[4..6], 16).unwrap(),
})
.unwrap_or(style::Color::Reset);
Ok(State {
path,
save,
save_path,
meta: args.meta,
bk: Props {
colors: Colors::new(fg, bg),
chapter,
byte,
width: args.width,
toc: args.toc,
},
})
}
fn main() {
let mut state = init().unwrap_or_else(|e| {
println!("init error: {}", e);
exit(1);
});
let epub = epub::Epub::new(&state.path, state.meta).unwrap_or_else(|e| {
println!("epub error: {}", e);
exit(1);
});
if state.meta {
println!("{}", epub.meta);
exit(0);
}
let mut bk = Bk::new(epub, state.bk);
bk.run().unwrap_or_else(|e| {
println!("run error: {}", e);
exit(1);
});
let byte = bk.chapters[bk.chapter].lines[bk.line].0;
state
.save
.files
.insert(state.path.clone(), (bk.chapter, byte));
state.save.last = state.path;
let serialized = ron::to_string(&state.save).unwrap();
fs::write(state.save_path, serialized).unwrap_or_else(|e| {
println!("error saving state: {}", e);
exit(1);
});
}
| archArgs { | identifier_name |
main.rs | use crossterm::{
cursor,
event::{self, DisableMouseCapture, EnableMouseCapture, Event},
queue,
style::{self, Color::Rgb, Colors, Print, SetColors},
terminal,
};
use serde::{Deserialize, Serialize};
use std::{
cmp::min,
collections::HashMap,
env, fs,
io::{self, Write},
iter,
process::exit,
};
use unicode_width::UnicodeWidthChar;
mod view;
use view::{Page, Toc, View};
mod epub;
fn wrap(text: &str, max_cols: usize) -> Vec<(usize, usize)> {
let mut lines = Vec::new();
// bytes
let mut start = 0;
let mut end = 0;
// cols after the break
let mut after = 0;
// cols of unbroken line
let mut cols = 0;
// are we breaking on whitespace?
let mut space = false;
// should probably use unicode_segmentation grapheme_indices
for (i, c) in text.char_indices() {
// https://github.com/unicode-rs/unicode-width/issues/6
let char_cols = c.width().unwrap_or(0);
cols += char_cols;
match c {
'\n' => {
after = 0;
end = i;
space = true;
cols = max_cols + 1;
}
' ' => {
after = 0;
end = i;
space = true;
}
'-' | '—' if cols <= max_cols => {
after = 0;
end = i + c.len_utf8();
space = false;
}
_ => after += char_cols,
}
if cols > max_cols {
// break a single long word
if cols == after {
after = char_cols;
end = i;
space = false;
}
lines.push((start, end));
start = end;
if space {
start += 1;
}
cols = after;
}
}
lines
}
struct SearchArgs {
dir: Direction,
skip: bool,
}
#[derive(Clone)]
enum Direction {
Next,
Prev,
}
pub struct Bk<'a> {
quit: bool,
chapters: Vec<epub::Chapter>,
// position in the book
chapter: usize,
line: usize,
mark: HashMap<char, (usize, usize)>,
links: HashMap<String, (usize, usize)>,
// layout
colors: Colors,
cols: u16,
rows: usize,
max_width: u16,
// view state
view: &'a dyn View,
cursor: usize,
dir: Direction,
meta: Vec<String>,
query: String,
}
impl Bk<'_> {
fn new(epub: epub::Epub, args: Props) -> Self {
let (cols, rows) = terminal::size().unwrap();
let width = min(cols, args.width) as usize;
let meta = wrap(&epub.meta, width)
.into_iter()
.map(|(a, b)| String::from(&epub.meta[a..b]))
.collect();
let mut chapters = epub.chapters;
for c in &mut chapters {
c.lines = wrap(&c.text, width);
if c.title.chars().count() > width {
c.title = c
.title
.chars()
.take(width - 1)
.chain(std::iter::once('…'))
.collect();
}
}
let mut bk = Bk {
quit: false,
chapters,
chapter: 0,
line: 0,
mark: HashMap::new(),
links: epub.links,
colors: args.colors,
cols,
rows: rows as usize,
max_width: args.width,
view: if args.toc { &Toc } else { &Page },
cursor: 0,
dir: Direction::Next,
meta,
query: String::new(),
};
bk.jump_byte(args.chapter, args.byte);
bk.mark('\'');
bk
}
fn run(&mut self) -> io::Result<()> {
let mut stdout = io::stdout();
queue!(
stdout,
terminal::EnterAlternateScreen,
cursor::Hide,
EnableMouseCapture,
)?;
terminal::enable_raw_mode()?;
let mut render = |bk: &Bk| {
queue!(
stdout,
Print(style::Attribute::Reset),
SetColors(bk.colors),
terminal::Clear(terminal::ClearType::All),
)
.unwrap();
for (i, line) in bk.view.render(bk).iter().enumerate() {
queue!(stdout, cursor::MoveTo(bk.pad(), i as u16), Print(line)).unwrap();
}
queue!(stdout, cursor::MoveTo(bk.pad(), bk.cursor as u16)).unwrap();
stdout.flush().unwrap();
};
render(self);
loop {
match event::read()? {
Event::Key(e) => self.view.on_key(self, e.code),
Event::Mouse(e) => {
// XXX idk seems lame
if e.kind == event::MouseEventKind::Moved {
continue;
}
self.view.on_mouse(self, e);
}
Event::Resize(cols, rows) => {
self.rows = rows as usize;
if cols != self.cols {
self.cols = cols;
let width = min(cols, self.max_width) as usize;
for c in &mut self.chapters {
c.lines = wrap(&c.text, width);
}
}
self.view.on_resize(self);
// XXX marks aren't updated
}
}
if self.quit {
break;
}
render(self);
}
queue!(
stdout,
terminal::LeaveAlternateScreen,
cursor::Show,
DisableMouseCapture
)?;
terminal::disable_raw_mode()
}
fn jump(&mut self, (c, l): (usize, usize)) {
self.mark('\'');
self.chapter = c;
self.line = l;
}
fn jump_byte(&mut self, c: usize, byte: usize) {
self.chapter = c;
self.line = match self.chapters[c]
.lines
.binary_search_by_key(&byte, |&(a, _)| a)
{
Ok(n) => n,
Err(n) => n - 1,
}
}
fn jump_reset(&mut self) {
let &(c, l) = self.mark.get(&'\'').unwrap();
self.chapter = c;
self.line = l;
}
fn mark(&mut self, c: char) {
self.mark.insert(c, (self.chapter, self.line));
}
fn pad(&self) -> u16 {
self.cols.saturating_sub(self.max_width) / 2
}
fn search(&mut self, args: SearchArgs) -> bool {
let (start, end) = self.chapters[self.chapter].lines[self.line];
match args.dir {
Direction::Next => {
let byte = if args.skip { end } else { start };
let head = (self.chapter, byte);
let tail = (self.chapter + 1..self.chapters.len() - 1).map(|n| (n, 0));
for (c, byte) in iter::once(head).chain(tail) {
if let Some(index) = self.chapters[c].text[byte..].find(&self.query) {
self.jump_byte(c, index + byte);
return true;
}
}
false
}
Direction::Prev => {
let byte = if args.skip { start } else { end };
let head = (self.chapter, byte);
let tail = (0..self.chapter)
.rev()
.map(|c| (c, self.chapters[c].text.len()));
for (c, byte) in iter::once(head).chain(tail) {
if let Some(index) = self.chapters[c].text[..byte].rfind(&self.query) {
| }
false
}
}
}
}
#[derive(argh::FromArgs)]
/// read a book
struct Args {
#[argh(positional)]
path: Option<String>,
/// background color (eg 282a36)
#[argh(option)]
bg: Option<String>,
/// foreground color (eg f8f8f2)
#[argh(option)]
fg: Option<String>,
/// print metadata and exit
#[argh(switch, short = 'm')]
meta: bool,
/// start with table of contents open
#[argh(switch, short = 't')]
toc: bool,
/// characters per line
#[argh(option, short = 'w', default = "75")]
width: u16,
}
struct Props {
colors: Colors,
chapter: usize,
byte: usize,
width: u16,
toc: bool,
}
#[derive(Default, Deserialize, Serialize)]
struct Save {
last: String,
files: HashMap<String, (usize, usize)>,
}
struct State {
save: Save,
save_path: String,
path: String,
meta: bool,
bk: Props,
}
fn init() -> Result<State, Box<dyn std::error::Error>> {
let save_path = if cfg!(windows) {
format!("{}\\bk", env::var("APPDATA")?)
} else {
format!("{}/.local/share/bk", env::var("HOME")?)
};
// XXX will silently create a new default save if ron errors but path arg works.
// revisit if/when stabilizing. ez file format upgrades
let save: io::Result<Save> = fs::read_to_string(&save_path).and_then(|s| {
ron::from_str(&s)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid save file"))
});
let args: Args = argh::from_env();
let path = match args.path {
Some(p) => Some(fs::canonicalize(p)?.to_str().unwrap().to_string()),
None => None,
};
let (path, save, chapter, byte) = match (save, path) {
(Err(e), None) => return Err(Box::new(e)),
(Err(_), Some(p)) => (p, Save::default(), 0, 0),
(Ok(s), None) => {
let &(chapter, byte) = s.files.get(&s.last).unwrap();
(s.last.clone(), s, chapter, byte)
}
(Ok(s), Some(p)) => {
if s.files.contains_key(&p) {
let &(chapter, byte) = s.files.get(&p).unwrap();
(p, s, chapter, byte)
} else {
(p, s, 0, 0)
}
}
};
// XXX oh god what
let fg = args
.fg
.map(|s| Rgb {
r: u8::from_str_radix(&s[0..2], 16).unwrap(),
g: u8::from_str_radix(&s[2..4], 16).unwrap(),
b: u8::from_str_radix(&s[4..6], 16).unwrap(),
})
.unwrap_or(style::Color::Reset);
let bg = args
.bg
.map(|s| Rgb {
r: u8::from_str_radix(&s[0..2], 16).unwrap(),
g: u8::from_str_radix(&s[2..4], 16).unwrap(),
b: u8::from_str_radix(&s[4..6], 16).unwrap(),
})
.unwrap_or(style::Color::Reset);
Ok(State {
path,
save,
save_path,
meta: args.meta,
bk: Props {
colors: Colors::new(fg, bg),
chapter,
byte,
width: args.width,
toc: args.toc,
},
})
}
fn main() {
let mut state = init().unwrap_or_else(|e| {
println!("init error: {}", e);
exit(1);
});
let epub = epub::Epub::new(&state.path, state.meta).unwrap_or_else(|e| {
println!("epub error: {}", e);
exit(1);
});
if state.meta {
println!("{}", epub.meta);
exit(0);
}
let mut bk = Bk::new(epub, state.bk);
bk.run().unwrap_or_else(|e| {
println!("run error: {}", e);
exit(1);
});
let byte = bk.chapters[bk.chapter].lines[bk.line].0;
state
.save
.files
.insert(state.path.clone(), (bk.chapter, byte));
state.save.last = state.path;
let serialized = ron::to_string(&state.save).unwrap();
fs::write(state.save_path, serialized).unwrap_or_else(|e| {
println!("error saving state: {}", e);
exit(1);
});
}
| self.jump_byte(c, index);
return true;
}
| conditional_block |
main.rs | use crossterm::{
cursor,
event::{self, DisableMouseCapture, EnableMouseCapture, Event},
queue,
style::{self, Color::Rgb, Colors, Print, SetColors},
terminal,
};
use serde::{Deserialize, Serialize};
use std::{
cmp::min,
collections::HashMap,
env, fs,
io::{self, Write},
iter,
process::exit,
};
use unicode_width::UnicodeWidthChar;
mod view;
use view::{Page, Toc, View};
mod epub;
fn wrap(text: &str, max_cols: usize) -> Vec<(usize, usize)> {
let mut lines = Vec::new();
// bytes
let mut start = 0;
let mut end = 0;
// cols after the break
let mut after = 0;
// cols of unbroken line
let mut cols = 0;
// are we breaking on whitespace?
let mut space = false;
// should probably use unicode_segmentation grapheme_indices
for (i, c) in text.char_indices() {
// https://github.com/unicode-rs/unicode-width/issues/6
let char_cols = c.width().unwrap_or(0);
cols += char_cols;
match c {
'\n' => {
after = 0;
end = i;
space = true;
cols = max_cols + 1;
}
' ' => {
after = 0;
end = i;
space = true;
}
'-' | '—' if cols <= max_cols => {
after = 0;
end = i + c.len_utf8();
space = false;
}
_ => after += char_cols,
}
if cols > max_cols {
// break a single long word
if cols == after {
after = char_cols;
end = i;
space = false;
}
lines.push((start, end));
start = end;
if space {
start += 1;
}
cols = after;
}
}
lines
}
struct SearchArgs {
dir: Direction,
skip: bool,
}
#[derive(Clone)]
enum Direction {
Next,
Prev,
}
pub struct Bk<'a> {
quit: bool,
chapters: Vec<epub::Chapter>,
// position in the book
chapter: usize,
line: usize,
mark: HashMap<char, (usize, usize)>,
links: HashMap<String, (usize, usize)>,
// layout
colors: Colors,
cols: u16,
rows: usize,
max_width: u16,
// view state
view: &'a dyn View,
cursor: usize,
dir: Direction,
meta: Vec<String>,
query: String,
}
impl Bk<'_> {
fn new(epub: epub::Epub, args: Props) -> Self {
let (cols, rows) = terminal::size().unwrap();
let width = min(cols, args.width) as usize;
let meta = wrap(&epub.meta, width)
.into_iter()
.map(|(a, b)| String::from(&epub.meta[a..b]))
.collect();
let mut chapters = epub.chapters;
for c in &mut chapters {
c.lines = wrap(&c.text, width);
if c.title.chars().count() > width {
c.title = c
.title
.chars()
.take(width - 1)
.chain(std::iter::once('…'))
.collect();
}
}
let mut bk = Bk {
quit: false,
chapters,
chapter: 0,
line: 0,
mark: HashMap::new(),
links: epub.links,
colors: args.colors,
cols,
rows: rows as usize,
max_width: args.width,
view: if args.toc { &Toc } else { &Page },
cursor: 0,
dir: Direction::Next,
meta,
query: String::new(),
};
bk.jump_byte(args.chapter, args.byte);
bk.mark('\'');
bk
}
fn run(&mut self) -> io::Result<()> {
let mut stdout = io::stdout();
queue!(
stdout,
terminal::EnterAlternateScreen,
cursor::Hide,
EnableMouseCapture,
)?;
terminal::enable_raw_mode()?;
let mut render = |bk: &Bk| {
queue!(
stdout,
Print(style::Attribute::Reset),
SetColors(bk.colors),
terminal::Clear(terminal::ClearType::All),
)
.unwrap();
for (i, line) in bk.view.render(bk).iter().enumerate() {
queue!(stdout, cursor::MoveTo(bk.pad(), i as u16), Print(line)).unwrap();
}
queue!(stdout, cursor::MoveTo(bk.pad(), bk.cursor as u16)).unwrap();
stdout.flush().unwrap();
};
render(self);
loop {
match event::read()? {
Event::Key(e) => self.view.on_key(self, e.code),
Event::Mouse(e) => {
// XXX idk seems lame
if e.kind == event::MouseEventKind::Moved {
continue;
}
self.view.on_mouse(self, e);
}
Event::Resize(cols, rows) => {
self.rows = rows as usize;
if cols != self.cols {
self.cols = cols;
let width = min(cols, self.max_width) as usize;
for c in &mut self.chapters {
c.lines = wrap(&c.text, width);
}
}
self.view.on_resize(self);
// XXX marks aren't updated
}
}
if self.quit {
break;
}
render(self);
}
queue!(
stdout,
terminal::LeaveAlternateScreen,
cursor::Show,
DisableMouseCapture
)?;
terminal::disable_raw_mode()
}
fn jump(&mut self, (c, l): (usize, usize)) {
self.mark('\'');
self.chapter = c;
self.line = l;
}
fn jump_byte(&mut self, c: usize, byte: usize) {
self.chapter = c;
self.line = match self.chapters[c]
.lines
.binary_search_by_key(&byte, |&(a, _)| a)
{
Ok(n) => n,
Err(n) => n - 1,
}
}
fn jump_reset(&mut self) {
let &(c, l) = self.mark.get(&'\'').unwrap();
self.chapter = c;
self.line = l;
}
fn mark(&mut self, c: char) {
self.mark.insert(c, (self.chapter, self.line));
}
fn pad(&self) -> u16 {
self.cols.saturating_sub(self.max_width) / 2
}
fn search(&mut self, args: SearchArgs) -> bool {
let (start, end) = self.chapters[self.chapter].lines[self.line];
match args.dir {
Direction::Next => {
let byte = if args.skip { end } else { start };
let head = (self.chapter, byte);
let tail = (self.chapter + 1..self.chapters.len() - 1).map(|n| (n, 0));
for (c, byte) in iter::once(head).chain(tail) {
if let Some(index) = self.chapters[c].text[byte..].find(&self.query) {
self.jump_byte(c, index + byte);
return true;
}
}
false
}
Direction::Prev => {
let byte = if args.skip { start } else { end };
let head = (self.chapter, byte);
let tail = (0..self.chapter)
.rev()
.map(|c| (c, self.chapters[c].text.len()));
for (c, byte) in iter::once(head).chain(tail) {
if let Some(index) = self.chapters[c].text[..byte].rfind(&self.query) {
self.jump_byte(c, index);
return true;
}
}
false
}
}
}
}
#[derive(argh::FromArgs)]
/// read a book
struct Args {
#[argh(positional)]
path: Option<String>,
/// background color (eg 282a36)
#[argh(option)]
bg: Option<String>,
/// foreground color (eg f8f8f2)
#[argh(option)]
fg: Option<String>,
/// print metadata and exit
#[argh(switch, short = 'm')]
meta: bool,
/// start with table of contents open
#[argh(switch, short = 't')]
toc: bool,
/// characters per line
#[argh(option, short = 'w', default = "75")]
width: u16,
}
struct Props {
colors: Colors,
chapter: usize,
byte: usize,
width: u16,
toc: bool,
}
#[derive(Default, Deserialize, Serialize)]
struct Save {
last: String,
files: HashMap<String, (usize, usize)>,
}
struct State {
save: Save,
save_path: String,
path: String,
meta: bool,
bk: Props,
}
fn init() -> Result<State, Box<dyn std::error::Error>> {
let save_path = if cfg!(windows) {
format!("{}\\bk", env::var("APPDATA")?)
} else {
format!("{}/.local/share/bk", env::var("HOME")?)
};
// XXX will silently create a new default save if ron errors but path arg works.
// revisit if/when stabilizing. ez file format upgrades
let save: io::Result<Save> = fs::read_to_string(&save_path).and_then(|s| {
ron::from_str(&s)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid save file"))
});
let args: Args = argh::from_env();
let path = match args.path {
Some(p) => Some(fs::canonicalize(p)?.to_str().unwrap().to_string()),
None => None,
};
let (path, save, chapter, byte) = match (save, path) {
(Err(e), None) => return Err(Box::new(e)),
(Err(_), Some(p)) => (p, Save::default(), 0, 0),
(Ok(s), None) => {
let &(chapter, byte) = s.files.get(&s.last).unwrap();
(s.last.clone(), s, chapter, byte)
}
(Ok(s), Some(p)) => {
if s.files.contains_key(&p) {
let &(chapter, byte) = s.files.get(&p).unwrap();
(p, s, chapter, byte)
} else {
(p, s, 0, 0)
}
}
};
// XXX oh god what
let fg = args
.fg
.map(|s| Rgb {
r: u8::from_str_radix(&s[0..2], 16).unwrap(),
g: u8::from_str_radix(&s[2..4], 16).unwrap(),
b: u8::from_str_radix(&s[4..6], 16).unwrap(),
})
.unwrap_or(style::Color::Reset);
let bg = args
.bg
.map(|s| Rgb {
r: u8::from_str_radix(&s[0..2], 16).unwrap(),
g: u8::from_str_radix(&s[2..4], 16).unwrap(),
b: u8::from_str_radix(&s[4..6], 16).unwrap(),
})
.unwrap_or(style::Color::Reset);
Ok(State {
path,
save,
save_path,
meta: args.meta,
bk: Props {
colors: Colors::new(fg, bg),
chapter,
byte,
width: args.width,
toc: args.toc,
},
})
}
fn main() {
let mut state = init().unwrap_or_else(|e| {
println!("init error: {}", e);
exit(1);
});
let epub = epub::Epub::new(&state.path, state.meta).unwrap_or_else(|e| {
println!("epub error: {}", e);
exit(1);
});
if state.meta {
println!("{}", epub.meta);
exit(0);
}
let mut bk = Bk::new(epub, state.bk);
bk.run().unwrap_or_else(|e| {
println!("run error: {}", e);
exit(1);
}); |
let byte = bk.chapters[bk.chapter].lines[bk.line].0;
state
.save
.files
.insert(state.path.clone(), (bk.chapter, byte));
state.save.last = state.path;
let serialized = ron::to_string(&state.save).unwrap();
fs::write(state.save_path, serialized).unwrap_or_else(|e| {
println!("error saving state: {}", e);
exit(1);
});
} | random_line_split | |
colors.rs | use std::io;
use std::mem;
use std::os::windows::io::AsRawHandle;
use std::str::Bytes;
use windows_sys::Win32::Foundation::HANDLE;
use windows_sys::Win32::System::Console::{
GetConsoleScreenBufferInfo, SetConsoleTextAttribute, CONSOLE_SCREEN_BUFFER_INFO,
FOREGROUND_BLUE as FG_BLUE, FOREGROUND_GREEN as FG_GREEN, FOREGROUND_INTENSITY as FG_INTENSITY,
FOREGROUND_RED as FG_RED,
};
use crate::Term;
type WORD = u16;
const FG_CYAN: WORD = FG_BLUE | FG_GREEN;
const FG_MAGENTA: WORD = FG_BLUE | FG_RED;
const FG_YELLOW: WORD = FG_GREEN | FG_RED;
const FG_WHITE: WORD = FG_BLUE | FG_GREEN | FG_RED;
/// Query the given handle for information about the console's screen buffer.
///
/// The given handle should represent a console. Otherwise, an error is
/// returned.
///
/// This corresponds to calling [`GetConsoleScreenBufferInfo`].
///
/// [`GetConsoleScreenBufferInfo`]: https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo
pub fn screen_buffer_info(h: HANDLE) -> io::Result<ScreenBufferInfo> {
unsafe {
let mut info: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed();
let rc = GetConsoleScreenBufferInfo(h, &mut info);
if rc == 0 {
return Err(io::Error::last_os_error());
}
Ok(ScreenBufferInfo(info))
}
}
/// Set the text attributes of the console represented by the given handle.
///
/// This corresponds to calling [`SetConsoleTextAttribute`].
///
/// [`SetConsoleTextAttribute`]: https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute
pub fn set_text_attributes(h: HANDLE, attributes: u16) -> io::Result<()> {
if unsafe { SetConsoleTextAttribute(h, attributes) } == 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
/// Represents console screen buffer information such as size, cursor position
/// and styling attributes.
///
/// This wraps a [`CONSOLE_SCREEN_BUFFER_INFO`].
///
/// [`CONSOLE_SCREEN_BUFFER_INFO`]: https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
#[derive(Clone)]
pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO);
impl ScreenBufferInfo {
/// Returns the character attributes associated with this console.
///
/// This corresponds to `wAttributes`.
///
/// See [`char info`] for more details.
///
/// [`char info`]: https://docs.microsoft.com/en-us/windows/console/char-info-str
pub fn attributes(&self) -> u16 {
self.0.wAttributes
}
}
/// A Windows console.
///
/// This represents a very limited set of functionality available to a Windows
/// console. In particular, it can only change text attributes such as color
/// and intensity. This may grow over time. If you need more routines, please
/// file an issue and/or PR.
///
/// There is no way to "write" to this console. Simply write to
/// stdout or stderr instead, while interleaving instructions to the console
/// to change text attributes.
///
/// A common pitfall when using a console is to forget to flush writes to
/// stdout before setting new text attributes. | cur_attr: TextAttributes,
}
#[derive(Clone, Copy, Debug)]
enum HandleKind {
Stdout,
Stderr,
}
impl HandleKind {
fn handle(&self) -> HANDLE {
match *self {
HandleKind::Stdout => io::stdout().as_raw_handle() as HANDLE,
HandleKind::Stderr => io::stderr().as_raw_handle() as HANDLE,
}
}
}
impl Console {
/// Get a console for a standard I/O stream.
fn create_for_stream(kind: HandleKind) -> io::Result<Console> {
let h = kind.handle();
let info = screen_buffer_info(h)?;
let attr = TextAttributes::from_word(info.attributes());
Ok(Console {
kind: kind,
start_attr: attr,
cur_attr: attr,
})
}
/// Create a new Console to stdout.
///
/// If there was a problem creating the console, then an error is returned.
pub fn stdout() -> io::Result<Console> {
Self::create_for_stream(HandleKind::Stdout)
}
/// Create a new Console to stderr.
///
/// If there was a problem creating the console, then an error is returned.
pub fn stderr() -> io::Result<Console> {
Self::create_for_stream(HandleKind::Stderr)
}
/// Applies the current text attributes.
fn set(&mut self) -> io::Result<()> {
set_text_attributes(self.kind.handle(), self.cur_attr.to_word())
}
/// Apply the given intensity and color attributes to the console
/// foreground.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
self.cur_attr.fg_color = color;
self.cur_attr.fg_intense = intense;
self.set()
}
/// Apply the given intensity and color attributes to the console
/// background.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
self.cur_attr.bg_color = color;
self.cur_attr.bg_intense = intense;
self.set()
}
/// Reset the console text attributes to their original settings.
///
/// The original settings correspond to the text attributes on the console
/// when this `Console` value was created.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn reset(&mut self) -> io::Result<()> {
self.cur_attr = self.start_attr;
self.set()
}
}
/// A representation of text attributes for the Windows console.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TextAttributes {
fg_color: Color,
fg_intense: Intense,
bg_color: Color,
bg_intense: Intense,
}
impl TextAttributes {
fn to_word(&self) -> WORD {
let mut w = 0;
w |= self.fg_color.to_fg();
w |= self.fg_intense.to_fg();
w |= self.bg_color.to_bg();
w |= self.bg_intense.to_bg();
w
}
fn from_word(word: WORD) -> TextAttributes {
TextAttributes {
fg_color: Color::from_fg(word),
fg_intense: Intense::from_fg(word),
bg_color: Color::from_bg(word),
bg_intense: Intense::from_bg(word),
}
}
}
/// Whether to use intense colors or not.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Intense {
Yes,
No,
}
impl Intense {
fn to_bg(&self) -> WORD {
self.to_fg() << 4
}
fn from_bg(word: WORD) -> Intense {
Intense::from_fg(word >> 4)
}
fn to_fg(&self) -> WORD {
match *self {
Intense::No => 0,
Intense::Yes => FG_INTENSITY,
}
}
fn from_fg(word: WORD) -> Intense {
if word & FG_INTENSITY > 0 {
Intense::Yes
} else {
Intense::No
}
}
}
/// The set of available colors for use with a Windows console.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Color {
Black,
Blue,
Green,
Red,
Cyan,
Magenta,
Yellow,
White,
}
impl Color {
fn to_bg(&self) -> WORD {
self.to_fg() << 4
}
fn from_bg(word: WORD) -> Color {
Color::from_fg(word >> 4)
}
fn to_fg(&self) -> WORD {
match *self {
Color::Black => 0,
Color::Blue => FG_BLUE,
Color::Green => FG_GREEN,
Color::Red => FG_RED,
Color::Cyan => FG_CYAN,
Color::Magenta => FG_MAGENTA,
Color::Yellow => FG_YELLOW,
Color::White => FG_WHITE,
}
}
fn from_fg(word: WORD) -> Color {
match word & 0b111 {
FG_BLUE => Color::Blue,
FG_GREEN => Color::Green,
FG_RED => Color::Red,
FG_CYAN => Color::Cyan,
FG_MAGENTA => Color::Magenta,
FG_YELLOW => Color::Yellow,
FG_WHITE => Color::White,
_ => Color::Black,
}
}
}
pub fn console_colors(out: &Term, mut con: Console, bytes: &[u8]) -> io::Result<()> {
use crate::ansi::AnsiCodeIterator;
use std::str::from_utf8;
let s = from_utf8(bytes).expect("data to be printed is not an ansi string");
let mut iter = AnsiCodeIterator::new(s);
while !iter.rest_slice().is_empty() {
if let Some((part, is_esc)) = iter.next() {
if !is_esc {
out.write_through_common(part.as_bytes())?;
} else if part == "\x1b[0m" {
con.reset()?;
} else if let Some((intense, color, fg_bg)) = driver(parse_color, part) {
match fg_bg {
FgBg::Foreground => con.fg(intense, color),
FgBg::Background => con.bg(intense, color),
}?;
} else if driver(parse_attr, part).is_none() {
out.write_through_common(part.as_bytes())?;
}
}
}
Ok(())
}
#[derive(Debug, PartialEq, Eq)]
enum FgBg {
Foreground,
Background,
}
impl FgBg {
fn new(byte: u8) -> Option<Self> {
match byte {
b'3' => Some(Self::Foreground),
b'4' => Some(Self::Background),
_ => None,
}
}
}
fn driver<Out>(parse: fn(Bytes<'_>) -> Option<Out>, part: &str) -> Option<Out> {
let mut bytes = part.bytes();
loop {
while bytes.next()? != b'\x1b' {}
if let ret @ Some(_) = (parse)(bytes.clone()) {
return ret;
}
}
}
// `driver(parse_color, s)` parses the equivalent of the regex
// \x1b\[(3|4)8;5;(8|9|1[0-5])m
// for intense or
// \x1b\[(3|4)([0-7])m
// for normal
fn parse_color(mut bytes: Bytes<'_>) -> Option<(Intense, Color, FgBg)> {
parse_prefix(&mut bytes)?;
let fg_bg = FgBg::new(bytes.next()?)?;
let (intense, color) = match bytes.next()? {
b @ b'0'..=b'7' => (Intense::No, normal_color_ansi_from_byte(b)?),
b'8' => {
if &[bytes.next()?, bytes.next()?, bytes.next()?] != b";5;" {
return None;
}
(Intense::Yes, parse_intense_color_ansi(&mut bytes)?)
}
_ => return None,
};
parse_suffix(&mut bytes)?;
Some((intense, color, fg_bg))
}
// `driver(parse_attr, s)` parses the equivalent of the regex
// \x1b\[([1-8])m
fn parse_attr(mut bytes: Bytes<'_>) -> Option<u8> {
parse_prefix(&mut bytes)?;
let attr = match bytes.next()? {
attr @ b'1'..=b'8' => attr,
_ => return None,
};
parse_suffix(&mut bytes)?;
Some(attr)
}
fn parse_prefix(bytes: &mut Bytes<'_>) -> Option<()> {
if bytes.next()? == b'[' {
Some(())
} else {
None
}
}
fn parse_intense_color_ansi(bytes: &mut Bytes<'_>) -> Option<Color> {
let color = match bytes.next()? {
b'8' => Color::Black,
b'9' => Color::Red,
b'1' => match bytes.next()? {
b'0' => Color::Green,
b'1' => Color::Yellow,
b'2' => Color::Blue,
b'3' => Color::Magenta,
b'4' => Color::Cyan,
b'5' => Color::White,
_ => return None,
},
_ => return None,
};
Some(color)
}
fn normal_color_ansi_from_byte(b: u8) -> Option<Color> {
let color = match b {
b'0' => Color::Black,
b'1' => Color::Red,
b'2' => Color::Green,
b'3' => Color::Yellow,
b'4' => Color::Blue,
b'5' => Color::Magenta,
b'6' => Color::Cyan,
b'7' => Color::White,
_ => return None,
};
Some(color)
}
fn parse_suffix(bytes: &mut Bytes<'_>) -> Option<()> {
if bytes.next()? == b'm' {
Some(())
} else {
None
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn color_parsing() {
let intense_color = "leading bytes \x1b[38;5;10m trailing bytes";
let parsed = driver(parse_color, intense_color).unwrap();
assert_eq!(parsed, (Intense::Yes, Color::Green, FgBg::Foreground));
let normal_color = "leading bytes \x1b[40m trailing bytes";
let parsed = driver(parse_color, normal_color).unwrap();
assert_eq!(parsed, (Intense::No, Color::Black, FgBg::Background));
}
#[test]
fn attr_parsing() {
let attr = "leading bytes \x1b[1m trailing bytes";
let parsed = driver(parse_attr, attr).unwrap();
assert_eq!(parsed, b'1');
}
} | #[derive(Debug)]
pub struct Console {
kind: HandleKind,
start_attr: TextAttributes, | random_line_split |
colors.rs | use std::io;
use std::mem;
use std::os::windows::io::AsRawHandle;
use std::str::Bytes;
use windows_sys::Win32::Foundation::HANDLE;
use windows_sys::Win32::System::Console::{
GetConsoleScreenBufferInfo, SetConsoleTextAttribute, CONSOLE_SCREEN_BUFFER_INFO,
FOREGROUND_BLUE as FG_BLUE, FOREGROUND_GREEN as FG_GREEN, FOREGROUND_INTENSITY as FG_INTENSITY,
FOREGROUND_RED as FG_RED,
};
use crate::Term;
type WORD = u16;
const FG_CYAN: WORD = FG_BLUE | FG_GREEN;
const FG_MAGENTA: WORD = FG_BLUE | FG_RED;
const FG_YELLOW: WORD = FG_GREEN | FG_RED;
const FG_WHITE: WORD = FG_BLUE | FG_GREEN | FG_RED;
/// Query the given handle for information about the console's screen buffer.
///
/// The given handle should represent a console. Otherwise, an error is
/// returned.
///
/// This corresponds to calling [`GetConsoleScreenBufferInfo`].
///
/// [`GetConsoleScreenBufferInfo`]: https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo
pub fn screen_buffer_info(h: HANDLE) -> io::Result<ScreenBufferInfo> {
unsafe {
let mut info: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed();
let rc = GetConsoleScreenBufferInfo(h, &mut info);
if rc == 0 {
return Err(io::Error::last_os_error());
}
Ok(ScreenBufferInfo(info))
}
}
/// Set the text attributes of the console represented by the given handle.
///
/// This corresponds to calling [`SetConsoleTextAttribute`].
///
/// [`SetConsoleTextAttribute`]: https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute
pub fn set_text_attributes(h: HANDLE, attributes: u16) -> io::Result<()> {
if unsafe { SetConsoleTextAttribute(h, attributes) } == 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
/// Represents console screen buffer information such as size, cursor position
/// and styling attributes.
///
/// This wraps a [`CONSOLE_SCREEN_BUFFER_INFO`].
///
/// [`CONSOLE_SCREEN_BUFFER_INFO`]: https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
#[derive(Clone)]
pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO);
impl ScreenBufferInfo {
/// Returns the character attributes associated with this console.
///
/// This corresponds to `wAttributes`.
///
/// See [`char info`] for more details.
///
/// [`char info`]: https://docs.microsoft.com/en-us/windows/console/char-info-str
pub fn attributes(&self) -> u16 {
self.0.wAttributes
}
}
/// A Windows console.
///
/// This represents a very limited set of functionality available to a Windows
/// console. In particular, it can only change text attributes such as color
/// and intensity. This may grow over time. If you need more routines, please
/// file an issue and/or PR.
///
/// There is no way to "write" to this console. Simply write to
/// stdout or stderr instead, while interleaving instructions to the console
/// to change text attributes.
///
/// A common pitfall when using a console is to forget to flush writes to
/// stdout before setting new text attributes.
#[derive(Debug)]
pub struct Console {
kind: HandleKind,
start_attr: TextAttributes,
cur_attr: TextAttributes,
}
#[derive(Clone, Copy, Debug)]
enum HandleKind {
Stdout,
Stderr,
}
impl HandleKind {
fn handle(&self) -> HANDLE {
match *self {
HandleKind::Stdout => io::stdout().as_raw_handle() as HANDLE,
HandleKind::Stderr => io::stderr().as_raw_handle() as HANDLE,
}
}
}
impl Console {
/// Get a console for a standard I/O stream.
fn create_for_stream(kind: HandleKind) -> io::Result<Console> {
let h = kind.handle();
let info = screen_buffer_info(h)?;
let attr = TextAttributes::from_word(info.attributes());
Ok(Console {
kind: kind,
start_attr: attr,
cur_attr: attr,
})
}
/// Create a new Console to stdout.
///
/// If there was a problem creating the console, then an error is returned.
pub fn stdout() -> io::Result<Console> {
Self::create_for_stream(HandleKind::Stdout)
}
/// Create a new Console to stderr.
///
/// If there was a problem creating the console, then an error is returned.
pub fn stderr() -> io::Result<Console> {
Self::create_for_stream(HandleKind::Stderr)
}
/// Applies the current text attributes.
fn set(&mut self) -> io::Result<()> {
set_text_attributes(self.kind.handle(), self.cur_attr.to_word())
}
/// Apply the given intensity and color attributes to the console
/// foreground.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
self.cur_attr.fg_color = color;
self.cur_attr.fg_intense = intense;
self.set()
}
/// Apply the given intensity and color attributes to the console
/// background.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
self.cur_attr.bg_color = color;
self.cur_attr.bg_intense = intense;
self.set()
}
/// Reset the console text attributes to their original settings.
///
/// The original settings correspond to the text attributes on the console
/// when this `Console` value was created.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn reset(&mut self) -> io::Result<()> {
self.cur_attr = self.start_attr;
self.set()
}
}
/// A representation of text attributes for the Windows console.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TextAttributes {
fg_color: Color,
fg_intense: Intense,
bg_color: Color,
bg_intense: Intense,
}
impl TextAttributes {
fn to_word(&self) -> WORD {
let mut w = 0;
w |= self.fg_color.to_fg();
w |= self.fg_intense.to_fg();
w |= self.bg_color.to_bg();
w |= self.bg_intense.to_bg();
w
}
fn from_word(word: WORD) -> TextAttributes {
TextAttributes {
fg_color: Color::from_fg(word),
fg_intense: Intense::from_fg(word),
bg_color: Color::from_bg(word),
bg_intense: Intense::from_bg(word),
}
}
}
/// Whether to use intense colors or not.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Intense {
Yes,
No,
}
impl Intense {
fn to_bg(&self) -> WORD {
self.to_fg() << 4
}
fn from_bg(word: WORD) -> Intense {
Intense::from_fg(word >> 4)
}
fn to_fg(&self) -> WORD {
match *self {
Intense::No => 0,
Intense::Yes => FG_INTENSITY,
}
}
fn from_fg(word: WORD) -> Intense {
if word & FG_INTENSITY > 0 {
Intense::Yes
} else {
Intense::No
}
}
}
/// The set of available colors for use with a Windows console.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Color {
Black,
Blue,
Green,
Red,
Cyan,
Magenta,
Yellow,
White,
}
impl Color {
fn to_bg(&self) -> WORD {
self.to_fg() << 4
}
fn from_bg(word: WORD) -> Color {
Color::from_fg(word >> 4)
}
fn to_fg(&self) -> WORD {
match *self {
Color::Black => 0,
Color::Blue => FG_BLUE,
Color::Green => FG_GREEN,
Color::Red => FG_RED,
Color::Cyan => FG_CYAN,
Color::Magenta => FG_MAGENTA,
Color::Yellow => FG_YELLOW,
Color::White => FG_WHITE,
}
}
fn from_fg(word: WORD) -> Color {
match word & 0b111 {
FG_BLUE => Color::Blue,
FG_GREEN => Color::Green,
FG_RED => Color::Red,
FG_CYAN => Color::Cyan,
FG_MAGENTA => Color::Magenta,
FG_YELLOW => Color::Yellow,
FG_WHITE => Color::White,
_ => Color::Black,
}
}
}
pub fn console_colors(out: &Term, mut con: Console, bytes: &[u8]) -> io::Result<()> {
use crate::ansi::AnsiCodeIterator;
use std::str::from_utf8;
let s = from_utf8(bytes).expect("data to be printed is not an ansi string");
let mut iter = AnsiCodeIterator::new(s);
while !iter.rest_slice().is_empty() {
if let Some((part, is_esc)) = iter.next() {
if !is_esc {
out.write_through_common(part.as_bytes())?;
} else if part == "\x1b[0m" {
con.reset()?;
} else if let Some((intense, color, fg_bg)) = driver(parse_color, part) {
match fg_bg {
FgBg::Foreground => con.fg(intense, color),
FgBg::Background => con.bg(intense, color),
}?;
} else if driver(parse_attr, part).is_none() {
out.write_through_common(part.as_bytes())?;
}
}
}
Ok(())
}
#[derive(Debug, PartialEq, Eq)]
enum FgBg {
Foreground,
Background,
}
impl FgBg {
fn new(byte: u8) -> Option<Self> {
match byte {
b'3' => Some(Self::Foreground),
b'4' => Some(Self::Background),
_ => None,
}
}
}
fn driver<Out>(parse: fn(Bytes<'_>) -> Option<Out>, part: &str) -> Option<Out> {
let mut bytes = part.bytes();
loop {
while bytes.next()? != b'\x1b' {}
if let ret @ Some(_) = (parse)(bytes.clone()) {
return ret;
}
}
}
// `driver(parse_color, s)` parses the equivalent of the regex
// \x1b\[(3|4)8;5;(8|9|1[0-5])m
// for intense or
// \x1b\[(3|4)([0-7])m
// for normal
fn parse_color(mut bytes: Bytes<'_>) -> Option<(Intense, Color, FgBg)> {
parse_prefix(&mut bytes)?;
let fg_bg = FgBg::new(bytes.next()?)?;
let (intense, color) = match bytes.next()? {
b @ b'0'..=b'7' => (Intense::No, normal_color_ansi_from_byte(b)?),
b'8' => {
if &[bytes.next()?, bytes.next()?, bytes.next()?] != b";5;" {
return None;
}
(Intense::Yes, parse_intense_color_ansi(&mut bytes)?)
}
_ => return None,
};
parse_suffix(&mut bytes)?;
Some((intense, color, fg_bg))
}
// `driver(parse_attr, s)` parses the equivalent of the regex
// \x1b\[([1-8])m
fn parse_attr(mut bytes: Bytes<'_>) -> Option<u8> {
parse_prefix(&mut bytes)?;
let attr = match bytes.next()? {
attr @ b'1'..=b'8' => attr,
_ => return None,
};
parse_suffix(&mut bytes)?;
Some(attr)
}
fn parse_prefix(bytes: &mut Bytes<'_>) -> Option<()> {
if bytes.next()? == b'[' {
Some(())
} else {
None
}
}
fn parse_intense_color_ansi(bytes: &mut Bytes<'_>) -> Option<Color> {
let color = match bytes.next()? {
b'8' => Color::Black,
b'9' => Color::Red,
b'1' => match bytes.next()? {
b'0' => Color::Green,
b'1' => Color::Yellow,
b'2' => Color::Blue,
b'3' => Color::Magenta,
b'4' => Color::Cyan,
b'5' => Color::White,
_ => return None,
},
_ => return None,
};
Some(color)
}
fn normal_color_ansi_from_byte(b: u8) -> Option<Color> {
let color = match b {
b'0' => Color::Black,
b'1' => Color::Red,
b'2' => Color::Green,
b'3' => Color::Yellow,
b'4' => Color::Blue,
b'5' => Color::Magenta,
b'6' => Color::Cyan,
b'7' => Color::White,
_ => return None,
};
Some(color)
}
fn parse_suffix(bytes: &mut Bytes<'_>) -> Option<()> {
if bytes.next()? == b'm' {
Some(())
} else {
None
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn color_parsing() {
let intense_color = "leading bytes \x1b[38;5;10m trailing bytes";
let parsed = driver(parse_color, intense_color).unwrap();
assert_eq!(parsed, (Intense::Yes, Color::Green, FgBg::Foreground));
let normal_color = "leading bytes \x1b[40m trailing bytes";
let parsed = driver(parse_color, normal_color).unwrap();
assert_eq!(parsed, (Intense::No, Color::Black, FgBg::Background));
}
#[test]
fn attr_parsing() |
}
| {
let attr = "leading bytes \x1b[1m trailing bytes";
let parsed = driver(parse_attr, attr).unwrap();
assert_eq!(parsed, b'1');
} | identifier_body |
colors.rs | use std::io;
use std::mem;
use std::os::windows::io::AsRawHandle;
use std::str::Bytes;
use windows_sys::Win32::Foundation::HANDLE;
use windows_sys::Win32::System::Console::{
GetConsoleScreenBufferInfo, SetConsoleTextAttribute, CONSOLE_SCREEN_BUFFER_INFO,
FOREGROUND_BLUE as FG_BLUE, FOREGROUND_GREEN as FG_GREEN, FOREGROUND_INTENSITY as FG_INTENSITY,
FOREGROUND_RED as FG_RED,
};
use crate::Term;
type WORD = u16;
const FG_CYAN: WORD = FG_BLUE | FG_GREEN;
const FG_MAGENTA: WORD = FG_BLUE | FG_RED;
const FG_YELLOW: WORD = FG_GREEN | FG_RED;
const FG_WHITE: WORD = FG_BLUE | FG_GREEN | FG_RED;
/// Query the given handle for information about the console's screen buffer.
///
/// The given handle should represent a console. Otherwise, an error is
/// returned.
///
/// This corresponds to calling [`GetConsoleScreenBufferInfo`].
///
/// [`GetConsoleScreenBufferInfo`]: https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo
pub fn screen_buffer_info(h: HANDLE) -> io::Result<ScreenBufferInfo> {
unsafe {
let mut info: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed();
let rc = GetConsoleScreenBufferInfo(h, &mut info);
if rc == 0 {
return Err(io::Error::last_os_error());
}
Ok(ScreenBufferInfo(info))
}
}
/// Set the text attributes of the console represented by the given handle.
///
/// This corresponds to calling [`SetConsoleTextAttribute`].
///
/// [`SetConsoleTextAttribute`]: https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute
pub fn set_text_attributes(h: HANDLE, attributes: u16) -> io::Result<()> {
if unsafe { SetConsoleTextAttribute(h, attributes) } == 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
/// Represents console screen buffer information such as size, cursor position
/// and styling attributes.
///
/// This wraps a [`CONSOLE_SCREEN_BUFFER_INFO`].
///
/// [`CONSOLE_SCREEN_BUFFER_INFO`]: https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
#[derive(Clone)]
pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO);
impl ScreenBufferInfo {
/// Returns the character attributes associated with this console.
///
/// This corresponds to `wAttributes`.
///
/// See [`char info`] for more details.
///
/// [`char info`]: https://docs.microsoft.com/en-us/windows/console/char-info-str
pub fn attributes(&self) -> u16 {
self.0.wAttributes
}
}
/// A Windows console.
///
/// This represents a very limited set of functionality available to a Windows
/// console. In particular, it can only change text attributes such as color
/// and intensity. This may grow over time. If you need more routines, please
/// file an issue and/or PR.
///
/// There is no way to "write" to this console. Simply write to
/// stdout or stderr instead, while interleaving instructions to the console
/// to change text attributes.
///
/// A common pitfall when using a console is to forget to flush writes to
/// stdout before setting new text attributes.
#[derive(Debug)]
pub struct Console {
kind: HandleKind,
start_attr: TextAttributes,
cur_attr: TextAttributes,
}
#[derive(Clone, Copy, Debug)]
enum HandleKind {
Stdout,
Stderr,
}
impl HandleKind {
fn handle(&self) -> HANDLE {
match *self {
HandleKind::Stdout => io::stdout().as_raw_handle() as HANDLE,
HandleKind::Stderr => io::stderr().as_raw_handle() as HANDLE,
}
}
}
impl Console {
/// Get a console for a standard I/O stream.
fn create_for_stream(kind: HandleKind) -> io::Result<Console> {
let h = kind.handle();
let info = screen_buffer_info(h)?;
let attr = TextAttributes::from_word(info.attributes());
Ok(Console {
kind: kind,
start_attr: attr,
cur_attr: attr,
})
}
/// Create a new Console to stdout.
///
/// If there was a problem creating the console, then an error is returned.
pub fn | () -> io::Result<Console> {
Self::create_for_stream(HandleKind::Stdout)
}
/// Create a new Console to stderr.
///
/// If there was a problem creating the console, then an error is returned.
pub fn stderr() -> io::Result<Console> {
Self::create_for_stream(HandleKind::Stderr)
}
/// Applies the current text attributes.
fn set(&mut self) -> io::Result<()> {
set_text_attributes(self.kind.handle(), self.cur_attr.to_word())
}
/// Apply the given intensity and color attributes to the console
/// foreground.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
self.cur_attr.fg_color = color;
self.cur_attr.fg_intense = intense;
self.set()
}
/// Apply the given intensity and color attributes to the console
/// background.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> {
self.cur_attr.bg_color = color;
self.cur_attr.bg_intense = intense;
self.set()
}
/// Reset the console text attributes to their original settings.
///
/// The original settings correspond to the text attributes on the console
/// when this `Console` value was created.
///
/// If there was a problem setting attributes on the console, then an error
/// is returned.
pub fn reset(&mut self) -> io::Result<()> {
self.cur_attr = self.start_attr;
self.set()
}
}
/// A representation of text attributes for the Windows console.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TextAttributes {
fg_color: Color,
fg_intense: Intense,
bg_color: Color,
bg_intense: Intense,
}
impl TextAttributes {
fn to_word(&self) -> WORD {
let mut w = 0;
w |= self.fg_color.to_fg();
w |= self.fg_intense.to_fg();
w |= self.bg_color.to_bg();
w |= self.bg_intense.to_bg();
w
}
fn from_word(word: WORD) -> TextAttributes {
TextAttributes {
fg_color: Color::from_fg(word),
fg_intense: Intense::from_fg(word),
bg_color: Color::from_bg(word),
bg_intense: Intense::from_bg(word),
}
}
}
/// Whether to use intense colors or not.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Intense {
Yes,
No,
}
impl Intense {
fn to_bg(&self) -> WORD {
self.to_fg() << 4
}
fn from_bg(word: WORD) -> Intense {
Intense::from_fg(word >> 4)
}
fn to_fg(&self) -> WORD {
match *self {
Intense::No => 0,
Intense::Yes => FG_INTENSITY,
}
}
fn from_fg(word: WORD) -> Intense {
if word & FG_INTENSITY > 0 {
Intense::Yes
} else {
Intense::No
}
}
}
/// The set of available colors for use with a Windows console.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Color {
Black,
Blue,
Green,
Red,
Cyan,
Magenta,
Yellow,
White,
}
impl Color {
fn to_bg(&self) -> WORD {
self.to_fg() << 4
}
fn from_bg(word: WORD) -> Color {
Color::from_fg(word >> 4)
}
fn to_fg(&self) -> WORD {
match *self {
Color::Black => 0,
Color::Blue => FG_BLUE,
Color::Green => FG_GREEN,
Color::Red => FG_RED,
Color::Cyan => FG_CYAN,
Color::Magenta => FG_MAGENTA,
Color::Yellow => FG_YELLOW,
Color::White => FG_WHITE,
}
}
fn from_fg(word: WORD) -> Color {
match word & 0b111 {
FG_BLUE => Color::Blue,
FG_GREEN => Color::Green,
FG_RED => Color::Red,
FG_CYAN => Color::Cyan,
FG_MAGENTA => Color::Magenta,
FG_YELLOW => Color::Yellow,
FG_WHITE => Color::White,
_ => Color::Black,
}
}
}
pub fn console_colors(out: &Term, mut con: Console, bytes: &[u8]) -> io::Result<()> {
use crate::ansi::AnsiCodeIterator;
use std::str::from_utf8;
let s = from_utf8(bytes).expect("data to be printed is not an ansi string");
let mut iter = AnsiCodeIterator::new(s);
while !iter.rest_slice().is_empty() {
if let Some((part, is_esc)) = iter.next() {
if !is_esc {
out.write_through_common(part.as_bytes())?;
} else if part == "\x1b[0m" {
con.reset()?;
} else if let Some((intense, color, fg_bg)) = driver(parse_color, part) {
match fg_bg {
FgBg::Foreground => con.fg(intense, color),
FgBg::Background => con.bg(intense, color),
}?;
} else if driver(parse_attr, part).is_none() {
out.write_through_common(part.as_bytes())?;
}
}
}
Ok(())
}
#[derive(Debug, PartialEq, Eq)]
enum FgBg {
Foreground,
Background,
}
impl FgBg {
fn new(byte: u8) -> Option<Self> {
match byte {
b'3' => Some(Self::Foreground),
b'4' => Some(Self::Background),
_ => None,
}
}
}
fn driver<Out>(parse: fn(Bytes<'_>) -> Option<Out>, part: &str) -> Option<Out> {
let mut bytes = part.bytes();
loop {
while bytes.next()? != b'\x1b' {}
if let ret @ Some(_) = (parse)(bytes.clone()) {
return ret;
}
}
}
// `driver(parse_color, s)` parses the equivalent of the regex
// \x1b\[(3|4)8;5;(8|9|1[0-5])m
// for intense or
// \x1b\[(3|4)([0-7])m
// for normal
fn parse_color(mut bytes: Bytes<'_>) -> Option<(Intense, Color, FgBg)> {
parse_prefix(&mut bytes)?;
let fg_bg = FgBg::new(bytes.next()?)?;
let (intense, color) = match bytes.next()? {
b @ b'0'..=b'7' => (Intense::No, normal_color_ansi_from_byte(b)?),
b'8' => {
if &[bytes.next()?, bytes.next()?, bytes.next()?] != b";5;" {
return None;
}
(Intense::Yes, parse_intense_color_ansi(&mut bytes)?)
}
_ => return None,
};
parse_suffix(&mut bytes)?;
Some((intense, color, fg_bg))
}
// `driver(parse_attr, s)` parses the equivalent of the regex
// \x1b\[([1-8])m
fn parse_attr(mut bytes: Bytes<'_>) -> Option<u8> {
parse_prefix(&mut bytes)?;
let attr = match bytes.next()? {
attr @ b'1'..=b'8' => attr,
_ => return None,
};
parse_suffix(&mut bytes)?;
Some(attr)
}
fn parse_prefix(bytes: &mut Bytes<'_>) -> Option<()> {
if bytes.next()? == b'[' {
Some(())
} else {
None
}
}
fn parse_intense_color_ansi(bytes: &mut Bytes<'_>) -> Option<Color> {
let color = match bytes.next()? {
b'8' => Color::Black,
b'9' => Color::Red,
b'1' => match bytes.next()? {
b'0' => Color::Green,
b'1' => Color::Yellow,
b'2' => Color::Blue,
b'3' => Color::Magenta,
b'4' => Color::Cyan,
b'5' => Color::White,
_ => return None,
},
_ => return None,
};
Some(color)
}
fn normal_color_ansi_from_byte(b: u8) -> Option<Color> {
let color = match b {
b'0' => Color::Black,
b'1' => Color::Red,
b'2' => Color::Green,
b'3' => Color::Yellow,
b'4' => Color::Blue,
b'5' => Color::Magenta,
b'6' => Color::Cyan,
b'7' => Color::White,
_ => return None,
};
Some(color)
}
fn parse_suffix(bytes: &mut Bytes<'_>) -> Option<()> {
if bytes.next()? == b'm' {
Some(())
} else {
None
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn color_parsing() {
let intense_color = "leading bytes \x1b[38;5;10m trailing bytes";
let parsed = driver(parse_color, intense_color).unwrap();
assert_eq!(parsed, (Intense::Yes, Color::Green, FgBg::Foreground));
let normal_color = "leading bytes \x1b[40m trailing bytes";
let parsed = driver(parse_color, normal_color).unwrap();
assert_eq!(parsed, (Intense::No, Color::Black, FgBg::Background));
}
#[test]
fn attr_parsing() {
let attr = "leading bytes \x1b[1m trailing bytes";
let parsed = driver(parse_attr, attr).unwrap();
assert_eq!(parsed, b'1');
}
}
| stdout | identifier_name |
log.go | package log
import (
"errors"
"github.com/qjpcpu/filelog"
"github.com/qjpcpu/log/logging"
"io"
syslog "log"
"os"
"path/filepath"
"runtime/debug"
"strings"
"sync"
)
type moduleLoggers struct {
loggers map[string]*logWrapper
*sync.RWMutex
}
type logWrapper struct {
*logging.Logger
option *LogOption
leveldBackend logging.LeveledBackend
}
// package global variables
var (
mloggers *moduleLoggers
defaultLgr *logWrapper
)
const (
// NormFormat without color
NormFormat = "%{level} %{time:2006-01-02 15:04:05.000} %{shortfile} %{message}"
// DebugFormat with color
DebugFormat = "%{level} %{time:2006-01-02 15:04:05.000} grtid:%{goroutineid}/gcnt:%{goroutinecount} %{shortfile} %{message}"
// SimpleColorFormat simple format with color
SimpleColorFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05.000}\033[0m \033[0;34m%{shortfile}\033[0m \033[0;32m%{message}\033[0m"
// DebugColorFormat with color
DebugColorFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05.000}\033[0m \033[0;34m%{shortfile}\033[0m \033[0;32mgrtid:%{goroutineid}/gcnt:%{goroutinecount}\033[0m %{message}"
// CliFormat simple format
CliFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05}\033[0m \033[0;32m%{message}\033[0m"
)
// Level log level
type Level int
const (
// CRITICAL level
CRITICAL Level = iota + 1
// ERROR level
ERROR
// WARNING level
WARNING
// NOTICE level
NOTICE
// INFO level
INFO
// DEBUG level
DEBUG
)
func (lvl Level) loggingLevel() logging.Level {
return logging.Level(lvl - 1)
}
func parseLogLevel(lstr string) Level {
lstr = strings.ToLower(lstr)
switch lstr {
case "critical":
return CRITICAL
case "error":
return ERROR
case "warning":
return WARNING
case "notice":
return NOTICE
case "info":
return INFO
case "debug":
return DEBUG
default:
return INFO
}
}
// LogOption log config options
type LogOption struct {
LogFile string
Level Level
Format string
RotateType filelog.RotateType
CreateShortcut bool
ErrorLogFile string
files []io.WriteCloser
module string
}
// RotateType 轮转类型
type RotateType int
const (
// RotateDaily 按天轮转
RotateDaily RotateType = iota
// RotateHourly 按小时轮转
RotateHourly
// RotateWeekly 按周轮转
RotateWeekly
// RotateNone 不切割日志
RotateNone
)
// GetMBuilder module log builder
func GetMBuilder(m string) *LogOption {
opt := defaultLogOption()
opt.module = m
return &opt
}
// GetBuilder log builder
func GetBuilder() *LogOption {
opt := defaultLogOption()
return &opt
}
// SetFile set log file
func (lo *LogOption) SetFile(filename string) *LogOption {
lo.Lo | filename
return lo
}
// SetLevel set log level
func (lo *LogOption) SetLevel(level string) *LogOption {
lo.Level = parseLogLevel(level)
return lo
}
// SetTypedLevel set log level
func (lo *LogOption) SetTypedLevel(level Level) *LogOption {
lo.Level = level
return lo
}
// SetFormat set log format
func (lo *LogOption) SetFormat(format string) *LogOption {
lo.Format = format
return lo
}
// SetRotate set rotate type default daily
func (lo *LogOption) SetRotate(rt RotateType) *LogOption {
lo.RotateType = filelog.RotateType(rt)
return lo
}
// SetShortcut whether create shorcut when rotate
func (lo *LogOption) SetShortcut(create bool) *LogOption {
lo.CreateShortcut = create
return lo
}
// SetErrorLog set error log suffix,default is wf
func (lo *LogOption) SetErrorLog(f string) *LogOption {
lo.ErrorLogFile = f
return lo
}
// Submit use this buider options
func (lo *LogOption) Submit() {
lgr := createLogger(lo)
if lo.module == "" {
defaultLgr = lgr
} else {
lgr.ExtraCalldepth--
mloggers.Lock()
defer mloggers.Unlock()
mloggers.loggers[lo.module] = lgr
}
}
// M module log
func M(m string) *logging.Logger {
mloggers.RLock()
defer mloggers.RUnlock()
return mloggers.loggers[m].Logger
}
func defaultLogOption() LogOption {
return LogOption{
Level: DEBUG,
Format: DebugColorFormat,
RotateType: filelog.RotateNone,
CreateShortcut: false,
module: "",
}
}
func init() {
mloggers = &moduleLoggers{
RWMutex: new(sync.RWMutex),
loggers: make(map[string]*logWrapper),
}
dopt := defaultLogOption()
defaultLgr = createLogger(&dopt)
}
func createLogger(opt *LogOption) *logWrapper {
if opt.Format == "" {
opt.Format = NormFormat
}
if opt.Level <= 0 {
opt.Level = INFO
}
lgr := logging.MustGetLogger(opt.module)
format := logging.MustStringFormatter(opt.Format)
var leveldBackend logging.LeveledBackend
if opt.LogFile != "" {
var backends []logging.LeveledBackend
// mkdir log dir
os.MkdirAll(filepath.Dir(opt.LogFile), 0777)
os.MkdirAll(filepath.Dir(opt.ErrorLogFile), 0777)
filename := opt.LogFile
infoLogFp, err := filelog.NewWriter(filename, func(fopt *filelog.Option) {
fopt.RotateType = opt.RotateType
fopt.CreateShortcut = opt.CreateShortcut
})
if err != nil {
syslog.Fatalf("open file[%s] failed[%s]", filename, err)
}
backendInfo := logging.NewLogBackend(infoLogFp, "", 0)
backendInfoFormatter := logging.NewBackendFormatter(backendInfo, format)
backendInfoLeveld := logging.AddModuleLevel(backendInfoFormatter)
backendInfoLeveld.SetLevel(opt.Level.loggingLevel(), "")
leveldBackend = backendInfoLeveld
backends = append(backends, backendInfoLeveld)
opt.files = append(opt.files, infoLogFp)
if opt.ErrorLogFile != "" && opt.ErrorLogFile != opt.LogFile {
errLogFp, err := filelog.NewWriter(opt.ErrorLogFile, func(fopt *filelog.Option) {
fopt.RotateType = opt.RotateType
fopt.CreateShortcut = opt.CreateShortcut
})
if err != nil {
syslog.Fatalf("open file[%s.wf] failed[%s]", filename, err)
}
backendErr := logging.NewLogBackend(errLogFp, "", 0)
backendErrFormatter := logging.NewBackendFormatter(backendErr, format)
backendErrLeveld := logging.AddModuleLevel(backendErrFormatter)
backendErrLeveld.SetLevel(logging.ERROR, "")
backends = append(backends, backendErrLeveld)
opt.files = append(opt.files, errLogFp)
}
var bl []logging.Backend
for _, lb := range backends {
bl = append(bl, lb)
}
ml := logging.MultiLogger(bl...)
lgr.SetBackend(ml)
} else {
backend1 := logging.NewLogBackend(os.Stderr, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(opt.Level.loggingLevel(), "")
leveldBackend = backend1Leveled
lgr.SetBackend(backend1Leveled)
}
lgr.ExtraCalldepth++
return &logWrapper{Logger: lgr, option: opt, leveldBackend: leveldBackend}
}
// Infof write leveled log
func Infof(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Infof(format, args...)
}
// Warningf write leveled log
func Warningf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Warningf(format, args...)
}
// Criticalf write leveled log
func Criticalf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Criticalf(format, args...)
}
// Fatalf write leveled log
func Fatalf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Fatalf(format, args...)
}
// Errorf write leveled log
func Errorf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Errorf(format, args...)
}
// Debugf write leveled log
func Debugf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Debugf(format, args...)
}
// Noticef write leveled log
func Noticef(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Noticef(format, args...)
}
// Info write leveled log
func Info(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Infof(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Warning write leveled log
func Warning(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Warningf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Critical write leveled log
func Critical(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Criticalf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Fatal write leveled log
func Fatal(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Fatalf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Error write leveled log
func Error(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Errorf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Debug write leveled log
func Debug(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Debugf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Notice write leveld log
func Notice(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Noticef(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// MustNoErr panic when err occur, should only used in test
func MustNoErr(err error, desc ...string) {
if err != nil {
stackInfo := debug.Stack()
start := 0
count := 0
for i, ch := range stackInfo {
if ch == '\n' {
if count == 0 {
start = i
} else if count == 4 {
stackInfo = append(stackInfo[0:start+1], stackInfo[i+1:]...)
break
}
count++
}
}
var extra string
if len(desc) > 0 && desc[0] != "" {
extra = "[" + desc[0] + "]"
}
defaultLgr.Fatalf("%s%v\nMustNoErr fail, %s", extra, err, stackInfo)
}
}
// GetLogLevel default logger level
func GetLogLevel() string {
switch defaultLgr.option.Level {
case CRITICAL:
return "critical"
case ERROR:
return "error"
case WARNING:
return "warning"
case NOTICE:
return "notice"
case INFO:
return "info"
case DEBUG:
return "debug"
default:
return ""
}
}
// SetLogLevel default logger level
func SetLogLevel(lvl string) error {
tlvl := parseLogLevel(lvl)
defaultLgr.option.Level = tlvl
defaultLgr.leveldBackend.SetLevel(tlvl.loggingLevel(), "")
return nil
}
// SetMLogLevel set module log level
func SetMLogLevel(module, lvl string) error {
mloggers.RLock()
defer mloggers.RUnlock()
wl, ok := mloggers.loggers[module]
if !ok {
return errors.New("no such module " + module)
}
tlvl := parseLogLevel(lvl)
wl.option.Level = tlvl
wl.leveldBackend.SetLevel(tlvl.loggingLevel(), "")
return nil
}
// GetMLogLevel get module log level
func GetMLogLevel(module string) string {
mloggers.RLock()
defer mloggers.RUnlock()
wl, ok := mloggers.loggers[module]
if !ok {
return ""
}
switch wl.option.Level {
case CRITICAL:
return "critical"
case ERROR:
return "error"
case WARNING:
return "warning"
case NOTICE:
return "notice"
case INFO:
return "info"
case DEBUG:
return "debug"
default:
return ""
}
}
| gFile = | identifier_name |
log.go | package log
import (
"errors"
"github.com/qjpcpu/filelog"
"github.com/qjpcpu/log/logging"
"io"
syslog "log"
"os"
"path/filepath"
"runtime/debug"
"strings"
"sync"
)
type moduleLoggers struct {
loggers map[string]*logWrapper
*sync.RWMutex
}
type logWrapper struct {
*logging.Logger
option *LogOption
leveldBackend logging.LeveledBackend
}
// package global variables
var (
mloggers *moduleLoggers
defaultLgr *logWrapper
)
const (
// NormFormat without color
NormFormat = "%{level} %{time:2006-01-02 15:04:05.000} %{shortfile} %{message}"
// DebugFormat with color
DebugFormat = "%{level} %{time:2006-01-02 15:04:05.000} grtid:%{goroutineid}/gcnt:%{goroutinecount} %{shortfile} %{message}"
// SimpleColorFormat simple format with color
SimpleColorFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05.000}\033[0m \033[0;34m%{shortfile}\033[0m \033[0;32m%{message}\033[0m"
// DebugColorFormat with color
DebugColorFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05.000}\033[0m \033[0;34m%{shortfile}\033[0m \033[0;32mgrtid:%{goroutineid}/gcnt:%{goroutinecount}\033[0m %{message}"
// CliFormat simple format
CliFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05}\033[0m \033[0;32m%{message}\033[0m"
)
// Level log level
type Level int
const (
// CRITICAL level
CRITICAL Level = iota + 1
// ERROR level
ERROR
// WARNING level
WARNING
// NOTICE level
NOTICE
// INFO level
INFO
// DEBUG level
DEBUG
)
func (lvl Level) loggingLevel() logging.Level {
return logging.Level(lvl - 1)
}
func parseLogLevel(lstr string) Level {
lstr = strings.ToLower(lstr)
switch lstr {
case "critical":
return CRITICAL
case "error":
return ERROR
case "warning":
return WARNING
case "notice":
return NOTICE
case "info":
return INFO
case "debug":
return DEBUG
default:
return INFO
}
}
// LogOption log config options
type LogOption struct {
LogFile string
Level Level
Format string
RotateType filelog.RotateType
CreateShortcut bool
ErrorLogFile string
files []io.WriteCloser
module string
}
// RotateType 轮转类型
type RotateType int
const (
// RotateDaily 按天轮转
RotateDaily RotateType = iota
// RotateHourly 按小时轮转
RotateHourly
// RotateWeekly 按周轮转
RotateWeekly
// RotateNone 不切割日志
RotateNone
)
// GetMBuilder module log builder
func GetMBuilder(m string) *LogOption {
opt := defaultLogOption()
opt.module = m
return &opt
}
// GetBuilder log builder
func GetBuilder() *LogOption {
opt := defaultLogOption()
return &opt
}
// SetFile set log file
func (lo *LogOption) SetFile(filename string) *LogOption {
lo.LogFile = filename
return lo
}
// SetLevel set log level
func (lo *LogOption) SetLevel(level string) *LogOption {
lo.Level = parseLogLevel(level)
return lo
}
// SetTypedLevel set log level
func (lo *LogOption) SetTypedLevel(level Level) *LogOption {
lo.Level = level
return lo
}
// SetFormat set log format
func (lo *LogOption) SetFormat(format string) *LogOption {
lo.Format = format
return lo
}
// SetRotate set rotate type default daily
func (lo *LogOption) SetRotate(rt RotateType) *LogOption {
lo.RotateType = filelog.RotateType(rt)
return lo
}
// SetShortcut whether create shorcut when rotate
func (lo *LogOption) SetShortcut(create bool) *LogOption {
lo.CreateShortcut = create
return lo
}
// SetErrorLog set error log suffix,default is wf
func (lo *LogOption) SetErrorLog(f string) *LogOption {
lo.ErrorLogFile = f
return lo
}
// Submit use this buider options
func (lo *LogOption) Submit() {
lgr := createLogger(lo)
if lo.module == "" {
defaultLgr = lgr
} else {
lgr.ExtraCalldepth--
mloggers.Lock()
defer mloggers.Unlock()
mloggers.loggers[lo.module] = lgr
}
}
// M module log
func M(m string) *logging.Logger {
mloggers.RLock()
defer mloggers.RUnlock()
return mloggers.loggers[m].Logger
}
func defaultLogOption() LogOption {
return LogOption{
Level: DEBUG,
Format: DebugColorFormat,
RotateType: filelog.RotateNone,
CreateShortcut: false,
module: "",
}
}
func init() {
mloggers = &moduleLoggers{
RWMutex: new(sync.RWMutex),
loggers: make(map[string]*logWrapper),
}
dopt := defaultLogOption()
defaultLgr = createLogger(&dopt)
}
func createLogger(opt *LogOption) *logWrapper {
if opt.Format == "" {
opt.Format = NormFormat
}
if opt.Level <= 0 {
opt.Level = INFO
}
lgr := logging.MustGetLogger(opt.module)
format := logging.MustStringFormatter(opt.Format)
var leveldBackend logging.LeveledBackend
if opt.LogFile != "" {
var backends []logging.LeveledBackend
// mkdir log dir
os.MkdirAll(filepath.Dir(opt.LogFile), 0777)
os.MkdirAll(filepath.Dir(opt.ErrorLogFile), 0777)
filename := opt.LogFile
infoLogFp, err := filelog.NewWriter(filename, func(fopt *filelog.Option) {
fopt.RotateType = opt.RotateType
fopt.CreateShortcut = opt.CreateShortcut
})
if err != nil {
syslog.Fatalf("open file[%s] failed[%s]", filename, err)
}
backendInfo := logging.NewLogBackend(infoLogFp, "", 0)
backendInfoFormatter := logging.NewBackendFormatter(backendInfo, format)
backendInfoLeveld := logging.AddModuleLevel(backendInfoFormatter)
backendInfoLeveld.SetLevel(opt.Level.loggingLevel(), "")
leveldBackend = backendInfoLeveld
backends = append(backends, backendInfoLeveld)
opt.files = append(opt.files, infoLogFp)
if opt.ErrorLogFile != "" && opt.ErrorLogFile != opt.LogFile {
errLogFp, err := filelog.NewWriter(opt.ErrorLogFile, func(fopt *filelog.Option) {
fopt.RotateType = opt.RotateType
fopt.CreateShortcut = opt.CreateShortcut
})
if err != nil {
syslog.Fatalf("open file[%s.wf] failed[%s]", filename, err)
}
backendErr := logging.NewLogBackend(errLogFp, "", 0)
backendErrFormatter := logging.NewBackendFormatter(backendErr, format)
backendErrLeveld := logging.AddModuleLevel(backendErrFormatter)
backendErrLeveld.SetLevel(logging.ERROR, "")
backends = append(backends, backendErrLeveld)
opt.files = append(opt.files, errLogFp)
}
var bl []logging.Backend
for _, lb := range backends {
bl = append(bl, lb)
}
ml := logging.MultiLogger(bl...)
lgr.SetBackend(ml)
} else {
backend1 := logging.NewLogBackend(os.Stderr, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(opt.Level.loggingLevel(), "")
leveldBackend = backend1Leveled
lgr.SetBackend(backend1Leveled)
}
lgr.ExtraCalldepth++
return &logWrapper{Logger: lgr, option: opt, leveldBackend: leveldBackend}
}
// Infof write leveled log
func Infof(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Infof(format, args...)
}
// Warningf write leveled log
func Warningf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Warningf(format, args...)
}
// Criticalf write leveled log
func Criticalf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Criticalf(format, args...)
}
// Fatalf write leveled log
func Fatalf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Fatalf(format, args...)
}
// Errorf write leveled log
func Errorf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Errorf(format, args...)
}
// Debugf write leveled log
func Debugf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Debugf(format, args...)
}
// Noticef write leveled log
func Noticef(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Noticef(format, args...)
}
// Info write leveled log
func Info(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Infof(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Warning write leveled log
func Warning(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Warningf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Critical write leveled log
func Critical(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Criticalf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Fatal write leveled log
func Fatal(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Fatalf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Error write leveled log
func Error(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Errorf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Debug write leveled log
func Debug(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Debugf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Notice write leveld log
func Notice(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Noticef(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// MustNoErr panic when err occur, should only used in test | count := 0
for i, ch := range stackInfo {
if ch == '\n' {
if count == 0 {
start = i
} else if count == 4 {
stackInfo = append(stackInfo[0:start+1], stackInfo[i+1:]...)
break
}
count++
}
}
var extra string
if len(desc) > 0 && desc[0] != "" {
extra = "[" + desc[0] + "]"
}
defaultLgr.Fatalf("%s%v\nMustNoErr fail, %s", extra, err, stackInfo)
}
}
// GetLogLevel default logger level
func GetLogLevel() string {
switch defaultLgr.option.Level {
case CRITICAL:
return "critical"
case ERROR:
return "error"
case WARNING:
return "warning"
case NOTICE:
return "notice"
case INFO:
return "info"
case DEBUG:
return "debug"
default:
return ""
}
}
// SetLogLevel default logger level
func SetLogLevel(lvl string) error {
tlvl := parseLogLevel(lvl)
defaultLgr.option.Level = tlvl
defaultLgr.leveldBackend.SetLevel(tlvl.loggingLevel(), "")
return nil
}
// SetMLogLevel set module log level
func SetMLogLevel(module, lvl string) error {
mloggers.RLock()
defer mloggers.RUnlock()
wl, ok := mloggers.loggers[module]
if !ok {
return errors.New("no such module " + module)
}
tlvl := parseLogLevel(lvl)
wl.option.Level = tlvl
wl.leveldBackend.SetLevel(tlvl.loggingLevel(), "")
return nil
}
// GetMLogLevel get module log level
func GetMLogLevel(module string) string {
mloggers.RLock()
defer mloggers.RUnlock()
wl, ok := mloggers.loggers[module]
if !ok {
return ""
}
switch wl.option.Level {
case CRITICAL:
return "critical"
case ERROR:
return "error"
case WARNING:
return "warning"
case NOTICE:
return "notice"
case INFO:
return "info"
case DEBUG:
return "debug"
default:
return ""
}
} | func MustNoErr(err error, desc ...string) {
if err != nil {
stackInfo := debug.Stack()
start := 0 | random_line_split |
log.go | package log
import (
"errors"
"github.com/qjpcpu/filelog"
"github.com/qjpcpu/log/logging"
"io"
syslog "log"
"os"
"path/filepath"
"runtime/debug"
"strings"
"sync"
)
type moduleLoggers struct {
loggers map[string]*logWrapper
*sync.RWMutex
}
type logWrapper struct {
*logging.Logger
option *LogOption
leveldBackend logging.LeveledBackend
}
// package global variables
var (
mloggers *moduleLoggers
defaultLgr *logWrapper
)
const (
// NormFormat without color
NormFormat = "%{level} %{time:2006-01-02 15:04:05.000} %{shortfile} %{message}"
// DebugFormat with color
DebugFormat = "%{level} %{time:2006-01-02 15:04:05.000} grtid:%{goroutineid}/gcnt:%{goroutinecount} %{shortfile} %{message}"
// SimpleColorFormat simple format with color
SimpleColorFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05.000}\033[0m \033[0;34m%{shortfile}\033[0m \033[0;32m%{message}\033[0m"
// DebugColorFormat with color
DebugColorFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05.000}\033[0m \033[0;34m%{shortfile}\033[0m \033[0;32mgrtid:%{goroutineid}/gcnt:%{goroutinecount}\033[0m %{message}"
// CliFormat simple format
CliFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05}\033[0m \033[0;32m%{message}\033[0m"
)
// Level log level
type Level int
const (
// CRITICAL level
CRITICAL Level = iota + 1
// ERROR level
ERROR
// WARNING level
WARNING
// NOTICE level
NOTICE
// INFO level
INFO
// DEBUG level
DEBUG
)
func (lvl Level) loggingLevel() logging.Level {
return logging.Level(lvl - 1)
}
func parseLogLevel(lstr string) Level {
lstr = strings.ToLower(lstr)
switch lstr {
case "critical":
return CRITICAL
case "error":
return ERROR
case "warning":
return WARNING
case "notice":
return NOTICE
case "info":
return INFO
case "debug":
return DEBUG
default:
return INFO
}
}
// LogOption log config options
type LogOption struct {
LogFile string
Level Level
Format string
RotateType filelog.RotateType
CreateShortcut bool
ErrorLogFile string
files []io.WriteCloser
module string
}
// RotateType 轮转类型
type RotateType int
const (
// RotateDaily 按天轮转
RotateDaily RotateType = iota
// RotateHourly 按小时轮转
RotateHourly
// RotateWeekly 按周轮转
RotateWeekly
// RotateNone 不切割日志
RotateNone
)
// GetMBuilder module log builder
func GetMBuilder(m string) *LogOption {
opt := defaultLogOption()
opt.module = m
return &opt
}
// GetBuilder log builder
func GetBuilder() *LogOption {
opt := defaultLogOption()
return &opt
}
// SetFile set log file
func (lo *LogOption) SetFile(filename string) *LogOption {
lo.LogFile = filename
return lo
}
// SetLevel set log level
func (lo *LogOption) SetLevel(level string) *LogOption {
lo.Level = parseLogLevel(level)
return lo
}
// SetTypedLevel set log level
func (lo *LogOption) SetTypedLevel(level Level) *LogOption {
lo.Level = level
return lo
}
// SetFormat set log format
func (lo *LogOption) SetFormat(format string) *LogOption {
lo.Format = format
return lo
}
// SetRotate set rotate type default daily
func (lo *LogOption) SetRotate(rt RotateType) *LogOption {
lo.RotateType = filelog.RotateType(rt)
return lo
}
// SetShortcut whether create shorcut when rotate
func (lo *LogOption) SetShortcut(create bool) *LogOption {
lo.CreateShortcut = create
return lo
}
// SetErrorLog set error log suffix,default is wf
func (lo *LogOption) SetErrorLog(f string) *LogOption {
lo.ErrorLogFile = f
return lo
}
// Submit use this buider options
func (lo *LogOption) Submit() {
lgr := createLogger(lo)
if lo.module == "" {
defaultLgr = lgr
} else {
lgr.ExtraCalldepth--
mloggers.Lock()
defer mloggers.Unlock()
mloggers.loggers[lo.module] = lgr
}
}
// M module log
func M(m string) *logging.Logger {
mloggers.RLock()
defer mloggers.RUnlock()
return mloggers.loggers[m].Logger
}
func defaultLogOption() LogOption {
return LogOption{
Level: DEBUG,
Format: DebugColorFormat,
RotateType: filelog.RotateNone,
CreateShortcut: false,
module: "",
}
}
func init() {
mloggers = &moduleLoggers{
RWMutex: new(sync.RWMutex),
loggers: make(map[string]*logWrapper),
}
dopt := defaultLogOption()
defaultLgr = createLogger(&dopt)
}
func createLogger(opt *LogOption) *logWrapper {
if opt.Format == "" {
opt.Format = NormFormat
}
if opt.Level <= 0 {
opt.Level = INFO
}
lgr := logging.MustGetLogger(opt.module)
format := logging.MustStringFormatter(opt.Format)
var leveldBackend logging.LeveledBackend
if opt.LogFile != "" {
var backends []logging.LeveledBackend
// mkdir log dir
os.MkdirAll(filepath.Dir(opt.LogFile), 0777)
os.MkdirAll(filepath.Dir(opt.ErrorLogFile), 0777)
filename := opt.LogFile
infoLogFp, err := filelog.NewWriter(filename, func(fopt *filelog.Option) {
fopt.RotateType = opt.RotateType
fopt.CreateShortcut = opt.CreateShortcut
})
if err != nil {
syslog.Fatalf("open file[%s] failed[%s] | LogFp, "", 0)
backendInfoFormatter := logging.NewBackendFormatter(backendInfo, format)
backendInfoLeveld := logging.AddModuleLevel(backendInfoFormatter)
backendInfoLeveld.SetLevel(opt.Level.loggingLevel(), "")
leveldBackend = backendInfoLeveld
backends = append(backends, backendInfoLeveld)
opt.files = append(opt.files, infoLogFp)
if opt.ErrorLogFile != "" && opt.ErrorLogFile != opt.LogFile {
errLogFp, err := filelog.NewWriter(opt.ErrorLogFile, func(fopt *filelog.Option) {
fopt.RotateType = opt.RotateType
fopt.CreateShortcut = opt.CreateShortcut
})
if err != nil {
syslog.Fatalf("open file[%s.wf] failed[%s]", filename, err)
}
backendErr := logging.NewLogBackend(errLogFp, "", 0)
backendErrFormatter := logging.NewBackendFormatter(backendErr, format)
backendErrLeveld := logging.AddModuleLevel(backendErrFormatter)
backendErrLeveld.SetLevel(logging.ERROR, "")
backends = append(backends, backendErrLeveld)
opt.files = append(opt.files, errLogFp)
}
var bl []logging.Backend
for _, lb := range backends {
bl = append(bl, lb)
}
ml := logging.MultiLogger(bl...)
lgr.SetBackend(ml)
} else {
backend1 := logging.NewLogBackend(os.Stderr, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(opt.Level.loggingLevel(), "")
leveldBackend = backend1Leveled
lgr.SetBackend(backend1Leveled)
}
lgr.ExtraCalldepth++
return &logWrapper{Logger: lgr, option: opt, leveldBackend: leveldBackend}
}
// Infof write leveled log
func Infof(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Infof(format, args...)
}
// Warningf write leveled log
func Warningf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Warningf(format, args...)
}
// Criticalf write leveled log
func Criticalf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Criticalf(format, args...)
}
// Fatalf write leveled log
func Fatalf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Fatalf(format, args...)
}
// Errorf write leveled log
func Errorf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Errorf(format, args...)
}
// Debugf write leveled log
func Debugf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Debugf(format, args...)
}
// Noticef write leveled log
func Noticef(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Noticef(format, args...)
}
// Info write leveled log
func Info(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Infof(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Warning write leveled log
func Warning(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Warningf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Critical write leveled log
func Critical(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Criticalf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Fatal write leveled log
func Fatal(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Fatalf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Error write leveled log
func Error(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Errorf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Debug write leveled log
func Debug(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Debugf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Notice write leveld log
func Notice(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Noticef(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// MustNoErr panic when err occur, should only used in test
func MustNoErr(err error, desc ...string) {
if err != nil {
stackInfo := debug.Stack()
start := 0
count := 0
for i, ch := range stackInfo {
if ch == '\n' {
if count == 0 {
start = i
} else if count == 4 {
stackInfo = append(stackInfo[0:start+1], stackInfo[i+1:]...)
break
}
count++
}
}
var extra string
if len(desc) > 0 && desc[0] != "" {
extra = "[" + desc[0] + "]"
}
defaultLgr.Fatalf("%s%v\nMustNoErr fail, %s", extra, err, stackInfo)
}
}
// GetLogLevel default logger level
func GetLogLevel() string {
switch defaultLgr.option.Level {
case CRITICAL:
return "critical"
case ERROR:
return "error"
case WARNING:
return "warning"
case NOTICE:
return "notice"
case INFO:
return "info"
case DEBUG:
return "debug"
default:
return ""
}
}
// SetLogLevel default logger level
func SetLogLevel(lvl string) error {
tlvl := parseLogLevel(lvl)
defaultLgr.option.Level = tlvl
defaultLgr.leveldBackend.SetLevel(tlvl.loggingLevel(), "")
return nil
}
// SetMLogLevel set module log level
func SetMLogLevel(module, lvl string) error {
mloggers.RLock()
defer mloggers.RUnlock()
wl, ok := mloggers.loggers[module]
if !ok {
return errors.New("no such module " + module)
}
tlvl := parseLogLevel(lvl)
wl.option.Level = tlvl
wl.leveldBackend.SetLevel(tlvl.loggingLevel(), "")
return nil
}
// GetMLogLevel get module log level
func GetMLogLevel(module string) string {
mloggers.RLock()
defer mloggers.RUnlock()
wl, ok := mloggers.loggers[module]
if !ok {
return ""
}
switch wl.option.Level {
case CRITICAL:
return "critical"
case ERROR:
return "error"
case WARNING:
return "warning"
case NOTICE:
return "notice"
case INFO:
return "info"
case DEBUG:
return "debug"
default:
return ""
}
}
| ", filename, err)
}
backendInfo := logging.NewLogBackend(info | conditional_block |
log.go | package log
import (
"errors"
"github.com/qjpcpu/filelog"
"github.com/qjpcpu/log/logging"
"io"
syslog "log"
"os"
"path/filepath"
"runtime/debug"
"strings"
"sync"
)
type moduleLoggers struct {
loggers map[string]*logWrapper
*sync.RWMutex
}
type logWrapper struct {
*logging.Logger
option *LogOption
leveldBackend logging.LeveledBackend
}
// package global variables
var (
mloggers *moduleLoggers
defaultLgr *logWrapper
)
const (
// NormFormat without color
NormFormat = "%{level} %{time:2006-01-02 15:04:05.000} %{shortfile} %{message}"
// DebugFormat with color
DebugFormat = "%{level} %{time:2006-01-02 15:04:05.000} grtid:%{goroutineid}/gcnt:%{goroutinecount} %{shortfile} %{message}"
// SimpleColorFormat simple format with color
SimpleColorFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05.000}\033[0m \033[0;34m%{shortfile}\033[0m \033[0;32m%{message}\033[0m"
// DebugColorFormat with color
DebugColorFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05.000}\033[0m \033[0;34m%{shortfile}\033[0m \033[0;32mgrtid:%{goroutineid}/gcnt:%{goroutinecount}\033[0m %{message}"
// CliFormat simple format
CliFormat = "\033[1;33m%{level}\033[0m \033[1;36m%{time:2006-01-02 15:04:05}\033[0m \033[0;32m%{message}\033[0m"
)
// Level log level
type Level int
const (
// CRITICAL level
CRITICAL Level = iota + 1
// ERROR level
ERROR
// WARNING level
WARNING
// NOTICE level
NOTICE
// INFO level
INFO
// DEBUG level
DEBUG
)
func (lvl Level) loggingLevel() logging.Level {
return logging.Level(lvl - 1)
}
func parseLogLevel(lstr string) Level {
lstr = strings.ToLower(lstr)
switch lstr {
case "critical":
return CRITICAL
case "error":
return ERROR
case "warning":
return WARNING
case "notice":
return NOTICE
case "info":
return INFO
case "debug":
return DEBUG
default:
return INFO
}
}
// LogOption log config options
type LogOption struct {
LogFile string
Level Level
Format string
RotateType filelog.RotateType
CreateShortcut bool
ErrorLogFile string
files []io.WriteCloser
module string
}
// RotateType 轮转类型
type RotateType int
const (
// RotateDaily 按天轮转
RotateDaily RotateType = iota
// RotateHourly 按小时轮转
RotateHourly
// RotateWeekly 按周轮转
RotateWeekly
// RotateNone 不切割日志
RotateNone
)
// GetMBuilder module log builder
func GetMBuilder(m string) *LogOption {
opt := defaultLogOption()
opt.module = m
return &opt
}
// GetBuilder log builder
func GetBuilder() *LogOption {
opt := defaultLogOption()
return &opt
}
// SetFile set log file
func (lo *LogOption) SetFile(filename string) *LogOption {
lo.LogFile = filename
return lo
}
// SetLevel set log level
func (lo *LogOption) SetLevel(level string) *LogOption {
lo.Level = parseLogLevel(level)
return lo
}
// SetTypedLevel set log level
func (lo *LogOption) SetTypedLevel(level Level) *LogOption {
lo.Level = level
return lo
}
// SetFormat set log format
func (lo *LogOption) SetFormat(format string) *LogOption {
lo.Format = format
return lo
}
// SetRotate set rotate type default daily
func (lo *LogOption) SetRotate(rt RotateType) *LogOption {
lo.RotateType = filelog.RotateType(rt)
return lo
}
// SetShortcut whether create shorcut when rotate
func (lo *LogOption) SetShortcut(create bool) *LogOption {
lo.CreateShortcut = create
return lo
}
// SetErrorLog set error log suffix,default is wf
func (lo *LogOption) SetErrorLog(f string) *LogOption {
lo.ErrorLogFile = f
return lo
}
// Submit use this buider options
func (lo *LogOption) Submit() {
lgr := createLogger(lo)
if lo.module == "" {
defaultLgr = lgr
} else {
lgr.ExtraCalldepth--
mloggers.Lock()
defer mloggers.Unlock()
mloggers.loggers[lo.module] = lgr
}
}
// M module log
func M(m string) *logging.Logger {
mloggers.RLock()
defer mloggers.RUnlock()
return mloggers.loggers[m].Logger
}
func defaultLogOption() LogOption {
return LogOption{
Level: DEBUG,
Format: DebugColorFormat,
RotateType: filelog.RotateNone,
CreateShortcut: false,
module: "",
}
}
func init() {
mloggers = &moduleLoggers{
RWMutex: new(sync.RWMutex),
loggers: make(map[string]*logWrapper),
}
dopt := defaultLogOption()
defaultLgr = createLogger(&dopt)
}
func createLogger(opt *LogOption) *logWrapper {
if opt.Format == "" {
opt.Format = NormFormat
}
if opt.Level <= 0 {
opt.Level = INFO
}
lgr := logging.MustGetLogger(opt.module)
format := logging.MustStringFormatter(opt.Format)
var leveldBackend logging.LeveledBackend
if opt.LogFile != "" {
var backends []logging.LeveledBackend
// mkdir log dir
os.MkdirAll(filepath.Dir(opt.LogFile), 0777)
os.MkdirAll(filepath.Dir(opt.ErrorLogFile), 0777)
filename := opt.LogFile
infoLogFp, err := filelog.NewWriter(filename, func(fopt *filelog.Option) {
fopt.RotateType = opt.RotateType
fopt.CreateShortcut = opt.CreateShortcut
})
if err != nil {
syslog.Fatalf("open file[%s] failed[%s]", filename, err)
}
backendInfo := logging.NewLogBackend(infoLogFp, "", 0)
backendInfoFormatter := logging.NewBackendFormatter(backendInfo, format)
backendInfoLeveld := logging.AddModuleLevel(backendInfoFormatter)
backendInfoLeveld.SetLevel(opt.Level.loggingLevel(), "")
leveldBackend = backendInfoLeveld
backends = append(backends, backendInfoLeveld)
opt.files = append(opt.files, infoLogFp)
if opt.ErrorLogFile != "" && opt.ErrorLogFile != opt.LogFile {
errLogFp, err := filelog.NewWriter(opt.ErrorLogFile, func(fopt *filelog.Option) {
fopt.RotateType = opt.RotateType
fopt.CreateShortcut = opt.CreateShortcut
})
if err != nil {
syslog.Fatalf("open file[%s.wf] failed[%s]", filename, err)
}
backendErr := logging.NewLogBackend(errLogFp, "", 0)
backendErrFormatter := logging.NewBackendFormatter(backendErr, format)
backendErrLeveld := logging.AddModuleLevel(backendErrFormatter)
backendErrLeveld.SetLevel(logging.ERROR, "")
backends = append(backends, backendErrLeveld)
opt.files = append(opt.files, errLogFp)
}
var bl []logging.Backend
for _, lb := range backends {
bl = append(bl, lb)
}
ml := logging.MultiLogger(bl...)
lgr.SetBackend(ml)
} else {
backend1 := logging.NewLogBackend(os.Stderr, "", 0)
backend1Formatter := logging.NewBackendFormatter(backend1, format)
backend1Leveled := logging.AddModuleLevel(backend1Formatter)
backend1Leveled.SetLevel(opt.Level.loggingLevel(), "")
leveldBackend = backend1Leveled
lgr.SetBackend(backend1Leveled)
}
lgr.ExtraCalldepth++
return &logWrapper{Logger: lgr, option: opt, leveldBackend: leveldBackend}
}
// Infof write leveled log
func Infof(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Infof(format, args...)
}
// Warningf write leveled log
func Warningf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Warningf(format, args...)
}
// Criticalf write leveled log
func Criticalf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Criticalf(format, args...)
}
// Fatalf write leveled log
func Fatalf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Fatalf(format, args...)
}
// Errorf write leveled log
func Errorf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Errorf(format, args...)
}
// Debugf write leveled log
func Debugf(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Debugf(format, args...)
}
// Noticef write leveled log
func Noticef(format string, args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Noticef(format, args...)
}
// Info write leveled log
func Info(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Infof(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Warning write leveled log
func Warning(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Warningf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Critical write leveled log
func Critical(args ...interface{}) {
if defaultLgr == nil {
return
}
defau | ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Fatalf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Error write leveled log
func Error(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Errorf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Debug write leveled log
func Debug(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Debugf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Notice write leveld log
func Notice(args ...interface{}) {
if defaultLgr == nil {
return
}
defaultLgr.Noticef(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// MustNoErr panic when err occur, should only used in test
func MustNoErr(err error, desc ...string) {
if err != nil {
stackInfo := debug.Stack()
start := 0
count := 0
for i, ch := range stackInfo {
if ch == '\n' {
if count == 0 {
start = i
} else if count == 4 {
stackInfo = append(stackInfo[0:start+1], stackInfo[i+1:]...)
break
}
count++
}
}
var extra string
if len(desc) > 0 && desc[0] != "" {
extra = "[" + desc[0] + "]"
}
defaultLgr.Fatalf("%s%v\nMustNoErr fail, %s", extra, err, stackInfo)
}
}
// GetLogLevel default logger level
func GetLogLevel() string {
switch defaultLgr.option.Level {
case CRITICAL:
return "critical"
case ERROR:
return "error"
case WARNING:
return "warning"
case NOTICE:
return "notice"
case INFO:
return "info"
case DEBUG:
return "debug"
default:
return ""
}
}
// SetLogLevel default logger level
func SetLogLevel(lvl string) error {
tlvl := parseLogLevel(lvl)
defaultLgr.option.Level = tlvl
defaultLgr.leveldBackend.SetLevel(tlvl.loggingLevel(), "")
return nil
}
// SetMLogLevel set module log level
func SetMLogLevel(module, lvl string) error {
mloggers.RLock()
defer mloggers.RUnlock()
wl, ok := mloggers.loggers[module]
if !ok {
return errors.New("no such module " + module)
}
tlvl := parseLogLevel(lvl)
wl.option.Level = tlvl
wl.leveldBackend.SetLevel(tlvl.loggingLevel(), "")
return nil
}
// GetMLogLevel get module log level
func GetMLogLevel(module string) string {
mloggers.RLock()
defer mloggers.RUnlock()
wl, ok := mloggers.loggers[module]
if !ok {
return ""
}
switch wl.option.Level {
case CRITICAL:
return "critical"
case ERROR:
return "error"
case WARNING:
return "warning"
case NOTICE:
return "notice"
case INFO:
return "info"
case DEBUG:
return "debug"
default:
return ""
}
}
| ltLgr.Criticalf(strings.TrimSpace(strings.Repeat("%+v ", len(args))), args...)
}
// Fatal write leveled log
func Fatal(args | identifier_body |
appendlist.rs | use std::cell::{Cell, UnsafeCell};
use std::fmt::{self, Debug};
use std::iter::FromIterator;
use std::ops::Index;
use crate::common::{chunk_size, chunk_start, index_chunk};
/// A list that can be appended to while elements are borrowed
///
/// This looks like a fairly bare-bones list API, except that it has a `push`
/// method that works on non-`mut` lists. It is safe to hold references to
/// values inside this list and push a new value onto the end.
///
/// Additionally, the list has O(1) index and O(1) push (not amortized!).
///
/// For example, this would be illegal with a `Vec`:
///
/// ```
/// use appendlist::AppendList;
///
/// let list = AppendList::new();
///
/// list.push(1);
/// let first_item = &list[0];
/// list.push(2);
/// let second_item = &list[1];
///
/// assert_eq!(*first_item, list[0]);
/// assert_eq!(*second_item, list[1]);
/// ```
///
/// # Implementation details
///
/// This section is not necessary to use the API, it just describes the underlying
/// allocation and indexing strategies.
///
/// The list is a `Vec` of *chunks*. Each chunk is itself a `Vec<T>`. The list
/// will fill up a chunk, then allocate a new chunk with its full capacity.
/// Because the capacity of a given chunk never changes, the underlying `Vec<T>`
/// never reallocates, so references to that chunk are never invalidated. Each
/// chunk is twice the size of the previous chunk, so there will never be more
/// than O(log(n)) chunks.
///
/// Constant-time indexing is achieved because the chunk ID of a particular index
/// can be quickly calculated: if the first chunk has size c, index i will be
/// located in chunk floor(log2(i + c) - log2(c)). If c is a power of 2, this
/// is equivalent to floor(log2(i + c)) - floor(log2(c)), and a very fast floor
/// log2 algorithm can be derived from `usize::leading_zeros()`.
pub struct AppendList<T> {
chunks: UnsafeCell<Vec<Vec<T>>>,
len: Cell<usize>,
}
impl<T> AppendList<T> {
/// Wrapper to get the list of chunks immutably
fn chunks(&self) -> &[Vec<T>] {
unsafe { &*self.chunks.get() }
}
/// In test builds, check all of the unsafe invariants
///
/// In release builds, no-op
fn check_invariants(&self) |
/// Create a new `AppendList`
pub fn new() -> Self {
Self {
chunks: UnsafeCell::new(Vec::new()),
len: Cell::new(0),
}
}
/// Append an item to the end
///
/// Note that this does not require `mut`.
pub fn push(&self, item: T) {
self.check_invariants();
// Unsafe code alert!
//
// Preserve the following invariants:
// - Only the last chunk may be modified
// - A chunk cannot ever be reallocated
// - len must reflect the length
//
// Invariants are checked in the check_invariants method
let mut_chunks = unsafe { &mut *self.chunks.get() };
let new_index = self.len.get();
let chunk_id = index_chunk(new_index);
if chunk_id < mut_chunks.len() {
// We should always be inserting into the last chunk
debug_assert_eq!(chunk_id, mut_chunks.len() - 1);
// Insert into the appropriate chunk
let chunk = &mut mut_chunks[chunk_id];
// The chunk must not be reallocated! Save the pre-insertion capacity
// so we can check it later (debug builds only)
#[cfg(test)]
let prev_capacity = chunk.capacity();
// Do the insertion
chunk.push(item);
// Check that the capacity didn't change (debug builds only)
#[cfg(test)]
assert_eq!(prev_capacity, chunk.capacity());
} else {
// Need to allocate a new chunk
// New chunk should be the immediate next chunk
debug_assert_eq!(chunk_id, mut_chunks.len());
// New chunk must be big enough
let mut new_chunk = Vec::with_capacity(chunk_size(chunk_id));
debug_assert!(new_chunk.capacity() >= chunk_size(chunk_id));
new_chunk.push(item);
mut_chunks.push(new_chunk);
}
// Increment the length
self.len.set(self.len.get() + 1);
self.check_invariants();
}
/// Get the length of the list
pub fn len(&self) -> usize {
self.check_invariants();
self.len.get()
}
/// Get an item from the list, if it is in bounds
///
/// Returns `None` if the `index` is out-of-bounds. Note that you can also
/// index with `[]`, which will panic on out-of-bounds.
pub fn get(&self, index: usize) -> Option<&T> {
self.check_invariants();
if index >= self.len() {
return None;
}
let chunk_id = index_chunk(index);
let chunk_start = chunk_start(chunk_id);
return Some(&self.chunks()[chunk_id][index - chunk_start]);
}
/// Get an iterator over the list
pub fn iter(&self) -> Iter<T> {
self.check_invariants();
Iter {
list: &self,
index: 0,
}
}
}
impl<T> Default for AppendList<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> Index<usize> for AppendList<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index)
.expect("AppendList indexed beyond its length")
}
}
impl<T> FromIterator<T> for AppendList<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let list = Self::new();
for item in iter {
list.push(item);
}
list
}
}
impl<'l, T> IntoIterator for &'l AppendList<T> {
type Item = &'l T;
type IntoIter = Iter<'l, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<T: PartialEq> PartialEq for AppendList<T> {
fn eq(&self, other: &AppendList<T>) -> bool {
let mut s = self.iter();
let mut o = other.iter();
loop {
match (s.next(), o.next()) {
(Some(a), Some(b)) if a == b => {},
(None, None) => return true,
_ => return false,
}
}
}
}
impl<T: Debug> Debug for AppendList<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
pub struct Iter<'l, T> {
list: &'l AppendList<T>,
index: usize,
}
impl<'l, T> Iterator for Iter<'l, T> {
type Item = &'l T;
fn next(&mut self) -> Option<Self::Item> {
let item = self.list.get(self.index);
self.index += 1;
item
}
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.list.len() - self.index;
(remaining, Some(remaining))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn from_iterator() {
let l: AppendList<i32> = (0..100).collect();
for i in 0..100 {
assert_eq!(l[i], i as i32);
}
}
#[test]
fn iterator() {
let l: AppendList<i32> = (0..100).collect();
let mut i1 = l.iter();
let mut i2 = l.into_iter();
for item in 0..100 {
assert_eq!(i1.next(), Some(&item));
assert_eq!(i2.next(), Some(&item));
}
assert_eq!(i1.next(), None);
assert_eq!(i2.next(), None);
}
#[test]
fn equality() {
let a = AppendList::new();
let b = AppendList::new();
assert_eq!(a, b);
a.push("foo");
assert_ne!(a, b);
b.push("foo");
assert_eq!(a, b);
a.push("bar");
a.push("baz");
assert_ne!(a, b);
}
#[test]
fn iterator_size_hint() {
let l: AppendList<i32> = AppendList::new();
let mut i = l.iter();
assert_eq!(i.size_hint(), (0, Some(0)));
l.push(1);
assert_eq!(i.size_hint(), (1, Some(1)));
l.push(2);
assert_eq!(i.size_hint(), (2, Some(2)));
i.next();
assert_eq!(i.size_hint(), (1, Some(1)));
l.push(3);
assert_eq!(i.size_hint(), (2, Some(2)));
i.next();
assert_eq!(i.size_hint(), (1, Some(1)));
i.next();
assert_eq!(i.size_hint(), (0, Some(0)));
}
#[test]
fn empty_list() {
let n: AppendList<usize> = AppendList::new();
assert_eq!(n.len(), 0);
assert_eq!(n.get(0), None);
let d: AppendList<usize> = AppendList::default();
assert_eq!(d.len(), 0);
assert_eq!(d.get(0), None);
}
#[test]
fn thousand_item_list() {
test_big_list(1_000);
}
#[test]
#[ignore]
fn million_item_list() {
test_big_list(1_000_000);
}
fn test_big_list(size: usize) {
let l = AppendList::new();
let mut refs = Vec::new();
for i in 0..size {
assert_eq!(l.len(), i);
l.push(i);
refs.push(l[i]);
assert_eq!(l.len(), i + 1);
}
for i in 0..size {
assert_eq!(Some(&refs[i]), l.get(i));
}
}
}
| {
#[cfg(test)]
{
if self.len.get() > 0 {
// Correct number of chunks
assert_eq!(index_chunk(self.len.get() - 1), self.chunks().len() - 1);
// Every chunk holds enough items
for chunk_id in 0..self.chunks().len() {
assert!(chunk_size(chunk_id) <= self.chunks()[chunk_id].capacity());
}
// Intermediate chunks are full
for chunk_id in 0..self.chunks().len() - 1 {
assert_eq!(chunk_size(chunk_id), self.chunks()[chunk_id].len());
}
// Last chunk is correct length
assert_eq!(
self.chunks().last().unwrap().len(),
self.len.get() - chunk_start(self.chunks().len() - 1)
);
} else {
// No chunks
assert_eq!(0, self.chunks().len());
}
}
} | identifier_body |
appendlist.rs | use std::cell::{Cell, UnsafeCell};
use std::fmt::{self, Debug};
use std::iter::FromIterator;
use std::ops::Index;
use crate::common::{chunk_size, chunk_start, index_chunk};
/// A list that can be appended to while elements are borrowed
///
/// This looks like a fairly bare-bones list API, except that it has a `push`
/// method that works on non-`mut` lists. It is safe to hold references to
/// values inside this list and push a new value onto the end.
///
/// Additionally, the list has O(1) index and O(1) push (not amortized!).
///
/// For example, this would be illegal with a `Vec`:
///
/// ```
/// use appendlist::AppendList;
///
/// let list = AppendList::new();
///
/// list.push(1);
/// let first_item = &list[0];
/// list.push(2);
/// let second_item = &list[1];
///
/// assert_eq!(*first_item, list[0]);
/// assert_eq!(*second_item, list[1]);
/// ```
///
/// # Implementation details
///
/// This section is not necessary to use the API, it just describes the underlying
/// allocation and indexing strategies.
///
/// The list is a `Vec` of *chunks*. Each chunk is itself a `Vec<T>`. The list
/// will fill up a chunk, then allocate a new chunk with its full capacity.
/// Because the capacity of a given chunk never changes, the underlying `Vec<T>`
/// never reallocates, so references to that chunk are never invalidated. Each
/// chunk is twice the size of the previous chunk, so there will never be more
/// than O(log(n)) chunks.
///
/// Constant-time indexing is achieved because the chunk ID of a particular index
/// can be quickly calculated: if the first chunk has size c, index i will be
/// located in chunk floor(log2(i + c) - log2(c)). If c is a power of 2, this
/// is equivalent to floor(log2(i + c)) - floor(log2(c)), and a very fast floor
/// log2 algorithm can be derived from `usize::leading_zeros()`.
pub struct AppendList<T> {
chunks: UnsafeCell<Vec<Vec<T>>>,
len: Cell<usize>,
}
impl<T> AppendList<T> {
/// Wrapper to get the list of chunks immutably
fn chunks(&self) -> &[Vec<T>] {
unsafe { &*self.chunks.get() }
}
/// In test builds, check all of the unsafe invariants
///
/// In release builds, no-op
fn check_invariants(&self) {
#[cfg(test)]
{
if self.len.get() > 0 {
// Correct number of chunks
assert_eq!(index_chunk(self.len.get() - 1), self.chunks().len() - 1);
// Every chunk holds enough items
for chunk_id in 0..self.chunks().len() {
assert!(chunk_size(chunk_id) <= self.chunks()[chunk_id].capacity());
}
// Intermediate chunks are full
for chunk_id in 0..self.chunks().len() - 1 {
assert_eq!(chunk_size(chunk_id), self.chunks()[chunk_id].len());
}
// Last chunk is correct length
assert_eq!(
self.chunks().last().unwrap().len(),
self.len.get() - chunk_start(self.chunks().len() - 1)
);
} else {
// No chunks
assert_eq!(0, self.chunks().len());
}
}
}
/// Create a new `AppendList`
pub fn new() -> Self {
Self {
chunks: UnsafeCell::new(Vec::new()),
len: Cell::new(0),
}
}
/// Append an item to the end
///
/// Note that this does not require `mut`.
pub fn push(&self, item: T) {
self.check_invariants();
// Unsafe code alert!
//
// Preserve the following invariants:
// - Only the last chunk may be modified
// - A chunk cannot ever be reallocated
// - len must reflect the length
//
// Invariants are checked in the check_invariants method
let mut_chunks = unsafe { &mut *self.chunks.get() };
let new_index = self.len.get();
let chunk_id = index_chunk(new_index);
if chunk_id < mut_chunks.len() {
// We should always be inserting into the last chunk
debug_assert_eq!(chunk_id, mut_chunks.len() - 1);
// Insert into the appropriate chunk
let chunk = &mut mut_chunks[chunk_id];
// The chunk must not be reallocated! Save the pre-insertion capacity
// so we can check it later (debug builds only)
#[cfg(test)]
let prev_capacity = chunk.capacity();
// Do the insertion
chunk.push(item);
// Check that the capacity didn't change (debug builds only)
#[cfg(test)]
assert_eq!(prev_capacity, chunk.capacity());
} else {
// Need to allocate a new chunk
// New chunk should be the immediate next chunk
debug_assert_eq!(chunk_id, mut_chunks.len());
// New chunk must be big enough
let mut new_chunk = Vec::with_capacity(chunk_size(chunk_id));
debug_assert!(new_chunk.capacity() >= chunk_size(chunk_id));
new_chunk.push(item);
mut_chunks.push(new_chunk);
}
// Increment the length
self.len.set(self.len.get() + 1);
self.check_invariants();
}
/// Get the length of the list
pub fn | (&self) -> usize {
self.check_invariants();
self.len.get()
}
/// Get an item from the list, if it is in bounds
///
/// Returns `None` if the `index` is out-of-bounds. Note that you can also
/// index with `[]`, which will panic on out-of-bounds.
pub fn get(&self, index: usize) -> Option<&T> {
self.check_invariants();
if index >= self.len() {
return None;
}
let chunk_id = index_chunk(index);
let chunk_start = chunk_start(chunk_id);
return Some(&self.chunks()[chunk_id][index - chunk_start]);
}
/// Get an iterator over the list
pub fn iter(&self) -> Iter<T> {
self.check_invariants();
Iter {
list: &self,
index: 0,
}
}
}
impl<T> Default for AppendList<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> Index<usize> for AppendList<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index)
.expect("AppendList indexed beyond its length")
}
}
impl<T> FromIterator<T> for AppendList<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let list = Self::new();
for item in iter {
list.push(item);
}
list
}
}
impl<'l, T> IntoIterator for &'l AppendList<T> {
type Item = &'l T;
type IntoIter = Iter<'l, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<T: PartialEq> PartialEq for AppendList<T> {
fn eq(&self, other: &AppendList<T>) -> bool {
let mut s = self.iter();
let mut o = other.iter();
loop {
match (s.next(), o.next()) {
(Some(a), Some(b)) if a == b => {},
(None, None) => return true,
_ => return false,
}
}
}
}
impl<T: Debug> Debug for AppendList<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
pub struct Iter<'l, T> {
list: &'l AppendList<T>,
index: usize,
}
impl<'l, T> Iterator for Iter<'l, T> {
type Item = &'l T;
fn next(&mut self) -> Option<Self::Item> {
let item = self.list.get(self.index);
self.index += 1;
item
}
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.list.len() - self.index;
(remaining, Some(remaining))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn from_iterator() {
let l: AppendList<i32> = (0..100).collect();
for i in 0..100 {
assert_eq!(l[i], i as i32);
}
}
#[test]
fn iterator() {
let l: AppendList<i32> = (0..100).collect();
let mut i1 = l.iter();
let mut i2 = l.into_iter();
for item in 0..100 {
assert_eq!(i1.next(), Some(&item));
assert_eq!(i2.next(), Some(&item));
}
assert_eq!(i1.next(), None);
assert_eq!(i2.next(), None);
}
#[test]
fn equality() {
let a = AppendList::new();
let b = AppendList::new();
assert_eq!(a, b);
a.push("foo");
assert_ne!(a, b);
b.push("foo");
assert_eq!(a, b);
a.push("bar");
a.push("baz");
assert_ne!(a, b);
}
#[test]
fn iterator_size_hint() {
let l: AppendList<i32> = AppendList::new();
let mut i = l.iter();
assert_eq!(i.size_hint(), (0, Some(0)));
l.push(1);
assert_eq!(i.size_hint(), (1, Some(1)));
l.push(2);
assert_eq!(i.size_hint(), (2, Some(2)));
i.next();
assert_eq!(i.size_hint(), (1, Some(1)));
l.push(3);
assert_eq!(i.size_hint(), (2, Some(2)));
i.next();
assert_eq!(i.size_hint(), (1, Some(1)));
i.next();
assert_eq!(i.size_hint(), (0, Some(0)));
}
#[test]
fn empty_list() {
let n: AppendList<usize> = AppendList::new();
assert_eq!(n.len(), 0);
assert_eq!(n.get(0), None);
let d: AppendList<usize> = AppendList::default();
assert_eq!(d.len(), 0);
assert_eq!(d.get(0), None);
}
#[test]
fn thousand_item_list() {
test_big_list(1_000);
}
#[test]
#[ignore]
fn million_item_list() {
test_big_list(1_000_000);
}
fn test_big_list(size: usize) {
let l = AppendList::new();
let mut refs = Vec::new();
for i in 0..size {
assert_eq!(l.len(), i);
l.push(i);
refs.push(l[i]);
assert_eq!(l.len(), i + 1);
}
for i in 0..size {
assert_eq!(Some(&refs[i]), l.get(i));
}
}
}
| len | identifier_name |
appendlist.rs | use std::cell::{Cell, UnsafeCell};
use std::fmt::{self, Debug};
use std::iter::FromIterator;
use std::ops::Index;
use crate::common::{chunk_size, chunk_start, index_chunk};
/// A list that can be appended to while elements are borrowed
///
/// This looks like a fairly bare-bones list API, except that it has a `push`
/// method that works on non-`mut` lists. It is safe to hold references to
/// values inside this list and push a new value onto the end.
///
/// Additionally, the list has O(1) index and O(1) push (not amortized!).
///
/// For example, this would be illegal with a `Vec`:
///
/// ```
/// use appendlist::AppendList;
///
/// let list = AppendList::new();
///
/// list.push(1);
/// let first_item = &list[0];
/// list.push(2);
/// let second_item = &list[1];
///
/// assert_eq!(*first_item, list[0]);
/// assert_eq!(*second_item, list[1]);
/// ```
///
/// # Implementation details
///
/// This section is not necessary to use the API, it just describes the underlying
/// allocation and indexing strategies.
///
/// The list is a `Vec` of *chunks*. Each chunk is itself a `Vec<T>`. The list
/// will fill up a chunk, then allocate a new chunk with its full capacity.
/// Because the capacity of a given chunk never changes, the underlying `Vec<T>`
/// never reallocates, so references to that chunk are never invalidated. Each
/// chunk is twice the size of the previous chunk, so there will never be more
/// than O(log(n)) chunks.
///
/// Constant-time indexing is achieved because the chunk ID of a particular index
/// can be quickly calculated: if the first chunk has size c, index i will be
/// located in chunk floor(log2(i + c) - log2(c)). If c is a power of 2, this
/// is equivalent to floor(log2(i + c)) - floor(log2(c)), and a very fast floor
/// log2 algorithm can be derived from `usize::leading_zeros()`.
pub struct AppendList<T> {
chunks: UnsafeCell<Vec<Vec<T>>>,
len: Cell<usize>,
}
impl<T> AppendList<T> {
/// Wrapper to get the list of chunks immutably
fn chunks(&self) -> &[Vec<T>] {
unsafe { &*self.chunks.get() }
}
/// In test builds, check all of the unsafe invariants
///
/// In release builds, no-op
fn check_invariants(&self) {
#[cfg(test)]
{
if self.len.get() > 0 {
// Correct number of chunks
assert_eq!(index_chunk(self.len.get() - 1), self.chunks().len() - 1);
// Every chunk holds enough items
for chunk_id in 0..self.chunks().len() {
assert!(chunk_size(chunk_id) <= self.chunks()[chunk_id].capacity());
}
// Intermediate chunks are full
for chunk_id in 0..self.chunks().len() - 1 {
assert_eq!(chunk_size(chunk_id), self.chunks()[chunk_id].len());
}
// Last chunk is correct length
assert_eq!(
self.chunks().last().unwrap().len(),
self.len.get() - chunk_start(self.chunks().len() - 1)
);
} else {
// No chunks
assert_eq!(0, self.chunks().len());
}
}
}
/// Create a new `AppendList`
pub fn new() -> Self {
Self {
chunks: UnsafeCell::new(Vec::new()),
len: Cell::new(0),
}
}
/// Append an item to the end
///
/// Note that this does not require `mut`.
pub fn push(&self, item: T) {
self.check_invariants();
// Unsafe code alert!
//
// Preserve the following invariants:
// - Only the last chunk may be modified
// - A chunk cannot ever be reallocated
// - len must reflect the length
//
// Invariants are checked in the check_invariants method
let mut_chunks = unsafe { &mut *self.chunks.get() };
let new_index = self.len.get();
let chunk_id = index_chunk(new_index);
if chunk_id < mut_chunks.len() {
// We should always be inserting into the last chunk
debug_assert_eq!(chunk_id, mut_chunks.len() - 1);
// Insert into the appropriate chunk
let chunk = &mut mut_chunks[chunk_id];
// The chunk must not be reallocated! Save the pre-insertion capacity
// so we can check it later (debug builds only)
#[cfg(test)]
let prev_capacity = chunk.capacity();
// Do the insertion
chunk.push(item);
// Check that the capacity didn't change (debug builds only)
#[cfg(test)]
assert_eq!(prev_capacity, chunk.capacity());
} else {
// Need to allocate a new chunk
// New chunk should be the immediate next chunk
debug_assert_eq!(chunk_id, mut_chunks.len());
// New chunk must be big enough
let mut new_chunk = Vec::with_capacity(chunk_size(chunk_id));
debug_assert!(new_chunk.capacity() >= chunk_size(chunk_id));
new_chunk.push(item);
mut_chunks.push(new_chunk);
}
// Increment the length
self.len.set(self.len.get() + 1);
self.check_invariants();
}
/// Get the length of the list
pub fn len(&self) -> usize {
self.check_invariants();
self.len.get()
}
/// Get an item from the list, if it is in bounds
///
/// Returns `None` if the `index` is out-of-bounds. Note that you can also
/// index with `[]`, which will panic on out-of-bounds.
pub fn get(&self, index: usize) -> Option<&T> {
self.check_invariants();
if index >= self.len() {
return None;
}
let chunk_id = index_chunk(index);
let chunk_start = chunk_start(chunk_id);
return Some(&self.chunks()[chunk_id][index - chunk_start]);
}
/// Get an iterator over the list
pub fn iter(&self) -> Iter<T> {
self.check_invariants();
Iter {
list: &self,
index: 0,
}
}
}
impl<T> Default for AppendList<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> Index<usize> for AppendList<T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
self.get(index)
.expect("AppendList indexed beyond its length")
}
}
impl<T> FromIterator<T> for AppendList<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let list = Self::new();
for item in iter {
list.push(item);
}
list
}
}
impl<'l, T> IntoIterator for &'l AppendList<T> {
type Item = &'l T;
type IntoIter = Iter<'l, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<T: PartialEq> PartialEq for AppendList<T> {
fn eq(&self, other: &AppendList<T>) -> bool {
let mut s = self.iter();
let mut o = other.iter();
loop {
match (s.next(), o.next()) {
(Some(a), Some(b)) if a == b => {},
(None, None) => return true,
_ => return false,
}
}
}
}
impl<T: Debug> Debug for AppendList<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_list().entries(self.iter()).finish()
}
}
pub struct Iter<'l, T> {
list: &'l AppendList<T>, | impl<'l, T> Iterator for Iter<'l, T> {
type Item = &'l T;
fn next(&mut self) -> Option<Self::Item> {
let item = self.list.get(self.index);
self.index += 1;
item
}
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.list.len() - self.index;
(remaining, Some(remaining))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn from_iterator() {
let l: AppendList<i32> = (0..100).collect();
for i in 0..100 {
assert_eq!(l[i], i as i32);
}
}
#[test]
fn iterator() {
let l: AppendList<i32> = (0..100).collect();
let mut i1 = l.iter();
let mut i2 = l.into_iter();
for item in 0..100 {
assert_eq!(i1.next(), Some(&item));
assert_eq!(i2.next(), Some(&item));
}
assert_eq!(i1.next(), None);
assert_eq!(i2.next(), None);
}
#[test]
fn equality() {
let a = AppendList::new();
let b = AppendList::new();
assert_eq!(a, b);
a.push("foo");
assert_ne!(a, b);
b.push("foo");
assert_eq!(a, b);
a.push("bar");
a.push("baz");
assert_ne!(a, b);
}
#[test]
fn iterator_size_hint() {
let l: AppendList<i32> = AppendList::new();
let mut i = l.iter();
assert_eq!(i.size_hint(), (0, Some(0)));
l.push(1);
assert_eq!(i.size_hint(), (1, Some(1)));
l.push(2);
assert_eq!(i.size_hint(), (2, Some(2)));
i.next();
assert_eq!(i.size_hint(), (1, Some(1)));
l.push(3);
assert_eq!(i.size_hint(), (2, Some(2)));
i.next();
assert_eq!(i.size_hint(), (1, Some(1)));
i.next();
assert_eq!(i.size_hint(), (0, Some(0)));
}
#[test]
fn empty_list() {
let n: AppendList<usize> = AppendList::new();
assert_eq!(n.len(), 0);
assert_eq!(n.get(0), None);
let d: AppendList<usize> = AppendList::default();
assert_eq!(d.len(), 0);
assert_eq!(d.get(0), None);
}
#[test]
fn thousand_item_list() {
test_big_list(1_000);
}
#[test]
#[ignore]
fn million_item_list() {
test_big_list(1_000_000);
}
fn test_big_list(size: usize) {
let l = AppendList::new();
let mut refs = Vec::new();
for i in 0..size {
assert_eq!(l.len(), i);
l.push(i);
refs.push(l[i]);
assert_eq!(l.len(), i + 1);
}
for i in 0..size {
assert_eq!(Some(&refs[i]), l.get(i));
}
}
} | index: usize,
}
| random_line_split |
unified.rs | use std::cmp;
use std::collections::HashSet;
use std::convert::{TryFrom, TryInto};
use std::error::Error;
use std::fmt;
use std::io::Write;
use zcash_encoding::CompactSize;
use crate::kind;
/// The HRP for a Bech32m-encoded mainnet Unified Address.
///
/// Defined in [ZIP 316][zip-0316].
///
/// [zip-0316]: https://zips.z.cash/zip-0316
pub(crate) const MAINNET: &str = "u";
/// The HRP for a Bech32m-encoded testnet Unified Address.
///
/// Defined in [ZIP 316][zip-0316].
///
/// [zip-0316]: https://zips.z.cash/zip-0316
pub(crate) const TESTNET: &str = "utest";
/// The HRP for a Bech32m-encoded regtest Unified Address.
pub(crate) const REGTEST: &str = "uregtest";
const PADDING_LEN: usize = 16;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Typecode {
P2pkh,
P2sh,
Sapling,
Orchard,
Unknown(u32),
}
impl Ord for Typecode {
fn cmp(&self, other: &Self) -> cmp::Ordering {
match (self, other) {
// Trivial equality checks.
(Self::Orchard, Self::Orchard)
| (Self::Sapling, Self::Sapling)
| (Self::P2sh, Self::P2sh)
| (Self::P2pkh, Self::P2pkh) => cmp::Ordering::Equal,
// We don't know for certain the preference order of unknown receivers, but it
// is likely that the higher typecode has higher preference. The exact order
// doesn't really matter, as unknown receivers have lower preference than
// known receivers.
(Self::Unknown(a), Self::Unknown(b)) => b.cmp(a),
// For the remaining cases, we rely on `match` always choosing the first arm
// with a matching pattern. Patterns below are listed in priority order:
(Self::Orchard, _) => cmp::Ordering::Less,
(_, Self::Orchard) => cmp::Ordering::Greater,
(Self::Sapling, _) => cmp::Ordering::Less,
(_, Self::Sapling) => cmp::Ordering::Greater,
(Self::P2sh, _) => cmp::Ordering::Less,
(_, Self::P2sh) => cmp::Ordering::Greater,
(Self::P2pkh, _) => cmp::Ordering::Less,
(_, Self::P2pkh) => cmp::Ordering::Greater,
}
}
}
impl PartialOrd for Typecode {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl TryFrom<u32> for Typecode {
type Error = ParseError;
fn try_from(typecode: u32) -> Result<Self, Self::Error> {
match typecode {
0x00 => Ok(Typecode::P2pkh),
0x01 => Ok(Typecode::P2sh),
0x02 => Ok(Typecode::Sapling),
0x03 => Ok(Typecode::Orchard),
0x04..=0x02000000 => Ok(Typecode::Unknown(typecode)),
0x02000001..=u32::MAX => Err(ParseError::InvalidTypecodeValue(typecode as u64)),
}
}
}
impl From<Typecode> for u32 {
fn from(t: Typecode) -> Self {
match t {
Typecode::P2pkh => 0x00,
Typecode::P2sh => 0x01,
Typecode::Sapling => 0x02,
Typecode::Orchard => 0x03,
Typecode::Unknown(typecode) => typecode,
}
}
}
impl Typecode {
fn is_transparent(&self) -> bool {
// Unknown typecodes are treated as not transparent for the purpose of disallowing
// only-transparent UAs, which can be represented with existing address encodings.
matches!(self, Typecode::P2pkh | Typecode::P2sh)
}
}
/// An error while attempting to parse a string as a Zcash address.
#[derive(Debug, PartialEq)]
pub enum ParseError {
/// The unified address contains both P2PKH and P2SH receivers.
BothP2phkAndP2sh,
/// The unified address contains a duplicated typecode.
DuplicateTypecode(Typecode),
/// The parsed typecode exceeds the maximum allowed CompactSize value.
InvalidTypecodeValue(u64),
/// The string is an invalid encoding.
InvalidEncoding(String),
/// The unified address only contains transparent receivers.
OnlyTransparent,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ParseError::BothP2phkAndP2sh => write!(f, "UA contains both P2PKH and P2SH receivers"),
ParseError::DuplicateTypecode(c) => write!(f, "Duplicate typecode {}", u32::from(*c)),
ParseError::InvalidTypecodeValue(v) => write!(f, "Typecode value out of range {}", v),
ParseError::InvalidEncoding(msg) => write!(f, "Invalid encoding: {}", msg),
ParseError::OnlyTransparent => write!(f, "UA only contains transparent receivers"),
}
}
}
impl Error for ParseError {}
/// The set of known Receivers for Unified Addresses.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Receiver {
Orchard([u8; 43]),
Sapling(kind::sapling::Data),
P2pkh(kind::p2pkh::Data),
P2sh(kind::p2sh::Data),
Unknown { typecode: u32, data: Vec<u8> },
}
impl cmp::Ord for Receiver {
fn cmp(&self, other: &Self) -> cmp::Ordering {
match self.typecode().cmp(&other.typecode()) {
cmp::Ordering::Equal => self.addr().cmp(other.addr()),
res => res,
}
}
}
impl cmp::PartialOrd for Receiver {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl TryFrom<(u32, &[u8])> for Receiver {
type Error = ParseError;
fn try_from((typecode, addr): (u32, &[u8])) -> Result<Self, Self::Error> {
match typecode.try_into()? {
Typecode::P2pkh => addr.try_into().map(Receiver::P2pkh),
Typecode::P2sh => addr.try_into().map(Receiver::P2sh),
Typecode::Sapling => addr.try_into().map(Receiver::Sapling),
Typecode::Orchard => addr.try_into().map(Receiver::Orchard),
Typecode::Unknown(_) => Ok(Receiver::Unknown {
typecode,
data: addr.to_vec(),
}),
}
.map_err(|e| {
ParseError::InvalidEncoding(format!("Invalid address for typecode {}: {}", typecode, e))
})
}
}
impl Receiver {
fn typecode(&self) -> Typecode {
match self {
Receiver::P2pkh(_) => Typecode::P2pkh,
Receiver::P2sh(_) => Typecode::P2sh,
Receiver::Sapling(_) => Typecode::Sapling,
Receiver::Orchard(_) => Typecode::Orchard,
Receiver::Unknown { typecode, .. } => Typecode::Unknown(*typecode),
}
}
fn addr(&self) -> &[u8] {
match self {
Receiver::P2pkh(data) => data,
Receiver::P2sh(data) => data,
Receiver::Sapling(data) => data,
Receiver::Orchard(data) => data,
Receiver::Unknown { data, .. } => data,
}
}
}
/// A Unified Address.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Address(pub(crate) Vec<Receiver>);
impl TryFrom<(&str, &[u8])> for Address {
type Error = ParseError;
fn try_from((hrp, buf): (&str, &[u8])) -> Result<Self, Self::Error> {
fn read_receiver(mut cursor: &mut std::io::Cursor<&[u8]>) -> Result<Receiver, ParseError> {
let typecode = CompactSize::read(&mut cursor)
.map(|v| u32::try_from(v).expect("CompactSize::read enforces MAX_SIZE limit"))
.map_err(|e| {
ParseError::InvalidEncoding(format!(
"Failed to deserialize CompactSize-encoded typecode {}",
e
))
})?;
let length = CompactSize::read(&mut cursor).map_err(|e| {
ParseError::InvalidEncoding(format!(
"Failed to deserialize CompactSize-encoded length {}",
e
))
})?;
let addr_end = cursor.position().checked_add(length).ok_or_else(|| {
ParseError::InvalidEncoding(format!(
"Length value {} caused an overflow error",
length
))
})?;
let buf = cursor.get_ref();
if (buf.len() as u64) < addr_end {
return Err(ParseError::InvalidEncoding(format!(
"Truncated: unable to read {} bytes of address data",
length
)));
}
let result = Receiver::try_from((
typecode,
&buf[cursor.position() as usize..addr_end as usize],
));
cursor.set_position(addr_end);
result
}
let encoded = f4jumble::f4jumble_inv(buf)
.ok_or_else(|| ParseError::InvalidEncoding("F4Jumble decoding failed".to_owned()))?;
// Validate and strip trailing padding bytes.
if hrp.len() > 16 {
return Err(ParseError::InvalidEncoding(
"Invalid human-readable part".to_owned(),
));
}
let mut expected_padding = [0; PADDING_LEN];
expected_padding[0..hrp.len()].copy_from_slice(hrp.as_bytes());
let encoded = match encoded.split_at(encoded.len() - PADDING_LEN) {
(encoded, tail) if tail == expected_padding => Ok(encoded),
_ => Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned(),
)),
}?;
let mut cursor = std::io::Cursor::new(encoded);
let mut result = vec![];
while cursor.position() < encoded.len().try_into().unwrap() {
result.push(read_receiver(&mut cursor)?);
}
assert_eq!(cursor.position(), encoded.len().try_into().unwrap());
result.try_into()
}
}
impl TryFrom<Vec<Receiver>> for Address {
type Error = ParseError;
fn try_from(receivers: Vec<Receiver>) -> Result<Self, Self::Error> {
let mut typecodes = HashSet::with_capacity(receivers.len());
for receiver in &receivers {
let t = receiver.typecode();
if typecodes.contains(&t) {
return Err(ParseError::DuplicateTypecode(t));
} else if (t == Typecode::P2pkh && typecodes.contains(&Typecode::P2sh))
|| (t == Typecode::P2sh && typecodes.contains(&Typecode::P2pkh))
| else {
typecodes.insert(t);
}
}
if typecodes.iter().all(|t| t.is_transparent()) {
Err(ParseError::OnlyTransparent)
} else {
// All checks pass!
Ok(Address(receivers))
}
}
}
impl Address {
/// Returns the raw encoding of this Unified Address.
pub(crate) fn to_bytes(&self, hrp: &str) -> Vec<u8> {
assert!(hrp.len() <= PADDING_LEN);
let mut writer = std::io::Cursor::new(Vec::new());
for receiver in &self.0 {
let addr = receiver.addr();
CompactSize::write(
&mut writer,
<u32>::from(receiver.typecode()).try_into().unwrap(),
)
.unwrap();
CompactSize::write(&mut writer, addr.len()).unwrap();
writer.write_all(addr).unwrap();
}
let mut padding = [0u8; PADDING_LEN];
padding[0..hrp.len()].copy_from_slice(&hrp.as_bytes());
writer.write_all(&padding).unwrap();
f4jumble::f4jumble(&writer.into_inner()).unwrap()
}
/// Returns the receivers contained within this address, sorted in preference order.
pub fn receivers(&self) -> Vec<Receiver> {
let mut receivers = self.0.clone();
// Unstable sorting is fine, because all receivers are guaranteed by construction
// to have distinct typecodes.
receivers.sort_unstable_by_key(|r| r.typecode());
receivers
}
/// Returns the receivers contained within this address, in the order they were
/// parsed from the string encoding.
///
/// This API is for advanced usage; in most cases you should use `Address::receivers`.
pub fn receivers_as_parsed(&self) -> &[Receiver] {
&self.0
}
}
#[cfg(test)]
pub(crate) mod test_vectors;
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use std::convert::TryFrom;
use proptest::{
array::{uniform11, uniform20, uniform32},
prelude::*,
};
use super::{Address, ParseError, Receiver, Typecode, MAINNET, REGTEST, TESTNET};
prop_compose! {
fn uniform43()(a in uniform11(0u8..), b in uniform32(0u8..)) -> [u8; 43] {
let mut c = [0; 43];
c[..11].copy_from_slice(&a);
c[11..].copy_from_slice(&b);
c
}
}
fn arb_shielded_receiver() -> BoxedStrategy<Receiver> {
prop_oneof![
uniform43().prop_map(Receiver::Sapling),
uniform43().prop_map(Receiver::Orchard),
]
.boxed()
}
fn arb_transparent_receiver() -> BoxedStrategy<Receiver> {
prop_oneof![
uniform20(0u8..).prop_map(Receiver::P2pkh),
uniform20(0u8..).prop_map(Receiver::P2sh),
]
.boxed()
}
prop_compose! {
fn arb_unified_address()(
shielded in prop::collection::hash_set(arb_shielded_receiver(), 1..2),
transparent in prop::option::of(arb_transparent_receiver()),
) -> Address {
Address(shielded.into_iter().chain(transparent).collect())
}
}
proptest! {
#[test]
fn ua_roundtrip(
hrp in prop_oneof![MAINNET, TESTNET, REGTEST],
ua in arb_unified_address(),
) {
let bytes = ua.to_bytes(&hrp);
let decoded = Address::try_from((hrp.as_str(), &bytes[..]));
prop_assert_eq!(decoded, Ok(ua));
}
}
#[test]
fn padding() {
// The test cases below use `Address(vec![Receiver::Orchard([1; 43])])` as base.
// Invalid padding ([0xff; 16] instead of [b'u', 0x00, 0x00, 0x00...])
let invalid_padding = [
0xe6, 0x59, 0xd1, 0xed, 0xf7, 0x4b, 0xe3, 0x5e, 0x5a, 0x54, 0x0e, 0x41, 0x5d, 0x2f,
0x0c, 0x0d, 0x33, 0x42, 0xbd, 0xbe, 0x9f, 0x82, 0x62, 0x01, 0xc1, 0x1b, 0xd4, 0x1e,
0x42, 0x47, 0x86, 0x23, 0x05, 0x4b, 0x98, 0xd7, 0x76, 0x86, 0xa5, 0xe3, 0x1b, 0xd3,
0x03, 0xca, 0x24, 0x44, 0x8e, 0x72, 0xc1, 0x4a, 0xc6, 0xbf, 0x3f, 0x2b, 0xce, 0xa7,
0x7b, 0x28, 0x69, 0xc9, 0x84,
];
assert_eq!(
Address::try_from((MAINNET, &invalid_padding[..])),
Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned()
))
);
// Short padding (padded to 15 bytes instead of 16)
let truncated_padding = [
0x9a, 0x56, 0x12, 0xa3, 0x43, 0x45, 0xe0, 0x82, 0x6c, 0xac, 0x24, 0x8b, 0x3b, 0x45,
0x72, 0x9a, 0x53, 0xd5, 0xf8, 0xda, 0xec, 0x07, 0x7c, 0xba, 0x9f, 0xa8, 0xd2, 0x97,
0x5b, 0xda, 0x73, 0x1b, 0xd2, 0xd1, 0x32, 0x6b, 0x7b, 0x36, 0xdd, 0x57, 0x84, 0x2a,
0xa0, 0x21, 0x23, 0x89, 0x73, 0x85, 0xe1, 0x4b, 0x3e, 0x95, 0xb7, 0xd4, 0x67, 0xbc,
0x4b, 0x31, 0xee, 0x5a,
];
assert_eq!(
Address::try_from((MAINNET, &truncated_padding[..])),
Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned()
))
);
}
#[test]
fn truncated() {
// The test cases below start from an encoding of
// `Address(vec![Receiver::Orchard([1; 43]), Receiver::Sapling([2; 43])])`
// with the receiver data truncated, but valid padding.
// - Missing the last data byte of the Sapling receiver.
let truncated_sapling_data = [
0xaa, 0xb0, 0x6e, 0x7b, 0x26, 0x7a, 0x22, 0x17, 0x39, 0xfa, 0x07, 0x69, 0xe9, 0x32,
0x2b, 0xac, 0x8c, 0x9e, 0x5e, 0x8a, 0xd9, 0x24, 0x06, 0x5a, 0x13, 0x79, 0x3a, 0x8d,
0xb4, 0x52, 0xfa, 0x18, 0x4e, 0x33, 0x4d, 0x8c, 0x17, 0x77, 0x4d, 0x63, 0x69, 0x34,
0x22, 0x70, 0x3a, 0xea, 0x30, 0x82, 0x5a, 0x6b, 0x37, 0xd1, 0x0d, 0xbe, 0x20, 0xab,
0x82, 0x86, 0x98, 0x34, 0x6a, 0xd8, 0x45, 0x40, 0xd0, 0x25, 0x60, 0xbf, 0x1e, 0xb6,
0xeb, 0x06, 0x85, 0x70, 0x4c, 0x42, 0xbc, 0x19, 0x14, 0xef, 0x7a, 0x05, 0xa0, 0x71,
0xb2, 0x63, 0x80, 0xbb, 0xdc, 0x12, 0x08, 0x48, 0x28, 0x8f, 0x1c, 0x9e, 0xc3, 0x42,
0xc6, 0x5e, 0x68, 0xa2, 0x78, 0x6c, 0x9e,
];
assert_matches!(
Address::try_from((MAINNET, &truncated_sapling_data[..])),
Err(ParseError::InvalidEncoding(_))
);
// - Truncated after the typecode of the Sapling receiver.
let truncated_after_sapling_typecode = [
0x87, 0x7a, 0xdf, 0x79, 0x6b, 0xe3, 0xb3, 0x40, 0xef, 0xe4, 0x5d, 0xc2, 0x91, 0xa2,
0x81, 0xfc, 0x7d, 0x76, 0xbb, 0xb0, 0x58, 0x98, 0x53, 0x59, 0xd3, 0x3f, 0xbc, 0x4b,
0x86, 0x59, 0x66, 0x62, 0x75, 0x92, 0xba, 0xcc, 0x31, 0x1e, 0x60, 0x02, 0x3b, 0xd8,
0x4c, 0xdf, 0x36, 0xa1, 0xac, 0x82, 0x57, 0xed, 0x0c, 0x98, 0x49, 0x8f, 0x49, 0x7e,
0xe6, 0x70, 0x36, 0x5b, 0x7b, 0x9e,
];
assert_matches!(
Address::try_from((MAINNET, &truncated_after_sapling_typecode[..])),
Err(ParseError::InvalidEncoding(_))
);
}
#[test]
fn duplicate_typecode() {
// Construct and serialize an invalid UA.
let ua = Address(vec![Receiver::Sapling([1; 43]), Receiver::Sapling([2; 43])]);
let encoded = ua.to_bytes(MAINNET);
assert_eq!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::DuplicateTypecode(Typecode::Sapling))
);
}
#[test]
fn p2pkh_and_p2sh() {
// Construct and serialize an invalid UA.
let ua = Address(vec![Receiver::P2pkh([0; 20]), Receiver::P2sh([0; 20])]);
let encoded = ua.to_bytes(MAINNET);
assert_eq!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::BothP2phkAndP2sh)
);
}
#[test]
fn only_transparent() {
// Encoding of `Address(vec![Receiver::P2pkh([0; 20])])`.
let encoded = vec![
0xf0, 0x9e, 0x9d, 0x6e, 0xf5, 0xa6, 0xac, 0x16, 0x50, 0xf0, 0xdb, 0xe1, 0x2c, 0xa5,
0x36, 0x22, 0xa2, 0x04, 0x89, 0x86, 0xe9, 0x6a, 0x9b, 0xf3, 0xff, 0x6d, 0x2f, 0xe6,
0xea, 0xdb, 0xc5, 0x20, 0x62, 0xf9, 0x6f, 0xa9, 0x86, 0xcc,
];
// We can't actually exercise this error, because at present the only transparent
// receivers we can use are P2PKH and P2SH (which cannot be used together), and
// with only one of them we don't have sufficient data for F4Jumble (so we hit a
// different error).
assert_matches!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::InvalidEncoding(_))
);
}
#[test]
fn receivers_are_sorted() {
// Construct a UA with receivers in an unsorted order.
let ua = Address(vec![
Receiver::P2pkh([0; 20]),
Receiver::Orchard([0; 43]),
Receiver::Unknown {
typecode: 0xff,
data: vec![],
},
Receiver::Sapling([0; 43]),
]);
// `Address::receivers` sorts the receivers in priority order.
assert_eq!(
ua.receivers(),
vec![
Receiver::Orchard([0; 43]),
Receiver::Sapling([0; 43]),
Receiver::P2pkh([0; 20]),
Receiver::Unknown {
typecode: 0xff,
data: vec![],
},
]
)
}
}
| {
return Err(ParseError::BothP2phkAndP2sh);
} | conditional_block |
unified.rs | use std::cmp;
use std::collections::HashSet;
use std::convert::{TryFrom, TryInto};
use std::error::Error;
use std::fmt;
use std::io::Write;
use zcash_encoding::CompactSize;
use crate::kind;
/// The HRP for a Bech32m-encoded mainnet Unified Address.
///
/// Defined in [ZIP 316][zip-0316].
///
/// [zip-0316]: https://zips.z.cash/zip-0316
pub(crate) const MAINNET: &str = "u";
/// The HRP for a Bech32m-encoded testnet Unified Address.
///
/// Defined in [ZIP 316][zip-0316].
///
/// [zip-0316]: https://zips.z.cash/zip-0316
pub(crate) const TESTNET: &str = "utest";
/// The HRP for a Bech32m-encoded regtest Unified Address.
pub(crate) const REGTEST: &str = "uregtest";
const PADDING_LEN: usize = 16;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Typecode {
P2pkh,
P2sh,
Sapling,
Orchard,
Unknown(u32),
}
impl Ord for Typecode {
fn cmp(&self, other: &Self) -> cmp::Ordering {
match (self, other) {
// Trivial equality checks.
(Self::Orchard, Self::Orchard)
| (Self::Sapling, Self::Sapling)
| (Self::P2sh, Self::P2sh)
| (Self::P2pkh, Self::P2pkh) => cmp::Ordering::Equal,
// We don't know for certain the preference order of unknown receivers, but it
// is likely that the higher typecode has higher preference. The exact order
// doesn't really matter, as unknown receivers have lower preference than
// known receivers.
(Self::Unknown(a), Self::Unknown(b)) => b.cmp(a),
// For the remaining cases, we rely on `match` always choosing the first arm
// with a matching pattern. Patterns below are listed in priority order:
(Self::Orchard, _) => cmp::Ordering::Less,
(_, Self::Orchard) => cmp::Ordering::Greater,
(Self::Sapling, _) => cmp::Ordering::Less,
(_, Self::Sapling) => cmp::Ordering::Greater,
(Self::P2sh, _) => cmp::Ordering::Less,
(_, Self::P2sh) => cmp::Ordering::Greater,
(Self::P2pkh, _) => cmp::Ordering::Less,
(_, Self::P2pkh) => cmp::Ordering::Greater,
}
}
}
impl PartialOrd for Typecode {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl TryFrom<u32> for Typecode {
type Error = ParseError;
fn try_from(typecode: u32) -> Result<Self, Self::Error> {
match typecode {
0x00 => Ok(Typecode::P2pkh),
0x01 => Ok(Typecode::P2sh),
0x02 => Ok(Typecode::Sapling),
0x03 => Ok(Typecode::Orchard),
0x04..=0x02000000 => Ok(Typecode::Unknown(typecode)),
0x02000001..=u32::MAX => Err(ParseError::InvalidTypecodeValue(typecode as u64)),
}
}
}
impl From<Typecode> for u32 {
fn from(t: Typecode) -> Self {
match t {
Typecode::P2pkh => 0x00,
Typecode::P2sh => 0x01,
Typecode::Sapling => 0x02,
Typecode::Orchard => 0x03,
Typecode::Unknown(typecode) => typecode,
}
}
}
impl Typecode {
fn is_transparent(&self) -> bool {
// Unknown typecodes are treated as not transparent for the purpose of disallowing
// only-transparent UAs, which can be represented with existing address encodings.
matches!(self, Typecode::P2pkh | Typecode::P2sh)
}
}
/// An error while attempting to parse a string as a Zcash address.
#[derive(Debug, PartialEq)]
pub enum | {
/// The unified address contains both P2PKH and P2SH receivers.
BothP2phkAndP2sh,
/// The unified address contains a duplicated typecode.
DuplicateTypecode(Typecode),
/// The parsed typecode exceeds the maximum allowed CompactSize value.
InvalidTypecodeValue(u64),
/// The string is an invalid encoding.
InvalidEncoding(String),
/// The unified address only contains transparent receivers.
OnlyTransparent,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ParseError::BothP2phkAndP2sh => write!(f, "UA contains both P2PKH and P2SH receivers"),
ParseError::DuplicateTypecode(c) => write!(f, "Duplicate typecode {}", u32::from(*c)),
ParseError::InvalidTypecodeValue(v) => write!(f, "Typecode value out of range {}", v),
ParseError::InvalidEncoding(msg) => write!(f, "Invalid encoding: {}", msg),
ParseError::OnlyTransparent => write!(f, "UA only contains transparent receivers"),
}
}
}
impl Error for ParseError {}
/// The set of known Receivers for Unified Addresses.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Receiver {
Orchard([u8; 43]),
Sapling(kind::sapling::Data),
P2pkh(kind::p2pkh::Data),
P2sh(kind::p2sh::Data),
Unknown { typecode: u32, data: Vec<u8> },
}
impl cmp::Ord for Receiver {
fn cmp(&self, other: &Self) -> cmp::Ordering {
match self.typecode().cmp(&other.typecode()) {
cmp::Ordering::Equal => self.addr().cmp(other.addr()),
res => res,
}
}
}
impl cmp::PartialOrd for Receiver {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl TryFrom<(u32, &[u8])> for Receiver {
type Error = ParseError;
fn try_from((typecode, addr): (u32, &[u8])) -> Result<Self, Self::Error> {
match typecode.try_into()? {
Typecode::P2pkh => addr.try_into().map(Receiver::P2pkh),
Typecode::P2sh => addr.try_into().map(Receiver::P2sh),
Typecode::Sapling => addr.try_into().map(Receiver::Sapling),
Typecode::Orchard => addr.try_into().map(Receiver::Orchard),
Typecode::Unknown(_) => Ok(Receiver::Unknown {
typecode,
data: addr.to_vec(),
}),
}
.map_err(|e| {
ParseError::InvalidEncoding(format!("Invalid address for typecode {}: {}", typecode, e))
})
}
}
impl Receiver {
fn typecode(&self) -> Typecode {
match self {
Receiver::P2pkh(_) => Typecode::P2pkh,
Receiver::P2sh(_) => Typecode::P2sh,
Receiver::Sapling(_) => Typecode::Sapling,
Receiver::Orchard(_) => Typecode::Orchard,
Receiver::Unknown { typecode, .. } => Typecode::Unknown(*typecode),
}
}
fn addr(&self) -> &[u8] {
match self {
Receiver::P2pkh(data) => data,
Receiver::P2sh(data) => data,
Receiver::Sapling(data) => data,
Receiver::Orchard(data) => data,
Receiver::Unknown { data, .. } => data,
}
}
}
/// A Unified Address.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Address(pub(crate) Vec<Receiver>);
impl TryFrom<(&str, &[u8])> for Address {
type Error = ParseError;
fn try_from((hrp, buf): (&str, &[u8])) -> Result<Self, Self::Error> {
fn read_receiver(mut cursor: &mut std::io::Cursor<&[u8]>) -> Result<Receiver, ParseError> {
let typecode = CompactSize::read(&mut cursor)
.map(|v| u32::try_from(v).expect("CompactSize::read enforces MAX_SIZE limit"))
.map_err(|e| {
ParseError::InvalidEncoding(format!(
"Failed to deserialize CompactSize-encoded typecode {}",
e
))
})?;
let length = CompactSize::read(&mut cursor).map_err(|e| {
ParseError::InvalidEncoding(format!(
"Failed to deserialize CompactSize-encoded length {}",
e
))
})?;
let addr_end = cursor.position().checked_add(length).ok_or_else(|| {
ParseError::InvalidEncoding(format!(
"Length value {} caused an overflow error",
length
))
})?;
let buf = cursor.get_ref();
if (buf.len() as u64) < addr_end {
return Err(ParseError::InvalidEncoding(format!(
"Truncated: unable to read {} bytes of address data",
length
)));
}
let result = Receiver::try_from((
typecode,
&buf[cursor.position() as usize..addr_end as usize],
));
cursor.set_position(addr_end);
result
}
let encoded = f4jumble::f4jumble_inv(buf)
.ok_or_else(|| ParseError::InvalidEncoding("F4Jumble decoding failed".to_owned()))?;
// Validate and strip trailing padding bytes.
if hrp.len() > 16 {
return Err(ParseError::InvalidEncoding(
"Invalid human-readable part".to_owned(),
));
}
let mut expected_padding = [0; PADDING_LEN];
expected_padding[0..hrp.len()].copy_from_slice(hrp.as_bytes());
let encoded = match encoded.split_at(encoded.len() - PADDING_LEN) {
(encoded, tail) if tail == expected_padding => Ok(encoded),
_ => Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned(),
)),
}?;
let mut cursor = std::io::Cursor::new(encoded);
let mut result = vec![];
while cursor.position() < encoded.len().try_into().unwrap() {
result.push(read_receiver(&mut cursor)?);
}
assert_eq!(cursor.position(), encoded.len().try_into().unwrap());
result.try_into()
}
}
impl TryFrom<Vec<Receiver>> for Address {
type Error = ParseError;
fn try_from(receivers: Vec<Receiver>) -> Result<Self, Self::Error> {
let mut typecodes = HashSet::with_capacity(receivers.len());
for receiver in &receivers {
let t = receiver.typecode();
if typecodes.contains(&t) {
return Err(ParseError::DuplicateTypecode(t));
} else if (t == Typecode::P2pkh && typecodes.contains(&Typecode::P2sh))
|| (t == Typecode::P2sh && typecodes.contains(&Typecode::P2pkh))
{
return Err(ParseError::BothP2phkAndP2sh);
} else {
typecodes.insert(t);
}
}
if typecodes.iter().all(|t| t.is_transparent()) {
Err(ParseError::OnlyTransparent)
} else {
// All checks pass!
Ok(Address(receivers))
}
}
}
impl Address {
/// Returns the raw encoding of this Unified Address.
pub(crate) fn to_bytes(&self, hrp: &str) -> Vec<u8> {
assert!(hrp.len() <= PADDING_LEN);
let mut writer = std::io::Cursor::new(Vec::new());
for receiver in &self.0 {
let addr = receiver.addr();
CompactSize::write(
&mut writer,
<u32>::from(receiver.typecode()).try_into().unwrap(),
)
.unwrap();
CompactSize::write(&mut writer, addr.len()).unwrap();
writer.write_all(addr).unwrap();
}
let mut padding = [0u8; PADDING_LEN];
padding[0..hrp.len()].copy_from_slice(&hrp.as_bytes());
writer.write_all(&padding).unwrap();
f4jumble::f4jumble(&writer.into_inner()).unwrap()
}
/// Returns the receivers contained within this address, sorted in preference order.
pub fn receivers(&self) -> Vec<Receiver> {
let mut receivers = self.0.clone();
// Unstable sorting is fine, because all receivers are guaranteed by construction
// to have distinct typecodes.
receivers.sort_unstable_by_key(|r| r.typecode());
receivers
}
/// Returns the receivers contained within this address, in the order they were
/// parsed from the string encoding.
///
/// This API is for advanced usage; in most cases you should use `Address::receivers`.
pub fn receivers_as_parsed(&self) -> &[Receiver] {
&self.0
}
}
#[cfg(test)]
pub(crate) mod test_vectors;
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use std::convert::TryFrom;
use proptest::{
array::{uniform11, uniform20, uniform32},
prelude::*,
};
use super::{Address, ParseError, Receiver, Typecode, MAINNET, REGTEST, TESTNET};
prop_compose! {
fn uniform43()(a in uniform11(0u8..), b in uniform32(0u8..)) -> [u8; 43] {
let mut c = [0; 43];
c[..11].copy_from_slice(&a);
c[11..].copy_from_slice(&b);
c
}
}
fn arb_shielded_receiver() -> BoxedStrategy<Receiver> {
prop_oneof![
uniform43().prop_map(Receiver::Sapling),
uniform43().prop_map(Receiver::Orchard),
]
.boxed()
}
fn arb_transparent_receiver() -> BoxedStrategy<Receiver> {
prop_oneof![
uniform20(0u8..).prop_map(Receiver::P2pkh),
uniform20(0u8..).prop_map(Receiver::P2sh),
]
.boxed()
}
prop_compose! {
fn arb_unified_address()(
shielded in prop::collection::hash_set(arb_shielded_receiver(), 1..2),
transparent in prop::option::of(arb_transparent_receiver()),
) -> Address {
Address(shielded.into_iter().chain(transparent).collect())
}
}
proptest! {
#[test]
fn ua_roundtrip(
hrp in prop_oneof![MAINNET, TESTNET, REGTEST],
ua in arb_unified_address(),
) {
let bytes = ua.to_bytes(&hrp);
let decoded = Address::try_from((hrp.as_str(), &bytes[..]));
prop_assert_eq!(decoded, Ok(ua));
}
}
#[test]
fn padding() {
// The test cases below use `Address(vec![Receiver::Orchard([1; 43])])` as base.
// Invalid padding ([0xff; 16] instead of [b'u', 0x00, 0x00, 0x00...])
let invalid_padding = [
0xe6, 0x59, 0xd1, 0xed, 0xf7, 0x4b, 0xe3, 0x5e, 0x5a, 0x54, 0x0e, 0x41, 0x5d, 0x2f,
0x0c, 0x0d, 0x33, 0x42, 0xbd, 0xbe, 0x9f, 0x82, 0x62, 0x01, 0xc1, 0x1b, 0xd4, 0x1e,
0x42, 0x47, 0x86, 0x23, 0x05, 0x4b, 0x98, 0xd7, 0x76, 0x86, 0xa5, 0xe3, 0x1b, 0xd3,
0x03, 0xca, 0x24, 0x44, 0x8e, 0x72, 0xc1, 0x4a, 0xc6, 0xbf, 0x3f, 0x2b, 0xce, 0xa7,
0x7b, 0x28, 0x69, 0xc9, 0x84,
];
assert_eq!(
Address::try_from((MAINNET, &invalid_padding[..])),
Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned()
))
);
// Short padding (padded to 15 bytes instead of 16)
let truncated_padding = [
0x9a, 0x56, 0x12, 0xa3, 0x43, 0x45, 0xe0, 0x82, 0x6c, 0xac, 0x24, 0x8b, 0x3b, 0x45,
0x72, 0x9a, 0x53, 0xd5, 0xf8, 0xda, 0xec, 0x07, 0x7c, 0xba, 0x9f, 0xa8, 0xd2, 0x97,
0x5b, 0xda, 0x73, 0x1b, 0xd2, 0xd1, 0x32, 0x6b, 0x7b, 0x36, 0xdd, 0x57, 0x84, 0x2a,
0xa0, 0x21, 0x23, 0x89, 0x73, 0x85, 0xe1, 0x4b, 0x3e, 0x95, 0xb7, 0xd4, 0x67, 0xbc,
0x4b, 0x31, 0xee, 0x5a,
];
assert_eq!(
Address::try_from((MAINNET, &truncated_padding[..])),
Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned()
))
);
}
#[test]
fn truncated() {
// The test cases below start from an encoding of
// `Address(vec![Receiver::Orchard([1; 43]), Receiver::Sapling([2; 43])])`
// with the receiver data truncated, but valid padding.
// - Missing the last data byte of the Sapling receiver.
let truncated_sapling_data = [
0xaa, 0xb0, 0x6e, 0x7b, 0x26, 0x7a, 0x22, 0x17, 0x39, 0xfa, 0x07, 0x69, 0xe9, 0x32,
0x2b, 0xac, 0x8c, 0x9e, 0x5e, 0x8a, 0xd9, 0x24, 0x06, 0x5a, 0x13, 0x79, 0x3a, 0x8d,
0xb4, 0x52, 0xfa, 0x18, 0x4e, 0x33, 0x4d, 0x8c, 0x17, 0x77, 0x4d, 0x63, 0x69, 0x34,
0x22, 0x70, 0x3a, 0xea, 0x30, 0x82, 0x5a, 0x6b, 0x37, 0xd1, 0x0d, 0xbe, 0x20, 0xab,
0x82, 0x86, 0x98, 0x34, 0x6a, 0xd8, 0x45, 0x40, 0xd0, 0x25, 0x60, 0xbf, 0x1e, 0xb6,
0xeb, 0x06, 0x85, 0x70, 0x4c, 0x42, 0xbc, 0x19, 0x14, 0xef, 0x7a, 0x05, 0xa0, 0x71,
0xb2, 0x63, 0x80, 0xbb, 0xdc, 0x12, 0x08, 0x48, 0x28, 0x8f, 0x1c, 0x9e, 0xc3, 0x42,
0xc6, 0x5e, 0x68, 0xa2, 0x78, 0x6c, 0x9e,
];
assert_matches!(
Address::try_from((MAINNET, &truncated_sapling_data[..])),
Err(ParseError::InvalidEncoding(_))
);
// - Truncated after the typecode of the Sapling receiver.
let truncated_after_sapling_typecode = [
0x87, 0x7a, 0xdf, 0x79, 0x6b, 0xe3, 0xb3, 0x40, 0xef, 0xe4, 0x5d, 0xc2, 0x91, 0xa2,
0x81, 0xfc, 0x7d, 0x76, 0xbb, 0xb0, 0x58, 0x98, 0x53, 0x59, 0xd3, 0x3f, 0xbc, 0x4b,
0x86, 0x59, 0x66, 0x62, 0x75, 0x92, 0xba, 0xcc, 0x31, 0x1e, 0x60, 0x02, 0x3b, 0xd8,
0x4c, 0xdf, 0x36, 0xa1, 0xac, 0x82, 0x57, 0xed, 0x0c, 0x98, 0x49, 0x8f, 0x49, 0x7e,
0xe6, 0x70, 0x36, 0x5b, 0x7b, 0x9e,
];
assert_matches!(
Address::try_from((MAINNET, &truncated_after_sapling_typecode[..])),
Err(ParseError::InvalidEncoding(_))
);
}
#[test]
fn duplicate_typecode() {
// Construct and serialize an invalid UA.
let ua = Address(vec![Receiver::Sapling([1; 43]), Receiver::Sapling([2; 43])]);
let encoded = ua.to_bytes(MAINNET);
assert_eq!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::DuplicateTypecode(Typecode::Sapling))
);
}
#[test]
fn p2pkh_and_p2sh() {
// Construct and serialize an invalid UA.
let ua = Address(vec![Receiver::P2pkh([0; 20]), Receiver::P2sh([0; 20])]);
let encoded = ua.to_bytes(MAINNET);
assert_eq!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::BothP2phkAndP2sh)
);
}
#[test]
fn only_transparent() {
// Encoding of `Address(vec![Receiver::P2pkh([0; 20])])`.
let encoded = vec![
0xf0, 0x9e, 0x9d, 0x6e, 0xf5, 0xa6, 0xac, 0x16, 0x50, 0xf0, 0xdb, 0xe1, 0x2c, 0xa5,
0x36, 0x22, 0xa2, 0x04, 0x89, 0x86, 0xe9, 0x6a, 0x9b, 0xf3, 0xff, 0x6d, 0x2f, 0xe6,
0xea, 0xdb, 0xc5, 0x20, 0x62, 0xf9, 0x6f, 0xa9, 0x86, 0xcc,
];
// We can't actually exercise this error, because at present the only transparent
// receivers we can use are P2PKH and P2SH (which cannot be used together), and
// with only one of them we don't have sufficient data for F4Jumble (so we hit a
// different error).
assert_matches!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::InvalidEncoding(_))
);
}
#[test]
fn receivers_are_sorted() {
// Construct a UA with receivers in an unsorted order.
let ua = Address(vec![
Receiver::P2pkh([0; 20]),
Receiver::Orchard([0; 43]),
Receiver::Unknown {
typecode: 0xff,
data: vec![],
},
Receiver::Sapling([0; 43]),
]);
// `Address::receivers` sorts the receivers in priority order.
assert_eq!(
ua.receivers(),
vec![
Receiver::Orchard([0; 43]),
Receiver::Sapling([0; 43]),
Receiver::P2pkh([0; 20]),
Receiver::Unknown {
typecode: 0xff,
data: vec![],
},
]
)
}
}
| ParseError | identifier_name |
unified.rs | use std::cmp;
use std::collections::HashSet;
use std::convert::{TryFrom, TryInto};
use std::error::Error;
use std::fmt;
use std::io::Write;
use zcash_encoding::CompactSize;
use crate::kind;
/// The HRP for a Bech32m-encoded mainnet Unified Address.
///
/// Defined in [ZIP 316][zip-0316].
///
/// [zip-0316]: https://zips.z.cash/zip-0316
pub(crate) const MAINNET: &str = "u";
/// The HRP for a Bech32m-encoded testnet Unified Address.
///
/// Defined in [ZIP 316][zip-0316].
///
/// [zip-0316]: https://zips.z.cash/zip-0316
pub(crate) const TESTNET: &str = "utest";
/// The HRP for a Bech32m-encoded regtest Unified Address.
pub(crate) const REGTEST: &str = "uregtest";
const PADDING_LEN: usize = 16;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Typecode {
P2pkh,
P2sh,
Sapling,
Orchard,
Unknown(u32),
}
impl Ord for Typecode {
fn cmp(&self, other: &Self) -> cmp::Ordering {
match (self, other) {
// Trivial equality checks.
(Self::Orchard, Self::Orchard)
| (Self::Sapling, Self::Sapling)
| (Self::P2sh, Self::P2sh)
| (Self::P2pkh, Self::P2pkh) => cmp::Ordering::Equal,
// We don't know for certain the preference order of unknown receivers, but it
// is likely that the higher typecode has higher preference. The exact order
// doesn't really matter, as unknown receivers have lower preference than
// known receivers.
(Self::Unknown(a), Self::Unknown(b)) => b.cmp(a),
// For the remaining cases, we rely on `match` always choosing the first arm
// with a matching pattern. Patterns below are listed in priority order:
(Self::Orchard, _) => cmp::Ordering::Less,
(_, Self::Orchard) => cmp::Ordering::Greater,
(Self::Sapling, _) => cmp::Ordering::Less,
(_, Self::Sapling) => cmp::Ordering::Greater,
(Self::P2sh, _) => cmp::Ordering::Less,
(_, Self::P2sh) => cmp::Ordering::Greater,
(Self::P2pkh, _) => cmp::Ordering::Less,
(_, Self::P2pkh) => cmp::Ordering::Greater,
}
}
}
impl PartialOrd for Typecode {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl TryFrom<u32> for Typecode {
type Error = ParseError;
fn try_from(typecode: u32) -> Result<Self, Self::Error> {
match typecode {
0x00 => Ok(Typecode::P2pkh),
0x01 => Ok(Typecode::P2sh),
0x02 => Ok(Typecode::Sapling),
0x03 => Ok(Typecode::Orchard),
0x04..=0x02000000 => Ok(Typecode::Unknown(typecode)),
0x02000001..=u32::MAX => Err(ParseError::InvalidTypecodeValue(typecode as u64)),
}
}
}
impl From<Typecode> for u32 {
fn from(t: Typecode) -> Self {
match t {
Typecode::P2pkh => 0x00,
Typecode::P2sh => 0x01,
Typecode::Sapling => 0x02,
Typecode::Orchard => 0x03,
Typecode::Unknown(typecode) => typecode,
}
}
}
| // Unknown typecodes are treated as not transparent for the purpose of disallowing
// only-transparent UAs, which can be represented with existing address encodings.
matches!(self, Typecode::P2pkh | Typecode::P2sh)
}
}
/// An error while attempting to parse a string as a Zcash address.
#[derive(Debug, PartialEq)]
pub enum ParseError {
/// The unified address contains both P2PKH and P2SH receivers.
BothP2phkAndP2sh,
/// The unified address contains a duplicated typecode.
DuplicateTypecode(Typecode),
/// The parsed typecode exceeds the maximum allowed CompactSize value.
InvalidTypecodeValue(u64),
/// The string is an invalid encoding.
InvalidEncoding(String),
/// The unified address only contains transparent receivers.
OnlyTransparent,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ParseError::BothP2phkAndP2sh => write!(f, "UA contains both P2PKH and P2SH receivers"),
ParseError::DuplicateTypecode(c) => write!(f, "Duplicate typecode {}", u32::from(*c)),
ParseError::InvalidTypecodeValue(v) => write!(f, "Typecode value out of range {}", v),
ParseError::InvalidEncoding(msg) => write!(f, "Invalid encoding: {}", msg),
ParseError::OnlyTransparent => write!(f, "UA only contains transparent receivers"),
}
}
}
impl Error for ParseError {}
/// The set of known Receivers for Unified Addresses.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Receiver {
Orchard([u8; 43]),
Sapling(kind::sapling::Data),
P2pkh(kind::p2pkh::Data),
P2sh(kind::p2sh::Data),
Unknown { typecode: u32, data: Vec<u8> },
}
impl cmp::Ord for Receiver {
fn cmp(&self, other: &Self) -> cmp::Ordering {
match self.typecode().cmp(&other.typecode()) {
cmp::Ordering::Equal => self.addr().cmp(other.addr()),
res => res,
}
}
}
impl cmp::PartialOrd for Receiver {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl TryFrom<(u32, &[u8])> for Receiver {
type Error = ParseError;
fn try_from((typecode, addr): (u32, &[u8])) -> Result<Self, Self::Error> {
match typecode.try_into()? {
Typecode::P2pkh => addr.try_into().map(Receiver::P2pkh),
Typecode::P2sh => addr.try_into().map(Receiver::P2sh),
Typecode::Sapling => addr.try_into().map(Receiver::Sapling),
Typecode::Orchard => addr.try_into().map(Receiver::Orchard),
Typecode::Unknown(_) => Ok(Receiver::Unknown {
typecode,
data: addr.to_vec(),
}),
}
.map_err(|e| {
ParseError::InvalidEncoding(format!("Invalid address for typecode {}: {}", typecode, e))
})
}
}
impl Receiver {
fn typecode(&self) -> Typecode {
match self {
Receiver::P2pkh(_) => Typecode::P2pkh,
Receiver::P2sh(_) => Typecode::P2sh,
Receiver::Sapling(_) => Typecode::Sapling,
Receiver::Orchard(_) => Typecode::Orchard,
Receiver::Unknown { typecode, .. } => Typecode::Unknown(*typecode),
}
}
fn addr(&self) -> &[u8] {
match self {
Receiver::P2pkh(data) => data,
Receiver::P2sh(data) => data,
Receiver::Sapling(data) => data,
Receiver::Orchard(data) => data,
Receiver::Unknown { data, .. } => data,
}
}
}
/// A Unified Address.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Address(pub(crate) Vec<Receiver>);
impl TryFrom<(&str, &[u8])> for Address {
type Error = ParseError;
fn try_from((hrp, buf): (&str, &[u8])) -> Result<Self, Self::Error> {
fn read_receiver(mut cursor: &mut std::io::Cursor<&[u8]>) -> Result<Receiver, ParseError> {
let typecode = CompactSize::read(&mut cursor)
.map(|v| u32::try_from(v).expect("CompactSize::read enforces MAX_SIZE limit"))
.map_err(|e| {
ParseError::InvalidEncoding(format!(
"Failed to deserialize CompactSize-encoded typecode {}",
e
))
})?;
let length = CompactSize::read(&mut cursor).map_err(|e| {
ParseError::InvalidEncoding(format!(
"Failed to deserialize CompactSize-encoded length {}",
e
))
})?;
let addr_end = cursor.position().checked_add(length).ok_or_else(|| {
ParseError::InvalidEncoding(format!(
"Length value {} caused an overflow error",
length
))
})?;
let buf = cursor.get_ref();
if (buf.len() as u64) < addr_end {
return Err(ParseError::InvalidEncoding(format!(
"Truncated: unable to read {} bytes of address data",
length
)));
}
let result = Receiver::try_from((
typecode,
&buf[cursor.position() as usize..addr_end as usize],
));
cursor.set_position(addr_end);
result
}
let encoded = f4jumble::f4jumble_inv(buf)
.ok_or_else(|| ParseError::InvalidEncoding("F4Jumble decoding failed".to_owned()))?;
// Validate and strip trailing padding bytes.
if hrp.len() > 16 {
return Err(ParseError::InvalidEncoding(
"Invalid human-readable part".to_owned(),
));
}
let mut expected_padding = [0; PADDING_LEN];
expected_padding[0..hrp.len()].copy_from_slice(hrp.as_bytes());
let encoded = match encoded.split_at(encoded.len() - PADDING_LEN) {
(encoded, tail) if tail == expected_padding => Ok(encoded),
_ => Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned(),
)),
}?;
let mut cursor = std::io::Cursor::new(encoded);
let mut result = vec![];
while cursor.position() < encoded.len().try_into().unwrap() {
result.push(read_receiver(&mut cursor)?);
}
assert_eq!(cursor.position(), encoded.len().try_into().unwrap());
result.try_into()
}
}
impl TryFrom<Vec<Receiver>> for Address {
type Error = ParseError;
fn try_from(receivers: Vec<Receiver>) -> Result<Self, Self::Error> {
let mut typecodes = HashSet::with_capacity(receivers.len());
for receiver in &receivers {
let t = receiver.typecode();
if typecodes.contains(&t) {
return Err(ParseError::DuplicateTypecode(t));
} else if (t == Typecode::P2pkh && typecodes.contains(&Typecode::P2sh))
|| (t == Typecode::P2sh && typecodes.contains(&Typecode::P2pkh))
{
return Err(ParseError::BothP2phkAndP2sh);
} else {
typecodes.insert(t);
}
}
if typecodes.iter().all(|t| t.is_transparent()) {
Err(ParseError::OnlyTransparent)
} else {
// All checks pass!
Ok(Address(receivers))
}
}
}
impl Address {
/// Returns the raw encoding of this Unified Address.
pub(crate) fn to_bytes(&self, hrp: &str) -> Vec<u8> {
assert!(hrp.len() <= PADDING_LEN);
let mut writer = std::io::Cursor::new(Vec::new());
for receiver in &self.0 {
let addr = receiver.addr();
CompactSize::write(
&mut writer,
<u32>::from(receiver.typecode()).try_into().unwrap(),
)
.unwrap();
CompactSize::write(&mut writer, addr.len()).unwrap();
writer.write_all(addr).unwrap();
}
let mut padding = [0u8; PADDING_LEN];
padding[0..hrp.len()].copy_from_slice(&hrp.as_bytes());
writer.write_all(&padding).unwrap();
f4jumble::f4jumble(&writer.into_inner()).unwrap()
}
/// Returns the receivers contained within this address, sorted in preference order.
pub fn receivers(&self) -> Vec<Receiver> {
let mut receivers = self.0.clone();
// Unstable sorting is fine, because all receivers are guaranteed by construction
// to have distinct typecodes.
receivers.sort_unstable_by_key(|r| r.typecode());
receivers
}
/// Returns the receivers contained within this address, in the order they were
/// parsed from the string encoding.
///
/// This API is for advanced usage; in most cases you should use `Address::receivers`.
pub fn receivers_as_parsed(&self) -> &[Receiver] {
&self.0
}
}
#[cfg(test)]
pub(crate) mod test_vectors;
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use std::convert::TryFrom;
use proptest::{
array::{uniform11, uniform20, uniform32},
prelude::*,
};
use super::{Address, ParseError, Receiver, Typecode, MAINNET, REGTEST, TESTNET};
prop_compose! {
fn uniform43()(a in uniform11(0u8..), b in uniform32(0u8..)) -> [u8; 43] {
let mut c = [0; 43];
c[..11].copy_from_slice(&a);
c[11..].copy_from_slice(&b);
c
}
}
fn arb_shielded_receiver() -> BoxedStrategy<Receiver> {
prop_oneof![
uniform43().prop_map(Receiver::Sapling),
uniform43().prop_map(Receiver::Orchard),
]
.boxed()
}
fn arb_transparent_receiver() -> BoxedStrategy<Receiver> {
prop_oneof![
uniform20(0u8..).prop_map(Receiver::P2pkh),
uniform20(0u8..).prop_map(Receiver::P2sh),
]
.boxed()
}
prop_compose! {
fn arb_unified_address()(
shielded in prop::collection::hash_set(arb_shielded_receiver(), 1..2),
transparent in prop::option::of(arb_transparent_receiver()),
) -> Address {
Address(shielded.into_iter().chain(transparent).collect())
}
}
proptest! {
#[test]
fn ua_roundtrip(
hrp in prop_oneof![MAINNET, TESTNET, REGTEST],
ua in arb_unified_address(),
) {
let bytes = ua.to_bytes(&hrp);
let decoded = Address::try_from((hrp.as_str(), &bytes[..]));
prop_assert_eq!(decoded, Ok(ua));
}
}
#[test]
fn padding() {
// The test cases below use `Address(vec![Receiver::Orchard([1; 43])])` as base.
// Invalid padding ([0xff; 16] instead of [b'u', 0x00, 0x00, 0x00...])
let invalid_padding = [
0xe6, 0x59, 0xd1, 0xed, 0xf7, 0x4b, 0xe3, 0x5e, 0x5a, 0x54, 0x0e, 0x41, 0x5d, 0x2f,
0x0c, 0x0d, 0x33, 0x42, 0xbd, 0xbe, 0x9f, 0x82, 0x62, 0x01, 0xc1, 0x1b, 0xd4, 0x1e,
0x42, 0x47, 0x86, 0x23, 0x05, 0x4b, 0x98, 0xd7, 0x76, 0x86, 0xa5, 0xe3, 0x1b, 0xd3,
0x03, 0xca, 0x24, 0x44, 0x8e, 0x72, 0xc1, 0x4a, 0xc6, 0xbf, 0x3f, 0x2b, 0xce, 0xa7,
0x7b, 0x28, 0x69, 0xc9, 0x84,
];
assert_eq!(
Address::try_from((MAINNET, &invalid_padding[..])),
Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned()
))
);
// Short padding (padded to 15 bytes instead of 16)
let truncated_padding = [
0x9a, 0x56, 0x12, 0xa3, 0x43, 0x45, 0xe0, 0x82, 0x6c, 0xac, 0x24, 0x8b, 0x3b, 0x45,
0x72, 0x9a, 0x53, 0xd5, 0xf8, 0xda, 0xec, 0x07, 0x7c, 0xba, 0x9f, 0xa8, 0xd2, 0x97,
0x5b, 0xda, 0x73, 0x1b, 0xd2, 0xd1, 0x32, 0x6b, 0x7b, 0x36, 0xdd, 0x57, 0x84, 0x2a,
0xa0, 0x21, 0x23, 0x89, 0x73, 0x85, 0xe1, 0x4b, 0x3e, 0x95, 0xb7, 0xd4, 0x67, 0xbc,
0x4b, 0x31, 0xee, 0x5a,
];
assert_eq!(
Address::try_from((MAINNET, &truncated_padding[..])),
Err(ParseError::InvalidEncoding(
"Invalid padding bytes".to_owned()
))
);
}
#[test]
fn truncated() {
// The test cases below start from an encoding of
// `Address(vec![Receiver::Orchard([1; 43]), Receiver::Sapling([2; 43])])`
// with the receiver data truncated, but valid padding.
// - Missing the last data byte of the Sapling receiver.
let truncated_sapling_data = [
0xaa, 0xb0, 0x6e, 0x7b, 0x26, 0x7a, 0x22, 0x17, 0x39, 0xfa, 0x07, 0x69, 0xe9, 0x32,
0x2b, 0xac, 0x8c, 0x9e, 0x5e, 0x8a, 0xd9, 0x24, 0x06, 0x5a, 0x13, 0x79, 0x3a, 0x8d,
0xb4, 0x52, 0xfa, 0x18, 0x4e, 0x33, 0x4d, 0x8c, 0x17, 0x77, 0x4d, 0x63, 0x69, 0x34,
0x22, 0x70, 0x3a, 0xea, 0x30, 0x82, 0x5a, 0x6b, 0x37, 0xd1, 0x0d, 0xbe, 0x20, 0xab,
0x82, 0x86, 0x98, 0x34, 0x6a, 0xd8, 0x45, 0x40, 0xd0, 0x25, 0x60, 0xbf, 0x1e, 0xb6,
0xeb, 0x06, 0x85, 0x70, 0x4c, 0x42, 0xbc, 0x19, 0x14, 0xef, 0x7a, 0x05, 0xa0, 0x71,
0xb2, 0x63, 0x80, 0xbb, 0xdc, 0x12, 0x08, 0x48, 0x28, 0x8f, 0x1c, 0x9e, 0xc3, 0x42,
0xc6, 0x5e, 0x68, 0xa2, 0x78, 0x6c, 0x9e,
];
assert_matches!(
Address::try_from((MAINNET, &truncated_sapling_data[..])),
Err(ParseError::InvalidEncoding(_))
);
// - Truncated after the typecode of the Sapling receiver.
let truncated_after_sapling_typecode = [
0x87, 0x7a, 0xdf, 0x79, 0x6b, 0xe3, 0xb3, 0x40, 0xef, 0xe4, 0x5d, 0xc2, 0x91, 0xa2,
0x81, 0xfc, 0x7d, 0x76, 0xbb, 0xb0, 0x58, 0x98, 0x53, 0x59, 0xd3, 0x3f, 0xbc, 0x4b,
0x86, 0x59, 0x66, 0x62, 0x75, 0x92, 0xba, 0xcc, 0x31, 0x1e, 0x60, 0x02, 0x3b, 0xd8,
0x4c, 0xdf, 0x36, 0xa1, 0xac, 0x82, 0x57, 0xed, 0x0c, 0x98, 0x49, 0x8f, 0x49, 0x7e,
0xe6, 0x70, 0x36, 0x5b, 0x7b, 0x9e,
];
assert_matches!(
Address::try_from((MAINNET, &truncated_after_sapling_typecode[..])),
Err(ParseError::InvalidEncoding(_))
);
}
#[test]
fn duplicate_typecode() {
// Construct and serialize an invalid UA.
let ua = Address(vec![Receiver::Sapling([1; 43]), Receiver::Sapling([2; 43])]);
let encoded = ua.to_bytes(MAINNET);
assert_eq!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::DuplicateTypecode(Typecode::Sapling))
);
}
#[test]
fn p2pkh_and_p2sh() {
// Construct and serialize an invalid UA.
let ua = Address(vec![Receiver::P2pkh([0; 20]), Receiver::P2sh([0; 20])]);
let encoded = ua.to_bytes(MAINNET);
assert_eq!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::BothP2phkAndP2sh)
);
}
#[test]
fn only_transparent() {
// Encoding of `Address(vec![Receiver::P2pkh([0; 20])])`.
let encoded = vec![
0xf0, 0x9e, 0x9d, 0x6e, 0xf5, 0xa6, 0xac, 0x16, 0x50, 0xf0, 0xdb, 0xe1, 0x2c, 0xa5,
0x36, 0x22, 0xa2, 0x04, 0x89, 0x86, 0xe9, 0x6a, 0x9b, 0xf3, 0xff, 0x6d, 0x2f, 0xe6,
0xea, 0xdb, 0xc5, 0x20, 0x62, 0xf9, 0x6f, 0xa9, 0x86, 0xcc,
];
// We can't actually exercise this error, because at present the only transparent
// receivers we can use are P2PKH and P2SH (which cannot be used together), and
// with only one of them we don't have sufficient data for F4Jumble (so we hit a
// different error).
assert_matches!(
Address::try_from((MAINNET, &encoded[..])),
Err(ParseError::InvalidEncoding(_))
);
}
#[test]
fn receivers_are_sorted() {
// Construct a UA with receivers in an unsorted order.
let ua = Address(vec![
Receiver::P2pkh([0; 20]),
Receiver::Orchard([0; 43]),
Receiver::Unknown {
typecode: 0xff,
data: vec![],
},
Receiver::Sapling([0; 43]),
]);
// `Address::receivers` sorts the receivers in priority order.
assert_eq!(
ua.receivers(),
vec![
Receiver::Orchard([0; 43]),
Receiver::Sapling([0; 43]),
Receiver::P2pkh([0; 20]),
Receiver::Unknown {
typecode: 0xff,
data: vec![],
},
]
)
}
} | impl Typecode {
fn is_transparent(&self) -> bool { | random_line_split |
__init__.py | """
Copyright 2015-2016 Christian Fobel and Ryan Fobel
This file is part of dropbot_dx_plugin.
dropbot_dx_plugin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
dropbot_dx_plugin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with dropbot_dx_plugin. If not, see <http://www.gnu.org/licenses/>.
"""
from datetime import timedelta
from functools import wraps
import datetime as dt
import itertools
import json
import logging
import re
import subprocess
import sys, traceback
import time
import types
import gtk
import pango
from path_helpers import path
from flatland import Boolean, Float, Form
from pygtkhelpers.ui.extra_widgets import Filepath
from microdrop.plugin_helpers import (AppDataController, StepOptionsController,
get_plugin_info, hub_execute)
from microdrop.plugin_manager import (PluginGlobals, Plugin, IPlugin,
ScheduleRequest, implements, emit_signal,
get_service_instance_by_name)
from microdrop.app_context import get_app
import dropbot_dx as dx
import dropbot_elisa_analysis as ea
import gobject
from pygtkhelpers.ui.extra_dialogs import yesno, FormViewDialog
from pygtkhelpers.utils import dict_to_form
from arduino_helpers.upload import upload_firmware
import pandas as pd
logger = logging.getLogger(__name__)
PluginGlobals.push_env('microdrop.managed')
def dataframe_display_dialog(df, message='', parent=None):
'''
Display a string representation of a `pandas.DataFrame` in a
`gtk.MessageDialog`.
'''
dialog = gtk.MessageDialog(parent, buttons=gtk.BUTTONS_OK)
label = dialog.props.message_area.get_children()[-1]
label.modify_font(pango.FontDescription('mono'))
dialog.props.text = message
dialog.props.secondary_text = df.to_string()
try:
return dialog.run()
finally:
dialog.destroy()
def is_connected(_lambda):
'''
Decorator to check if DropBot DX instrument is connected.
If not connected, warning is logged, but wrapped function is not called.
'''
def wrapper(f):
@wraps(f)
def wrapped(self, *f_args, **f_kwargs):
if not self.connected():
logger.warning('DropBot DX not connected.')
else:
|
return wrapped
return wrapper(_lambda)
def get_unique_path(filepath):
'''
Append `-###` to the base name of a file until a file path is found that
does not exist.
Args
----
filepath (str) : Full file path to target file.
Returns
-------
(path) : Full path where no file exists.
'''
filepath = path(filepath)
cre_incremental = re.compile(r'^(?P<namebase>.*)-(?P<index>\d+)$')
while filepath.isfile():
# Output path exists.
parent_i = filepath.parent
namebase_i = filepath.namebase
ext_i = filepath.ext
match = cre_incremental.search(namebase_i)
if match:
# File name ends with `-##`. Increment and retry.
index_i = int(match.group('index')) + 1
namebase_i = match.group('namebase')
else:
index_i = 0
filepath = parent_i.joinpath(namebase_i + '-%02d%s' % (index_i, ext_i))
return filepath
class DropBotDxAccessoriesPlugin(Plugin, AppDataController, StepOptionsController):
"""
This class is automatically registered with the PluginManager.
"""
implements(IPlugin)
version = get_plugin_info(path(__file__).parent).version
plugin_name = get_plugin_info(path(__file__).parent).plugin_name
AppFields = Form.of(Float.named('dstat_delay_s')
.using(default=2., optional=True,
properties={'title': 'Delay before D-stat '
'measurement (seconds)'}),
Filepath.named('calibrator_file')
.using(#pylint: disable-msg=E1120
default='', optional=True,
properties={'action': gtk.FILE_CHOOSER_ACTION_SAVE}))
StepFields = Form.of(Boolean.named('magnet_engaged').using(default=False,
optional=True),
Boolean.named('dstat_enabled').using(default=False,
optional=True))
def __init__(self):
self.name = self.plugin_name
self.dstat_timeout_id = None # Periodic Dstat status check timeout id
self.dstat_experiment_id = None # UUID of active Dstat experiment
self.dropbot_dx_remote = None # `dropbot_dx.SerialProxy` instance
self.initialized = False # Latch to, e.g., config menus, only once
self._metadata = None
self.has_environment_data = False
self.environment_sensor_master = None
# Number of completed DStat experiments for each step.
self.dstat_experiment_count_by_step = {}
self.dstat_experiment_data = None
self.dropbot_dx_id = None
def connect(self):
'''
Connect to dropbot-dx instrument.
'''
self.has_environment_data = False
self.environment_sensor_master = None
# if the dropbot dx plugin is installed and enabled, try getting its
# reference
try:
service = get_service_instance_by_name('wheelerlab.dropbot_dx')
if service.enabled():
self.dropbot_dx_remote = service.control_board
except:
pass
if self.dropbot_dx_remote is None:
# if we couldn't get a reference, try finding a DropBot DX connected to
# a serial port
try:
self.dropbot_dx_remote = dx.SerialProxy()
host_version = self.dropbot_dx_remote.host_software_version
remote_version = self.dropbot_dx_remote.remote_software_version
if remote_version != host_version:
response = yesno('The DropBot DX firmware version (%s) '
'does not match the driver version (%s). '
'Update firmware?' % (remote_version,
host_version))
if response == gtk.RESPONSE_YES:
self.on_flash_firmware()
# turn on the light by default
self.dropbot_dx_remote.light_enabled = True
except IOError:
logger.warning('Could not connect to DropBot DX.')
# Try to read temperature/humidity over i2c bus through a remote proxy
# --------------------------------------------------------------------
remote_proxies = [self.dropbot_dx_remote]
try:
service = get_service_instance_by_name('wheelerlab'
'.dmf_control_board_plugin')
except KeyError:
# DropBot v2.0 control board plugin is not available.
pass
else:
if service.enabled() and service.control_board.connected():
# The DropBot v2.0 control board plugin is loaded and the
# DropBot v2.0 control board is connected.
#
# Try to read temperature/humidity over i2c through control
# board first.
remote_proxies.insert(0, service.control_board)
# Try each proxy (in order) until the temperature/humidity is read
# successfully.
for proxy_i in remote_proxies:
try:
climate_info = self.get_environment_state(proxy_i)
logger.info('temp=%.1fC, Rel. humidity=%.1f%% (%s)',
climate_info['temperature_celsius'],
100 * climate_info['relative_humidity'], proxy_i)
# Cache remote proxy reference for future calls.
self.has_environment_data = True
self.environment_sensor_master = proxy_i
break
except:
# Try next remote proxy.
pass
# Get instrument identifier, if available.
self.dropbot_dx_id = getattr(self.dropbot_dx_remote, 'id', None)
def get_environment_state(self, master=None, i2c_address=0x27):
'''
Acquire temperature and humidity from Honeywell HIH6000 series
sensor.
[1]: http://sensing.honeywell.com/index.php/ci_id/142171/la_id/1/document/1/re_id/0
'''
if master is None:
master = self.environment_sensor_master
# Trigger measurement.
master.i2c_write(i2c_address, [])
time.sleep(.01)
while True:
# Read 4 bytes from sensor and cast as 2 16-bit integers with reversed
# byte order
humidity_data, temperature_data = master.i2c_read(i2c_address, 4) \
.astype('uint8').view('>u2')
status_code = (humidity_data >> 14) & 0x03
if status_code == 0:
# Measurement completed successfully.
break
elif status_code > 1:
raise IOError('Error reading from sensor.')
# Measurement data is stale (i.e., measurement still in
# progress). Try again.
time.sleep(.001)
# See URL from docstring for source of equations.
relative_humidity = float(humidity_data & 0x03FFF) / ((1 << 14) - 2)
temperature_celsius = (float((temperature_data >> 2) & 0x3FFF) /
((1 << 14) - 2) * 165 - 40)
return pd.Series([relative_humidity, temperature_celsius],
index=['relative_humidity',
'temperature_celsius'])
def connected(self):
'''
Returns
-------
(bool) : `True` if dropbot-dx instrument is connected.
'''
return (self.dropbot_dx_remote is not None)
def data_dir(self):
app = get_app()
data_dir = app.experiment_log.get_log_path().joinpath(self.name)
if not data_dir.isdir():
data_dir.makedirs_p()
return data_dir
def dstat_summary_frame(self, **kwargs):
'''
Generate DStat signal results summary, normalized against
calibrator signal where applicable.
'''
if self.dstat_experiment_data is None:
return pd.DataFrame(None)
app_values = self.get_app_values()
calibrator_file = app_values.get('calibrator_file')
# Reduce measurements from each DStat acquisition step into a single
# signal value.
df_md_reduced = ea.reduce_microdrop_dstat_data(self
.dstat_experiment_data)
# Subtract respective background signal from each row in DStat results
# summary. See `dropbot_elisa_analysis.subtract_background_signal` for
# more details.
try:
df_adjusted =\
ea.subtract_background_signal(df_md_reduced
.set_index('step_label'))
df_md_reduced.loc[:, 'signal'] = df_adjusted.signal.values
logger.info('Adjusted signals according to background signal '
'(where available).')
except Exception, exception:
logger.info('Could not adjust signals according to background '
'signal.\n%s', exception)
return ea.microdrop_dstat_summary_table(df_md_reduced,
calibrator_csv_path=
calibrator_file, **kwargs)
def get_step_metadata(self):
'''
Returns
-------
(OrderedDict) : Contents of `self.metadata` dictionary, updated
with the additional fields `batch_id`, `step_number`,
`attempt_number`, `temperature_celsius`, `relative_humidity`.
'''
app = get_app()
# Construct dictionary of metadata for extra columns in the `pandas.DataFrame`.
metadata = self.metadata.copy()
cre_device_id = re.compile(r'#(?P<batch_id>[a-fA-F0-9]+)'
r'%(?P<device_id>[a-fA-F0-9]+)$')
device_id = metadata.get('device_id', '')
# If `device_id` is in the form '#<batch-id>%<device-id>', extract batch and
# device identifiers separately.
match = cre_device_id.match(device_id)
if match:
metadata['device_id'] = str(match.group('device_id'))
metadata['batch_id'] = str(match.group('batch_id'))
else:
metadata['device_id'] = None
metadata['batch_id'] = None
metadata['step_number'] = app.protocol.current_step_number + 1
# Number of times the DStat experiment has been run for the current step.
metadata['attempt_number'] = (self.dstat_experiment_count_by_step
[app.protocol.current_step_number])
# Current temperature and humidity.
if self.has_environment_data:
metadata.update(self.get_environment_state())
# Instrument identifier.
metadata['instrument_id'] = self.dropbot_dx_id
if 'sample_id' not in metadata:
sample_labels = [str(v) for k, v in metadata.iteritems()
if str(k).lower().startswith('sample')]
metadata['sample_id'] = ' and '.join(sample_labels)
return metadata
###########################################################################
# # Accessor methods #
def get_step_label(self):
try:
step_label_plugin =\
get_service_instance_by_name('wheelerlab.step_label_plugin')
return step_label_plugin.get_step_options().get('label')
except:
return None
@property
def metadata(self):
'''
Add experiment index and experiment UUID to metadata.
'''
metadata = self._metadata.copy() if self._metadata else {}
app = get_app()
metadata['experiment_id'] = app.experiment_log.experiment_id
metadata['experiment_uuid'] = app.experiment_log.uuid
return metadata
@metadata.setter
def metadata(self, value):
self._metadata = value
###########################################################################
# # Menu callbacks #
def on_edit_configuration(self, widget=None, data=None):
'''
Display a dialog to manually edit the configuration settings.
'''
config = self.dropbot_dx_remote.config
form = dict_to_form(config)
dialog = FormViewDialog(form, 'Edit configuration settings')
valid, response = dialog.run()
if valid:
self.dropbot_dx_remote.update_config(**response)
def on_flash_firmware(self):
board = dx.get_firmwares().keys()[0]
firmware_path = dx.get_firmwares()[board][0]
port = self.dropbot_dx_remote.stream.serial_device.port
# disconnect from DropBot DX so that we can flash it
del self.dropbot_dx_remote
self.dropbot_dx_remote = None
logger.info(upload_firmware(firmware_path, board, port=port))
# reconnect
self.connect()
def on_launch_dstat_interface(self, widget, data=None):
subprocess.Popen([sys.executable, '-m', 'dstat_interface.main'])
def on_set_dstat_params_file(self, widget, data=None):
options = self.get_step_options()
form = Form.of(Filepath.named('dstat_params_file')
.using(default=options.get('dstat_params_file', ''),
optional=True,
properties={'patterns':
[('Dstat parameters file (*.yml)',
('*.yml', ))]}))
dialog = FormViewDialog(form, 'Set DStat parameters file')
valid, response = dialog.run()
if valid:
options['dstat_params_file'] = response['dstat_params_file']
self.set_step_values(options)
###########################################################################
# # Plugin signal handlers #
def get_schedule_requests(self, function_name):
"""
Returns a list of scheduling requests (i.e., ScheduleRequest
instances) for the function specified by function_name.
"""
if function_name in ['on_plugin_enable']:
return [ScheduleRequest('wheelerlab.dropbot_dx', self.name),
ScheduleRequest('wheelerlab.dmf_control_board_plugin',
self.name)]
elif function_name == 'on_step_run':
return [ScheduleRequest('wheelerlab.dmf_device_ui_plugin',
self.name)]
elif function_name == 'on_experiment_log_changed':
# Ensure that the app's reference to the new experiment log gets
# set.
return [ScheduleRequest('microdrop.app', self.name)]
return []
def on_experiment_log_changed(self, experiment_log):
# Reset number of completed DStat experiments for each step.
self.dstat_experiment_count_by_step = {}
self.dstat_experiment_data = None
app = get_app()
app_values = self.get_app_values()
calibrator_file = app_values.get('calibrator_file', '')
data = {'calibrator_file': calibrator_file}
if hasattr(app, 'experiment_log') and app.experiment_log:
app.experiment_log.metadata[self.name] = data
# copy the calibrator file to the experiment log directory
if calibrator_file:
if not path(calibrator_file).isfile():
logger.error('Calibration file (%s) does not exist.' %
calibrator_file)
else:
try:
output_path = path(app.experiment_log.get_log_path()) / self.name
if not output_path.isdir():
output_path.mkdir()
path(calibrator_file).copy2(output_path / 'calibrator.csv')
except:
logger.error('Could not copy calibration file to the '
'experiment log directory.' , exc_info=True)
def on_metadata_changed(self, schema, original_metadata, metadata):
'''
Notify DStat interface of updates to the experiment metadata.
'''
metadata = metadata.copy()
metadata['metadata_schema'] = json.dumps(schema)
self.metadata = metadata
def on_plugin_enable(self):
self.connect()
if not self.initialized:
app = get_app()
self.tools_menu_item = gtk.MenuItem("DropBot DX")
app.main_window_controller.menu_tools.append(self.tools_menu_item)
self.tools_menu = gtk.Menu()
self.tools_menu.show()
self.tools_menu_item.set_submenu(self.tools_menu)
menu_item = gtk.MenuItem("Launch Dstat interface")
self.tools_menu.append(menu_item)
menu_item.connect("activate", self.on_launch_dstat_interface)
menu_item.show()
menu_item = gtk.MenuItem("Set step Dstat parameters file...")
self.tools_menu.append(menu_item)
menu_item.connect("activate", self.on_set_dstat_params_file)
menu_item.show()
self.edit_config_menu_item = \
gtk.MenuItem("Edit configuration settings...")
self.tools_menu.append(self.edit_config_menu_item)
self.edit_config_menu_item.connect("activate",
self.on_edit_configuration)
self.view_menu_item = gtk.MenuItem("DropBot DX")
app.main_window_controller.menu_view.append(self.view_menu_item)
self.view_menu = gtk.Menu()
self.view_menu.show()
self.view_menu_item.set_submenu(self.view_menu)
menu_item = gtk.MenuItem("View DStat results...")
self.view_menu.append(menu_item)
# Display DStat summary table in dialog.
menu_item.connect("activate", lambda *args:
dataframe_display_dialog
(self.dstat_summary_frame(unit='n'),
message='DStat result summary'))
menu_item.show()
self.initialized = True
self.tools_menu_item.show()
self.view_menu_item.show()
if self.connected():
self.edit_config_menu_item.show()
super(DropBotDxAccessoriesPlugin, self).on_plugin_enable()
def on_plugin_disable(self):
if self.connected():
# delete to free up the serial port
del self.dropbot_dx_remote
self.dropbot_dx_remote = None
self.tools_menu_item.hide()
@is_connected
def on_protocol_run(self):
pass
def on_step_options_swapped(self, plugin, old_step_number, step_number):
"""
Handler called when the step options are changed for a particular
plugin. This will, for example, allow for GUI elements to be
updated based on step specified.
Parameters:
plugin : plugin instance for which the step options changed
step_number : step number that the options changed for
"""
pass
def on_step_run(self):
'''
Handler called whenever a step is executed. Note that this signal is
only emitted in realtime mode or if a protocol is running.
Plugins that handle this signal must emit the `on_step_complete` signal
once they have completed the step. The protocol controller will wait
until all plugins have completed the current step before proceeding.
The `on_step_complete` signal is emitted with following signature:
emit_signal('on_step_complete', [plugin_name, return_value])
where `plugin_name` is the name of the emitting plugin, and
`return_value` can be one of:
- `None`: Step completed successfully.
- `'Repeat'`: Repeat the step.
- `'Fail'`: Unrecoverable error (stop the protocol).
'''
app = get_app()
logger.info('[DropBotDxAccessoriesPlugin] on_step_run(): step #%d',
app.protocol.current_step_number)
options = self.get_step_options()
app_values = self.get_app_values()
if self.connected():
self.dropbot_dx_remote.light_enabled = not options['dstat_enabled']
self.dropbot_dx_remote.magnet_engaged=options['magnet_engaged']
try:
if self.has_environment_data:
env = self.get_environment_state().to_dict()
logger.info('temp=%.1fC, Rel. humidity=%.1f%%' %
(env['temperature_celsius'],
100 * env['relative_humidity']))
app.experiment_log.add_data({"environment state": env},
self.name)
except ValueError:
self.has_environment_data = False
if options['dstat_enabled']:
# D-stat is enabled for step. Request acquisition.
try:
if 'dstat_params_file' in options:
# Load Dstat parameters.
hub_execute('dstat-interface', 'load_params',
params_path=options['dstat_params_file'])
if self.dstat_timeout_id is not None:
# Timer was already set, so cancel previous timer.
gobject.source_remove(self.dstat_timeout_id)
# Delay before D-stat measurement (e.g., to allow webcam
# light to turn off).
dstat_delay_s = app_values.get('dstat_delay_s', 0)
time.sleep(max(0, dstat_delay_s))
step_label = self.get_step_label()
# Send Microdrop step label (if available) to provide name
# for DStat experiment.
metadata = self.metadata.copy()
metadata['name'] = (step_label if step_label else
str(app.protocol.current_step_number +
1))
metadata['patient_id'] = metadata.get('sample_id', 'None')
# Get target path for DStat database directory.
dstat_database_path = (path(app.config['data_dir'])
.realpath().joinpath('dstat-db'))
self.dstat_experiment_id = \
hub_execute('dstat-interface', 'run_active_experiment',
metadata=metadata,
params={'db_path_entry':
str(dstat_database_path),
'db_enable_checkbutton': True})
self._dstat_spinner = itertools.cycle(r'-\|/')
print ''
# Check every 250ms to see if dstat acquisition has
# completed.
self.dstat_timeout_id = \
gobject.timeout_add(250, self.check_dstat_status)
except:
print "Exception in user code:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
# An error occurred while initializing Analyst remote
# control.
emit_signal('on_step_complete', [self.name, 'Fail'])
else:
# D-State is not enabled, so step is complete.
emit_signal('on_step_complete', [self.name, None])
else:
# DropBox-DX device is not connected, but allow protocol to
# continue.
#
# N.B., A warning message is display once at the *start* of the
# protocol if no DropBot-DX connection has been established, but
# *not* on each step.
emit_signal('on_step_complete', [self.name, None])
###########################################################################
# # Periodic callbacks #
def check_dstat_status(self):
'''
1. Check to see if acquisition is finished.
2. If (1), emit `on_step_complete` signal.
'''
try:
completed_timestamp = hub_execute('dstat-interface',
'acquisition_complete',
experiment_id=
self.dstat_experiment_id,
timeout_s=5.)
if completed_timestamp is not None:
# ## Acquisition is complete ##
app = get_app()
# Increment the number of completed DStat experiments for
# current step.
step_i = app.protocol.current_step_number
count_i = 1 + self.dstat_experiment_count_by_step.get(step_i,
0)
self.dstat_experiment_count_by_step[step_i] = count_i
# ### Save results data and plot ###
output_directory = (path(app.experiment_log.get_log_path())
.abspath())
output_namebase = str(app.protocol.current_step_number)
step_label = self.get_step_label()
if step_label is not None:
# Replace characters that are not allowed in a filename
# with underscore.
output_namebase = re.sub(r'[:/\\\?{}]', '_', step_label)
# Save results to a text file in the experiment log directory.
output_txt_path = get_unique_path(output_directory
.joinpath(output_namebase +
'.txt'))
logger.info('Save results to: %s', output_txt_path)
dstat_params = hub_execute('dstat-interface', 'get_params')
hub_execute('dstat-interface', 'save_text',
save_data_path=output_txt_path)
data_i = hub_execute('dstat-interface', 'get_experiment_data',
experiment_id=self.dstat_experiment_id)
metadata_i = self.get_step_metadata()
# Compute (approximate) `utc_timestamp` for each DStat
# measurement.
max_time_s = data_i.time_s.max()
metadata_i['utc_timestamp'] = (completed_timestamp -
data_i.time_s
.map(lambda t:
timedelta(seconds=
max_time_s - t)))
# Step label from step label plugin.
metadata_i['step_label'] = step_label
# Compute UTC start time from local experiment start time.
metadata_i['experiment_start'] = \
(dt.datetime.fromtimestamp(app.experiment_log.start_time())
+ (dt.datetime.utcnow() - dt.datetime.now()))
# Compute UTC start time from local experiment start time.
metadata_i['experiment_length_min'] = \
(completed_timestamp -
metadata_i['experiment_start']).total_seconds() / 60.
# Record synchronous detection parameters from DStat (if set).
if dstat_params['sync_true']:
metadata_i['target_hz'] = float(dstat_params['sync_freq'])
else:
metadata_i['target_hz'] = None
metadata_i['sample_frequency_hz'] = float(dstat_params['adc_rate_hz'])
# Cast metadata `unicode` fields as `str` to enable HDF export.
for k, v in metadata_i.iteritems():
if isinstance(v, types.StringTypes):
metadata_i[k] = str(v)
data_md_i = data_i.copy()
for i, (k, v) in enumerate(metadata_i.iteritems()):
try:
data_md_i.insert(i, k, v)
except Exception, e:
logger.info('Skipping metadata field %s: %s.\n%s', k,
v, e)
# Set order for known columns. Unknown columns are ordered
# last, alphabetically.
column_order = ['instrument_id', 'experiment_id',
'experiment_uuid', 'experiment_start',
'experiment_length_min', 'utc_timestamp',
'device_id', 'batch_id', 'sample_id',
'step_label', 'step_number', 'attempt_number',
'temperature_celsius', 'relative_humidity',
'target_hz', 'sample_frequency_hz', 'time_s',
'current_amps']
column_index = dict([(k, i) for i, k in
enumerate(column_order)])
ordered_columns = sorted(data_md_i.columns, key=lambda k:
(column_index
.get(k, len(column_order)), k))
data_md_i = data_md_i[ordered_columns]
namebase_i = ('e[{}]-d[{}]-s[{}]'
.format(metadata_i['experiment_uuid'][:8],
metadata_i.get('device_id'),
metadata_i.get('sample_id')))
if self.dstat_experiment_data is None:
self.dstat_experiment_data = data_md_i
else:
combined = pd.concat([self.dstat_experiment_data,
data_md_i])
self.dstat_experiment_data = combined.reset_index(drop=
True)
# Append DStat experiment data to CSV file.
csv_output_path = self.data_dir().joinpath(namebase_i + '.csv')
# Only include header if the file does not exist or is empty.
include_header = not (csv_output_path.isfile() and
(csv_output_path.size > 0))
with csv_output_path.open('a') as output:
data_md_i.to_csv(output, index=False,
header=include_header)
df_dstat_summary = self.dstat_summary_frame(numeric=True)
# Write DStat summary table to CSV file.
csv_summary_path = self.data_dir().joinpath('dstat-summary'
'.csv')
with csv_summary_path.open('w') as output:
df_dstat_summary.to_csv(output)
# Turn light back on after photomultiplier tube (PMT)
# measurement.
self.dropbot_dx_remote.light_enabled = True
# notify step complete.
emit_signal('on_step_complete', [self.name, None])
self.dstat_timeout_id = None
return False
else:
print '\rWaiting for Dstat...', self._dstat_spinner.next(),
except:
print "Exception in user code:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
emit_signal('on_step_complete', [self.name, 'Fail'])
self.dstat_timeout_id = None
return False
return True
PluginGlobals.pop_env()
| f(self, *f_args, **f_kwargs) | conditional_block |
__init__.py | """
Copyright 2015-2016 Christian Fobel and Ryan Fobel
This file is part of dropbot_dx_plugin.
dropbot_dx_plugin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
dropbot_dx_plugin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with dropbot_dx_plugin. If not, see <http://www.gnu.org/licenses/>.
"""
from datetime import timedelta
from functools import wraps
import datetime as dt
import itertools
import json
import logging
import re
import subprocess
import sys, traceback
import time
import types
import gtk
import pango
from path_helpers import path
from flatland import Boolean, Float, Form
from pygtkhelpers.ui.extra_widgets import Filepath
from microdrop.plugin_helpers import (AppDataController, StepOptionsController,
get_plugin_info, hub_execute)
from microdrop.plugin_manager import (PluginGlobals, Plugin, IPlugin,
ScheduleRequest, implements, emit_signal,
get_service_instance_by_name)
from microdrop.app_context import get_app
import dropbot_dx as dx
import dropbot_elisa_analysis as ea
import gobject
from pygtkhelpers.ui.extra_dialogs import yesno, FormViewDialog
from pygtkhelpers.utils import dict_to_form
from arduino_helpers.upload import upload_firmware
import pandas as pd
logger = logging.getLogger(__name__)
PluginGlobals.push_env('microdrop.managed')
def dataframe_display_dialog(df, message='', parent=None):
'''
Display a string representation of a `pandas.DataFrame` in a
`gtk.MessageDialog`.
'''
dialog = gtk.MessageDialog(parent, buttons=gtk.BUTTONS_OK)
label = dialog.props.message_area.get_children()[-1]
label.modify_font(pango.FontDescription('mono'))
dialog.props.text = message
dialog.props.secondary_text = df.to_string()
try:
return dialog.run()
finally:
dialog.destroy()
def is_connected(_lambda):
'''
Decorator to check if DropBot DX instrument is connected.
If not connected, warning is logged, but wrapped function is not called.
'''
def wrapper(f):
@wraps(f)
def wrapped(self, *f_args, **f_kwargs):
if not self.connected():
logger.warning('DropBot DX not connected.')
else:
f(self, *f_args, **f_kwargs)
return wrapped
return wrapper(_lambda)
def get_unique_path(filepath):
'''
Append `-###` to the base name of a file until a file path is found that
does not exist.
Args
----
filepath (str) : Full file path to target file.
Returns
-------
(path) : Full path where no file exists.
'''
filepath = path(filepath)
cre_incremental = re.compile(r'^(?P<namebase>.*)-(?P<index>\d+)$')
while filepath.isfile():
# Output path exists.
parent_i = filepath.parent
namebase_i = filepath.namebase
ext_i = filepath.ext
match = cre_incremental.search(namebase_i)
if match:
# File name ends with `-##`. Increment and retry.
index_i = int(match.group('index')) + 1
namebase_i = match.group('namebase')
else:
index_i = 0
filepath = parent_i.joinpath(namebase_i + '-%02d%s' % (index_i, ext_i))
return filepath
class DropBotDxAccessoriesPlugin(Plugin, AppDataController, StepOptionsController):
"""
This class is automatically registered with the PluginManager.
"""
implements(IPlugin)
version = get_plugin_info(path(__file__).parent).version
plugin_name = get_plugin_info(path(__file__).parent).plugin_name
AppFields = Form.of(Float.named('dstat_delay_s')
.using(default=2., optional=True,
properties={'title': 'Delay before D-stat '
'measurement (seconds)'}),
Filepath.named('calibrator_file')
.using(#pylint: disable-msg=E1120
default='', optional=True,
properties={'action': gtk.FILE_CHOOSER_ACTION_SAVE}))
StepFields = Form.of(Boolean.named('magnet_engaged').using(default=False,
optional=True),
Boolean.named('dstat_enabled').using(default=False,
optional=True))
def __init__(self):
self.name = self.plugin_name
self.dstat_timeout_id = None # Periodic Dstat status check timeout id
self.dstat_experiment_id = None # UUID of active Dstat experiment
self.dropbot_dx_remote = None # `dropbot_dx.SerialProxy` instance
self.initialized = False # Latch to, e.g., config menus, only once
self._metadata = None
self.has_environment_data = False
self.environment_sensor_master = None
# Number of completed DStat experiments for each step.
self.dstat_experiment_count_by_step = {}
self.dstat_experiment_data = None
self.dropbot_dx_id = None
def connect(self):
|
def get_environment_state(self, master=None, i2c_address=0x27):
'''
Acquire temperature and humidity from Honeywell HIH6000 series
sensor.
[1]: http://sensing.honeywell.com/index.php/ci_id/142171/la_id/1/document/1/re_id/0
'''
if master is None:
master = self.environment_sensor_master
# Trigger measurement.
master.i2c_write(i2c_address, [])
time.sleep(.01)
while True:
# Read 4 bytes from sensor and cast as 2 16-bit integers with reversed
# byte order
humidity_data, temperature_data = master.i2c_read(i2c_address, 4) \
.astype('uint8').view('>u2')
status_code = (humidity_data >> 14) & 0x03
if status_code == 0:
# Measurement completed successfully.
break
elif status_code > 1:
raise IOError('Error reading from sensor.')
# Measurement data is stale (i.e., measurement still in
# progress). Try again.
time.sleep(.001)
# See URL from docstring for source of equations.
relative_humidity = float(humidity_data & 0x03FFF) / ((1 << 14) - 2)
temperature_celsius = (float((temperature_data >> 2) & 0x3FFF) /
((1 << 14) - 2) * 165 - 40)
return pd.Series([relative_humidity, temperature_celsius],
index=['relative_humidity',
'temperature_celsius'])
def connected(self):
'''
Returns
-------
(bool) : `True` if dropbot-dx instrument is connected.
'''
return (self.dropbot_dx_remote is not None)
def data_dir(self):
app = get_app()
data_dir = app.experiment_log.get_log_path().joinpath(self.name)
if not data_dir.isdir():
data_dir.makedirs_p()
return data_dir
def dstat_summary_frame(self, **kwargs):
'''
Generate DStat signal results summary, normalized against
calibrator signal where applicable.
'''
if self.dstat_experiment_data is None:
return pd.DataFrame(None)
app_values = self.get_app_values()
calibrator_file = app_values.get('calibrator_file')
# Reduce measurements from each DStat acquisition step into a single
# signal value.
df_md_reduced = ea.reduce_microdrop_dstat_data(self
.dstat_experiment_data)
# Subtract respective background signal from each row in DStat results
# summary. See `dropbot_elisa_analysis.subtract_background_signal` for
# more details.
try:
df_adjusted =\
ea.subtract_background_signal(df_md_reduced
.set_index('step_label'))
df_md_reduced.loc[:, 'signal'] = df_adjusted.signal.values
logger.info('Adjusted signals according to background signal '
'(where available).')
except Exception, exception:
logger.info('Could not adjust signals according to background '
'signal.\n%s', exception)
return ea.microdrop_dstat_summary_table(df_md_reduced,
calibrator_csv_path=
calibrator_file, **kwargs)
def get_step_metadata(self):
'''
Returns
-------
(OrderedDict) : Contents of `self.metadata` dictionary, updated
with the additional fields `batch_id`, `step_number`,
`attempt_number`, `temperature_celsius`, `relative_humidity`.
'''
app = get_app()
# Construct dictionary of metadata for extra columns in the `pandas.DataFrame`.
metadata = self.metadata.copy()
cre_device_id = re.compile(r'#(?P<batch_id>[a-fA-F0-9]+)'
r'%(?P<device_id>[a-fA-F0-9]+)$')
device_id = metadata.get('device_id', '')
# If `device_id` is in the form '#<batch-id>%<device-id>', extract batch and
# device identifiers separately.
match = cre_device_id.match(device_id)
if match:
metadata['device_id'] = str(match.group('device_id'))
metadata['batch_id'] = str(match.group('batch_id'))
else:
metadata['device_id'] = None
metadata['batch_id'] = None
metadata['step_number'] = app.protocol.current_step_number + 1
# Number of times the DStat experiment has been run for the current step.
metadata['attempt_number'] = (self.dstat_experiment_count_by_step
[app.protocol.current_step_number])
# Current temperature and humidity.
if self.has_environment_data:
metadata.update(self.get_environment_state())
# Instrument identifier.
metadata['instrument_id'] = self.dropbot_dx_id
if 'sample_id' not in metadata:
sample_labels = [str(v) for k, v in metadata.iteritems()
if str(k).lower().startswith('sample')]
metadata['sample_id'] = ' and '.join(sample_labels)
return metadata
###########################################################################
# # Accessor methods #
def get_step_label(self):
try:
step_label_plugin =\
get_service_instance_by_name('wheelerlab.step_label_plugin')
return step_label_plugin.get_step_options().get('label')
except:
return None
@property
def metadata(self):
'''
Add experiment index and experiment UUID to metadata.
'''
metadata = self._metadata.copy() if self._metadata else {}
app = get_app()
metadata['experiment_id'] = app.experiment_log.experiment_id
metadata['experiment_uuid'] = app.experiment_log.uuid
return metadata
@metadata.setter
def metadata(self, value):
self._metadata = value
###########################################################################
# # Menu callbacks #
def on_edit_configuration(self, widget=None, data=None):
'''
Display a dialog to manually edit the configuration settings.
'''
config = self.dropbot_dx_remote.config
form = dict_to_form(config)
dialog = FormViewDialog(form, 'Edit configuration settings')
valid, response = dialog.run()
if valid:
self.dropbot_dx_remote.update_config(**response)
def on_flash_firmware(self):
board = dx.get_firmwares().keys()[0]
firmware_path = dx.get_firmwares()[board][0]
port = self.dropbot_dx_remote.stream.serial_device.port
# disconnect from DropBot DX so that we can flash it
del self.dropbot_dx_remote
self.dropbot_dx_remote = None
logger.info(upload_firmware(firmware_path, board, port=port))
# reconnect
self.connect()
def on_launch_dstat_interface(self, widget, data=None):
subprocess.Popen([sys.executable, '-m', 'dstat_interface.main'])
def on_set_dstat_params_file(self, widget, data=None):
options = self.get_step_options()
form = Form.of(Filepath.named('dstat_params_file')
.using(default=options.get('dstat_params_file', ''),
optional=True,
properties={'patterns':
[('Dstat parameters file (*.yml)',
('*.yml', ))]}))
dialog = FormViewDialog(form, 'Set DStat parameters file')
valid, response = dialog.run()
if valid:
options['dstat_params_file'] = response['dstat_params_file']
self.set_step_values(options)
###########################################################################
# # Plugin signal handlers #
def get_schedule_requests(self, function_name):
"""
Returns a list of scheduling requests (i.e., ScheduleRequest
instances) for the function specified by function_name.
"""
if function_name in ['on_plugin_enable']:
return [ScheduleRequest('wheelerlab.dropbot_dx', self.name),
ScheduleRequest('wheelerlab.dmf_control_board_plugin',
self.name)]
elif function_name == 'on_step_run':
return [ScheduleRequest('wheelerlab.dmf_device_ui_plugin',
self.name)]
elif function_name == 'on_experiment_log_changed':
# Ensure that the app's reference to the new experiment log gets
# set.
return [ScheduleRequest('microdrop.app', self.name)]
return []
def on_experiment_log_changed(self, experiment_log):
# Reset number of completed DStat experiments for each step.
self.dstat_experiment_count_by_step = {}
self.dstat_experiment_data = None
app = get_app()
app_values = self.get_app_values()
calibrator_file = app_values.get('calibrator_file', '')
data = {'calibrator_file': calibrator_file}
if hasattr(app, 'experiment_log') and app.experiment_log:
app.experiment_log.metadata[self.name] = data
# copy the calibrator file to the experiment log directory
if calibrator_file:
if not path(calibrator_file).isfile():
logger.error('Calibration file (%s) does not exist.' %
calibrator_file)
else:
try:
output_path = path(app.experiment_log.get_log_path()) / self.name
if not output_path.isdir():
output_path.mkdir()
path(calibrator_file).copy2(output_path / 'calibrator.csv')
except:
logger.error('Could not copy calibration file to the '
'experiment log directory.' , exc_info=True)
def on_metadata_changed(self, schema, original_metadata, metadata):
'''
Notify DStat interface of updates to the experiment metadata.
'''
metadata = metadata.copy()
metadata['metadata_schema'] = json.dumps(schema)
self.metadata = metadata
def on_plugin_enable(self):
self.connect()
if not self.initialized:
app = get_app()
self.tools_menu_item = gtk.MenuItem("DropBot DX")
app.main_window_controller.menu_tools.append(self.tools_menu_item)
self.tools_menu = gtk.Menu()
self.tools_menu.show()
self.tools_menu_item.set_submenu(self.tools_menu)
menu_item = gtk.MenuItem("Launch Dstat interface")
self.tools_menu.append(menu_item)
menu_item.connect("activate", self.on_launch_dstat_interface)
menu_item.show()
menu_item = gtk.MenuItem("Set step Dstat parameters file...")
self.tools_menu.append(menu_item)
menu_item.connect("activate", self.on_set_dstat_params_file)
menu_item.show()
self.edit_config_menu_item = \
gtk.MenuItem("Edit configuration settings...")
self.tools_menu.append(self.edit_config_menu_item)
self.edit_config_menu_item.connect("activate",
self.on_edit_configuration)
self.view_menu_item = gtk.MenuItem("DropBot DX")
app.main_window_controller.menu_view.append(self.view_menu_item)
self.view_menu = gtk.Menu()
self.view_menu.show()
self.view_menu_item.set_submenu(self.view_menu)
menu_item = gtk.MenuItem("View DStat results...")
self.view_menu.append(menu_item)
# Display DStat summary table in dialog.
menu_item.connect("activate", lambda *args:
dataframe_display_dialog
(self.dstat_summary_frame(unit='n'),
message='DStat result summary'))
menu_item.show()
self.initialized = True
self.tools_menu_item.show()
self.view_menu_item.show()
if self.connected():
self.edit_config_menu_item.show()
super(DropBotDxAccessoriesPlugin, self).on_plugin_enable()
def on_plugin_disable(self):
if self.connected():
# delete to free up the serial port
del self.dropbot_dx_remote
self.dropbot_dx_remote = None
self.tools_menu_item.hide()
@is_connected
def on_protocol_run(self):
pass
def on_step_options_swapped(self, plugin, old_step_number, step_number):
"""
Handler called when the step options are changed for a particular
plugin. This will, for example, allow for GUI elements to be
updated based on step specified.
Parameters:
plugin : plugin instance for which the step options changed
step_number : step number that the options changed for
"""
pass
def on_step_run(self):
'''
Handler called whenever a step is executed. Note that this signal is
only emitted in realtime mode or if a protocol is running.
Plugins that handle this signal must emit the `on_step_complete` signal
once they have completed the step. The protocol controller will wait
until all plugins have completed the current step before proceeding.
The `on_step_complete` signal is emitted with following signature:
emit_signal('on_step_complete', [plugin_name, return_value])
where `plugin_name` is the name of the emitting plugin, and
`return_value` can be one of:
- `None`: Step completed successfully.
- `'Repeat'`: Repeat the step.
- `'Fail'`: Unrecoverable error (stop the protocol).
'''
app = get_app()
logger.info('[DropBotDxAccessoriesPlugin] on_step_run(): step #%d',
app.protocol.current_step_number)
options = self.get_step_options()
app_values = self.get_app_values()
if self.connected():
self.dropbot_dx_remote.light_enabled = not options['dstat_enabled']
self.dropbot_dx_remote.magnet_engaged=options['magnet_engaged']
try:
if self.has_environment_data:
env = self.get_environment_state().to_dict()
logger.info('temp=%.1fC, Rel. humidity=%.1f%%' %
(env['temperature_celsius'],
100 * env['relative_humidity']))
app.experiment_log.add_data({"environment state": env},
self.name)
except ValueError:
self.has_environment_data = False
if options['dstat_enabled']:
# D-stat is enabled for step. Request acquisition.
try:
if 'dstat_params_file' in options:
# Load Dstat parameters.
hub_execute('dstat-interface', 'load_params',
params_path=options['dstat_params_file'])
if self.dstat_timeout_id is not None:
# Timer was already set, so cancel previous timer.
gobject.source_remove(self.dstat_timeout_id)
# Delay before D-stat measurement (e.g., to allow webcam
# light to turn off).
dstat_delay_s = app_values.get('dstat_delay_s', 0)
time.sleep(max(0, dstat_delay_s))
step_label = self.get_step_label()
# Send Microdrop step label (if available) to provide name
# for DStat experiment.
metadata = self.metadata.copy()
metadata['name'] = (step_label if step_label else
str(app.protocol.current_step_number +
1))
metadata['patient_id'] = metadata.get('sample_id', 'None')
# Get target path for DStat database directory.
dstat_database_path = (path(app.config['data_dir'])
.realpath().joinpath('dstat-db'))
self.dstat_experiment_id = \
hub_execute('dstat-interface', 'run_active_experiment',
metadata=metadata,
params={'db_path_entry':
str(dstat_database_path),
'db_enable_checkbutton': True})
self._dstat_spinner = itertools.cycle(r'-\|/')
print ''
# Check every 250ms to see if dstat acquisition has
# completed.
self.dstat_timeout_id = \
gobject.timeout_add(250, self.check_dstat_status)
except:
print "Exception in user code:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
# An error occurred while initializing Analyst remote
# control.
emit_signal('on_step_complete', [self.name, 'Fail'])
else:
# D-State is not enabled, so step is complete.
emit_signal('on_step_complete', [self.name, None])
else:
# DropBox-DX device is not connected, but allow protocol to
# continue.
#
# N.B., A warning message is display once at the *start* of the
# protocol if no DropBot-DX connection has been established, but
# *not* on each step.
emit_signal('on_step_complete', [self.name, None])
###########################################################################
# # Periodic callbacks #
def check_dstat_status(self):
'''
1. Check to see if acquisition is finished.
2. If (1), emit `on_step_complete` signal.
'''
try:
completed_timestamp = hub_execute('dstat-interface',
'acquisition_complete',
experiment_id=
self.dstat_experiment_id,
timeout_s=5.)
if completed_timestamp is not None:
# ## Acquisition is complete ##
app = get_app()
# Increment the number of completed DStat experiments for
# current step.
step_i = app.protocol.current_step_number
count_i = 1 + self.dstat_experiment_count_by_step.get(step_i,
0)
self.dstat_experiment_count_by_step[step_i] = count_i
# ### Save results data and plot ###
output_directory = (path(app.experiment_log.get_log_path())
.abspath())
output_namebase = str(app.protocol.current_step_number)
step_label = self.get_step_label()
if step_label is not None:
# Replace characters that are not allowed in a filename
# with underscore.
output_namebase = re.sub(r'[:/\\\?{}]', '_', step_label)
# Save results to a text file in the experiment log directory.
output_txt_path = get_unique_path(output_directory
.joinpath(output_namebase +
'.txt'))
logger.info('Save results to: %s', output_txt_path)
dstat_params = hub_execute('dstat-interface', 'get_params')
hub_execute('dstat-interface', 'save_text',
save_data_path=output_txt_path)
data_i = hub_execute('dstat-interface', 'get_experiment_data',
experiment_id=self.dstat_experiment_id)
metadata_i = self.get_step_metadata()
# Compute (approximate) `utc_timestamp` for each DStat
# measurement.
max_time_s = data_i.time_s.max()
metadata_i['utc_timestamp'] = (completed_timestamp -
data_i.time_s
.map(lambda t:
timedelta(seconds=
max_time_s - t)))
# Step label from step label plugin.
metadata_i['step_label'] = step_label
# Compute UTC start time from local experiment start time.
metadata_i['experiment_start'] = \
(dt.datetime.fromtimestamp(app.experiment_log.start_time())
+ (dt.datetime.utcnow() - dt.datetime.now()))
# Compute UTC start time from local experiment start time.
metadata_i['experiment_length_min'] = \
(completed_timestamp -
metadata_i['experiment_start']).total_seconds() / 60.
# Record synchronous detection parameters from DStat (if set).
if dstat_params['sync_true']:
metadata_i['target_hz'] = float(dstat_params['sync_freq'])
else:
metadata_i['target_hz'] = None
metadata_i['sample_frequency_hz'] = float(dstat_params['adc_rate_hz'])
# Cast metadata `unicode` fields as `str` to enable HDF export.
for k, v in metadata_i.iteritems():
if isinstance(v, types.StringTypes):
metadata_i[k] = str(v)
data_md_i = data_i.copy()
for i, (k, v) in enumerate(metadata_i.iteritems()):
try:
data_md_i.insert(i, k, v)
except Exception, e:
logger.info('Skipping metadata field %s: %s.\n%s', k,
v, e)
# Set order for known columns. Unknown columns are ordered
# last, alphabetically.
column_order = ['instrument_id', 'experiment_id',
'experiment_uuid', 'experiment_start',
'experiment_length_min', 'utc_timestamp',
'device_id', 'batch_id', 'sample_id',
'step_label', 'step_number', 'attempt_number',
'temperature_celsius', 'relative_humidity',
'target_hz', 'sample_frequency_hz', 'time_s',
'current_amps']
column_index = dict([(k, i) for i, k in
enumerate(column_order)])
ordered_columns = sorted(data_md_i.columns, key=lambda k:
(column_index
.get(k, len(column_order)), k))
data_md_i = data_md_i[ordered_columns]
namebase_i = ('e[{}]-d[{}]-s[{}]'
.format(metadata_i['experiment_uuid'][:8],
metadata_i.get('device_id'),
metadata_i.get('sample_id')))
if self.dstat_experiment_data is None:
self.dstat_experiment_data = data_md_i
else:
combined = pd.concat([self.dstat_experiment_data,
data_md_i])
self.dstat_experiment_data = combined.reset_index(drop=
True)
# Append DStat experiment data to CSV file.
csv_output_path = self.data_dir().joinpath(namebase_i + '.csv')
# Only include header if the file does not exist or is empty.
include_header = not (csv_output_path.isfile() and
(csv_output_path.size > 0))
with csv_output_path.open('a') as output:
data_md_i.to_csv(output, index=False,
header=include_header)
df_dstat_summary = self.dstat_summary_frame(numeric=True)
# Write DStat summary table to CSV file.
csv_summary_path = self.data_dir().joinpath('dstat-summary'
'.csv')
with csv_summary_path.open('w') as output:
df_dstat_summary.to_csv(output)
# Turn light back on after photomultiplier tube (PMT)
# measurement.
self.dropbot_dx_remote.light_enabled = True
# notify step complete.
emit_signal('on_step_complete', [self.name, None])
self.dstat_timeout_id = None
return False
else:
print '\rWaiting for Dstat...', self._dstat_spinner.next(),
except:
print "Exception in user code:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
emit_signal('on_step_complete', [self.name, 'Fail'])
self.dstat_timeout_id = None
return False
return True
PluginGlobals.pop_env()
| '''
Connect to dropbot-dx instrument.
'''
self.has_environment_data = False
self.environment_sensor_master = None
# if the dropbot dx plugin is installed and enabled, try getting its
# reference
try:
service = get_service_instance_by_name('wheelerlab.dropbot_dx')
if service.enabled():
self.dropbot_dx_remote = service.control_board
except:
pass
if self.dropbot_dx_remote is None:
# if we couldn't get a reference, try finding a DropBot DX connected to
# a serial port
try:
self.dropbot_dx_remote = dx.SerialProxy()
host_version = self.dropbot_dx_remote.host_software_version
remote_version = self.dropbot_dx_remote.remote_software_version
if remote_version != host_version:
response = yesno('The DropBot DX firmware version (%s) '
'does not match the driver version (%s). '
'Update firmware?' % (remote_version,
host_version))
if response == gtk.RESPONSE_YES:
self.on_flash_firmware()
# turn on the light by default
self.dropbot_dx_remote.light_enabled = True
except IOError:
logger.warning('Could not connect to DropBot DX.')
# Try to read temperature/humidity over i2c bus through a remote proxy
# --------------------------------------------------------------------
remote_proxies = [self.dropbot_dx_remote]
try:
service = get_service_instance_by_name('wheelerlab'
'.dmf_control_board_plugin')
except KeyError:
# DropBot v2.0 control board plugin is not available.
pass
else:
if service.enabled() and service.control_board.connected():
# The DropBot v2.0 control board plugin is loaded and the
# DropBot v2.0 control board is connected.
#
# Try to read temperature/humidity over i2c through control
# board first.
remote_proxies.insert(0, service.control_board)
# Try each proxy (in order) until the temperature/humidity is read
# successfully.
for proxy_i in remote_proxies:
try:
climate_info = self.get_environment_state(proxy_i)
logger.info('temp=%.1fC, Rel. humidity=%.1f%% (%s)',
climate_info['temperature_celsius'],
100 * climate_info['relative_humidity'], proxy_i)
# Cache remote proxy reference for future calls.
self.has_environment_data = True
self.environment_sensor_master = proxy_i
break
except:
# Try next remote proxy.
pass
# Get instrument identifier, if available.
self.dropbot_dx_id = getattr(self.dropbot_dx_remote, 'id', None) | identifier_body |
__init__.py | """
Copyright 2015-2016 Christian Fobel and Ryan Fobel
This file is part of dropbot_dx_plugin.
dropbot_dx_plugin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
dropbot_dx_plugin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with dropbot_dx_plugin. If not, see <http://www.gnu.org/licenses/>.
"""
from datetime import timedelta
from functools import wraps
import datetime as dt
import itertools
import json
import logging
import re
import subprocess
import sys, traceback
import time
import types
import gtk
import pango
from path_helpers import path
from flatland import Boolean, Float, Form
from pygtkhelpers.ui.extra_widgets import Filepath
from microdrop.plugin_helpers import (AppDataController, StepOptionsController,
get_plugin_info, hub_execute)
from microdrop.plugin_manager import (PluginGlobals, Plugin, IPlugin,
ScheduleRequest, implements, emit_signal,
get_service_instance_by_name)
from microdrop.app_context import get_app
import dropbot_dx as dx
import dropbot_elisa_analysis as ea
import gobject
from pygtkhelpers.ui.extra_dialogs import yesno, FormViewDialog
from pygtkhelpers.utils import dict_to_form
from arduino_helpers.upload import upload_firmware
import pandas as pd
logger = logging.getLogger(__name__)
PluginGlobals.push_env('microdrop.managed')
def dataframe_display_dialog(df, message='', parent=None):
'''
Display a string representation of a `pandas.DataFrame` in a
`gtk.MessageDialog`.
'''
dialog = gtk.MessageDialog(parent, buttons=gtk.BUTTONS_OK)
label = dialog.props.message_area.get_children()[-1]
label.modify_font(pango.FontDescription('mono'))
dialog.props.text = message
dialog.props.secondary_text = df.to_string()
try:
return dialog.run()
finally:
dialog.destroy()
def is_connected(_lambda):
'''
Decorator to check if DropBot DX instrument is connected.
If not connected, warning is logged, but wrapped function is not called.
'''
def wrapper(f):
@wraps(f)
def wrapped(self, *f_args, **f_kwargs):
if not self.connected():
logger.warning('DropBot DX not connected.')
else:
f(self, *f_args, **f_kwargs)
return wrapped
return wrapper(_lambda)
def get_unique_path(filepath):
'''
Append `-###` to the base name of a file until a file path is found that
does not exist.
Args
----
filepath (str) : Full file path to target file.
Returns
-------
(path) : Full path where no file exists.
'''
filepath = path(filepath)
cre_incremental = re.compile(r'^(?P<namebase>.*)-(?P<index>\d+)$')
while filepath.isfile():
# Output path exists.
parent_i = filepath.parent
namebase_i = filepath.namebase
ext_i = filepath.ext
match = cre_incremental.search(namebase_i)
if match:
# File name ends with `-##`. Increment and retry.
index_i = int(match.group('index')) + 1
namebase_i = match.group('namebase')
else:
index_i = 0
filepath = parent_i.joinpath(namebase_i + '-%02d%s' % (index_i, ext_i))
return filepath
class | (Plugin, AppDataController, StepOptionsController):
"""
This class is automatically registered with the PluginManager.
"""
implements(IPlugin)
version = get_plugin_info(path(__file__).parent).version
plugin_name = get_plugin_info(path(__file__).parent).plugin_name
AppFields = Form.of(Float.named('dstat_delay_s')
.using(default=2., optional=True,
properties={'title': 'Delay before D-stat '
'measurement (seconds)'}),
Filepath.named('calibrator_file')
.using(#pylint: disable-msg=E1120
default='', optional=True,
properties={'action': gtk.FILE_CHOOSER_ACTION_SAVE}))
StepFields = Form.of(Boolean.named('magnet_engaged').using(default=False,
optional=True),
Boolean.named('dstat_enabled').using(default=False,
optional=True))
def __init__(self):
self.name = self.plugin_name
self.dstat_timeout_id = None # Periodic Dstat status check timeout id
self.dstat_experiment_id = None # UUID of active Dstat experiment
self.dropbot_dx_remote = None # `dropbot_dx.SerialProxy` instance
self.initialized = False # Latch to, e.g., config menus, only once
self._metadata = None
self.has_environment_data = False
self.environment_sensor_master = None
# Number of completed DStat experiments for each step.
self.dstat_experiment_count_by_step = {}
self.dstat_experiment_data = None
self.dropbot_dx_id = None
def connect(self):
'''
Connect to dropbot-dx instrument.
'''
self.has_environment_data = False
self.environment_sensor_master = None
# if the dropbot dx plugin is installed and enabled, try getting its
# reference
try:
service = get_service_instance_by_name('wheelerlab.dropbot_dx')
if service.enabled():
self.dropbot_dx_remote = service.control_board
except:
pass
if self.dropbot_dx_remote is None:
# if we couldn't get a reference, try finding a DropBot DX connected to
# a serial port
try:
self.dropbot_dx_remote = dx.SerialProxy()
host_version = self.dropbot_dx_remote.host_software_version
remote_version = self.dropbot_dx_remote.remote_software_version
if remote_version != host_version:
response = yesno('The DropBot DX firmware version (%s) '
'does not match the driver version (%s). '
'Update firmware?' % (remote_version,
host_version))
if response == gtk.RESPONSE_YES:
self.on_flash_firmware()
# turn on the light by default
self.dropbot_dx_remote.light_enabled = True
except IOError:
logger.warning('Could not connect to DropBot DX.')
# Try to read temperature/humidity over i2c bus through a remote proxy
# --------------------------------------------------------------------
remote_proxies = [self.dropbot_dx_remote]
try:
service = get_service_instance_by_name('wheelerlab'
'.dmf_control_board_plugin')
except KeyError:
# DropBot v2.0 control board plugin is not available.
pass
else:
if service.enabled() and service.control_board.connected():
# The DropBot v2.0 control board plugin is loaded and the
# DropBot v2.0 control board is connected.
#
# Try to read temperature/humidity over i2c through control
# board first.
remote_proxies.insert(0, service.control_board)
# Try each proxy (in order) until the temperature/humidity is read
# successfully.
for proxy_i in remote_proxies:
try:
climate_info = self.get_environment_state(proxy_i)
logger.info('temp=%.1fC, Rel. humidity=%.1f%% (%s)',
climate_info['temperature_celsius'],
100 * climate_info['relative_humidity'], proxy_i)
# Cache remote proxy reference for future calls.
self.has_environment_data = True
self.environment_sensor_master = proxy_i
break
except:
# Try next remote proxy.
pass
# Get instrument identifier, if available.
self.dropbot_dx_id = getattr(self.dropbot_dx_remote, 'id', None)
def get_environment_state(self, master=None, i2c_address=0x27):
'''
Acquire temperature and humidity from Honeywell HIH6000 series
sensor.
[1]: http://sensing.honeywell.com/index.php/ci_id/142171/la_id/1/document/1/re_id/0
'''
if master is None:
master = self.environment_sensor_master
# Trigger measurement.
master.i2c_write(i2c_address, [])
time.sleep(.01)
while True:
# Read 4 bytes from sensor and cast as 2 16-bit integers with reversed
# byte order
humidity_data, temperature_data = master.i2c_read(i2c_address, 4) \
.astype('uint8').view('>u2')
status_code = (humidity_data >> 14) & 0x03
if status_code == 0:
# Measurement completed successfully.
break
elif status_code > 1:
raise IOError('Error reading from sensor.')
# Measurement data is stale (i.e., measurement still in
# progress). Try again.
time.sleep(.001)
# See URL from docstring for source of equations.
relative_humidity = float(humidity_data & 0x03FFF) / ((1 << 14) - 2)
temperature_celsius = (float((temperature_data >> 2) & 0x3FFF) /
((1 << 14) - 2) * 165 - 40)
return pd.Series([relative_humidity, temperature_celsius],
index=['relative_humidity',
'temperature_celsius'])
def connected(self):
'''
Returns
-------
(bool) : `True` if dropbot-dx instrument is connected.
'''
return (self.dropbot_dx_remote is not None)
def data_dir(self):
app = get_app()
data_dir = app.experiment_log.get_log_path().joinpath(self.name)
if not data_dir.isdir():
data_dir.makedirs_p()
return data_dir
def dstat_summary_frame(self, **kwargs):
'''
Generate DStat signal results summary, normalized against
calibrator signal where applicable.
'''
if self.dstat_experiment_data is None:
return pd.DataFrame(None)
app_values = self.get_app_values()
calibrator_file = app_values.get('calibrator_file')
# Reduce measurements from each DStat acquisition step into a single
# signal value.
df_md_reduced = ea.reduce_microdrop_dstat_data(self
.dstat_experiment_data)
# Subtract respective background signal from each row in DStat results
# summary. See `dropbot_elisa_analysis.subtract_background_signal` for
# more details.
try:
df_adjusted =\
ea.subtract_background_signal(df_md_reduced
.set_index('step_label'))
df_md_reduced.loc[:, 'signal'] = df_adjusted.signal.values
logger.info('Adjusted signals according to background signal '
'(where available).')
except Exception, exception:
logger.info('Could not adjust signals according to background '
'signal.\n%s', exception)
return ea.microdrop_dstat_summary_table(df_md_reduced,
calibrator_csv_path=
calibrator_file, **kwargs)
def get_step_metadata(self):
'''
Returns
-------
(OrderedDict) : Contents of `self.metadata` dictionary, updated
with the additional fields `batch_id`, `step_number`,
`attempt_number`, `temperature_celsius`, `relative_humidity`.
'''
app = get_app()
# Construct dictionary of metadata for extra columns in the `pandas.DataFrame`.
metadata = self.metadata.copy()
cre_device_id = re.compile(r'#(?P<batch_id>[a-fA-F0-9]+)'
r'%(?P<device_id>[a-fA-F0-9]+)$')
device_id = metadata.get('device_id', '')
# If `device_id` is in the form '#<batch-id>%<device-id>', extract batch and
# device identifiers separately.
match = cre_device_id.match(device_id)
if match:
metadata['device_id'] = str(match.group('device_id'))
metadata['batch_id'] = str(match.group('batch_id'))
else:
metadata['device_id'] = None
metadata['batch_id'] = None
metadata['step_number'] = app.protocol.current_step_number + 1
# Number of times the DStat experiment has been run for the current step.
metadata['attempt_number'] = (self.dstat_experiment_count_by_step
[app.protocol.current_step_number])
# Current temperature and humidity.
if self.has_environment_data:
metadata.update(self.get_environment_state())
# Instrument identifier.
metadata['instrument_id'] = self.dropbot_dx_id
if 'sample_id' not in metadata:
sample_labels = [str(v) for k, v in metadata.iteritems()
if str(k).lower().startswith('sample')]
metadata['sample_id'] = ' and '.join(sample_labels)
return metadata
###########################################################################
# # Accessor methods #
def get_step_label(self):
try:
step_label_plugin =\
get_service_instance_by_name('wheelerlab.step_label_plugin')
return step_label_plugin.get_step_options().get('label')
except:
return None
@property
def metadata(self):
'''
Add experiment index and experiment UUID to metadata.
'''
metadata = self._metadata.copy() if self._metadata else {}
app = get_app()
metadata['experiment_id'] = app.experiment_log.experiment_id
metadata['experiment_uuid'] = app.experiment_log.uuid
return metadata
@metadata.setter
def metadata(self, value):
self._metadata = value
###########################################################################
# # Menu callbacks #
def on_edit_configuration(self, widget=None, data=None):
'''
Display a dialog to manually edit the configuration settings.
'''
config = self.dropbot_dx_remote.config
form = dict_to_form(config)
dialog = FormViewDialog(form, 'Edit configuration settings')
valid, response = dialog.run()
if valid:
self.dropbot_dx_remote.update_config(**response)
def on_flash_firmware(self):
board = dx.get_firmwares().keys()[0]
firmware_path = dx.get_firmwares()[board][0]
port = self.dropbot_dx_remote.stream.serial_device.port
# disconnect from DropBot DX so that we can flash it
del self.dropbot_dx_remote
self.dropbot_dx_remote = None
logger.info(upload_firmware(firmware_path, board, port=port))
# reconnect
self.connect()
def on_launch_dstat_interface(self, widget, data=None):
subprocess.Popen([sys.executable, '-m', 'dstat_interface.main'])
def on_set_dstat_params_file(self, widget, data=None):
options = self.get_step_options()
form = Form.of(Filepath.named('dstat_params_file')
.using(default=options.get('dstat_params_file', ''),
optional=True,
properties={'patterns':
[('Dstat parameters file (*.yml)',
('*.yml', ))]}))
dialog = FormViewDialog(form, 'Set DStat parameters file')
valid, response = dialog.run()
if valid:
options['dstat_params_file'] = response['dstat_params_file']
self.set_step_values(options)
###########################################################################
# # Plugin signal handlers #
def get_schedule_requests(self, function_name):
"""
Returns a list of scheduling requests (i.e., ScheduleRequest
instances) for the function specified by function_name.
"""
if function_name in ['on_plugin_enable']:
return [ScheduleRequest('wheelerlab.dropbot_dx', self.name),
ScheduleRequest('wheelerlab.dmf_control_board_plugin',
self.name)]
elif function_name == 'on_step_run':
return [ScheduleRequest('wheelerlab.dmf_device_ui_plugin',
self.name)]
elif function_name == 'on_experiment_log_changed':
# Ensure that the app's reference to the new experiment log gets
# set.
return [ScheduleRequest('microdrop.app', self.name)]
return []
def on_experiment_log_changed(self, experiment_log):
# Reset number of completed DStat experiments for each step.
self.dstat_experiment_count_by_step = {}
self.dstat_experiment_data = None
app = get_app()
app_values = self.get_app_values()
calibrator_file = app_values.get('calibrator_file', '')
data = {'calibrator_file': calibrator_file}
if hasattr(app, 'experiment_log') and app.experiment_log:
app.experiment_log.metadata[self.name] = data
# copy the calibrator file to the experiment log directory
if calibrator_file:
if not path(calibrator_file).isfile():
logger.error('Calibration file (%s) does not exist.' %
calibrator_file)
else:
try:
output_path = path(app.experiment_log.get_log_path()) / self.name
if not output_path.isdir():
output_path.mkdir()
path(calibrator_file).copy2(output_path / 'calibrator.csv')
except:
logger.error('Could not copy calibration file to the '
'experiment log directory.' , exc_info=True)
def on_metadata_changed(self, schema, original_metadata, metadata):
'''
Notify DStat interface of updates to the experiment metadata.
'''
metadata = metadata.copy()
metadata['metadata_schema'] = json.dumps(schema)
self.metadata = metadata
    def on_plugin_enable(self):
        '''
        Connect to the instrument and, on first enable only, build this
        plugin's "Tools" and "View" menu entries.
        '''
        self.connect()
        if not self.initialized:
            app = get_app()
            self.tools_menu_item = gtk.MenuItem("DropBot DX")
            app.main_window_controller.menu_tools.append(self.tools_menu_item)
            self.tools_menu = gtk.Menu()
            self.tools_menu.show()
            self.tools_menu_item.set_submenu(self.tools_menu)
            menu_item = gtk.MenuItem("Launch Dstat interface")
            self.tools_menu.append(menu_item)
            menu_item.connect("activate", self.on_launch_dstat_interface)
            menu_item.show()
            menu_item = gtk.MenuItem("Set step Dstat parameters file...")
            self.tools_menu.append(menu_item)
            menu_item.connect("activate", self.on_set_dstat_params_file)
            menu_item.show()
            self.edit_config_menu_item = \
                gtk.MenuItem("Edit configuration settings...")
            self.tools_menu.append(self.edit_config_menu_item)
            self.edit_config_menu_item.connect("activate",
                                               self.on_edit_configuration)
            self.view_menu_item = gtk.MenuItem("DropBot DX")
            app.main_window_controller.menu_view.append(self.view_menu_item)
            self.view_menu = gtk.Menu()
            self.view_menu.show()
            self.view_menu_item.set_submenu(self.view_menu)
            menu_item = gtk.MenuItem("View DStat results...")
            self.view_menu.append(menu_item)
            # Display DStat summary table in dialog.
            menu_item.connect("activate", lambda *args:
                              dataframe_display_dialog
                              (self.dstat_summary_frame(unit='n'),
                               message='DStat result summary'))
            menu_item.show()
            self.initialized = True
        self.tools_menu_item.show()
        self.view_menu_item.show()
        if self.connected():
            # Only expose configuration editing while an instrument is
            # actually connected.
            self.edit_config_menu_item.show()
        super(DropBotDxAccessoriesPlugin, self).on_plugin_enable()
def on_plugin_disable(self):
if self.connected():
# delete to free up the serial port
del self.dropbot_dx_remote
self.dropbot_dx_remote = None
self.tools_menu_item.hide()
    @is_connected
    def on_protocol_run(self):
        # Intentionally a no-op: the `is_connected` decorator logs a warning
        # once at protocol start when no DropBot DX is connected.
        pass
    def on_step_options_swapped(self, plugin, old_step_number, step_number):
        """
        Handler called when the step options are changed for a particular
        plugin. This will, for example, allow for GUI elements to be
        updated based on step specified.

        Parameters:
            plugin : plugin instance for which the step options changed
            step_number : step number that the options changed for
        """
        # No GUI state in this plugin depends on step options; nothing to do.
        pass
    def on_step_run(self):
        '''
        Handler called whenever a step is executed. Note that this signal is
        only emitted in realtime mode or if a protocol is running.

        Plugins that handle this signal must emit the `on_step_complete` signal
        once they have completed the step. The protocol controller will wait
        until all plugins have completed the current step before proceeding.

        The `on_step_complete` signal is emitted with following signature:

            emit_signal('on_step_complete', [plugin_name, return_value])

        where `plugin_name` is the name of the emitting plugin, and
        `return_value` can be one of:

         - `None`: Step completed successfully.
         - `'Repeat'`: Repeat the step.
         - `'Fail'`: Unrecoverable error (stop the protocol).
        '''
        app = get_app()
        logger.info('[DropBotDxAccessoriesPlugin] on_step_run(): step #%d',
                    app.protocol.current_step_number)
        options = self.get_step_options()
        app_values = self.get_app_values()
        if self.connected():
            # Light is turned off while DStat acquisition is enabled; magnet
            # position follows the step option.
            self.dropbot_dx_remote.light_enabled = not options['dstat_enabled']
            self.dropbot_dx_remote.magnet_engaged=options['magnet_engaged']
            try:
                if self.has_environment_data:
                    env = self.get_environment_state().to_dict()
                    logger.info('temp=%.1fC, Rel. humidity=%.1f%%' %
                                (env['temperature_celsius'],
                                 100 * env['relative_humidity']))
                    app.experiment_log.add_data({"environment state": env},
                                                self.name)
            except ValueError:
                # Sensor read failed; stop trying for the rest of the run.
                self.has_environment_data = False
            if options['dstat_enabled']:
                # D-stat is enabled for step. Request acquisition.
                try:
                    if 'dstat_params_file' in options:
                        # Load Dstat parameters.
                        hub_execute('dstat-interface', 'load_params',
                                    params_path=options['dstat_params_file'])
                    if self.dstat_timeout_id is not None:
                        # Timer was already set, so cancel previous timer.
                        gobject.source_remove(self.dstat_timeout_id)
                    # Delay before D-stat measurement (e.g., to allow webcam
                    # light to turn off).
                    dstat_delay_s = app_values.get('dstat_delay_s', 0)
                    time.sleep(max(0, dstat_delay_s))
                    step_label = self.get_step_label()
                    # Send Microdrop step label (if available) to provide name
                    # for DStat experiment.
                    metadata = self.metadata.copy()
                    metadata['name'] = (step_label if step_label else
                                        str(app.protocol.current_step_number +
                                            1))
                    metadata['patient_id'] = metadata.get('sample_id', 'None')
                    # Get target path for DStat database directory.
                    dstat_database_path = (path(app.config['data_dir'])
                                           .realpath().joinpath('dstat-db'))
                    self.dstat_experiment_id = \
                        hub_execute('dstat-interface', 'run_active_experiment',
                                    metadata=metadata,
                                    params={'db_path_entry':
                                            str(dstat_database_path),
                                            'db_enable_checkbutton': True})
                    self._dstat_spinner = itertools.cycle(r'-\|/')
                    print ''
                    # Check every 250ms to see if dstat acquisition has
                    # completed.
                    self.dstat_timeout_id = \
                        gobject.timeout_add(250, self.check_dstat_status)
                except:
                    print "Exception in user code:"
                    print '-'*60
                    traceback.print_exc(file=sys.stdout)
                    print '-'*60
                    # An error occurred while initializing Analyst remote
                    # control.
                    emit_signal('on_step_complete', [self.name, 'Fail'])
            else:
                # D-State is not enabled, so step is complete.
                emit_signal('on_step_complete', [self.name, None])
        else:
            # DropBox-DX device is not connected, but allow protocol to
            # continue.
            #
            # N.B., A warning message is display once at the *start* of the
            # protocol if no DropBot-DX connection has been established, but
            # *not* on each step.
            emit_signal('on_step_complete', [self.name, None])
###########################################################################
# # Periodic callbacks #
    def check_dstat_status(self):
        '''
         1. Check to see if acquisition is finished.
         2. If (1), emit `on_step_complete` signal.

        Returns
        -------
        (bool) : `True` to keep the 250 ms `gobject` timeout polling,
            `False` to cancel it (acquisition complete or failed).
        '''
        try:
            completed_timestamp = hub_execute('dstat-interface',
                                              'acquisition_complete',
                                              experiment_id=
                                              self.dstat_experiment_id,
                                              timeout_s=5.)
            if completed_timestamp is not None:
                # ## Acquisition is complete ##
                app = get_app()
                # Increment the number of completed DStat experiments for
                # current step.
                step_i = app.protocol.current_step_number
                count_i = 1 + self.dstat_experiment_count_by_step.get(step_i,
                                                                      0)
                self.dstat_experiment_count_by_step[step_i] = count_i
                # ### Save results data and plot ###
                output_directory = (path(app.experiment_log.get_log_path())
                                    .abspath())
                output_namebase = str(app.protocol.current_step_number)
                step_label = self.get_step_label()
                if step_label is not None:
                    # Replace characters that are not allowed in a filename
                    # with underscore.
                    output_namebase = re.sub(r'[:/\\\?{}]', '_', step_label)
                # Save results to a text file in the experiment log directory.
                output_txt_path = get_unique_path(output_directory
                                                  .joinpath(output_namebase +
                                                            '.txt'))
                logger.info('Save results to: %s', output_txt_path)
                dstat_params = hub_execute('dstat-interface', 'get_params')
                hub_execute('dstat-interface', 'save_text',
                            save_data_path=output_txt_path)
                data_i = hub_execute('dstat-interface', 'get_experiment_data',
                                     experiment_id=self.dstat_experiment_id)
                metadata_i = self.get_step_metadata()
                # Compute (approximate) `utc_timestamp` for each DStat
                # measurement.
                max_time_s = data_i.time_s.max()
                metadata_i['utc_timestamp'] = (completed_timestamp -
                                               data_i.time_s
                                               .map(lambda t:
                                                    timedelta(seconds=
                                                              max_time_s - t)))
                # Step label from step label plugin.
                metadata_i['step_label'] = step_label
                # Compute UTC start time from local experiment start time.
                metadata_i['experiment_start'] = \
                    (dt.datetime.fromtimestamp(app.experiment_log.start_time())
                     + (dt.datetime.utcnow() - dt.datetime.now()))
                # Elapsed experiment time (minutes) from UTC start to DStat
                # completion.
                metadata_i['experiment_length_min'] = \
                    (completed_timestamp -
                     metadata_i['experiment_start']).total_seconds() / 60.
                # Record synchronous detection parameters from DStat (if set).
                if dstat_params['sync_true']:
                    metadata_i['target_hz'] = float(dstat_params['sync_freq'])
                else:
                    metadata_i['target_hz'] = None
                metadata_i['sample_frequency_hz'] = float(dstat_params['adc_rate_hz'])
                # Cast metadata `unicode` fields as `str` to enable HDF export.
                for k, v in metadata_i.iteritems():
                    if isinstance(v, types.StringTypes):
                        metadata_i[k] = str(v)
                data_md_i = data_i.copy()
                for i, (k, v) in enumerate(metadata_i.iteritems()):
                    try:
                        data_md_i.insert(i, k, v)
                    except Exception, e:
                        logger.info('Skipping metadata field %s: %s.\n%s', k,
                                    v, e)
                # Set order for known columns. Unknown columns are ordered
                # last, alphabetically.
                column_order = ['instrument_id', 'experiment_id',
                                'experiment_uuid', 'experiment_start',
                                'experiment_length_min', 'utc_timestamp',
                                'device_id', 'batch_id', 'sample_id',
                                'step_label', 'step_number', 'attempt_number',
                                'temperature_celsius', 'relative_humidity',
                                'target_hz', 'sample_frequency_hz', 'time_s',
                                'current_amps']
                column_index = dict([(k, i) for i, k in
                                     enumerate(column_order)])
                ordered_columns = sorted(data_md_i.columns, key=lambda k:
                                         (column_index
                                          .get(k, len(column_order)), k))
                data_md_i = data_md_i[ordered_columns]
                namebase_i = ('e[{}]-d[{}]-s[{}]'
                              .format(metadata_i['experiment_uuid'][:8],
                                      metadata_i.get('device_id'),
                                      metadata_i.get('sample_id')))
                if self.dstat_experiment_data is None:
                    self.dstat_experiment_data = data_md_i
                else:
                    combined = pd.concat([self.dstat_experiment_data,
                                          data_md_i])
                    self.dstat_experiment_data = combined.reset_index(drop=
                                                                      True)
                # Append DStat experiment data to CSV file.
                csv_output_path = self.data_dir().joinpath(namebase_i + '.csv')
                # Only include header if the file does not exist or is empty.
                include_header = not (csv_output_path.isfile() and
                                      (csv_output_path.size > 0))
                with csv_output_path.open('a') as output:
                    data_md_i.to_csv(output, index=False,
                                     header=include_header)
                df_dstat_summary = self.dstat_summary_frame(numeric=True)
                # Write DStat summary table to CSV file.
                csv_summary_path = self.data_dir().joinpath('dstat-summary'
                                                            '.csv')
                with csv_summary_path.open('w') as output:
                    df_dstat_summary.to_csv(output)
                # Turn light back on after photomultiplier tube (PMT)
                # measurement.
                self.dropbot_dx_remote.light_enabled = True
                # notify step complete.
                emit_signal('on_step_complete', [self.name, None])
                self.dstat_timeout_id = None
                return False
            else:
                # Still acquiring; update console spinner in place.
                print '\rWaiting for Dstat...', self._dstat_spinner.next(),
        except:
            print "Exception in user code:"
            print '-'*60
            traceback.print_exc(file=sys.stdout)
            print '-'*60
            emit_signal('on_step_complete', [self.name, 'Fail'])
            self.dstat_timeout_id = None
            return False
        return True
PluginGlobals.pop_env()
# ---- dataset row boundary: DropBotDxAccessoriesPlugin | identifier_name ----
# ---- file: __init__.py ----
"""
Copyright 2015-2016 Christian Fobel and Ryan Fobel
This file is part of dropbot_dx_plugin.
dropbot_dx_plugin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
dropbot_dx_plugin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with dropbot_dx_plugin. If not, see <http://www.gnu.org/licenses/>.
"""
from datetime import timedelta
from functools import wraps
import datetime as dt
import itertools
import json
import logging
import re
import subprocess
import sys, traceback
import time
import types
import gtk
import pango
from path_helpers import path
from flatland import Boolean, Float, Form
from pygtkhelpers.ui.extra_widgets import Filepath
from microdrop.plugin_helpers import (AppDataController, StepOptionsController,
get_plugin_info, hub_execute)
from microdrop.plugin_manager import (PluginGlobals, Plugin, IPlugin,
ScheduleRequest, implements, emit_signal,
get_service_instance_by_name)
from microdrop.app_context import get_app
import dropbot_dx as dx
import dropbot_elisa_analysis as ea
import gobject
from pygtkhelpers.ui.extra_dialogs import yesno, FormViewDialog
from pygtkhelpers.utils import dict_to_form
from arduino_helpers.upload import upload_firmware
import pandas as pd
# Module-level logger for this plugin.
logger = logging.getLogger(__name__)
# Register subsequently-defined plugins in the MicroDrop managed-plugin
# environment.
PluginGlobals.push_env('microdrop.managed')
def dataframe_display_dialog(df, message='', parent=None):
    '''
    Display a string representation of a `pandas.DataFrame` in a
    `gtk.MessageDialog`.

    Args
    ----
    df (pandas.DataFrame) : Frame to display.
    message (str) : Primary text shown above the frame.
    parent : Optional parent `gtk` window for the dialog.

    Returns
    -------
    (int) : Response code from `dialog.run()`.
    '''
    dialog = gtk.MessageDialog(parent, buttons=gtk.BUTTONS_OK)
    # Last child of the message area is the secondary-text label; use a
    # monospace font so the frame's columns line up.
    label = dialog.props.message_area.get_children()[-1]
    label.modify_font(pango.FontDescription('mono'))
    dialog.props.text = message
    dialog.props.secondary_text = df.to_string()
    try:
        return dialog.run()
    finally:
        # Always destroy the dialog, even if `run()` raises.
        dialog.destroy()
def is_connected(_lambda):
    '''
    Decorator to check if DropBot DX instrument is connected.

    If not connected, a warning is logged and the wrapped method is *not*
    called.

    Args
    ----
    _lambda (function) : Unbound method whose first argument is an object
        exposing a `connected()` method.

    Returns
    -------
    (function) : Wrapped method with the same signature (return value of
        the wrapped method is discarded, matching previous behavior).
    '''
    # The original implementation nested a single-use decorator factory
    # (`wrapper(_lambda)`); the extra level was redundant, so the wrapped
    # function is now built directly.
    @wraps(_lambda)
    def wrapped(self, *f_args, **f_kwargs):
        if not self.connected():
            logger.warning('DropBot DX not connected.')
        else:
            _lambda(self, *f_args, **f_kwargs)
    return wrapped
def get_unique_path(filepath):
    '''
    Append `-###` to the base name of a file until a file path is found that
    does not exist.

    Args
    ----
    filepath (str) : Full file path to target file.

    Returns
    -------
    (path) : Full path where no file exists.
    '''
    candidate = path(filepath)
    suffix_pattern = re.compile(r'^(?P<namebase>.*)-(?P<index>\d+)$')
    while candidate.isfile():
        # A file already exists at the candidate path; derive the next
        # numbered variant of the base name.
        stem = candidate.namebase
        match = suffix_pattern.search(stem)
        if match:
            # Name already ends with `-##`; strip suffix and bump counter.
            next_index = int(match.group('index')) + 1
            stem = match.group('namebase')
        else:
            next_index = 0
        candidate = candidate.parent.joinpath(stem + '-%02d%s'
                                              % (next_index, candidate.ext))
    return candidate
class DropBotDxAccessoriesPlugin(Plugin, AppDataController, StepOptionsController):
    """
    MicroDrop plugin exposing DropBot DX accessories (magnet, light, DStat
    potentiostat, and temperature/humidity sensor).

    This class is automatically registered with the PluginManager.
    """
    implements(IPlugin)
    version = get_plugin_info(path(__file__).parent).version
    plugin_name = get_plugin_info(path(__file__).parent).plugin_name
    # Application-wide settings (shown in the app options dialog).
    AppFields = Form.of(Float.named('dstat_delay_s')
                        .using(default=2., optional=True,
                               properties={'title': 'Delay before D-stat '
                                           'measurement (seconds)'}),
                        Filepath.named('calibrator_file')
                        .using(#pylint: disable-msg=E1120
                               default='', optional=True,
                               properties={'action': gtk.FILE_CHOOSER_ACTION_SAVE}))
    # Per-protocol-step settings (shown in the step options grid).
    StepFields = Form.of(Boolean.named('magnet_engaged').using(default=False,
                                                               optional=True),
                         Boolean.named('dstat_enabled').using(default=False,
                                                              optional=True))
    def __init__(self):
        '''
        Initialize per-instance state; no hardware access happens here (see
        `connect()`).
        '''
        self.name = self.plugin_name
        self.dstat_timeout_id = None  # Periodic Dstat status check timeout id
        self.dstat_experiment_id = None  # UUID of active Dstat experiment
        self.dropbot_dx_remote = None  # `dropbot_dx.SerialProxy` instance
        self.initialized = False  # Latch to, e.g., config menus, only once
        self._metadata = None  # Backing store for `metadata` property
        self.has_environment_data = False  # Temp/humidity sensor available?
        self.environment_sensor_master = None  # Proxy used for sensor reads
        # Number of completed DStat experiments for each step.
        self.dstat_experiment_count_by_step = {}
        self.dstat_experiment_data = None  # Accumulated DStat results frame
        self.dropbot_dx_id = None  # Instrument identifier (if available)
    def connect(self):
        '''
        Connect to dropbot-dx instrument.

        Strategy:
         1. Reuse the control board reference from the `wheelerlab.dropbot_dx`
            plugin, if installed and enabled.
         2. Otherwise, look for a DropBot DX on a serial port (offering a
            firmware update if driver/firmware versions disagree).
        Afterwards, probe the available remote proxies for a working
        temperature/humidity sensor and cache the first one that responds.
        '''
        self.has_environment_data = False
        self.environment_sensor_master = None
        # if the dropbot dx plugin is installed and enabled, try getting its
        # reference
        try:
            service = get_service_instance_by_name('wheelerlab.dropbot_dx')
            if service.enabled():
                self.dropbot_dx_remote = service.control_board
        except:
            pass
        if self.dropbot_dx_remote is None:
            # if we couldn't get a reference, try finding a DropBot DX connected to
            # a serial port
            try:
                self.dropbot_dx_remote = dx.SerialProxy()
                host_version = self.dropbot_dx_remote.host_software_version
                remote_version = self.dropbot_dx_remote.remote_software_version
                if remote_version != host_version:
                    response = yesno('The DropBot DX firmware version (%s) '
                                     'does not match the driver version (%s). '
                                     'Update firmware?' % (remote_version,
                                                           host_version))
                    if response == gtk.RESPONSE_YES:
                        self.on_flash_firmware()
                # turn on the light by default
                self.dropbot_dx_remote.light_enabled = True
            except IOError:
                logger.warning('Could not connect to DropBot DX.')
        # Try to read temperature/humidity over i2c bus through a remote proxy
        # --------------------------------------------------------------------
        remote_proxies = [self.dropbot_dx_remote]
        try:
            service = get_service_instance_by_name('wheelerlab'
                                                   '.dmf_control_board_plugin')
        except KeyError:
            # DropBot v2.0 control board plugin is not available.
            pass
        else:
            if service.enabled() and service.control_board.connected():
                # The DropBot v2.0 control board plugin is loaded and the
                # DropBot v2.0 control board is connected.
                #
                # Try to read temperature/humidity over i2c through control
                # board first.
                remote_proxies.insert(0, service.control_board)
        # Try each proxy (in order) until the temperature/humidity is read
        # successfully.
        for proxy_i in remote_proxies:
            try:
                climate_info = self.get_environment_state(proxy_i)
                logger.info('temp=%.1fC, Rel. humidity=%.1f%% (%s)',
                            climate_info['temperature_celsius'],
                            100 * climate_info['relative_humidity'], proxy_i)
                # Cache remote proxy reference for future calls.
                self.has_environment_data = True
                self.environment_sensor_master = proxy_i
                break
            except:
                # Try next remote proxy.
                pass
        # Get instrument identifier, if available.
        self.dropbot_dx_id = getattr(self.dropbot_dx_remote, 'id', None)
    def get_environment_state(self, master=None, i2c_address=0x27):
        '''
        Acquire temperature and humidity from Honeywell HIH6000 series
        sensor.

        Args
        ----
        master : Remote proxy exposing `i2c_write`/`i2c_read`; defaults to
            the cached `environment_sensor_master`.
        i2c_address (int) : I2C address of the sensor.

        Returns
        -------
        (pandas.Series) : `relative_humidity` (fraction) and
            `temperature_celsius` entries.

        Raises
        ------
        IOError : If the sensor reports an error status.

        [1]: http://sensing.honeywell.com/index.php/ci_id/142171/la_id/1/document/1/re_id/0
        '''
        if master is None:
            master = self.environment_sensor_master
        # Trigger measurement.
        master.i2c_write(i2c_address, [])
        time.sleep(.01)
        while True:
            # Read 4 bytes from sensor and cast as 2 16-bit integers with reversed
            # byte order
            humidity_data, temperature_data = master.i2c_read(i2c_address, 4) \
                .astype('uint8').view('>u2')
            # Top two bits of the humidity word carry the sensor status code.
            status_code = (humidity_data >> 14) & 0x03
            if status_code == 0:
                # Measurement completed successfully.
                break
            elif status_code > 1:
                raise IOError('Error reading from sensor.')
            # Measurement data is stale (i.e., measurement still in
            # progress). Try again.
            time.sleep(.001)
        # See URL from docstring for source of equations.
        relative_humidity = float(humidity_data & 0x03FFF) / ((1 << 14) - 2)
        temperature_celsius = (float((temperature_data >> 2) & 0x3FFF) /
                               ((1 << 14) - 2) * 165 - 40)
        return pd.Series([relative_humidity, temperature_celsius],
                         index=['relative_humidity',
                                'temperature_celsius'])
def connected(self):
'''
Returns
-------
(bool) : `True` if dropbot-dx instrument is connected.
'''
return (self.dropbot_dx_remote is not None)
def data_dir(self):
app = get_app()
data_dir = app.experiment_log.get_log_path().joinpath(self.name)
if not data_dir.isdir():
data_dir.makedirs_p()
return data_dir
    def dstat_summary_frame(self, **kwargs):
        '''
        Generate DStat signal results summary, normalized against
        calibrator signal where applicable.

        Args
        ----
        **kwargs : Passed through to
            `dropbot_elisa_analysis.microdrop_dstat_summary_table` (e.g.,
            `unit`, `numeric`).

        Returns
        -------
        (pandas.DataFrame) : Summary table (empty frame if no DStat data
            has been collected yet).
        '''
        if self.dstat_experiment_data is None:
            return pd.DataFrame(None)
        app_values = self.get_app_values()
        calibrator_file = app_values.get('calibrator_file')
        # Reduce measurements from each DStat acquisition step into a single
        # signal value.
        df_md_reduced = ea.reduce_microdrop_dstat_data(self
                                                       .dstat_experiment_data)
        # Subtract respective background signal from each row in DStat results
        # summary. See `dropbot_elisa_analysis.subtract_background_signal` for
        # more details.
        try:
            df_adjusted =\
                ea.subtract_background_signal(df_md_reduced
                                              .set_index('step_label'))
            df_md_reduced.loc[:, 'signal'] = df_adjusted.signal.values
            logger.info('Adjusted signals according to background signal '
                        '(where available).')
        except Exception, exception:
            logger.info('Could not adjust signals according to background '
                        'signal.\n%s', exception)
        return ea.microdrop_dstat_summary_table(df_md_reduced,
                                                calibrator_csv_path=
                                                calibrator_file, **kwargs)
    def get_step_metadata(self):
        '''
        Returns
        -------
        (OrderedDict) : Contents of `self.metadata` dictionary, updated
            with the additional fields `batch_id`, `step_number`,
            `attempt_number`, `temperature_celsius`, `relative_humidity`.
        '''
        app = get_app()
        # Construct dictionary of metadata for extra columns in the `pandas.DataFrame`.
        metadata = self.metadata.copy()
        cre_device_id = re.compile(r'#(?P<batch_id>[a-fA-F0-9]+)'
                                   r'%(?P<device_id>[a-fA-F0-9]+)$')
        device_id = metadata.get('device_id', '')
        # If `device_id` is in the form '#<batch-id>%<device-id>', extract batch and
        # device identifiers separately.
        match = cre_device_id.match(device_id)
        if match:
            metadata['device_id'] = str(match.group('device_id'))
            metadata['batch_id'] = str(match.group('batch_id'))
        else:
            metadata['device_id'] = None
            metadata['batch_id'] = None
        # Steps are 1-indexed for display.
        metadata['step_number'] = app.protocol.current_step_number + 1
        # Number of times the DStat experiment has been run for the current step.
        metadata['attempt_number'] = (self.dstat_experiment_count_by_step
                                      [app.protocol.current_step_number])
        # Current temperature and humidity.
        if self.has_environment_data:
            metadata.update(self.get_environment_state())
        # Instrument identifier.
        metadata['instrument_id'] = self.dropbot_dx_id
        if 'sample_id' not in metadata:
            # Derive a sample identifier by joining all metadata values whose
            # key starts with 'sample'.
            sample_labels = [str(v) for k, v in metadata.iteritems()
                             if str(k).lower().startswith('sample')]
            metadata['sample_id'] = ' and '.join(sample_labels)
        return metadata
###########################################################################
# # Accessor methods #
def get_step_label(self):
try:
step_label_plugin =\
get_service_instance_by_name('wheelerlab.step_label_plugin')
return step_label_plugin.get_step_options().get('label')
except:
return None
    @property
    def metadata(self):
        '''
        Add experiment index and experiment UUID to metadata.

        Returns a *copy* of the stored metadata, augmented with the current
        experiment log's `experiment_id` and `uuid`.
        '''
        metadata = self._metadata.copy() if self._metadata else {}
        app = get_app()
        metadata['experiment_id'] = app.experiment_log.experiment_id
        metadata['experiment_uuid'] = app.experiment_log.uuid
        return metadata
    @metadata.setter
    def metadata(self, value):
        # Store as-is; experiment id/UUID are merged in on read (see getter).
        self._metadata = value
###########################################################################
# # Menu callbacks #
    def on_edit_configuration(self, widget=None, data=None):
        '''
        Display a dialog to manually edit the configuration settings.
        '''
        config = self.dropbot_dx_remote.config
        form = dict_to_form(config)
        dialog = FormViewDialog(form, 'Edit configuration settings')
        valid, response = dialog.run()
        if valid:
            # Push the edited values back to the instrument.
            self.dropbot_dx_remote.update_config(**response)
    def on_flash_firmware(self):
        '''
        Flash the bundled firmware to the connected DropBot DX board, then
        re-establish the connection.
        '''
        board = dx.get_firmwares().keys()[0]
        firmware_path = dx.get_firmwares()[board][0]
        port = self.dropbot_dx_remote.stream.serial_device.port
        # disconnect from DropBot DX so that we can flash it
        del self.dropbot_dx_remote
        self.dropbot_dx_remote = None
        logger.info(upload_firmware(firmware_path, board, port=port))
        # reconnect
        self.connect()
    def on_launch_dstat_interface(self, widget, data=None):
        # Launch the DStat interface in a separate process using the same
        # Python interpreter as the current process.
        subprocess.Popen([sys.executable, '-m', 'dstat_interface.main'])
def on_set_dstat_params_file(self, widget, data=None):
options = self.get_step_options()
form = Form.of(Filepath.named('dstat_params_file')
.using(default=options.get('dstat_params_file', ''),
optional=True,
properties={'patterns':
[('Dstat parameters file (*.yml)',
('*.yml', ))]}))
dialog = FormViewDialog(form, 'Set DStat parameters file')
        valid, response = dialog.run()
###########################################################################
# # Plugin signal handlers #
    def get_schedule_requests(self, function_name):
        """
        Returns a list of scheduling requests (i.e., ScheduleRequest
        instances) for the function specified by function_name.
        """
        # Run after the DropBot DX and DMF control board plugins so their
        # hardware connections exist before this plugin's `connect()` runs.
        if function_name in ['on_plugin_enable']:
            return [ScheduleRequest('wheelerlab.dropbot_dx', self.name),
                    ScheduleRequest('wheelerlab.dmf_control_board_plugin',
                                    self.name)]
        elif function_name == 'on_step_run':
            return [ScheduleRequest('wheelerlab.dmf_device_ui_plugin',
                                    self.name)]
        elif function_name == 'on_experiment_log_changed':
            # Ensure that the app's reference to the new experiment log gets
            # set.
            return [ScheduleRequest('microdrop.app', self.name)]
        return []
    def on_experiment_log_changed(self, experiment_log):
        '''
        Handler called when the active experiment log changes (i.e., when a
        new experiment starts).  Resets per-experiment DStat state and
        records/copies the configured calibrator file.
        '''
        # Reset number of completed DStat experiments for each step.
        self.dstat_experiment_count_by_step = {}
        self.dstat_experiment_data = None
        app = get_app()
        app_values = self.get_app_values()
        calibrator_file = app_values.get('calibrator_file', '')
        data = {'calibrator_file': calibrator_file}
        if hasattr(app, 'experiment_log') and app.experiment_log:
            app.experiment_log.metadata[self.name] = data
        # copy the calibrator file to the experiment log directory
        if calibrator_file:
            if not path(calibrator_file).isfile():
                logger.error('Calibration file (%s) does not exist.' %
                             calibrator_file)
            else:
                try:
                    output_path = path(app.experiment_log.get_log_path()) / self.name
                    if not output_path.isdir():
                        output_path.mkdir()
                    path(calibrator_file).copy2(output_path / 'calibrator.csv')
                except:
                    # Best effort: log (with traceback) but do not abort the
                    # experiment if the copy fails.
                    logger.error('Could not copy calibration file to the '
                                 'experiment log directory.' , exc_info=True)
    def on_metadata_changed(self, schema, original_metadata, metadata):
        '''
        Notify DStat interface of updates to the experiment metadata.

        Args
        ----
        schema : JSON-serializable schema describing the metadata fields.
        original_metadata : Metadata prior to the change (unused here).
        metadata : New experiment metadata.
        '''
        # Copy to avoid mutating the caller's dictionary.
        metadata = metadata.copy()
        metadata['metadata_schema'] = json.dumps(schema)
        self.metadata = metadata
    def on_plugin_enable(self):
        '''
        Connect to the instrument and, on first enable only, build this
        plugin's "Tools" and "View" menu entries.
        '''
        self.connect()
        if not self.initialized:
            app = get_app()
            self.tools_menu_item = gtk.MenuItem("DropBot DX")
            app.main_window_controller.menu_tools.append(self.tools_menu_item)
            self.tools_menu = gtk.Menu()
            self.tools_menu.show()
            self.tools_menu_item.set_submenu(self.tools_menu)
            menu_item = gtk.MenuItem("Launch Dstat interface")
            self.tools_menu.append(menu_item)
            menu_item.connect("activate", self.on_launch_dstat_interface)
            menu_item.show()
            menu_item = gtk.MenuItem("Set step Dstat parameters file...")
            self.tools_menu.append(menu_item)
            menu_item.connect("activate", self.on_set_dstat_params_file)
            menu_item.show()
            self.edit_config_menu_item = \
                gtk.MenuItem("Edit configuration settings...")
            self.tools_menu.append(self.edit_config_menu_item)
            self.edit_config_menu_item.connect("activate",
                                               self.on_edit_configuration)
            self.view_menu_item = gtk.MenuItem("DropBot DX")
            app.main_window_controller.menu_view.append(self.view_menu_item)
            self.view_menu = gtk.Menu()
            self.view_menu.show()
            self.view_menu_item.set_submenu(self.view_menu)
            menu_item = gtk.MenuItem("View DStat results...")
            self.view_menu.append(menu_item)
            # Display DStat summary table in dialog.
            menu_item.connect("activate", lambda *args:
                              dataframe_display_dialog
                              (self.dstat_summary_frame(unit='n'),
                               message='DStat result summary'))
            menu_item.show()
            self.initialized = True
        self.tools_menu_item.show()
        self.view_menu_item.show()
        if self.connected():
            # Only expose configuration editing while an instrument is
            # actually connected.
            self.edit_config_menu_item.show()
        super(DropBotDxAccessoriesPlugin, self).on_plugin_enable()
def on_plugin_disable(self):
if self.connected():
# delete to free up the serial port
del self.dropbot_dx_remote
self.dropbot_dx_remote = None
self.tools_menu_item.hide()
    @is_connected
    def on_protocol_run(self):
        # Intentionally a no-op: the `is_connected` decorator logs a warning
        # once at protocol start when no DropBot DX is connected.
        pass
    def on_step_options_swapped(self, plugin, old_step_number, step_number):
        """
        Handler called when the step options are changed for a particular
        plugin. This will, for example, allow for GUI elements to be
        updated based on step specified.

        Parameters:
            plugin : plugin instance for which the step options changed
            step_number : step number that the options changed for
        """
        # No GUI state in this plugin depends on step options; nothing to do.
        pass
    def on_step_run(self):
        '''
        Handler called whenever a step is executed. Note that this signal is
        only emitted in realtime mode or if a protocol is running.

        Plugins that handle this signal must emit the `on_step_complete` signal
        once they have completed the step. The protocol controller will wait
        until all plugins have completed the current step before proceeding.

        The `on_step_complete` signal is emitted with following signature:

            emit_signal('on_step_complete', [plugin_name, return_value])

        where `plugin_name` is the name of the emitting plugin, and
        `return_value` can be one of:

         - `None`: Step completed successfully.
         - `'Repeat'`: Repeat the step.
         - `'Fail'`: Unrecoverable error (stop the protocol).
        '''
        app = get_app()
        logger.info('[DropBotDxAccessoriesPlugin] on_step_run(): step #%d',
                    app.protocol.current_step_number)
        options = self.get_step_options()
        app_values = self.get_app_values()
        if self.connected():
            # Light is turned off while DStat acquisition is enabled; magnet
            # position follows the step option.
            self.dropbot_dx_remote.light_enabled = not options['dstat_enabled']
            self.dropbot_dx_remote.magnet_engaged=options['magnet_engaged']
            try:
                if self.has_environment_data:
                    env = self.get_environment_state().to_dict()
                    logger.info('temp=%.1fC, Rel. humidity=%.1f%%' %
                                (env['temperature_celsius'],
                                 100 * env['relative_humidity']))
                    app.experiment_log.add_data({"environment state": env},
                                                self.name)
            except ValueError:
                # Sensor read failed; stop trying for the rest of the run.
                self.has_environment_data = False
            if options['dstat_enabled']:
                # D-stat is enabled for step. Request acquisition.
                try:
                    if 'dstat_params_file' in options:
                        # Load Dstat parameters.
                        hub_execute('dstat-interface', 'load_params',
                                    params_path=options['dstat_params_file'])
                    if self.dstat_timeout_id is not None:
                        # Timer was already set, so cancel previous timer.
                        gobject.source_remove(self.dstat_timeout_id)
                    # Delay before D-stat measurement (e.g., to allow webcam
                    # light to turn off).
                    dstat_delay_s = app_values.get('dstat_delay_s', 0)
                    time.sleep(max(0, dstat_delay_s))
                    step_label = self.get_step_label()
                    # Send Microdrop step label (if available) to provide name
                    # for DStat experiment.
                    metadata = self.metadata.copy()
                    metadata['name'] = (step_label if step_label else
                                        str(app.protocol.current_step_number +
                                            1))
                    metadata['patient_id'] = metadata.get('sample_id', 'None')
                    # Get target path for DStat database directory.
                    dstat_database_path = (path(app.config['data_dir'])
                                           .realpath().joinpath('dstat-db'))
                    self.dstat_experiment_id = \
                        hub_execute('dstat-interface', 'run_active_experiment',
                                    metadata=metadata,
                                    params={'db_path_entry':
                                            str(dstat_database_path),
                                            'db_enable_checkbutton': True})
                    self._dstat_spinner = itertools.cycle(r'-\|/')
                    print ''
                    # Check every 250ms to see if dstat acquisition has
                    # completed.
                    self.dstat_timeout_id = \
                        gobject.timeout_add(250, self.check_dstat_status)
                except:
                    print "Exception in user code:"
                    print '-'*60
                    traceback.print_exc(file=sys.stdout)
                    print '-'*60
                    # An error occurred while initializing Analyst remote
                    # control.
                    emit_signal('on_step_complete', [self.name, 'Fail'])
            else:
                # D-State is not enabled, so step is complete.
                emit_signal('on_step_complete', [self.name, None])
        else:
            # DropBox-DX device is not connected, but allow protocol to
            # continue.
            #
            # N.B., A warning message is display once at the *start* of the
            # protocol if no DropBot-DX connection has been established, but
            # *not* on each step.
            emit_signal('on_step_complete', [self.name, None])
###########################################################################
# # Periodic callbacks #
def check_dstat_status(self):
'''
1. Check to see if acquisition is finished.
2. If (1), emit `on_step_complete` signal.
'''
try:
completed_timestamp = hub_execute('dstat-interface',
'acquisition_complete',
experiment_id=
self.dstat_experiment_id,
timeout_s=5.)
if completed_timestamp is not None:
# ## Acquisition is complete ##
app = get_app()
# Increment the number of completed DStat experiments for
# current step.
step_i = app.protocol.current_step_number
count_i = 1 + self.dstat_experiment_count_by_step.get(step_i,
0)
self.dstat_experiment_count_by_step[step_i] = count_i
# ### Save results data and plot ###
output_directory = (path(app.experiment_log.get_log_path())
.abspath())
output_namebase = str(app.protocol.current_step_number)
step_label = self.get_step_label()
if step_label is not None:
# Replace characters that are not allowed in a filename
# with underscore.
output_namebase = re.sub(r'[:/\\\?{}]', '_', step_label)
# Save results to a text file in the experiment log directory.
output_txt_path = get_unique_path(output_directory
.joinpath(output_namebase +
'.txt'))
logger.info('Save results to: %s', output_txt_path)
dstat_params = hub_execute('dstat-interface', 'get_params')
hub_execute('dstat-interface', 'save_text',
save_data_path=output_txt_path)
data_i = hub_execute('dstat-interface', 'get_experiment_data',
experiment_id=self.dstat_experiment_id)
metadata_i = self.get_step_metadata()
# Compute (approximate) `utc_timestamp` for each DStat
# measurement.
max_time_s = data_i.time_s.max()
metadata_i['utc_timestamp'] = (completed_timestamp -
data_i.time_s
.map(lambda t:
timedelta(seconds=
max_time_s - t)))
# Step label from step label plugin.
metadata_i['step_label'] = step_label
# Compute UTC start time from local experiment start time.
metadata_i['experiment_start'] = \
(dt.datetime.fromtimestamp(app.experiment_log.start_time())
+ (dt.datetime.utcnow() - dt.datetime.now()))
# Compute UTC start time from local experiment start time.
metadata_i['experiment_length_min'] = \
(completed_timestamp -
metadata_i['experiment_start']).total_seconds() / 60.
# Record synchronous detection parameters from DStat (if set).
if dstat_params['sync_true']:
metadata_i['target_hz'] = float(dstat_params['sync_freq'])
else:
metadata_i['target_hz'] = None
metadata_i['sample_frequency_hz'] = float(dstat_params['adc_rate_hz'])
# Cast metadata `unicode` fields as `str` to enable HDF export.
for k, v in metadata_i.iteritems():
if isinstance(v, types.StringTypes):
metadata_i[k] = str(v)
data_md_i = data_i.copy()
for i, (k, v) in enumerate(metadata_i.iteritems()):
try:
data_md_i.insert(i, k, v)
except Exception, e:
logger.info('Skipping metadata field %s: %s.\n%s', k,
v, e)
# Set order for known columns. Unknown columns are ordered
# last, alphabetically.
column_order = ['instrument_id', 'experiment_id',
'experiment_uuid', 'experiment_start',
'experiment_length_min', 'utc_timestamp',
'device_id', 'batch_id', 'sample_id',
'step_label', 'step_number', 'attempt_number',
'temperature_celsius', 'relative_humidity',
'target_hz', 'sample_frequency_hz', 'time_s',
'current_amps']
column_index = dict([(k, i) for i, k in
enumerate(column_order)])
ordered_columns = sorted(data_md_i.columns, key=lambda k:
(column_index
.get(k, len(column_order)), k))
data_md_i = data_md_i[ordered_columns]
namebase_i = ('e[{}]-d[{}]-s[{}]'
.format(metadata_i['experiment_uuid'][:8],
metadata_i.get('device_id'),
metadata_i.get('sample_id')))
if self.dstat_experiment_data is None:
self.dstat_experiment_data = data_md_i
else:
combined = pd.concat([self.dstat_experiment_data,
data_md_i])
self.dstat_experiment_data = combined.reset_index(drop=
True)
# Append DStat experiment data to CSV file.
csv_output_path = self.data_dir().joinpath(namebase_i + '.csv')
# Only include header if the file does not exist or is empty.
include_header = not (csv_output_path.isfile() and
(csv_output_path.size > 0))
with csv_output_path.open('a') as output:
data_md_i.to_csv(output, index=False,
header=include_header)
df_dstat_summary = self.dstat_summary_frame(numeric=True)
# Write DStat summary table to CSV file.
csv_summary_path = self.data_dir().joinpath('dstat-summary'
'.csv')
with csv_summary_path.open('w') as output:
df_dstat_summary.to_csv(output)
# Turn light back on after photomultiplier tube (PMT)
# measurement.
self.dropbot_dx_remote.light_enabled = True
# notify step complete.
emit_signal('on_step_complete', [self.name, None])
self.dstat_timeout_id = None
return False
else:
print '\rWaiting for Dstat...', self._dstat_spinner.next(),
except:
print "Exception in user code:"
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
emit_signal('on_step_complete', [self.name, 'Fail'])
self.dstat_timeout_id = None
return False
return True
PluginGlobals.pop_env() |
if valid:
options['dstat_params_file'] = response['dstat_params_file']
self.set_step_values(options) | random_line_split |
Script_graph_data.py | from os import chdir, listdir, mkdir, remove
from urllib.request import urlretrieve
from tkinter import font as tkFont
from datetime import datetime, timedelta, date
import pathlib, time, tkinter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.animation as animation
from matplotlib import ticker, rcParams
from mpl_toolkits.axes_grid1 import make_axes_locatable
Root_directory = str(pathlib.Path(__file__).parent.absolute()) # Strings with path of different directories
Datafiles_directory = Root_directory + '\\Datafiles'
Animation_directory = Root_directory + '\\Animations'
for Directory in [Datafiles_directory, Animation_directory]: # Create necessary directories if they don't exist
try:
mkdir(Directory)
except:
pass
Title_fontsize = 25 # Size in pixels of different elements in the graph
Date_fontsize = 25
Annotation_fontsize = 15
Axis_label_fontsize = 20
Axis_tick_fontsize = 15
Marker_ray = 8 # Ray in pixels of the markers in the scater graph
Annotation_offset = - Annotation_fontsize * 0.05 # Correction of the spacing between points in the graph and their annotations
Title_pad = Title_fontsize * 0.5 # Spacing betwenn title and plotting area
Axis_label_pad = Axis_label_fontsize * 0.5 # Spacing betwenn axis label and plotting area
Animation_interval = 200 # Interval in milliseconds between each frame in animation
Animation_fps = int(1/Animation_interval * 1e3) # Frames per second of the animation
Graph_font = rcParams['font.family'] # Tkinter font object that will be used to calculate the area occupied by each annotation in Annotations_frame()
tkinter.Frame().destroy()
font = tkFont.Font(family = Graph_font, size = Annotation_fontsize)
def Main_script(X_axis_inc = 1, Y_axis_inc = 7, Z_axis_inc = 12, Date_start = None, Date_end = None):
"""
Main routine to execute to download, extract, reconstruct and plot COVID data
Parameters:
- X_axis_inc: Integer, data to use for the X axis. Default is 1 (Total cases per million)
- Y_axis_inc: Integer, data to use for the Y axis. Default is 7 (Total deaths per million)
- Z_axis_inc: Integer, data to use for the colors of the points. Default is 12 (Positivity rate)
- Date_start: String, starting date of the animation
- Date_end: String, ending date of the animation
Date are None by default to use every available date in the data (see first lines of this fuction)
Returns:
ani: Matplotlib animation object. If it's not returned, the animation isn't displayed
Axis incs:
0 - Total cases
1 - Total cases per million
2 - New cases
3 - New cases smoothed
4 - New cases per million
5 - New cases per million smoothed
6 - Total deaths
7 - Total deaths per million
8 - New deaths
9 - New deaths smoothed
10 - New deaths per million
11 - New deaths per million smoothed
12 - Positivity rate
13 - Testing policy
"""
Timer_start = time.perf_counter()
print('Collecting data from Our World in Data')
COVID_data, Date_start_raw_data, Date_end_raw_data = Extract_data() # Download and extract raw COVID data
if Date_start == None: # If no start date is specified, it is set to the first date in the data. The end date is then set to the last date in the data. That way, we can display the whole data without having to know when it starts and ends
Date_start = Date_start_raw_data
Date_end = Date_end_raw_data
elif Date_end == None: Date_end = Date_start # But if no end date is specified, only the start date is displayed
print('Recontructing missing chunks in the data by linear interplolation')
COVID_data_reconstructed = Reconstruct_COVID_data(COVID_data) # Reconstruct the missing data
print('Exporting data in files')
Export_in_files(COVID_data, COVID_data_reconstructed) # Export the original and reconstructed data in CSV files, just to have them and be able to look whenever we want
print('Isolating data to plot')
COVID_data_scatter = Extract_data_for_plotting(COVID_data_reconstructed, X_axis_inc, Y_axis_inc, Z_axis_inc, Date_start, Date_end) # Filter data to only keep the axes we want to plot
print('Plotting data')
ani, COVID_data_scatter_names = Scatter_graph(COVID_data_scatter) # Plot the data
print('Exporting animation as video')
Writer = animation.writers['ffmpeg'] # Export the file
writer = Writer(fps = Animation_fps, metadata=dict(artist='Me'), bitrate=1800)
Annimation_file = Animation_directory + '\\%s vs %s with %s from %s to %s.mp4' % (tuple(COVID_data_scatter_names) + (Date_start, Date_end))
ani.save(Annimation_file, writer = writer)
print('\nProcessing done in %0.2f minutes' % ((time.perf_counter() - Timer_start) / 60))
return ani
def Extract_data():
"""
Extracts and formats data in dictionnaries from Our World in Data CSV files
Parameters: Nothing
Returns:
- COVID_data: Dictionnary of cases, deaths and positivity rate data throughout the world
- Population_data: Dictionnary of population for each country
"""
chdir(Datafiles_directory) # Empty the datafiles directory
File_list = listdir()
for File in File_list:
remove(File)
COVID_data_path = Datafiles_directory + '\\OWID COVID data %s.csv' % (date.today().isoformat()) # String with path of COVID data (where it will be stored when downloaded)
urlretrieve('https://raw.githubusercontent.com/owid/COVID-19-data/master/public/data/owid-covid-data.csv', COVID_data_path) # Download and extract the data
COVID_data_file = open(COVID_data_path, 'r')
COVID_raw_data = COVID_data_file.readlines()
COVID_raw_data = [Row.split(',') for Row in COVID_raw_data[1:]]
COVID_data = {'_Country': {'Date': ['Total cases', 'Total cases per million', 'New cases', 'New cases smoothed', 'New cases per million', 'New cases per million smoothed', 'Total deaths', 'Total deaths per million', 'New deaths', 'New deaths smoothed', 'New deaths per million', 'New deaths per million smoothed', 'Positivity rate', 'Testing policy']}}
Date_list = []
for Row_inc in range(len(COVID_raw_data)): # For each row in the file...
Country = COVID_raw_data[Row_inc][2]
Date = COVID_raw_data[Row_inc][3]
if COVID_raw_data[Row_inc][2] not in COVID_data: COVID_data[Country] = {} # If a new country is encountered, a new entry to the dictionnary COVID_data is added
if Date not in Date_list: Date_list.append(Date) # If a new date is encoutered, it is added to the corresponding list
COVID_data[Country][Date] = []
for Column_inc in [4, 10, 5, 6, 11, 12, 7, 13, 8, 9, 14, 15, 23, 24]: # For each column we want to extract...
Data_item = COVID_raw_data[Row_inc][Column_inc]
if Column_inc != 24: # Column_inc of 24 is the testing policy and is a string so can't appended as a float, prompting this exception
if Data_item == '': COVID_data[Country][Date].append(None) # If there's nothing, a None element is added
else: COVID_data[Country][Date].append(float(COVID_raw_data[Row_inc][Column_inc]))
else: COVID_data[Country][Date].append(COVID_raw_data[Row_inc][Column_inc])
if COVID_raw_data[Row_inc][2] == 'International' or COVID_raw_data[Row_inc][2] == 'World': # The entries "World" and "International" aren't interesting so they are ignored
break
COVID_data_file.close()
Date_start_raw_data, Date_end_raw_data = min(Date_list), max(Date_list)
return COVID_data, Date_start_raw_data, Date_end_raw_data
def Reconstruct_COVID_data(COVID_data):
"""
Reconstructs missing chunks of data by linear interpolation
Parameters:
COVID_data: Dictionnary of cases, deaths and positivity rates data throughout the world
Returns:
COVID_data_reconstructed: Reconstructed dictionnary of cases, deaths and positivity rates data throughout the world
"""
COVID_data_reconstructed = {}
COVID_data_reconstructed['_Country'] = COVID_data['_Country']
Countries_list = list(COVID_data.keys())[1:]
for Country in Countries_list: # For each country...
COVID_data_single_country = list(COVID_data[Country].values()) # Extract the matrix containing the data and transpose it. That way, each element of a single list in the array corresponds to one column (see help of Main_script) and it makes it easier to navigate through each column and recontruct the missing elements
T_COVID_data_single_country = list(map(list, zip(*COVID_data_single_country)))
for Column_inc in range(len(T_COVID_data_single_country)): # For each column...
Column = T_COVID_data_single_country[Column_inc]
Max_column_inc = len(Column) - 1
Row_inc = 0
while Column[Row_inc] == None and Row_inc < Max_column_inc: # Recontructing missing data at the beginning is impossible so we just skip the first rows with a None in them
Row_inc += 1
if None in Column: # If a None is in the list (meaning there are bits of data missing)...
while Row_inc < Max_column_inc: # Not including this line could prompt an index error
if Column[Row_inc] == None: # When a None in encoutered...
None_interval_start = Row_inc # Recording when the segments of None starts and ends
while Column[Row_inc] == None and Row_inc < Max_column_inc:
Row_inc += 1
None_interval_end = Row_inc - 1
Interpolation_interval_length = None_interval_end - None_interval_start + 2
if Row_inc < Max_column_inc: # Reconstruction of the segment by linear interpolation : Y = mX + b with m = (Y_max - Y_min) / (X_max - X_min)
m = (Column[None_interval_end + 1] - Column[None_interval_start - 1]) / Interpolation_interval_length
b = Column[None_interval_start - 1]
for Row_inc in range(None_interval_start, Row_inc):
T_COVID_data_single_country[Column_inc][Row_inc] = m * (Row_inc - None_interval_start + 1) + b
else: # In the case the None segment goes on until the end, the last known value is just copied
for Row_inc in range(None_interval_start, Row_inc + 1):
T_COVID_data_single_country[Column_inc][Row_inc] = T_COVID_data_single_country[Column_inc][None_interval_start - 1]
Row_inc += 1
COVID_data_single_country_reconstructed = list(map(list, zip(*T_COVID_data_single_country))) # Retranspose the matrix to get the reconstruted data in the correct format
Date_list_country = list(COVID_data[Country].keys()) # Add the reconstructed data to the appropriate dictionnary
COVID_data_reconstructed[Country] = {}
for Date_inc in range(len(Date_list_country)):
Date = Date_list_country[Date_inc]
COVID_data_reconstructed[Country][Date] = COVID_data_single_country_reconstructed[Date_inc]
return COVID_data_reconstructed
def Export_in_files(COVID_data, COVID_data_reconstructed):
"""
Exports the raw and reconstructed data in seperate files
Parameters:
- COVID_data: Dictionnary of cases, deaths and positivity rate data throughout the world
- Covid_data_reconstructued: Reconstructed dictionnary of cases, deaths and positivity rates data throughout the world
Returns: Nothing
"""
F_data_file = open(Datafiles_directory + '\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')
FR_data_file = open(Datafiles_directory + '\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')
COVID_data_lists = [COVID_data, COVID_data_reconstructed]
Data_file_list = [F_data_file, FR_data_file]
Countries_list = list(COVID_data.keys())[1:]
for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file
COVID_data_temp = COVID_data_lists[Data_set_inc]
Data_file_temp = Data_file_list[Data_set_inc]
Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\n')
for Country in Countries_list:
COVID_data_single_country = COVID_data_temp[Country]
Date_list = list(COVID_data[Country].keys())
for Date in Date_list:
COVID_data_single_country_single_date = COVID_data_single_country[Date]
Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None
Data_file_temp.write('%s;%s;' % (Country, Date))
Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))
Data_file_temp.write('\n')
Data_file_temp.close()
def Extract_data_for_plotting(COVID_data, X_Axis_inc, Y_Axis_inc, Z_Axis_inc, Date_start, Date_end, Keep_no_PR = True):
"""
Extract data from recontructed COVID data in order to only keep data that will be plotted
Parameters:
- COVID_data: Dictionnary of cases, deaths and positivity rates data throughout the world (usually reconstructed)
- X_axis_inc: Integer, data to use for the X axis
- Y_axis_inc: Integer, data to use for the Y axis
- Z_axis_inc: Integer, data to use for the colors of the points
- Date_start: String, starting date of the animation
- Date_end: String, ending date of the animation
- Keep_no_PR: Boolean indicating whether or not countries without a positivity rate have to be kept. Default is True
Returns:
COVID_data_scatter: Reconstructed dictionnary of the 3 columns the user asked to plot throughout time
"""
Date_start_obj = datetime.strptime(Date_start, '%Y-%m-%d') # Create a list of all the dates to extract
Date_end_obj = datetime.strptime(Date_end, '%Y-%m-%d')
Date_difference = (Date_end_obj - Date_start_obj).days + 1
Date_list = [(Date_start_obj + timedelta(Days)).isoformat()[:10] for Days in range(Date_difference)]
Countries_list = list(COVID_data.keys())[1:]
COVID_data_scatter = {'0Date': {'Country': [COVID_data['_Country']['Date'][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]]}}
for Date in Date_list: # For each date and each country...
COVID_data_scatter[Date] = {}
for Country in Countries_list:
try:
Data_items = [COVID_data[Country][Date][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]] # This line will prompt an error in case the data doesn't exist, hence the try - except structure (much easier than 10 000 conditions to try to figure out if the data exists for a date and country)
if None not in Data_items[:2] and not (not Keep_no_PR and Data_items[2] == None): # Any data point that has a None as its X or Y coordinate is exlcuded, and also Z if asked by the user
if min(Data_items[:2]) > 0: COVID_data_scatter[Date][Country] = Data_items # Since the graph is in logscale, points with 0 as their X or Y coordinate are excluded (because log(0) doesn't exist).
# This double verification can't be done in one line because having None in a list you're trying to find the minimum of prompts an error
except: pass
if COVID_data_scatter[Date] == {}: COVID_data_scatter.pop(Date)
return COVID_data_scatter
def Annotations_frame(Points_to_display, Countries_displayed, Frame_limits):
"""
Tells which countries to annotate and which not to. Since the lists in parameters are sorted by descending order of positivity rate, the countries with higher positivity rates will be examined first and thus annotatd with more priority
Parameters:
- Points_to_display: List of X and Y coordinates of each point displayed on the graph
- Countries_displayed: List of countries displayed on the graph
- Frame_limits: Tuple, limits of the plotting area (X_min, X_max, Y_min, Y_max)
Returns:
- Countries_to_annotate: List of countries to annotate
- Annotations_mask: Numpy array of bools, outline of the annotations. This variable is only used in this function to decide which countries to annotate and which not to but I had so many problems in finding the correct formulas that just in case, I wanted to be able to display it easilly in Scatter_graph() even after solving all problems
"""
X_list_frame, Y_list_frame = zip(*Points_to_display) # Transform tuples of (X, Y) into 2 distinct lists of X and Y coordinates
Frame_limits_log = list(map(np.log10, Frame_limits))
X_min_log, X_max_log, Y_min_log, Y_max_log = Frame_limits_log
fig = plt.gcf()
ax = plt.gca()
ax_bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) # Get size in pixels of the plotting area
ax_width, ax_height = ax_bbox.width, ax_bbox.height
ax_width *= fig.dpi
ax_height *= fig.dpi
Annotations_mask = np.zeros((int(ax_width), int(ax_height)), bool) # Array of bools same size as plotting area where outlines of annotations will be stored
Countries_to_annotate = {}
for Country_inc in range(len(Countries_displayed)): # For each country...
Country = Countries_displayed[Country_inc]
Annotation_width_enlargment = 1.3 # Slight corrections to make the annotation outlines fit as best as possible. Found by trial and error
Annotation_height_enlargment = 1.6
Label_size = 0.5 * np.array([font.measure(Country)*Annotation_width_enlargment, Annotation_fontsize * Annotation_height_enlargment]) # Everything is divided by 2 because the origin of the anotation outline is in its center
Offset = [0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.7 + Annotation_offset] # Distance between point and annotation. Annotation_fontsize is in points so it has to be converted to pixels (1 inch = 72 points = screen dpi). 0.56 is just a correction found by trial and error
Country = Countries_displayed[Country_inc]
Country_coords = Points_to_display[Country_inc]
List_slice = [] # Get indices delimiting the outline of the annotation in the plotting area
for Axis_inc in range(2):
Min_log, Max_log = Frame_limits_log[Axis_inc*2 : Axis_inc*2 + 2] # Simple transformation: Y = (Y_max - Y_min) / (X_max - X_min) * (X - X_min) + Y_min
Coodrs_transformation = lambda x: (Annotations_mask.shape[Axis_inc] - 1)/(Max_log - Min_log) * (np.log10(x) - Min_log)
for Label_offset_sign in range(-1, 2, 2):
List_slice.append(sum([Coodrs_transformation(Country_coords[Axis_inc]), Offset[Axis_inc], Label_offset_sign * Label_size[Axis_inc]]))
Slice_X_min, Slice_X_max, Slice_Y_min, Slice_Y_max = map(int, List_slice)
Annotation_slice = np.s_[Slice_X_min : Slice_X_max + 1, Slice_Y_min : Slice_Y_max + 1]
if not np.any(Annotations_mask[Annotation_slice]): # If there isn't a True in the current annotation outline (meaing there already is another annotation displayed)...
|
return Countries_to_annotate, Annotations_mask
def Scatter_graph(COVID_data_scatter, Display_annotations_mask = False):
"""
Plots data entered in parameters
Parameters:
- COVID_data_scatter: Reconstructed dictionnary of the 3 columns the user asked to plot throughout time
- Display_annotations_mask: Boolean indicating whether to display the outline of annotations created by Annotations_frame() or not
Returns:
- ani: Animation object created by matplotlib
- COVID_data_scatter_names: List of names of the columns plotted
"""
COVID_data_scatter_names = COVID_data_scatter.pop('0Date')['Country'] # Extract names of columns plotted
X_axis, Y_axis, Z_axis = [], [], [] # Separate the axes in COVID_data_scatter in order to find the minimum and maximum along each axis
for Date_item in COVID_data_scatter.values():
for Country_item in Date_item.values():
for Axis_inc in range(3):
[X_axis, Y_axis, Z_axis][Axis_inc].append(Country_item[Axis_inc])
Min_list, Max_list = [], [] # Limits of the plotting area
Graph_window_margin = 2 # Since the graph is in log scale, the plotting area can't be extended using New max = Factor * (Max - Min) so I just went with multiplying the maximum and dividing the minimum by a factor of 2
for Axis_inc in range(2):
Min_list.append(min([X_axis, Y_axis][Axis_inc]) / Graph_window_margin)
Max_list.append(max([X_axis, Y_axis][Axis_inc]) * Graph_window_margin)
cmap = cm.jet # Colormap for the 3rd axis
cmap = colors.LinearSegmentedColormap.from_list('jet_truncated', cmap(np.linspace(0.2, 0.95, 100)))
Z_axis_cleaned = list(filter(lambda Item: Item != None, Z_axis)) # Positivity rate to color converter
norm = colors.Normalize(vmin = 0, vmax = max(Z_axis_cleaned), clip = True)
mapper = cm.ScalarMappable(norm = norm, cmap = cmap)
plt.close() # Initialise plotting area. A simple "plt.clf()" doesn't work to erase everything and prompts glitches after the 2nd execution of the code, forcing us to close the figure and reopen it
fig = plt.figure("Scatter graph of COVID data")
fig.set_size_inches(tuple(1/fig.dpi * np.array([1920, 1080])))
ax = fig.gca()
manager = plt.get_current_fig_manager() # Adapt the matplotlib window to the screen
manager.window.showMaximized()
Data_frames = zip(COVID_data_scatter.keys(), COVID_data_scatter.values()) # Transform the first level of dictionnary into a list because we need to have access to the keys of that first level during the creation of the animation frames
Animation_frames = [] # List where all the matplotlib objects for the animation will be stored
for Frame in Data_frames:
Date = Frame[0]
Points_to_display, Positivity_rate_list, Points_colors = [], [], []
Countries_displayed = list(Frame[1].keys())
for Country in Countries_displayed: # For each country...
Country_coords = Frame[1][Country][:2]
Positivity_rate = Frame[1][Country][2]
Points_to_display.append(Country_coords)
if Positivity_rate != None: # If there is a positivity rate for that country, it is plotted with the color it corresponds to on the colormap
Positivity_rate_list.append(Positivity_rate)
Points_colors.append(mapper.to_rgba(Positivity_rate))
else: # Otherwise, it appears in #ABB7B7 gray and a "-1" is appended to the list of positivity rates. That way, these points will be in last after the sorting in descending order in a few lines
Positivity_rate_list.append(-1)
Points_colors.append((0.6627, 0.6627, 0.6627, 1))
All_points_info = list(zip(Countries_displayed, Points_to_display, Positivity_rate_list, Points_colors)) # Group everything, sort the points based on the positivity rate and then seperate everything to get the same objects as before but sorted
All_points_info.sort(key = lambda x: x[2])
All_points_info = list(zip(*All_points_info))
Countries_displayed = list(All_points_info[0])
Points_to_display = list(All_points_info[1])
Positivity_rate_list = list(All_points_info[2])
Points_colors = list(All_points_info[3])
X_list_frame, Y_list_frame = zip(*Points_to_display) # Separate X and Y axes and plot the points
scatter = ax.scatter(X_list_frame, Y_list_frame, c = Points_colors, s = np.pi * (Marker_ray*72/fig.dpi)**2, linewidth = 0.5, edgecolors = 'black') # Marker ray is the radius of the circle in pixels but s is the area of the circle in points. We have to convert the pixels in points (1 inch = 72 points = Screen dpi) then apply area = pi * radius²
# Note: ax.scatter plots the points one by one so the last elements of the lists will be above the firsts. Since the X and Y axes are sorted in ascending order of positivity rate, the last points (high positivity rates) will be on top. This is on purpose because these are the most interesting ones
Text_date = ax.text(0.02, 0.97, Date, transform = ax.transAxes, fontsize = Date_fontsize, verticalalignment = 'top', horizontalalignment = 'left', bbox = dict(boxstyle = 'round', facecolor = 'white', alpha = 0.9, pad = 0.3)) # Display the date
fig.tight_layout() # Annotations_frame() requires the use of lines regarding the size of the plotting area. For them to work properly, we have to virtually draw the elements, which is why we use fig.tight_layout() in the middle of the creation of the animation frames
Countries_to_annotate, Annotations_mask = Annotations_frame(Points_to_display[::-1], Countries_displayed[::-1], (Min_list[0], Max_list[0], Min_list[1], Max_list[1])) # Decide which countries to annotate and which not to
Annotation_list = []
for Country, Country_coords in zip(Countries_to_annotate.keys(), Countries_to_annotate.values()): # Annotate countries
Annotation_list.append(ax.annotate(Country, Country_coords, textcoords = 'offset pixels', xytext=(0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.5 + Annotation_offset), ha='center', va='center', fontsize = Annotation_fontsize))
if Display_annotations_mask: # If something goes wrong during an edit, the user can still display the annotations outline
ax_tw_x = ax.twinx() # Duplicate axis. Compulsory because the graph is in logscale and an image cannot be properly displayed in logscale
ax2 = ax_tw_x.twiny()
mapper_mask = cm.ScalarMappable(norm = colors.Normalize(vmin = 0, vmax = 1, clip = True), cmap = cm.gray) # Convert array of bools into array of colors then display the image
Annotations_mask_im = mapper_mask.to_rgba(np.rot90(np.invert(Annotations_mask) + np.zeros(Annotations_mask.shape)), alpha = 0.3)
Annotations_mask_ax = ax2.imshow(Annotations_mask_im, extent = [Min_list[0], Max_list[0], Min_list[1], Max_list[1]], aspect = 'auto')
ax_tw_x.axis('off') # Not display axes of the image
ax2.axis('off')
Animation_frames.append([scatter, Text_date, Annotations_mask_ax] + Annotation_list)
else: Animation_frames.append([scatter, Text_date] + Annotation_list)
ax.set_title("COVID-19 pandemic - %s vs. %s" % tuple(COVID_data_scatter_names[:2][::-1]), fontsize = Title_fontsize, pad = Title_pad)
ax.set_xlabel(COVID_data_scatter_names[0], fontsize = Axis_label_fontsize)
ax.set_ylabel(COVID_data_scatter_names[1], fontsize = Axis_label_fontsize)
ax.set_xlim(Min_list[0], Max_list[0])
ax.set_ylim(Min_list[1], Max_list[1])
ax.set_xscale('log')
ax.set_yscale('log')
ax.grid(linestyle = '--', linewidth = 1.5, which = 'major')
ax.grid(linestyle = '--', linewidth = 0.5, which = 'minor')
ax.set_axisbelow(True)
ax.tick_params(axis='x', labelsize = Axis_tick_fontsize)
ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)
Formatter_list = [] # Display axes graduations as multiples of 10 (rather than 10^n) and find how many decimal places to display
for Axis_inc in range(2):
if Min_list[Axis_inc] < 1: Min_axis_log = int(np.abs(np.floor(np.log10(Min_list[Axis_inc])))) - 1
else: Min_axis_log = 0
Formatter_list.append('%.' + str(Min_axis_log) + 'f')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[0]))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[1]))
Divider = make_axes_locatable(ax) # Display 3rd axis (colors). Using make_axes_locatable() allows for better tight_layout results
cax = Divider.append_axes('right', size = '2%', pad = 0.3)
cbar = fig.colorbar(mapper, cax = cax)
cbar.ax.set_ylabel(COVID_data_scatter_names[2], fontsize = Axis_label_fontsize, labelpad=Axis_label_pad)
cbar.ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)
cbar.ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax = 1, decimals = 0)) # Set axis graduations as percentage with no decimal places
ani = animation.ArtistAnimation(fig, Animation_frames, blit = True, interval = Animation_interval)
fig.tight_layout()
fig.show()
return ani, COVID_data_scatter_names
| Countries_to_annotate[Country] = Points_to_display[Country_inc] # The country has to be annotated
Annotations_mask[Annotation_slice] = True # All the elements in Annotations_mask in this area are set to True to signify there is now an annotation displayed there
| conditional_block |
Script_graph_data.py | from os import chdir, listdir, mkdir, remove
from urllib.request import urlretrieve
from tkinter import font as tkFont
from datetime import datetime, timedelta, date
import pathlib, time, tkinter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.animation as animation
from matplotlib import ticker, rcParams
from mpl_toolkits.axes_grid1 import make_axes_locatable
Root_directory = str(pathlib.Path(__file__).parent.absolute()) # Strings with path of different directories
Datafiles_directory = Root_directory + '\\Datafiles'
Animation_directory = Root_directory + '\\Animations'
for Directory in [Datafiles_directory, Animation_directory]: # Create necessary directories if they don't exist
try:
mkdir(Directory)
except:
pass
Title_fontsize = 25 # Size in pixels of different elements in the graph
Date_fontsize = 25
Annotation_fontsize = 15
Axis_label_fontsize = 20
Axis_tick_fontsize = 15
Marker_ray = 8 # Ray in pixels of the markers in the scater graph
Annotation_offset = - Annotation_fontsize * 0.05 # Correction of the spacing between points in the graph and their annotations
Title_pad = Title_fontsize * 0.5 # Spacing betwenn title and plotting area
Axis_label_pad = Axis_label_fontsize * 0.5 # Spacing betwenn axis label and plotting area
Animation_interval = 200 # Interval in milliseconds between each frame in animation
Animation_fps = int(1/Animation_interval * 1e3) # Frames per second of the animation
Graph_font = rcParams['font.family'] # Tkinter font object that will be used to calculate the area occupied by each annotation in Annotations_frame()
tkinter.Frame().destroy()
font = tkFont.Font(family = Graph_font, size = Annotation_fontsize)
def Main_script(X_axis_inc = 1, Y_axis_inc = 7, Z_axis_inc = 12, Date_start = None, Date_end = None):
"""
Main routine to execute to download, extract, reconstruct and plot COVID data
Parameters:
- X_axis_inc: Integer, data to use for the X axis. Default is 1 (Total cases per million)
- Y_axis_inc: Integer, data to use for the Y axis. Default is 7 (Total deaths per million)
- Z_axis_inc: Integer, data to use for the colors of the points. Default is 12 (Positivity rate)
- Date_start: String, starting date of the animation
- Date_end: String, ending date of the animation
Date are None by default to use every available date in the data (see first lines of this fuction)
Returns:
ani: Matplotlib animation object. If it's not returned, the animation isn't displayed
Axis incs:
0 - Total cases
1 - Total cases per million
2 - New cases
3 - New cases smoothed
4 - New cases per million
5 - New cases per million smoothed
6 - Total deaths
7 - Total deaths per million
8 - New deaths
9 - New deaths smoothed
10 - New deaths per million
11 - New deaths per million smoothed
12 - Positivity rate
13 - Testing policy
"""
Timer_start = time.perf_counter()
print('Collecting data from Our World in Data')
COVID_data, Date_start_raw_data, Date_end_raw_data = Extract_data() # Download and extract raw COVID data
if Date_start == None: # If no start date is specified, it is set to the first date in the data. The end date is then set to the last date in the data. That way, we can display the whole data without having to know when it starts and ends
Date_start = Date_start_raw_data
Date_end = Date_end_raw_data
elif Date_end == None: Date_end = Date_start # But if no end date is specified, only the start date is displayed
print('Recontructing missing chunks in the data by linear interplolation')
COVID_data_reconstructed = Reconstruct_COVID_data(COVID_data) # Reconstruct the missing data
print('Exporting data in files')
Export_in_files(COVID_data, COVID_data_reconstructed) # Export the original and reconstructed data in CSV files, just to have them and be able to look whenever we want
print('Isolating data to plot')
COVID_data_scatter = Extract_data_for_plotting(COVID_data_reconstructed, X_axis_inc, Y_axis_inc, Z_axis_inc, Date_start, Date_end) # Filter data to only keep the axes we want to plot
print('Plotting data')
ani, COVID_data_scatter_names = Scatter_graph(COVID_data_scatter) # Plot the data
print('Exporting animation as video')
Writer = animation.writers['ffmpeg'] # Export the file
writer = Writer(fps = Animation_fps, metadata=dict(artist='Me'), bitrate=1800)
Annimation_file = Animation_directory + '\\%s vs %s with %s from %s to %s.mp4' % (tuple(COVID_data_scatter_names) + (Date_start, Date_end))
ani.save(Annimation_file, writer = writer)
print('\nProcessing done in %0.2f minutes' % ((time.perf_counter() - Timer_start) / 60))
return ani
def Extract_data():
"""
Extracts and formats data in dictionnaries from Our World in Data CSV files
Parameters: Nothing
Returns:
- COVID_data: Dictionnary of cases, deaths and positivity rate data throughout the world
- Population_data: Dictionnary of population for each country
"""
chdir(Datafiles_directory) # Empty the datafiles directory
File_list = listdir()
for File in File_list:
remove(File)
COVID_data_path = Datafiles_directory + '\\OWID COVID data %s.csv' % (date.today().isoformat()) # String with path of COVID data (where it will be stored when downloaded)
urlretrieve('https://raw.githubusercontent.com/owid/COVID-19-data/master/public/data/owid-covid-data.csv', COVID_data_path) # Download and extract the data
COVID_data_file = open(COVID_data_path, 'r')
COVID_raw_data = COVID_data_file.readlines()
COVID_raw_data = [Row.split(',') for Row in COVID_raw_data[1:]]
COVID_data = {'_Country': {'Date': ['Total cases', 'Total cases per million', 'New cases', 'New cases smoothed', 'New cases per million', 'New cases per million smoothed', 'Total deaths', 'Total deaths per million', 'New deaths', 'New deaths smoothed', 'New deaths per million', 'New deaths per million smoothed', 'Positivity rate', 'Testing policy']}}
Date_list = []
for Row_inc in range(len(COVID_raw_data)): # For each row in the file...
Country = COVID_raw_data[Row_inc][2]
Date = COVID_raw_data[Row_inc][3]
if COVID_raw_data[Row_inc][2] not in COVID_data: COVID_data[Country] = {} # If a new country is encountered, a new entry to the dictionnary COVID_data is added
if Date not in Date_list: Date_list.append(Date) # If a new date is encoutered, it is added to the corresponding list
COVID_data[Country][Date] = []
for Column_inc in [4, 10, 5, 6, 11, 12, 7, 13, 8, 9, 14, 15, 23, 24]: # For each column we want to extract...
Data_item = COVID_raw_data[Row_inc][Column_inc]
if Column_inc != 24: # Column_inc of 24 is the testing policy and is a string so can't appended as a float, prompting this exception
if Data_item == '': COVID_data[Country][Date].append(None) # If there's nothing, a None element is added
else: COVID_data[Country][Date].append(float(COVID_raw_data[Row_inc][Column_inc]))
else: COVID_data[Country][Date].append(COVID_raw_data[Row_inc][Column_inc])
if COVID_raw_data[Row_inc][2] == 'International' or COVID_raw_data[Row_inc][2] == 'World': # The entries "World" and "International" aren't interesting so they are ignored
break
COVID_data_file.close()
Date_start_raw_data, Date_end_raw_data = min(Date_list), max(Date_list)
return COVID_data, Date_start_raw_data, Date_end_raw_data
def Reconstruct_COVID_data(COVID_data):
"""
Reconstructs missing chunks of data by linear interpolation
Parameters:
COVID_data: Dictionnary of cases, deaths and positivity rates data throughout the world
Returns:
COVID_data_reconstructed: Reconstructed dictionnary of cases, deaths and positivity rates data throughout the world
"""
COVID_data_reconstructed = {}
COVID_data_reconstructed['_Country'] = COVID_data['_Country']
Countries_list = list(COVID_data.keys())[1:]
for Country in Countries_list: # For each country...
COVID_data_single_country = list(COVID_data[Country].values()) # Extract the matrix containing the data and transpose it. That way, each element of a single list in the array corresponds to one column (see help of Main_script) and it makes it easier to navigate through each column and recontruct the missing elements
T_COVID_data_single_country = list(map(list, zip(*COVID_data_single_country)))
for Column_inc in range(len(T_COVID_data_single_country)): # For each column...
Column = T_COVID_data_single_country[Column_inc]
Max_column_inc = len(Column) - 1
Row_inc = 0
while Column[Row_inc] == None and Row_inc < Max_column_inc: # Recontructing missing data at the beginning is impossible so we just skip the first rows with a None in them
Row_inc += 1
if None in Column: # If a None is in the list (meaning there are bits of data missing)...
while Row_inc < Max_column_inc: # Not including this line could prompt an index error
if Column[Row_inc] == None: # When a None in encoutered...
None_interval_start = Row_inc # Recording when the segments of None starts and ends
while Column[Row_inc] == None and Row_inc < Max_column_inc:
Row_inc += 1
None_interval_end = Row_inc - 1
Interpolation_interval_length = None_interval_end - None_interval_start + 2
if Row_inc < Max_column_inc: # Reconstruction of the segment by linear interpolation : Y = mX + b with m = (Y_max - Y_min) / (X_max - X_min)
m = (Column[None_interval_end + 1] - Column[None_interval_start - 1]) / Interpolation_interval_length
b = Column[None_interval_start - 1]
for Row_inc in range(None_interval_start, Row_inc):
T_COVID_data_single_country[Column_inc][Row_inc] = m * (Row_inc - None_interval_start + 1) + b
else: # In the case the None segment goes on until the end, the last known value is just copied
for Row_inc in range(None_interval_start, Row_inc + 1):
T_COVID_data_single_country[Column_inc][Row_inc] = T_COVID_data_single_country[Column_inc][None_interval_start - 1]
Row_inc += 1
COVID_data_single_country_reconstructed = list(map(list, zip(*T_COVID_data_single_country))) # Retranspose the matrix to get the reconstruted data in the correct format
Date_list_country = list(COVID_data[Country].keys()) # Add the reconstructed data to the appropriate dictionnary
COVID_data_reconstructed[Country] = {}
for Date_inc in range(len(Date_list_country)):
Date = Date_list_country[Date_inc]
COVID_data_reconstructed[Country][Date] = COVID_data_single_country_reconstructed[Date_inc]
return COVID_data_reconstructed
def Export_in_files(COVID_data, COVID_data_reconstructed):
"""
Exports the raw and reconstructed data in seperate files
Parameters:
- COVID_data: Dictionnary of cases, deaths and positivity rate data throughout the world
- Covid_data_reconstructued: Reconstructed dictionnary of cases, deaths and positivity rates data throughout the world
Returns: Nothing
"""
F_data_file = open(Datafiles_directory + '\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')
FR_data_file = open(Datafiles_directory + '\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')
COVID_data_lists = [COVID_data, COVID_data_reconstructed]
Data_file_list = [F_data_file, FR_data_file]
Countries_list = list(COVID_data.keys())[1:]
for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file
COVID_data_temp = COVID_data_lists[Data_set_inc]
Data_file_temp = Data_file_list[Data_set_inc]
Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\n')
for Country in Countries_list:
COVID_data_single_country = COVID_data_temp[Country]
Date_list = list(COVID_data[Country].keys())
for Date in Date_list:
COVID_data_single_country_single_date = COVID_data_single_country[Date]
Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None
Data_file_temp.write('%s;%s;' % (Country, Date))
Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))
Data_file_temp.write('\n')
Data_file_temp.close()
def Extract_data_for_plotting(COVID_data, X_Axis_inc, Y_Axis_inc, Z_Axis_inc, Date_start, Date_end, Keep_no_PR = True):
"""
Extract data from recontructed COVID data in order to only keep data that will be plotted
Parameters:
- COVID_data: Dictionnary of cases, deaths and positivity rates data throughout the world (usually reconstructed)
- X_axis_inc: Integer, data to use for the X axis
- Y_axis_inc: Integer, data to use for the Y axis
- Z_axis_inc: Integer, data to use for the colors of the points
- Date_start: String, starting date of the animation
- Date_end: String, ending date of the animation
- Keep_no_PR: Boolean indicating whether or not countries without a positivity rate have to be kept. Default is True
Returns:
COVID_data_scatter: Reconstructed dictionnary of the 3 columns the user asked to plot throughout time
"""
Date_start_obj = datetime.strptime(Date_start, '%Y-%m-%d') # Create a list of all the dates to extract
Date_end_obj = datetime.strptime(Date_end, '%Y-%m-%d')
Date_difference = (Date_end_obj - Date_start_obj).days + 1
Date_list = [(Date_start_obj + timedelta(Days)).isoformat()[:10] for Days in range(Date_difference)]
Countries_list = list(COVID_data.keys())[1:]
COVID_data_scatter = {'0Date': {'Country': [COVID_data['_Country']['Date'][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]]}}
for Date in Date_list: # For each date and each country...
COVID_data_scatter[Date] = {}
for Country in Countries_list:
try:
Data_items = [COVID_data[Country][Date][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]] # This line will prompt an error in case the data doesn't exist, hence the try - except structure (much easier than 10 000 conditions to try to figure out if the data exists for a date and country)
if None not in Data_items[:2] and not (not Keep_no_PR and Data_items[2] == None): # Any data point that has a None as its X or Y coordinate is exlcuded, and also Z if asked by the user
if min(Data_items[:2]) > 0: COVID_data_scatter[Date][Country] = Data_items # Since the graph is in logscale, points with 0 as their X or Y coordinate are excluded (because log(0) doesn't exist).
# This double verification can't be done in one line because having None in a list you're trying to find the minimum of prompts an error
except: pass
if COVID_data_scatter[Date] == {}: COVID_data_scatter.pop(Date)
return COVID_data_scatter
def Annotations_frame(Points_to_display, Countries_displayed, Frame_limits):
|
def Scatter_graph(COVID_data_scatter, Display_annotations_mask = False):
"""
Plots data entered in parameters
Parameters:
- COVID_data_scatter: Reconstructed dictionnary of the 3 columns the user asked to plot throughout time
- Display_annotations_mask: Boolean indicating whether to display the outline of annotations created by Annotations_frame() or not
Returns:
- ani: Animation object created by matplotlib
- COVID_data_scatter_names: List of names of the columns plotted
"""
COVID_data_scatter_names = COVID_data_scatter.pop('0Date')['Country'] # Extract names of columns plotted
X_axis, Y_axis, Z_axis = [], [], [] # Separate the axes in COVID_data_scatter in order to find the minimum and maximum along each axis
for Date_item in COVID_data_scatter.values():
for Country_item in Date_item.values():
for Axis_inc in range(3):
[X_axis, Y_axis, Z_axis][Axis_inc].append(Country_item[Axis_inc])
Min_list, Max_list = [], [] # Limits of the plotting area
Graph_window_margin = 2 # Since the graph is in log scale, the plotting area can't be extended using New max = Factor * (Max - Min) so I just went with multiplying the maximum and dividing the minimum by a factor of 2
for Axis_inc in range(2):
Min_list.append(min([X_axis, Y_axis][Axis_inc]) / Graph_window_margin)
Max_list.append(max([X_axis, Y_axis][Axis_inc]) * Graph_window_margin)
cmap = cm.jet # Colormap for the 3rd axis
cmap = colors.LinearSegmentedColormap.from_list('jet_truncated', cmap(np.linspace(0.2, 0.95, 100)))
Z_axis_cleaned = list(filter(lambda Item: Item != None, Z_axis)) # Positivity rate to color converter
norm = colors.Normalize(vmin = 0, vmax = max(Z_axis_cleaned), clip = True)
mapper = cm.ScalarMappable(norm = norm, cmap = cmap)
plt.close() # Initialise plotting area. A simple "plt.clf()" doesn't work to erase everything and prompts glitches after the 2nd execution of the code, forcing us to close the figure and reopen it
fig = plt.figure("Scatter graph of COVID data")
fig.set_size_inches(tuple(1/fig.dpi * np.array([1920, 1080])))
ax = fig.gca()
manager = plt.get_current_fig_manager() # Adapt the matplotlib window to the screen
manager.window.showMaximized()
Data_frames = zip(COVID_data_scatter.keys(), COVID_data_scatter.values()) # Transform the first level of dictionnary into a list because we need to have access to the keys of that first level during the creation of the animation frames
Animation_frames = [] # List where all the matplotlib objects for the animation will be stored
for Frame in Data_frames:
Date = Frame[0]
Points_to_display, Positivity_rate_list, Points_colors = [], [], []
Countries_displayed = list(Frame[1].keys())
for Country in Countries_displayed: # For each country...
Country_coords = Frame[1][Country][:2]
Positivity_rate = Frame[1][Country][2]
Points_to_display.append(Country_coords)
if Positivity_rate != None: # If there is a positivity rate for that country, it is plotted with the color it corresponds to on the colormap
Positivity_rate_list.append(Positivity_rate)
Points_colors.append(mapper.to_rgba(Positivity_rate))
else: # Otherwise, it appears in #ABB7B7 gray and a "-1" is appended to the list of positivity rates. That way, these points will be in last after the sorting in descending order in a few lines
Positivity_rate_list.append(-1)
Points_colors.append((0.6627, 0.6627, 0.6627, 1))
All_points_info = list(zip(Countries_displayed, Points_to_display, Positivity_rate_list, Points_colors)) # Group everything, sort the points based on the positivity rate and then seperate everything to get the same objects as before but sorted
All_points_info.sort(key = lambda x: x[2])
All_points_info = list(zip(*All_points_info))
Countries_displayed = list(All_points_info[0])
Points_to_display = list(All_points_info[1])
Positivity_rate_list = list(All_points_info[2])
Points_colors = list(All_points_info[3])
X_list_frame, Y_list_frame = zip(*Points_to_display) # Separate X and Y axes and plot the points
scatter = ax.scatter(X_list_frame, Y_list_frame, c = Points_colors, s = np.pi * (Marker_ray*72/fig.dpi)**2, linewidth = 0.5, edgecolors = 'black') # Marker ray is the radius of the circle in pixels but s is the area of the circle in points. We have to convert the pixels in points (1 inch = 72 points = Screen dpi) then apply area = pi * radius²
# Note: ax.scatter plots the points one by one so the last elements of the lists will be above the firsts. Since the X and Y axes are sorted in ascending order of positivity rate, the last points (high positivity rates) will be on top. This is on purpose because these are the most interesting ones
Text_date = ax.text(0.02, 0.97, Date, transform = ax.transAxes, fontsize = Date_fontsize, verticalalignment = 'top', horizontalalignment = 'left', bbox = dict(boxstyle = 'round', facecolor = 'white', alpha = 0.9, pad = 0.3)) # Display the date
fig.tight_layout() # Annotations_frame() requires the use of lines regarding the size of the plotting area. For them to work properly, we have to virtually draw the elements, which is why we use fig.tight_layout() in the middle of the creation of the animation frames
Countries_to_annotate, Annotations_mask = Annotations_frame(Points_to_display[::-1], Countries_displayed[::-1], (Min_list[0], Max_list[0], Min_list[1], Max_list[1])) # Decide which countries to annotate and which not to
Annotation_list = []
for Country, Country_coords in zip(Countries_to_annotate.keys(), Countries_to_annotate.values()): # Annotate countries
Annotation_list.append(ax.annotate(Country, Country_coords, textcoords = 'offset pixels', xytext=(0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.5 + Annotation_offset), ha='center', va='center', fontsize = Annotation_fontsize))
if Display_annotations_mask: # If something goes wrong during an edit, the user can still display the annotations outline
ax_tw_x = ax.twinx() # Duplicate axis. Compulsory because the graph is in logscale and an image cannot be properly displayed in logscale
ax2 = ax_tw_x.twiny()
mapper_mask = cm.ScalarMappable(norm = colors.Normalize(vmin = 0, vmax = 1, clip = True), cmap = cm.gray) # Convert array of bools into array of colors then display the image
Annotations_mask_im = mapper_mask.to_rgba(np.rot90(np.invert(Annotations_mask) + np.zeros(Annotations_mask.shape)), alpha = 0.3)
Annotations_mask_ax = ax2.imshow(Annotations_mask_im, extent = [Min_list[0], Max_list[0], Min_list[1], Max_list[1]], aspect = 'auto')
ax_tw_x.axis('off') # Not display axes of the image
ax2.axis('off')
Animation_frames.append([scatter, Text_date, Annotations_mask_ax] + Annotation_list)
else: Animation_frames.append([scatter, Text_date] + Annotation_list)
ax.set_title("COVID-19 pandemic - %s vs. %s" % tuple(COVID_data_scatter_names[:2][::-1]), fontsize = Title_fontsize, pad = Title_pad)
ax.set_xlabel(COVID_data_scatter_names[0], fontsize = Axis_label_fontsize)
ax.set_ylabel(COVID_data_scatter_names[1], fontsize = Axis_label_fontsize)
ax.set_xlim(Min_list[0], Max_list[0])
ax.set_ylim(Min_list[1], Max_list[1])
ax.set_xscale('log')
ax.set_yscale('log')
ax.grid(linestyle = '--', linewidth = 1.5, which = 'major')
ax.grid(linestyle = '--', linewidth = 0.5, which = 'minor')
ax.set_axisbelow(True)
ax.tick_params(axis='x', labelsize = Axis_tick_fontsize)
ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)
Formatter_list = [] # Display axes graduations as multiples of 10 (rather than 10^n) and find how many decimal places to display
for Axis_inc in range(2):
if Min_list[Axis_inc] < 1: Min_axis_log = int(np.abs(np.floor(np.log10(Min_list[Axis_inc])))) - 1
else: Min_axis_log = 0
Formatter_list.append('%.' + str(Min_axis_log) + 'f')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[0]))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[1]))
Divider = make_axes_locatable(ax) # Display 3rd axis (colors). Using make_axes_locatable() allows for better tight_layout results
cax = Divider.append_axes('right', size = '2%', pad = 0.3)
cbar = fig.colorbar(mapper, cax = cax)
cbar.ax.set_ylabel(COVID_data_scatter_names[2], fontsize = Axis_label_fontsize, labelpad=Axis_label_pad)
cbar.ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)
cbar.ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax = 1, decimals = 0)) # Set axis graduations as percentage with no decimal places
ani = animation.ArtistAnimation(fig, Animation_frames, blit = True, interval = Animation_interval)
fig.tight_layout()
fig.show()
return ani, COVID_data_scatter_names
| """
Tells which countries to annotate and which not to. Since the lists in parameters are sorted by descending order of positivity rate, the countries with higher positivity rates will be examined first and thus annotatd with more priority
Parameters:
- Points_to_display: List of X and Y coordinates of each point displayed on the graph
- Countries_displayed: List of countries displayed on the graph
- Frame_limits: Tuple, limits of the plotting area (X_min, X_max, Y_min, Y_max)
Returns:
- Countries_to_annotate: List of countries to annotate
- Annotations_mask: Numpy array of bools, outline of the annotations. This variable is only used in this function to decide which countries to annotate and which not to but I had so many problems in finding the correct formulas that just in case, I wanted to be able to display it easilly in Scatter_graph() even after solving all problems
"""
X_list_frame, Y_list_frame = zip(*Points_to_display) # Transform tuples of (X, Y) into 2 distinct lists of X and Y coordinates
Frame_limits_log = list(map(np.log10, Frame_limits))
X_min_log, X_max_log, Y_min_log, Y_max_log = Frame_limits_log
fig = plt.gcf()
ax = plt.gca()
ax_bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) # Get size in pixels of the plotting area
ax_width, ax_height = ax_bbox.width, ax_bbox.height
ax_width *= fig.dpi
ax_height *= fig.dpi
Annotations_mask = np.zeros((int(ax_width), int(ax_height)), bool) # Array of bools same size as plotting area where outlines of annotations will be stored
Countries_to_annotate = {}
for Country_inc in range(len(Countries_displayed)): # For each country...
Country = Countries_displayed[Country_inc]
Annotation_width_enlargment = 1.3 # Slight corrections to make the annotation outlines fit as best as possible. Found by trial and error
Annotation_height_enlargment = 1.6
Label_size = 0.5 * np.array([font.measure(Country)*Annotation_width_enlargment, Annotation_fontsize * Annotation_height_enlargment]) # Everything is divided by 2 because the origin of the anotation outline is in its center
Offset = [0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.7 + Annotation_offset] # Distance between point and annotation. Annotation_fontsize is in points so it has to be converted to pixels (1 inch = 72 points = screen dpi). 0.56 is just a correction found by trial and error
Country = Countries_displayed[Country_inc]
Country_coords = Points_to_display[Country_inc]
List_slice = [] # Get indices delimiting the outline of the annotation in the plotting area
for Axis_inc in range(2):
Min_log, Max_log = Frame_limits_log[Axis_inc*2 : Axis_inc*2 + 2] # Simple transformation: Y = (Y_max - Y_min) / (X_max - X_min) * (X - X_min) + Y_min
Coodrs_transformation = lambda x: (Annotations_mask.shape[Axis_inc] - 1)/(Max_log - Min_log) * (np.log10(x) - Min_log)
for Label_offset_sign in range(-1, 2, 2):
List_slice.append(sum([Coodrs_transformation(Country_coords[Axis_inc]), Offset[Axis_inc], Label_offset_sign * Label_size[Axis_inc]]))
Slice_X_min, Slice_X_max, Slice_Y_min, Slice_Y_max = map(int, List_slice)
Annotation_slice = np.s_[Slice_X_min : Slice_X_max + 1, Slice_Y_min : Slice_Y_max + 1]
if not np.any(Annotations_mask[Annotation_slice]): # If there isn't a True in the current annotation outline (meaing there already is another annotation displayed)...
Countries_to_annotate[Country] = Points_to_display[Country_inc] # The country has to be annotated
Annotations_mask[Annotation_slice] = True # All the elements in Annotations_mask in this area are set to True to signify there is now an annotation displayed there
return Countries_to_annotate, Annotations_mask | identifier_body |
Script_graph_data.py | from os import chdir, listdir, mkdir, remove
from urllib.request import urlretrieve
from tkinter import font as tkFont
from datetime import datetime, timedelta, date
import pathlib, time, tkinter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.animation as animation
from matplotlib import ticker, rcParams
from mpl_toolkits.axes_grid1 import make_axes_locatable
Root_directory = str(pathlib.Path(__file__).parent.absolute()) # Strings with path of different directories
Datafiles_directory = Root_directory + '\\Datafiles'
Animation_directory = Root_directory + '\\Animations'
for Directory in [Datafiles_directory, Animation_directory]: # Create necessary directories if they don't exist
try:
mkdir(Directory)
except:
pass
Title_fontsize = 25 # Size in pixels of different elements in the graph
Date_fontsize = 25
Annotation_fontsize = 15
Axis_label_fontsize = 20
Axis_tick_fontsize = 15
Marker_ray = 8 # Ray in pixels of the markers in the scater graph
Annotation_offset = - Annotation_fontsize * 0.05 # Correction of the spacing between points in the graph and their annotations
Title_pad = Title_fontsize * 0.5 # Spacing betwenn title and plotting area
Axis_label_pad = Axis_label_fontsize * 0.5 # Spacing betwenn axis label and plotting area
Animation_interval = 200 # Interval in milliseconds between each frame in animation
Animation_fps = int(1/Animation_interval * 1e3) # Frames per second of the animation
Graph_font = rcParams['font.family'] # Tkinter font object that will be used to calculate the area occupied by each annotation in Annotations_frame()
tkinter.Frame().destroy()
font = tkFont.Font(family = Graph_font, size = Annotation_fontsize)
def Main_script(X_axis_inc = 1, Y_axis_inc = 7, Z_axis_inc = 12, Date_start = None, Date_end = None):
"""
Main routine to execute to download, extract, reconstruct and plot COVID data
Parameters:
- X_axis_inc: Integer, data to use for the X axis. Default is 1 (Total cases per million)
- Y_axis_inc: Integer, data to use for the Y axis. Default is 7 (Total deaths per million)
- Z_axis_inc: Integer, data to use for the colors of the points. Default is 12 (Positivity rate)
- Date_start: String, starting date of the animation
- Date_end: String, ending date of the animation
Date are None by default to use every available date in the data (see first lines of this fuction)
Returns:
ani: Matplotlib animation object. If it's not returned, the animation isn't displayed
Axis incs:
0 - Total cases
1 - Total cases per million
2 - New cases
3 - New cases smoothed
4 - New cases per million
5 - New cases per million smoothed
6 - Total deaths
7 - Total deaths per million
8 - New deaths
9 - New deaths smoothed
10 - New deaths per million
11 - New deaths per million smoothed
12 - Positivity rate
13 - Testing policy
"""
Timer_start = time.perf_counter()
print('Collecting data from Our World in Data')
COVID_data, Date_start_raw_data, Date_end_raw_data = Extract_data() # Download and extract raw COVID data
if Date_start == None: # If no start date is specified, it is set to the first date in the data. The end date is then set to the last date in the data. That way, we can display the whole data without having to know when it starts and ends
Date_start = Date_start_raw_data
Date_end = Date_end_raw_data
elif Date_end == None: Date_end = Date_start # But if no end date is specified, only the start date is displayed
print('Recontructing missing chunks in the data by linear interplolation')
COVID_data_reconstructed = Reconstruct_COVID_data(COVID_data) # Reconstruct the missing data
print('Exporting data in files')
Export_in_files(COVID_data, COVID_data_reconstructed) # Export the original and reconstructed data in CSV files, just to have them and be able to look whenever we want
print('Isolating data to plot')
COVID_data_scatter = Extract_data_for_plotting(COVID_data_reconstructed, X_axis_inc, Y_axis_inc, Z_axis_inc, Date_start, Date_end) # Filter data to only keep the axes we want to plot
print('Plotting data')
ani, COVID_data_scatter_names = Scatter_graph(COVID_data_scatter) # Plot the data
print('Exporting animation as video')
Writer = animation.writers['ffmpeg'] # Export the file
writer = Writer(fps = Animation_fps, metadata=dict(artist='Me'), bitrate=1800)
Annimation_file = Animation_directory + '\\%s vs %s with %s from %s to %s.mp4' % (tuple(COVID_data_scatter_names) + (Date_start, Date_end))
ani.save(Annimation_file, writer = writer)
print('\nProcessing done in %0.2f minutes' % ((time.perf_counter() - Timer_start) / 60))
return ani
def Extract_data():
"""
Extracts and formats data in dictionnaries from Our World in Data CSV files
Parameters: Nothing
Returns:
- COVID_data: Dictionnary of cases, deaths and positivity rate data throughout the world
- Population_data: Dictionnary of population for each country
"""
chdir(Datafiles_directory) # Empty the datafiles directory
File_list = listdir()
for File in File_list:
remove(File)
COVID_data_path = Datafiles_directory + '\\OWID COVID data %s.csv' % (date.today().isoformat()) # String with path of COVID data (where it will be stored when downloaded)
urlretrieve('https://raw.githubusercontent.com/owid/COVID-19-data/master/public/data/owid-covid-data.csv', COVID_data_path) # Download and extract the data
COVID_data_file = open(COVID_data_path, 'r')
COVID_raw_data = COVID_data_file.readlines()
COVID_raw_data = [Row.split(',') for Row in COVID_raw_data[1:]]
COVID_data = {'_Country': {'Date': ['Total cases', 'Total cases per million', 'New cases', 'New cases smoothed', 'New cases per million', 'New cases per million smoothed', 'Total deaths', 'Total deaths per million', 'New deaths', 'New deaths smoothed', 'New deaths per million', 'New deaths per million smoothed', 'Positivity rate', 'Testing policy']}}
Date_list = []
for Row_inc in range(len(COVID_raw_data)): # For each row in the file...
Country = COVID_raw_data[Row_inc][2]
Date = COVID_raw_data[Row_inc][3]
if COVID_raw_data[Row_inc][2] not in COVID_data: COVID_data[Country] = {} # If a new country is encountered, a new entry to the dictionnary COVID_data is added
if Date not in Date_list: Date_list.append(Date) # If a new date is encoutered, it is added to the corresponding list
COVID_data[Country][Date] = []
for Column_inc in [4, 10, 5, 6, 11, 12, 7, 13, 8, 9, 14, 15, 23, 24]: # For each column we want to extract...
Data_item = COVID_raw_data[Row_inc][Column_inc]
if Column_inc != 24: # Column_inc of 24 is the testing policy and is a string so can't appended as a float, prompting this exception
if Data_item == '': COVID_data[Country][Date].append(None) # If there's nothing, a None element is added
else: COVID_data[Country][Date].append(float(COVID_raw_data[Row_inc][Column_inc]))
else: COVID_data[Country][Date].append(COVID_raw_data[Row_inc][Column_inc])
if COVID_raw_data[Row_inc][2] == 'International' or COVID_raw_data[Row_inc][2] == 'World': # The entries "World" and "International" aren't interesting so they are ignored
break
COVID_data_file.close()
Date_start_raw_data, Date_end_raw_data = min(Date_list), max(Date_list)
return COVID_data, Date_start_raw_data, Date_end_raw_data
def Reconstruct_COVID_data(COVID_data):
"""
Reconstructs missing chunks of data by linear interpolation
Parameters:
COVID_data: Dictionnary of cases, deaths and positivity rates data throughout the world
Returns:
COVID_data_reconstructed: Reconstructed dictionnary of cases, deaths and positivity rates data throughout the world
"""
COVID_data_reconstructed = {}
COVID_data_reconstructed['_Country'] = COVID_data['_Country']
Countries_list = list(COVID_data.keys())[1:]
for Country in Countries_list: # For each country...
COVID_data_single_country = list(COVID_data[Country].values()) # Extract the matrix containing the data and transpose it. That way, each element of a single list in the array corresponds to one column (see help of Main_script) and it makes it easier to navigate through each column and recontruct the missing elements
T_COVID_data_single_country = list(map(list, zip(*COVID_data_single_country)))
for Column_inc in range(len(T_COVID_data_single_country)): # For each column...
Column = T_COVID_data_single_country[Column_inc]
Max_column_inc = len(Column) - 1
Row_inc = 0
while Column[Row_inc] == None and Row_inc < Max_column_inc: # Recontructing missing data at the beginning is impossible so we just skip the first rows with a None in them
Row_inc += 1
if None in Column: # If a None is in the list (meaning there are bits of data missing)...
while Row_inc < Max_column_inc: # Not including this line could prompt an index error
if Column[Row_inc] == None: # When a None in encoutered...
None_interval_start = Row_inc # Recording when the segments of None starts and ends
while Column[Row_inc] == None and Row_inc < Max_column_inc:
Row_inc += 1
None_interval_end = Row_inc - 1
Interpolation_interval_length = None_interval_end - None_interval_start + 2
if Row_inc < Max_column_inc: # Reconstruction of the segment by linear interpolation : Y = mX + b with m = (Y_max - Y_min) / (X_max - X_min)
m = (Column[None_interval_end + 1] - Column[None_interval_start - 1]) / Interpolation_interval_length
b = Column[None_interval_start - 1]
for Row_inc in range(None_interval_start, Row_inc):
T_COVID_data_single_country[Column_inc][Row_inc] = m * (Row_inc - None_interval_start + 1) + b
else: # In the case the None segment goes on until the end, the last known value is just copied
for Row_inc in range(None_interval_start, Row_inc + 1):
T_COVID_data_single_country[Column_inc][Row_inc] = T_COVID_data_single_country[Column_inc][None_interval_start - 1]
Row_inc += 1
COVID_data_single_country_reconstructed = list(map(list, zip(*T_COVID_data_single_country))) # Retranspose the matrix to get the reconstruted data in the correct format
Date_list_country = list(COVID_data[Country].keys()) # Add the reconstructed data to the appropriate dictionnary
COVID_data_reconstructed[Country] = {}
for Date_inc in range(len(Date_list_country)):
Date = Date_list_country[Date_inc]
COVID_data_reconstructed[Country][Date] = COVID_data_single_country_reconstructed[Date_inc]
return COVID_data_reconstructed
def Export_in_files(COVID_data, COVID_data_reconstructed):
"""
Exports the raw and reconstructed data in seperate files
Parameters:
- COVID_data: Dictionnary of cases, deaths and positivity rate data throughout the world
- Covid_data_reconstructued: Reconstructed dictionnary of cases, deaths and positivity rates data throughout the world
Returns: Nothing
"""
F_data_file = open(Datafiles_directory + '\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')
FR_data_file = open(Datafiles_directory + '\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')
COVID_data_lists = [COVID_data, COVID_data_reconstructed]
Data_file_list = [F_data_file, FR_data_file]
Countries_list = list(COVID_data.keys())[1:]
for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file
COVID_data_temp = COVID_data_lists[Data_set_inc]
Data_file_temp = Data_file_list[Data_set_inc]
Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\n')
for Country in Countries_list:
COVID_data_single_country = COVID_data_temp[Country]
Date_list = list(COVID_data[Country].keys())
for Date in Date_list:
COVID_data_single_country_single_date = COVID_data_single_country[Date]
Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None
Data_file_temp.write('%s;%s;' % (Country, Date))
Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))
Data_file_temp.write('\n')
Data_file_temp.close()
def Extract_data_for_plotting(COVID_data, X_Axis_inc, Y_Axis_inc, Z_Axis_inc, Date_start, Date_end, Keep_no_PR = True):
"""
Extract data from recontructed COVID data in order to only keep data that will be plotted
Parameters:
- COVID_data: Dictionnary of cases, deaths and positivity rates data throughout the world (usually reconstructed)
- X_axis_inc: Integer, data to use for the X axis
- Y_axis_inc: Integer, data to use for the Y axis
- Z_axis_inc: Integer, data to use for the colors of the points
- Date_start: String, starting date of the animation
- Date_end: String, ending date of the animation
- Keep_no_PR: Boolean indicating whether or not countries without a positivity rate have to be kept. Default is True
Returns:
COVID_data_scatter: Reconstructed dictionnary of the 3 columns the user asked to plot throughout time
"""
Date_start_obj = datetime.strptime(Date_start, '%Y-%m-%d') # Create a list of all the dates to extract
Date_end_obj = datetime.strptime(Date_end, '%Y-%m-%d')
Date_difference = (Date_end_obj - Date_start_obj).days + 1
Date_list = [(Date_start_obj + timedelta(Days)).isoformat()[:10] for Days in range(Date_difference)]
Countries_list = list(COVID_data.keys())[1:]
COVID_data_scatter = {'0Date': {'Country': [COVID_data['_Country']['Date'][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]]}}
for Date in Date_list: # For each date and each country...
COVID_data_scatter[Date] = {}
for Country in Countries_list:
try:
Data_items = [COVID_data[Country][Date][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]] # This line will prompt an error in case the data doesn't exist, hence the try - except structure (much easier than 10 000 conditions to try to figure out if the data exists for a date and country)
if None not in Data_items[:2] and not (not Keep_no_PR and Data_items[2] == None): # Any data point that has a None as its X or Y coordinate is exlcuded, and also Z if asked by the user
if min(Data_items[:2]) > 0: COVID_data_scatter[Date][Country] = Data_items # Since the graph is in logscale, points with 0 as their X or Y coordinate are excluded (because log(0) doesn't exist).
# This double verification can't be done in one line because having None in a list you're trying to find the minimum of prompts an error
except: pass
if COVID_data_scatter[Date] == {}: COVID_data_scatter.pop(Date)
return COVID_data_scatter
def Annotations_frame(Points_to_display, Countries_displayed, Frame_limits):
"""
Tells which countries to annotate and which not to. Since the lists in parameters are sorted by descending order of positivity rate, the countries with higher positivity rates will be examined first and thus annotatd with more priority
Parameters:
- Points_to_display: List of X and Y coordinates of each point displayed on the graph
- Countries_displayed: List of countries displayed on the graph
- Frame_limits: Tuple, limits of the plotting area (X_min, X_max, Y_min, Y_max)
Returns:
- Countries_to_annotate: List of countries to annotate
- Annotations_mask: Numpy array of bools, outline of the annotations. This variable is only used in this function to decide which countries to annotate and which not to but I had so many problems in finding the correct formulas that just in case, I wanted to be able to display it easilly in Scatter_graph() even after solving all problems
"""
X_list_frame, Y_list_frame = zip(*Points_to_display) # Transform tuples of (X, Y) into 2 distinct lists of X and Y coordinates
Frame_limits_log = list(map(np.log10, Frame_limits))
X_min_log, X_max_log, Y_min_log, Y_max_log = Frame_limits_log
fig = plt.gcf()
ax = plt.gca()
ax_bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) # Get size in pixels of the plotting area
ax_width, ax_height = ax_bbox.width, ax_bbox.height
ax_width *= fig.dpi
ax_height *= fig.dpi
Annotations_mask = np.zeros((int(ax_width), int(ax_height)), bool) # Array of bools same size as plotting area where outlines of annotations will be stored
Countries_to_annotate = {}
for Country_inc in range(len(Countries_displayed)): # For each country...
Country = Countries_displayed[Country_inc]
Annotation_width_enlargment = 1.3 # Slight corrections to make the annotation outlines fit as best as possible. Found by trial and error
Annotation_height_enlargment = 1.6
Label_size = 0.5 * np.array([font.measure(Country)*Annotation_width_enlargment, Annotation_fontsize * Annotation_height_enlargment]) # Everything is divided by 2 because the origin of the anotation outline is in its center
Offset = [0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.7 + Annotation_offset] # Distance between point and annotation. Annotation_fontsize is in points so it has to be converted to pixels (1 inch = 72 points = screen dpi). 0.56 is just a correction found by trial and error
Country = Countries_displayed[Country_inc]
Country_coords = Points_to_display[Country_inc]
List_slice = [] # Get indices delimiting the outline of the annotation in the plotting area
for Axis_inc in range(2):
Min_log, Max_log = Frame_limits_log[Axis_inc*2 : Axis_inc*2 + 2] # Simple transformation: Y = (Y_max - Y_min) / (X_max - X_min) * (X - X_min) + Y_min
Coodrs_transformation = lambda x: (Annotations_mask.shape[Axis_inc] - 1)/(Max_log - Min_log) * (np.log10(x) - Min_log)
for Label_offset_sign in range(-1, 2, 2):
List_slice.append(sum([Coodrs_transformation(Country_coords[Axis_inc]), Offset[Axis_inc], Label_offset_sign * Label_size[Axis_inc]]))
Slice_X_min, Slice_X_max, Slice_Y_min, Slice_Y_max = map(int, List_slice)
Annotation_slice = np.s_[Slice_X_min : Slice_X_max + 1, Slice_Y_min : Slice_Y_max + 1]
if not np.any(Annotations_mask[Annotation_slice]): # If there isn't a True in the current annotation outline (meaing there already is another annotation displayed)...
Countries_to_annotate[Country] = Points_to_display[Country_inc] # The country has to be annotated
Annotations_mask[Annotation_slice] = True # All the elements in Annotations_mask in this area are set to True to signify there is now an annotation displayed there
return Countries_to_annotate, Annotations_mask
def Scatter_graph(COVID_data_scatter, Display_annotations_mask = False):
"""
Plots data entered in parameters
Parameters:
- COVID_data_scatter: Reconstructed dictionnary of the 3 columns the user asked to plot throughout time
- Display_annotations_mask: Boolean indicating whether to display the outline of annotations created by Annotations_frame() or not
Returns:
- ani: Animation object created by matplotlib
- COVID_data_scatter_names: List of names of the columns plotted
"""
COVID_data_scatter_names = COVID_data_scatter.pop('0Date')['Country'] # Extract names of columns plotted
X_axis, Y_axis, Z_axis = [], [], [] # Separate the axes in COVID_data_scatter in order to find the minimum and maximum along each axis
for Date_item in COVID_data_scatter.values():
for Country_item in Date_item.values():
for Axis_inc in range(3):
[X_axis, Y_axis, Z_axis][Axis_inc].append(Country_item[Axis_inc])
Min_list, Max_list = [], [] # Limits of the plotting area
Graph_window_margin = 2 # Since the graph is in log scale, the plotting area can't be extended using New max = Factor * (Max - Min) so I just went with multiplying the maximum and dividing the minimum by a factor of 2
for Axis_inc in range(2):
Min_list.append(min([X_axis, Y_axis][Axis_inc]) / Graph_window_margin)
Max_list.append(max([X_axis, Y_axis][Axis_inc]) * Graph_window_margin)
cmap = cm.jet # Colormap for the 3rd axis
cmap = colors.LinearSegmentedColormap.from_list('jet_truncated', cmap(np.linspace(0.2, 0.95, 100)))
Z_axis_cleaned = list(filter(lambda Item: Item != None, Z_axis)) # Positivity rate to color converter
norm = colors.Normalize(vmin = 0, vmax = max(Z_axis_cleaned), clip = True)
mapper = cm.ScalarMappable(norm = norm, cmap = cmap)
plt.close() # Initialise plotting area. A simple "plt.clf()" doesn't work to erase everything and prompts glitches after the 2nd execution of the code, forcing us to close the figure and reopen it
fig = plt.figure("Scatter graph of COVID data")
fig.set_size_inches(tuple(1/fig.dpi * np.array([1920, 1080])))
ax = fig.gca()
manager = plt.get_current_fig_manager() # Adapt the matplotlib window to the screen
manager.window.showMaximized()
Data_frames = zip(COVID_data_scatter.keys(), COVID_data_scatter.values()) # Transform the first level of dictionnary into a list because we need to have access to the keys of that first level during the creation of the animation frames
Animation_frames = [] # List where all the matplotlib objects for the animation will be stored
for Frame in Data_frames:
Date = Frame[0]
Points_to_display, Positivity_rate_list, Points_colors = [], [], []
Countries_displayed = list(Frame[1].keys())
for Country in Countries_displayed: # For each country...
Country_coords = Frame[1][Country][:2]
Positivity_rate = Frame[1][Country][2]
Points_to_display.append(Country_coords)
if Positivity_rate != None: # If there is a positivity rate for that country, it is plotted with the color it corresponds to on the colormap
Positivity_rate_list.append(Positivity_rate)
Points_colors.append(mapper.to_rgba(Positivity_rate))
else: # Otherwise, it appears in #ABB7B7 gray and a "-1" is appended to the list of positivity rates. That way, these points will be in last after the sorting in descending order in a few lines
Positivity_rate_list.append(-1)
Points_colors.append((0.6627, 0.6627, 0.6627, 1))
All_points_info = list(zip(Countries_displayed, Points_to_display, Positivity_rate_list, Points_colors)) # Group everything, sort the points based on the positivity rate and then seperate everything to get the same objects as before but sorted
All_points_info.sort(key = lambda x: x[2])
All_points_info = list(zip(*All_points_info))
Countries_displayed = list(All_points_info[0])
Points_to_display = list(All_points_info[1])
Positivity_rate_list = list(All_points_info[2])
Points_colors = list(All_points_info[3])
|
X_list_frame, Y_list_frame = zip(*Points_to_display) # Separate X and Y axes and plot the points
scatter = ax.scatter(X_list_frame, Y_list_frame, c = Points_colors, s = np.pi * (Marker_ray*72/fig.dpi)**2, linewidth = 0.5, edgecolors = 'black') # Marker ray is the radius of the circle in pixels but s is the area of the circle in points. We have to convert the pixels in points (1 inch = 72 points = Screen dpi) then apply area = pi * radius²
# Note: ax.scatter plots the points one by one so the last elements of the lists will be above the firsts. Since the X and Y axes are sorted in ascending order of positivity rate, the last points (high positivity rates) will be on top. This is on purpose because these are the most interesting ones
Text_date = ax.text(0.02, 0.97, Date, transform = ax.transAxes, fontsize = Date_fontsize, verticalalignment = 'top', horizontalalignment = 'left', bbox = dict(boxstyle = 'round', facecolor = 'white', alpha = 0.9, pad = 0.3)) # Display the date
fig.tight_layout() # Annotations_frame() requires the use of lines regarding the size of the plotting area. For them to work properly, we have to virtually draw the elements, which is why we use fig.tight_layout() in the middle of the creation of the animation frames
Countries_to_annotate, Annotations_mask = Annotations_frame(Points_to_display[::-1], Countries_displayed[::-1], (Min_list[0], Max_list[0], Min_list[1], Max_list[1])) # Decide which countries to annotate and which not to
Annotation_list = []
for Country, Country_coords in zip(Countries_to_annotate.keys(), Countries_to_annotate.values()): # Annotate countries
Annotation_list.append(ax.annotate(Country, Country_coords, textcoords = 'offset pixels', xytext=(0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.5 + Annotation_offset), ha='center', va='center', fontsize = Annotation_fontsize))
if Display_annotations_mask: # If something goes wrong during an edit, the user can still display the annotations outline
ax_tw_x = ax.twinx() # Duplicate axis. Compulsory because the graph is in logscale and an image cannot be properly displayed in logscale
ax2 = ax_tw_x.twiny()
mapper_mask = cm.ScalarMappable(norm = colors.Normalize(vmin = 0, vmax = 1, clip = True), cmap = cm.gray) # Convert array of bools into array of colors then display the image
Annotations_mask_im = mapper_mask.to_rgba(np.rot90(np.invert(Annotations_mask) + np.zeros(Annotations_mask.shape)), alpha = 0.3)
Annotations_mask_ax = ax2.imshow(Annotations_mask_im, extent = [Min_list[0], Max_list[0], Min_list[1], Max_list[1]], aspect = 'auto')
ax_tw_x.axis('off') # Not display axes of the image
ax2.axis('off')
Animation_frames.append([scatter, Text_date, Annotations_mask_ax] + Annotation_list)
else: Animation_frames.append([scatter, Text_date] + Annotation_list)
ax.set_title("COVID-19 pandemic - %s vs. %s" % tuple(COVID_data_scatter_names[:2][::-1]), fontsize = Title_fontsize, pad = Title_pad)
ax.set_xlabel(COVID_data_scatter_names[0], fontsize = Axis_label_fontsize)
ax.set_ylabel(COVID_data_scatter_names[1], fontsize = Axis_label_fontsize)
ax.set_xlim(Min_list[0], Max_list[0])
ax.set_ylim(Min_list[1], Max_list[1])
ax.set_xscale('log')
ax.set_yscale('log')
ax.grid(linestyle = '--', linewidth = 1.5, which = 'major')
ax.grid(linestyle = '--', linewidth = 0.5, which = 'minor')
ax.set_axisbelow(True)
ax.tick_params(axis='x', labelsize = Axis_tick_fontsize)
ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)
Formatter_list = [] # Display axes graduations as multiples of 10 (rather than 10^n) and find how many decimal places to display
for Axis_inc in range(2):
if Min_list[Axis_inc] < 1: Min_axis_log = int(np.abs(np.floor(np.log10(Min_list[Axis_inc])))) - 1
else: Min_axis_log = 0
Formatter_list.append('%.' + str(Min_axis_log) + 'f')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[0]))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[1]))
Divider = make_axes_locatable(ax) # Display 3rd axis (colors). Using make_axes_locatable() allows for better tight_layout results
cax = Divider.append_axes('right', size = '2%', pad = 0.3)
cbar = fig.colorbar(mapper, cax = cax)
cbar.ax.set_ylabel(COVID_data_scatter_names[2], fontsize = Axis_label_fontsize, labelpad=Axis_label_pad)
cbar.ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)
cbar.ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax = 1, decimals = 0)) # Set axis graduations as percentage with no decimal places
ani = animation.ArtistAnimation(fig, Animation_frames, blit = True, interval = Animation_interval)
fig.tight_layout()
fig.show()
return ani, COVID_data_scatter_names | random_line_split | |
Script_graph_data.py | from os import chdir, listdir, mkdir, remove
from urllib.request import urlretrieve
from tkinter import font as tkFont
from datetime import datetime, timedelta, date
import pathlib, time, tkinter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.animation as animation
from matplotlib import ticker, rcParams
from mpl_toolkits.axes_grid1 import make_axes_locatable
Root_directory = str(pathlib.Path(__file__).parent.absolute()) # Strings with path of different directories
Datafiles_directory = Root_directory + '\\Datafiles'
Animation_directory = Root_directory + '\\Animations'
for Directory in [Datafiles_directory, Animation_directory]: # Create necessary directories if they don't exist
try:
mkdir(Directory)
except:
pass
Title_fontsize = 25 # Size in pixels of different elements in the graph
Date_fontsize = 25
Annotation_fontsize = 15
Axis_label_fontsize = 20
Axis_tick_fontsize = 15
Marker_ray = 8 # Ray in pixels of the markers in the scater graph
Annotation_offset = - Annotation_fontsize * 0.05 # Correction of the spacing between points in the graph and their annotations
Title_pad = Title_fontsize * 0.5 # Spacing betwenn title and plotting area
Axis_label_pad = Axis_label_fontsize * 0.5 # Spacing betwenn axis label and plotting area
Animation_interval = 200 # Interval in milliseconds between each frame in animation
Animation_fps = int(1/Animation_interval * 1e3) # Frames per second of the animation
Graph_font = rcParams['font.family'] # Tkinter font object that will be used to calculate the area occupied by each annotation in Annotations_frame()
tkinter.Frame().destroy()
font = tkFont.Font(family = Graph_font, size = Annotation_fontsize)
def | (X_axis_inc = 1, Y_axis_inc = 7, Z_axis_inc = 12, Date_start = None, Date_end = None):
"""
Main routine to execute to download, extract, reconstruct and plot COVID data
Parameters:
- X_axis_inc: Integer, data to use for the X axis. Default is 1 (Total cases per million)
- Y_axis_inc: Integer, data to use for the Y axis. Default is 7 (Total deaths per million)
- Z_axis_inc: Integer, data to use for the colors of the points. Default is 12 (Positivity rate)
- Date_start: String, starting date of the animation
- Date_end: String, ending date of the animation
Date are None by default to use every available date in the data (see first lines of this fuction)
Returns:
ani: Matplotlib animation object. If it's not returned, the animation isn't displayed
Axis incs:
0 - Total cases
1 - Total cases per million
2 - New cases
3 - New cases smoothed
4 - New cases per million
5 - New cases per million smoothed
6 - Total deaths
7 - Total deaths per million
8 - New deaths
9 - New deaths smoothed
10 - New deaths per million
11 - New deaths per million smoothed
12 - Positivity rate
13 - Testing policy
"""
Timer_start = time.perf_counter()
print('Collecting data from Our World in Data')
COVID_data, Date_start_raw_data, Date_end_raw_data = Extract_data() # Download and extract raw COVID data
if Date_start == None: # If no start date is specified, it is set to the first date in the data. The end date is then set to the last date in the data. That way, we can display the whole data without having to know when it starts and ends
Date_start = Date_start_raw_data
Date_end = Date_end_raw_data
elif Date_end == None: Date_end = Date_start # But if no end date is specified, only the start date is displayed
print('Recontructing missing chunks in the data by linear interplolation')
COVID_data_reconstructed = Reconstruct_COVID_data(COVID_data) # Reconstruct the missing data
print('Exporting data in files')
Export_in_files(COVID_data, COVID_data_reconstructed) # Export the original and reconstructed data in CSV files, just to have them and be able to look whenever we want
print('Isolating data to plot')
COVID_data_scatter = Extract_data_for_plotting(COVID_data_reconstructed, X_axis_inc, Y_axis_inc, Z_axis_inc, Date_start, Date_end) # Filter data to only keep the axes we want to plot
print('Plotting data')
ani, COVID_data_scatter_names = Scatter_graph(COVID_data_scatter) # Plot the data
print('Exporting animation as video')
Writer = animation.writers['ffmpeg'] # Export the file
writer = Writer(fps = Animation_fps, metadata=dict(artist='Me'), bitrate=1800)
Annimation_file = Animation_directory + '\\%s vs %s with %s from %s to %s.mp4' % (tuple(COVID_data_scatter_names) + (Date_start, Date_end))
ani.save(Annimation_file, writer = writer)
print('\nProcessing done in %0.2f minutes' % ((time.perf_counter() - Timer_start) / 60))
return ani
def Extract_data():
"""
Extracts and formats data in dictionnaries from Our World in Data CSV files
Parameters: Nothing
Returns:
- COVID_data: Dictionnary of cases, deaths and positivity rate data throughout the world
- Population_data: Dictionnary of population for each country
"""
chdir(Datafiles_directory) # Empty the datafiles directory
File_list = listdir()
for File in File_list:
remove(File)
COVID_data_path = Datafiles_directory + '\\OWID COVID data %s.csv' % (date.today().isoformat()) # String with path of COVID data (where it will be stored when downloaded)
urlretrieve('https://raw.githubusercontent.com/owid/COVID-19-data/master/public/data/owid-covid-data.csv', COVID_data_path) # Download and extract the data
COVID_data_file = open(COVID_data_path, 'r')
COVID_raw_data = COVID_data_file.readlines()
COVID_raw_data = [Row.split(',') for Row in COVID_raw_data[1:]]
COVID_data = {'_Country': {'Date': ['Total cases', 'Total cases per million', 'New cases', 'New cases smoothed', 'New cases per million', 'New cases per million smoothed', 'Total deaths', 'Total deaths per million', 'New deaths', 'New deaths smoothed', 'New deaths per million', 'New deaths per million smoothed', 'Positivity rate', 'Testing policy']}}
Date_list = []
for Row_inc in range(len(COVID_raw_data)): # For each row in the file...
Country = COVID_raw_data[Row_inc][2]
Date = COVID_raw_data[Row_inc][3]
if COVID_raw_data[Row_inc][2] not in COVID_data: COVID_data[Country] = {} # If a new country is encountered, a new entry to the dictionnary COVID_data is added
if Date not in Date_list: Date_list.append(Date) # If a new date is encoutered, it is added to the corresponding list
COVID_data[Country][Date] = []
for Column_inc in [4, 10, 5, 6, 11, 12, 7, 13, 8, 9, 14, 15, 23, 24]: # For each column we want to extract...
Data_item = COVID_raw_data[Row_inc][Column_inc]
if Column_inc != 24: # Column_inc of 24 is the testing policy and is a string so can't appended as a float, prompting this exception
if Data_item == '': COVID_data[Country][Date].append(None) # If there's nothing, a None element is added
else: COVID_data[Country][Date].append(float(COVID_raw_data[Row_inc][Column_inc]))
else: COVID_data[Country][Date].append(COVID_raw_data[Row_inc][Column_inc])
if COVID_raw_data[Row_inc][2] == 'International' or COVID_raw_data[Row_inc][2] == 'World': # The entries "World" and "International" aren't interesting so they are ignored
break
COVID_data_file.close()
Date_start_raw_data, Date_end_raw_data = min(Date_list), max(Date_list)
return COVID_data, Date_start_raw_data, Date_end_raw_data
def Reconstruct_COVID_data(COVID_data):
"""
Reconstructs missing chunks of data by linear interpolation
Parameters:
COVID_data: Dictionnary of cases, deaths and positivity rates data throughout the world
Returns:
COVID_data_reconstructed: Reconstructed dictionnary of cases, deaths and positivity rates data throughout the world
"""
COVID_data_reconstructed = {}
COVID_data_reconstructed['_Country'] = COVID_data['_Country']
Countries_list = list(COVID_data.keys())[1:]
for Country in Countries_list: # For each country...
COVID_data_single_country = list(COVID_data[Country].values()) # Extract the matrix containing the data and transpose it. That way, each element of a single list in the array corresponds to one column (see help of Main_script) and it makes it easier to navigate through each column and recontruct the missing elements
T_COVID_data_single_country = list(map(list, zip(*COVID_data_single_country)))
for Column_inc in range(len(T_COVID_data_single_country)): # For each column...
Column = T_COVID_data_single_country[Column_inc]
Max_column_inc = len(Column) - 1
Row_inc = 0
while Column[Row_inc] == None and Row_inc < Max_column_inc: # Recontructing missing data at the beginning is impossible so we just skip the first rows with a None in them
Row_inc += 1
if None in Column: # If a None is in the list (meaning there are bits of data missing)...
while Row_inc < Max_column_inc: # Not including this line could prompt an index error
if Column[Row_inc] == None: # When a None in encoutered...
None_interval_start = Row_inc # Recording when the segments of None starts and ends
while Column[Row_inc] == None and Row_inc < Max_column_inc:
Row_inc += 1
None_interval_end = Row_inc - 1
Interpolation_interval_length = None_interval_end - None_interval_start + 2
if Row_inc < Max_column_inc: # Reconstruction of the segment by linear interpolation : Y = mX + b with m = (Y_max - Y_min) / (X_max - X_min)
m = (Column[None_interval_end + 1] - Column[None_interval_start - 1]) / Interpolation_interval_length
b = Column[None_interval_start - 1]
for Row_inc in range(None_interval_start, Row_inc):
T_COVID_data_single_country[Column_inc][Row_inc] = m * (Row_inc - None_interval_start + 1) + b
else: # In the case the None segment goes on until the end, the last known value is just copied
for Row_inc in range(None_interval_start, Row_inc + 1):
T_COVID_data_single_country[Column_inc][Row_inc] = T_COVID_data_single_country[Column_inc][None_interval_start - 1]
Row_inc += 1
COVID_data_single_country_reconstructed = list(map(list, zip(*T_COVID_data_single_country))) # Retranspose the matrix to get the reconstruted data in the correct format
Date_list_country = list(COVID_data[Country].keys()) # Add the reconstructed data to the appropriate dictionnary
COVID_data_reconstructed[Country] = {}
for Date_inc in range(len(Date_list_country)):
Date = Date_list_country[Date_inc]
COVID_data_reconstructed[Country][Date] = COVID_data_single_country_reconstructed[Date_inc]
return COVID_data_reconstructed
def Export_in_files(COVID_data, COVID_data_reconstructed):
"""
Exports the raw and reconstructed data in seperate files
Parameters:
- COVID_data: Dictionnary of cases, deaths and positivity rate data throughout the world
- Covid_data_reconstructued: Reconstructed dictionnary of cases, deaths and positivity rates data throughout the world
Returns: Nothing
"""
F_data_file = open(Datafiles_directory + '\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')
FR_data_file = open(Datafiles_directory + '\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')
COVID_data_lists = [COVID_data, COVID_data_reconstructed]
Data_file_list = [F_data_file, FR_data_file]
Countries_list = list(COVID_data.keys())[1:]
for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file
COVID_data_temp = COVID_data_lists[Data_set_inc]
Data_file_temp = Data_file_list[Data_set_inc]
Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\n')
for Country in Countries_list:
COVID_data_single_country = COVID_data_temp[Country]
Date_list = list(COVID_data[Country].keys())
for Date in Date_list:
COVID_data_single_country_single_date = COVID_data_single_country[Date]
Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None
Data_file_temp.write('%s;%s;' % (Country, Date))
Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))
Data_file_temp.write('\n')
Data_file_temp.close()
def Extract_data_for_plotting(COVID_data, X_Axis_inc, Y_Axis_inc, Z_Axis_inc, Date_start, Date_end, Keep_no_PR = True):
"""
Extract data from recontructed COVID data in order to only keep data that will be plotted
Parameters:
- COVID_data: Dictionnary of cases, deaths and positivity rates data throughout the world (usually reconstructed)
- X_axis_inc: Integer, data to use for the X axis
- Y_axis_inc: Integer, data to use for the Y axis
- Z_axis_inc: Integer, data to use for the colors of the points
- Date_start: String, starting date of the animation
- Date_end: String, ending date of the animation
- Keep_no_PR: Boolean indicating whether or not countries without a positivity rate have to be kept. Default is True
Returns:
COVID_data_scatter: Reconstructed dictionnary of the 3 columns the user asked to plot throughout time
"""
Date_start_obj = datetime.strptime(Date_start, '%Y-%m-%d') # Create a list of all the dates to extract
Date_end_obj = datetime.strptime(Date_end, '%Y-%m-%d')
Date_difference = (Date_end_obj - Date_start_obj).days + 1
Date_list = [(Date_start_obj + timedelta(Days)).isoformat()[:10] for Days in range(Date_difference)]
Countries_list = list(COVID_data.keys())[1:]
COVID_data_scatter = {'0Date': {'Country': [COVID_data['_Country']['Date'][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]]}}
for Date in Date_list: # For each date and each country...
COVID_data_scatter[Date] = {}
for Country in Countries_list:
try:
Data_items = [COVID_data[Country][Date][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]] # This line will prompt an error in case the data doesn't exist, hence the try - except structure (much easier than 10 000 conditions to try to figure out if the data exists for a date and country)
if None not in Data_items[:2] and not (not Keep_no_PR and Data_items[2] == None): # Any data point that has a None as its X or Y coordinate is exlcuded, and also Z if asked by the user
if min(Data_items[:2]) > 0: COVID_data_scatter[Date][Country] = Data_items # Since the graph is in logscale, points with 0 as their X or Y coordinate are excluded (because log(0) doesn't exist).
# This double verification can't be done in one line because having None in a list you're trying to find the minimum of prompts an error
except: pass
if COVID_data_scatter[Date] == {}: COVID_data_scatter.pop(Date)
return COVID_data_scatter
def Annotations_frame(Points_to_display, Countries_displayed, Frame_limits):
"""
Tells which countries to annotate and which not to. Since the lists in parameters are sorted by descending order of positivity rate, the countries with higher positivity rates will be examined first and thus annotatd with more priority
Parameters:
- Points_to_display: List of X and Y coordinates of each point displayed on the graph
- Countries_displayed: List of countries displayed on the graph
- Frame_limits: Tuple, limits of the plotting area (X_min, X_max, Y_min, Y_max)
Returns:
- Countries_to_annotate: List of countries to annotate
- Annotations_mask: Numpy array of bools, outline of the annotations. This variable is only used in this function to decide which countries to annotate and which not to but I had so many problems in finding the correct formulas that just in case, I wanted to be able to display it easilly in Scatter_graph() even after solving all problems
"""
X_list_frame, Y_list_frame = zip(*Points_to_display) # Transform tuples of (X, Y) into 2 distinct lists of X and Y coordinates
Frame_limits_log = list(map(np.log10, Frame_limits))
X_min_log, X_max_log, Y_min_log, Y_max_log = Frame_limits_log
fig = plt.gcf()
ax = plt.gca()
ax_bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) # Get size in pixels of the plotting area
ax_width, ax_height = ax_bbox.width, ax_bbox.height
ax_width *= fig.dpi
ax_height *= fig.dpi
Annotations_mask = np.zeros((int(ax_width), int(ax_height)), bool) # Array of bools same size as plotting area where outlines of annotations will be stored
Countries_to_annotate = {}
for Country_inc in range(len(Countries_displayed)): # For each country...
Country = Countries_displayed[Country_inc]
Annotation_width_enlargment = 1.3 # Slight corrections to make the annotation outlines fit as best as possible. Found by trial and error
Annotation_height_enlargment = 1.6
Label_size = 0.5 * np.array([font.measure(Country)*Annotation_width_enlargment, Annotation_fontsize * Annotation_height_enlargment]) # Everything is divided by 2 because the origin of the anotation outline is in its center
Offset = [0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.7 + Annotation_offset] # Distance between point and annotation. Annotation_fontsize is in points so it has to be converted to pixels (1 inch = 72 points = screen dpi). 0.56 is just a correction found by trial and error
Country = Countries_displayed[Country_inc]
Country_coords = Points_to_display[Country_inc]
List_slice = [] # Get indices delimiting the outline of the annotation in the plotting area
for Axis_inc in range(2):
Min_log, Max_log = Frame_limits_log[Axis_inc*2 : Axis_inc*2 + 2] # Simple transformation: Y = (Y_max - Y_min) / (X_max - X_min) * (X - X_min) + Y_min
Coodrs_transformation = lambda x: (Annotations_mask.shape[Axis_inc] - 1)/(Max_log - Min_log) * (np.log10(x) - Min_log)
for Label_offset_sign in range(-1, 2, 2):
List_slice.append(sum([Coodrs_transformation(Country_coords[Axis_inc]), Offset[Axis_inc], Label_offset_sign * Label_size[Axis_inc]]))
Slice_X_min, Slice_X_max, Slice_Y_min, Slice_Y_max = map(int, List_slice)
Annotation_slice = np.s_[Slice_X_min : Slice_X_max + 1, Slice_Y_min : Slice_Y_max + 1]
if not np.any(Annotations_mask[Annotation_slice]): # If there isn't a True in the current annotation outline (meaing there already is another annotation displayed)...
Countries_to_annotate[Country] = Points_to_display[Country_inc] # The country has to be annotated
Annotations_mask[Annotation_slice] = True # All the elements in Annotations_mask in this area are set to True to signify there is now an annotation displayed there
return Countries_to_annotate, Annotations_mask
def Scatter_graph(COVID_data_scatter, Display_annotations_mask = False):
"""
Plots data entered in parameters
Parameters:
- COVID_data_scatter: Reconstructed dictionnary of the 3 columns the user asked to plot throughout time
- Display_annotations_mask: Boolean indicating whether to display the outline of annotations created by Annotations_frame() or not
Returns:
- ani: Animation object created by matplotlib
- COVID_data_scatter_names: List of names of the columns plotted
"""
COVID_data_scatter_names = COVID_data_scatter.pop('0Date')['Country'] # Extract names of columns plotted
X_axis, Y_axis, Z_axis = [], [], [] # Separate the axes in COVID_data_scatter in order to find the minimum and maximum along each axis
for Date_item in COVID_data_scatter.values():
for Country_item in Date_item.values():
for Axis_inc in range(3):
[X_axis, Y_axis, Z_axis][Axis_inc].append(Country_item[Axis_inc])
Min_list, Max_list = [], [] # Limits of the plotting area
Graph_window_margin = 2 # Since the graph is in log scale, the plotting area can't be extended using New max = Factor * (Max - Min) so I just went with multiplying the maximum and dividing the minimum by a factor of 2
for Axis_inc in range(2):
Min_list.append(min([X_axis, Y_axis][Axis_inc]) / Graph_window_margin)
Max_list.append(max([X_axis, Y_axis][Axis_inc]) * Graph_window_margin)
cmap = cm.jet # Colormap for the 3rd axis
cmap = colors.LinearSegmentedColormap.from_list('jet_truncated', cmap(np.linspace(0.2, 0.95, 100)))
Z_axis_cleaned = list(filter(lambda Item: Item != None, Z_axis)) # Positivity rate to color converter
norm = colors.Normalize(vmin = 0, vmax = max(Z_axis_cleaned), clip = True)
mapper = cm.ScalarMappable(norm = norm, cmap = cmap)
plt.close() # Initialise plotting area. A simple "plt.clf()" doesn't work to erase everything and prompts glitches after the 2nd execution of the code, forcing us to close the figure and reopen it
fig = plt.figure("Scatter graph of COVID data")
fig.set_size_inches(tuple(1/fig.dpi * np.array([1920, 1080])))
ax = fig.gca()
manager = plt.get_current_fig_manager() # Adapt the matplotlib window to the screen
manager.window.showMaximized()
Data_frames = zip(COVID_data_scatter.keys(), COVID_data_scatter.values()) # Transform the first level of dictionnary into a list because we need to have access to the keys of that first level during the creation of the animation frames
Animation_frames = [] # List where all the matplotlib objects for the animation will be stored
for Frame in Data_frames:
Date = Frame[0]
Points_to_display, Positivity_rate_list, Points_colors = [], [], []
Countries_displayed = list(Frame[1].keys())
for Country in Countries_displayed: # For each country...
Country_coords = Frame[1][Country][:2]
Positivity_rate = Frame[1][Country][2]
Points_to_display.append(Country_coords)
if Positivity_rate != None: # If there is a positivity rate for that country, it is plotted with the color it corresponds to on the colormap
Positivity_rate_list.append(Positivity_rate)
Points_colors.append(mapper.to_rgba(Positivity_rate))
else: # Otherwise, it appears in #ABB7B7 gray and a "-1" is appended to the list of positivity rates. That way, these points will be in last after the sorting in descending order in a few lines
Positivity_rate_list.append(-1)
Points_colors.append((0.6627, 0.6627, 0.6627, 1))
All_points_info = list(zip(Countries_displayed, Points_to_display, Positivity_rate_list, Points_colors)) # Group everything, sort the points based on the positivity rate and then seperate everything to get the same objects as before but sorted
All_points_info.sort(key = lambda x: x[2])
All_points_info = list(zip(*All_points_info))
Countries_displayed = list(All_points_info[0])
Points_to_display = list(All_points_info[1])
Positivity_rate_list = list(All_points_info[2])
Points_colors = list(All_points_info[3])
X_list_frame, Y_list_frame = zip(*Points_to_display) # Separate X and Y axes and plot the points
scatter = ax.scatter(X_list_frame, Y_list_frame, c = Points_colors, s = np.pi * (Marker_ray*72/fig.dpi)**2, linewidth = 0.5, edgecolors = 'black') # Marker ray is the radius of the circle in pixels but s is the area of the circle in points. We have to convert the pixels in points (1 inch = 72 points = Screen dpi) then apply area = pi * radius²
# Note: ax.scatter plots the points one by one so the last elements of the lists will be above the firsts. Since the X and Y axes are sorted in ascending order of positivity rate, the last points (high positivity rates) will be on top. This is on purpose because these are the most interesting ones
Text_date = ax.text(0.02, 0.97, Date, transform = ax.transAxes, fontsize = Date_fontsize, verticalalignment = 'top', horizontalalignment = 'left', bbox = dict(boxstyle = 'round', facecolor = 'white', alpha = 0.9, pad = 0.3)) # Display the date
fig.tight_layout() # Annotations_frame() requires the use of lines regarding the size of the plotting area. For them to work properly, we have to virtually draw the elements, which is why we use fig.tight_layout() in the middle of the creation of the animation frames
Countries_to_annotate, Annotations_mask = Annotations_frame(Points_to_display[::-1], Countries_displayed[::-1], (Min_list[0], Max_list[0], Min_list[1], Max_list[1])) # Decide which countries to annotate and which not to
Annotation_list = []
for Country, Country_coords in zip(Countries_to_annotate.keys(), Countries_to_annotate.values()): # Annotate countries
Annotation_list.append(ax.annotate(Country, Country_coords, textcoords = 'offset pixels', xytext=(0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.5 + Annotation_offset), ha='center', va='center', fontsize = Annotation_fontsize))
if Display_annotations_mask: # If something goes wrong during an edit, the user can still display the annotations outline
ax_tw_x = ax.twinx() # Duplicate axis. Compulsory because the graph is in logscale and an image cannot be properly displayed in logscale
ax2 = ax_tw_x.twiny()
mapper_mask = cm.ScalarMappable(norm = colors.Normalize(vmin = 0, vmax = 1, clip = True), cmap = cm.gray) # Convert array of bools into array of colors then display the image
Annotations_mask_im = mapper_mask.to_rgba(np.rot90(np.invert(Annotations_mask) + np.zeros(Annotations_mask.shape)), alpha = 0.3)
Annotations_mask_ax = ax2.imshow(Annotations_mask_im, extent = [Min_list[0], Max_list[0], Min_list[1], Max_list[1]], aspect = 'auto')
ax_tw_x.axis('off') # Not display axes of the image
ax2.axis('off')
Animation_frames.append([scatter, Text_date, Annotations_mask_ax] + Annotation_list)
else: Animation_frames.append([scatter, Text_date] + Annotation_list)
ax.set_title("COVID-19 pandemic - %s vs. %s" % tuple(COVID_data_scatter_names[:2][::-1]), fontsize = Title_fontsize, pad = Title_pad)
ax.set_xlabel(COVID_data_scatter_names[0], fontsize = Axis_label_fontsize)
ax.set_ylabel(COVID_data_scatter_names[1], fontsize = Axis_label_fontsize)
ax.set_xlim(Min_list[0], Max_list[0])
ax.set_ylim(Min_list[1], Max_list[1])
ax.set_xscale('log')
ax.set_yscale('log')
ax.grid(linestyle = '--', linewidth = 1.5, which = 'major')
ax.grid(linestyle = '--', linewidth = 0.5, which = 'minor')
ax.set_axisbelow(True)
ax.tick_params(axis='x', labelsize = Axis_tick_fontsize)
ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)
Formatter_list = [] # Display axes graduations as multiples of 10 (rather than 10^n) and find how many decimal places to display
for Axis_inc in range(2):
if Min_list[Axis_inc] < 1: Min_axis_log = int(np.abs(np.floor(np.log10(Min_list[Axis_inc])))) - 1
else: Min_axis_log = 0
Formatter_list.append('%.' + str(Min_axis_log) + 'f')
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[0]))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[1]))
Divider = make_axes_locatable(ax) # Display 3rd axis (colors). Using make_axes_locatable() allows for better tight_layout results
cax = Divider.append_axes('right', size = '2%', pad = 0.3)
cbar = fig.colorbar(mapper, cax = cax)
cbar.ax.set_ylabel(COVID_data_scatter_names[2], fontsize = Axis_label_fontsize, labelpad=Axis_label_pad)
cbar.ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)
cbar.ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax = 1, decimals = 0)) # Set axis graduations as percentage with no decimal places
ani = animation.ArtistAnimation(fig, Animation_frames, blit = True, interval = Animation_interval)
fig.tight_layout()
fig.show()
return ani, COVID_data_scatter_names
| Main_script | identifier_name |
script.js | ///IMPORTS-----------------------------------------
import screens from './components/screens.js'
import PausedTimeout from './functions/pausedTimeout.js'
import map1 from './maps/map1.js'
import map2 from './maps/map2.js'
import map3 from './maps/map3.js'
import map4 from './maps/map4.js'
import map5 from './maps/map5.js'
///GLOBAL VARIABLES-----------------------------------------
//values for car position
window.gameArea = document.getElementById('game-area')
//storing the intervals
window.objIntervals = {
}
window.myIntervals = {
moving:null,
rotating:null,
}
window.myIntervalValues = {
moving:50,
rotating:50,
}
window.timeouts = {
}
window.maps = [
map1,
map2,
map3,
map4,
map5
]
window.state = {
paused:true,
gameStart:true,
crashed:false,
completed:false,
mapIndex:0,
}
///GLOBAL METHODS-----------------------------------------
window.killScreen = function(){
let classList = document.getElementById('screen').classList
classList.add('fadeoutslide')
classList.remove('fadeinslide')
state.paused = false
state.gameStart = false
setTimeout(()=>{
document.getElementById('screen').remove()
},600)
}
window.rotationRatio = function(){
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
return ratio
}
window.PausedTimeout = PausedTimeout
window.pauseTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].pauseTimeout()
}
}
window.resumeTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].resumeTimeout()
}
}
window.destroyTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].stopTimeout()
delete timeouts[timeout]
}
}
window.rotationPercentage = function(){
let ratio
(rotationAngle%360)/360 < 0 ? ratio = Math.abs((rotationAngle%360)/360 + 1) : ratio = (rotationAngle%360)/360
if(ratio >= 0.5) ratio = (1 - ratio)
ratio*=4
if(ratio >1) ratio = 1 - (ratio - 1)
return ratio
}
window.handleCrash = function(){
clearIntervals()
screens({
title:'You crashed!',
content:"Play again?",
button:"Continue",
})
state.paused=true
state.crashed=true
return null
}
//keys for multiple key listeners
let activeKeys={}
let crashed = false
//FUNCTIONS-----------------------------------------
function clearIntervals(){
clearInterval(myIntervals.moving)
myIntervals.moving = null
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
deleteKeys()
return null
}
function | (){
for(let key in activeKeys){
delete activeKeys[key]
}
}
function handleVictory(){
clearIntervals()
screens({
title:'Goal reached!',
content:"",
button:"Next Level",
})
state.paused=true
state.completed=true
}
var crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
function checkVictory(){
const point = document.getElementById('victory-point')
//the values should adjust for what percentage to the side the car has rotated
let ratio = rotationPercentage()
//If the car is fully rotated to the side, tbe difference will be 25 pixels less to top, so 25px should be added.
if(
//from bottom to to p
(yPosition + (25*ratio) ) < (point.offsetTop + point.offsetHeight) &&
(yPosition + (25*ratio)) > point.offsetTop &&
(xPosition + 40) > point.offsetLeft &&
xPosition < (point.offsetLeft + point.offsetWidth)
){
return true
}else{
return false
}
}
function checkCrash(){
if(state.paused) return
let ratio = rotationPercentage()
function checkBoundaries(){
if(
(yPosition + (25 * ratio) ) < 0 | //TOP
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > gameArea.offsetHeight | //BOTTOM
(xPosition - (25 * ratio) ) < 0 | //LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > gameArea.offsetWidth //RIGHT
){
return true
}
}
const calcPoints = {
center(){
return ((mycar.offsetTop + mycar.offsetHeight)/2 + (mycar.offsetLeft + mycar.offsetWidth)/2)
},
topleft(){
return{
x:1,
y:1
}
}
}
function checkForeignObjects(){
let crashed = false
document.querySelectorAll('[crashable="true"]').forEach(crashable=>{
let foreignRatio, foreignRotation;
if(crashable.style.transform){
//this only works because rotateZ is the only transform applied
foreignRotation = parseInt(crashable.style.transform.match(/[0-9]+/));
//this tests if the foreign object is rotated
(foreignRotation%360)/360 < 0 ? foreignRatio = Math.abs((foreignRotation%360)/360 + 1) : foreignRatio = (foreignRotation%360)/360
if(foreignRatio >= 0.5) foreignRatio = (1 - foreignRatio)
foreignRatio*=4
if(foreignRatio >1) foreignRatio = 1 - (foreignRatio - 1)
}else{
foreignRatio = 0
}
//defines boundaries, adjusts for rotation
let top =(crashable.offsetTop + crashable.offsetHeight)
let bottom = crashable.offsetTop
let left = (crashable.offsetLeft+crashable.offsetWidth)
let right = crashable.offsetLeft
let difference = (crashable.offsetHeight - crashable.offsetWidth) /2
//tests the values
if(
(yPosition + (25 * ratio) ) < top - (difference * foreignRatio) && //INTO BOTTOM
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > bottom + (difference * foreignRatio) && //INTO TOP
(xPosition - (25 * ratio) ) < left + (difference * foreignRatio) && //INTO LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > right - (difference * foreignRatio) //INTO RIGHT
){
crashed = true
}
})
return crashed
}
if( checkBoundaries() | checkForeignObjects() ) return true
}
function move(isForward){
myIntervals.moving = setInterval(()=>{
if(state.paused) return
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
let ratio2 = (10 * (ratio*4))
if(ratio2 > 20) ratio2 -= 2*(ratio2 - 20)
let ratio3 = (10 * (ratio*4))
if(ratio3 > 10 && ratio3 < 30) ratio3 -= 2*(ratio3 - 10)
else if(ratio3 >= 30) ratio3 -=40
if(isForward){
yPosition -= (10 - ratio2)
xPosition += ratio3
}else{
yPosition += (10 - ratio2)
xPosition -=ratio3
}
if( checkVictory() ) return handleVictory()
mycar.style.top=`${yPosition}px`
mycar.style.left=`${xPosition}px`
},myIntervalValues.moving)
}
//EVENT LISTENERS ---------------------------------------------------
window.initListeners = function(){
document.addEventListener('keypress',e=>{
//WHEN YOU PRESS THE SPACEBAR
if(e.keyCode==32){
//PAUSES GAME
if(!state.paused){
screens({
title:'Paused',
content:'Press space to continue.',
})
state.paused = true
clearIntervals()
pauseTimeouts()
}else{
killScreen()
resumeTimeouts()
//ADDITIONAL OPTIONS IF SPACEBAR IS PRESSED
if(state.crashed){
destroyTimeouts()
state.crashed=false
maps[state.mapIndex].reset()
return crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
}
if(state.completed){
state.completed = false
maps[state.mapIndex].destroy()
state.mapIndex++
return maps[state.mapIndex].init()
}
}
}
})
//WHEN YOU PRESS ANY OTHER KEY
document.addEventListener('keydown',function handleKeyDown(e){
1
//38: top arrow....39 right arrow..... 40 bottom arrow... 37 left arrow
//16: shift, 32: spacebar
activeKeys[e.keyCode]=e.keyCode
// console.log(e.keyCode)
for(let key in activeKeys){
//toggle headlights
if(key==16){
document.querySelectorAll('#my-car .headlight').forEach(element=>{
if(!element.classList.contains('highbeams-in')){
element.classList.add('highbeams-in')
element.classList.remove('highbeams-out')
}
else{
element.classList.remove('highbeams-in')
element.classList.add('highbeams-out')
}
})
}
//move forward
if(key==38&&!myIntervals.moving){
if(state.paused) return
move(true)
}
//move backward
if(key==40&&!myIntervals.moving){
if(state.paused) return
move(false)
}
//rotate left
if(key==37&&!myIntervals.rotating){
if(state.paused) return
myIntervals.rotating = setInterval(()=>{
if(state.paused) return
rotationAngle-=10
mycar.style.transform = `rotateZ(${rotationAngle}deg)`
},myIntervalValues.rotating)
}
//rotate right
if(key==39&&!myIntervals.rotating){
if(state.paused) return
myIntervals.rotating = setInterval(()=>{
if(state.paused) return
rotationAngle+=10
mycar.style.transform = `rotateZ(${rotationAngle}deg)`
},myIntervalValues.rotating)
}
}
})
document.addEventListener('keyup',function handleKeyUp(e){
if(state.paused) return
delete activeKeys[e.keyCode]
if(e.keyCode==38|e.keyCode==40){
clearInterval(myIntervals.moving)
myIntervals.moving = null
}
if(e.keyCode==37|e.keyCode==39){
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
}
})
}
//INITIALIZATION ---------------------------------------------------
maps[state.mapIndex].init()
initListeners()
screens({
title:'Road Whiz',
content:'Can you survive the challenges?',
button:'Continue'
})
| deleteKeys | identifier_name |
script.js | ///IMPORTS-----------------------------------------
import screens from './components/screens.js'
import PausedTimeout from './functions/pausedTimeout.js'
import map1 from './maps/map1.js'
import map2 from './maps/map2.js'
import map3 from './maps/map3.js'
import map4 from './maps/map4.js'
import map5 from './maps/map5.js'
///GLOBAL VARIABLES-----------------------------------------
//values for car position
window.gameArea = document.getElementById('game-area')
//storing the intervals
window.objIntervals = {
}
window.myIntervals = {
moving:null,
rotating:null,
}
window.myIntervalValues = {
moving:50,
rotating:50,
}
window.timeouts = {
}
window.maps = [
map1,
map2,
map3,
map4,
map5
]
window.state = {
paused:true,
gameStart:true,
crashed:false,
completed:false,
mapIndex:0,
}
///GLOBAL METHODS-----------------------------------------
window.killScreen = function(){
let classList = document.getElementById('screen').classList
classList.add('fadeoutslide')
classList.remove('fadeinslide')
state.paused = false
state.gameStart = false
setTimeout(()=>{
document.getElementById('screen').remove()
},600)
}
window.rotationRatio = function(){
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
return ratio
}
window.PausedTimeout = PausedTimeout
window.pauseTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].pauseTimeout()
}
}
window.resumeTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].resumeTimeout()
}
}
window.destroyTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].stopTimeout()
delete timeouts[timeout]
}
}
window.rotationPercentage = function(){
let ratio
(rotationAngle%360)/360 < 0 ? ratio = Math.abs((rotationAngle%360)/360 + 1) : ratio = (rotationAngle%360)/360
if(ratio >= 0.5) ratio = (1 - ratio)
ratio*=4
if(ratio >1) ratio = 1 - (ratio - 1)
return ratio
}
window.handleCrash = function(){
clearIntervals()
screens({
title:'You crashed!',
content:"Play again?",
button:"Continue",
})
state.paused=true
state.crashed=true
return null
}
//keys for multiple key listeners
let activeKeys={}
let crashed = false
//FUNCTIONS-----------------------------------------
function clearIntervals(){
clearInterval(myIntervals.moving)
myIntervals.moving = null
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
deleteKeys()
return null
}
function deleteKeys(){
for(let key in activeKeys){
delete activeKeys[key]
}
}
function handleVictory(){
clearIntervals()
screens({
title:'Goal reached!',
content:"",
button:"Next Level",
})
state.paused=true
state.completed=true
}
var crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
function checkVictory(){
const point = document.getElementById('victory-point')
//the values should adjust for what percentage to the side the car has rotated
let ratio = rotationPercentage()
//If the car is fully rotated to the side, tbe difference will be 25 pixels less to top, so 25px should be added.
if(
//from bottom to to p
(yPosition + (25*ratio) ) < (point.offsetTop + point.offsetHeight) &&
(yPosition + (25*ratio)) > point.offsetTop &&
(xPosition + 40) > point.offsetLeft &&
xPosition < (point.offsetLeft + point.offsetWidth)
){
return true
}else |
}
function checkCrash(){
if(state.paused) return
let ratio = rotationPercentage()
function checkBoundaries(){
if(
(yPosition + (25 * ratio) ) < 0 | //TOP
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > gameArea.offsetHeight | //BOTTOM
(xPosition - (25 * ratio) ) < 0 | //LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > gameArea.offsetWidth //RIGHT
){
return true
}
}
const calcPoints = {
center(){
return ((mycar.offsetTop + mycar.offsetHeight)/2 + (mycar.offsetLeft + mycar.offsetWidth)/2)
},
topleft(){
return{
x:1,
y:1
}
}
}
function checkForeignObjects(){
let crashed = false
document.querySelectorAll('[crashable="true"]').forEach(crashable=>{
let foreignRatio, foreignRotation;
if(crashable.style.transform){
//this only works because rotateZ is the only transform applied
foreignRotation = parseInt(crashable.style.transform.match(/[0-9]+/));
//this tests if the foreign object is rotated
(foreignRotation%360)/360 < 0 ? foreignRatio = Math.abs((foreignRotation%360)/360 + 1) : foreignRatio = (foreignRotation%360)/360
if(foreignRatio >= 0.5) foreignRatio = (1 - foreignRatio)
foreignRatio*=4
if(foreignRatio >1) foreignRatio = 1 - (foreignRatio - 1)
}else{
foreignRatio = 0
}
//defines boundaries, adjusts for rotation
let top =(crashable.offsetTop + crashable.offsetHeight)
let bottom = crashable.offsetTop
let left = (crashable.offsetLeft+crashable.offsetWidth)
let right = crashable.offsetLeft
let difference = (crashable.offsetHeight - crashable.offsetWidth) /2
//tests the values
if(
(yPosition + (25 * ratio) ) < top - (difference * foreignRatio) && //INTO BOTTOM
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > bottom + (difference * foreignRatio) && //INTO TOP
(xPosition - (25 * ratio) ) < left + (difference * foreignRatio) && //INTO LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > right - (difference * foreignRatio) //INTO RIGHT
){
crashed = true
}
})
return crashed
}
if( checkBoundaries() | checkForeignObjects() ) return true
}
function move(isForward){
myIntervals.moving = setInterval(()=>{
if(state.paused) return
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
let ratio2 = (10 * (ratio*4))
if(ratio2 > 20) ratio2 -= 2*(ratio2 - 20)
let ratio3 = (10 * (ratio*4))
if(ratio3 > 10 && ratio3 < 30) ratio3 -= 2*(ratio3 - 10)
else if(ratio3 >= 30) ratio3 -=40
if(isForward){
yPosition -= (10 - ratio2)
xPosition += ratio3
}else{
yPosition += (10 - ratio2)
xPosition -=ratio3
}
if( checkVictory() ) return handleVictory()
mycar.style.top=`${yPosition}px`
mycar.style.left=`${xPosition}px`
},myIntervalValues.moving)
}
//EVENT LISTENERS ---------------------------------------------------
window.initListeners = function(){
document.addEventListener('keypress',e=>{
//WHEN YOU PRESS THE SPACEBAR
if(e.keyCode==32){
//PAUSES GAME
if(!state.paused){
screens({
title:'Paused',
content:'Press space to continue.',
})
state.paused = true
clearIntervals()
pauseTimeouts()
}else{
killScreen()
resumeTimeouts()
//ADDITIONAL OPTIONS IF SPACEBAR IS PRESSED
if(state.crashed){
destroyTimeouts()
state.crashed=false
maps[state.mapIndex].reset()
return crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
}
if(state.completed){
state.completed = false
maps[state.mapIndex].destroy()
state.mapIndex++
return maps[state.mapIndex].init()
}
}
}
})
//WHEN YOU PRESS ANY OTHER KEY
document.addEventListener('keydown',function handleKeyDown(e){
1
//38: top arrow....39 right arrow..... 40 bottom arrow... 37 left arrow
//16: shift, 32: spacebar
activeKeys[e.keyCode]=e.keyCode
// console.log(e.keyCode)
for(let key in activeKeys){
//toggle headlights
if(key==16){
document.querySelectorAll('#my-car .headlight').forEach(element=>{
if(!element.classList.contains('highbeams-in')){
element.classList.add('highbeams-in')
element.classList.remove('highbeams-out')
}
else{
element.classList.remove('highbeams-in')
element.classList.add('highbeams-out')
}
})
}
//move forward
if(key==38&&!myIntervals.moving){
if(state.paused) return
move(true)
}
//move backward
if(key==40&&!myIntervals.moving){
if(state.paused) return
move(false)
}
//rotate left
if(key==37&&!myIntervals.rotating){
if(state.paused) return
myIntervals.rotating = setInterval(()=>{
if(state.paused) return
rotationAngle-=10
mycar.style.transform = `rotateZ(${rotationAngle}deg)`
},myIntervalValues.rotating)
}
//rotate right
if(key==39&&!myIntervals.rotating){
if(state.paused) return
myIntervals.rotating = setInterval(()=>{
if(state.paused) return
rotationAngle+=10
mycar.style.transform = `rotateZ(${rotationAngle}deg)`
},myIntervalValues.rotating)
}
}
})
document.addEventListener('keyup',function handleKeyUp(e){
if(state.paused) return
delete activeKeys[e.keyCode]
if(e.keyCode==38|e.keyCode==40){
clearInterval(myIntervals.moving)
myIntervals.moving = null
}
if(e.keyCode==37|e.keyCode==39){
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
}
})
}
//INITIALIZATION ---------------------------------------------------
maps[state.mapIndex].init()
initListeners()
screens({
title:'Road Whiz',
content:'Can you survive the challenges?',
button:'Continue'
})
| {
return false
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.