| file_name (string, 4–140 chars) | prefix (string, up to 39k chars) | suffix (string, up to 36.1k chars) | middle (string, up to 29.4k chars) | fim_type (string, 4 classes) |
|---|---|---|---|---|
index.ts | import { assert } from '@0x/assert';
import { schemas } from '@0x/json-schemas';
import {
AbiEncoder,
abiUtils,
BigNumber,
decodeBytesAsRevertError,
decodeThrownErrorAsRevertError,
providerUtils,
RevertError,
StringRevertError,
} from '@0x/utils';
import { Web3Wrapper } from '@0x/web3-wrapper';
import {
AbiDefinition,
AbiType,
BlockParam,
CallData,
ConstructorAbi,
ContractAbi,
DataItem,
MethodAbi,
SupportedProvider,
TransactionReceiptWithDecodedLogs,
TxData,
TxDataPayable,
} from 'ethereum-types';
import Account from 'ethereumjs-account';
import * as util from 'ethereumjs-util';
import { default as VM } from 'ethereumjs-vm';
import PStateManager from 'ethereumjs-vm/dist/state/promisified';
export { linkLibrariesInBytecode, methodAbiToFunctionSignature } from './utils';
import { AwaitTransactionSuccessOpts } from './types';
import { formatABIDataItem } from './utils';
export { SubscriptionManager } from './subscription_manager';
export {
ContractEvent,
SendTransactionOpts,
AwaitTransactionSuccessOpts,
ContractFunctionObj,
ContractTxFunctionObj,
SubscriptionErrors,
} from './types';
export interface AbiEncoderByFunctionSignature {
[key: string]: AbiEncoder.Method;
}
const ARBITRARY_PRIVATE_KEY = 'e331b6d69882b4cb4ea581d88e0b604039a3de5967688d3dcffdd2270c0fd109';
// tslint:disable: max-classes-per-file
/**
* @dev A promise-compatible type that exposes a `txHash` field.
* Not used by BaseContract, but generated contracts will return it in
* `awaitTransactionSuccessAsync()`.
* Maybe there's a better place for this.
*/
export class PromiseWithTransactionHash<T> implements Promise<T> {
public readonly txHashPromise: Promise<string>;
private readonly _promise: Promise<T>;
constructor(txHashPromise: Promise<string>, promise: Promise<T>) {
this.txHashPromise = txHashPromise;
this._promise = promise;
}
// tslint:disable:promise-function-async
// tslint:disable:async-suffix
public then<TResult>(
onFulfilled?: (v: T) => TResult | Promise<TResult>,
onRejected?: (reason: any) => Promise<never>,
): Promise<TResult> {
return this._promise.then<TResult>(onFulfilled, onRejected);
}
public catch<TResult>(onRejected?: (reason: any) => Promise<TResult>): Promise<TResult | T> {
return this._promise.catch(onRejected);
}
public finally(onFinally?: (() => void) | null): Promise<T> {
return this._promise.finally(onFinally);
}
// tslint:enable:promise-function-async
// tslint:enable:async-suffix
get [Symbol.toStringTag](): 'Promise' {
return this._promise[Symbol.toStringTag];
}
}
export class BaseContract {
protected _abiEncoderByFunctionSignature: AbiEncoderByFunctionSignature;
protected _web3Wrapper: Web3Wrapper;
public abi: ContractAbi;
public address: string;
public contractName: string;
public constructorArgs: any[] = [];
public _deployedBytecodeIfExists?: Buffer;
private _evmIfExists?: VM;
private _evmAccountIfExists?: Buffer;
protected static _formatABIDataItemList(
abis: DataItem[],
values: any[],
formatter: (type: string, value: any) => any,
): any {
return values.map((value: any, i: number) => formatABIDataItem(abis[i], value, formatter));
}
protected static _lowercaseAddress(type: string, value: string): string {
return type === 'address' ? value.toLowerCase() : value;
}
protected static _bigNumberToString(_type: string, value: any): any {
return BigNumber.isBigNumber(value) ? value.toString() : value;
}
protected static _lookupConstructorAbi(abi: ContractAbi): ConstructorAbi |
protected static _throwIfCallResultIsRevertError(rawCallResult: string): void {
// Try to decode the call result as a revert error.
let revert: RevertError;
try {
revert = decodeBytesAsRevertError(rawCallResult);
} catch (err) {
// Can't decode it as a revert error, so assume it didn't revert.
return;
}
throw revert;
}
protected static _throwIfThrownErrorIsRevertError(error: Error): void {
// Try to decode a thrown error.
let revertError: RevertError;
try {
revertError = decodeThrownErrorAsRevertError(error);
} catch (err) {
// Can't decode it.
return;
}
// Re-cast StringRevertErrors as plain Errors for backwards-compatibility.
if (revertError instanceof StringRevertError) {
throw new Error(revertError.values.message as string);
}
throw revertError;
}
protected static _throwIfUnexpectedEmptyCallResult(rawCallResult: string, methodAbi: AbiEncoder.Method): void {
// With live nodes, we will receive an empty call result if:
// 1. The function has no return value.
// 2. The contract reverts without data.
// 3. The contract reverts with an invalid opcode (`assert(false)` or `invalid()`).
if (!rawCallResult || rawCallResult === '0x') {
const returnValueDataItem = methodAbi.getReturnValueDataItem();
if (returnValueDataItem.components === undefined || returnValueDataItem.components.length === 0) {
// Expected no result (which makes it hard to tell if the call reverted).
return;
}
throw new Error(`Function "${methodAbi.getSignature()}" reverted with no data`);
}
}
// Throws if the given arguments cannot be safely/correctly encoded based on
// the given inputAbi. An argument may not be considered safely encodeable
// if it overflows the corresponding Solidity type, there is a bug in the
// encoder, or the encoder performs unsafe type coercion.
public static strictArgumentEncodingCheck(inputAbi: DataItem[], args: any[]): string {
const abiEncoder = AbiEncoder.create(inputAbi);
const params = abiUtils.parseEthersParams(inputAbi);
const rawEncoded = abiEncoder.encode(args);
const rawDecoded = abiEncoder.decodeAsArray(rawEncoded);
for (let i = 0; i < rawDecoded.length; i++) {
const original = args[i];
const decoded = rawDecoded[i];
if (!abiUtils.isAbiDataEqual(params.names[i], params.types[i], original, decoded)) {
throw new Error(
`Cannot safely encode argument: ${params.names[i]} (${original}) of type ${
params.types[i]
}. (Possible type overflow or other encoding error)`,
);
}
}
return rawEncoded;
}
protected static async _applyDefaultsToContractTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
const txDataWithDefaults = BaseContract._removeUndefinedProperties<T>(txData);
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected static _assertCallParams(callData: Partial<CallData>, defaultBlock?: BlockParam): void {
assert.doesConformToSchema('callData', callData, schemas.callDataSchema, [
schemas.addressSchema,
schemas.numberSchema,
schemas.jsNumber,
]);
if (defaultBlock !== undefined) {
assert.isBlockParam('defaultBlock', defaultBlock);
}
}
private static _removeUndefinedProperties<T>(props: any): T {
const clonedProps = { ...props };
Object.keys(clonedProps).forEach(key => clonedProps[key] === undefined && delete clonedProps[key]);
return clonedProps;
}
protected _promiseWithTransactionHash(
txHashPromise: Promise<string>,
opts: AwaitTransactionSuccessOpts,
): PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs> {
return new PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs>(
txHashPromise,
(async (): Promise<TransactionReceiptWithDecodedLogs> => {
// When the transaction hash resolves, wait for it to be mined.
return this._web3Wrapper.awaitTransactionSuccessAsync(
await txHashPromise,
opts.pollingIntervalMs,
opts.timeoutMs,
);
})(),
);
}
protected async _applyDefaultsToTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
// Gas amount sourced with the following priorities:
// 1. Optional param passed in to public method call
// 2. Global config passed in at library instantiation
// 3. Gas estimate calculation + safety margin
// tslint:disable-next-line:no-object-literal-type-assertion
const txDataWithDefaults = {
to: this.address,
...this._web3Wrapper.getContractDefaults(),
...BaseContract._removeUndefinedProperties(txData),
} as T;
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected async _evmExecAsync(encodedData: string): Promise<string> {
const encodedDataBytes = Buffer.from(encodedData.substr(2), 'hex');
const addressBuf = Buffer.from(this.address.substr(2), 'hex');
// should only run once, the first time it is called
if (this._evmIfExists === undefined) {
const vm = new VM({});
const psm = new PStateManager(vm.stateManager);
// create an account with 1 ETH
const accountPk = Buffer.from(ARBITRARY_PRIVATE_KEY, 'hex');
const accountAddress = util.privateToAddress(accountPk);
const account = new Account({ balance: 1e18 });
await psm.putAccount(accountAddress, account);
// 'deploy' the contract
if (this._deployedBytecodeIfExists === undefined) {
const contractCode = await this._web3Wrapper.getContractCodeAsync(this.address);
this._deployedBytecodeIfExists = Buffer.from(contractCode.substr(2), 'hex');
}
await psm.putContractCode(addressBuf, this._deployedBytecodeIfExists);
// save for later
this._evmIfExists = vm;
this._evmAccountIfExists = accountAddress;
}
let rawCallResult;
try {
const result = await this._evmIfExists.runCall({
to: addressBuf,
caller: this._evmAccountIfExists,
origin: this._evmAccountIfExists,
data: encodedDataBytes,
});
rawCallResult = `0x${result.execResult.returnValue.toString('hex')}`;
} catch (err) {
BaseContract._throwIfThrownErrorIsRevertError(err);
throw err;
}
BaseContract._throwIfCallResultIsRevertError(rawCallResult);
return rawCallResult;
}
protected async _performCallAsync(callData: Partial<CallData>, defaultBlock?: BlockParam): Promise<string> {
const callDataWithDefaults = await this._applyDefaultsToTxDataAsync(callData);
let rawCallResult: string;
try {
rawCallResult = await this._web3Wrapper.callAsync(callDataWithDefaults, defaultBlock);
} catch (err) {
BaseContract._throwIfThrownErrorIsRevertError(err);
throw err;
}
BaseContract._throwIfCallResultIsRevertError(rawCallResult);
return rawCallResult;
}
protected _lookupAbiEncoder(functionSignature: string): AbiEncoder.Method {
const abiEncoder = this._abiEncoderByFunctionSignature[functionSignature];
if (abiEncoder === undefined) {
throw new Error(`Failed to lookup method with function signature '${functionSignature}'`);
}
return abiEncoder;
}
protected _lookupAbi(functionSignature: string): MethodAbi {
const methodAbi = this.abi.find((abiDefinition: AbiDefinition) => {
if (abiDefinition.type !== AbiType.Function) {
return false;
}
// tslint:disable-next-line:no-unnecessary-type-assertion
const abiFunctionSignature = new AbiEncoder.Method(abiDefinition as MethodAbi).getSignature();
if (abiFunctionSignature === functionSignature) {
return true;
}
return false;
}) as MethodAbi;
return methodAbi;
}
protected _strictEncodeArguments(functionSignature: string, functionArguments: any): string {
const abiEncoder = this._lookupAbiEncoder(functionSignature);
const inputAbi = abiEncoder.getDataItem().components;
if (inputAbi === undefined) {
throw new Error(`Undefined Method Input ABI`);
}
const abiEncodedArguments = abiEncoder.encode(functionArguments);
return abiEncodedArguments;
}
/// @dev Constructs a contract wrapper.
/// @param contractName Name of contract.
/// @param abi of the contract.
/// @param address of the deployed contract.
/// @param supportedProvider for communicating with an ethereum node.
/// @param logDecodeDependencies the name and ABI of contracts whose event logs are
/// decoded by this wrapper.
/// @param deployedBytecode the deployedBytecode of the contract, used for executing
/// pure Solidity functions in memory. This is different from the bytecode.
constructor(
contractName: string,
abi: ContractAbi,
address: string,
supportedProvider: SupportedProvider,
callAndTxnDefaults?: Partial<CallData>,
logDecodeDependencies?: { [contractName: string]: ContractAbi },
deployedBytecode?: string,
) {
assert.isString('contractName', contractName);
assert.isETHAddressHex('address', address);
if (deployedBytecode !== undefined && deployedBytecode !== '') {
// `deployedBytecode` might contain references to
// unlinked libraries and, hence, would not be a hex string. We'll just
// leave `_deployedBytecodeIfExists` empty if this is the case.
// TODO(dorothy-zbornak): We should link the `deployedBytecode`
// beforehand in the generated wrappers.
try {
assert.isHexString('deployedBytecode', deployedBytecode);
this._deployedBytecodeIfExists = Buffer.from(deployedBytecode.substr(2), 'hex');
} catch (err) {
// Do nothing.
}
}
const provider = providerUtils.standardizeOrThrow(supportedProvider);
if (callAndTxnDefaults !== undefined) {
assert.doesConformToSchema('callAndTxnDefaults', callAndTxnDefaults, schemas.callDataSchema, [
schemas.addressSchema,
schemas.numberSchema,
schemas.jsNumber,
]);
}
this.contractName = contractName;
this._web3Wrapper = new Web3Wrapper(provider, callAndTxnDefaults);
this.abi = abi;
this.address = address;
const methodAbis = this.abi.filter(
(abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Function,
) as MethodAbi[];
this._abiEncoderByFunctionSignature = {};
methodAbis.forEach(methodAbi => {
const abiEncoder = new AbiEncoder.Method(methodAbi);
const functionSignature = abiEncoder.getSignature();
this._abiEncoderByFunctionSignature[functionSignature] = abiEncoder;
this._web3Wrapper.abiDecoder.addABI(abi, contractName);
});
if (logDecodeDependencies) {
Object.entries(logDecodeDependencies).forEach(([dependencyName, dependencyAbi]) =>
this._web3Wrapper.abiDecoder.addABI(dependencyAbi, dependencyName),
);
}
}
}
| {
const constructorAbiIfExists = abi.find(
(abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Constructor,
// tslint:disable-next-line:no-unnecessary-type-assertion
) as ConstructorAbi | undefined;
if (constructorAbiIfExists !== undefined) {
return constructorAbiIfExists;
} else {
// If the constructor is not explicitly defined, it won't be included in the ABI. It is
// still callable however, so we construct what the ABI would look like were it to exist.
const defaultConstructorAbi: ConstructorAbi = {
type: AbiType.Constructor,
stateMutability: 'nonpayable',
payable: false,
inputs: [],
};
return defaultConstructorAbi;
}
} | identifier_body |
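The row above splits `index.ts` at the body of `_lookupConstructorAbi` (fim_type `identifier_body`), so the original file is simply the prefix, middle, and suffix cells concatenated in order. A minimal sketch of that reassembly, and of a fill-in-the-middle style prompt built from the same fields, is shown below; the `FimRow` interface and the `<fim_*>` sentinel strings are illustrative assumptions, not part of the dataset.

```typescript
// Sketch only: reassembling one row of this dataset. The field names mirror the
// table header above; the <fim_*> sentinels are assumed placeholders, not dataset values.
interface FimRow {
    file_name: string;
    prefix: string;
    middle: string;
    suffix: string;
    fim_type: string;
}

// The original source file is the concatenation prefix + middle + suffix.
function reconstructFile(row: FimRow): string {
    return `${row.prefix}${row.middle}${row.suffix}`;
}

// A prefix-suffix-middle (PSM) style training prompt; the exact sentinel strings
// depend on the target model's tokenizer and are assumed here for illustration.
function toPsmPrompt(row: FimRow): string {
    return `<fim_prefix>${row.prefix}<fim_suffix>${row.suffix}<fim_middle>${row.middle}`;
}
```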
index.ts | import { assert } from '@0x/assert';
import { schemas } from '@0x/json-schemas';
import {
AbiEncoder,
abiUtils,
BigNumber,
decodeBytesAsRevertError,
decodeThrownErrorAsRevertError,
providerUtils,
RevertError,
StringRevertError,
} from '@0x/utils';
import { Web3Wrapper } from '@0x/web3-wrapper';
import {
AbiDefinition,
AbiType,
BlockParam,
CallData,
ConstructorAbi,
ContractAbi,
DataItem,
MethodAbi,
SupportedProvider,
TransactionReceiptWithDecodedLogs,
TxData,
TxDataPayable,
} from 'ethereum-types';
import Account from 'ethereumjs-account';
import * as util from 'ethereumjs-util';
import { default as VM } from 'ethereumjs-vm';
import PStateManager from 'ethereumjs-vm/dist/state/promisified';
export { linkLibrariesInBytecode, methodAbiToFunctionSignature } from './utils';
import { AwaitTransactionSuccessOpts } from './types';
import { formatABIDataItem } from './utils';
export { SubscriptionManager } from './subscription_manager';
export {
ContractEvent,
SendTransactionOpts,
AwaitTransactionSuccessOpts,
ContractFunctionObj,
ContractTxFunctionObj,
SubscriptionErrors,
} from './types';
export interface AbiEncoderByFunctionSignature {
[key: string]: AbiEncoder.Method;
}
const ARBITRARY_PRIVATE_KEY = 'e331b6d69882b4cb4ea581d88e0b604039a3de5967688d3dcffdd2270c0fd109';
// tslint:disable: max-classes-per-file
/**
* @dev A promise-compatible type that exposes a `txHash` field.
* Not used by BaseContract, but generated contracts will return it in
* `awaitTransactionSuccessAsync()`.
* Maybe there's a better place for this.
*/
export class PromiseWithTransactionHash<T> implements Promise<T> {
public readonly txHashPromise: Promise<string>;
private readonly _promise: Promise<T>;
constructor(txHashPromise: Promise<string>, promise: Promise<T>) {
this.txHashPromise = txHashPromise;
this._promise = promise;
}
// tslint:disable:promise-function-async
// tslint:disable:async-suffix
public then<TResult>(
onFulfilled?: (v: T) => TResult | Promise<TResult>,
onRejected?: (reason: any) => Promise<never>,
): Promise<TResult> {
return this._promise.then<TResult>(onFulfilled, onRejected);
}
public catch<TResult>(onRejected?: (reason: any) => Promise<TResult>): Promise<TResult | T> {
return this._promise.catch(onRejected);
}
public finally(onFinally?: (() => void) | null): Promise<T> {
return this._promise.finally(onFinally);
}
// tslint:enable:promise-function-async
// tslint:enable:async-suffix
get [Symbol.toStringTag](): 'Promise' {
return this._promise[Symbol.toStringTag];
}
}
export class | {
protected _abiEncoderByFunctionSignature: AbiEncoderByFunctionSignature;
protected _web3Wrapper: Web3Wrapper;
public abi: ContractAbi;
public address: string;
public contractName: string;
public constructorArgs: any[] = [];
public _deployedBytecodeIfExists?: Buffer;
private _evmIfExists?: VM;
private _evmAccountIfExists?: Buffer;
protected static _formatABIDataItemList(
abis: DataItem[],
values: any[],
formatter: (type: string, value: any) => any,
): any {
return values.map((value: any, i: number) => formatABIDataItem(abis[i], value, formatter));
}
protected static _lowercaseAddress(type: string, value: string): string {
return type === 'address' ? value.toLowerCase() : value;
}
protected static _bigNumberToString(_type: string, value: any): any {
return BigNumber.isBigNumber(value) ? value.toString() : value;
}
protected static _lookupConstructorAbi(abi: ContractAbi): ConstructorAbi {
const constructorAbiIfExists = abi.find(
(abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Constructor,
// tslint:disable-next-line:no-unnecessary-type-assertion
) as ConstructorAbi | undefined;
if (constructorAbiIfExists !== undefined) {
return constructorAbiIfExists;
} else {
// If the constructor is not explicitly defined, it won't be included in the ABI. It is
// still callable however, so we construct what the ABI would look like were it to exist.
const defaultConstructorAbi: ConstructorAbi = {
type: AbiType.Constructor,
stateMutability: 'nonpayable',
payable: false,
inputs: [],
};
return defaultConstructorAbi;
}
}
protected static _throwIfCallResultIsRevertError(rawCallResult: string): void {
// Try to decode the call result as a revert error.
let revert: RevertError;
try {
revert = decodeBytesAsRevertError(rawCallResult);
} catch (err) {
// Can't decode it as a revert error, so assume it didn't revert.
return;
}
throw revert;
}
protected static _throwIfThrownErrorIsRevertError(error: Error): void {
// Try to decode a thrown error.
let revertError: RevertError;
try {
revertError = decodeThrownErrorAsRevertError(error);
} catch (err) {
// Can't decode it.
return;
}
// Re-cast StringRevertErrors as plain Errors for backwards-compatibility.
if (revertError instanceof StringRevertError) {
throw new Error(revertError.values.message as string);
}
throw revertError;
}
protected static _throwIfUnexpectedEmptyCallResult(rawCallResult: string, methodAbi: AbiEncoder.Method): void {
// With live nodes, we will receive an empty call result if:
// 1. The function has no return value.
// 2. The contract reverts without data.
// 3. The contract reverts with an invalid opcode (`assert(false)` or `invalid()`).
if (!rawCallResult || rawCallResult === '0x') {
const returnValueDataItem = methodAbi.getReturnValueDataItem();
if (returnValueDataItem.components === undefined || returnValueDataItem.components.length === 0) {
// Expected no result (which makes it hard to tell if the call reverted).
return;
}
throw new Error(`Function "${methodAbi.getSignature()}" reverted with no data`);
}
}
// Throws if the given arguments cannot be safely/correctly encoded based on
// the given inputAbi. An argument may not be considered safely encodeable
// if it overflows the corresponding Solidity type, there is a bug in the
// encoder, or the encoder performs unsafe type coercion.
public static strictArgumentEncodingCheck(inputAbi: DataItem[], args: any[]): string {
const abiEncoder = AbiEncoder.create(inputAbi);
const params = abiUtils.parseEthersParams(inputAbi);
const rawEncoded = abiEncoder.encode(args);
const rawDecoded = abiEncoder.decodeAsArray(rawEncoded);
for (let i = 0; i < rawDecoded.length; i++) {
const original = args[i];
const decoded = rawDecoded[i];
if (!abiUtils.isAbiDataEqual(params.names[i], params.types[i], original, decoded)) {
throw new Error(
`Cannot safely encode argument: ${params.names[i]} (${original}) of type ${
params.types[i]
}. (Possible type overflow or other encoding error)`,
);
}
}
return rawEncoded;
}
protected static async _applyDefaultsToContractTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
const txDataWithDefaults = BaseContract._removeUndefinedProperties<T>(txData);
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected static _assertCallParams(callData: Partial<CallData>, defaultBlock?: BlockParam): void {
assert.doesConformToSchema('callData', callData, schemas.callDataSchema, [
schemas.addressSchema,
schemas.numberSchema,
schemas.jsNumber,
]);
if (defaultBlock !== undefined) {
assert.isBlockParam('defaultBlock', defaultBlock);
}
}
private static _removeUndefinedProperties<T>(props: any): T {
const clonedProps = { ...props };
Object.keys(clonedProps).forEach(key => clonedProps[key] === undefined && delete clonedProps[key]);
return clonedProps;
}
protected _promiseWithTransactionHash(
txHashPromise: Promise<string>,
opts: AwaitTransactionSuccessOpts,
): PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs> {
return new PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs>(
txHashPromise,
(async (): Promise<TransactionReceiptWithDecodedLogs> => {
// When the transaction hash resolves, wait for it to be mined.
return this._web3Wrapper.awaitTransactionSuccessAsync(
await txHashPromise,
opts.pollingIntervalMs,
opts.timeoutMs,
);
})(),
);
}
protected async _applyDefaultsToTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
// Gas amount sourced with the following priorities:
// 1. Optional param passed in to public method call
// 2. Global config passed in at library instantiation
// 3. Gas estimate calculation + safety margin
// tslint:disable-next-line:no-object-literal-type-assertion
const txDataWithDefaults = {
to: this.address,
...this._web3Wrapper.getContractDefaults(),
...BaseContract._removeUndefinedProperties(txData),
} as T;
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected async _evmExecAsync(encodedData: string): Promise<string> {
const encodedDataBytes = Buffer.from(encodedData.substr(2), 'hex');
const addressBuf = Buffer.from(this.address.substr(2), 'hex');
// should only run once, the first time it is called
if (this._evmIfExists === undefined) {
const vm = new VM({});
const psm = new PStateManager(vm.stateManager);
// create an account with 1 ETH
const accountPk = Buffer.from(ARBITRARY_PRIVATE_KEY, 'hex');
const accountAddress = util.privateToAddress(accountPk);
const account = new Account({ balance: 1e18 });
await psm.putAccount(accountAddress, account);
// 'deploy' the contract
if (this._deployedBytecodeIfExists === undefined) {
const contractCode = await this._web3Wrapper.getContractCodeAsync(this.address);
this._deployedBytecodeIfExists = Buffer.from(contractCode.substr(2), 'hex');
}
await psm.putContractCode(addressBuf, this._deployedBytecodeIfExists);
// save for later
this._evmIfExists = vm;
this._evmAccountIfExists = accountAddress;
}
let rawCallResult;
try {
const result = await this._evmIfExists.runCall({
to: addressBuf,
caller: this._evmAccountIfExists,
origin: this._evmAccountIfExists,
data: encodedDataBytes,
});
rawCallResult = `0x${result.execResult.returnValue.toString('hex')}`;
} catch (err) {
BaseContract._throwIfThrownErrorIsRevertError(err);
throw err;
}
BaseContract._throwIfCallResultIsRevertError(rawCallResult);
return rawCallResult;
}
protected async _performCallAsync(callData: Partial<CallData>, defaultBlock?: BlockParam): Promise<string> {
const callDataWithDefaults = await this._applyDefaultsToTxDataAsync(callData);
let rawCallResult: string;
try {
rawCallResult = await this._web3Wrapper.callAsync(callDataWithDefaults, defaultBlock);
} catch (err) {
BaseContract._throwIfThrownErrorIsRevertError(err);
throw err;
}
BaseContract._throwIfCallResultIsRevertError(rawCallResult);
return rawCallResult;
}
protected _lookupAbiEncoder(functionSignature: string): AbiEncoder.Method {
const abiEncoder = this._abiEncoderByFunctionSignature[functionSignature];
if (abiEncoder === undefined) {
throw new Error(`Failed to lookup method with function signature '${functionSignature}'`);
}
return abiEncoder;
}
protected _lookupAbi(functionSignature: string): MethodAbi {
const methodAbi = this.abi.find((abiDefinition: AbiDefinition) => {
if (abiDefinition.type !== AbiType.Function) {
return false;
}
// tslint:disable-next-line:no-unnecessary-type-assertion
const abiFunctionSignature = new AbiEncoder.Method(abiDefinition as MethodAbi).getSignature();
if (abiFunctionSignature === functionSignature) {
return true;
}
return false;
}) as MethodAbi;
return methodAbi;
}
protected _strictEncodeArguments(functionSignature: string, functionArguments: any): string {
const abiEncoder = this._lookupAbiEncoder(functionSignature);
const inputAbi = abiEncoder.getDataItem().components;
if (inputAbi === undefined) {
throw new Error(`Undefined Method Input ABI`);
}
const abiEncodedArguments = abiEncoder.encode(functionArguments);
return abiEncodedArguments;
}
/// @dev Constructs a contract wrapper.
/// @param contractName Name of contract.
/// @param abi of the contract.
/// @param address of the deployed contract.
/// @param supportedProvider for communicating with an ethereum node.
/// @param logDecodeDependencies the name and ABI of contracts whose event logs are
/// decoded by this wrapper.
/// @param deployedBytecode the deployedBytecode of the contract, used for executing
/// pure Solidity functions in memory. This is different from the bytecode.
constructor(
contractName: string,
abi: ContractAbi,
address: string,
supportedProvider: SupportedProvider,
callAndTxnDefaults?: Partial<CallData>,
logDecodeDependencies?: { [contractName: string]: ContractAbi },
deployedBytecode?: string,
) {
assert.isString('contractName', contractName);
assert.isETHAddressHex('address', address);
if (deployedBytecode !== undefined && deployedBytecode !== '') {
// `deployedBytecode` might contain references to
// unlinked libraries and, hence, would not be a hex string. We'll just
// leave `_deployedBytecodeIfExists` empty if this is the case.
// TODO(dorothy-zbornak): We should link the `deployedBytecode`
// beforehand in the generated wrappers.
try {
assert.isHexString('deployedBytecode', deployedBytecode);
this._deployedBytecodeIfExists = Buffer.from(deployedBytecode.substr(2), 'hex');
} catch (err) {
// Do nothing.
}
}
const provider = providerUtils.standardizeOrThrow(supportedProvider);
if (callAndTxnDefaults !== undefined) {
assert.doesConformToSchema('callAndTxnDefaults', callAndTxnDefaults, schemas.callDataSchema, [
schemas.addressSchema,
schemas.numberSchema,
schemas.jsNumber,
]);
}
this.contractName = contractName;
this._web3Wrapper = new Web3Wrapper(provider, callAndTxnDefaults);
this.abi = abi;
this.address = address;
const methodAbis = this.abi.filter(
(abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Function,
) as MethodAbi[];
this._abiEncoderByFunctionSignature = {};
methodAbis.forEach(methodAbi => {
const abiEncoder = new AbiEncoder.Method(methodAbi);
const functionSignature = abiEncoder.getSignature();
this._abiEncoderByFunctionSignature[functionSignature] = abiEncoder;
this._web3Wrapper.abiDecoder.addABI(abi, contractName);
});
if (logDecodeDependencies) {
Object.entries(logDecodeDependencies).forEach(([dependencyName, dependencyAbi]) =>
this._web3Wrapper.abiDecoder.addABI(dependencyAbi, dependencyName),
);
}
}
}
| BaseContract | identifier_name |
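This row splits the same file at the class name (fim_type `identifier_name`). The file defines `PromiseWithTransactionHash`, whose `txHashPromise` resolves as soon as the transaction is submitted while the wrapper itself resolves once it is mined; a hedged consumption sketch follows, assuming the module is published as `@0x/base-contract` and with `tx` standing in for the return value of a generated wrapper method.

```typescript
// Sketch only: consuming the PromiseWithTransactionHash type defined in the file above.
// The package name '@0x/base-contract' is an assumption about where this file is published.
import { PromiseWithTransactionHash } from '@0x/base-contract';
import { TransactionReceiptWithDecodedLogs } from 'ethereum-types';

async function logTransactionLifecycle(
    tx: PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs>,
): Promise<void> {
    // The transaction hash is available as soon as the transaction is submitted...
    const txHash = await tx.txHashPromise;
    console.log(`submitted: ${txHash}`);
    // ...while awaiting the wrapper itself resolves once the transaction is mined.
    const receipt = await tx;
    console.log(`mined in block ${receipt.blockNumber}`);
}
```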
index.ts | import { assert } from '@0x/assert';
import { schemas } from '@0x/json-schemas';
import {
AbiEncoder,
abiUtils,
BigNumber,
decodeBytesAsRevertError,
decodeThrownErrorAsRevertError,
providerUtils,
RevertError,
StringRevertError,
} from '@0x/utils';
import { Web3Wrapper } from '@0x/web3-wrapper';
import {
AbiDefinition,
AbiType,
BlockParam,
CallData,
ConstructorAbi,
ContractAbi,
DataItem,
MethodAbi,
SupportedProvider,
TransactionReceiptWithDecodedLogs,
TxData,
TxDataPayable,
} from 'ethereum-types';
import Account from 'ethereumjs-account';
import * as util from 'ethereumjs-util';
import { default as VM } from 'ethereumjs-vm';
import PStateManager from 'ethereumjs-vm/dist/state/promisified';
export { linkLibrariesInBytecode, methodAbiToFunctionSignature } from './utils';
import { AwaitTransactionSuccessOpts } from './types';
import { formatABIDataItem } from './utils';
export { SubscriptionManager } from './subscription_manager';
| ContractEvent,
SendTransactionOpts,
AwaitTransactionSuccessOpts,
ContractFunctionObj,
ContractTxFunctionObj,
SubscriptionErrors,
} from './types';
export interface AbiEncoderByFunctionSignature {
[key: string]: AbiEncoder.Method;
}
const ARBITRARY_PRIVATE_KEY = 'e331b6d69882b4cb4ea581d88e0b604039a3de5967688d3dcffdd2270c0fd109';
// tslint:disable: max-classes-per-file
/**
* @dev A promise-compatible type that exposes a `txHash` field.
* Not used by BaseContract, but generated contracts will return it in
* `awaitTransactionSuccessAsync()`.
* Maybe there's a better place for this.
*/
export class PromiseWithTransactionHash<T> implements Promise<T> {
public readonly txHashPromise: Promise<string>;
private readonly _promise: Promise<T>;
constructor(txHashPromise: Promise<string>, promise: Promise<T>) {
this.txHashPromise = txHashPromise;
this._promise = promise;
}
// tslint:disable:promise-function-async
// tslint:disable:async-suffix
public then<TResult>(
onFulfilled?: (v: T) => TResult | Promise<TResult>,
onRejected?: (reason: any) => Promise<never>,
): Promise<TResult> {
return this._promise.then<TResult>(onFulfilled, onRejected);
}
public catch<TResult>(onRejected?: (reason: any) => Promise<TResult>): Promise<TResult | T> {
return this._promise.catch(onRejected);
}
public finally(onFinally?: (() => void) | null): Promise<T> {
return this._promise.finally(onFinally);
}
// tslint:enable:promise-function-async
// tslint:enable:async-suffix
get [Symbol.toStringTag](): 'Promise' {
return this._promise[Symbol.toStringTag];
}
}
export class BaseContract {
protected _abiEncoderByFunctionSignature: AbiEncoderByFunctionSignature;
protected _web3Wrapper: Web3Wrapper;
public abi: ContractAbi;
public address: string;
public contractName: string;
public constructorArgs: any[] = [];
public _deployedBytecodeIfExists?: Buffer;
private _evmIfExists?: VM;
private _evmAccountIfExists?: Buffer;
protected static _formatABIDataItemList(
abis: DataItem[],
values: any[],
formatter: (type: string, value: any) => any,
): any {
return values.map((value: any, i: number) => formatABIDataItem(abis[i], value, formatter));
}
protected static _lowercaseAddress(type: string, value: string): string {
return type === 'address' ? value.toLowerCase() : value;
}
protected static _bigNumberToString(_type: string, value: any): any {
return BigNumber.isBigNumber(value) ? value.toString() : value;
}
protected static _lookupConstructorAbi(abi: ContractAbi): ConstructorAbi {
const constructorAbiIfExists = abi.find(
(abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Constructor,
// tslint:disable-next-line:no-unnecessary-type-assertion
) as ConstructorAbi | undefined;
if (constructorAbiIfExists !== undefined) {
return constructorAbiIfExists;
} else {
// If the constructor is not explicitly defined, it won't be included in the ABI. It is
// still callable however, so we construct what the ABI would look like were it to exist.
const defaultConstructorAbi: ConstructorAbi = {
type: AbiType.Constructor,
stateMutability: 'nonpayable',
payable: false,
inputs: [],
};
return defaultConstructorAbi;
}
}
protected static _throwIfCallResultIsRevertError(rawCallResult: string): void {
// Try to decode the call result as a revert error.
let revert: RevertError;
try {
revert = decodeBytesAsRevertError(rawCallResult);
} catch (err) {
// Can't decode it as a revert error, so assume it didn't revert.
return;
}
throw revert;
}
protected static _throwIfThrownErrorIsRevertError(error: Error): void {
// Try to decode a thrown error.
let revertError: RevertError;
try {
revertError = decodeThrownErrorAsRevertError(error);
} catch (err) {
// Can't decode it.
return;
}
// Re-cast StringRevertErrors as plain Errors for backwards-compatibility.
if (revertError instanceof StringRevertError) {
throw new Error(revertError.values.message as string);
}
throw revertError;
}
protected static _throwIfUnexpectedEmptyCallResult(rawCallResult: string, methodAbi: AbiEncoder.Method): void {
// With live nodes, we will receive an empty call result if:
// 1. The function has no return value.
// 2. The contract reverts without data.
// 3. The contract reverts with an invalid opcode (`assert(false)` or `invalid()`).
if (!rawCallResult || rawCallResult === '0x') {
const returnValueDataItem = methodAbi.getReturnValueDataItem();
if (returnValueDataItem.components === undefined || returnValueDataItem.components.length === 0) {
// Expected no result (which makes it hard to tell if the call reverted).
return;
}
throw new Error(`Function "${methodAbi.getSignature()}" reverted with no data`);
}
}
// Throws if the given arguments cannot be safely/correctly encoded based on
// the given inputAbi. An argument may not be considered safely encodeable
// if it overflows the corresponding Solidity type, there is a bug in the
// encoder, or the encoder performs unsafe type coercion.
public static strictArgumentEncodingCheck(inputAbi: DataItem[], args: any[]): string {
const abiEncoder = AbiEncoder.create(inputAbi);
const params = abiUtils.parseEthersParams(inputAbi);
const rawEncoded = abiEncoder.encode(args);
const rawDecoded = abiEncoder.decodeAsArray(rawEncoded);
for (let i = 0; i < rawDecoded.length; i++) {
const original = args[i];
const decoded = rawDecoded[i];
if (!abiUtils.isAbiDataEqual(params.names[i], params.types[i], original, decoded)) {
throw new Error(
`Cannot safely encode argument: ${params.names[i]} (${original}) of type ${
params.types[i]
}. (Possible type overflow or other encoding error)`,
);
}
}
return rawEncoded;
}
protected static async _applyDefaultsToContractTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
const txDataWithDefaults = BaseContract._removeUndefinedProperties<T>(txData);
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected static _assertCallParams(callData: Partial<CallData>, defaultBlock?: BlockParam): void {
assert.doesConformToSchema('callData', callData, schemas.callDataSchema, [
schemas.addressSchema,
schemas.numberSchema,
schemas.jsNumber,
]);
if (defaultBlock !== undefined) {
assert.isBlockParam('defaultBlock', defaultBlock);
}
}
private static _removeUndefinedProperties<T>(props: any): T {
const clonedProps = { ...props };
Object.keys(clonedProps).forEach(key => clonedProps[key] === undefined && delete clonedProps[key]);
return clonedProps;
}
protected _promiseWithTransactionHash(
txHashPromise: Promise<string>,
opts: AwaitTransactionSuccessOpts,
): PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs> {
return new PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs>(
txHashPromise,
(async (): Promise<TransactionReceiptWithDecodedLogs> => {
// When the transaction hash resolves, wait for it to be mined.
return this._web3Wrapper.awaitTransactionSuccessAsync(
await txHashPromise,
opts.pollingIntervalMs,
opts.timeoutMs,
);
})(),
);
}
protected async _applyDefaultsToTxDataAsync<T extends Partial<TxData | TxDataPayable>>(
txData: T,
estimateGasAsync?: (txData: T) => Promise<number>,
): Promise<TxData> {
// Gas amount sourced with the following priorities:
// 1. Optional param passed in to public method call
// 2. Global config passed in at library instantiation
// 3. Gas estimate calculation + safety margin
// tslint:disable-next-line:no-object-literal-type-assertion
const txDataWithDefaults = {
to: this.address,
...this._web3Wrapper.getContractDefaults(),
...BaseContract._removeUndefinedProperties(txData),
} as T;
if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) {
txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults);
}
if (txDataWithDefaults.from !== undefined) {
txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase();
}
return txDataWithDefaults as TxData;
}
protected async _evmExecAsync(encodedData: string): Promise<string> {
const encodedDataBytes = Buffer.from(encodedData.substr(2), 'hex');
const addressBuf = Buffer.from(this.address.substr(2), 'hex');
// should only run once, the first time it is called
if (this._evmIfExists === undefined) {
const vm = new VM({});
const psm = new PStateManager(vm.stateManager);
// create an account with 1 ETH
const accountPk = Buffer.from(ARBITRARY_PRIVATE_KEY, 'hex');
const accountAddress = util.privateToAddress(accountPk);
const account = new Account({ balance: 1e18 });
await psm.putAccount(accountAddress, account);
// 'deploy' the contract
if (this._deployedBytecodeIfExists === undefined) {
const contractCode = await this._web3Wrapper.getContractCodeAsync(this.address);
this._deployedBytecodeIfExists = Buffer.from(contractCode.substr(2), 'hex');
}
await psm.putContractCode(addressBuf, this._deployedBytecodeIfExists);
// save for later
this._evmIfExists = vm;
this._evmAccountIfExists = accountAddress;
}
let rawCallResult;
try {
const result = await this._evmIfExists.runCall({
to: addressBuf,
caller: this._evmAccountIfExists,
origin: this._evmAccountIfExists,
data: encodedDataBytes,
});
rawCallResult = `0x${result.execResult.returnValue.toString('hex')}`;
} catch (err) {
BaseContract._throwIfThrownErrorIsRevertError(err);
throw err;
}
BaseContract._throwIfCallResultIsRevertError(rawCallResult);
return rawCallResult;
}
protected async _performCallAsync(callData: Partial<CallData>, defaultBlock?: BlockParam): Promise<string> {
const callDataWithDefaults = await this._applyDefaultsToTxDataAsync(callData);
let rawCallResult: string;
try {
rawCallResult = await this._web3Wrapper.callAsync(callDataWithDefaults, defaultBlock);
} catch (err) {
BaseContract._throwIfThrownErrorIsRevertError(err);
throw err;
}
BaseContract._throwIfCallResultIsRevertError(rawCallResult);
return rawCallResult;
}
protected _lookupAbiEncoder(functionSignature: string): AbiEncoder.Method {
const abiEncoder = this._abiEncoderByFunctionSignature[functionSignature];
if (abiEncoder === undefined) {
throw new Error(`Failed to lookup method with function signature '${functionSignature}'`);
}
return abiEncoder;
}
protected _lookupAbi(functionSignature: string): MethodAbi {
const methodAbi = this.abi.find((abiDefinition: AbiDefinition) => {
if (abiDefinition.type !== AbiType.Function) {
return false;
}
// tslint:disable-next-line:no-unnecessary-type-assertion
const abiFunctionSignature = new AbiEncoder.Method(abiDefinition as MethodAbi).getSignature();
if (abiFunctionSignature === functionSignature) {
return true;
}
return false;
}) as MethodAbi;
return methodAbi;
}
protected _strictEncodeArguments(functionSignature: string, functionArguments: any): string {
const abiEncoder = this._lookupAbiEncoder(functionSignature);
const inputAbi = abiEncoder.getDataItem().components;
if (inputAbi === undefined) {
throw new Error(`Undefined Method Input ABI`);
}
const abiEncodedArguments = abiEncoder.encode(functionArguments);
return abiEncodedArguments;
}
/// @dev Constructs a contract wrapper.
/// @param contractName Name of contract.
/// @param abi of the contract.
/// @param address of the deployed contract.
/// @param supportedProvider for communicating with an ethereum node.
/// @param logDecodeDependencies the name and ABI of contracts whose event logs are
/// decoded by this wrapper.
/// @param deployedBytecode the deployedBytecode of the contract, used for executing
/// pure Solidity functions in memory. This is different from the bytecode.
constructor(
contractName: string,
abi: ContractAbi,
address: string,
supportedProvider: SupportedProvider,
callAndTxnDefaults?: Partial<CallData>,
logDecodeDependencies?: { [contractName: string]: ContractAbi },
deployedBytecode?: string,
) {
assert.isString('contractName', contractName);
assert.isETHAddressHex('address', address);
if (deployedBytecode !== undefined && deployedBytecode !== '') {
// `deployedBytecode` might contain references to
// unlinked libraries and, hence, would not be a hex string. We'll just
// leave `_deployedBytecodeIfExists` empty if this is the case.
// TODO(dorothy-zbornak): We should link the `deployedBytecode`
// beforehand in the generated wrappers.
try {
assert.isHexString('deployedBytecode', deployedBytecode);
this._deployedBytecodeIfExists = Buffer.from(deployedBytecode.substr(2), 'hex');
} catch (err) {
// Do nothing.
}
}
const provider = providerUtils.standardizeOrThrow(supportedProvider);
if (callAndTxnDefaults !== undefined) {
assert.doesConformToSchema('callAndTxnDefaults', callAndTxnDefaults, schemas.callDataSchema, [
schemas.addressSchema,
schemas.numberSchema,
schemas.jsNumber,
]);
}
this.contractName = contractName;
this._web3Wrapper = new Web3Wrapper(provider, callAndTxnDefaults);
this.abi = abi;
this.address = address;
const methodAbis = this.abi.filter(
(abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Function,
) as MethodAbi[];
this._abiEncoderByFunctionSignature = {};
methodAbis.forEach(methodAbi => {
const abiEncoder = new AbiEncoder.Method(methodAbi);
const functionSignature = abiEncoder.getSignature();
this._abiEncoderByFunctionSignature[functionSignature] = abiEncoder;
this._web3Wrapper.abiDecoder.addABI(abi, contractName);
});
if (logDecodeDependencies) {
Object.entries(logDecodeDependencies).forEach(([dependencyName, dependencyAbi]) =>
this._web3Wrapper.abiDecoder.addABI(dependencyAbi, dependencyName),
);
}
}
} | export { | random_line_split |
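This row is a random line split of the same file. Its static `strictArgumentEncodingCheck` helper encodes the arguments, decodes them back, and throws if the round trip does not match; the sketch below exercises it with a hand-written single-field `uint8` ABI, which is an illustrative assumption.

```typescript
// Sketch only: exercising BaseContract.strictArgumentEncodingCheck from the file above.
// The input ABI is hand-written for illustration; '@0x/base-contract' is the assumed package name.
import { BaseContract } from '@0x/base-contract';
import { BigNumber } from '@0x/utils';
import { DataItem } from 'ethereum-types';

const inputAbi: DataItem[] = [{ name: 'amount', type: 'uint8' }];

// 255 fits into a uint8, so this returns the ABI-encoded arguments as a hex string.
const encoded = BaseContract.strictArgumentEncodingCheck(inputAbi, [new BigNumber(255)]);
console.log(encoded);

// 256 overflows uint8; either the encoder itself or the strict round-trip
// comparison is expected to reject it with an error.
try {
    BaseContract.strictArgumentEncodingCheck(inputAbi, [new BigNumber(256)]);
} catch (err) {
    console.log((err as Error).message);
}
```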
cluster.go | package v1
/*
Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PgclusterResourcePlural ..
const PgclusterResourcePlural = "pgclusters"
// Pgcluster is the CRD that defines a Crunchy PG Cluster
//
// swagger:ignore Pgcluster
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Pgcluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec PgclusterSpec `json:"spec"`
Status PgclusterStatus `json:"status,omitempty"`
}
// PgclusterSpec is the CRD that defines a Crunchy PG Cluster Spec
// swagger:ignore
type PgclusterSpec struct {
Namespace string `json:"namespace"`
Name string `json:"name"`
ClusterName string `json:"clustername"`
Policies string `json:"policies"`
CCPImage string `json:"ccpimage"`
CCPImageTag string `json:"ccpimagetag"`
CCPImagePrefix string `json:"ccpimageprefix"`
PGOImagePrefix string `json:"pgoimageprefix"`
Port string `json:"port"`
PGBadgerPort string `json:"pgbadgerport"`
ExporterPort string `json:"exporterport"`
PrimaryStorage PgStorageSpec `json:"primarystorage"`
WALStorage PgStorageSpec `json:"walstorage"`
ArchiveStorage PgStorageSpec `json:"archivestorage"`
ReplicaStorage PgStorageSpec `json:"replicastorage"`
BackrestStorage PgStorageSpec `json:"backreststorage"`
// Resources behaves just like the "Requests" section of a Kubernetes
// container definition. You can set individual items such as "cpu" and
// "memory", e.g. "{ cpu: "0.5", memory: "2Gi" }"
Resources v1.ResourceList `json:"resources"`
// Limits stores the CPU/memory limits to use with PostgreSQL instances
//
// A long note on memory limits.
//
// We want to avoid the OOM killer coming for the PostgreSQL process or any
// of their backends per lots of guidance from the PostgreSQL documentation.
// Based on Kubernetes' behavior with limits, the best thing is to not set
// them. However, if they ever do set, we suggest that you have
// Request == Limit to get the Guaranteed QoS
//
// Guaranteed QoS prevents a backend from being first in line to be killed if
// the *Node* has memory pressure, but if there is, say
// a runaway client backend that causes the *Pod* to exceed its memory
// limit, a backend can still be killed by the OOM killer, which is not
// great.
//
// As such, given the choice, the preference is for the Pod to be evicted
// and have a failover event, vs. having an individual client backend killed
// and causing potential "bad things."
//
// For more info on PostgreSQL and Kubernetes memory management, see:
//
// https://www.postgresql.org/docs/current/kernel-resources.html#LINUX-MEMORY-OVERCOMMIT
// https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#how-pods-with-resource-limits-are-run
Limits v1.ResourceList `json:"limits"`
// BackrestResources, if specified, contains the container request resources
// for the pgBackRest Deployment for this PostgreSQL cluster
BackrestResources v1.ResourceList `json:"backrestResources"`
// BackrestLimits, if specified, contains the container resource limits
// for the pgBackRest Deployment for this PostgreSQL cluster
BackrestLimits v1.ResourceList `json:"backrestLimits"`
// PgBouncer contains all of the settings to properly maintain a pgBouncer
// implementation
PgBouncer PgBouncerSpec `json:"pgBouncer"`
User string `json:"user"`
Database string `json:"database"`
Replicas string `json:"replicas"`
UserSecretName string `json:"usersecretname"`
RootSecretName string `json:"rootsecretname"`
PrimarySecretName string `json:"primarysecretname"`
CollectSecretName string `json:"collectSecretName"`
Status string `json:"status"`
CustomConfig string `json:"customconfig"`
UserLabels map[string]string `json:"userlabels"`
PodAntiAffinity PodAntiAffinitySpec `json:"podAntiAffinity"`
SyncReplication *bool `json:"syncReplication"`
BackrestS3Bucket string `json:"backrestS3Bucket"`
BackrestS3Region string `json:"backrestS3Region"`
BackrestS3Endpoint string `json:"backrestS3Endpoint"`
BackrestS3URIStyle string `json:"backrestS3URIStyle"`
BackrestS3VerifyTLS string `json:"backrestS3VerifyTLS"`
BackrestRepoPath string `json:"backrestRepoPath"`
TablespaceMounts map[string]PgStorageSpec `json:"tablespaceMounts"`
TLS TLSSpec `json:"tls"`
TLSOnly bool `json:"tlsOnly"`
Standby bool `json:"standby"`
Shutdown bool `json:"shutdown"`
PGDataSource PGDataSourceSpec `json:"pgDataSource"`
}
// PGDataSourceSpec defines the data source that should be used to populate the initial PGDATA
// directory when bootstrapping a new PostgreSQL cluster
// swagger:ignore
type PGDataSourceSpec struct {
RestoreFrom string `json:"restoreFrom"`
RestoreOpts string `json:"restoreOpts"`
}
// PgclusterList is the CRD that defines a Crunchy PG Cluster List
// swagger:ignore
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PgclusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Pgcluster `json:"items"`
}
// PgclusterStatus is the CRD that defines PG Cluster Status
// swagger:ignore
type PgclusterStatus struct {
State PgclusterState `json:"state,omitempty"`
Message string `json:"message,omitempty"`
}
// PgclusterState is the crd that defines PG Cluster Stage
// swagger:ignore
type PgclusterState string
// PodAntiAffinityDeployment distinguishes between the different types of
// Deployments that can leverage PodAntiAffinity
type PodAntiAffinityDeployment int
// PodAntiAffinityType defines the different types of anti-affinity rules applied to pg
// clusters when utilizing the default pod anti-affinity rules provided by the PostgreSQL Operator,
// which are enabled for a new pg cluster by default. Valid Values include "required" for
// requiredDuringSchedulingIgnoredDuringExecution anti-affinity, "preferred" for
// preferredDuringSchedulingIgnoredDuringExecution anti-affinity, and "disabled" to disable the
// default pod anti-affinity rules for the pg cluster all together.
type PodAntiAffinityType string
// PodAntiAffinitySpec provides multiple configurations for how pod
// anti-affinity can be set.
// - "Default" is the default rule that applies to all Pods that are a part of
// the PostgreSQL cluster
// - "PgBackrest" applies just to the pgBackRest repository Pods in said
// Deployment
// - "PgBouncer" applies to just pgBouncer Pods in said Deployment
// swagger:ignore
type PodAntiAffinitySpec struct {
Default PodAntiAffinityType `json:"default"`
PgBackRest PodAntiAffinityType `json:"pgBackRest"`
PgBouncer PodAntiAffinityType `json:"pgBouncer"`
}
// PgBouncerSpec is a struct that is used within the Cluster specification that
// provides the attributes for managing a PgBouncer implementation, including:
// - is it enabled?
// - what resources it should consume
// - the total number of replicas
type PgBouncerSpec struct {
// Replicas represents the total number of Pods to deploy with pgBouncer,
// which effectively enables/disables the pgBouncer.
//
// if it is set to 0 or less, it is disabled.
//
// if it is set to 1 or more, it is enabled
Replicas int32 `json:"replicas"`
// Resources, if specified, contains the container request resources
// for any pgBouncer Deployments that are part of a PostgreSQL cluster
Resources v1.ResourceList `json:"resources"`
// Limits, if specified, contains the container resource limits
// for any pgBouncer Deployments that are part of a PostgreSQL cluster
Limits v1.ResourceList `json:"limits"`
}
// Enabled returns true if the pgBouncer is enabled for the cluster, i.e. there
// is at least one replica set
func (s *PgBouncerSpec) Enabled() bool {
return s.Replicas > 0
}
// TLSSpec contains the information to set up a TLS-enabled PostgreSQL cluster
type TLSSpec struct {
// CASecret contains the name of the secret to use as the trusted CA for the
// TLSSecret
// This is our own format and should contain at least one key: "ca.crt"
// It can also contain a key "ca.crl" which is the certificate revocation list
CASecret string `json:"caSecret"`
// ReplicationTLSSecret contains the name of the secret that specifies a TLS
// keypair that can be used by the replication user (e.g. "primaryuser") to
// perform certificate based authentication between replicas.
// The keypair must be considered valid by the CA specified in the CASecret
ReplicationTLSSecret string `json:"replicationTLSSecret"`
// TLSSecret contains the name of the secret to use that contains the TLS
// keypair for the PostgreSQL server
// This follows the Kubernetes secret format ("kubernetes.io/tls") which has
// two keys: tls.crt and tls.key
TLSSecret string `json:"tlsSecret"`
}
// IsTLSEnabled returns true if the cluster is TLS enabled, i.e. both the TLS
// secret name and the CA secret name are available
func (t TLSSpec) IsTLSEnabled() bool {
return (t.TLSSecret != "" && t.CASecret != "")
}
const (
// PgclusterStateCreated ...
PgclusterStateCreated PgclusterState = "pgcluster Created"
// PgclusterStateProcessed ...
PgclusterStateProcessed PgclusterState = "pgcluster Processed"
// PgclusterStateInitialized ...
PgclusterStateInitialized PgclusterState = "pgcluster Initialized" | // PgclusterStateBootstrapping defines the state of a cluster when it is being bootstrapped
// from an existing data source
PgclusterStateBootstrapping PgclusterState = "pgcluster Bootstrapping"
// PgclusterStateBootstrapped defines the state of a cluster when it has been bootstrapped
// successfully from an existing data source
PgclusterStateBootstrapped PgclusterState = "pgcluster Bootstrapped"
// PgclusterStateRestore ...
PgclusterStateRestore PgclusterState = "pgcluster Restoring"
// PgclusterStateShutdown indicates that the cluster has been shut down (i.e. the primary)
// deployment has been scaled to 0
PgclusterStateShutdown PgclusterState = "pgcluster Shutdown"
// PodAntiAffinityRequired results in requiredDuringSchedulingIgnoredDuringExecution for any
// default pod anti-affinity rules applied to pg clusters
PodAntiAffinityRequired PodAntiAffinityType = "required"
// PodAntiAffinityPreffered results in preferredDuringSchedulingIgnoredDuringExecution for any
// default pod anti-affinity rules applied to pg clusters
PodAntiAffinityPreffered PodAntiAffinityType = "preferred"
// PodAntiAffinityDisabled disables any default pod anti-affinity rules applied to pg clusters
PodAntiAffinityDisabled PodAntiAffinityType = "disabled"
)
// The list of different types of PodAntiAffinityDeployments
const (
PodAntiAffinityDeploymentDefault PodAntiAffinityDeployment = iota
PodAntiAffinityDeploymentPgBackRest
PodAntiAffinityDeploymentPgBouncer
)
// ValidatePodAntiAffinityType is responsible for validating whether or not the type of pod
// anti-affinity specified is valid
func (p PodAntiAffinityType) Validate() error {
switch p {
case
PodAntiAffinityRequired,
PodAntiAffinityPreffered,
PodAntiAffinityDisabled,
"":
return nil
}
return fmt.Errorf("Invalid pod anti-affinity type. Valid values are '%s', '%s' or '%s'",
PodAntiAffinityRequired, PodAntiAffinityPreffered, PodAntiAffinityDisabled)
} | random_line_split | |
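The `cluster.go` rows define the Pgcluster CRD types for the Crunchy PostgreSQL Operator, including the `Validate` helper on `PodAntiAffinityType` shown above. The Go sketch below applies that helper to each field of a `PodAntiAffinitySpec`; the import path and the sample values are assumptions for illustration.

```go
// Sketch only: validating every field of a PodAntiAffinitySpec with the
// PodAntiAffinityType.Validate helper defined above. The import path is assumed
// and may differ between operator releases.
package main

import (
	"fmt"

	crv1 "github.com/crunchydata/postgres-operator/apis/crunchydata.com/v1"
)

func main() {
	spec := crv1.PodAntiAffinitySpec{
		Default:    crv1.PodAntiAffinityRequired,
		PgBackRest: crv1.PodAntiAffinityPreffered, // spelling matches the exported constant
		PgBouncer:  "sometimes",                   // deliberately invalid
	}

	for name, t := range map[string]crv1.PodAntiAffinityType{
		"default":    spec.Default,
		"pgBackRest": spec.PgBackRest,
		"pgBouncer":  spec.PgBouncer,
	} {
		if err := t.Validate(); err != nil {
			fmt.Printf("%s: %v\n", name, err)
		}
	}
}
```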
cluster.go | package v1
/*
Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PgclusterResourcePlural ..
const PgclusterResourcePlural = "pgclusters"
// Pgcluster is the CRD that defines a Crunchy PG Cluster
//
// swagger:ignore Pgcluster
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Pgcluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec PgclusterSpec `json:"spec"`
Status PgclusterStatus `json:"status,omitempty"`
}
// PgclusterSpec is the CRD that defines a Crunchy PG Cluster Spec
// swagger:ignore
type PgclusterSpec struct {
Namespace string `json:"namespace"`
Name string `json:"name"`
ClusterName string `json:"clustername"`
Policies string `json:"policies"`
CCPImage string `json:"ccpimage"`
CCPImageTag string `json:"ccpimagetag"`
CCPImagePrefix string `json:"ccpimageprefix"`
PGOImagePrefix string `json:"pgoimageprefix"`
Port string `json:"port"`
PGBadgerPort string `json:"pgbadgerport"`
ExporterPort string `json:"exporterport"`
PrimaryStorage PgStorageSpec `json:"primarystorage"`
WALStorage PgStorageSpec `json:"walstorage"`
ArchiveStorage PgStorageSpec `json:"archivestorage"`
ReplicaStorage PgStorageSpec `json:"replicastorage"`
BackrestStorage PgStorageSpec `json:"backreststorage"`
// Resources behaves just like the "Requests" section of a Kubernetes
// container definition. You can set individual items such as "cpu" and
// "memory", e.g. "{ cpu: "0.5", memory: "2Gi" }"
Resources v1.ResourceList `json:"resources"`
// Limits stores the CPU/memory limits to use with PostgreSQL instances
//
// A long note on memory limits.
//
// We want to avoid the OOM killer coming for the PostgreSQL process or any
// of its backends, per guidance from the PostgreSQL documentation.
// Based on Kubernetes' behavior with limits, the best thing is to not set
// them. However, if limits are ever set, we suggest that you have
// Request == Limit to get the Guaranteed QoS
//
// Guaranteed QoS prevents a backend from being first in line to be killed if
// the *Node* has memory pressure, but if there is, say
// a runaway client backend that causes the *Pod* to exceed its memory
// limit, a backend can still be killed by the OOM killer, which is not
// great.
//
// As such, given the choice, the preference is for the Pod to be evicted
// and have a failover event, vs. having an individual client backend killed
// and causing potential "bad things."
//
// For more info on PostgreSQL and Kubernetes memory management, see:
//
// https://www.postgresql.org/docs/current/kernel-resources.html#LINUX-MEMORY-OVERCOMMIT
// https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#how-pods-with-resource-limits-are-run
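//
// As a minimal, hypothetical sketch of the Request == Limit setup described
// above (the values are illustrative only, not recommendations), assuming the
// k8s.io/apimachinery/pkg/api/resource package is imported as "resource":
//
//	spec.Resources = v1.ResourceList{
//		v1.ResourceCPU:    resource.MustParse("1"),
//		v1.ResourceMemory: resource.MustParse("2Gi"),
//	}
//	spec.Limits = v1.ResourceList{
//		v1.ResourceCPU:    resource.MustParse("1"),
//		v1.ResourceMemory: resource.MustParse("2Gi"),
//	}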
Limits v1.ResourceList `json:"limits"`
// BackrestResources, if specified, contains the container request resources
// for the pgBackRest Deployment for this PostgreSQL cluster
BackrestResources v1.ResourceList `json:"backrestResources"`
// BackrestLimits, if specified, contains the container resource limits
// for the pgBackRest Deployment for this PostgreSQL cluster
BackrestLimits v1.ResourceList `json:"backrestLimits"`
// PgBouncer contains all of the settings to properly maintain a pgBouncer
// implementation
PgBouncer PgBouncerSpec `json:"pgBouncer"`
User string `json:"user"`
Database string `json:"database"`
Replicas string `json:"replicas"`
UserSecretName string `json:"usersecretname"`
RootSecretName string `json:"rootsecretname"`
PrimarySecretName string `json:"primarysecretname"`
CollectSecretName string `json:"collectSecretName"`
Status string `json:"status"`
CustomConfig string `json:"customconfig"`
UserLabels map[string]string `json:"userlabels"`
PodAntiAffinity PodAntiAffinitySpec `json:"podAntiAffinity"`
SyncReplication *bool `json:"syncReplication"`
BackrestS3Bucket string `json:"backrestS3Bucket"`
BackrestS3Region string `json:"backrestS3Region"`
BackrestS3Endpoint string `json:"backrestS3Endpoint"`
BackrestS3URIStyle string `json:"backrestS3URIStyle"`
BackrestS3VerifyTLS string `json:"backrestS3VerifyTLS"`
BackrestRepoPath string `json:"backrestRepoPath"`
TablespaceMounts map[string]PgStorageSpec `json:"tablespaceMounts"`
TLS TLSSpec `json:"tls"`
TLSOnly bool `json:"tlsOnly"`
Standby bool `json:"standby"`
Shutdown bool `json:"shutdown"`
PGDataSource PGDataSourceSpec `json:"pgDataSource"`
}
// PGDataSourceSpec defines the data source that should be used to populate the initial PGDATA
// directory when bootstrapping a new PostgreSQL cluster
// swagger:ignore
type PGDataSourceSpec struct {
RestoreFrom string `json:"restoreFrom"`
RestoreOpts string `json:"restoreOpts"`
}
// PgclusterList is the CRD that defines a Crunchy PG Cluster List
// swagger:ignore
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PgclusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Pgcluster `json:"items"`
}
// PgclusterStatus is the CRD that defines PG Cluster Status
// swagger:ignore
type PgclusterStatus struct {
State PgclusterState `json:"state,omitempty"`
Message string `json:"message,omitempty"`
}
// PgclusterState is the CRD that defines PG Cluster State
// swagger:ignore
type PgclusterState string
// PodAntiAffinityDeployment distinguishes between the different types of
// Deployments that can leverage PodAntiAffinity
type PodAntiAffinityDeployment int
// PodAntiAffinityType defines the different types of anti-affinity rules applied to pg
// clusters when utilizing the default pod anti-affinity rules provided by the PostgreSQL Operator,
// which are enabled for a new pg cluster by default. Valid Values include "required" for
// requiredDuringSchedulingIgnoredDuringExecution anti-affinity, "preferred" for
// preferredDuringSchedulingIgnoredDuringExecution anti-affinity, and "disabled" to disable the
// default pod anti-affinity rules for the pg cluster all together.
type PodAntiAffinityType string
// PodAntiAffinitySpec provides multiple configurations for how pod
// anti-affinity can be set.
// - "Default" is the default rule that applies to all Pods that are a part of
// the PostgreSQL cluster
// - "PgBackrest" applies just to the pgBackRest repository Pods in said
// Deployment
// - "PgBouncer" applies to just pgBouncer Pods in said Deployment
// swagger:ignore
type PodAntiAffinitySpec struct {
Default PodAntiAffinityType `json:"default"`
PgBackRest PodAntiAffinityType `json:"pgBackRest"`
PgBouncer PodAntiAffinityType `json:"pgBouncer"`
}
// PgBouncerSpec is a struct that is used within the Cluster specification that
// provides the attributes for managing a PgBouncer implementation, including:
// - is it enabled?
// - what resources it should consume
// - the total number of replicas
type PgBouncerSpec struct {
// Replicas represents the total number of Pods to deploy with pgBouncer,
// which effectively enables/disables the pgBouncer.
//
// if it is set to 0 or less, it is disabled.
//
// if it is set to 1 or more, it is enabled
Replicas int32 `json:"replicas"`
// Resources, if specified, contains the container request resources
// for any pgBouncer Deployments that are part of a PostgreSQL cluster
Resources v1.ResourceList `json:"resources"`
// Limits, if specified, contains the container resource limits
// for any pgBouncer Deployments that are part of a PostgreSQL cluster
Limits v1.ResourceList `json:"limits"`
}
// Enabled returns true if the pgBouncer is enabled for the cluster, i.e. there
// is at least one replica configured
func (s *PgBouncerSpec) Enabled() bool {
return s.Replicas > 0
}
// TLSSpec contains the information to set up a TLS-enabled PostgreSQL cluster
type TLSSpec struct {
// CASecret contains the name of the secret to use as the trusted CA for the
// TLSSecret
// This is our own format and should contain at least one key: "ca.crt"
// It can also contain a key "ca.crl" which is the certificate revocation list
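// As a hypothetical example (the secret name below is a placeholder), such a
// secret could be created with:
//
//	kubectl create secret generic postgres-ca --from-file=ca.crt=./ca.crt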
CASecret string `json:"caSecret"`
// ReplicationTLSSecret contains the name of the secret that specifies a TLS
// keypair that can be used by the replication user (e.g. "primaryuser") to
// perform certificate based authentication between replicas.
// The keypair must be considered valid by the CA specified in the CASecret
ReplicationTLSSecret string `json:"replicationTLSSecret"`
// TLSSecret contains the name of the secret to use that contains the TLS
// keypair for the PostgreSQL server
// This follows the Kubernetes secret format ("kubernetes.io/tls") which has
// two keys: tls.crt and tls.key
TLSSecret string `json:"tlsSecret"`
}
// IsTLSEnabled returns true if the cluster is TLS enabled, i.e. both the TLS
// secret name and the CA secret name are available
func (t TLSSpec) IsTLSEnabled() bool |
const (
// PgclusterStateCreated ...
PgclusterStateCreated PgclusterState = "pgcluster Created"
// PgclusterStateProcessed ...
PgclusterStateProcessed PgclusterState = "pgcluster Processed"
// PgclusterStateInitialized ...
PgclusterStateInitialized PgclusterState = "pgcluster Initialized"
// PgclusterStateBootstrapping defines the state of a cluster when it is being bootstrapped
// from an existing data source
PgclusterStateBootstrapping PgclusterState = "pgcluster Bootstrapping"
// PgclusterStateBootstrapped defines the state of a cluster when it has been bootstrapped
// successfully from an existing data source
PgclusterStateBootstrapped PgclusterState = "pgcluster Bootstrapped"
// PgclusterStateRestore ...
PgclusterStateRestore PgclusterState = "pgcluster Restoring"
// PgclusterStateShutdown indicates that the cluster has been shut down (i.e. the primary
// deployment has been scaled to 0)
PgclusterStateShutdown PgclusterState = "pgcluster Shutdown"
// PodAntiAffinityRequired results in requiredDuringSchedulingIgnoredDuringExecution for any
// default pod anti-affinity rules applied to pg clusters
PodAntiAffinityRequired PodAntiAffinityType = "required"
// PodAntiAffinityPreffered results in preferredDuringSchedulingIgnoredDuringExecution for any
// default pod anti-affinity rules applied to pg clusters
PodAntiAffinityPreffered PodAntiAffinityType = "preferred"
// PodAntiAffinityDisabled disables any default pod anti-affinity rules applied to pg clusters
PodAntiAffinityDisabled PodAntiAffinityType = "disabled"
)
// The list of different types of PodAntiAffinityDeployments
const (
PodAntiAffinityDeploymentDefault PodAntiAffinityDeployment = iota
PodAntiAffinityDeploymentPgBackRest
PodAntiAffinityDeploymentPgBouncer
)
// Validate is responsible for validating whether or not the type of pod
// anti-affinity specified is valid
func (p PodAntiAffinityType) Validate() error {
switch p {
case
PodAntiAffinityRequired,
PodAntiAffinityPreffered,
PodAntiAffinityDisabled,
"":
return nil
}
return fmt.Errorf("Invalid pod anti-affinity type. Valid values are '%s', '%s' or '%s'",
PodAntiAffinityRequired, PodAntiAffinityPreffered, PodAntiAffinityDisabled)
}
| {
return (t.TLSSecret != "" && t.CASecret != "")
} | identifier_body |
cluster.go | package v1
/*
Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PgclusterResourcePlural ..
const PgclusterResourcePlural = "pgclusters"
// Pgcluster is the CRD that defines a Crunchy PG Cluster
//
// swagger:ignore Pgcluster
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Pgcluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec PgclusterSpec `json:"spec"`
Status PgclusterStatus `json:"status,omitempty"`
}
// PgclusterSpec is the CRD that defines a Crunchy PG Cluster Spec
// swagger:ignore
type PgclusterSpec struct {
Namespace string `json:"namespace"`
Name string `json:"name"`
ClusterName string `json:"clustername"`
Policies string `json:"policies"`
CCPImage string `json:"ccpimage"`
CCPImageTag string `json:"ccpimagetag"`
CCPImagePrefix string `json:"ccpimageprefix"`
PGOImagePrefix string `json:"pgoimageprefix"`
Port string `json:"port"`
PGBadgerPort string `json:"pgbadgerport"`
ExporterPort string `json:"exporterport"`
PrimaryStorage PgStorageSpec `json:"primarystorage"`
WALStorage PgStorageSpec `json:"walstorage"`
ArchiveStorage PgStorageSpec `json:"archivestorage"`
ReplicaStorage PgStorageSpec `json:"replicastorage"`
BackrestStorage PgStorageSpec `json:"backreststorage"`
// Resources behaves just like the "Requests" section of a Kubernetes
// container definition. You can set individual items such as "cpu" and
// "memory", e.g. "{ cpu: "0.5", memory: "2Gi" }"
Resources v1.ResourceList `json:"resources"`
// Limits stores the CPU/memory limits to use with PostgreSQL instances
//
// A long note on memory limits.
//
// We want to avoid the OOM killer coming for the PostgreSQL process or any
// of its backends, per guidance from the PostgreSQL documentation.
// Based on Kubernetes' behavior with limits, the best thing is to not set
// them. However, if limits are ever set, we suggest that you have
// Request == Limit to get the Guaranteed QoS
//
// Guaranteed QoS prevents a backend from being first in line to be killed if
// the *Node* has memory pressure, but if there is, say
// a runaway client backend that causes the *Pod* to exceed its memory
// limit, a backend can still be killed by the OOM killer, which is not
// great.
//
// As such, given the choice, the preference is for the Pod to be evicted
// and have a failover event, vs. having an individual client backend killed
// and causing potential "bad things."
//
// For more info on PostgreSQL and Kubernetes memory management, see:
//
// https://www.postgresql.org/docs/current/kernel-resources.html#LINUX-MEMORY-OVERCOMMIT
// https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#how-pods-with-resource-limits-are-run
Limits v1.ResourceList `json:"limits"`
// BackrestResources, if specified, contains the container request resources
// for the pgBackRest Deployment for this PostgreSQL cluster
BackrestResources v1.ResourceList `json:"backrestResources"`
// BackrestLimits, if specified, contains the container resource limits
// for the pgBackRest Deployment for this PostgreSQL cluster
BackrestLimits v1.ResourceList `json:"backrestLimits"`
// PgBouncer contains all of the settings to properly maintain a pgBouncer
// implementation
PgBouncer PgBouncerSpec `json:"pgBouncer"`
User string `json:"user"`
Database string `json:"database"`
Replicas string `json:"replicas"`
UserSecretName string `json:"usersecretname"`
RootSecretName string `json:"rootsecretname"`
PrimarySecretName string `json:"primarysecretname"`
CollectSecretName string `json:"collectSecretName"`
Status string `json:"status"`
CustomConfig string `json:"customconfig"`
UserLabels map[string]string `json:"userlabels"`
PodAntiAffinity PodAntiAffinitySpec `json:"podAntiAffinity"`
SyncReplication *bool `json:"syncReplication"`
BackrestS3Bucket string `json:"backrestS3Bucket"`
BackrestS3Region string `json:"backrestS3Region"`
BackrestS3Endpoint string `json:"backrestS3Endpoint"`
BackrestS3URIStyle string `json:"backrestS3URIStyle"`
BackrestS3VerifyTLS string `json:"backrestS3VerifyTLS"`
BackrestRepoPath string `json:"backrestRepoPath"`
TablespaceMounts map[string]PgStorageSpec `json:"tablespaceMounts"`
TLS TLSSpec `json:"tls"`
TLSOnly bool `json:"tlsOnly"`
Standby bool `json:"standby"`
Shutdown bool `json:"shutdown"`
PGDataSource PGDataSourceSpec `json:"pgDataSource"`
}
// PGDataSourceSpec defines the data source that should be used to populate the initial PGDATA
// directory when bootstrapping a new PostgreSQL cluster
// swagger:ignore
type PGDataSourceSpec struct {
RestoreFrom string `json:"restoreFrom"`
RestoreOpts string `json:"restoreOpts"`
}
// PgclusterList is the CRD that defines a Crunchy PG Cluster List
// swagger:ignore
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PgclusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Pgcluster `json:"items"`
}
// PgclusterStatus is the CRD that defines PG Cluster Status
// swagger:ignore
type PgclusterStatus struct {
State PgclusterState `json:"state,omitempty"`
Message string `json:"message,omitempty"`
}
// PgclusterState is the CRD that defines PG Cluster State
// swagger:ignore
type PgclusterState string
// PodAntiAffinityDeployment distinguishes between the different types of
// Deployments that can leverage PodAntiAffinity
type PodAntiAffinityDeployment int
// PodAntiAffinityType defines the different types of anti-affinity rules applied to pg
// clusters when utilizing the default pod anti-affinity rules provided by the PostgreSQL Operator,
// which are enabled for a new pg cluster by default. Valid Values include "required" for
// requiredDuringSchedulingIgnoredDuringExecution anti-affinity, "preferred" for
// preferredDuringSchedulingIgnoredDuringExecution anti-affinity, and "disabled" to disable the
// default pod anti-affinity rules for the pg cluster all together.
type PodAntiAffinityType string
// PodAntiAffinitySpec provides multiple configurations for how pod
// anti-affinity can be set.
// - "Default" is the default rule that applies to all Pods that are a part of
// the PostgreSQL cluster
// - "PgBackrest" applies just to the pgBackRest repository Pods in said
// Deployment
// - "PgBouncer" applies to just pgBouncer Pods in said Deployment
// swagger:ignore
type PodAntiAffinitySpec struct {
Default PodAntiAffinityType `json:"default"`
PgBackRest PodAntiAffinityType `json:"pgBackRest"`
PgBouncer PodAntiAffinityType `json:"pgBouncer"`
}
// PgBouncerSpec is a struct that is used within the Cluster specification that
// provides the attributes for managing a PgBouncer implementation, including:
// - is it enabled?
// - what resources it should consume
// - the total number of replicas
type PgBouncerSpec struct {
// Replicas represents the total number of Pods to deploy with pgBouncer,
// which effectively enables/disables the pgBouncer.
//
// if it is set to 0 or less, it is disabled.
//
// if it is set to 1 or more, it is enabled
Replicas int32 `json:"replicas"`
// Resources, if specified, contains the container request resources
// for any pgBouncer Deployments that are part of a PostgreSQL cluster
Resources v1.ResourceList `json:"resources"`
// Limits, if specified, contains the container resource limits
// for any pgBouncer Deployments that are part of a PostgreSQL cluster
Limits v1.ResourceList `json:"limits"`
}
// Enabled returns true if the pgBouncer is enabled for the cluster, i.e. there
// is at least one replica configured
func (s *PgBouncerSpec) | () bool {
return s.Replicas > 0
}
// TLSSpec contains the information to set up a TLS-enabled PostgreSQL cluster
type TLSSpec struct {
// CASecret contains the name of the secret to use as the trusted CA for the
// TLSSecret
// This is our own format and should contain at least one key: "ca.crt"
// It can also contain a key "ca.crl" which is the certificate revocation list
CASecret string `json:"caSecret"`
// ReplicationTLSSecret contains the name of the secret that specifies a TLS
// keypair that can be used by the replication user (e.g. "primaryuser") to
// perform certificate based authentication between replicas.
// The keypair must be considered valid by the CA specified in the CASecret
ReplicationTLSSecret string `json:"replicationTLSSecret"`
// TLSSecret contains the name of the secret to use that contains the TLS
// keypair for the PostgreSQL server
// This follows the Kubernetes secret format ("kubernetes.io/tls") which has
// two keys: tls.crt and tls.key
TLSSecret string `json:"tlsSecret"`
}
// IsTLSEnabled returns true if the cluster is TLS enabled, i.e. both the TLS
// secret name and the CA secret name are available
func (t TLSSpec) IsTLSEnabled() bool {
return (t.TLSSecret != "" && t.CASecret != "")
}
const (
// PgclusterStateCreated ...
PgclusterStateCreated PgclusterState = "pgcluster Created"
// PgclusterStateProcessed ...
PgclusterStateProcessed PgclusterState = "pgcluster Processed"
// PgclusterStateInitialized ...
PgclusterStateInitialized PgclusterState = "pgcluster Initialized"
// PgclusterStateBootstrapping defines the state of a cluster when it is being bootstrapped
// from an existing data source
PgclusterStateBootstrapping PgclusterState = "pgcluster Bootstrapping"
// PgclusterStateBootstrapped defines the state of a cluster when it has been bootstrapped
// successfully from an existing data source
PgclusterStateBootstrapped PgclusterState = "pgcluster Bootstrapped"
// PgclusterStateRestore ...
PgclusterStateRestore PgclusterState = "pgcluster Restoring"
// PgclusterStateShutdown indicates that the cluster has been shut down (i.e. the primary
// deployment has been scaled to 0)
PgclusterStateShutdown PgclusterState = "pgcluster Shutdown"
// PodAntiAffinityRequired results in requiredDuringSchedulingIgnoredDuringExecution for any
// default pod anti-affinity rules applied to pg clusters
PodAntiAffinityRequired PodAntiAffinityType = "required"
// PodAntiAffinityPreffered results in preferredDuringSchedulingIgnoredDuringExecution for any
// default pod anti-affinity rules applied to pg clusters
PodAntiAffinityPreffered PodAntiAffinityType = "preferred"
// PodAntiAffinityDisabled disables any default pod anti-affinity rules applied to pg clusters
PodAntiAffinityDisabled PodAntiAffinityType = "disabled"
)
// The list of different types of PodAntiAffinityDeployments
const (
PodAntiAffinityDeploymentDefault PodAntiAffinityDeployment = iota
PodAntiAffinityDeploymentPgBackRest
PodAntiAffinityDeploymentPgBouncer
)
// Validate is responsible for validating whether or not the type of pod
// anti-affinity specified is valid
func (p PodAntiAffinityType) Validate() error {
switch p {
case
PodAntiAffinityRequired,
PodAntiAffinityPreffered,
PodAntiAffinityDisabled,
"":
return nil
}
return fmt.Errorf("Invalid pod anti-affinity type. Valid values are '%s', '%s' or '%s'",
PodAntiAffinityRequired, PodAntiAffinityPreffered, PodAntiAffinityDisabled)
}
| Enabled | identifier_name |
service.go | // Package youtube provides loading audio from video files for given youtube channels
package youtube
import (
"context"
"crypto/sha1"
"encoding/xml"
"fmt"
"os"
"path"
"regexp"
"sort"
"strings"
"time"
"github.com/bogem/id3v2/v2"
log "github.com/go-pkgz/lgr"
"github.com/google/uuid"
"github.com/pkg/errors"
rssfeed "github.com/umputun/feed-master/app/feed"
ytfeed "github.com/umputun/feed-master/app/youtube/feed"
)
//go:generate moq -out mocks/downloader.go -pkg mocks -skip-ensure -fmt goimports . DownloaderService
//go:generate moq -out mocks/channel.go -pkg mocks -skip-ensure -fmt goimports . ChannelService
//go:generate moq -out mocks/store.go -pkg mocks -skip-ensure -fmt goimports . StoreService
//go:generate moq -out mocks/duration.go -pkg mocks -skip-ensure -fmt goimports . DurationService
// Service loads audio from youtube channels
type Service struct {
Feeds []FeedInfo
Downloader DownloaderService
ChannelService ChannelService
Store StoreService
CheckDuration time.Duration
RSSFileStore RSSFileStore
DurationService DurationService
KeepPerChannel int
RootURL string
SkipShorts time.Duration
}
// FeedInfo contains channel or feed ID, readable name and other per-feed info
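// A hypothetical YAML entry for this struct might look like the following
// (all values are placeholders, and the "type" value depends on the ytfeed.Type
// constants defined elsewhere):
//
//	- name: "Some Channel"
//	  id: "UCxxxxxxxxxxxxxxxxxxxxxx"
//	  type: "channel"
//	  keep: 10
//	  lang: "en"
//	  filter:
//	    include: "(?i)podcast"
//	    exclude: "(?i)shorts"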
type FeedInfo struct {
Name string `yaml:"name"`
ID string `yaml:"id"`
Type ytfeed.Type `yaml:"type"`
Keep int `yaml:"keep"`
Language string `yaml:"lang"`
Filter FeedFilter `yaml:"filter"`
}
// FeedFilter contains filter criteria for the feed
type FeedFilter struct {
Include string `yaml:"include"`
Exclude string `yaml:"exclude"`
}
// DownloaderService is an interface for downloading audio from youtube
type DownloaderService interface {
Get(ctx context.Context, id string, fname string) (file string, err error)
}
// ChannelService is an interface for getting channel entries, i.e. the list of videos
type ChannelService interface {
Get(ctx context.Context, chanID string, feedType ytfeed.Type) ([]ytfeed.Entry, error)
}
// StoreService is an interface for storing and loading metadata about downloaded audio
type StoreService interface {
Save(entry ytfeed.Entry) (bool, error)
Load(channelID string, max int) ([]ytfeed.Entry, error)
Exist(entry ytfeed.Entry) (bool, error)
RemoveOld(channelID string, keep int) ([]string, error)
Remove(entry ytfeed.Entry) error
SetProcessed(entry ytfeed.Entry) error
ResetProcessed(entry ytfeed.Entry) error
CheckProcessed(entry ytfeed.Entry) (found bool, ts time.Time, err error)
CountProcessed() (count int)
}
// DurationService is an interface for getting duration of audio file
type DurationService interface {
File(fname string) int
}
// Do is a blocking function that downloads audio from youtube channels and updates metadata
func (s *Service) Do(ctx context.Context) error {
log.Printf("[INFO] starting youtube service")
if s.SkipShorts > 0 {
log.Printf("[DEBUG] skip youtube episodes shorter than %v", s.SkipShorts)
}
for _, f := range s.Feeds {
log.Printf("[INFO] youtube feed %+v", f)
}
tick := time.NewTicker(s.CheckDuration)
defer tick.Stop()
if err := s.procChannels(ctx); err != nil {
return errors.Wrap(err, "failed to process channels")
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tick.C:
if err := s.procChannels(ctx); err != nil {
return errors.Wrap(err, "failed to process channels")
}
}
}
}
// RSSFeed generates RSS feed for given channel
func (s *Service) RSSFeed(fi FeedInfo) (string, error) |
// procChannels processes all channels, downloads audio, updates metadata and stores RSS
func (s *Service) procChannels(ctx context.Context) error {
var allStats stats
for _, feedInfo := range s.Feeds {
entries, err := s.ChannelService.Get(ctx, feedInfo.ID, feedInfo.Type)
if err != nil {
log.Printf("[WARN] failed to get channel entries for %s: %s", feedInfo.ID, err)
continue
}
log.Printf("[INFO] got %d entries for %s, limit to %d", len(entries), feedInfo.Name, s.keep(feedInfo))
changed, processed := false, 0
for i, entry := range entries {
// exit right away if context is done
select {
case <-ctx.Done():
return ctx.Err()
default:
}
allStats.entries++
if processed >= s.keep(feedInfo) {
break
}
isAllowed, err := s.isAllowed(entry, feedInfo)
if err != nil {
return errors.Wrapf(err, "failed to check if entry %s is relevant", entry.VideoID)
}
if !isAllowed {
log.Printf("[DEBUG] skipping filtered %s", entry.String())
allStats.ignored++
continue
}
ok, err := s.isNew(entry, feedInfo)
if err != nil {
return errors.Wrapf(err, "failed to check if entry %s exists", entry.VideoID)
}
if !ok {
allStats.skipped++
processed++
continue
}
// got new entry, but with very old timestamp. skip it if we have already reached max capacity
// (this is to eliminate the initial load) and this entry is older than the oldest one we have.
// Also marks it as processed as we don't want to process it again
oldestEntry := s.oldestEntry()
if entry.Published.Before(oldestEntry.Published) && s.countAllEntries() >= s.totalEntriesToKeep() {
allStats.ignored++
log.Printf("[INFO] skipping entry %s as it is older than the oldest one we have %s",
entry.String(), oldestEntry.String())
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
continue
}
log.Printf("[INFO] new entry [%d] %s, %s, %s, %s", i+1, entry.VideoID, entry.Title, feedInfo.Name, entry.String())
file, downErr := s.Downloader.Get(ctx, entry.VideoID, s.makeFileName(entry))
if downErr != nil {
allStats.ignored++
if downErr == ytfeed.ErrSkip { // downloader decided to skip this entry
log.Printf("[INFO] skipping %s", entry.String())
continue
}
log.Printf("[WARN] failed to download %s: %s", entry.VideoID, downErr)
continue
}
if short, duration := s.isShort(file); short {
allStats.ignored++
log.Printf("[INFO] skip short file %s (%v): %s, %s", file, duration, entry.VideoID, entry.String())
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
continue
}
// update metadata
if tagsErr := s.updateMp3Tags(file, entry, feedInfo); tagsErr != nil {
log.Printf("[WARN] failed to update metadata for %s: %s", entry.VideoID, tagsErr)
}
processed++
fsize := 0
if fi, err := os.Stat(file); err == nil {
fsize = int(fi.Size())
} else {
log.Printf("[WARN] failed to get file size for %s: %v", file, err)
}
log.Printf("[INFO] downloaded %s (%s) to %s, size: %d, channel: %+v", entry.VideoID, entry.Title, file, fsize, feedInfo)
entry = s.update(entry, file, feedInfo)
ok, saveErr := s.Store.Save(entry)
if saveErr != nil {
return errors.Wrapf(saveErr, "failed to save entry %+v", entry)
}
if !ok {
log.Printf("[WARN] attempt to save dup entry %+v", entry)
}
changed = true
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
allStats.added++
log.Printf("[INFO] saved %s (%s) to %s, channel: %+v", entry.VideoID, entry.Title, file, feedInfo)
}
allStats.processed += processed
if changed {
removed := s.removeOld(feedInfo)
allStats.removed += removed
// save rss feed to fs if there are new entries
rss, rssErr := s.RSSFeed(feedInfo)
if rssErr != nil {
log.Printf("[WARN] failed to generate rss for %s: %s", feedInfo.Name, rssErr)
} else {
if err := s.RSSFileStore.Save(feedInfo.ID, rss); err != nil {
log.Printf("[WARN] failed to save rss for %s: %s", feedInfo.Name, err)
}
}
}
}
log.Printf("[INFO] all channels processed - channels: %d, %s, lifetime: %d, feed size: %d",
len(s.Feeds), allStats.String(), s.Store.CountProcessed(), s.countAllEntries())
newestEntry := s.newestEntry()
log.Printf("[INFO] last entry: %s", newestEntry.String())
return nil
}
// StoreRSS saves RSS feed to file
func (s *Service) StoreRSS(chanID, rss string) error {
return s.RSSFileStore.Save(chanID, rss)
}
// RemoveEntry deletes the entry from the store. It doesn't remove the file
func (s *Service) RemoveEntry(entry ytfeed.Entry) error {
if err := s.Store.ResetProcessed(entry); err != nil {
return errors.Wrapf(err, "failed to reset processed entry %s", entry.VideoID)
}
if err := s.Store.Remove(entry); err != nil {
return errors.Wrapf(err, "failed to remove entry %s", entry.VideoID)
}
return nil
}
// isNew checks if the entry has already been processed
func (s *Service) isNew(entry ytfeed.Entry, fi FeedInfo) (ok bool, err error) {
// check if entry already exists in store
// this method won't work after migration to locally altered published ts but has to stay for now
// to avoid false positives on old entries that never got set with SetProcessed
exists, exErr := s.Store.Exist(entry)
if exErr != nil {
return false, errors.Wrapf(exErr, "failed to check if entry %s exists", entry.VideoID)
}
if exists {
return false, nil
}
// check if we already processed this entry.
// this is needed to avoid infinite get/remove loop when the original feed is updated in place.
// after migration to locally altered published ts, it is also the primary way to detect already processed entries
found, _, procErr := s.Store.CheckProcessed(entry)
if procErr != nil {
log.Printf("[WARN] can't get processed status for %s, %+v", entry.VideoID, fi)
}
if procErr == nil && found {
return false, nil
}
return true, nil
}
// isAllowed checks if entry matches all filters for the channel feed
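// For example (hypothetical filter values): with Include set to "(?i)podcast"
// and Exclude set to "(?i)live", an entry titled "Podcast #42" is allowed,
// while "Podcast #43 live" is rejected because the exclude pattern matches.
// An empty Include matches everything and an empty Exclude rejects nothing.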
func (s *Service) isAllowed(entry ytfeed.Entry, fi FeedInfo) (ok bool, err error) {
matchedIncludeFilter := true
if fi.Filter.Include != "" {
matchedIncludeFilter, err = regexp.MatchString(fi.Filter.Include, entry.Title)
if err != nil {
return false, errors.Wrapf(err, "failed to check if entry %s matches include filter", entry.VideoID)
}
}
matchedExcludeFilter := false
if fi.Filter.Exclude != "" {
matchedExcludeFilter, err = regexp.MatchString(fi.Filter.Exclude, entry.Title)
if err != nil {
return false, errors.Wrapf(err, "failed to check if entry %s matches exclude filter", entry.VideoID)
}
}
return matchedIncludeFilter && !matchedExcludeFilter, nil
}
func (s *Service) isShort(file string) (bool, time.Duration) {
if s.SkipShorts.Seconds() > 0 {
// skip shorts if duration is less than SkipShorts
duration := s.DurationService.File(file)
if duration > 0 && duration < int(s.SkipShorts.Seconds()) {
return true, time.Duration(duration) * time.Second
}
}
return false, 0
}
// update sets entry file name and reset published ts
func (s *Service) update(entry ytfeed.Entry, file string, fi FeedInfo) ytfeed.Entry {
entry.File = file
// only reset time if published not too long ago
// this is done to avoid initial set of entries added with a new channel to the top of the feed
if time.Since(entry.Published) < time.Hour*24 {
log.Printf("[DEBUG] reset published time for %s, from %s to %s (%v), %s",
entry.VideoID, entry.Published.Format(time.RFC3339), time.Now().Format(time.RFC3339),
time.Since(entry.Published), entry.String())
entry.Published = time.Now() // reset published ts to prevent possible out-of-order entries
} else {
log.Printf("[DEBUG] keep published time for %s, %s", entry.VideoID, entry.Published.Format(time.RFC3339))
}
if !strings.Contains(entry.Title, fi.Name) { // if the title doesn't contain the channel name, add it
entry.Title = fi.Name + ": " + entry.Title
}
entry.Duration = s.DurationService.File(file)
log.Printf("[DEBUG] updated entry: %s", entry.String())
return entry
}
// removeOld deletes old entries from store and corresponding files
func (s *Service) removeOld(fi FeedInfo) int {
removed := 0
keep := s.keep(fi)
files, err := s.Store.RemoveOld(fi.ID, keep+1)
if err != nil { // even with error we get a list of files to remove
log.Printf("[WARN] failed to remove some old meta data for %s, %v", fi.ID, err)
}
for _, f := range files {
if e := os.Remove(f); e != nil {
log.Printf("[WARN] failed to remove file %s: %v", f, e)
continue
}
removed++
log.Printf("[INFO] removed %s for %s (%s)", f, fi.ID, fi.Name)
}
return removed
}
func (s *Service) keep(fi FeedInfo) int {
keep := s.KeepPerChannel
if fi.Keep > 0 {
keep = fi.Keep
}
return keep
}
func (s *Service) makeFileName(entry ytfeed.Entry) string {
h := sha1.New()
if _, err := h.Write([]byte(entry.UID())); err != nil {
return uuid.New().String()
}
return fmt.Sprintf("%x", h.Sum(nil))
}
// totalEntriesToKeep returns total number of entries to keep, summing all channels' keep values
func (s *Service) totalEntriesToKeep() (res int) {
for _, fi := range s.Feeds {
res += s.keep(fi)
}
return res
}
// countAllEntries returns total number of entries across all channels, respects keep settings
func (s *Service) countAllEntries() int {
var result int
for _, fi := range s.Feeds {
if entries, err := s.Store.Load(fi.ID, s.keep(fi)); err == nil {
result += len(entries)
}
}
return result
}
// newestEntry returns the newest entry across all channels, respects keep settings
func (s *Service) newestEntry() ytfeed.Entry {
entries := []ytfeed.Entry{}
for _, fi := range s.Feeds {
if recs, err := s.Store.Load(fi.ID, 1); err == nil {
entries = append(entries, recs...)
}
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].Published.After(entries[j].Published)
})
if len(entries) == 0 {
return ytfeed.Entry{}
}
return entries[0]
}
// oldestEntry returns the oldest entry from all channels, respecting keep settings
func (s *Service) oldestEntry() ytfeed.Entry {
entries := []ytfeed.Entry{}
for _, fi := range s.Feeds {
if recs, err := s.Store.Load(fi.ID, s.keep(fi)); err == nil {
entries = append(entries, recs...)
}
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].Published.Before(entries[j].Published)
})
if len(entries) == 0 {
return ytfeed.Entry{}
}
return entries[0]
}
func (s *Service) updateMp3Tags(file string, entry ytfeed.Entry, fi FeedInfo) error {
fh, err := id3v2.Open(file, id3v2.Options{Parse: false})
if err != nil {
return errors.Wrapf(err, "failed to open file %s", file)
}
defer fh.Close()
fh.SetTitle(entry.Title)
fh.SetArtist(entry.Author.Name)
fh.SetAlbum(fi.Name)
fh.SetGenre("podcast")
fh.SetYear(entry.Published.Format("2006"))
fh.AddTextFrame(fh.CommonID("Recording time"), fh.DefaultEncoding(), entry.Published.Format("20060102T150405"))
if err = fh.Save(); err != nil {
return errors.Wrapf(err, "failed to close file %s", file)
}
return nil
}
type stats struct {
entries int
processed int
added int
removed int
ignored int
skipped int
}
func (st stats) String() string {
return fmt.Sprintf("entries: %d, processed: %d, updated: %d, removed: %d, ignored: %d, skipped: %d",
st.entries, st.processed, st.added, st.removed, st.ignored, st.skipped)
}
| {
entries, err := s.Store.Load(fi.ID, s.keep(fi))
if err != nil {
return "", errors.Wrap(err, "failed to get channel entries")
}
if len(entries) == 0 {
return "", nil
}
items := []rssfeed.Item{}
for _, entry := range entries {
fileURL := s.RootURL + "/" + path.Base(entry.File)
var fileSize int
if fileInfo, fiErr := os.Stat(entry.File); fiErr != nil {
log.Printf("[WARN] failed to get file size for %s (%s %s): %v", entry.File, entry.VideoID, entry.Title, fiErr)
} else {
fileSize = int(fileInfo.Size())
}
duration := ""
if entry.Duration > 0 {
duration = fmt.Sprintf("%d", entry.Duration)
}
items = append(items, rssfeed.Item{
Title: entry.Title,
Description: entry.Media.Description,
Link: entry.Link.Href,
PubDate: entry.Published.In(time.UTC).Format(time.RFC1123Z),
GUID: entry.ChannelID + "::" + entry.VideoID,
Author: entry.Author.Name,
Enclosure: rssfeed.Enclosure{
URL: fileURL,
Type: "audio/mpeg",
Length: fileSize,
},
Duration: duration,
DT: time.Now(),
})
}
rss := rssfeed.Rss2{
Version: "2.0",
NsItunes: "http://www.itunes.com/dtds/podcast-1.0.dtd",
NsMedia: "http://search.yahoo.com/mrss/",
ItemList: items,
Title: fi.Name,
Description: "generated by feed-master",
Link: entries[0].Author.URI,
PubDate: items[0].PubDate,
LastBuildDate: time.Now().Format(time.RFC1123Z),
Language: fi.Language,
ItunesAuthor: entries[0].Author.Name,
ItunesExplicit: "no",
}
// set image from channel as rss thumbnail
// TODO: we may want to load it locally in case youtube doesn't like such remote usage of images
if image := entries[0].Media.Thumbnail.URL; image != "" {
rss.ItunesImage = &rssfeed.ItunesImg{URL: image}
rss.MediaThumbnail = &rssfeed.MediaThumbnail{URL: image}
}
if fi.Type == ytfeed.FTPlaylist {
rss.Link = "https://www.youtube.com/playlist?list=" + fi.ID
}
b, err := xml.MarshalIndent(&rss, "", " ")
if err != nil {
return "", errors.Wrap(err, "failed to marshal rss")
}
res := string(b)
// this hack avoids having different items for marshal and unmarshal due to the "itunes" namespace
res = strings.Replace(res, "<duration>", "<itunes:duration>", -1)
res = strings.Replace(res, "</duration>", "</itunes:duration>", -1)
return res, nil
} | identifier_body |
service.go | // Package youtube provides loading audio from video files for given youtube channels
package youtube
import (
"context"
"crypto/sha1"
"encoding/xml"
"fmt"
"os"
"path"
"regexp"
"sort"
"strings"
"time"
"github.com/bogem/id3v2/v2"
log "github.com/go-pkgz/lgr"
"github.com/google/uuid"
"github.com/pkg/errors"
rssfeed "github.com/umputun/feed-master/app/feed"
ytfeed "github.com/umputun/feed-master/app/youtube/feed"
)
//go:generate moq -out mocks/downloader.go -pkg mocks -skip-ensure -fmt goimports . DownloaderService
//go:generate moq -out mocks/channel.go -pkg mocks -skip-ensure -fmt goimports . ChannelService
//go:generate moq -out mocks/store.go -pkg mocks -skip-ensure -fmt goimports . StoreService
//go:generate moq -out mocks/duration.go -pkg mocks -skip-ensure -fmt goimports . DurationService
// Service loads audio from youtube channels
type Service struct {
Feeds []FeedInfo
Downloader DownloaderService
ChannelService ChannelService
Store StoreService
CheckDuration time.Duration
RSSFileStore RSSFileStore
DurationService DurationService
KeepPerChannel int
RootURL string
SkipShorts time.Duration
}
// FeedInfo contains channel or feed ID, readable name and other per-feed info
type FeedInfo struct {
Name string `yaml:"name"`
ID string `yaml:"id"`
Type ytfeed.Type `yaml:"type"`
Keep int `yaml:"keep"`
Language string `yaml:"lang"`
Filter FeedFilter `yaml:"filter"`
}
// FeedFilter contains filter criteria for the feed
type FeedFilter struct {
Include string `yaml:"include"`
Exclude string `yaml:"exclude"`
}
// DownloaderService is an interface for downloading audio from youtube
type DownloaderService interface {
Get(ctx context.Context, id string, fname string) (file string, err error)
}
// ChannelService is an interface for getting channel entries, i.e. the list of videos
type ChannelService interface {
Get(ctx context.Context, chanID string, feedType ytfeed.Type) ([]ytfeed.Entry, error)
}
// StoreService is an interface for storing and loading metadata about downloaded audio
type StoreService interface {
Save(entry ytfeed.Entry) (bool, error)
Load(channelID string, max int) ([]ytfeed.Entry, error)
Exist(entry ytfeed.Entry) (bool, error)
RemoveOld(channelID string, keep int) ([]string, error)
Remove(entry ytfeed.Entry) error
SetProcessed(entry ytfeed.Entry) error
ResetProcessed(entry ytfeed.Entry) error
CheckProcessed(entry ytfeed.Entry) (found bool, ts time.Time, err error)
CountProcessed() (count int)
}
// DurationService is an interface for getting duration of audio file
type DurationService interface {
File(fname string) int
}
// Do is a blocking function that downloads audio from youtube channels and updates metadata
func (s *Service) Do(ctx context.Context) error {
log.Printf("[INFO] starting youtube service")
if s.SkipShorts > 0 |
for _, f := range s.Feeds {
log.Printf("[INFO] youtube feed %+v", f)
}
tick := time.NewTicker(s.CheckDuration)
defer tick.Stop()
if err := s.procChannels(ctx); err != nil {
return errors.Wrap(err, "failed to process channels")
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tick.C:
if err := s.procChannels(ctx); err != nil {
return errors.Wrap(err, "failed to process channels")
}
}
}
}
// RSSFeed generates RSS feed for given channel
func (s *Service) RSSFeed(fi FeedInfo) (string, error) {
entries, err := s.Store.Load(fi.ID, s.keep(fi))
if err != nil {
return "", errors.Wrap(err, "failed to get channel entries")
}
if len(entries) == 0 {
return "", nil
}
items := []rssfeed.Item{}
for _, entry := range entries {
fileURL := s.RootURL + "/" + path.Base(entry.File)
var fileSize int
if fileInfo, fiErr := os.Stat(entry.File); fiErr != nil {
log.Printf("[WARN] failed to get file size for %s (%s %s): %v", entry.File, entry.VideoID, entry.Title, fiErr)
} else {
fileSize = int(fileInfo.Size())
}
duration := ""
if entry.Duration > 0 {
duration = fmt.Sprintf("%d", entry.Duration)
}
items = append(items, rssfeed.Item{
Title: entry.Title,
Description: entry.Media.Description,
Link: entry.Link.Href,
PubDate: entry.Published.In(time.UTC).Format(time.RFC1123Z),
GUID: entry.ChannelID + "::" + entry.VideoID,
Author: entry.Author.Name,
Enclosure: rssfeed.Enclosure{
URL: fileURL,
Type: "audio/mpeg",
Length: fileSize,
},
Duration: duration,
DT: time.Now(),
})
}
rss := rssfeed.Rss2{
Version: "2.0",
NsItunes: "http://www.itunes.com/dtds/podcast-1.0.dtd",
NsMedia: "http://search.yahoo.com/mrss/",
ItemList: items,
Title: fi.Name,
Description: "generated by feed-master",
Link: entries[0].Author.URI,
PubDate: items[0].PubDate,
LastBuildDate: time.Now().Format(time.RFC1123Z),
Language: fi.Language,
ItunesAuthor: entries[0].Author.Name,
ItunesExplicit: "no",
}
// set image from channel as rss thumbnail
// TODO: we may want to load it locally in case youtube doesn't like such remote usage of images
if image := entries[0].Media.Thumbnail.URL; image != "" {
rss.ItunesImage = &rssfeed.ItunesImg{URL: image}
rss.MediaThumbnail = &rssfeed.MediaThumbnail{URL: image}
}
if fi.Type == ytfeed.FTPlaylist {
rss.Link = "https://www.youtube.com/playlist?list=" + fi.ID
}
b, err := xml.MarshalIndent(&rss, "", " ")
if err != nil {
return "", errors.Wrap(err, "failed to marshal rss")
}
res := string(b)
// this hack avoids having different items for marshal and unmarshal due to the "itunes" namespace
res = strings.Replace(res, "<duration>", "<itunes:duration>", -1)
res = strings.Replace(res, "</duration>", "</itunes:duration>", -1)
return res, nil
}
// procChannels processes all channels, downloads audio, updates metadata and stores RSS
func (s *Service) procChannels(ctx context.Context) error {
var allStats stats
for _, feedInfo := range s.Feeds {
entries, err := s.ChannelService.Get(ctx, feedInfo.ID, feedInfo.Type)
if err != nil {
log.Printf("[WARN] failed to get channel entries for %s: %s", feedInfo.ID, err)
continue
}
log.Printf("[INFO] got %d entries for %s, limit to %d", len(entries), feedInfo.Name, s.keep(feedInfo))
changed, processed := false, 0
for i, entry := range entries {
// exit right away if context is done
select {
case <-ctx.Done():
return ctx.Err()
default:
}
allStats.entries++
if processed >= s.keep(feedInfo) {
break
}
isAllowed, err := s.isAllowed(entry, feedInfo)
if err != nil {
return errors.Wrapf(err, "failed to check if entry %s is relevant", entry.VideoID)
}
if !isAllowed {
log.Printf("[DEBUG] skipping filtered %s", entry.String())
allStats.ignored++
continue
}
ok, err := s.isNew(entry, feedInfo)
if err != nil {
return errors.Wrapf(err, "failed to check if entry %s exists", entry.VideoID)
}
if !ok {
allStats.skipped++
processed++
continue
}
// got new entry, but with very old timestamp. skip it if we have already reached max capacity
// (this is to eliminate the initial load) and this entry is older than the oldest one we have.
// Also marks it as processed as we don't want to process it again
oldestEntry := s.oldestEntry()
if entry.Published.Before(oldestEntry.Published) && s.countAllEntries() >= s.totalEntriesToKeep() {
allStats.ignored++
log.Printf("[INFO] skipping entry %s as it is older than the oldest one we have %s",
entry.String(), oldestEntry.String())
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
continue
}
log.Printf("[INFO] new entry [%d] %s, %s, %s, %s", i+1, entry.VideoID, entry.Title, feedInfo.Name, entry.String())
file, downErr := s.Downloader.Get(ctx, entry.VideoID, s.makeFileName(entry))
if downErr != nil {
allStats.ignored++
if downErr == ytfeed.ErrSkip { // downloader decided to skip this entry
log.Printf("[INFO] skipping %s", entry.String())
continue
}
log.Printf("[WARN] failed to download %s: %s", entry.VideoID, downErr)
continue
}
if short, duration := s.isShort(file); short {
allStats.ignored++
log.Printf("[INFO] skip short file %s (%v): %s, %s", file, duration, entry.VideoID, entry.String())
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
continue
}
// update metadata
if tagsErr := s.updateMp3Tags(file, entry, feedInfo); tagsErr != nil {
log.Printf("[WARN] failed to update metadata for %s: %s", entry.VideoID, tagsErr)
}
processed++
fsize := 0
if fi, err := os.Stat(file); err == nil {
fsize = int(fi.Size())
} else {
log.Printf("[WARN] failed to get file size for %s: %v", file, err)
}
log.Printf("[INFO] downloaded %s (%s) to %s, size: %d, channel: %+v", entry.VideoID, entry.Title, file, fsize, feedInfo)
entry = s.update(entry, file, feedInfo)
ok, saveErr := s.Store.Save(entry)
if saveErr != nil {
return errors.Wrapf(saveErr, "failed to save entry %+v", entry)
}
if !ok {
log.Printf("[WARN] attempt to save dup entry %+v", entry)
}
changed = true
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
allStats.added++
log.Printf("[INFO] saved %s (%s) to %s, channel: %+v", entry.VideoID, entry.Title, file, feedInfo)
}
allStats.processed += processed
if changed {
removed := s.removeOld(feedInfo)
allStats.removed += removed
// save rss feed to fs if there are new entries
rss, rssErr := s.RSSFeed(feedInfo)
if rssErr != nil {
log.Printf("[WARN] failed to generate rss for %s: %s", feedInfo.Name, rssErr)
} else {
if err := s.RSSFileStore.Save(feedInfo.ID, rss); err != nil {
log.Printf("[WARN] failed to save rss for %s: %s", feedInfo.Name, err)
}
}
}
}
log.Printf("[INFO] all channels processed - channels: %d, %s, lifetime: %d, feed size: %d",
len(s.Feeds), allStats.String(), s.Store.CountProcessed(), s.countAllEntries())
newestEntry := s.newestEntry()
log.Printf("[INFO] last entry: %s", newestEntry.String())
return nil
}
// StoreRSS saves RSS feed to file
func (s *Service) StoreRSS(chanID, rss string) error {
return s.RSSFileStore.Save(chanID, rss)
}
// RemoveEntry deletes the entry from the store. It doesn't remove the file
func (s *Service) RemoveEntry(entry ytfeed.Entry) error {
if err := s.Store.ResetProcessed(entry); err != nil {
return errors.Wrapf(err, "failed to reset processed entry %s", entry.VideoID)
}
if err := s.Store.Remove(entry); err != nil {
return errors.Wrapf(err, "failed to remove entry %s", entry.VideoID)
}
return nil
}
// isNew checks if the entry has already been processed
func (s *Service) isNew(entry ytfeed.Entry, fi FeedInfo) (ok bool, err error) {
// check if entry already exists in store
// this method won't work after migration to locally altered published ts but has to stay for now
// to avoid false positives on old entries that never got set with SetProcessed
exists, exErr := s.Store.Exist(entry)
if exErr != nil {
return false, errors.Wrapf(exErr, "failed to check if entry %s exists", entry.VideoID)
}
if exists {
return false, nil
}
// check if we already processed this entry.
// this is needed to avoid infinite get/remove loop when the original feed is updated in place.
// after migration to locally altered published ts, it is also the primary way to detect already processed entries
found, _, procErr := s.Store.CheckProcessed(entry)
if procErr != nil {
log.Printf("[WARN] can't get processed status for %s, %+v", entry.VideoID, fi)
}
if procErr == nil && found {
return false, nil
}
return true, nil
}
// isAllowed checks if entry matches all filters for the channel feed
func (s *Service) isAllowed(entry ytfeed.Entry, fi FeedInfo) (ok bool, err error) {
matchedIncludeFilter := true
if fi.Filter.Include != "" {
matchedIncludeFilter, err = regexp.MatchString(fi.Filter.Include, entry.Title)
if err != nil {
return false, errors.Wrapf(err, "failed to check if entry %s matches include filter", entry.VideoID)
}
}
matchedExcludeFilter := false
if fi.Filter.Exclude != "" {
matchedExcludeFilter, err = regexp.MatchString(fi.Filter.Exclude, entry.Title)
if err != nil {
return false, errors.Wrapf(err, "failed to check if entry %s matches exclude filter", entry.VideoID)
}
}
return matchedIncludeFilter && !matchedExcludeFilter, nil
}
func (s *Service) isShort(file string) (bool, time.Duration) {
if s.SkipShorts.Seconds() > 0 {
// skip shorts if duration is less than SkipShorts
duration := s.DurationService.File(file)
if duration > 0 && duration < int(s.SkipShorts.Seconds()) {
return true, time.Duration(duration) * time.Second
}
}
return false, 0
}
// update sets entry file name and reset published ts
func (s *Service) update(entry ytfeed.Entry, file string, fi FeedInfo) ytfeed.Entry {
entry.File = file
// only reset time if published not too long ago
// this is done to avoid initial set of entries added with a new channel to the top of the feed
if time.Since(entry.Published) < time.Hour*24 {
log.Printf("[DEBUG] reset published time for %s, from %s to %s (%v), %s",
entry.VideoID, entry.Published.Format(time.RFC3339), time.Now().Format(time.RFC3339),
time.Since(entry.Published), entry.String())
entry.Published = time.Now() // reset published ts to prevent possible out-of-order entries
} else {
log.Printf("[DEBUG] keep published time for %s, %s", entry.VideoID, entry.Published.Format(time.RFC3339))
}
if !strings.Contains(entry.Title, fi.Name) { // if the title doesn't contain the channel name, add it
entry.Title = fi.Name + ": " + entry.Title
}
entry.Duration = s.DurationService.File(file)
log.Printf("[DEBUG] updated entry: %s", entry.String())
return entry
}
// removeOld deletes old entries from store and corresponding files
func (s *Service) removeOld(fi FeedInfo) int {
removed := 0
keep := s.keep(fi)
files, err := s.Store.RemoveOld(fi.ID, keep+1)
if err != nil { // even with error we get a list of files to remove
log.Printf("[WARN] failed to remove some old meta data for %s, %v", fi.ID, err)
}
for _, f := range files {
if e := os.Remove(f); e != nil {
log.Printf("[WARN] failed to remove file %s: %v", f, e)
continue
}
removed++
log.Printf("[INFO] removed %s for %s (%s)", f, fi.ID, fi.Name)
}
return removed
}
func (s *Service) keep(fi FeedInfo) int {
keep := s.KeepPerChannel
if fi.Keep > 0 {
keep = fi.Keep
}
return keep
}
func (s *Service) makeFileName(entry ytfeed.Entry) string {
h := sha1.New()
if _, err := h.Write([]byte(entry.UID())); err != nil {
return uuid.New().String()
}
return fmt.Sprintf("%x", h.Sum(nil))
}
// totalEntriesToKeep returns total number of entries to keep, summing all channels' keep values
func (s *Service) totalEntriesToKeep() (res int) {
for _, fi := range s.Feeds {
res += s.keep(fi)
}
return res
}
// countAllEntries returns total number of entries across all channels, respects keep settings
func (s *Service) countAllEntries() int {
var result int
for _, fi := range s.Feeds {
if entries, err := s.Store.Load(fi.ID, s.keep(fi)); err == nil {
result += len(entries)
}
}
return result
}
// newestEntry returns the newest entry across all channels, respects keep settings
func (s *Service) newestEntry() ytfeed.Entry {
entries := []ytfeed.Entry{}
for _, fi := range s.Feeds {
if recs, err := s.Store.Load(fi.ID, 1); err == nil {
entries = append(entries, recs...)
}
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].Published.After(entries[j].Published)
})
if len(entries) == 0 {
return ytfeed.Entry{}
}
return entries[0]
}
// oldestEntry returns the oldest entry from all channels, respecting keep settings
func (s *Service) oldestEntry() ytfeed.Entry {
entries := []ytfeed.Entry{}
for _, fi := range s.Feeds {
if recs, err := s.Store.Load(fi.ID, s.keep(fi)); err == nil {
entries = append(entries, recs...)
}
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].Published.Before(entries[j].Published)
})
if len(entries) == 0 {
return ytfeed.Entry{}
}
return entries[0]
}
func (s *Service) updateMp3Tags(file string, entry ytfeed.Entry, fi FeedInfo) error {
fh, err := id3v2.Open(file, id3v2.Options{Parse: false})
if err != nil {
return errors.Wrapf(err, "failed to open file %s", file)
}
defer fh.Close()
fh.SetTitle(entry.Title)
fh.SetArtist(entry.Author.Name)
fh.SetAlbum(fi.Name)
fh.SetGenre("podcast")
fh.SetYear(entry.Published.Format("2006"))
fh.AddTextFrame(fh.CommonID("Recording time"), fh.DefaultEncoding(), entry.Published.Format("20060102T150405"))
if err = fh.Save(); err != nil {
return errors.Wrapf(err, "failed to close file %s", file)
}
return nil
}
type stats struct {
entries int
processed int
added int
removed int
ignored int
skipped int
}
func (st stats) String() string {
return fmt.Sprintf("entries: %d, processed: %d, updated: %d, removed: %d, ignored: %d, skipped: %d",
st.entries, st.processed, st.added, st.removed, st.ignored, st.skipped)
}
| {
log.Printf("[DEBUG] skip youtube episodes shorter than %v", s.SkipShorts)
} | conditional_block |
service.go | // Package youtube provides loading audio from video files for given youtube channels
package youtube
import (
"context"
"crypto/sha1"
"encoding/xml"
"fmt"
"os"
"path"
"regexp"
"sort"
"strings"
"time"
"github.com/bogem/id3v2/v2"
log "github.com/go-pkgz/lgr"
"github.com/google/uuid"
"github.com/pkg/errors"
rssfeed "github.com/umputun/feed-master/app/feed"
ytfeed "github.com/umputun/feed-master/app/youtube/feed"
)
//go:generate moq -out mocks/downloader.go -pkg mocks -skip-ensure -fmt goimports . DownloaderService
//go:generate moq -out mocks/channel.go -pkg mocks -skip-ensure -fmt goimports . ChannelService
//go:generate moq -out mocks/store.go -pkg mocks -skip-ensure -fmt goimports . StoreService
//go:generate moq -out mocks/duration.go -pkg mocks -skip-ensure -fmt goimports . DurationService
// Service loads audio from youtube channels
type Service struct {
Feeds []FeedInfo
Downloader DownloaderService
ChannelService ChannelService
Store StoreService
CheckDuration time.Duration
RSSFileStore RSSFileStore
DurationService DurationService
KeepPerChannel int
RootURL string
SkipShorts time.Duration
}
// FeedInfo contains channel or feed ID, readable name and other per-feed info
type FeedInfo struct {
Name string `yaml:"name"`
ID string `yaml:"id"`
Type ytfeed.Type `yaml:"type"`
Keep int `yaml:"keep"`
Language string `yaml:"lang"`
Filter FeedFilter `yaml:"filter"`
}
// FeedFilter contains filter criteria for the feed
type FeedFilter struct {
Include string `yaml:"include"`
Exclude string `yaml:"exclude"`
}
// DownloaderService is an interface for downloading audio from youtube
type DownloaderService interface {
Get(ctx context.Context, id string, fname string) (file string, err error)
}
// ChannelService is an interface for getting channel entries, i.e. the list of videos
type ChannelService interface {
Get(ctx context.Context, chanID string, feedType ytfeed.Type) ([]ytfeed.Entry, error)
}
// StoreService is an interface for storing and loading metadata about downloaded audio
type StoreService interface {
Save(entry ytfeed.Entry) (bool, error)
Load(channelID string, max int) ([]ytfeed.Entry, error)
Exist(entry ytfeed.Entry) (bool, error)
RemoveOld(channelID string, keep int) ([]string, error)
Remove(entry ytfeed.Entry) error
SetProcessed(entry ytfeed.Entry) error
ResetProcessed(entry ytfeed.Entry) error
CheckProcessed(entry ytfeed.Entry) (found bool, ts time.Time, err error)
CountProcessed() (count int)
}
// DurationService is an interface for getting duration of audio file
type DurationService interface {
File(fname string) int
}
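// The stub below is an illustrative sketch, not part of the original package:
// the project generates its real mocks with the go:generate moq directives
// above. A fixed-duration DurationService is enough for a quick wiring test.
type staticDurationStub struct{ seconds int }

// File always reports the same duration regardless of the file name.
func (d staticDurationStub) File(_ string) int { return d.seconds }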
// Do is a blocking function that downloads audio from youtube channels and updates metadata
func (s *Service) Do(ctx context.Context) error {
log.Printf("[INFO] starting youtube service")
if s.SkipShorts > 0 {
log.Printf("[DEBUG] skip youtube episodes shorter than %v", s.SkipShorts)
}
for _, f := range s.Feeds {
log.Printf("[INFO] youtube feed %+v", f)
}
tick := time.NewTicker(s.CheckDuration)
defer tick.Stop()
if err := s.procChannels(ctx); err != nil {
return errors.Wrap(err, "failed to process channels")
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tick.C:
if err := s.procChannels(ctx); err != nil {
return errors.Wrap(err, "failed to process channels")
}
}
}
}
// RSSFeed generates RSS feed for given channel
func (s *Service) RSSFeed(fi FeedInfo) (string, error) {
entries, err := s.Store.Load(fi.ID, s.keep(fi))
if err != nil {
return "", errors.Wrap(err, "failed to get channel entries")
}
if len(entries) == 0 {
return "", nil
}
items := []rssfeed.Item{}
for _, entry := range entries {
fileURL := s.RootURL + "/" + path.Base(entry.File)
var fileSize int
if fileInfo, fiErr := os.Stat(entry.File); fiErr != nil {
log.Printf("[WARN] failed to get file size for %s (%s %s): %v", entry.File, entry.VideoID, entry.Title, fiErr)
} else {
fileSize = int(fileInfo.Size())
}
duration := ""
if entry.Duration > 0 {
duration = fmt.Sprintf("%d", entry.Duration)
}
items = append(items, rssfeed.Item{
Title: entry.Title,
Description: entry.Media.Description,
Link: entry.Link.Href,
PubDate: entry.Published.In(time.UTC).Format(time.RFC1123Z),
GUID: entry.ChannelID + "::" + entry.VideoID,
Author: entry.Author.Name,
Enclosure: rssfeed.Enclosure{
URL: fileURL,
Type: "audio/mpeg",
Length: fileSize,
},
Duration: duration,
DT: time.Now(),
})
}
rss := rssfeed.Rss2{
Version: "2.0",
NsItunes: "http://www.itunes.com/dtds/podcast-1.0.dtd",
NsMedia: "http://search.yahoo.com/mrss/",
ItemList: items,
Title: fi.Name,
Description: "generated by feed-master",
Link: entries[0].Author.URI,
PubDate: items[0].PubDate,
LastBuildDate: time.Now().Format(time.RFC1123Z),
Language: fi.Language,
ItunesAuthor: entries[0].Author.Name,
ItunesExplicit: "no",
}
// set image from channel as rss thumbnail
// TODO: we may want to load it locally in case if youtube doesn't like such remote usage of images
if image := entries[0].Media.Thumbnail.URL; image != "" {
rss.ItunesImage = &rssfeed.ItunesImg{URL: image}
rss.MediaThumbnail = &rssfeed.MediaThumbnail{URL: image}
}
if fi.Type == ytfeed.FTPlaylist {
rss.Link = "https://www.youtube.com/playlist?list=" + fi.ID
}
b, err := xml.MarshalIndent(&rss, "", " ")
if err != nil {
return "", errors.Wrap(err, "failed to marshal rss")
}
res := string(b)
// this hack is to avoid having different items for marshal and unmarshal due to the "itunes" namespace
res = strings.Replace(res, "<duration>", "<itunes:duration>", -1)
res = strings.Replace(res, "</duration>", "</itunes:duration>", -1)
return res, nil
}
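// durationTagExample is an illustrative sketch (not part of the original): the
// string replacements at the end of RSSFeed turn the plain <duration> element
// into the namespaced one podcast clients expect, without complicating the
// marshalled struct tags.
func durationTagExample() string {
	return strings.Replace("<duration>90</duration>", "<duration>", "<itunes:duration>", -1) // "<itunes:duration>90</duration>"
}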
// procChannels processes all channels, downloads audio, updates metadata and stores RSS
func (s *Service) procChannels(ctx context.Context) error {
var allStats stats
for _, feedInfo := range s.Feeds {
entries, err := s.ChannelService.Get(ctx, feedInfo.ID, feedInfo.Type)
if err != nil {
log.Printf("[WARN] failed to get channel entries for %s: %s", feedInfo.ID, err)
continue
}
log.Printf("[INFO] got %d entries for %s, limit to %d", len(entries), feedInfo.Name, s.keep(feedInfo))
changed, processed := false, 0
for i, entry := range entries {
// exit right away if context is done
select {
case <-ctx.Done():
return ctx.Err()
default:
}
allStats.entries++
if processed >= s.keep(feedInfo) {
break
}
isAllowed, err := s.isAllowed(entry, feedInfo)
if err != nil {
return errors.Wrapf(err, "failed to check if entry %s is relevant", entry.VideoID)
}
if !isAllowed {
log.Printf("[DEBUG] skipping filtered %s", entry.String())
allStats.ignored++
continue
}
ok, err := s.isNew(entry, feedInfo)
if err != nil {
return errors.Wrapf(err, "failed to check if entry %s exists", entry.VideoID)
}
if !ok {
allStats.skipped++
processed++
continue
}
// got new entry, but with very old timestamp. skip it if we have already reached max capacity
// (this is to eliminate the initial load) and this entry is older than the oldest one we have.
// Also marks it as processed as we don't want to process it again
oldestEntry := s.oldestEntry()
if entry.Published.Before(oldestEntry.Published) && s.countAllEntries() >= s.totalEntriesToKeep() {
allStats.ignored++
log.Printf("[INFO] skipping entry %s as it is older than the oldest one we have %s",
entry.String(), oldestEntry.String())
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
continue
}
log.Printf("[INFO] new entry [%d] %s, %s, %s, %s", i+1, entry.VideoID, entry.Title, feedInfo.Name, entry.String())
file, downErr := s.Downloader.Get(ctx, entry.VideoID, s.makeFileName(entry))
if downErr != nil {
allStats.ignored++
if downErr == ytfeed.ErrSkip { // downloader decided to skip this entry
log.Printf("[INFO] skipping %s", entry.String())
continue
}
log.Printf("[WARN] failed to download %s: %s", entry.VideoID, downErr)
continue
}
if short, duration := s.isShort(file); short {
allStats.ignored++
log.Printf("[INFO] skip short file %s (%v): %s, %s", file, duration, entry.VideoID, entry.String())
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
continue
}
// update metadata
if tagsErr := s.updateMp3Tags(file, entry, feedInfo); tagsErr != nil {
log.Printf("[WARN] failed to update metadata for %s: %s", entry.VideoID, tagsErr)
}
processed++
fsize := 0
if fi, err := os.Stat(file); err == nil {
fsize = int(fi.Size())
} else {
log.Printf("[WARN] failed to get file size for %s: %v", file, err)
}
log.Printf("[INFO] downloaded %s (%s) to %s, size: %d, channel: %+v", entry.VideoID, entry.Title, file, fsize, feedInfo)
entry = s.update(entry, file, feedInfo)
ok, saveErr := s.Store.Save(entry)
if saveErr != nil {
return errors.Wrapf(saveErr, "failed to save entry %+v", entry)
}
if !ok {
log.Printf("[WARN] attempt to save dup entry %+v", entry)
}
changed = true
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
allStats.added++
log.Printf("[INFO] saved %s (%s) to %s, channel: %+v", entry.VideoID, entry.Title, file, feedInfo)
}
allStats.processed += processed
if changed {
removed := s.removeOld(feedInfo)
allStats.removed += removed
// save rss feed to fs if there are new entries
rss, rssErr := s.RSSFeed(feedInfo)
if rssErr != nil {
log.Printf("[WARN] failed to generate rss for %s: %s", feedInfo.Name, rssErr)
} else {
if err := s.RSSFileStore.Save(feedInfo.ID, rss); err != nil {
log.Printf("[WARN] failed to save rss for %s: %s", feedInfo.Name, err)
}
}
}
}
log.Printf("[INFO] all channels processed - channels: %d, %s, lifetime: %d, feed size: %d",
len(s.Feeds), allStats.String(), s.Store.CountProcessed(), s.countAllEntries())
newestEntry := s.newestEntry()
log.Printf("[INFO] last entry: %s", newestEntry.String())
return nil
}
// StoreRSS saves RSS feed to file
func (s *Service) StoreRSS(chanID, rss string) error {
return s.RSSFileStore.Save(chanID, rss)
}
// RemoveEntry deletes entry from store. Doesn't remove the file
func (s *Service) RemoveEntry(entry ytfeed.Entry) error {
if err := s.Store.ResetProcessed(entry); err != nil {
return errors.Wrapf(err, "failed to reset processed entry %s", entry.VideoID)
}
if err := s.Store.Remove(entry); err != nil {
return errors.Wrapf(err, "failed to remove entry %s", entry.VideoID)
}
return nil
}
// isNew checks if entry already processed
func (s *Service) isNew(entry ytfeed.Entry, fi FeedInfo) (ok bool, err error) {
// check if entry already exists in store
// this method won't work after migration to locally altered published ts but has to stay for now
// to avoid false-positives on old entries that never got set with SetProcessed
exists, exErr := s.Store.Exist(entry)
if exErr != nil {
return false, errors.Wrapf(exErr, "failed to check if entry %s exists", entry.VideoID)
}
if exists {
return false, nil
}
// check if we already processed this entry.
// this is needed to avoid infinite get/remove loop when the original feed is updated in place.
// after migration to locally altered published ts, it is also the primary way to detect already processed entries
found, _, procErr := s.Store.CheckProcessed(entry)
if procErr != nil {
log.Printf("[WARN] can't get processed status for %s, %+v", entry.VideoID, fi)
}
if procErr == nil && found {
return false, nil
}
return true, nil
}
// isAllowed checks if entry matches all filters for the channel feed
func (s *Service) isAllowed(entry ytfeed.Entry, fi FeedInfo) (ok bool, err error) {
matchedIncludeFilter := true
if fi.Filter.Include != "" {
matchedIncludeFilter, err = regexp.MatchString(fi.Filter.Include, entry.Title)
if err != nil {
return false, errors.Wrapf(err, "failed to check if entry %s matches include filter", entry.VideoID)
}
}
matchedExcludeFilter := false
if fi.Filter.Exclude != "" {
matchedExcludeFilter, err = regexp.MatchString(fi.Filter.Exclude, entry.Title)
if err != nil {
return false, errors.Wrapf(err, "failed to check if entry %s matches exclude filter", entry.VideoID)
}
}
return matchedIncludeFilter && !matchedExcludeFilter, nil
}
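// filterExample is an illustrative sketch (not part of the original): it shows
// how include/exclude regexps combine in isAllowed — an exclude match always
// wins, even when the include pattern matches too. Patterns and title are made
// up for the example.
func filterExample(s *Service) (bool, error) {
	fi := FeedInfo{Name: "demo", Filter: FeedFilter{Include: "(?i)podcast", Exclude: "(?i)teaser"}}
	entry := ytfeed.Entry{Title: "Podcast #42 teaser"}
	return s.isAllowed(entry, fi) // false, nil: excluded despite matching include
}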
func (s *Service) isShort(file string) (bool, time.Duration) {
if s.SkipShorts.Seconds() > 0 {
// skip shorts if duration is less than SkipShorts
duration := s.DurationService.File(file)
if duration > 0 && duration < int(s.SkipShorts.Seconds()) {
return true, time.Duration(duration) * time.Second
}
}
return false, 0
}
// update sets entry file name and resets published ts
func (s *Service) update(entry ytfeed.Entry, file string, fi FeedInfo) ytfeed.Entry {
entry.File = file
// only reset time if published not too long ago
// this is done to avoid initial set of entries added with a new channel to the top of the feed
if time.Since(entry.Published) < time.Hour*24 {
log.Printf("[DEBUG] reset published time for %s, from %s to %s (%v), %s",
entry.VideoID, entry.Published.Format(time.RFC3339), time.Now().Format(time.RFC3339),
time.Since(entry.Published), entry.String())
entry.Published = time.Now() // reset published ts to prevent possible out-of-order entries
} else {
log.Printf("[DEBUG] keep published time for %s, %s", entry.VideoID, entry.Published.Format(time.RFC3339))
}
if !strings.Contains(entry.Title, fi.Name) { // if title doesn't contain channel name, add it
entry.Title = fi.Name + ": " + entry.Title
}
entry.Duration = s.DurationService.File(file)
log.Printf("[DEBUG] updated entry: %s", entry.String())
return entry
}
// removeOld deletes old entries from store and corresponding files
func (s *Service) removeOld(fi FeedInfo) int {
removed := 0
keep := s.keep(fi)
files, err := s.Store.RemoveOld(fi.ID, keep+1)
if err != nil { // even with error we get a list of files to remove
log.Printf("[WARN] failed to remove some old meta data for %s, %v", fi.ID, err)
}
for _, f := range files {
if e := os.Remove(f); e != nil {
log.Printf("[WARN] failed to remove file %s: %v", f, e)
continue
}
removed++
log.Printf("[INFO] removed %s for %s (%s)", f, fi.ID, fi.Name)
}
return removed
}
func (s *Service) keep(fi FeedInfo) int {
keep := s.KeepPerChannel
if fi.Keep > 0 {
keep = fi.Keep
}
return keep
}
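// keepExample is an illustrative sketch (not part of the original): a per-feed
// Keep value overrides the service-wide KeepPerChannel default when set.
func keepExample() int {
	s := &Service{KeepPerChannel: 10}
	return s.keep(FeedInfo{Keep: 3}) // 3; with Keep left at 0 it would return 10
}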
func (s *Service) makeFileName(entry ytfeed.Entry) string {
h := sha1.New()
if _, err := h.Write([]byte(entry.UID())); err != nil {
return uuid.New().String()
}
return fmt.Sprintf("%x", h.Sum(nil))
}
// totalEntriesToKeep returns total number of entries to keep, summing all channels' keep values
func (s *Service) | () (res int) {
for _, fi := range s.Feeds {
res += s.keep(fi)
}
return res
}
// countAllEntries returns total number of entries across all channels, respects keep settings
func (s *Service) countAllEntries() int {
var result int
for _, fi := range s.Feeds {
if entries, err := s.Store.Load(fi.ID, s.keep(fi)); err == nil {
result += len(entries)
}
}
return result
}
// newestEntry returns the newest entry across all channels, respects keep settings
func (s *Service) newestEntry() ytfeed.Entry {
entries := []ytfeed.Entry{}
for _, fi := range s.Feeds {
if recs, err := s.Store.Load(fi.ID, 1); err == nil {
entries = append(entries, recs...)
}
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].Published.After(entries[j].Published)
})
if len(entries) == 0 {
return ytfeed.Entry{}
}
return entries[0]
}
// oldestEntry returns the oldest entry from all channels, respecting keep settings
func (s *Service) oldestEntry() ytfeed.Entry {
entries := []ytfeed.Entry{}
for _, fi := range s.Feeds {
if recs, err := s.Store.Load(fi.ID, s.keep(fi)); err == nil {
entries = append(entries, recs...)
}
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].Published.Before(entries[j].Published)
})
if len(entries) == 0 {
return ytfeed.Entry{}
}
return entries[0]
}
func (s *Service) updateMp3Tags(file string, entry ytfeed.Entry, fi FeedInfo) error {
fh, err := id3v2.Open(file, id3v2.Options{Parse: false})
if err != nil {
return errors.Wrapf(err, "failed to open file %s", file)
}
defer fh.Close()
fh.SetTitle(entry.Title)
fh.SetArtist(entry.Author.Name)
fh.SetAlbum(fi.Name)
fh.SetGenre("podcast")
fh.SetYear(entry.Published.Format("2006"))
fh.AddTextFrame(fh.CommonID("Recording time"), fh.DefaultEncoding(), entry.Published.Format("20060102T150405"))
if err = fh.Save(); err != nil {
return errors.Wrapf(err, "failed to close file %s", file)
}
return nil
}
type stats struct {
entries int
processed int
added int
removed int
ignored int
skipped int
}
func (st stats) String() string {
return fmt.Sprintf("entries: %d, processed: %d, updated: %d, removed: %d, ignored: %d, skipped: %d",
st.entries, st.processed, st.added, st.removed, st.ignored, st.skipped)
}
| totalEntriesToKeep | identifier_name |
service.go | // Package youtube provides loading audio from video files for given youtube channels
package youtube
import (
"context"
"crypto/sha1"
"encoding/xml"
"fmt"
"os"
"path"
"regexp"
"sort"
"strings"
"time"
"github.com/bogem/id3v2/v2"
log "github.com/go-pkgz/lgr"
"github.com/google/uuid"
"github.com/pkg/errors"
rssfeed "github.com/umputun/feed-master/app/feed"
ytfeed "github.com/umputun/feed-master/app/youtube/feed"
)
//go:generate moq -out mocks/downloader.go -pkg mocks -skip-ensure -fmt goimports . DownloaderService
//go:generate moq -out mocks/channel.go -pkg mocks -skip-ensure -fmt goimports . ChannelService
//go:generate moq -out mocks/store.go -pkg mocks -skip-ensure -fmt goimports . StoreService
//go:generate moq -out mocks/duration.go -pkg mocks -skip-ensure -fmt goimports . DurationService
// Service loads audio from youtube channels
type Service struct {
Feeds []FeedInfo
Downloader DownloaderService
ChannelService ChannelService
Store StoreService
CheckDuration time.Duration
RSSFileStore RSSFileStore
DurationService DurationService
KeepPerChannel int
RootURL string
SkipShorts time.Duration
}
// FeedInfo contains channel or feed ID, readable name and other per-feed info
type FeedInfo struct {
Name string `yaml:"name"`
ID string `yaml:"id"`
Type ytfeed.Type `yaml:"type"`
Keep int `yaml:"keep"`
Language string `yaml:"lang"`
Filter FeedFilter `yaml:"filter"`
}
// FeedFilter contains filter criteria for the feed
type FeedFilter struct {
Include string `yaml:"include"`
Exclude string `yaml:"exclude"`
}
// DownloaderService is an interface for downloading audio from youtube
type DownloaderService interface {
Get(ctx context.Context, id string, fname string) (file string, err error)
}
// ChannelService is an interface for getting channel entries, i.e. the list of videos
type ChannelService interface {
Get(ctx context.Context, chanID string, feedType ytfeed.Type) ([]ytfeed.Entry, error)
}
// StoreService is an interface for storing and loading metadata about downloaded audio
type StoreService interface {
Save(entry ytfeed.Entry) (bool, error)
Load(channelID string, max int) ([]ytfeed.Entry, error)
Exist(entry ytfeed.Entry) (bool, error)
RemoveOld(channelID string, keep int) ([]string, error)
Remove(entry ytfeed.Entry) error
SetProcessed(entry ytfeed.Entry) error
ResetProcessed(entry ytfeed.Entry) error
CheckProcessed(entry ytfeed.Entry) (found bool, ts time.Time, err error)
CountProcessed() (count int)
}
// DurationService is an interface for getting duration of audio file
type DurationService interface {
File(fname string) int
}
// Do is a blocking function that downloads audio from youtube channels and updates metadata
func (s *Service) Do(ctx context.Context) error {
log.Printf("[INFO] starting youtube service")
if s.SkipShorts > 0 {
log.Printf("[DEBUG] skip youtube episodes shorter than %v", s.SkipShorts)
}
for _, f := range s.Feeds {
log.Printf("[INFO] youtube feed %+v", f)
}
tick := time.NewTicker(s.CheckDuration)
defer tick.Stop()
if err := s.procChannels(ctx); err != nil {
return errors.Wrap(err, "failed to process channels")
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-tick.C:
if err := s.procChannels(ctx); err != nil {
return errors.Wrap(err, "failed to process channels")
}
}
}
}
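// runExample is an illustrative usage sketch (not part of the original): Do
// blocks until its context ends, so callers typically bound it with a context.
// The timeout value is an assumption made up for the example.
func runExample(svc *Service) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
	defer cancel()
	if err := svc.Do(ctx); err != nil {
		log.Printf("[WARN] youtube service stopped: %v", err)
	}
}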
// RSSFeed generates RSS feed for given channel
func (s *Service) RSSFeed(fi FeedInfo) (string, error) {
entries, err := s.Store.Load(fi.ID, s.keep(fi))
if err != nil {
return "", errors.Wrap(err, "failed to get channel entries")
}
if len(entries) == 0 {
return "", nil
}
items := []rssfeed.Item{}
for _, entry := range entries {
fileURL := s.RootURL + "/" + path.Base(entry.File)
var fileSize int
if fileInfo, fiErr := os.Stat(entry.File); fiErr != nil {
log.Printf("[WARN] failed to get file size for %s (%s %s): %v", entry.File, entry.VideoID, entry.Title, fiErr)
} else {
fileSize = int(fileInfo.Size())
}
duration := ""
if entry.Duration > 0 {
duration = fmt.Sprintf("%d", entry.Duration)
}
items = append(items, rssfeed.Item{
Title: entry.Title,
Description: entry.Media.Description,
Link: entry.Link.Href,
PubDate: entry.Published.In(time.UTC).Format(time.RFC1123Z),
GUID: entry.ChannelID + "::" + entry.VideoID,
Author: entry.Author.Name,
Enclosure: rssfeed.Enclosure{
URL: fileURL,
Type: "audio/mpeg",
Length: fileSize,
},
Duration: duration,
DT: time.Now(),
})
}
rss := rssfeed.Rss2{
Version: "2.0",
NsItunes: "http://www.itunes.com/dtds/podcast-1.0.dtd",
NsMedia: "http://search.yahoo.com/mrss/",
ItemList: items,
Title: fi.Name,
Description: "generated by feed-master",
Link: entries[0].Author.URI,
PubDate: items[0].PubDate,
LastBuildDate: time.Now().Format(time.RFC1123Z),
Language: fi.Language,
ItunesAuthor: entries[0].Author.Name,
ItunesExplicit: "no",
}
// set image from channel as rss thumbnail
// TODO: we may want to load it locally in case if youtube doesn't like such remote usage of images
if image := entries[0].Media.Thumbnail.URL; image != "" {
rss.ItunesImage = &rssfeed.ItunesImg{URL: image}
rss.MediaThumbnail = &rssfeed.MediaThumbnail{URL: image}
}
if fi.Type == ytfeed.FTPlaylist {
rss.Link = "https://www.youtube.com/playlist?list=" + fi.ID
}
b, err := xml.MarshalIndent(&rss, "", " ")
if err != nil {
return "", errors.Wrap(err, "failed to marshal rss")
}
res := string(b)
// this hack is to avoid having different items for marshal and unmarshal due to the "itunes" namespace
res = strings.Replace(res, "<duration>", "<itunes:duration>", -1)
res = strings.Replace(res, "</duration>", "</itunes:duration>", -1)
return res, nil
}
// procChannels processes all channels, downloads audio, updates metadata and stores RSS
func (s *Service) procChannels(ctx context.Context) error {
var allStats stats
for _, feedInfo := range s.Feeds {
entries, err := s.ChannelService.Get(ctx, feedInfo.ID, feedInfo.Type)
if err != nil {
log.Printf("[WARN] failed to get channel entries for %s: %s", feedInfo.ID, err)
continue
}
log.Printf("[INFO] got %d entries for %s, limit to %d", len(entries), feedInfo.Name, s.keep(feedInfo))
changed, processed := false, 0
for i, entry := range entries {
// exit right away if context is done
select {
case <-ctx.Done():
return ctx.Err()
default:
}
allStats.entries++
if processed >= s.keep(feedInfo) {
break
}
isAllowed, err := s.isAllowed(entry, feedInfo)
if err != nil {
return errors.Wrapf(err, "failed to check if entry %s is relevant", entry.VideoID)
}
if !isAllowed {
log.Printf("[DEBUG] skipping filtered %s", entry.String())
allStats.ignored++
continue
}
ok, err := s.isNew(entry, feedInfo)
if err != nil {
return errors.Wrapf(err, "failed to check if entry %s exists", entry.VideoID)
}
if !ok {
allStats.skipped++
processed++
continue
}
// got new entry, but with very old timestamp. skip it if we have already reached max capacity
// (this is to eliminate the initial load) and this entry is older than the oldest one we have.
// Also marks it as processed as we don't want to process it again
oldestEntry := s.oldestEntry()
if entry.Published.Before(oldestEntry.Published) && s.countAllEntries() >= s.totalEntriesToKeep() {
allStats.ignored++
log.Printf("[INFO] skipping entry %s as it is older than the oldest one we have %s",
entry.String(), oldestEntry.String())
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
continue
}
log.Printf("[INFO] new entry [%d] %s, %s, %s, %s", i+1, entry.VideoID, entry.Title, feedInfo.Name, entry.String())
file, downErr := s.Downloader.Get(ctx, entry.VideoID, s.makeFileName(entry))
if downErr != nil {
allStats.ignored++
if downErr == ytfeed.ErrSkip { // downloader decided to skip this entry
log.Printf("[INFO] skipping %s", entry.String())
continue
}
log.Printf("[WARN] failed to download %s: %s", entry.VideoID, downErr)
continue
}
if short, duration := s.isShort(file); short {
allStats.ignored++
log.Printf("[INFO] skip short file %s (%v): %s, %s", file, duration, entry.VideoID, entry.String())
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
continue
}
// update metadata
if tagsErr := s.updateMp3Tags(file, entry, feedInfo); tagsErr != nil {
log.Printf("[WARN] failed to update metadata for %s: %s", entry.VideoID, tagsErr)
}
processed++
fsize := 0
if fi, err := os.Stat(file); err == nil {
fsize = int(fi.Size())
} else {
log.Printf("[WARN] failed to get file size for %s: %v", file, err)
}
log.Printf("[INFO] downloaded %s (%s) to %s, size: %d, channel: %+v", entry.VideoID, entry.Title, file, fsize, feedInfo)
entry = s.update(entry, file, feedInfo)
ok, saveErr := s.Store.Save(entry)
if saveErr != nil {
return errors.Wrapf(saveErr, "failed to save entry %+v", entry)
}
if !ok {
log.Printf("[WARN] attempt to save dup entry %+v", entry)
}
changed = true
if procErr := s.Store.SetProcessed(entry); procErr != nil {
log.Printf("[WARN] failed to set processed status for %s: %v", entry.VideoID, procErr)
}
allStats.added++
log.Printf("[INFO] saved %s (%s) to %s, channel: %+v", entry.VideoID, entry.Title, file, feedInfo)
}
allStats.processed += processed
if changed {
removed := s.removeOld(feedInfo)
allStats.removed += removed
// save rss feed to fs if there are new entries
rss, rssErr := s.RSSFeed(feedInfo)
if rssErr != nil {
log.Printf("[WARN] failed to generate rss for %s: %s", feedInfo.Name, rssErr)
} else {
if err := s.RSSFileStore.Save(feedInfo.ID, rss); err != nil {
log.Printf("[WARN] failed to save rss for %s: %s", feedInfo.Name, err)
}
}
}
}
log.Printf("[INFO] all channels processed - channels: %d, %s, lifetime: %d, feed size: %d",
len(s.Feeds), allStats.String(), s.Store.CountProcessed(), s.countAllEntries())
newestEntry := s.newestEntry()
log.Printf("[INFO] last entry: %s", newestEntry.String())
return nil
}
// StoreRSS saves RSS feed to file
func (s *Service) StoreRSS(chanID, rss string) error {
return s.RSSFileStore.Save(chanID, rss)
}
// RemoveEntry deletes entry from store. Doesn't remove the file
func (s *Service) RemoveEntry(entry ytfeed.Entry) error {
if err := s.Store.ResetProcessed(entry); err != nil {
return errors.Wrapf(err, "failed to reset processed entry %s", entry.VideoID)
}
if err := s.Store.Remove(entry); err != nil {
return errors.Wrapf(err, "failed to remove entry %s", entry.VideoID)
}
return nil
}
// isNew checks if entry already processed
func (s *Service) isNew(entry ytfeed.Entry, fi FeedInfo) (ok bool, err error) {
// check if entry already exists in store
// this method won't work after migration to locally altered published ts but has to stay for now
// to avoid false-positives on old entries that never got set with SetProcessed
exists, exErr := s.Store.Exist(entry)
if exErr != nil {
return false, errors.Wrapf(exErr, "failed to check if entry %s exists", entry.VideoID)
}
if exists {
return false, nil
}
// check if we already processed this entry.
// this is needed to avoid infinite get/remove loop when the original feed is updated in place.
// after migration to locally altered published ts, it is also the primary way to detect already processed entries
found, _, procErr := s.Store.CheckProcessed(entry)
if procErr != nil {
log.Printf("[WARN] can't get processed status for %s, %+v", entry.VideoID, fi)
}
if procErr == nil && found {
return false, nil
}
return true, nil
}
// isAllowed checks if entry matches all filters for the channel feed
func (s *Service) isAllowed(entry ytfeed.Entry, fi FeedInfo) (ok bool, err error) {
matchedIncludeFilter := true
if fi.Filter.Include != "" {
matchedIncludeFilter, err = regexp.MatchString(fi.Filter.Include, entry.Title)
if err != nil {
return false, errors.Wrapf(err, "failed to check if entry %s matches include filter", entry.VideoID)
}
}
matchedExcludeFilter := false
if fi.Filter.Exclude != "" {
matchedExcludeFilter, err = regexp.MatchString(fi.Filter.Exclude, entry.Title)
if err != nil {
return false, errors.Wrapf(err, "failed to check if entry %s matches exclude filter", entry.VideoID)
}
}
return matchedIncludeFilter && !matchedExcludeFilter, nil
}
func (s *Service) isShort(file string) (bool, time.Duration) {
if s.SkipShorts.Seconds() > 0 {
// skip shorts if duration is less than SkipShorts
duration := s.DurationService.File(file)
if duration > 0 && duration < int(s.SkipShorts.Seconds()) {
return true, time.Duration(duration) * time.Second
}
}
return false, 0
}
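// shortExample is an illustrative sketch (not part of the original): with
// SkipShorts set to two minutes, any file whose reported duration is between
// 1 and 119 seconds is treated as short. The file path is made up.
func shortExample(s *Service) bool {
	s.SkipShorts = 2 * time.Minute
	short, _ := s.isShort("episode.mp3")
	return short
}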
// update sets entry file name and resets published ts
func (s *Service) update(entry ytfeed.Entry, file string, fi FeedInfo) ytfeed.Entry {
entry.File = file
// only reset time if published not too long ago
// this is done to avoid initial set of entries added with a new channel to the top of the feed
if time.Since(entry.Published) < time.Hour*24 {
log.Printf("[DEBUG] reset published time for %s, from %s to %s (%v), %s",
entry.VideoID, entry.Published.Format(time.RFC3339), time.Now().Format(time.RFC3339),
time.Since(entry.Published), entry.String())
entry.Published = time.Now() // reset published ts to prevent possible out-of-order entries
} else {
log.Printf("[DEBUG] keep published time for %s, %s", entry.VideoID, entry.Published.Format(time.RFC3339))
}
if !strings.Contains(entry.Title, fi.Name) { // if title doesn't contain channel name, add it
entry.Title = fi.Name + ": " + entry.Title
}
entry.Duration = s.DurationService.File(file) | return entry
}
// removeOld deletes old entries from store and corresponding files
func (s *Service) removeOld(fi FeedInfo) int {
removed := 0
keep := s.keep(fi)
files, err := s.Store.RemoveOld(fi.ID, keep+1)
if err != nil { // even with error we get a list of files to remove
log.Printf("[WARN] failed to remove some old meta data for %s, %v", fi.ID, err)
}
for _, f := range files {
if e := os.Remove(f); e != nil {
log.Printf("[WARN] failed to remove file %s: %v", f, e)
continue
}
removed++
log.Printf("[INFO] removed %s for %s (%s)", f, fi.ID, fi.Name)
}
return removed
}
func (s *Service) keep(fi FeedInfo) int {
keep := s.KeepPerChannel
if fi.Keep > 0 {
keep = fi.Keep
}
return keep
}
func (s *Service) makeFileName(entry ytfeed.Entry) string {
h := sha1.New()
if _, err := h.Write([]byte(entry.UID())); err != nil {
return uuid.New().String()
}
return fmt.Sprintf("%x", h.Sum(nil))
}
// totalEntriesToKeep returns total number of entries to keep, summing all channels' keep values
func (s *Service) totalEntriesToKeep() (res int) {
for _, fi := range s.Feeds {
res += s.keep(fi)
}
return res
}
// countAllEntries returns total number of entries across all channels, respects keep settings
func (s *Service) countAllEntries() int {
var result int
for _, fi := range s.Feeds {
if entries, err := s.Store.Load(fi.ID, s.keep(fi)); err == nil {
result += len(entries)
}
}
return result
}
// newestEntry returns the newest entry across all channels, respects keep settings
func (s *Service) newestEntry() ytfeed.Entry {
entries := []ytfeed.Entry{}
for _, fi := range s.Feeds {
if recs, err := s.Store.Load(fi.ID, 1); err == nil {
entries = append(entries, recs...)
}
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].Published.After(entries[j].Published)
})
if len(entries) == 0 {
return ytfeed.Entry{}
}
return entries[0]
}
// oldestEntry returns the oldest entry from all channels, respecting keep settings
func (s *Service) oldestEntry() ytfeed.Entry {
entries := []ytfeed.Entry{}
for _, fi := range s.Feeds {
if recs, err := s.Store.Load(fi.ID, s.keep(fi)); err == nil {
entries = append(entries, recs...)
}
}
sort.Slice(entries, func(i, j int) bool {
return entries[i].Published.Before(entries[j].Published)
})
if len(entries) == 0 {
return ytfeed.Entry{}
}
return entries[0]
}
func (s *Service) updateMp3Tags(file string, entry ytfeed.Entry, fi FeedInfo) error {
fh, err := id3v2.Open(file, id3v2.Options{Parse: false})
if err != nil {
return errors.Wrapf(err, "failed to open file %s", file)
}
defer fh.Close()
fh.SetTitle(entry.Title)
fh.SetArtist(entry.Author.Name)
fh.SetAlbum(fi.Name)
fh.SetGenre("podcast")
fh.SetYear(entry.Published.Format("2006"))
fh.AddTextFrame(fh.CommonID("Recording time"), fh.DefaultEncoding(), entry.Published.Format("20060102T150405"))
if err = fh.Save(); err != nil {
return errors.Wrapf(err, "failed to close file %s", file)
}
return nil
}
type stats struct {
entries int
processed int
added int
removed int
ignored int
skipped int
}
func (st stats) String() string {
return fmt.Sprintf("entries: %d, processed: %d, updated: %d, removed: %d, ignored: %d, skipped: %d",
st.entries, st.processed, st.added, st.removed, st.ignored, st.skipped)
} | log.Printf("[DEBUG] updated entry: %s", entry.String()) | random_line_split |
mod.rs | use std::mem;
use std::time::SystemTime;
#[cfg(feature="dynamic_mem")]
const MAX_MEMORY_SLOTS: usize = 1024 * 1024 * 2;
#[cfg(not(feature="dynamic_mem"))]
const MAX_MEMORY_SLOTS: usize = 1024 * 128;
type Bits = u128;
const MARK_BITS_PER_SLOT: usize = mem::size_of::<Bits>();
const MARK_BITS: usize = MAX_MEMORY_SLOTS / MARK_BITS_PER_SLOT;
#[cfg(feature="dynamic_mem")]
type Mem = Vec<usize>;
#[cfg(not(feature="dynamic_mem"))]
type Mem = [usize; MAX_MEMORY_SLOTS] ;
pub const OBJECT_HEADER_SLOTS: usize = 1;
pub struct Memory {
head: usize,
mem: Mem,
mark_bits: [u128; MARK_BITS],
roots: Vec<usize>,
gc_count: usize,
allocates: usize,
last_gc_ms: u128,
total_gc_ms: u128,
lastgc_live_mem: usize,
lastgc_free_mem: usize,
show_gc: bool,
show_allocates: bool,
show_heap_map: bool,
show_free_list: bool,
}
impl<'a> IntoIterator for &'a Memory {
type Item = usize;
type IntoIter = MemoryIntoIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
MemoryIntoIterator {
mem: self,
scan: 0,
free: 0,
}
}
}
pub struct MemoryIntoIterator<'a> {
mem: &'a Memory,
scan: usize,
free: usize,
}
impl<'a> Iterator for MemoryIntoIterator<'a> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.scan == 0 {
self.scan = 1;
self.free = self.mem.head;
} else {
self.scan = self.mem.next_object_in_heap(self.scan);
}
while self.scan == self.free {
self.scan = self.mem.next_object_in_heap(self.free);
self.free = self.mem.get_fl_next(self.free);
}
if self.scan >= MAX_MEMORY_SLOTS - 1 {
return None;
} else {
return Some(self.scan);
}
}
}
#[cfg(feature = "dynamic_mem")]
fn im() -> Mem {
return vec![0; MAX_MEMORY_SLOTS];
}
#[cfg(not(feature = "dynamic_mem"))]
fn im() -> Mem {
return [0; MAX_MEMORY_SLOTS];
}
impl Memory {
pub fn initialze_memory() -> Memory {
let mut mem = Memory {
head: 1,
mem: im(),
mark_bits: [0; MARK_BITS],
roots: Vec::new(),
gc_count: 0,
allocates: 0,
lastgc_live_mem: 0,
lastgc_free_mem: 0,
last_gc_ms: 0,
total_gc_ms: 0,
show_gc: false,
show_allocates: false,
show_heap_map: false,
show_free_list: false,
};
mem.set_size(0, MAX_MEMORY_SLOTS); // magic memory at zero is heap_size
mem.set_size(mem.head, MAX_MEMORY_SLOTS - 2); // set initial object size as all heap
mem.set_fl_next(mem.head, 0);
mem
}
// objects API
// allocate_object (size) --- size is number of indexable slots
// add/remove_root() --- add to or remove from gc root set.
// element_size() - number of indexable slots - get_size() - OBJECT_HEADER_SLOTS
// at_put - store into object slot at index
// at -- fetch object slot at index
pub fn allocate_object(&mut self, unrounded_size: usize) -> usize {
self.allocates += 1;
let mut result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.gc();
result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.print_freelist();
self.print_heap();
panic!("out of memory");
}
}
result
}
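    // Illustrative sketch, not part of the original module: a minimal walk
    // through the object API above. Slot values are treated as references by
    // the collector, so the example stores an object handle rather than a raw
    // number before triggering gc().
    #[allow(dead_code)]
    pub fn example_usage() {
        let mut mem = Memory::initialze_memory();
        let parent = mem.allocate_object(4); // 4 indexable slots
        let child = mem.allocate_object(2);
        mem.add_root(parent);
        mem.at_put(parent, 0, child); // slot 0 now references child
        assert_eq!(mem.at(parent, 0), child);
        mem.gc(); // parent is rooted, child reachable from it - both survive
        mem.remove_root(parent);
    }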
pub fn live_objects(&self) -> MemoryIntoIterator {
return self.into_iter();
}
pub fn add_root(&mut self, obj: usize) {
self.roots.push(obj);
}
pub fn remove_root(&mut self, obj: usize) {
for i in 0..self.roots.len() {
if obj == self.roots[i] {
self.roots.remove(i);
return;
}
}
}
pub fn at_put(&mut self, obj: usize, index: usize, value: usize) {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object =&mut self.mem[ base.. base + slots ];
object[index] = value;
}
pub fn at(&self, obj: usize, index: usize) -> usize {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object =&self.mem[ base.. base + slots ];
return object[index];
}
pub fn element_size(&self, obj: usize) -> usize {
return self.mem[obj] - OBJECT_HEADER_SLOTS;
}
pub fn enable_show_heap_map(&mut self, enabled: bool) {
self.show_heap_map = enabled;
}
pub fn enable_show_freelist(&mut self, enabled: bool) {
self.show_free_list = enabled;
}
pub fn enable_show_gc(&mut self, enabled: bool) {
self.show_gc = enabled;
}
pub fn enable_show_allocates(&mut self, enabled: bool) {
self.show_allocates = enabled;
}
fn rounded_size(unrounded_size: usize) -> usize {
(unrounded_size + 1) & !(1) // rounded to 2
}
fn get_size(&self, obj: usize) -> usize {
return self.mem[obj];
}
fn set_size(&mut self, obj: usize, size: usize) {
self.mem[obj] = size;
}
fn next_object_in_heap(&self, obj: usize) -> usize {
return obj + self.get_size(obj);
}
//free list is linked off the first slot
fn get_fl_next(&self, obj: usize) -> usize {
return self.mem[obj + 1];
}
fn | (&mut self, obj: usize, next: usize) {
self.mem[obj + 1] = next;
}
fn mark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] |= 1 << (obj % MARK_BITS_PER_SLOT);
}
fn unmark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] &= !(1 << (obj % MARK_BITS_PER_SLOT));
}
fn is_marked(&self, obj: usize) -> bool {
(self.mark_bits[obj / MARK_BITS_PER_SLOT] & (1 << (obj % MARK_BITS_PER_SLOT))) != 0
}
fn allocate_object_nocompress(&mut self, unrounded_size: usize) -> usize {
let size = Memory::rounded_size(unrounded_size + OBJECT_HEADER_SLOTS);
let mut free = self.head;
while free != 0 {
let avail = self.get_size(free);
if avail > size {
let newsize = avail - size;
if newsize < 2 {
panic!("remaining size is less than 2");
}
// shrink current free to smaller size
self.set_size(free, newsize);
// new object is on the end of current free object
let new_object = free + newsize;
self.set_size(new_object, size);
for index in 0..self.element_size(new_object) {
self.at_put(new_object, index, 0);
}
if self.show_allocates {
println!(
"Success: allocate_object returning -> {} size {}",
new_object, size
);
}
if self.head != free {
if self.show_allocates {
println!("Reset head past intermediate free blocks \n");
let mut show = self.head;
while show != free {
println!("Abandon {} size {}\n", show, self.get_size(show));
show = self.get_fl_next(show);
}
}
self.head = free;
}
return new_object;
}
free = self.get_fl_next(free);
}
0
}
pub fn gc(&mut self) {
let start = SystemTime::now();
for i in 0..self.roots.len() {
self.mark_and_scan(self.roots[i]);
}
self.sweep();
self.gc_count += 1;
if self.show_gc {
self.print_gc_stats();
}
match start.elapsed() {
Ok(elapsed) => {
self.last_gc_ms = elapsed.as_millis();
self.total_gc_ms += self.last_gc_ms;
}
Err(e) => {
println!("Error: {:?}", e);
}
}
}
fn sweep(&mut self) {
let mut scan = 1;
self.head = 0;
let mut tail = self.head;
self.lastgc_free_mem = 0;
self.lastgc_live_mem = 0;
while scan < MAX_MEMORY_SLOTS - 1 {
if self.is_marked(scan) {
self.unmark_object(scan);
self.lastgc_live_mem += self.get_size(scan);
} else {
self.lastgc_free_mem += self.get_size(scan);
if tail == 0 {
self.head = scan;
self.set_fl_next(scan, 0);
tail = scan;
} else {
if self.next_object_in_heap(tail) == scan {
self.set_size(tail, self.get_size(tail) + self.get_size(scan));
} else {
self.set_fl_next(tail, scan);
self.set_fl_next(scan, 0);
tail = scan;
}
}
}
scan = self.next_object_in_heap(scan);
}
if self.show_free_list {
self.print_freelist();
}
if self.show_heap_map {
self.print_heap();
}
}
fn mark_and_scan(&mut self, object: usize) {
if object == 0 || self.is_marked(object) {
return;
}
let slots = self.get_size(object);
self.mark_object(object);
for i in OBJECT_HEADER_SLOTS..slots {
self.mark_and_scan(self.mem[object + i]);
}
}
pub fn print_gc_stats(&self) {
println!(
"{} gcs, {} object allocates, Last GC: Live {} Dead {} in {} ms, Lifetime GC {} ms\n",
self.gc_count,
self.allocates,
self.lastgc_live_mem,
self.lastgc_free_mem,
self.last_gc_ms,
self.total_gc_ms,
);
}
fn print_heap(&mut self) {
print!("\x1B[{};{}H", 1, 1);
let mut scan = 1;
let mut count = 0;
let mut free = self.head;
while scan < MAX_MEMORY_SLOTS - 1 {
// skip free ones, print x's //
let mut num_chars_to_print = 0;
let mut char_to_print = '?';
if scan == free {
while scan == free {
char_to_print = 'x';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(free);
free = self.get_fl_next(free);
}
} else {
char_to_print = '.';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(scan);
}
for _i in 1..num_chars_to_print / 2 {
print!("{}", char_to_print);
count += 1;
if count % 120 == 0 {
print!("\n");
}
}
}
self.print_gc_stats();
}
pub fn print_freelist(&mut self) {
println!("\nprint_freelist: Head = {}", self.head);
let mut free = self.head;
let mut count = 0;
let mut total_free = 0;
while free != 0 {
let size = self.get_size(free);
let next = self.get_fl_next(free);
total_free += self.get_size(free);
println!("{}: Free = {} {} slots next = {}", count, free, size, next);
free = next;
count += 1;
if count > MAX_MEMORY_SLOTS {
panic!()
}
}
println!(
"print_freelist {} elements, total free = {}\n",
count, total_free
);
}
}
| set_fl_next | identifier_name |
mod.rs | use std::mem;
use std::time::SystemTime; | #[cfg(not(feature="dynamic_mem"))]
const MAX_MEMORY_SLOTS: usize = 1024 * 128;
type Bits = u128;
const MARK_BITS_PER_SLOT: usize = mem::size_of::<Bits>();
const MARK_BITS: usize = MAX_MEMORY_SLOTS / MARK_BITS_PER_SLOT;
#[cfg(feature="dynamic_mem")]
type Mem = Vec<usize>;
#[cfg(not(feature="dynamic_mem"))]
type Mem = [usize; MAX_MEMORY_SLOTS] ;
pub const OBJECT_HEADER_SLOTS: usize = 1;
pub struct Memory {
head: usize,
mem: Mem,
mark_bits: [u128; MARK_BITS],
roots: Vec<usize>,
gc_count: usize,
allocates: usize,
last_gc_ms: u128,
total_gc_ms: u128,
lastgc_live_mem: usize,
lastgc_free_mem: usize,
show_gc: bool,
show_allocates: bool,
show_heap_map: bool,
show_free_list: bool,
}
impl<'a> IntoIterator for &'a Memory {
type Item = usize;
type IntoIter = MemoryIntoIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
MemoryIntoIterator {
mem: self,
scan: 0,
free: 0,
}
}
}
pub struct MemoryIntoIterator<'a> {
mem: &'a Memory,
scan: usize,
free: usize,
}
impl<'a> Iterator for MemoryIntoIterator<'a> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.scan == 0 {
self.scan = 1;
self.free = self.mem.head;
} else {
self.scan = self.mem.next_object_in_heap(self.scan);
}
while self.scan == self.free {
self.scan = self.mem.next_object_in_heap(self.free);
self.free = self.mem.get_fl_next(self.free);
}
if self.scan >= MAX_MEMORY_SLOTS - 1 {
return None;
} else {
return Some(self.scan);
}
}
}
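// Illustrative sketch, not part of the original module: the iterator above is
// what live_objects() hands out, so a debug dump of the heap can be written as
// a plain for-loop over object handles.
#[allow(dead_code)]
fn dump_live_objects(mem: &Memory) {
    for obj in mem.live_objects() {
        println!("object {} with {} data slots", obj, mem.element_size(obj));
    }
}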
#[cfg(feature = "dynamic_mem")]
fn im() -> Mem {
return vec![0; MAX_MEMORY_SLOTS];
}
#[cfg(not(feature = "dynamic_mem"))]
fn im() -> Mem {
return [0; MAX_MEMORY_SLOTS];
}
impl Memory {
pub fn initialze_memory() -> Memory {
let mut mem = Memory {
head: 1,
mem: im(),
mark_bits: [0; MARK_BITS],
roots: Vec::new(),
gc_count: 0,
allocates: 0,
lastgc_live_mem: 0,
lastgc_free_mem: 0,
last_gc_ms: 0,
total_gc_ms: 0,
show_gc: false,
show_allocates: false,
show_heap_map: false,
show_free_list: false,
};
mem.set_size(0, MAX_MEMORY_SLOTS); // magic memory at zero is heap_size
mem.set_size(mem.head, MAX_MEMORY_SLOTS - 2); // set initial object size as all heap
mem.set_fl_next(mem.head, 0);
mem
}
// objects API
// allocate_object (size) --- size is number of indexable slots
// add/remove_root() --- add to or remove from gc root set.
// element_size() - number of indexable slots - get_size() - OBJECT_HEADER_SLOTS
// at_put - store into object slot at index
// at -- fetch object slot at index
pub fn allocate_object(&mut self, unrounded_size: usize) -> usize {
self.allocates += 1;
let mut result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.gc();
result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.print_freelist();
self.print_heap();
panic!("out of memory");
}
}
result
}
pub fn live_objects(&self) -> MemoryIntoIterator {
return self.into_iter();
}
pub fn add_root(&mut self, obj: usize) {
self.roots.push(obj);
}
pub fn remove_root(&mut self, obj: usize) {
for i in 0..self.roots.len() {
if obj == self.roots[i] {
self.roots.remove(i);
return;
}
}
}
pub fn at_put(&mut self, obj: usize, index: usize, value: usize) {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object =&mut self.mem[ base.. base + slots ];
object[index] = value;
}
pub fn at(&self, obj: usize, index: usize) -> usize {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object =&self.mem[ base.. base + slots ];
return object[index];
}
pub fn element_size(&self, obj: usize) -> usize {
return self.mem[obj] - OBJECT_HEADER_SLOTS;
}
pub fn enable_show_heap_map(&mut self, enabled: bool) {
self.show_heap_map = enabled;
}
pub fn enable_show_freelist(&mut self, enabled: bool) {
self.show_free_list = enabled;
}
pub fn enable_show_gc(&mut self, enabled: bool) {
self.show_gc = enabled;
}
pub fn enable_show_allocates(&mut self, enabled: bool) {
self.show_allocates = enabled;
}
fn rounded_size(unrounded_size: usize) -> usize {
(unrounded_size + 1) & !(1) // rounded to 2
}
fn get_size(&self, obj: usize) -> usize {
return self.mem[obj];
}
fn set_size(&mut self, obj: usize, size: usize) {
self.mem[obj] = size;
}
fn next_object_in_heap(&self, obj: usize) -> usize {
return obj + self.get_size(obj);
}
//free list is linked off the first slot
fn get_fl_next(&self, obj: usize) -> usize {
return self.mem[obj + 1];
}
fn set_fl_next(&mut self, obj: usize, next: usize) {
self.mem[obj + 1] = next;
}
fn mark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] |= 1 << (obj % MARK_BITS_PER_SLOT);
}
fn unmark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] &= !(1 << (obj % MARK_BITS_PER_SLOT));
}
fn is_marked(&self, obj: usize) -> bool {
(self.mark_bits[obj / MARK_BITS_PER_SLOT] & (1 << (obj % MARK_BITS_PER_SLOT))) != 0
}
fn allocate_object_nocompress(&mut self, unrounded_size: usize) -> usize {
let size = Memory::rounded_size(unrounded_size + OBJECT_HEADER_SLOTS);
let mut free = self.head;
while free != 0 {
let avail = self.get_size(free);
if avail > size {
let newsize = avail - size;
if newsize < 2 {
panic!("remaining size is less than 2");
}
// shrink current free to smaller size
self.set_size(free, newsize);
// new object is on the end of current free object
let new_object = free + newsize;
self.set_size(new_object, size);
for index in 0..self.element_size(new_object) {
self.at_put(new_object, index, 0);
}
if self.show_allocates {
println!(
"Success: allocate_object returning -> {} size {}",
new_object, size
);
}
if self.head != free {
if self.show_allocates {
println!("Reset head past intermediate free blocks \n");
let mut show = self.head;
while show != free {
println!("Abandon {} size {}\n", show, self.get_size(show));
show = self.get_fl_next(show);
}
}
self.head = free;
}
return new_object;
}
free = self.get_fl_next(free);
}
0
}
pub fn gc(&mut self) {
let start = SystemTime::now();
for i in 0..self.roots.len() {
self.mark_and_scan(self.roots[i]);
}
self.sweep();
self.gc_count += 1;
if self.show_gc {
self.print_gc_stats();
}
match start.elapsed() {
Ok(elapsed) => {
self.last_gc_ms = elapsed.as_millis();
self.total_gc_ms += self.last_gc_ms;
}
Err(e) => {
println!("Error: {:?}", e);
}
}
}
fn sweep(&mut self) {
let mut scan = 1;
self.head = 0;
let mut tail = self.head;
self.lastgc_free_mem = 0;
self.lastgc_live_mem = 0;
while scan < MAX_MEMORY_SLOTS - 1 {
if self.is_marked(scan) {
self.unmark_object(scan);
self.lastgc_live_mem += self.get_size(scan);
} else {
self.lastgc_free_mem += self.get_size(scan);
if tail == 0 {
self.head = scan;
self.set_fl_next(scan, 0);
tail = scan;
} else {
if self.next_object_in_heap(tail) == scan {
self.set_size(tail, self.get_size(tail) + self.get_size(scan));
} else {
self.set_fl_next(tail, scan);
self.set_fl_next(scan, 0);
tail = scan;
}
}
}
scan = self.next_object_in_heap(scan);
}
if self.show_free_list {
self.print_freelist();
}
if self.show_heap_map {
self.print_heap();
}
}
fn mark_and_scan(&mut self, object: usize) {
if object == 0 || self.is_marked(object) {
return;
}
let slots = self.get_size(object);
self.mark_object(object);
for i in OBJECT_HEADER_SLOTS..slots {
self.mark_and_scan(self.mem[object + i]);
}
}
pub fn print_gc_stats(&self) {
println!(
"{} gcs, {} object allocates, Last GC: Live {} Dead {} in {} ms, Lifetime GC {} ms\n",
self.gc_count,
self.allocates,
self.lastgc_live_mem,
self.lastgc_free_mem,
self.last_gc_ms,
self.total_gc_ms,
);
}
fn print_heap(&mut self) {
print!("\x1B[{};{}H", 1, 1);
let mut scan = 1;
let mut count = 0;
let mut free = self.head;
while scan < MAX_MEMORY_SLOTS - 1 {
// skip free ones, print x's //
let mut num_chars_to_print = 0;
let mut char_to_print = '?';
if scan == free {
while scan == free {
char_to_print = 'x';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(free);
free = self.get_fl_next(free);
}
} else {
char_to_print = '.';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(scan);
}
for _i in 1..num_chars_to_print / 2 {
print!("{}", char_to_print);
count += 1;
if count % 120 == 0 {
print!("\n");
}
}
}
self.print_gc_stats();
}
pub fn print_freelist(&mut self) {
println!("\nprint_freelist: Head = {}", self.head);
let mut free = self.head;
let mut count = 0;
let mut total_free = 0;
while free != 0 {
let size = self.get_size(free);
let next = self.get_fl_next(free);
total_free += self.get_size(free);
println!("{}: Free = {} {} slots next = {}", count, free, size, next);
free = next;
count += 1;
if count > MAX_MEMORY_SLOTS {
panic!()
}
}
println!(
"print_freelist {} elements, total free = {}\n",
count, total_free
);
}
} |
#[cfg(feature="dynamic_mem")]
const MAX_MEMORY_SLOTS: usize = 1024 * 1024 * 2; | random_line_split |
mod.rs | use std::mem;
use std::time::SystemTime;
#[cfg(feature="dynamic_mem")]
const MAX_MEMORY_SLOTS: usize = 1024 * 1024 * 2;
#[cfg(not(feature="dynamic_mem"))]
const MAX_MEMORY_SLOTS: usize = 1024 * 128;
type Bits = u128;
const MARK_BITS_PER_SLOT: usize = mem::size_of::<Bits>();
const MARK_BITS: usize = MAX_MEMORY_SLOTS / MARK_BITS_PER_SLOT;
#[cfg(feature="dynamic_mem")]
type Mem = Vec<usize>;
#[cfg(not(feature="dynamic_mem"))]
type Mem = [usize; MAX_MEMORY_SLOTS] ;
pub const OBJECT_HEADER_SLOTS: usize = 1;
pub struct Memory {
head: usize,
mem: Mem,
mark_bits: [u128; MARK_BITS],
roots: Vec<usize>,
gc_count: usize,
allocates: usize,
last_gc_ms: u128,
total_gc_ms: u128,
lastgc_live_mem: usize,
lastgc_free_mem: usize,
show_gc: bool,
show_allocates: bool,
show_heap_map: bool,
show_free_list: bool,
}
impl<'a> IntoIterator for &'a Memory {
type Item = usize;
type IntoIter = MemoryIntoIterator<'a>;
fn into_iter(self) -> Self::IntoIter {
MemoryIntoIterator {
mem: self,
scan: 0,
free: 0,
}
}
}
pub struct MemoryIntoIterator<'a> {
mem: &'a Memory,
scan: usize,
free: usize,
}
impl<'a> Iterator for MemoryIntoIterator<'a> {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.scan == 0 {
self.scan = 1;
self.free = self.mem.head;
} else {
self.scan = self.mem.next_object_in_heap(self.scan);
}
while self.scan == self.free {
self.scan = self.mem.next_object_in_heap(self.free);
self.free = self.mem.get_fl_next(self.free);
}
if self.scan >= MAX_MEMORY_SLOTS - 1 {
return None;
} else {
return Some(self.scan);
}
}
}
#[cfg(feature = "dynamic_mem")]
fn im() -> Mem {
return vec![0; MAX_MEMORY_SLOTS];
}
#[cfg(not(feature = "dynamic_mem"))]
fn im() -> Mem {
return [0; MAX_MEMORY_SLOTS];
}
impl Memory {
pub fn initialze_memory() -> Memory {
let mut mem = Memory {
head: 1,
mem: im(),
mark_bits: [0; MARK_BITS],
roots: Vec::new(),
gc_count: 0,
allocates: 0,
lastgc_live_mem: 0,
lastgc_free_mem: 0,
last_gc_ms: 0,
total_gc_ms: 0,
show_gc: false,
show_allocates: false,
show_heap_map: false,
show_free_list: false,
};
mem.set_size(0, MAX_MEMORY_SLOTS); // magic memory at zero is heap_size
mem.set_size(mem.head, MAX_MEMORY_SLOTS - 2); // set initial object size as all heap
mem.set_fl_next(mem.head, 0);
mem
}
// objects API
// allocate_object (size) --- size is number of indexable slots
// add/remove_root() --- add to or remove from gc root set.
// element_size() - number of indexable slots - get_size() - OBJECT_HEADER_SLOTS
// at_put - store into object slot at index
// at -- fetch object slot at index
pub fn allocate_object(&mut self, unrounded_size: usize) -> usize {
self.allocates += 1;
let mut result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.gc();
result = self.allocate_object_nocompress(unrounded_size);
if result == 0 {
self.print_freelist();
self.print_heap();
panic!("out of memory");
}
}
result
}
pub fn live_objects(&self) -> MemoryIntoIterator {
return self.into_iter();
}
pub fn add_root(&mut self, obj: usize) {
self.roots.push(obj);
}
pub fn remove_root(&mut self, obj: usize) {
for i in 0..self.roots.len() {
if obj == self.roots[i] {
self.roots.remove(i);
return;
}
}
}
pub fn at_put(&mut self, obj: usize, index: usize, value: usize) {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object =&mut self.mem[ base.. base + slots ];
object[index] = value;
}
pub fn at(&self, obj: usize, index: usize) -> usize {
let slots = self.mem[obj];
let base = obj+OBJECT_HEADER_SLOTS;
let object =&self.mem[ base.. base + slots ];
return object[index];
}
pub fn element_size(&self, obj: usize) -> usize {
return self.mem[obj] - OBJECT_HEADER_SLOTS;
}
pub fn enable_show_heap_map(&mut self, enabled: bool) {
self.show_heap_map = enabled;
}
pub fn enable_show_freelist(&mut self, enabled: bool) {
self.show_free_list = enabled;
}
pub fn enable_show_gc(&mut self, enabled: bool) {
self.show_gc = enabled;
}
pub fn enable_show_allocates(&mut self, enabled: bool) {
self.show_allocates = enabled;
}
fn rounded_size(unrounded_size: usize) -> usize {
(unrounded_size + 1) & !(1) // rounded to 2
}
fn get_size(&self, obj: usize) -> usize {
return self.mem[obj];
}
fn set_size(&mut self, obj: usize, size: usize) {
self.mem[obj] = size;
}
fn next_object_in_heap(&self, obj: usize) -> usize {
return obj + self.get_size(obj);
}
//free list is linked off the first slot
fn get_fl_next(&self, obj: usize) -> usize {
return self.mem[obj + 1];
}
fn set_fl_next(&mut self, obj: usize, next: usize) {
self.mem[obj + 1] = next;
}
fn mark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] |= 1 << (obj % MARK_BITS_PER_SLOT);
}
fn unmark_object(&mut self, obj: usize) {
self.mark_bits[obj / MARK_BITS_PER_SLOT] &= !(1 << (obj % MARK_BITS_PER_SLOT));
}
fn is_marked(&self, obj: usize) -> bool {
(self.mark_bits[obj / MARK_BITS_PER_SLOT] & (1 << (obj % MARK_BITS_PER_SLOT))) != 0
}
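    // Illustrative sketch, not part of the original module: the three methods
    // above address bit obj % MARK_BITS_PER_SLOT inside word obj / MARK_BITS_PER_SLOT,
    // so a mark/unmark round trip leaves the bitmap unchanged.
    #[allow(dead_code)]
    fn mark_bit_roundtrip_example(&mut self, obj: usize) {
        self.mark_object(obj);
        debug_assert!(self.is_marked(obj));
        self.unmark_object(obj);
        debug_assert!(!self.is_marked(obj));
    }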
fn allocate_object_nocompress(&mut self, unrounded_size: usize) -> usize {
let size = Memory::rounded_size(unrounded_size + OBJECT_HEADER_SLOTS);
let mut free = self.head;
while free != 0 {
let avail = self.get_size(free);
if avail > size {
let newsize = avail - size;
if newsize < 2 {
panic!("remaining size is less than 2");
}
// shrink current free to smaller size
self.set_size(free, newsize);
// new object is on the end of current free object
let new_object = free + newsize;
self.set_size(new_object, size);
for index in 0..self.element_size(new_object) {
self.at_put(new_object, index, 0);
}
if self.show_allocates {
println!(
"Success: allocate_object returning -> {} size {}",
new_object, size
);
}
if self.head != free {
if self.show_allocates {
println!("Reset head past intermediate free blocks \n");
let mut show = self.head;
while show != free {
println!("Abandon {} size {}\n", show, self.get_size(show));
show = self.get_fl_next(show);
}
}
self.head = free;
}
return new_object;
}
free = self.get_fl_next(free);
}
0
}
pub fn gc(&mut self) {
let start = SystemTime::now();
for i in 0..self.roots.len() {
self.mark_and_scan(self.roots[i]);
}
self.sweep();
self.gc_count += 1;
if self.show_gc {
self.print_gc_stats();
}
match start.elapsed() {
Ok(elapsed) => {
self.last_gc_ms = elapsed.as_millis();
self.total_gc_ms += self.last_gc_ms;
}
Err(e) => {
println!("Error: {:?}", e);
}
}
}
fn sweep(&mut self) {
let mut scan = 1;
self.head = 0;
let mut tail = self.head;
self.lastgc_free_mem = 0;
self.lastgc_live_mem = 0;
while scan < MAX_MEMORY_SLOTS - 1 {
if self.is_marked(scan) {
self.unmark_object(scan);
self.lastgc_live_mem += self.get_size(scan);
} else {
self.lastgc_free_mem += self.get_size(scan);
if tail == 0 {
self.head = scan;
self.set_fl_next(scan, 0);
tail = scan;
} else {
if self.next_object_in_heap(tail) == scan {
self.set_size(tail, self.get_size(tail) + self.get_size(scan));
} else {
self.set_fl_next(tail, scan);
self.set_fl_next(scan, 0);
tail = scan;
}
}
}
scan = self.next_object_in_heap(scan);
}
if self.show_free_list {
self.print_freelist();
}
if self.show_heap_map {
self.print_heap();
}
}
fn mark_and_scan(&mut self, object: usize) {
if object == 0 || self.is_marked(object) |
let slots = self.get_size(object);
self.mark_object(object);
for i in OBJECT_HEADER_SLOTS..slots {
self.mark_and_scan(self.mem[object + i]);
}
}
pub fn print_gc_stats(&self) {
println!(
"{} gcs, {} object allocates, Last GC: Live {} Dead {} in {} ms, Lifetime GC {} ms\n",
self.gc_count,
self.allocates,
self.lastgc_live_mem,
self.lastgc_free_mem,
self.last_gc_ms,
self.total_gc_ms,
);
}
fn print_heap(&mut self) {
print!("\x1B[{};{}H", 1, 1);
let mut scan = 1;
let mut count = 0;
let mut free = self.head;
while scan < MAX_MEMORY_SLOTS - 1 {
// skip free ones, print x's //
let mut num_chars_to_print = 0;
let mut char_to_print = '?';
if scan == free {
while scan == free {
char_to_print = 'x';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(free);
free = self.get_fl_next(free);
}
} else {
char_to_print = '.';
num_chars_to_print += self.get_size(scan);
scan = self.next_object_in_heap(scan);
}
for _i in 1..num_chars_to_print / 2 {
print!("{}", char_to_print);
count += 1;
if count % 120 == 0 {
print!("\n");
}
}
}
self.print_gc_stats();
}
pub fn print_freelist(&mut self) {
println!("\nprint_freelist: Head = {}", self.head);
let mut free = self.head;
let mut count = 0;
let mut total_free = 0;
while free != 0 {
let size = self.get_size(free);
let next = self.get_fl_next(free);
total_free += self.get_size(free);
println!("{}: Free = {} {} slots next = {}", count, free, size, next);
free = next;
count += 1;
if count > MAX_MEMORY_SLOTS {
panic!()
}
}
println!(
"print_freelist {} elements, total free = {}\n",
count, total_free
);
}
}
| {
return;
} | conditional_block |
utils.py | '''
Script implements several preprocessing and evaluation steps that have to be done for the triplet loss network
'''
## Imports
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import balanced_accuracy_score, precision_score, recall_score, f1_score, classification_report, confusion_matrix, roc_curve, auc
from sklearn.preprocessing import OrdinalEncoder
from collections import Counter
def tackle_distribution_shift(data:pd.DataFrame, approach:str = "reuse") -> pd.DataFrame:
'''
function to balance the appearance of samples from different classes -> tackle distribution shift
Parameters:
- data: data with distribution shift [pandas.DataFrame]
- approach: strategy to tackle the distribution shift. Possible values are [String]
- reusing minor class samples --> 'reuse' = default
- mixing both approaches by using the size of the median common class --> "mix"
- constraining the number of major class samples --> 'constrain'
Returns:
- df: data without distribution shift [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## get all labels that exist
labels = data.loc[:, "Label"]
## get appearances of each label
counted_labels = Counter(labels).most_common()
## get max num of samples (valid for all classes)
if approach == "reuse":
## take appearance value of most common label
sample_size = counted_labels[0][1]
elif approach == "mix":
sample_size = counted_labels[int(counted_labels.__len__()*0.5)][1]
elif approach == "constrain":
## take appearance value of least common label
sample_size = counted_labels[-1][1]
else:
print("approach not implemented (yet)! Using 'resue' instead!")
## take appearance value of most common label
sample_size = counted_labels[0][1]
## take a 'subset' or 'superset' of every class
sampled_data = [df[df.Label == label].sample(n = sample_size, replace = True) for label in np.unique(labels)]
## return merged data
return pd.concat(sampled_data).reset_index(drop = True)
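## Illustrative usage sketch (an assumption, not part of the original script; the name
## `_demo_balance` is hypothetical): shows how `tackle_distribution_shift` equalises class counts.
def _demo_balance() -> None:
    toy = pd.DataFrame({"Label": ["a", "a", "a", "b"], "x": [1, 2, 3, 4]})
    balanced = tackle_distribution_shift(toy, approach="reuse")
    ## with 'reuse', every class is resampled (with replacement) up to the majority count of 3
    assert Counter(balanced["Label"]) == Counter({"a": 3, "b": 3})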
def encode_objects(data:pd.DataFrame, ignore_columns:list = [], how:str = "binarizer") -> pd.DataFrame:
'''
goes through given dataset, encodes all object columns into numerical data
Parameters:
- data: DataFrame to analyse [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
- how: strategy to encode. The following are possible [String]
- Binarize: every unique value gets own column, filled with 0's and 1's, using pandas.get_dummies() --> 'binarizer' = Default
- OrdinalEncoder: unique values get replaced by increasing number (same amount of features) using sklearn's OrdinalEncoder --> 'ordinal'
Returns:
- encoded_data: encoded DataFrame [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
if type(df) == pd.Series:
df = pd.DataFrame(data)
df.columns = ["Series_Data"]
else:
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## define possible strategies
if how == "binarizer":
strategy = lambda x: pd.get_dummies(x)
elif how == "ordinal":
enc = OrdinalEncoder()
strategy = lambda x: pd.DataFrame(enc.fit_transform(x), columns = x.columns)
else:
print("strategy not implemented (yet!). Using pandas.get_dummies() instead!")
strategy = lambda x: pd.get_dummies(x)
cols = []
## go through all remaining columns, check if 'object' features exist
for column in columns:
if pd.api.types.is_string_dtype(df[column]):
cols.append(column)
## get all other columns from data
other_columns = list(set(df.columns) - set(cols))
## get both subdatasets - the encoded one and the remaining original one
encoded_data_raw = strategy(df[cols])
data_raw = df[other_columns]
## merge both subdatasets
encoded_data = pd.concat([encoded_data_raw, data_raw], axis = 1)
return encoded_data
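## Illustrative usage sketch (an assumption, not part of the original script; `_demo_encode` is a
## hypothetical name): shows the 'binarizer' strategy turning a string column into 0/1 columns.
def _demo_encode() -> None:
    toy = pd.DataFrame({"color": ["red", "blue", "red"], "x": [1, 2, 3]})
    encoded = encode_objects(toy, how="binarizer")
    ## the object column is replaced by one indicator column per unique value
    assert {"color_red", "color_blue"}.issubset(set(encoded.columns))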
def check_nan(data:pd.Series) -> bool:
'''
checks whether given data contains NaN's
Parameters:
- data: data to check [pandas.Series], can also be pandas.DataFrame
Returns:
- nan's: True, if data contains NaN's, otherwise False [Boolean]
'''
## make sure not to overwrite given data
df = data.copy()
if (not type(df) == pd.DataFrame) and (not type(df) == pd.Series):
print("data is no pandas.DataFrame, no check for NaN's done")
return False
if type(df) == pd.DataFrame:
return data.isna().sum().sum().astype(bool)
return data.isna().sum().astype(bool)
def add_nan(data:pd.DataFrame, amount:float = 0.05) -> pd.DataFrame:
'''
taking the given DataFrame and randomly adds the given amount of NaN's into it
Parameters:
- data: given data to add NaN's to [pandas.DataFrame]
- amount: desired amount of NaN's [Float, default = 0.05]
Returns:
- nan_data: data containing desired amount of NaN's [pandas.DataFrame]
'''
## set a numpy array with <amount> number of `True`s in the shape of data
nan_array = np.random.random(data.shape) < amount
## mask every element in 'data' with an NaN, when that element in 'nan_array' is set to True
nan_data = data.mask(nan_array)
return nan_data
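## Illustrative usage sketch (an assumption, not part of the original script; `_demo_nan_roundtrip`
## is a hypothetical name): `add_nan` and `check_nan` used together on a tiny frame.
def _demo_nan_roundtrip() -> None:
    toy = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
    assert not check_nan(toy)
    ## amount = 1.0 masks every cell, so the check is guaranteed to report NaN's
    assert check_nan(add_nan(toy, amount=1.0))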
def check_numeric(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
'''
function that converts all columns in DataFrame into numeric ones. Deletes all columns where `pandas.to_numeric()` fails (as they seem to be Strings)
Parameters:
- data: DataFrame with all different kinds of column types [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame with converted columns and without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
if type(df) == pd.Series:
num_df = pd.to_numeric(df, errors = "coerce")
if num_df.isna().sum() > 0:
print("data cannot be converted to numerical data, you have to encode it")
return df
return num_df
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## iterate over all columns, convert them to numeric ones (float or int)
for col in tqdm(columns, desc="make all columns numerical"):
## if error, then fill with NaN
df[col] = pd.to_numeric(df[col], errors="coerce")
## drop all columns that contain NaN's
df = df.dropna(axis=1)
return df
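## Illustrative usage sketch (an assumption, not part of the original script; `_demo_check_numeric`
## is a hypothetical name): numeric-looking strings are converted, non-numeric columns are dropped.
def _demo_check_numeric() -> None:
    toy = pd.DataFrame({"num": ["1", "2"], "txt": ["a", "b"]})
    cleaned = check_numeric(toy)
    assert list(cleaned.columns) == ["num"]
    assert cleaned["num"].tolist() == [1, 2]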
def iter_columns(data:pd.DataFrame, columns:list, trait:str) -> str:
'''
iterator, going over all columns in DataFrame, checking their content for desired trait
Parameters:
- data: DataFrame to iterate over [pandas.DataFrame]
- columns: columns to check [List]
- trait: what shall the column be checked for. Possible values [String]
- Unique: check for unique values per column, returns columns consisting of the same value over all samples --> 'unique'
Returns:
- col: column that contains only one different value [String]
'''
## iterate over all given columns
for col in tqdm(columns, desc=f"handle {trait}'s'"):
## check for Unique's
if trait == "unique":
## check if column contains more than one different value
if data[col].unique().__len__() == 1:
## if yes, return that column
yield col
def handle_nans(data:pd.DataFrame, strategy:str = "null", ignore_columns:list = []) -> pd.DataFrame:
'''
function that fills NaN's according to the given strategy and drops columns that only contain NaN's
Parameters:
- data: DataFrame [pandas.DataFrame]
- strategy: strategy to fill in the dataset. Possible values are [String]
- 0: fill all with Zero --> 'null' = default
- Mean: fill all with mean of respective feature --> 'mean'
- Median: fill all with median of respective feature --> 'median'
- Max: fill all with max of respective feature --> 'max'
- Min: fill all with min of respective feature --> 'min'
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data contains NaN's
if not check_nan(df):
print("no NaN's inside data")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## init columns to drop
cols = []
## check strategy, calculate filling value(s)
if strategy == "null":
value = [0 for _ in range(columns.__len__())]
elif strategy == "mean":
value = df[columns].mean()
elif strategy == "median":
value = df[columns].median()
elif strategy == "min":
value = df[columns].min()
elif strategy == "max":
value = df[columns].max()
else:
print("strategy not implemented (yet). Filling with 0")
value = [0 for _ in range(columns.__len__())]
df = df.fillna(dict(zip(columns, value)))
## drop columns that ONLY contain NaN's, no matter what 'ignore_columns' says
df = df.dropna(how = "all", axis = 1)
return df
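## Illustrative usage sketch (an assumption, not part of the original script; `_demo_handle_nans`
## is a hypothetical name): a single NaN is filled with the column mean.
def _demo_handle_nans() -> None:
    toy = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [2.0, 2.0, 2.0]})
    filled = handle_nans(toy, strategy="mean")
    assert filled["a"].tolist() == [1.0, 2.0, 3.0]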
def handle_uniques(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
'''
function that handles all columns (=features) that only contain one different value by dropping them --> they do not contain helpful (any) information
Parameters:
- data: DataFrame [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## init columns to drop
cols = []
for col in iter_columns(df, columns, "unique"):
cols.append(col)
df = df.drop(cols, axis=1)
return df
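## Illustrative usage sketch (an assumption, not part of the original script; `_demo_handle_uniques`
## is a hypothetical name): a constant column carries no information and is removed.
def _demo_handle_uniques() -> None:
    toy = pd.DataFrame({"const": [7, 7, 7], "varies": [1, 2, 3]})
    pruned = handle_uniques(toy)
    assert list(pruned.columns) == ["varies"]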
def drop_features(data:pd.DataFrame, columns:list = []) -> pd.DataFrame:
'''
function that drops all columns that are given by `columns`
Parameters:
- data: DataFrame with time columns [pandas.DataFrame]
- columns: List of columns that shall be deleted [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data or the given columns
cols = columns.copy()
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
print("data is no pandas.DataFrame, cannot be further processed")
return df
df = df.drop(cols, axis=1)
return df
def flatten(X:np.array) -> np.array:
|
def iter_scale(X:np.array, scaler:object) -> np.array:
'''
iterates over the given X, scales the given 3D array using the given (trained) scaler
Parameters:
- X: 3D array with shape samples x length x features [numpy.array]
- scaler: scaler object, e.g., sklearn.preprocessing.StandardScaler, sklearn.preprocessing.normalize [object]
Returns:
- scaled_X: scaled 3D array of same shape [numpy.array]
'''
## copy X to make sure not to overwrite the original data
scaled_X = X.copy()
for X_to_scale in tqdm(scaled_X):
yield scaler.transform(X_to_scale.reshape(1, -1)).reshape(X_to_scale.shape)
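## Illustrative usage sketch (an assumption, not part of the original script; `_demo_iter_scale`
## is a hypothetical name): every sample keeps its shape after being scaled one by one.
def _demo_iter_scale() -> None:
    X = np.random.rand(4, 5, 3)  ## samples x length x features
    scaler = StandardScaler().fit(X.reshape(X.shape[0], -1))
    scaled = np.stack(list(iter_scale(X, scaler)))
    assert scaled.shape == X.shape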
def pca(X:np.array, y:np.array) -> (np.array, np.array):
'''
reduces the given data to two dimensions using a 2-component PCA and returns the transformed values together with the labels
Parameters:
- X: Array containing the original x values [numpy.array]
- y: Array containing the labels [numpy.array]
Returns:
- X_transformed: Array containing the transformed x values [numpy.array]
- y: Array containing the labels [numpy.array]
'''
## init pca with two components
pca = PCA(n_components = 2)
## copy data to be sure not to accidentally overwrite something
pca_x = X.copy()
## check whether data has more than two dimensions (example shape of [60, 28, 28])
if pca_x.shape.__len__() > 2:
print("Dimension too high, X gets reshaped")
## if yes, reshape (in this case [60, 784])
pca_x = X.copy().reshape(X.shape[0], -1)
## fit PCA, transform data
X_transformed = pca.fit_transform(pca_x, y)
return X_transformed, y
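## Illustrative usage sketch (an assumption, not part of the original script; `_demo_pca` is a
## hypothetical name): higher-dimensional input is flattened and reduced to two components.
def _demo_pca() -> None:
    X = np.random.rand(30, 8, 8)
    y = np.repeat([0, 1, 2], 10)
    X_2d, _ = pca(X, y)
    assert X_2d.shape == (30, 2)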
def plot_reduced_data(X_new:np.array, y:np.array) -> None:
'''
plots a scatter showing the transformed dataset (if it is <= 2D) with different coloring for the different classes
Parameters:
- X_new: Array containing the transformed x values [numpy.array]
- y: Array containing the labels [numpy.array]
Returns:
- None
'''
## make DataFrame from transformed x values, add information about labels, rename the columns
reduced_data = pd.DataFrame(X_new).reset_index(drop = True)
if reduced_data.columns.__len__() == 1:
reduced_data["y"] = 0
reduced_data.columns = ["x","y"]
reduced_data["Label"] = y.reset_index(drop = True)
## make a list of SubDataSets that each only contain data about respective label
subdata = [reduced_data[reduced_data["Label"] == label] for label in np.unique(y)]
## set size, init figure
size = 10
fig=plt.figure(figsize=(2*size,size))
## add plots
ax = fig.add_subplot()
colors = list(mcolors.TABLEAU_COLORS) + list(mcolors.BASE_COLORS)
for i in range(len(subdata)):
ax.scatter(subdata[i]["x"], subdata[i]["y"], color = colors[i], label = i)
## update layout, without ticklabels and the grid to be on
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.grid(True)
## set title, new legend, save figure, show plot
ax.set_title(f"Plot of the reduced data after PCA, colored in respective label", size=2*size)
if subdata.__len__() > 12:
ax.legend().set_visible(False)
else:
ax.legend(prop={'size': 1.5*size})
plt.show()
def evaluate_model(clf:object, X:np.array, y:np.array) -> None:
'''
evaluates the given model with the given data, prints different metrics [accuracy, precision, recall, f1 score]
Parameters:
- clf: model to evaluate [object]
- X: x values [np.array]
- y: labels [np.array]
'''
## split data to train and test samples, fit classifier
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf.fit(X_train, y_train)
## init plot
size = 10
fig=plt.figure(figsize=(2*size,size))
ax=fig.add_subplot()
## plot confusion matrix
plot_confusion_matrix(clf, X_test, y_test, normalize="true", ax = ax, cmap=plt.cm.Blues)
## predict test samples
y_pred = clf.predict(X_test)
## calculate the metrics
accuracy = balanced_accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred,average="weighted")
recall = recall_score(y_test, y_pred,average="weighted")
f1 = f1_score(y_test, y_pred,average="weighted")
## print results
print(f"Acc: {accuracy * 100:.2f}%")
print(f"Precision: {precision:.2f}")
print(f"Recall: {recall:.2f}")
print(f"F1 score: {f1:.2f}") | '''
flattens a 3D array into 2D array
Parameters:
- X: a 3D array with shape samples x width x height [numpy.array]
Returns:
- flattened_X: 2D array with shape sample x width*height [numpy.array]
'''
flattened_X = X.reshape(X.shape[0], -1)
return flattened_X | identifier_body |
utils.py | '''
Script implements several preprocessing and evaluation steps that have to be done for the triplet loss network
'''
## Imports
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import balanced_accuracy_score, precision_score, recall_score, f1_score, classification_report, confusion_matrix, roc_curve, auc
from sklearn.preprocessing import OrdinalEncoder
from collections import Counter
def tackle_distribution_shift(data:pd.DataFrame, approach:str = "reuse") -> pd.DataFrame:
'''
function to balance the appearance of samples from different classes -> tackle distribution shift
Parameters:
- data: data with distribution shift [pandas.DataFrame]
- approach: strategy to tackle the distribution shift. Possible values are [String]
- reusing minor class samples --> 'reuse' = default
- mixing both approaches by using the size of the median common class --> "mix"
- constraining the number of major class samples --> 'constrain'
Returns:
- df: data without distribution shift [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## get all labels that exist
labels = data.loc[:, "Label"]
## get appearances of each label
counted_labels = Counter(labels).most_common()
## get max num of samples (valid for all classes)
if approach == "reuse":
## take appearance value of most common label
sample_size = counted_labels[0][1]
elif approach == "mix":
sample_size = counted_labels[int(counted_labels.__len__()*0.5)][1]
elif approach == "constrain":
## take appearance value of least common label
sample_size = counted_labels[-1][1]
else:
print("approach not implemented (yet)! Using 'resue' instead!")
## take appearance value of most common label
sample_size = counted_labels[0][1]
## take a 'subset' or 'superset' of every class
sampled_data = [df[df.Label == label].sample(n = sample_size, replace = True) for label in np.unique(labels)]
## return merged data
return pd.concat(sampled_data).reset_index(drop = True)
def encode_objects(data:pd.DataFrame, ignore_columns:list = [], how:str = "binarizer") -> pd.DataFrame:
'''
goes through given dataset, encodes all object columns into numerical data
Parameters:
- data: DataFrame to analyse [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
- how: strategy to encode. The following are possible [String]
- Binarize: every unique value gets own column, filled with 0's and 1's, using pandas.get_dummies() --> 'binarizer' = Default
- OrdinalEncoder: unique values get replaced by increasing number (same amount of features) using sklearn's OrdinalEncoder --> 'ordinal'
Returns:
- encoded_data: encoded DataFrame [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
if type(df) == pd.Series:
df = pd.DataFrame(data)
df.columns = ["Series_Data"]
else:
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## define possible strategies
if how == "binarizer":
strategy = lambda x: pd.get_dummies(x)
elif how == "ordinal":
enc = OrdinalEncoder()
strategy = lambda x: pd.DataFrame(enc.fit_transform(x), columns = x.columns)
else:
print("strategy not implemented (yet!). Using pandas.get_dummies() instead!")
strategy = lambda x: pd.get_dummies(x)
cols = []
## go through all remaining columns, check if 'object' features exist
for column in columns:
if pd.api.types.is_string_dtype(df[column]):
cols.append(column)
## get all other columns from data
other_columns = list(set(df.columns) - set(cols))
## get both subdatasets - the encoded one and the remaining original one
encoded_data_raw = strategy(df[cols])
data_raw = df[other_columns]
## merge both subdatasets
encoded_data = pd.concat([encoded_data_raw, data_raw], axis = 1)
return encoded_data
def check_nan(data:pd.Series) -> bool:
'''
checks whether given data contains NaN's
Parameters:
- data: data to check [pandas.Series], can also be pandas.DataFrame
Returns:
- nan's: True, if data contains NaN's, otherwise False [Boolean]
'''
## make sure not to overwrite given data
df = data.copy()
if (not type(df) == pd.DataFrame) and (not type(df) == pd.Series):
print("data is no pandas.DataFrame, no check for NaN's done")
return False
if type(df) == pd.DataFrame:
return data.isna().sum().sum().astype(bool)
return data.isna().sum().astype(bool)
def add_nan(data:pd.DataFrame, amount:float = 0.05) -> pd.DataFrame:
'''
taking the given DataFrame and randomly adds the given amount of NaN's into it
Parameters:
- data: given data to add NaN's to [pandas.DataFrame]
- amount: desired amount of NaN's [Float, default = 0.05]
Returns:
- nan_data: data containing desired amount of NaN's [pandas.DataFrame]
'''
## set a numpy array with <amount> number of `True`s in the shape of data
nan_array = np.random.random(data.shape) < amount
## mask every element in 'data' with an NaN, when that element in 'nan_array' is set to True
nan_data = data.mask(nan_array)
return nan_data
def check_numeric(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
'''
function that converts all columns in DataFrame into numeric ones. Deletes all columns where `pandas.to_numeric()` fails (as they seem to be Strings)
Parameters:
- data: DataFrame with all different kinds of column types [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame with converted columns and without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
if type(df) == pd.Series:
num_df = pd.to_numeric(df, errors = "coerce")
if num_df.isna().sum() > 0:
print("data cannot be converted to numerical data, you have to encode it")
return df
return num_df
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## iterate over all columns, convert them to numeric ones (float or int)
for col in tqdm(columns, desc="make all columns numerical"):
## if error, then fill with NaN
df[col] = pd.to_numeric(df[col], errors="coerce")
## drop all columns that contain NaN's
df = df.dropna(axis=1)
return df
def iter_columns(data:pd.DataFrame, columns:list, trait:str) -> str:
'''
iterator, going over all columns in DataFrame, checking their content for desired trait
Parameters:
- data: DataFrame to iterate over [pandas.DataFrame]
- columns: columns to check [List]
- trait: what shall the column be checked for. Possible values [String]
- Unique: check for unique values per column, returns columns consisting of the same value over all samples --> 'unique'
Returns:
- col: column that contains only one different value [String]
'''
## iterate over all given columns
for col in tqdm(columns, desc=f"handle {trait}'s'"):
## check for Unique's
if trait == "unique":
## check if column contains more than one different value
if data[col].unique().__len__() == 1:
## if yes, return that column
yield col
def handle_nans(data:pd.DataFrame, strategy:str = "null", ignore_columns:list = []) -> pd.DataFrame:
'''
function that fills NaN's according to the given strategy and drops columns that only contain NaN's
Parameters:
- data: DataFrame [pandas.DataFrame]
- strategy: strategy to fill in the dataset. Possible values are [String]
- 0: fill all with Zero --> 'null' = default
- Mean: fill all with mean of respective feature --> 'mean'
- Median: fill all with median of respective feature --> 'median'
- Max: fill all with max of respective feature --> 'max'
- Min: fill all with min of respective feature --> 'min'
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data contains NaN's
if not check_nan(df):
print("no NaN's inside data")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## init columns to drop
cols = []
## check strategy, calculate filling value(s)
if strategy == "null":
value = [0 for _ in range(columns.__len__())]
elif strategy == "mean":
value = df[columns].mean()
elif strategy == "median":
value = df[columns].median()
elif strategy == "min":
value = df[columns].min()
elif strategy == "max":
value = df[columns].max()
else:
print("strategy not implemented (yet). Filling with 0")
value = [0 for _ in range(columns.__len__())]
df = df.fillna(dict(zip(columns, value)))
## drop columns that ONLY contain NaN's, no matter what 'ignore_columns' says
df = df.dropna(how = "all", axis = 1)
return df
def handle_uniques(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
'''
function that handles all columns (=features) that only contain one different value by dropping them --> they do not contain helpful (any) information
Parameters:
- data: DataFrame [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## init columns to drop
cols = []
for col in iter_columns(df, columns, "unique"):
cols.append(col)
df = df.drop(cols, axis=1)
return df
def drop_features(data:pd.DataFrame, columns:list = []) -> pd.DataFrame:
'''
function that drops all columns that are given by `columns`
Parameters:
- data: DataFrame with time columns [pandas.DataFrame]
- columns: List of columns that shall be deleted [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data or the given columns
cols = columns.copy()
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
print("data is no pandas.DataFrame, cannot be further processed")
return df
df = df.drop(cols, axis=1)
return df
def flatten(X:np.array) -> np.array:
'''
flattens a 3D array into 2D array
Parameters:
- X: a 3D array with shape samples x width x height [numpy.array]
Returns:
- flattened_X: 2D array with shape sample x width*height [numpy.array]
'''
flattened_X = X.reshape(X.shape[0], -1)
return flattened_X
def iter_scale(X:np.array, scaler:object) -> np.array:
'''
iterates over the given X, scales the given 3D array using the given (trained) scaler
Parameters:
- X: 3D array with shape samples x length x features [numpy.array]
- scaler: scaler object, e.g., sklearn.preprocessing.StandardScaler, sklearn.preprocessing.normalize [object]
Returns:
- scaled_X: scaled 3D array of same shape [numpy.array]
'''
## copy X to make sure not to overwrite the original data
scaled_X = X.copy()
for X_to_scale in tqdm(scaled_X):
yield scaler.transform(X_to_scale.reshape(1, -1)).reshape(X_to_scale.shape)
def pca(X:np.array, y:np.array) -> (np.array, np.array):
''' | - X_transformed: Array containing the transformed x values [numpy.array]
- y: Array containing the labels [numpy.array]
'''
## init pca with two components
pca = PCA(n_components = 2)
## copy data to be sure not to accidentally overwrite something
pca_x = X.copy()
## check whether data has more than two dimensions (example shape of [60, 28, 28])
if pca_x.shape.__len__() > 2:
print("Dimension too high, X gets reshaped")
## if yes, reshape (in this case [60, 784])
pca_x = X.copy().reshape(X.shape[0], -1)
## fit PCA, transform data
X_transformed = pca.fit_transform(pca_x, y)
return X_transformed, y
def plot_reduced_data(X_new:np.array, y:np.array) -> None:
'''
plots a scatter showing the transformed dataset (if it is <= 2D) with different coloring for the different classes
Parameters:
- X_new: Array containing the transformed x values [numpy.array]
- y: Array containing the labels [numpy.array]
Returns:
- None
'''
## make DataFrame from transformed x values, add information about labels, rename the columns
reduced_data = pd.DataFrame(X_new).reset_index(drop = True)
if reduced_data.columns.__len__() == 1:
reduced_data["y"] = 0
reduced_data.columns = ["x","y"]
reduced_data["Label"] = y.reset_index(drop = True)
## make a list of SubDataSets that each only contain data about respective label
subdata = [reduced_data[reduced_data["Label"] == label] for label in np.unique(y)]
## set size, init figure
size = 10
fig=plt.figure(figsize=(2*size,size))
## add plots
ax = fig.add_subplot()
colors = list(mcolors.TABLEAU_COLORS) + list(mcolors.BASE_COLORS)
for i in range(len(subdata)):
ax.scatter(subdata[i]["x"], subdata[i]["y"], color = colors[i], label = i)
## update layout, without ticklabels and the grid to be on
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.grid(True)
## set title, new legend, save figure, show plot
ax.set_title(f"Plot of the reduced data after PCA, colored in respective label", size=2*size)
if subdata.__len__() > 12:
ax.legend().set_visible(False)
else:
ax.legend(prop={'size': 1.5*size})
plt.show()
def evaluate_model(clf:object, X:np.array, y:np.array) -> None:
'''
evaluates the given model with the given data, prints different metrics [accuracy, precision, recall, f1 score]
Parameters:
- clf: model to evaluate [object]
- X: x values [np.array]
- y: labels [np.array]
'''
## split data to train and test samples, fit classifier
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf.fit(X_train, y_train)
## init plot
size = 10
fig=plt.figure(figsize=(2*size,size))
ax=fig.add_subplot()
## plot confusion matrix
plot_confusion_matrix(clf, X_test, y_test, normalize="true", ax = ax, cmap=plt.cm.Blues)
## predict test samples
y_pred = clf.predict(X_test)
## calculate the metrics
accuracy = balanced_accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred,average="weighted")
recall = recall_score(y_test, y_pred,average="weighted")
f1 = f1_score(y_test, y_pred,average="weighted")
## print results
print(f"Acc: {accuracy * 100:.2f}%")
print(f"Precision: {precision:.2f}")
print(f"Recall: {recall:.2f}")
print(f"F1 score: {f1:.2f}") | plots a scatter showing the transformed dataset (if it is 2D) with different coloring for the different classes
Parameters:
- X: Array containing the original x values [numpy.array]
- y: Array containing the labels [numpy.array]
Returns: | random_line_split |
utils.py | '''
Script implements several preprocessing and evaluation steps that have to be done for the triplet loss network
'''
## Imports
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import balanced_accuracy_score, precision_score, recall_score, f1_score, classification_report, confusion_matrix, roc_curve, auc
from sklearn.preprocessing import OrdinalEncoder
from collections import Counter
def tackle_distribution_shift(data:pd.DataFrame, approach:str = "reuse") -> pd.DataFrame:
'''
function to balance the appearance of samples from different classes -> tackle distribution shift
Parameters:
- data: data with distribution shift [pandas.DataFrame]
- approach: strategy to tackle the distribution shift. Possible values are [String]
- reusing minor class samples --> 'reuse' = default
- mixing both approaches by using the size of the median common class --> "mix"
- constraining the number of major class samples --> 'constrain'
Returns:
- df: data without distribution shift [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## get all labels that exist
labels = data.loc[:, "Label"]
## get appearances of each label
counted_labels = Counter(labels).most_common()
## get max num of samples (valid for all classes)
if approach == "reuse":
## take appearance value of most common label
sample_size = counted_labels[0][1]
elif approach == "mix":
sample_size = counted_labels[int(counted_labels.__len__()*0.5)][1]
elif approach == "constrain":
## take appearance value of least common label
sample_size = counted_labels[-1][1]
else:
print("approach not implemented (yet)! Using 'resue' instead!")
## take appearance value of most common label
sample_size = counted_labels[0][1]
## take a 'subset' or 'superset' of every class
sampled_data = [df[df.Label == label].sample(n = sample_size, replace = True) for label in np.unique(labels)]
## return merged data
return pd.concat(sampled_data).reset_index(drop = True)
def encode_objects(data:pd.DataFrame, ignore_columns:list = [], how:str = "binarizer") -> pd.DataFrame:
'''
goes through given dataset, encodes all object columns into numerical data
Parameters:
- data: DataFrame to analyse [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
- how: strategy to encode. The following are possible [String]
- Binarize: every unique value gets own column, filled with 0's and 1's, using pandas.get_dummies() --> 'binarizer' = Default
- OrdinalEncoder: unique values get replaced by increasing number (same amount of features) using sklearn's OrdinalEncoder --> 'ordinal'
Returns:
- encoded_data: encoded DataFrame [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
if type(df) == pd.Series:
df = pd.DataFrame(data)
df.columns = ["Series_Data"]
else:
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## define possible strategies
if how == "binarizer":
strategy = lambda x: pd.get_dummies(x)
elif how == "ordinal":
enc = OrdinalEncoder()
strategy = lambda x: pd.DataFrame(enc.fit_transform(x), columns = x.columns)
else:
print("strategy not implemented (yet!). Using pandas.get_dummies() instead!")
strategy = lambda x: pd.get_dummies(x)
cols = []
## go through all remaining columns, check if 'object' features exist
for column in columns:
if pd.api.types.is_string_dtype(df[column]):
cols.append(column)
## get all other columns from data
other_columns = list(set(df.columns) - set(cols))
## get both subdatasets - the encoded one and the remaining original one
encoded_data_raw = strategy(df[cols])
data_raw = df[other_columns]
## merge both subdatasets
encoded_data = pd.concat([encoded_data_raw, data_raw], axis = 1)
return encoded_data
def check_nan(data:pd.Series) -> bool:
'''
checks whether given data contains NaN's
Parameters:
- data: data to check [pandas.Series], can also be pandas.DataFrame
Returns:
- nan's: True, if data contains NaN's, otherwise False [Boolean]
'''
## make sure not to overwrite given data
df = data.copy()
if (not type(df) == pd.DataFrame) and (not type(df) == pd.Series):
print("data is no pandas.DataFrame, no check for NaN's done")
return False
if type(df) == pd.DataFrame:
return data.isna().sum().sum().astype(bool)
return data.isna().sum().astype(bool)
def add_nan(data:pd.DataFrame, amount:float = 0.05) -> pd.DataFrame:
'''
taking the given DataFrame and randomly adds the given amount of NaN's into it
Parameters:
- data: given data to add NaN's to [pandas.DataFrame]
- amount: desired amount of NaN's [Float, default = 0.05]
Returns:
- nan_data: data containing desired amount of NaN's [pandas.DataFrame]
'''
## set a numpy array with <amount> number of `True`s in the shape of data
nan_array = np.random.random(data.shape) < amount
## mask every element in 'data' with an NaN, when that element in 'nan_array' is set to True
nan_data = data.mask(nan_array)
return nan_data
def check_numeric(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
'''
function that converts all columns in DataFrame into numeric ones. Deletes all columns where `pandas.to_numeric()` fails (as they seem to be Strings)
Parameters:
- data: DataFrame with all different kinds of column types [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame with converted columns and without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
if type(df) == pd.Series:
num_df = pd.to_numeric(df, errors = "coerce")
if num_df.isna().sum() > 0:
print("data cannot be converted to numerical data, you have to encode it")
return df
return num_df
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## iterate over all columns, convert them to numeric ones (float or int)
for col in tqdm(columns, desc="make all columns numerical"):
## if error, then fill with NaN
df[col] = pd.to_numeric(df[col], errors="coerce")
## drop all columns that contain NaN's
df = df.dropna(axis=1)
return df
def iter_columns(data:pd.DataFrame, columns:list, trait:str) -> str:
'''
iterator, going over all columns in DataFrame, checking their content for desired trait
Parameters:
- data: DataFrame to iterate over [pandas.DataFrame]
- columns: columns to check [List]
- trait: what shall the column be checked for. Possible values [String]
- Unique: check for unique values per column, returns columns consisting of the same value over all samples --> 'unique'
Returns:
- col: column that contains only one different value [String]
'''
## iterate over all given columns
for col in tqdm(columns, desc=f"handle {trait}'s'"):
## check for Unique's
if trait == "unique":
## check if column contains more than one different value
if data[col].unique().__len__() == 1:
## if yes, return that column
yield col
def handle_nans(data:pd.DataFrame, strategy:str = "null", ignore_columns:list = []) -> pd.DataFrame:
'''
function that fills NaN's according to the given strategy and drops columns that only contain NaN's
Parameters:
- data: DataFrame [pandas.DataFrame]
- strategy: strategy to fill in the dataset. Possible values are [String]
- 0: fill all with Zero --> 'null' = default
- Mean: fill all with mean of respective feature --> 'mean'
- Median: fill all with median of respective feature --> 'median'
- Max: fill all with max of respective feature --> 'max'
- Min: fill all with min of respective feature --> 'min'
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data contains NaN's
if not check_nan(df):
print("no NaN's inside data")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## init columns to drop
cols = []
## check strategy, calculate filling value(s)
if strategy == "null":
|
elif strategy == "mean":
value = df[columns].mean()
elif strategy == "median":
value = df[columns].median()
elif strategy == "min":
value = df[columns].min()
elif strategy == "max":
value = df[columns].max()
else:
print("strategy not implemented (yet). Filling with 0")
value = [0 for _ in range(columns.__len__())]
df = df.fillna(dict(zip(columns, value)))
## drop columns that ONLY contain NaN's, no matter what 'ignore_columns' says
df = df.dropna(how = "all", axis = 1)
return df
def handle_uniques(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
'''
function that handles all columns (=features) that only contain one different value by dropping them --> they do not contain helpful (any) information
Parameters:
- data: DataFrame [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## init columns to drop
cols = []
for col in iter_columns(df, columns, "unique"):
cols.append(col)
df = df.drop(cols, axis=1)
return df
def drop_features(data:pd.DataFrame, columns:list = []) -> pd.DataFrame:
'''
function that drops all columns that are given by `columns`
Parameters:
- data: DataFrame with time columns [pandas.DataFrame]
- columns: List of columns that shall be deleted [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data or the given columns
cols = columns.copy()
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
print("data is no pandas.DataFrame, cannot be further processed")
return df
df = df.drop(cols, axis=1)
return df
def flatten(X:np.array) -> np.array:
'''
flattens a 3D array into 2D array
Parameters:
- X: a 3D array with shape samples x width x height [numpy.array]
Returns:
- flattened_X: 2D array with shape sample x width*height [numpy.array]
'''
flattened_X = X.reshape(X.shape[0], -1)
return flattened_X
def iter_scale(X:np.array, scaler:object) -> np.array:
'''
iterates over the given X, scales the given 3D array using the given (trained) scaler
Parameters:
- X: 3D array with shape samples x length x features [numpy.array]
- scaler: scaler object, e.g., sklearn.preprocessing.StandardScaler, sklearn.preprocessing.normalize [object]
Returns:
- scaled_X: scaled 3D array of same shape [numpy.array]
'''
## copy X to make sure not to overwrite the original data
scaled_X = X.copy()
for X_to_scale in tqdm(scaled_X):
yield scaler.transform(X_to_scale.reshape(1, -1)).reshape(X_to_scale.shape)
def pca(X:np.array, y:np.array) -> (np.array, np.array):
'''
reduces the given data to two dimensions using a 2-component PCA and returns the transformed values together with the labels
Parameters:
- X: Array containing the original x values [numpy.array]
- y: Array containing the labels [numpy.array]
Returns:
- X_transformed: Array containing the transformed x values [numpy.array]
- y: Array containing the labels [numpy.array]
'''
## init pca with two components
pca = PCA(n_components = 2)
## copy data to be sure not to accidentally overwrite something
pca_x = X.copy()
## check whether data has more than two dimensions (example shape of [60, 28, 28])
if pca_x.shape.__len__() > 2:
print("Dimension too high, X gets reshaped")
## if yes, reshape (in this case [60, 784])
pca_x = X.copy().reshape(X.shape[0], -1)
## fit PCA, transform data
X_transformed = pca.fit_transform(pca_x, y)
return X_transformed, y
def plot_reduced_data(X_new:np.array, y:np.array) -> None:
'''
plots a scatter showing the transformed dataset (if it is <= 2D) with different coloring for the different classes
Parameters:
- X_new: Array containing the transformed x values [numpy.array]
- y: Array containing the labels [numpy.array]
Returns:
- None
'''
## make DataFrame from transformed x values, add information about labels, rename the columns
reduced_data = pd.DataFrame(X_new).reset_index(drop = True)
if reduced_data.columns.__len__() == 1:
reduced_data["y"] = 0
reduced_data.columns = ["x","y"]
reduced_data["Label"] = y.reset_index(drop = True)
## make a list of SubDataSets that each only contain data about respective label
subdata = [reduced_data[reduced_data["Label"] == label] for label in np.unique(y)]
## set size, init figure
size = 10
fig=plt.figure(figsize=(2*size,size))
## add plots
ax = fig.add_subplot()
colors = list(mcolors.TABLEAU_COLORS) + list(mcolors.BASE_COLORS)
for i in range(len(subdata)):
ax.scatter(subdata[i]["x"], subdata[i]["y"], color = colors[i], label = i)
## update layout, without ticklabels and the grid to be on
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.grid(True)
## set title, new legend, save figure, show plot
ax.set_title(f"Plot of the reduced data after PCA, colored in respective label", size=2*size)
if subdata.__len__() > 12:
ax.legend().set_visible(False)
else:
ax.legend(prop={'size': 1.5*size})
plt.show()
def evaluate_model(clf:object, X:np.array, y:np.array) -> None:
'''
evaluates the given model with the given data, prints different metrics [accuracy, precision, recall, f1 score]
Parameters:
- clf: model to evaluate [object]
- X: x values [np.array]
- y: labels [np.array]
'''
## split data to train and test samples, fit classifier
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf.fit(X_train, y_train)
## init plot
size = 10
fig=plt.figure(figsize=(2*size,size))
ax=fig.add_subplot()
## plot confusion matrix
plot_confusion_matrix(clf, X_test, y_test, normalize="true", ax = ax, cmap=plt.cm.Blues)
## predict test samples
y_pred = clf.predict(X_test)
## calculate the metrics
accuracy = balanced_accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred,average="weighted")
recall = recall_score(y_test, y_pred,average="weighted")
f1 = f1_score(y_test, y_pred,average="weighted")
## print results
print(f"Acc: {accuracy * 100:.2f}%")
print(f"Precision: {precision:.2f}")
print(f"Recall: {recall:.2f}")
print(f"F1 score: {f1:.2f}") | value = [0 for _ in range(columns.__len__())] | conditional_block |
utils.py | '''
Script implements several preprocessing and evaluation steps that have to be done for the triplet loss network
'''
## Imports
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import balanced_accuracy_score, precision_score, recall_score, f1_score, classification_report, confusion_matrix, roc_curve, auc
from sklearn.preprocessing import OrdinalEncoder
from collections import Counter
def | (data:pd.DataFrame, approach:str = "reuse") -> pd.DataFrame:
'''
function to balance the appearance of samples from different classes -> tackle distribution shift
Parameters:
- data: data with distribution shift [pandas.DataFrame]
- approach: strategy to tackle the distribution shift. Possible values are [String]
- reusing minor class samples --> 'reuse' = default
- mixing both approaches by using the size of the median common class --> "mix"
- constraining the number of major class samples --> 'constrain'
Returns:
- df: data without distribution shift [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## get all labels that exist
labels = data.loc[:, "Label"]
## get appearances of each label
counted_labels = Counter(labels).most_common()
## get max num of samples (valid for all classes)
if approach == "reuse":
## take appearance value of most common label
sample_size = counted_labels[0][1]
elif approach == "mix":
sample_size = counted_labels[int(counted_labels.__len__()*0.5)][1]
elif approach == "constrain":
## take appearance value of least common label
sample_size = counted_labels[-1][1]
else:
print("approach not implemented (yet)! Using 'resue' instead!")
## take appearance value of most common label
sample_size = counted_labels[0][1]
## take a 'subset' or 'superset' of every class
sampled_data = [df[df.Label == label].sample(n = sample_size, replace = True) for label in np.unique(labels)]
## return merged data
return pd.concat(sampled_data).reset_index(drop = True)
def encode_objects(data:pd.DataFrame, ignore_columns:list = [], how:str = "binarizer") -> pd.DataFrame:
'''
goes through given dataset, encodes all object columns into numerical data
Parameters:
- data: DataFrame to analyse [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
- how: strategy to encode. The following are possible [String]
- Binarize: every unique value gets own column, filled with 0's and 1's, using pandas.get_dummies() --> 'binarizer' = Default
- OrdinalEncoder: unique values get replaced by increasing number (same amount of features) using sklearn's OrdinalEncoder --> 'ordinal'
Returns:
- encoded_data: encoded DataFrame [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
if type(df) == pd.Series:
df = pd.DataFrame(data)
df.columns = ["Series_Data"]
else:
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## define possible strategies
if how == "binarizer":
strategy = lambda x: pd.get_dummies(x)
elif how == "ordinal":
enc = OrdinalEncoder()
strategy = lambda x: pd.DataFrame(enc.fit_transform(x), columns = x.columns)
else:
print("strategy not implemented (yet!). Using pandas.get_dummies() instead!")
strategy = lambda x: pd.get_dummies(x)
cols = []
## go through all remaining columns, check if 'object' features exist
for column in columns:
if pd.api.types.is_string_dtype(df[column]):
cols.append(column)
## get all other columns from data
other_columns = list(set(df.columns) - set(cols))
## get both subdatasets - the encoded one and the remaining original one
encoded_data_raw = strategy(df[cols])
data_raw = df[other_columns]
## merge both subdatasets
encoded_data = pd.concat([encoded_data_raw, data_raw], axis = 1)
return encoded_data
def check_nan(data:pd.Series) -> bool:
'''
checks whether given data contains NaN's
Parameters:
- data: data to check [pandas.Series], can also be pandas.DataFrame
Returns:
- nan's: True, if data contains NaN's, otherwise False [Boolean]
'''
## make sure not to overwrite given data
df = data.copy()
if (not type(df) == pd.DataFrame) and (not type(df) == pd.Series):
print("data is no pandas.DataFrame, no check for NaN's done")
return False
if type(df) == pd.DataFrame:
return data.isna().sum().sum().astype(bool)
return data.isna().sum().astype(bool)
def add_nan(data:pd.DataFrame, amount:float = 0.05) -> pd.DataFrame:
'''
taking the given DataFrame and randomly adds the given amount of NaN's into it
Parameters:
- data: given data to add NaN's to [pandas.DataFrame]
- amount: desired amount of NaN's [Float, default = 0.05]
Returns:
- nan_data: data containing desired amount of NaN's [pandas.DataFrame]
'''
## set a numpy array with <amount> number of `True`s in the shape of data
nan_array = np.random.random(data.shape) < amount
## mask every element in 'data' with an NaN, when that element in 'nan_array' is set to True
nan_data = data.mask(nan_array)
return nan_data
def check_numeric(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
'''
function that converts all columns in DataFrame into numeric ones. Deletes all columns where `pandas.to_numeric()` fails (as they seem to be Strings)
Parameters:
- data: DataFrame with all different kinds of column types [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame with converted columns and without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
if type(df) == pd.Series:
num_df = pd.to_numeric(df, errors = "coerce")
if num_df.isna().sum() > 0:
print("data cannot be converted to numerical data, you have to encode it")
return df
return num_df
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## iterate over all columns, convert them to numeric ones (float or int)
for col in tqdm(columns, desc="make all columns numerical"):
## if error, then fill with NaN
df[col] = pd.to_numeric(df[col], errors="coerce")
## drop all columns that contain NaN's
df = df.dropna(axis=1)
return df
def iter_columns(data:pd.DataFrame, columns:list, trait:str) -> str:
'''
iterator, going over all columns in DataFrame, checking their content for desired trait
Parameters:
- data: DataFrame to iterate over [pandas.DataFrame]
- columns: columns to check [List]
- trait: what shall the column be checked for. Possible values [String]
- Unique: check for unique values per column, returns columns consisting of the same value over all samples --> 'unique'
Returns:
- col: column that contains only one different value [String]
'''
## iterate over all given columns
for col in tqdm(columns, desc=f"handle {trait}'s'"):
## check for Unique's
if trait == "unique":
## check if column contains more than one different value
if data[col].unique().__len__() == 1:
## if yes, return that column
yield col
def handle_nans(data:pd.DataFrame, strategy:str = "null", ignore_columns:list = []) -> pd.DataFrame:
'''
function that fills NaN's according to the given strategy and drops columns that only contain NaN's
Parameters:
- data: DataFrame [pandas.DataFrame]
- strategy: strategy to fill in the dataset. Possible values are [String]
- 0: fill all with Zero --> 'null' = default
- Mean: fill all with mean of respective feature --> 'mean'
- Median: fill all with median of respective feature --> 'median'
- Max: fill all with max of respective feature --> 'max'
- Min: fill all with min of respective feature --> 'min'
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data contains NaN's
if not check_nan(df):
print("no NaN's inside data")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## init columns to drop
cols = []
## check strategy, calculate filling value(s)
if strategy == "null":
value = [0 for _ in range(columns.__len__())]
elif strategy == "mean":
value = df[columns].mean()
elif strategy == "median":
value = df[columns].median()
elif strategy == "min":
value = df[columns].min()
elif strategy == "max":
value = df[columns].max()
else:
print("strategy not implemented (yet). Filling with 0")
value = [0 for _ in range(columns.__len__())]
df = df.fillna(dict(zip(columns, value)))
## drop columns that ONLY contain NaN's, no matter what 'ignore_columns' says
df = df.dropna(how = "all", axis = 1)
return df
def handle_uniques(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
'''
function that handles all columns (=features) that only contain one different value by dropping them --> they do not contain helpful (any) information
Parameters:
- data: DataFrame [pandas.DataFrame]
- ignore_columns: List of columns that shall be ignored [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
print("data is no pandas.DataFrame, cannot be further processed")
return df
## remaining columns that shall be checked (all - ignore_columns)
columns = list(set(df.columns) - set(ignore_columns))
## init columns to drop
cols = []
for col in iter_columns(df, columns, "unique"):
cols.append(col)
df = df.drop(cols, axis=1)
return df
def drop_features(data:pd.DataFrame, columns:list = []) -> pd.DataFrame:
'''
function that drops all columns that are given by `columns`
Parameters:
- data: DataFrame with time columns [pandas.DataFrame]
- columns: List of columns that shall be deleted [List, default = []]
Returns:
- df: DataFrame without deleted columns [pandas.DataFrame]
'''
## make sure not to overwrite given data or the given columns
cols = columns.copy()
df = data.copy()
## check if data is pandas.DataFrame
if not type(df) == pd.DataFrame:
print("data is no pandas.DataFrame, cannot be further processed")
return df
df = df.drop(cols, axis=1)
return df
def flatten(X:np.array) -> np.array:
'''
flattens a 3D array into 2D array
Parameters:
- X: a 3D array with shape samples x width x height [numpy.array]
Returns:
- flattened_X: 2D array with shape sample x width*height [numpy.array]
'''
flattened_X = X.reshape(X.shape[0], -1)
return flattened_X
def iter_scale(X:np.array, scaler:object) -> np.array:
'''
iterates over the given X, scales the given 3D array using the given (trained) scaler
Parameters:
- X: 3D array with shape samples x length x features [numpy.array]
- scaler: scaler object, e.g., sklearn.preprocessing.StandardScaler, sklearn.preprocessing.normalize [object]
Returns:
- scaled_X: scaled 3D array of same shape [numpy.array]
'''
## copy X to make sure not to overwrite the original data
scaled_X = X.copy()
for X_to_scale in tqdm(scaled_X):
yield scaler.transform(X_to_scale.reshape(1, -1)).reshape(X_to_scale.shape)
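## Illustrative usage sketch (assumes sklearn's StandardScaler is imported; iter_scale is a
## generator, so collect its output explicitly):
## scaler = StandardScaler().fit(flatten(X_train))
## X_train_scaled = np.array(list(iter_scale(X_train, scaler)))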
def pca(X:np.array, y:np.array) -> (np.array, np.array):
'''
reduces the given data to two principal components using PCA (reshaping X to 2D first if needed)
Parameters:
- X: Array containing the original x values [numpy.array]
- y: Array containing the labels [numpy.array]
Returns:
- X_transformed: Array containing the transformed x values [numpy.array]
- y: Array containing the labels [numpy.array]
'''
## init pca with two components
pca = PCA(n_components = 2)
## copy data to be sure not to accidentally overwrite something
pca_x = X.copy()
## check whether data has more than two dimensions (example shape of [60, 28, 28])
if len(pca_x.shape) > 2:
print("Dimension too high, X gets reshaped")
## if yes, reshape (in this case [60, 784])
pca_x = X.copy().reshape(X.shape[0], -1)
## fit PCA, transform data
X_transformed = pca.fit_transform(pca_x, y)
return X_transformed, y
def plot_reduced_data(X_new:np.array, y:np.array) -> None:
'''
plots a scatter showing the transformed dataset (if it is <= 2D) with different coloring for the different classes
Parameters:
- X_new: Array containing the transformed x values [numpy.array]
- y: Array containing the labels [numpy.array]
Returns:
- None
'''
## make DataFrame from transformed x values, add information about labels, rename the columns
reduced_data = pd.DataFrame(X_new).reset_index(drop = True)
if len(reduced_data.columns) == 1:
reduced_data["y"] = 0
reduced_data.columns = ["x","y"]
reduced_data["Label"] = y.reset_index(drop = True)
## make a list of SubDataSets that each only contain data about respective label
subdata = [reduced_data[reduced_data["Label"] == label] for label in np.unique(y)]
## set size, init figure
size = 10
fig=plt.figure(figsize=(2*size,size))
## add plots
ax = fig.add_subplot()
colors = list(mcolors.TABLEAU_COLORS) + list(mcolors.BASE_COLORS)
for i in range(len(subdata)):
ax.scatter(subdata[i]["x"], subdata[i]["y"], color = colors[i], label = i)
## update layout, without ticklabels and the grid to be on
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.grid(True)
## set title, new legend, save figure, show plot
ax.set_title("Plot of the reduced data after PCA, colored in respective label", size=2*size)
if len(subdata) > 12:
ax.legend().set_visible(False)
else:
ax.legend(prop={'size': 1.5*size})
plt.show()
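## Illustrative usage sketch (assumes y is a pandas Series, since plot_reduced_data calls y.reset_index):
## X_red, y_red = pca(X, y)
## plot_reduced_data(X_red, y_red)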
def evaluate_model(clf:object, X:np.array, y:np.array) -> None:
'''
evaluates the given model with the given data, prints different metrics [accuracy, precision, recall, f1 score]
Parameters:
- clf: model to evaluate [object]
- X: x values [np.array]
- y: labels [np.array]
'''
## split data to train and test samples, fit classifier
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf.fit(X_train, y_train)
## init plot
size = 10
fig=plt.figure(figsize=(2*size,size))
ax=fig.add_subplot()
## plot confusion matrix
plot_confusion_matrix(clf, X_test, y_test, normalize="true", ax = ax, cmap=plt.cm.Blues)
## predict test samples
y_pred = clf.predict(X_test)
## calculate the metrics
accuracy = balanced_accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred,average="weighted")
recall = recall_score(y_test, y_pred,average="weighted")
f1 = f1_score(y_test, y_pred,average="weighted")
## print results
print(f"Acc: {accuracy * 100:.2f}%")
print(f"Precision: {precision:.2f}")
print(f"Recall: {recall:.2f}")
print(f"F1 score: {f1:.2f}") | tackle_distribution_shift | identifier_name |
models.go | //go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package azkeys
import "time"
// BackupKeyResult - The backup key result, containing the backup blob.
type BackupKeyResult struct {
// READ-ONLY; The backup blob containing the backed up key.
Value []byte
}
// CreateKeyParameters - The key create parameters.
type CreateKeyParameters struct {
// REQUIRED; The type of key to create.
Kty *KeyType
// Elliptic curve name.
Curve *CurveName
// The attributes of a key managed by the key vault service.
KeyAttributes *KeyAttributes
KeyOps []*KeyOperation
// The key size in bits. For example: 2048, 3072, or 4096 for RSA.
KeySize *int32
// The public exponent for a RSA key.
PublicExponent *int32
// The policy rules under which the key can be exported.
ReleasePolicy *KeyReleasePolicy
// Application specific metadata in the form of key-value pairs.
Tags map[string]*string
}
// DeletedKey - A DeletedKey consisting of a WebKey plus its Attributes and deletion info
type DeletedKey struct {
// The key management attributes.
Attributes *KeyAttributes
// The Json web key.
Key *JSONWebKey
// The url of the recovery object, used to identify and recover the deleted key.
RecoveryID *string
// The policy rules under which the key can be exported.
ReleasePolicy *KeyReleasePolicy
// Application specific metadata in the form of key-value pairs.
Tags map[string]*string
// READ-ONLY; The time when the key was deleted, in UTC
DeletedDate *time.Time
// READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will
// be true.
Managed *bool
// READ-ONLY; The time when the key is scheduled to be purged, in UTC
ScheduledPurgeDate *time.Time
}
// DeletedKeyProperties - The deleted key item containing the deleted key metadata and information about deletion.
type DeletedKeyProperties struct {
// The key management attributes.
Attributes *KeyAttributes
// Key identifier.
KID *ID
// The url of the recovery object, used to identify and recover the deleted key.
RecoveryID *string
// Application specific metadata in the form of key-value pairs.
Tags map[string]*string
// READ-ONLY; The time when the key was deleted, in UTC
DeletedDate *time.Time
// READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will
// be true.
Managed *bool
// READ-ONLY; The time when the key is scheduled to be purged, in UTC
ScheduledPurgeDate *time.Time
}
// DeletedKeyPropertiesListResult - A list of keys that have been deleted in this vault.
type DeletedKeyPropertiesListResult struct {
// READ-ONLY; The URL to get the next set of deleted keys.
NextLink *string
// READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted
// keys
Value []*DeletedKeyProperties
}
// GetRandomBytesParameters - The get random bytes request object.
type GetRandomBytesParameters struct {
// REQUIRED; The requested number of random bytes.
Count *int32
}
// ImportKeyParameters - The key import parameters.
type ImportKeyParameters struct {
// REQUIRED; The Json web key
Key *JSONWebKey
// Whether to import as a hardware key (HSM) or software key.
HSM *bool
// The key management attributes.
KeyAttributes *KeyAttributes
// The policy rules under which the key can be exported.
ReleasePolicy *KeyReleasePolicy
// Application specific metadata in the form of key-value pairs.
Tags map[string]*string
}
// JSONWebKey - As of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18
type JSONWebKey struct {
// Elliptic curve name.
Crv *CurveName
// RSA private exponent, or the D component of an EC private key.
D []byte
// RSA private key parameter.
DP []byte
// RSA private key parameter.
DQ []byte
// RSA public exponent.
E []byte
// Symmetric key.
K []byte
// Key identifier.
KID *ID
KeyOps []*KeyOperation
// JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40.
Kty *KeyType
// RSA modulus.
N []byte
// RSA secret prime.
P []byte
// RSA secret prime, with p < q.
Q []byte
// RSA private key parameter.
QI []byte
// Protected Key, used with 'Bring Your Own Key'.
T []byte
// X component of an EC public key.
X []byte
// Y component of an EC public key.
Y []byte
}
// KeyAttributes - The attributes of a key managed by the key vault service.
type KeyAttributes struct {
// Determines whether the object is enabled.
Enabled *bool
// Expiry date in UTC.
Expires *time.Time
// Indicates if the private key can be exported. Release policy must be provided when creating the first version of an exportable
// key.
Exportable *bool
// Not before date in UTC.
NotBefore *time.Time
// READ-ONLY; Creation time in UTC.
Created *time.Time
// READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0.
RecoverableDays *int32
// READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable'
// the key can be permanently deleted by a privileged user; otherwise, only the system
// can purge the key, at the end of the retention interval.
RecoveryLevel *string
// READ-ONLY; Last updated time in UTC.
Updated *time.Time
}
// KeyBundle - A KeyBundle consisting of a WebKey plus its attributes.
type KeyBundle struct {
// The key management attributes.
Attributes *KeyAttributes
// The Json web key.
Key *JSONWebKey
// The policy rules under which the key can be exported.
ReleasePolicy *KeyReleasePolicy
// Application specific metadata in the form of key-value pairs.
Tags map[string]*string
// READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will
// be true.
Managed *bool
}
// KeyOperationParameters - The key operations parameters.
type KeyOperationParameters struct {
// REQUIRED; algorithm identifier
Algorithm *EncryptionAlgorithm
// REQUIRED
Value []byte
// Additional data to authenticate but not encrypt/decrypt when using authenticated crypto algorithms.
AdditionalAuthenticatedData []byte
// The tag to authenticate when performing decryption with an authenticated algorithm.
AuthenticationTag []byte
// Cryptographically random, non-repeating initialization vector for symmetric algorithms.
IV []byte
}
// KeyOperationResult - The key operation result.
type KeyOperationResult struct {
// READ-ONLY
AdditionalAuthenticatedData []byte
// READ-ONLY
AuthenticationTag []byte
// READ-ONLY
IV []byte
// READ-ONLY; Key identifier
KID *ID
// READ-ONLY
Result []byte
}
// KeyProperties - The key item containing key metadata.
type KeyProperties struct {
// The key management attributes.
Attributes *KeyAttributes
// Key identifier.
KID *ID
// Application specific metadata in the form of key-value pairs.
Tags map[string]*string
// READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will
// be true.
Managed *bool
}
// KeyPropertiesListResult - The key list result.
type KeyPropertiesListResult struct {
// READ-ONLY; The URL to get the next set of keys.
NextLink *string
// READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys.
Value []*KeyProperties
}
// KeyReleasePolicy - The policy rules under which the key can be exported.
type KeyReleasePolicy struct {
// Content type and version of key release policy
ContentType *string
// Blob encoding the policy rules under which the key can be released. Blob must be base64 URL encoded.
EncodedPolicy []byte
// Defines the mutability state of the policy. Once marked immutable, this flag cannot be reset and the policy cannot be changed
// under any circumstances.
Immutable *bool
}
// KeyReleaseResult - The release result, containing the released key.
type KeyReleaseResult struct {
// READ-ONLY; A signed object containing the released key.
Value *string
}
// KeyRotationPolicy - Management policy for a key.
type KeyRotationPolicy struct {
// The key rotation policy attributes.
Attributes *KeyRotationPolicyAttributes
// Actions that will be performed by Key Vault over the lifetime of a key. For preview, lifetimeActions can only have two
// items at maximum: one for rotate, one for notify. Notification time would be
// default to 30 days before expiry and it is not configurable.
LifetimeActions []*LifetimeAction
// READ-ONLY; The key policy id.
ID *string
}
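// Illustrative sketch (not generated code): building a policy that rotates a key 90 days after
// creation. The concrete KeyRotationPolicyAction value is an assumption here and may differ by
// SDK version; only types defined in this file are used otherwise.
//
//	rotateAction := KeyRotationPolicyAction("rotate") // assumed value
//	after := "P90D"
//	policy := KeyRotationPolicy{
//	    LifetimeActions: []*LifetimeAction{{
//	        Action:  &LifetimeActionType{Type: &rotateAction},
//	        Trigger: &LifetimeActionTrigger{TimeAfterCreate: &after},
//	    }},
//	}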
// KeyRotationPolicyAttributes - The key rotation policy attributes.
type KeyRotationPolicyAttributes struct {
// The expiryTime will be applied on the new key version. It should be at least 28 days. It will be in ISO 8601 Format. Examples:
// 90 days: P90D, 3 months: P3M, 48 hours: PT48H, 1 year and 10 days: P1Y10D | Created *time.Time
// READ-ONLY; The key rotation policy's last updated time in UTC.
Updated *time.Time
}
// KeyVerifyResult - The key verify result.
type KeyVerifyResult struct {
// READ-ONLY; True if the signature is verified, otherwise false.
Value *bool
}
// LifetimeAction - Action and its trigger that will be performed by Key Vault over the lifetime of a key.
type LifetimeAction struct {
// The action that will be executed.
Action *LifetimeActionType
// The condition that will execute the action.
Trigger *LifetimeActionTrigger
}
// LifetimeActionTrigger - A condition to be satisfied for an action to be executed.
type LifetimeActionTrigger struct {
// Time after creation to attempt to rotate. It only applies to rotate. It will be in ISO 8601 duration format. Example: 90
// days : "P90D"
TimeAfterCreate *string
// Time before expiry to attempt to rotate or notify. It will be in ISO 8601 duration format. Example: 90 days : "P90D"
TimeBeforeExpiry *string
}
// LifetimeActionType - The action that will be executed.
type LifetimeActionType struct {
// The type of the action.
Type *KeyRotationPolicyAction
}
// RandomBytes - The get random bytes response object containing the bytes.
type RandomBytes struct {
// REQUIRED; The bytes encoded as a base64url string.
Value []byte
}
// ReleaseParameters - The release key parameters.
type ReleaseParameters struct {
// REQUIRED; The attestation assertion for the target of the key release.
TargetAttestationToken *string
// The encryption algorithm to use to protect the exported key material
Algorithm *KeyEncryptionAlgorithm
// A client provided nonce for freshness.
Nonce *string
}
// RestoreKeyParameters - The key restore parameters.
type RestoreKeyParameters struct {
// REQUIRED; The backup blob associated with a key bundle.
KeyBackup []byte
}
// SignParameters - The key operations parameters.
type SignParameters struct {
// REQUIRED; The signing/verification algorithm identifier.
Algorithm *SignatureAlgorithm
// REQUIRED
Value []byte
}
// UpdateKeyParameters - The key update parameters.
type UpdateKeyParameters struct {
// The attributes of a key managed by the key vault service.
KeyAttributes *KeyAttributes
// Json web key operations.
KeyOps []*KeyOperation
// The policy rules under which the key can be exported.
ReleasePolicy *KeyReleasePolicy
// Application specific metadata in the form of key-value pairs.
Tags map[string]*string
}
// VerifyParameters - The key verify parameters.
type VerifyParameters struct {
// REQUIRED; The signing/verification algorithm.
Algorithm *SignatureAlgorithm
// REQUIRED; The digest used for signing.
Digest []byte
// REQUIRED; The signature to be verified.
Signature []byte
} | ExpiryTime *string
// READ-ONLY; The key rotation policy created time in UTC. | random_line_split |
stateful.go | // Package stateful defines a nested stateful lexer.
//
// This lexer is based heavily on the approach used by Chroma (and Pygments).
//
// The lexer is a state machine defined by a map of rules keyed by state. Each rule
// is a named regex and optional operation to apply when the rule matches.
//
// As a convenience, any Rule starting with a lowercase letter will be elided from output.
//
// Lexing starts in the "Root" group. Each rule is matched in order, with the first
// successful match producing a lexeme. If the matching rule has an associated Action
// it will be executed. The name of each non-root rule is prefixed with the name
// of its group to yield the token identifier used during matching.
//
// A state change can be introduced with the Action `Push(state)`. `Pop()` will
// return to the previous state.
//
// To reuse rules from another state, use `Include(state)`.
//
// As a special case, regexes containing backrefs in the form \N (where N is a digit)
// will match the corresponding capture group from the immediate parent group. This
// can be used to parse, among other things, heredocs.
//
// See the README, example and tests in this package for details.
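// A minimal illustrative rule set (names and patterns invented for this sketch): "Root"
// pushes into a "String" state on an opening quote, the closing quote pops back out, and
// the lowercase "whitespace" rule is elided from output:
//
//	def := stateful.Must(stateful.Rules{
//	    "Root": {
//	        {"String", `"`, stateful.Push("String")},
//	        {"Ident", `\w+`, nil},
//	        {"whitespace", `\s+`, nil},
//	    },
//	    "String": {
//	        {"Escaped", `\\.`, nil},
//	        {"StringEnd", `"`, stateful.Pop()},
//	        {"Chars", `[^"\\]+`, nil},
//	    },
//	})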
package stateful
import (
"errors"
"fmt"
"io"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var (
backrefReplace = regexp.MustCompile(`(\\+)(\d)`)
)
// Option for modifying how the Lexer works.
type Option func(d *Definition)
// InitialState overrides the default initial state of "Root".
func InitialState(state string) Option {
return func(d *Definition) {
d.initialState = state
}
}
// A Rule matching input and possibly changing state.
type Rule struct {
Name string
Pattern string
Action Action
}
// Rules grouped by name.
type Rules map[string][]Rule
// compiledRule is a Rule with its pattern compiled.
type compiledRule struct {
Rule
ignore bool
RE *regexp.Regexp
}
// compiledRules grouped by name.
type compiledRules map[string][]compiledRule
// An Action is applied when a rule matches.
type Action interface {
// Actions are responsible for validating the match. i.e. if they consumed any input.
applyAction(lexer *Lexer, groups []string) error
}
// RulesAction is an optional interface that Actions can implement.
//
// It is applied during rule construction to mutate the rule map.
type RulesAction interface {
applyRules(state string, rule int, rules compiledRules) error
}
// ActionPop pops to the previous state when the Rule matches.
type ActionPop struct{}
func (p ActionPop) applyAction(lexer *Lexer, groups []string) error {
if groups[0] == "" {
return errors.New("did not consume any input")
}
lexer.stack = lexer.stack[:len(lexer.stack)-1]
return nil
}
// Pop to the previous state.
func Pop() Action {
return ActionPop{}
}
// ReturnRule signals the lexer to return immediately.
var ReturnRule = Rule{"returnToParent", "", nil}
// Return to the parent state.
//
// Useful as the last rule in a sub-state.
func Return() Rule { return ReturnRule }
// ActionPush pushes the current state and switches to "State" when the Rule matches.
type ActionPush struct{ State string }
func (p ActionPush) applyAction(lexer *Lexer, groups []string) error {
if groups[0] == "" {
return errors.New("did not consume any input")
}
lexer.stack = append(lexer.stack, lexerState{name: p.State, groups: groups})
return nil
}
// Push to the given state.
//
// The target state will then be the set of rules used for matching
// until another Push or Pop is encountered.
func Push(state string) Action {
return ActionPush{state}
}
type include struct{ state string }
func (i include) applyAction(lexer *Lexer, groups []string) error { panic("should not be called") }
func (i include) applyRules(state string, rule int, rules compiledRules) error {
includedRules, ok := rules[i.state]
if !ok {
return fmt.Errorf("invalid include state %q", i.state)
}
clone := make([]compiledRule, len(includedRules))
copy(clone, includedRules)
rules[state] = append(rules[state][:rule], append(clone, rules[state][rule+1:]...)...) // nolint: makezero
return nil
}
// Include rules from another state in this one.
func Include(state string) Rule {
return Rule{Action: include{state}}
}
// Definition is the lexer.Definition.
type Definition struct {
rules compiledRules
symbols map[string]rune
// Map of key->*regexp.Regexp
backrefCache sync.Map
initialState string
}
// MustSimple creates a new lexer definition based on a single state described by `rules`.
// panics if the rules trigger an error
func MustSimple(rules []Rule, options ...Option) *Definition {
def, err := NewSimple(rules, options...)
if err != nil {
panic(err)
}
return def
}
// Must creates a new stateful lexer and panics if it is incorrect.
func Must(rules Rules, options ...Option) *Definition {
def, err := New(rules, options...)
if err != nil {
panic(err)
}
return def
}
// NewSimple creates a new stateful lexer with a single "Root" state.
func NewSimple(rules []Rule, options ...Option) (*Definition, error) {
return New(Rules{"Root": rules}, options...)
}
// New constructs a new stateful lexer from rules.
func New(rules Rules, options ...Option) (*Definition, error) {
compiled := compiledRules{}
for key, set := range rules {
for i, rule := range set {
pattern := "^(?:" + rule.Pattern + ")"
var (
re *regexp.Regexp
err error
)
var match = backrefReplace.FindStringSubmatch(rule.Pattern)
if match == nil || len(match[1])%2 == 0 {
re, err = regexp.Compile(pattern)
if err != nil {
return nil, fmt.Errorf("%s.%d: %s", key, i, err)
}
}
compiled[key] = append(compiled[key], compiledRule{
Rule: rule,
ignore: len(rule.Name) > 0 && unicode.IsLower(rune(rule.Name[0])),
RE: re,
})
}
}
restart:
for state, rules := range compiled {
for i, rule := range rules {
if action, ok := rule.Action.(RulesAction); ok {
if err := action.applyRules(state, i, compiled); err != nil {
return nil, fmt.Errorf("%s.%d: %s", state, i, err)
}
goto restart
}
}
}
keys := make([]string, 0, len(compiled))
for key := range compiled {
keys = append(keys, key)
}
symbols := map[string]rune{
"EOF": lexer.EOF,
}
sort.Strings(keys)
duplicates := map[string]compiledRule{}
rn := lexer.EOF - 1
for _, key := range keys {
for i, rule := range compiled[key] {
if dup, ok := duplicates[rule.Name]; ok && rule.Pattern != dup.Pattern {
panic(fmt.Sprintf("duplicate key %q with different patterns %q != %q", rule.Name, rule.Pattern, dup.Pattern))
}
duplicates[rule.Name] = rule
compiled[key][i] = rule
symbols[rule.Name] = rn
rn--
}
}
d := &Definition{
initialState: "Root",
rules: compiled,
symbols: symbols,
}
for _, option := range options {
option(d)
}
return d, nil
}
// Rules returns the user-provided Rules used to construct the lexer.
func (d *Definition) Rules() Rules {
out := Rules{}
for state, rules := range d.rules {
for _, rule := range rules {
out[state] = append(out[state], rule.Rule)
}
}
return out
}
func (d *Definition) LexString(filename string, s string) (lexer.Lexer, error) { // nolint: golint
return &Lexer{
def: d,
data: s,
stack: []lexerState{{name: d.initialState}},
pos: lexer.Position{
Filename: filename,
Line: 1,
Column: 1,
},
}, nil
}
func (d *Definition) Lex(filename string, r io.Reader) (lexer.Lexer, error) { // nolint: golint
w := &strings.Builder{}
_, err := io.Copy(w, r)
if err != nil {
return nil, err
}
return d.LexString(filename, w.String())
}
func (d *Definition) Symbols() map[string]rune { // nolint: golint
return d.symbols
}
type lexerState struct {
name string
groups []string
}
// Lexer implementation.
type Lexer struct {
stack []lexerState
def *Definition
data string
pos lexer.Position
}
func (l *Lexer) Next() (lexer.Token, error) { // nolint: golint
parent := l.stack[len(l.stack)-1]
rules := l.def.rules[parent.name]
next:
for len(l.data) > 0 {
var (
rule *compiledRule
match []int
)
for _, candidate := range rules {
// Special case "Return()".
if candidate.Rule == ReturnRule {
l.stack = l.stack[:len(l.stack)-1]
parent = l.stack[len(l.stack)-1]
rules = l.def.rules[parent.name]
continue next
}
re, err := l.getPattern(candidate)
if err != nil {
return lexer.Token{}, participle.Wrapf(l.pos, err, "rule %q", candidate.Name)
}
match = re.FindStringSubmatchIndex(l.data)
if match != nil {
rule = &candidate // nolint
break
}
}
if match == nil || rule == nil {
sample := []rune(l.data)
if len(sample) > 16 {
sample = append(sample[:16], []rune("...")...)
}
return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample))
}
if rule.Action != nil {
groups := make([]string, 0, len(match)/2)
for i := 0; i < len(match); i += 2 {
groups = append(groups, l.data[match[i]:match[i+1]])
}
if err := rule.Action.applyAction(l, groups); err != nil {
return lexer.Token{}, participle.Errorf(l.pos, "rule %q: %s", rule.Name, err)
}
} else if match[0] == match[1] {
return lexer.Token{}, participle.Errorf(l.pos, "rule %q did not match any input", rule.Name)
}
span := l.data[match[0]:match[1]]
l.data = l.data[match[1]:]
// l.groups = groups
// Update position.
pos := l.pos
l.pos.Offset += match[1]
lines := strings.Count(span, "\n")
l.pos.Line += lines
// Update column.
if lines == 0 {
l.pos.Column += utf8.RuneCountInString(span)
} else {
l.pos.Column = utf8.RuneCountInString(span[strings.LastIndex(span, "\n"):])
}
if rule.ignore {
parent = l.stack[len(l.stack)-1]
rules = l.def.rules[parent.name]
continue
}
return lexer.Token{
Type: l.def.symbols[rule.Name],
Value: span,
Pos: pos,
}, nil
}
return lexer.EOFToken(l.pos), nil
}
func (l *Lexer) getPattern(candidate compiledRule) (*regexp.Regexp, error) | {
if candidate.RE != nil {
return candidate.RE, nil
}
// We don't have a compiled RE. This means there are back-references
// that need to be substituted first.
parent := l.stack[len(l.stack)-1]
key := candidate.Pattern + "\000" + strings.Join(parent.groups, "\000")
cached, ok := l.def.backrefCache.Load(key)
if ok {
return cached.(*regexp.Regexp), nil
}
var (
re *regexp.Regexp
err error
)
pattern := backrefReplace.ReplaceAllStringFunc(candidate.Pattern, func(s string) string {
var rematch = backrefReplace.FindStringSubmatch(s)
n, nerr := strconv.ParseInt(rematch[2], 10, 64)
if nerr != nil {
err = nerr
return s
}
if len(parent.groups) == 0 || int(n) >= len(parent.groups) {
err = fmt.Errorf("invalid group %d from parent with %d groups", n, len(parent.groups))
return s
}
// concatenate the leading \\\\ which are already escaped to the quoted match.
return rematch[1][:len(rematch[1])-1] + regexp.QuoteMeta(parent.groups[n])
})
if err == nil {
re, err = regexp.Compile("^(?:" + pattern + ")")
}
if err != nil {
return nil, fmt.Errorf("invalid backref expansion: %q: %s", pattern, err)
}
l.def.backrefCache.Store(key, re)
return re, nil
} | identifier_body | |
stateful.go | // Package stateful defines a nested stateful lexer.
//
// This lexer is based heavily on the approach used by Chroma (and Pygments).
//
// The lexer is a state machine defined by a map of rules keyed by state. Each rule
// is a named regex and optional operation to apply when the rule matches.
//
// As a convenience, any Rule starting with a lowercase letter will be elided from output.
//
// Lexing starts in the "Root" group. Each rule is matched in order, with the first
// successful match producing a lexeme. If the matching rule has an associated Action
// it will be executed. The name of each non-root rule is prefixed with the name
// of its group to yield the token identifier used during matching.
//
// A state change can be introduced with the Action `Push(state)`. `Pop()` will
// return to the previous state.
//
// To reuse rules from another state, use `Include(state)`.
//
// As a special case, regexes containing backrefs in the form \N (where N is a digit)
// will match the corresponding capture group from the immediate parent group. This
// can be used to parse, among other things, heredocs.
//
// See the README, example and tests in this package for details.
package stateful
import (
"errors"
"fmt"
"io"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var (
backrefReplace = regexp.MustCompile(`(\\+)(\d)`)
)
// Option for modifying how the Lexer works.
type Option func(d *Definition)
// InitialState overrides the default initial state of "Root".
func InitialState(state string) Option {
return func(d *Definition) {
d.initialState = state
}
}
// A Rule matching input and possibly changing state.
type Rule struct {
Name string
Pattern string
Action Action
}
// Rules grouped by name.
type Rules map[string][]Rule
// compiledRule is a Rule with its pattern compiled.
type compiledRule struct {
Rule
ignore bool
RE *regexp.Regexp
}
// compiledRules grouped by name.
type compiledRules map[string][]compiledRule
// An Action is applied when a rule matches.
type Action interface {
// Actions are responsible for validating the match. i.e. if they consumed any input.
applyAction(lexer *Lexer, groups []string) error
}
// RulesAction is an optional interface that Actions can implement.
//
// It is applied during rule construction to mutate the rule map.
type RulesAction interface {
applyRules(state string, rule int, rules compiledRules) error
}
// ActionPop pops to the previous state when the Rule matches. |
func (p ActionPop) applyAction(lexer *Lexer, groups []string) error {
if groups[0] == "" {
return errors.New("did not consume any input")
}
lexer.stack = lexer.stack[:len(lexer.stack)-1]
return nil
}
// Pop to the previous state.
func Pop() Action {
return ActionPop{}
}
// ReturnRule signals the lexer to return immediately.
var ReturnRule = Rule{"returnToParent", "", nil}
// Return to the parent state.
//
// Useful as the last rule in a sub-state.
func Return() Rule { return ReturnRule }
// ActionPush pushes the current state and switches to "State" when the Rule matches.
type ActionPush struct{ State string }
func (p ActionPush) applyAction(lexer *Lexer, groups []string) error {
if groups[0] == "" {
return errors.New("did not consume any input")
}
lexer.stack = append(lexer.stack, lexerState{name: p.State, groups: groups})
return nil
}
// Push to the given state.
//
// The target state will then be the set of rules used for matching
// until another Push or Pop is encountered.
func Push(state string) Action {
return ActionPush{state}
}
type include struct{ state string }
func (i include) applyAction(lexer *Lexer, groups []string) error { panic("should not be called") }
func (i include) applyRules(state string, rule int, rules compiledRules) error {
includedRules, ok := rules[i.state]
if !ok {
return fmt.Errorf("invalid include state %q", i.state)
}
clone := make([]compiledRule, len(includedRules))
copy(clone, includedRules)
rules[state] = append(rules[state][:rule], append(clone, rules[state][rule+1:]...)...) // nolint: makezero
return nil
}
// Include rules from another state in this one.
func Include(state string) Rule {
return Rule{Action: include{state}}
}
// Definition is the lexer.Definition.
type Definition struct {
rules compiledRules
symbols map[string]rune
// Map of key->*regexp.Regexp
backrefCache sync.Map
initialState string
}
// MustSimple creates a new lexer definition based on a single state described by `rules`.
// panics if the rules trigger an error
func MustSimple(rules []Rule, options ...Option) *Definition {
def, err := NewSimple(rules, options...)
if err != nil {
panic(err)
}
return def
}
// Must creates a new stateful lexer and panics if it is incorrect.
func Must(rules Rules, options ...Option) *Definition {
def, err := New(rules, options...)
if err != nil {
panic(err)
}
return def
}
// NewSimple creates a new stateful lexer with a single "Root" state.
func NewSimple(rules []Rule, options ...Option) (*Definition, error) {
return New(Rules{"Root": rules}, options...)
}
// New constructs a new stateful lexer from rules.
func New(rules Rules, options ...Option) (*Definition, error) {
compiled := compiledRules{}
for key, set := range rules {
for i, rule := range set {
pattern := "^(?:" + rule.Pattern + ")"
var (
re *regexp.Regexp
err error
)
var match = backrefReplace.FindStringSubmatch(rule.Pattern)
if match == nil || len(match[1])%2 == 0 {
re, err = regexp.Compile(pattern)
if err != nil {
return nil, fmt.Errorf("%s.%d: %s", key, i, err)
}
}
compiled[key] = append(compiled[key], compiledRule{
Rule: rule,
ignore: len(rule.Name) > 0 && unicode.IsLower(rune(rule.Name[0])),
RE: re,
})
}
}
restart:
for state, rules := range compiled {
for i, rule := range rules {
if action, ok := rule.Action.(RulesAction); ok {
if err := action.applyRules(state, i, compiled); err != nil {
return nil, fmt.Errorf("%s.%d: %s", state, i, err)
}
goto restart
}
}
}
keys := make([]string, 0, len(compiled))
for key := range compiled {
keys = append(keys, key)
}
symbols := map[string]rune{
"EOF": lexer.EOF,
}
sort.Strings(keys)
duplicates := map[string]compiledRule{}
rn := lexer.EOF - 1
for _, key := range keys {
for i, rule := range compiled[key] {
if dup, ok := duplicates[rule.Name]; ok && rule.Pattern != dup.Pattern {
panic(fmt.Sprintf("duplicate key %q with different patterns %q != %q", rule.Name, rule.Pattern, dup.Pattern))
}
duplicates[rule.Name] = rule
compiled[key][i] = rule
symbols[rule.Name] = rn
rn--
}
}
d := &Definition{
initialState: "Root",
rules: compiled,
symbols: symbols,
}
for _, option := range options {
option(d)
}
return d, nil
}
// Rules returns the user-provided Rules used to construct the lexer.
func (d *Definition) Rules() Rules {
out := Rules{}
for state, rules := range d.rules {
for _, rule := range rules {
out[state] = append(out[state], rule.Rule)
}
}
return out
}
func (d *Definition) LexString(filename string, s string) (lexer.Lexer, error) { // nolint: golint
return &Lexer{
def: d,
data: s,
stack: []lexerState{{name: d.initialState}},
pos: lexer.Position{
Filename: filename,
Line: 1,
Column: 1,
},
}, nil
}
func (d *Definition) Lex(filename string, r io.Reader) (lexer.Lexer, error) { // nolint: golint
w := &strings.Builder{}
_, err := io.Copy(w, r)
if err != nil {
return nil, err
}
return d.LexString(filename, w.String())
}
func (d *Definition) Symbols() map[string]rune { // nolint: golint
return d.symbols
}
type lexerState struct {
name string
groups []string
}
// Lexer implementation.
type Lexer struct {
stack []lexerState
def *Definition
data string
pos lexer.Position
}
func (l *Lexer) Next() (lexer.Token, error) { // nolint: golint
parent := l.stack[len(l.stack)-1]
rules := l.def.rules[parent.name]
next:
for len(l.data) > 0 {
var (
rule *compiledRule
match []int
)
for _, candidate := range rules {
// Special case "Return()".
if candidate.Rule == ReturnRule {
l.stack = l.stack[:len(l.stack)-1]
parent = l.stack[len(l.stack)-1]
rules = l.def.rules[parent.name]
continue next
}
re, err := l.getPattern(candidate)
if err != nil {
return lexer.Token{}, participle.Wrapf(l.pos, err, "rule %q", candidate.Name)
}
match = re.FindStringSubmatchIndex(l.data)
if match != nil {
rule = &candidate // nolint
break
}
}
if match == nil || rule == nil {
sample := []rune(l.data)
if len(sample) > 16 {
sample = append(sample[:16], []rune("...")...)
}
return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample))
}
if rule.Action != nil {
groups := make([]string, 0, len(match)/2)
for i := 0; i < len(match); i += 2 {
groups = append(groups, l.data[match[i]:match[i+1]])
}
if err := rule.Action.applyAction(l, groups); err != nil {
return lexer.Token{}, participle.Errorf(l.pos, "rule %q: %s", rule.Name, err)
}
} else if match[0] == match[1] {
return lexer.Token{}, participle.Errorf(l.pos, "rule %q did not match any input", rule.Name)
}
span := l.data[match[0]:match[1]]
l.data = l.data[match[1]:]
// l.groups = groups
// Update position.
pos := l.pos
l.pos.Offset += match[1]
lines := strings.Count(span, "\n")
l.pos.Line += lines
// Update column.
if lines == 0 {
l.pos.Column += utf8.RuneCountInString(span)
} else {
l.pos.Column = utf8.RuneCountInString(span[strings.LastIndex(span, "\n"):])
}
if rule.ignore {
parent = l.stack[len(l.stack)-1]
rules = l.def.rules[parent.name]
continue
}
return lexer.Token{
Type: l.def.symbols[rule.Name],
Value: span,
Pos: pos,
}, nil
}
return lexer.EOFToken(l.pos), nil
}
func (l *Lexer) getPattern(candidate compiledRule) (*regexp.Regexp, error) {
if candidate.RE != nil {
return candidate.RE, nil
}
// We don't have a compiled RE. This means there are back-references
// that need to be substituted first.
parent := l.stack[len(l.stack)-1]
key := candidate.Pattern + "\000" + strings.Join(parent.groups, "\000")
cached, ok := l.def.backrefCache.Load(key)
if ok {
return cached.(*regexp.Regexp), nil
}
var (
re *regexp.Regexp
err error
)
pattern := backrefReplace.ReplaceAllStringFunc(candidate.Pattern, func(s string) string {
var rematch = backrefReplace.FindStringSubmatch(s)
n, nerr := strconv.ParseInt(rematch[2], 10, 64)
if nerr != nil {
err = nerr
return s
}
if len(parent.groups) == 0 || int(n) >= len(parent.groups) {
err = fmt.Errorf("invalid group %d from parent with %d groups", n, len(parent.groups))
return s
}
// concatenate the leading \\\\ which are already escaped to the quoted match.
return rematch[1][:len(rematch[1])-1] + regexp.QuoteMeta(parent.groups[n])
})
if err == nil {
re, err = regexp.Compile("^(?:" + pattern + ")")
}
if err != nil {
return nil, fmt.Errorf("invalid backref expansion: %q: %s", pattern, err)
}
l.def.backrefCache.Store(key, re)
return re, nil
} | type ActionPop struct{} | random_line_split |
stateful.go | // Package stateful defines a nested stateful lexer.
//
// This lexer is based heavily on the approach used by Chroma (and Pygments).
//
// The lexer is a state machine defined by a map of rules keyed by state. Each rule
// is a named regex and optional operation to apply when the rule matches.
//
// As a convenience, any Rule starting with a lowercase letter will be elided from output.
//
// Lexing starts in the "Root" group. Each rule is matched in order, with the first
// successful match producing a lexeme. If the matching rule has an associated Action
// it will be executed. The name of each non-root rule is prefixed with the name
// of its group to yield the token identifier used during matching.
//
// A state change can be introduced with the Action `Push(state)`. `Pop()` will
// return to the previous state.
//
// To reuse rules from another state, use `Include(state)`.
//
// As a special case, regexes containing backrefs in the form \N (where N is a digit)
// will match the corresponding capture group from the immediate parent group. This
// can be used to parse, among other things, heredocs.
//
// See the README, example and tests in this package for details.
package stateful
import (
"errors"
"fmt"
"io"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var (
backrefReplace = regexp.MustCompile(`(\\+)(\d)`)
)
// Option for modifying how the Lexer works.
type Option func(d *Definition)
// InitialState overrides the default initial state of "Root".
func InitialState(state string) Option {
return func(d *Definition) {
d.initialState = state
}
}
// A Rule matching input and possibly changing state.
type Rule struct {
Name string
Pattern string
Action Action
}
// Rules grouped by name.
type Rules map[string][]Rule
// compiledRule is a Rule with its pattern compiled.
type compiledRule struct {
Rule
ignore bool
RE *regexp.Regexp
}
// compiledRules grouped by name.
type compiledRules map[string][]compiledRule
// An Action is applied when a rule matches.
type Action interface {
// Actions are responsible for validating the match. i.e. if they consumed any input.
applyAction(lexer *Lexer, groups []string) error
}
// RulesAction is an optional interface that Actions can implement.
//
// It is applied during rule construction to mutate the rule map.
type RulesAction interface {
applyRules(state string, rule int, rules compiledRules) error
}
// ActionPop pops to the previous state when the Rule matches.
type ActionPop struct{}
func (p ActionPop) applyAction(lexer *Lexer, groups []string) error {
if groups[0] == "" {
return errors.New("did not consume any input")
}
lexer.stack = lexer.stack[:len(lexer.stack)-1]
return nil
}
// Pop to the previous state.
func Pop() Action {
return ActionPop{}
}
// ReturnRule signals the lexer to return immediately.
var ReturnRule = Rule{"returnToParent", "", nil}
// Return to the parent state.
//
// Useful as the last rule in a sub-state.
func Return() Rule { return ReturnRule }
// ActionPush pushes the current state and switches to "State" when the Rule matches.
type ActionPush struct{ State string }
func (p ActionPush) applyAction(lexer *Lexer, groups []string) error {
if groups[0] == "" {
return errors.New("did not consume any input")
}
lexer.stack = append(lexer.stack, lexerState{name: p.State, groups: groups})
return nil
}
// Push to the given state.
//
// The target state will then be the set of rules used for matching
// until another Push or Pop is encountered.
func Push(state string) Action {
return ActionPush{state}
}
type include struct{ state string }
func (i include) applyAction(lexer *Lexer, groups []string) error { panic("should not be called") }
func (i include) applyRules(state string, rule int, rules compiledRules) error {
includedRules, ok := rules[i.state]
if !ok {
return fmt.Errorf("invalid include state %q", i.state)
}
clone := make([]compiledRule, len(includedRules))
copy(clone, includedRules)
rules[state] = append(rules[state][:rule], append(clone, rules[state][rule+1:]...)...) // nolint: makezero
return nil
}
// Include rules from another state in this one.
func Include(state string) Rule {
return Rule{Action: include{state}}
}
// Definition is the lexer.Definition.
type Definition struct {
rules compiledRules
symbols map[string]rune
// Map of key->*regexp.Regexp
backrefCache sync.Map
initialState string
}
// MustSimple creates a new lexer definition based on a single state described by `rules`.
// panics if the rules trigger an error
func MustSimple(rules []Rule, options ...Option) *Definition {
def, err := NewSimple(rules, options...)
if err != nil {
panic(err)
}
return def
}
// Must creates a new stateful lexer and panics if it is incorrect.
func Must(rules Rules, options ...Option) *Definition {
def, err := New(rules, options...)
if err != nil {
panic(err)
}
return def
}
// NewSimple creates a new stateful lexer with a single "Root" state.
func NewSimple(rules []Rule, options ...Option) (*Definition, error) {
return New(Rules{"Root": rules}, options...)
}
// New constructs a new stateful lexer from rules.
func New(rules Rules, options ...Option) (*Definition, error) {
compiled := compiledRules{}
for key, set := range rules {
for i, rule := range set {
pattern := "^(?:" + rule.Pattern + ")"
var (
re *regexp.Regexp
err error
)
var match = backrefReplace.FindStringSubmatch(rule.Pattern)
if match == nil || len(match[1])%2 == 0 {
re, err = regexp.Compile(pattern)
if err != nil {
return nil, fmt.Errorf("%s.%d: %s", key, i, err)
}
}
compiled[key] = append(compiled[key], compiledRule{
Rule: rule,
ignore: len(rule.Name) > 0 && unicode.IsLower(rune(rule.Name[0])),
RE: re,
})
}
}
restart:
for state, rules := range compiled {
for i, rule := range rules {
if action, ok := rule.Action.(RulesAction); ok {
if err := action.applyRules(state, i, compiled); err != nil {
return nil, fmt.Errorf("%s.%d: %s", state, i, err)
}
goto restart
}
}
}
keys := make([]string, 0, len(compiled))
for key := range compiled {
keys = append(keys, key)
}
symbols := map[string]rune{
"EOF": lexer.EOF,
}
sort.Strings(keys)
duplicates := map[string]compiledRule{}
rn := lexer.EOF - 1
for _, key := range keys {
for i, rule := range compiled[key] {
if dup, ok := duplicates[rule.Name]; ok && rule.Pattern != dup.Pattern {
panic(fmt.Sprintf("duplicate key %q with different patterns %q != %q", rule.Name, rule.Pattern, dup.Pattern))
}
duplicates[rule.Name] = rule
compiled[key][i] = rule
symbols[rule.Name] = rn
rn--
}
}
d := &Definition{
initialState: "Root",
rules: compiled,
symbols: symbols,
}
for _, option := range options {
option(d)
}
return d, nil
}
// Rules returns the user-provided Rules used to construct the lexer.
func (d *Definition) Rules() Rules {
out := Rules{}
for state, rules := range d.rules {
for _, rule := range rules {
out[state] = append(out[state], rule.Rule)
}
}
return out
}
func (d *Definition) LexString(filename string, s string) (lexer.Lexer, error) { // nolint: golint
return &Lexer{
def: d,
data: s,
stack: []lexerState{{name: d.initialState}},
pos: lexer.Position{
Filename: filename,
Line: 1,
Column: 1,
},
}, nil
}
func (d *Definition) Lex(filename string, r io.Reader) (lexer.Lexer, error) { // nolint: golint
w := &strings.Builder{}
_, err := io.Copy(w, r)
if err != nil {
return nil, err
}
return d.LexString(filename, w.String())
}
func (d *Definition) | () map[string]rune { // nolint: golint
return d.symbols
}
type lexerState struct {
name string
groups []string
}
// Lexer implementation.
type Lexer struct {
stack []lexerState
def *Definition
data string
pos lexer.Position
}
func (l *Lexer) Next() (lexer.Token, error) { // nolint: golint
parent := l.stack[len(l.stack)-1]
rules := l.def.rules[parent.name]
next:
for len(l.data) > 0 {
var (
rule *compiledRule
match []int
)
for _, candidate := range rules {
// Special case "Return()".
if candidate.Rule == ReturnRule {
l.stack = l.stack[:len(l.stack)-1]
parent = l.stack[len(l.stack)-1]
rules = l.def.rules[parent.name]
continue next
}
re, err := l.getPattern(candidate)
if err != nil {
return lexer.Token{}, participle.Wrapf(l.pos, err, "rule %q", candidate.Name)
}
match = re.FindStringSubmatchIndex(l.data)
if match != nil {
rule = &candidate // nolint
break
}
}
if match == nil || rule == nil {
sample := []rune(l.data)
if len(sample) > 16 {
sample = append(sample[:16], []rune("...")...)
}
return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample))
}
if rule.Action != nil {
groups := make([]string, 0, len(match)/2)
for i := 0; i < len(match); i += 2 {
groups = append(groups, l.data[match[i]:match[i+1]])
}
if err := rule.Action.applyAction(l, groups); err != nil {
return lexer.Token{}, participle.Errorf(l.pos, "rule %q: %s", rule.Name, err)
}
} else if match[0] == match[1] {
return lexer.Token{}, participle.Errorf(l.pos, "rule %q did not match any input", rule.Name)
}
span := l.data[match[0]:match[1]]
l.data = l.data[match[1]:]
// l.groups = groups
// Update position.
pos := l.pos
l.pos.Offset += match[1]
lines := strings.Count(span, "\n")
l.pos.Line += lines
// Update column.
if lines == 0 {
l.pos.Column += utf8.RuneCountInString(span)
} else {
l.pos.Column = utf8.RuneCountInString(span[strings.LastIndex(span, "\n"):])
}
if rule.ignore {
parent = l.stack[len(l.stack)-1]
rules = l.def.rules[parent.name]
continue
}
return lexer.Token{
Type: l.def.symbols[rule.Name],
Value: span,
Pos: pos,
}, nil
}
return lexer.EOFToken(l.pos), nil
}
func (l *Lexer) getPattern(candidate compiledRule) (*regexp.Regexp, error) {
if candidate.RE != nil {
return candidate.RE, nil
}
// We don't have a compiled RE. This means there are back-references
// that need to be substituted first.
parent := l.stack[len(l.stack)-1]
key := candidate.Pattern + "\000" + strings.Join(parent.groups, "\000")
cached, ok := l.def.backrefCache.Load(key)
if ok {
return cached.(*regexp.Regexp), nil
}
var (
re *regexp.Regexp
err error
)
pattern := backrefReplace.ReplaceAllStringFunc(candidate.Pattern, func(s string) string {
var rematch = backrefReplace.FindStringSubmatch(s)
n, nerr := strconv.ParseInt(rematch[2], 10, 64)
if nerr != nil {
err = nerr
return s
}
if len(parent.groups) == 0 || int(n) >= len(parent.groups) {
err = fmt.Errorf("invalid group %d from parent with %d groups", n, len(parent.groups))
return s
}
// concatenate the leading \\\\ which are already escaped to the quoted match.
return rematch[1][:len(rematch[1])-1] + regexp.QuoteMeta(parent.groups[n])
})
if err == nil {
re, err = regexp.Compile("^(?:" + pattern + ")")
}
if err != nil {
return nil, fmt.Errorf("invalid backref expansion: %q: %s", pattern, err)
}
l.def.backrefCache.Store(key, re)
return re, nil
}
| Symbols | identifier_name |
stateful.go | // Package stateful defines a nested stateful lexer.
//
// This lexer is based heavily on the approach used by Chroma (and Pygments).
//
// The lexer is a state machine defined by a map of rules keyed by state. Each rule
// is a named regex and optional operation to apply when the rule matches.
//
// As a convenience, any Rule starting with a lowercase letter will be elided from output.
//
// Lexing starts in the "Root" group. Each rule is matched in order, with the first
// successful match producing a lexeme. If the matching rule has an associated Action
// it will be executed. The name of each non-root rule is prefixed with the name
// of its group to yield the token identifier used during matching.
//
// A state change can be introduced with the Action `Push(state)`. `Pop()` will
// return to the previous state.
//
// To reuse rules from another state, use `Include(state)`.
//
// As a special case, regexes containing backrefs in the form \N (where N is a digit)
// will match the corresponding capture group from the immediate parent group. This
// can be used to parse, among other things, heredocs.
//
// See the README, example and tests in this package for details.
package stateful
import (
"errors"
"fmt"
"io"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
"github.com/alecthomas/participle/v2"
"github.com/alecthomas/participle/v2/lexer"
)
var (
backrefReplace = regexp.MustCompile(`(\\+)(\d)`)
)
// Option for modifying how the Lexer works.
type Option func(d *Definition)
// InitialState overrides the default initial state of "Root".
func InitialState(state string) Option {
return func(d *Definition) {
d.initialState = state
}
}
// A Rule matching input and possibly changing state.
type Rule struct {
Name string
Pattern string
Action Action
}
// Rules grouped by name.
type Rules map[string][]Rule
// compiledRule is a Rule with its pattern compiled.
type compiledRule struct {
Rule
ignore bool
RE *regexp.Regexp
}
// compiledRules grouped by name.
type compiledRules map[string][]compiledRule
// An Action is applied when a rule matches.
type Action interface {
// Actions are responsible for validating the match. i.e. if they consumed any input.
applyAction(lexer *Lexer, groups []string) error
}
// RulesAction is an optional interface that Actions can implement.
//
// It is applied during rule construction to mutate the rule map.
type RulesAction interface {
applyRules(state string, rule int, rules compiledRules) error
}
// ActionPop pops to the previous state when the Rule matches.
type ActionPop struct{}
func (p ActionPop) applyAction(lexer *Lexer, groups []string) error {
if groups[0] == "" {
return errors.New("did not consume any input")
}
lexer.stack = lexer.stack[:len(lexer.stack)-1]
return nil
}
// Pop to the previous state.
func Pop() Action {
return ActionPop{}
}
// ReturnRule signals the lexer to return immediately.
var ReturnRule = Rule{"returnToParent", "", nil}
// Return to the parent state.
//
// Useful as the last rule in a sub-state.
func Return() Rule { return ReturnRule }
// ActionPush pushes the current state and switches to "State" when the Rule matches.
type ActionPush struct{ State string }
func (p ActionPush) applyAction(lexer *Lexer, groups []string) error {
if groups[0] == "" |
lexer.stack = append(lexer.stack, lexerState{name: p.State, groups: groups})
return nil
}
// Push to the given state.
//
// The target state will then be the set of rules used for matching
// until another Push or Pop is encountered.
func Push(state string) Action {
return ActionPush{state}
}
type include struct{ state string }
func (i include) applyAction(lexer *Lexer, groups []string) error { panic("should not be called") }
func (i include) applyRules(state string, rule int, rules compiledRules) error {
includedRules, ok := rules[i.state]
if !ok {
return fmt.Errorf("invalid include state %q", i.state)
}
clone := make([]compiledRule, len(includedRules))
copy(clone, includedRules)
rules[state] = append(rules[state][:rule], append(clone, rules[state][rule+1:]...)...) // nolint: makezero
return nil
}
// Include rules from another state in this one.
func Include(state string) Rule {
return Rule{Action: include{state}}
}
// Definition is the lexer.Definition.
type Definition struct {
rules compiledRules
symbols map[string]rune
// Map of key->*regexp.Regexp
backrefCache sync.Map
initialState string
}
// MustSimple creates a new lexer definition based on a single state described by `rules`.
// panics if the rules trigger an error
func MustSimple(rules []Rule, options ...Option) *Definition {
def, err := NewSimple(rules, options...)
if err != nil {
panic(err)
}
return def
}
// Must creates a new stateful lexer and panics if it is incorrect.
func Must(rules Rules, options ...Option) *Definition {
def, err := New(rules, options...)
if err != nil {
panic(err)
}
return def
}
// NewSimple creates a new stateful lexer with a single "Root" state.
func NewSimple(rules []Rule, options ...Option) (*Definition, error) {
return New(Rules{"Root": rules}, options...)
}
// New constructs a new stateful lexer from rules.
func New(rules Rules, options ...Option) (*Definition, error) {
compiled := compiledRules{}
for key, set := range rules {
for i, rule := range set {
pattern := "^(?:" + rule.Pattern + ")"
var (
re *regexp.Regexp
err error
)
var match = backrefReplace.FindStringSubmatch(rule.Pattern)
if match == nil || len(match[1])%2 == 0 {
re, err = regexp.Compile(pattern)
if err != nil {
return nil, fmt.Errorf("%s.%d: %s", key, i, err)
}
}
compiled[key] = append(compiled[key], compiledRule{
Rule: rule,
ignore: len(rule.Name) > 0 && unicode.IsLower(rune(rule.Name[0])),
RE: re,
})
}
}
restart:
for state, rules := range compiled {
for i, rule := range rules {
if action, ok := rule.Action.(RulesAction); ok {
if err := action.applyRules(state, i, compiled); err != nil {
return nil, fmt.Errorf("%s.%d: %s", state, i, err)
}
goto restart
}
}
}
keys := make([]string, 0, len(compiled))
for key := range compiled {
keys = append(keys, key)
}
symbols := map[string]rune{
"EOF": lexer.EOF,
}
sort.Strings(keys)
duplicates := map[string]compiledRule{}
rn := lexer.EOF - 1
for _, key := range keys {
for i, rule := range compiled[key] {
if dup, ok := duplicates[rule.Name]; ok && rule.Pattern != dup.Pattern {
panic(fmt.Sprintf("duplicate key %q with different patterns %q != %q", rule.Name, rule.Pattern, dup.Pattern))
}
duplicates[rule.Name] = rule
compiled[key][i] = rule
symbols[rule.Name] = rn
rn--
}
}
d := &Definition{
initialState: "Root",
rules: compiled,
symbols: symbols,
}
for _, option := range options {
option(d)
}
return d, nil
}
// Rules returns the user-provided Rules used to construct the lexer.
func (d *Definition) Rules() Rules {
out := Rules{}
for state, rules := range d.rules {
for _, rule := range rules {
out[state] = append(out[state], rule.Rule)
}
}
return out
}
func (d *Definition) LexString(filename string, s string) (lexer.Lexer, error) { // nolint: golint
return &Lexer{
def: d,
data: s,
stack: []lexerState{{name: d.initialState}},
pos: lexer.Position{
Filename: filename,
Line: 1,
Column: 1,
},
}, nil
}
func (d *Definition) Lex(filename string, r io.Reader) (lexer.Lexer, error) { // nolint: golint
w := &strings.Builder{}
_, err := io.Copy(w, r)
if err != nil {
return nil, err
}
return d.LexString(filename, w.String())
}
func (d *Definition) Symbols() map[string]rune { // nolint: golint
return d.symbols
}
type lexerState struct {
name string
groups []string
}
// Lexer implementation.
type Lexer struct {
stack []lexerState
def *Definition
data string
pos lexer.Position
}
func (l *Lexer) Next() (lexer.Token, error) { // nolint: golint
parent := l.stack[len(l.stack)-1]
rules := l.def.rules[parent.name]
next:
for len(l.data) > 0 {
var (
rule *compiledRule
match []int
)
for _, candidate := range rules {
// Special case "Return()".
if candidate.Rule == ReturnRule {
l.stack = l.stack[:len(l.stack)-1]
parent = l.stack[len(l.stack)-1]
rules = l.def.rules[parent.name]
continue next
}
re, err := l.getPattern(candidate)
if err != nil {
return lexer.Token{}, participle.Wrapf(l.pos, err, "rule %q", candidate.Name)
}
match = re.FindStringSubmatchIndex(l.data)
if match != nil {
rule = &candidate // nolint
break
}
}
if match == nil || rule == nil {
sample := []rune(l.data)
if len(sample) > 16 {
sample = append(sample[:16], []rune("...")...)
}
return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", string(sample))
}
if rule.Action != nil {
groups := make([]string, 0, len(match)/2)
for i := 0; i < len(match); i += 2 {
groups = append(groups, l.data[match[i]:match[i+1]])
}
if err := rule.Action.applyAction(l, groups); err != nil {
return lexer.Token{}, participle.Errorf(l.pos, "rule %q: %s", rule.Name, err)
}
} else if match[0] == match[1] {
return lexer.Token{}, participle.Errorf(l.pos, "rule %q did not match any input", rule.Name)
}
span := l.data[match[0]:match[1]]
l.data = l.data[match[1]:]
// l.groups = groups
// Update position.
pos := l.pos
l.pos.Offset += match[1]
lines := strings.Count(span, "\n")
l.pos.Line += lines
// Update column.
if lines == 0 {
l.pos.Column += utf8.RuneCountInString(span)
} else {
l.pos.Column = utf8.RuneCountInString(span[strings.LastIndex(span, "\n"):])
}
if rule.ignore {
parent = l.stack[len(l.stack)-1]
rules = l.def.rules[parent.name]
continue
}
return lexer.Token{
Type: l.def.symbols[rule.Name],
Value: span,
Pos: pos,
}, nil
}
return lexer.EOFToken(l.pos), nil
}
func (l *Lexer) getPattern(candidate compiledRule) (*regexp.Regexp, error) {
if candidate.RE != nil {
return candidate.RE, nil
}
// We don't have a compiled RE. This means there are back-references
// that need to be substituted first.
parent := l.stack[len(l.stack)-1]
key := candidate.Pattern + "\000" + strings.Join(parent.groups, "\000")
cached, ok := l.def.backrefCache.Load(key)
if ok {
return cached.(*regexp.Regexp), nil
}
var (
re *regexp.Regexp
err error
)
pattern := backrefReplace.ReplaceAllStringFunc(candidate.Pattern, func(s string) string {
var rematch = backrefReplace.FindStringSubmatch(s)
n, nerr := strconv.ParseInt(rematch[2], 10, 64)
if nerr != nil {
err = nerr
return s
}
if len(parent.groups) == 0 || int(n) >= len(parent.groups) {
err = fmt.Errorf("invalid group %d from parent with %d groups", n, len(parent.groups))
return s
}
// keep the leading backslashes (already escaped) and append the quoted text of the matched parent group.
return rematch[1][:len(rematch[1])-1] + regexp.QuoteMeta(parent.groups[n])
})
if err == nil {
re, err = regexp.Compile("^(?:" + pattern + ")")
}
if err != nil {
return nil, fmt.Errorf("invalid backref expansion: %q: %s", pattern, err)
}
l.def.backrefCache.Store(key, re)
return re, nil
}
| {
return errors.New("did not consume any input")
} | conditional_block |
gaiatools.py | import numpy as np
import pandas as pd
from scipy import interpolate
import warnings
import gala.integrate as gi
import gala.dynamics as gd
import gala.potential as gp
from gala.units import galactic
from astropy import coordinates as coord
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy import table
import gaia_tools.load as gload
from pyia import GaiaData
def load_tgas():
"""
Creates pyia.GaiaData object from TGAS (a subclass of pandas DataFrame)
"""
tgas = GaiaData(gload.tgas())
return tgas
##############################################
## Columns from Gaia DR2 data model
## https://www.cosmos.esa.int/documents/29201/1645651/GDR2_DataModel_draft.pdf/938f48a2-a08d-b63c-67e7-eae778c9a657
##############################################
cols_astrometry = "ra,dec,parallax,pmra,pmdec"
ecol_astrometry = "ra_error,dec_error,parallax_error,parallax_over_error,"+\
"pmra_error,pmdec_error,ra_dec_corr,ra_parallax_corr,ra_pmra_corr,"+\
"ra_pmdec_corr,dec_parallax_corr,dec_pmra_corr,dec_pmdec_corr,parallax_pmra_corr,"+\
"parallax_pmdec_corr,pmra_pmdec_corr,duplicated_source"
qual_astrometry = "astrometric_n_obs_al,astrometric_n_obs_ac,astrometric_n_good_obs_al,astrometric_n_bad_obs_al,"+\
"astrometric_gof_al,astrometric_chi2_al,astrometric_excess_noise,astrometric_excess_noise_sig,"+\
"astrometric_params_solved,astrometric_primary_flag,astrometric_weight_al,"+\
"astrometric_pseudo_colour,astrometric_pseudo_colour_error,"+\
"mean_varpi_factor_al,astrometric_matched_observations,visibility_periods_used,"+\
"astrometric_sigma5d_max,frame_rotator_object_type,matched_observations"
cols_phot = "phot_g_mean_mag,phot_bp_mean_mag,phot_rp_mean_mag,phot_variable_flag"
ecol_phot = "phot_g_mean_flux,phot_bp_mean_flux,phot_rp_mean_flux,"+\
"phot_g_mean_flux_error,phot_g_mean_flux_over_error,"+\
"phot_bp_mean_flux_error,phot_bp_mean_flux_over_error,"+\
"phot_rp_mean_flux_error,phot_rp_mean_flux_over_error"
qual_phot = "phot_g_n_obs,phot_bp_n_obs,phot_rp_n_obs,phot_bp_rp_excess_factor,phot_proc_mode"
cols_redd = "bp_rp,bp_g,g_rp,a_g_val,e_bp_min_rp_val,"+\
"a_g_percentile_lower,a_g_percentile_upper,"+\
"e_bp_min_rp_percentile_lower,e_bp_min_rp_percentile_upper"
cols_spec = "radial_velocity,radial_velocity_error"
qual_spec = "rv_template_teff,rv_template_logg,rv_template_fe_h,rv_nb_transits"
cols_star = "teff_val,radius_val,lum_val"
ecol_star = "teff_percentile_lower,teff_percentile_upper,"+\
"radius_percentile_lower,radius_percentile_upper,"+\
"lum_percentile_lower,lum_percentile_upper"
cols_rave = ""
ecol_rave = ""
all_columns = ",".join(["source_id", cols_astrometry, ecol_astrometry, qual_astrometry,
cols_phot, ecol_phot, qual_phot, cols_redd, cols_spec, qual_spec, cols_star, ecol_star])
## This is a full set of things that I think will be useful
full_columns = ",".join(["source_id", cols_astrometry, ecol_astrometry,
cols_phot, ecol_phot, cols_redd, cols_spec, qual_spec, cols_star, ecol_star])
## This is a minimal set of things that I think will be useful
default_columns = ",".join(["source_id",cols_astrometry,ecol_astrometry,
cols_phot, cols_spec, cols_star])
def create_source_query_from_ids(ids, columns=default_columns,
source="gaiaedr3.gaia_source"):
out = "SELECT {} FROM {} WHERE ".format(
columns, source)
idstrs = " or ".join(["source_id = {}".format(x) for x in ids])
out += idstrs
return out
def create_source_query_from(coords, radius=1*u.arcsec,
columns=default_columns,
source="gaiaedr3.gaia_source",
Nmax=None):
"""
Generate a query string selecting a specific list of coordinates.
Built from https://gist.github.com/mfouesneau/b6b25ed645eab9da4710153fcf9a4cb8
"""
N = len(coords)
if Nmax is None: Nmax = 2*N
out = "SELECT TOP {} {} FROM {} WHERE ".format(
Nmax, columns, source)
def _make_contains_str(c):
cstr = "CONTAINS(POINT('ICRS',{0:}.ra,{0:}.dec),CIRCLE('ICRS',{1:},{2:},{3:}))=1".format(
source, c.ra.deg, c.dec.deg, radius.to("deg").value)
return cstr
cstrs = map(_make_contains_str, coords)
out += " or ".join(cstrs)
return out
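# Hedged usage sketch (not called elsewhere in this module): shows how the two
# query builders above could be combined. The coordinates and source_ids below
# are made-up placeholders, not real targets; both strings would be submitted
# to an ADQL/TAP service such as the Gaia archive.
def _example_query_strings():
    coords = SkyCoord(ra=[10.68, 56.75] * u.deg, dec=[41.27, 24.12] * u.deg)
    coord_query = create_source_query_from(coords, radius=2 * u.arcsec)
    id_query = create_source_query_from_ids([4295806720, 38655544960])
    return coord_query, id_query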
def create_samples(Nsamp,mu,cov):
Nstars,Nparams = mu.shape
assert Nstars == len(cov)
assert Nparams == cov.shape[1]
output = np.zeros((Nsamp*Nstars, Nparams))
for i in range(Nstars):
i1 = Nsamp*i
i2 = Nsamp*(i+1)
output[i1:i2,:] = np.random.multivariate_normal(mu[i,:],cov[i,:,:],Nsamp)
output = output.reshape(Nstars, Nsamp, Nparams)
return output
def get_gc_frame():
v_sun = coord.CartesianDifferential([11.1, 250, 7.25]*u.km/u.s)
#gc_frame = coord.Galactocentric(galcen_distance=8.3*u.kpc,
# z_sun=0*u.pc,
# galcen_v_sun=v_sun)
gc_frame = coord.Galactocentric()
return gc_frame
def get_gccoo_w0(coo):
gc_frame = get_gc_frame()
gccoo = coo.transform_to(gc_frame)
w0 = gd.PhaseSpacePosition(gccoo.data)
return gccoo, w0
def get_orbit_params(orbits):
N = orbits.shape[1]
pers = []
apos = []
eccs = []
for i in range(N):
orbit = orbits[:,i]
rp, ra = orbit.pericenter(), orbit.apocenter()
pers.append(rp)
apos.append(ra)
eccs.append((ra - rp) / (ra + rp))
return u.Quantity(pers), u.Quantity(apos), u.Quantity(eccs)
def get_orbit_params_fast(orbits):
try:
N = orbits.shape[1]
except IndexError:
orbit = orbits
r = np.sqrt(np.sum(orbits.xyz**2,axis=0))
rp, ra = np.min(r), np.max(r)
return u.Quantity(rp), u.Quantity(ra), u.Quantity((ra-rp)/(ra+rp))
pers = []
apos = []
eccs = []
for i in range(N):
orbit = orbits[:,i]
r = np.sqrt(np.sum(orbit.xyz**2,axis=0))
rp, ra = np.min(r), np.max(r)
pers.append(rp)
apos.append(ra)
eccs.append((ra - rp) / (ra + rp))
return u.Quantity(pers), u.Quantity(apos), u.Quantity(eccs)
def calc_vtan_error(pmra, pmdec, parallax):
d = u.kpc / parallax.value
pmra = pmra.to(u.rad/u.yr, u.dimensionless_angles())
pmdec= pmdec.to(u.rad/u.yr, u.dimensionless_angles())
vtan = d * np.sqrt(pmra**2 + pmdec**2)
vtan = vtan.to(u.km/u.s, u.dimensionless_angles())
return vtan
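# Hedged worked example (assumed values, not from any catalogue): a star with
# parallax 2 mas (i.e. 0.5 kpc, matching the mas/kpc convention used above) and
# total proper motion 10 mas/yr gives v_tan ~ 4.74 * 10 / 2 ~ 24 km/s.
def _example_vtan():
    pmra = 6.0 * u.mas / u.yr
    pmdec = 8.0 * u.mas / u.yr
    parallax = 2.0 * u.mas
    return calc_vtan_error(pmra, pmdec, parallax)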
def avgstd(x,ignore_nan=False, axis=None):
mean = np.nanmean if ignore_nan else np.mean
stdev = np.nanstd if ignore_nan else np.std
kws = {}
if axis is not None: kws['axis'] = axis
mu = mean(x,**kws)
sig = stdev(x,**kws)
return np.vstack([mu,sig]).T
def medscat(x,sigma=2,ignore_nan=False, axis=None, for_errorbar_plot=False):
percentile = np.nanpercentile if ignore_nan else np.percentile
pdict = {1:[16,50,84],2:[5,50,95],3:[.1,50,99.9]}
assert sigma in pdict
kws = {}
if axis is not None: kws['axis'] = axis
p1,p2,p3 = percentile(x, pdict[sigma], **kws)
e1 = p1-p2
e2 = p3-p2
if for_errorbar_plot:
e1 = -e1
return p2, np.stack([e1,e2])
return np.stack([e1,p2,e2])
def modefinder(x, bins="auto", dropna=True):
"""
Estimates the mode of a sample of points.
Assumes a unimodal system.
Take a histogram of the data and return the bin with the largest value.
TODO If an initial value is specified, find the local maximum closest to that value.
"""
if dropna: |
h,x = np.histogram(x, bins=bins)
xm = (x[1:]+x[:-1])/2.
ix = np.argmax(h)
return xm[ix]
def get_finite(x,y):
""" Get x and y that are both finite """
finite = np.logical_and(np.isfinite(x), np.isfinite(y))
xf = x[finite]; yf = y[finite]
return xf, yf
def fit_spline(x, y, **kwargs):
""" A simple wrapper to scipy.interpolate.UnivariateSpline (remove nan, sort x) """
xf, yf = get_finite(x,y)
iisort = np.argsort(xf)
return interpolate.UnivariateSpline(xf[iisort],yf[iisort], **kwargs)
def bin_medscat(x, y, percentiles=[5,50,95], for_errorbar_plot=False, dropna=True, bins="auto", **kwargs):
"""
Histogram x into bins.
Then in those bins, take percentiles of y.
"""
if dropna: x, y = get_finite(x, y)
h, xe = np.histogram(x, bins=bins, **kwargs)
xout = (xe[1:]+xe[:-1])/2.
indices = np.digitize(x, xe)
yout = np.zeros((len(xe)-1,len(percentiles)))+np.nan
for ix in np.unique(indices):
# np.digitize returns 1-based bin indices; shift to 0-based and skip values outside the histogram range
ibin = ix - 1
if ibin < 0 or ibin >= len(yout): continue
# Percentile in this bin
ii = ix==indices
yout[ibin,:] = np.percentile(y[ii], percentiles)
if for_errorbar_plot:
e1 = yout[:,1] - yout[:,0]
e2 = yout[:,2] - yout[:,1]
return xout, yout[:,1], [e1,e2]
return xout, yout
def calculate_actions(w0,pot=gp.MilkyWayPotential(), dt=0.5, n_steps=10000, full_output=False):
""" Approximate actions following https://github.com/adrn/gala/blob/master/docs/dynamics/actionangle.rst """
assert len(w0.shape)==0
w = gp.Hamiltonian(pot).integrate_orbit(w0, dt=dt, n_steps=n_steps)
toy_potential = gd.fit_isochrone(w)
toy_actions, toy_angles, toy_freqs = toy_potential.action_angle(w)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore")
result = gd.find_actions(w, N_max=8, toy_potential=toy_potential)
if full_output: return result, w
return result["actions"]
def query_and_match(coo, match_radius=1, columns=full_columns):
"""
Query gaia given coordinates
Return a table that is sorted, and an array saying which rows actually matched an object in gaia
"""
from pyia import GaiaDataNew
query = create_source_query_from(coo, columns=columns)
gaia = GaiaDataNew.from_query(query)
gcoo = SkyCoord(gaia.ra, gaia.dec)
idx, d2d, _ = coo.match_to_catalog_sky(gcoo)
iimatch = d2d.arcsec < match_radius
gtab = gaia.data[idx]
if iimatch.sum() != len(gtab):
print("Warning: only matched {}/{} stars".format(iimatch.sum(),len(gtab)))
return gtab, iimatch
def query_and_match_sourceid(source_ids, match_radius=1, columns=full_columns):
"""
Query gaia given source_ids
Return a table in the order of the source_ids
"""
from pyia import GaiaDataNew
unique_arr, indexes = np.unique(source_ids, return_inverse=True)
assert len(unique_arr) == len(source_ids), "Not all IDs are unique"
query = create_source_query_from_ids(source_ids, columns=columns)
gaia = GaiaDataNew.from_query(query)
# Sort by source id, find indices, then resort
gdat = gaia.data
gdat.sort("source_id")
assert np.all(unique_arr == gdat["source_id"])
gdat = gdat[indexes]
assert np.all(gdat["source_id"]==source_ids)
return gdat
| x = x[np.isfinite(x)] | conditional_block |
gaiatools.py | import numpy as np
import pandas as pd
from scipy import interpolate
import warnings
import gala.integrate as gi
import gala.dynamics as gd
import gala.potential as gp
from gala.units import galactic
from astropy import coordinates as coord
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy import table
import gaia_tools.load as gload
from pyia import GaiaData
def load_tgas():
"""
Creates pyia.GaiaData object from TGAS (a subclass of pandas DataFrame)
"""
tgas = GaiaData(gload.tgas())
return tgas
##############################################
## Columns from Gaia DR2 data model
## https://www.cosmos.esa.int/documents/29201/1645651/GDR2_DataModel_draft.pdf/938f48a2-a08d-b63c-67e7-eae778c9a657
##############################################
cols_astrometry = "ra,dec,parallax,pmra,pmdec"
ecol_astrometry = "ra_error,dec_error,parallax_error,parallax_over_error,"+\
"pmra_error,pmdec_error,ra_dec_corr,ra_parallax_corr,ra_pmra_corr,"+\
"ra_pmdec_corr,dec_parallax_corr,dec_pmra_corr,dec_pmdec_corr,parallax_pmra_corr,"+\
"parallax_pmdec_corr,pmra_pmdec_corr,duplicated_source"
qual_astrometry = "astrometric_n_obs_al,astrometric_n_obs_ac,astrometric_n_good_obs_al,astrometric_n_bad_obs_al,"+\
"astrometric_gof_al,astrometric_chi2_al,astrometric_excess_noise,astrometric_excess_noise_sig,"+\
"astrometric_params_solved,astrometric_primary_flag,astrometric_weight_al,"+\
"astrometric_pseudo_colour,astrometric_pseudo_colour_error,"+\
"mean_varpi_factor_al,astrometric_matched_observations,visibility_periods_used,"+\
"astrometric_sigma5d_max,frame_rotator_object_type,matched_observations"
cols_phot = "phot_g_mean_mag,phot_bp_mean_mag,phot_rp_mean_mag,phot_variable_flag"
ecol_phot = "phot_g_mean_flux,phot_bp_mean_flux,phot_rp_mean_flux,"+\
"phot_g_mean_flux_error,phot_g_mean_flux_over_error,"+\
"phot_bp_mean_flux_error,phot_bp_mean_flux_over_error,"+\
"phot_rp_mean_flux_error,phot_rp_mean_flux_over_error"
qual_phot = "phot_g_n_obs,phot_bp_n_obs,phot_rp_n_obs,phot_bp_rp_excess_factor,phot_proc_mode"
cols_redd = "bp_rp,bp_g,g_rp,a_g_val,e_bp_min_rp_val,"+\
"a_g_percentile_lower,a_g_percentile_upper,"+\
"e_bp_min_rp_percentile_lower,e_bp_min_rp_percentile_upper"
cols_spec = "radial_velocity,radial_velocity_error"
qual_spec = "rv_template_teff,rv_template_logg,rv_template_fe_h,rv_nb_transits"
cols_star = "teff_val,radius_val,lum_val"
ecol_star = "teff_percentile_lower,teff_percentile_upper,"+\
"radius_percentile_lower,radius_percentile_upper,"+\
"lum_percentile_lower,lum_percentile_upper"
cols_rave = ""
ecol_rave = ""
all_columns = ",".join(["source_id", cols_astrometry, ecol_astrometry, qual_astrometry,
cols_phot, ecol_phot, qual_phot, cols_redd, cols_spec, qual_spec, cols_star, ecol_star])
## This is a full set of things that I think will be useful
full_columns = ",".join(["source_id", cols_astrometry, ecol_astrometry,
cols_phot, ecol_phot, cols_redd, cols_spec, qual_spec, cols_star, ecol_star])
## This is a minimal set of things that I think will be useful
default_columns = ",".join(["source_id",cols_astrometry,ecol_astrometry,
cols_phot, cols_spec, cols_star])
def create_source_query_from_ids(ids, columns=default_columns,
source="gaiaedr3.gaia_source"):
out = "SELECT {} FROM {} WHERE ".format(
columns, source)
idstrs = " or ".join(["source_id = {}".format(x) for x in ids])
out += idstrs
return out
def create_source_query_from(coords, radius=1*u.arcsec,
columns=default_columns,
source="gaiaedr3.gaia_source",
Nmax=None):
"""
Generate a query string selecting a specific list of coordinates.
Built from https://gist.github.com/mfouesneau/b6b25ed645eab9da4710153fcf9a4cb8
"""
N = len(coords)
if Nmax is None: Nmax = 2*N
out = "SELECT TOP {} {} FROM {} WHERE ".format(
Nmax, columns, source)
def | (c):
cstr = "CONTAINS(POINT('ICRS',{0:}.ra,{0:}.dec),CIRCLE('ICRS',{1:},{2:},{3:}))=1".format(
source, c.ra.deg, c.dec.deg, radius.to("deg").value)
return cstr
cstrs = map(_make_contains_str, coords)
out += " or ".join(cstrs)
return out
def create_samples(Nsamp,mu,cov):
Nstars,Nparams = mu.shape
assert Nstars == len(cov)
assert Nparams == cov.shape[1]
output = np.zeros((Nsamp*Nstars, Nparams))
for i in range(Nstars):
i1 = Nsamp*i
i2 = Nsamp*(i+1)
output[i1:i2,:] = np.random.multivariate_normal(mu[i,:],cov[i,:,:],Nsamp)
output = output.reshape(Nstars, Nsamp, Nparams)
return output
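# Hedged usage sketch (toy numbers only): draws Monte Carlo realisations of the
# astrometry of two fake stars with diagonal covariances using create_samples()
# above. mu is (Nstars, Nparams), cov is (Nstars, Nparams, Nparams), and the
# result has shape (Nstars, Nsamp, Nparams).
def _example_create_samples(Nsamp=100):
    mu = np.array([[1.0, -2.0, 0.5],
                   [0.3, 4.0, -1.0]])
    cov = np.array([np.diag([0.1, 0.2, 0.05]) ** 2,
                    np.diag([0.2, 0.1, 0.10]) ** 2])
    samples = create_samples(Nsamp, mu, cov)
    assert samples.shape == (2, Nsamp, 3)
    return samples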
def get_gc_frame():
v_sun = coord.CartesianDifferential([11.1, 250, 7.25]*u.km/u.s)
#gc_frame = coord.Galactocentric(galcen_distance=8.3*u.kpc,
# z_sun=0*u.pc,
# galcen_v_sun=v_sun)
gc_frame = coord.Galactocentric()
return gc_frame
def get_gccoo_w0(coo):
gc_frame = get_gc_frame()
gccoo = coo.transform_to(gc_frame)
w0 = gd.PhaseSpacePosition(gccoo.data)
return gccoo, w0
def get_orbit_params(orbits):
N = orbits.shape[1]
pers = []
apos = []
eccs = []
for i in range(N):
orbit = orbits[:,i]
rp, ra = orbit.pericenter(), orbit.apocenter()
pers.append(rp)
apos.append(ra)
eccs.append((ra - rp) / (ra + rp))
return u.Quantity(pers), u.Quantity(apos), u.Quantity(eccs)
def get_orbit_params_fast(orbits):
try:
N = orbits.shape[1]
except IndexError:
orbit = orbits
r = np.sqrt(np.sum(orbits.xyz**2,axis=0))
rp, ra = np.min(r), np.max(r)
return u.Quantity(rp), u.Quantity(ra), u.Quantity((ra-rp)/(ra+rp))
pers = []
apos = []
eccs = []
for i in range(N):
orbit = orbits[:,i]
r = np.sqrt(np.sum(orbit.xyz**2,axis=0))
rp, ra = np.min(r), np.max(r)
pers.append(rp)
apos.append(ra)
eccs.append((ra - rp) / (ra + rp))
return u.Quantity(pers), u.Quantity(apos), u.Quantity(eccs)
def calc_vtan_error(pmra, pmdec, parallax):
d = u.kpc / parallax.value
pmra = pmra.to(u.rad/u.yr, u.dimensionless_angles())
pmdec= pmdec.to(u.rad/u.yr, u.dimensionless_angles())
vtan = d * np.sqrt(pmra**2 + pmdec**2)
vtan = vtan.to(u.km/u.s, u.dimensionless_angles())
return vtan
def avgstd(x,ignore_nan=False, axis=None):
mean = np.nanmean if ignore_nan else np.mean
stdev = np.nanstd if ignore_nan else np.std
kws = {}
if axis is not None: kws['axis'] = axis
mu = mean(x,**kws)
sig = stdev(x,**kws)
return np.vstack([mu,sig]).T
def medscat(x,sigma=2,ignore_nan=False, axis=None, for_errorbar_plot=False):
percentile = np.nanpercentile if ignore_nan else np.percentile
pdict = {1:[16,50,84],2:[5,50,95],3:[.1,50,99.9]}
assert sigma in pdict
kws = {}
if axis is not None: kws['axis'] = axis
p1,p2,p3 = percentile(x, pdict[sigma], **kws)
e1 = p1-p2
e2 = p3-p2
if for_errorbar_plot:
e1 = -e1
return p2, np.stack([e1,e2])
return np.stack([e1,p2,e2])
def modefinder(x, bins="auto", dropna=True):
"""
Estimates the mode of a sample of points.
Assumes a unimodal system.
Take a histogram of the data and return the bin with the largest value.
TODO If an initial value is specified, find the local maximum closest to that value.
"""
if dropna: x = x[np.isfinite(x)]
h,x = np.histogram(x, bins=bins)
xm = (x[1:]+x[:-1])/2.
ix = np.argmax(h)
return xm[ix]
def get_finite(x,y):
""" Get x and y that are both finite """
finite = np.logical_and(np.isfinite(x), np.isfinite(y))
xf = x[finite]; yf = y[finite]
return xf, yf
def fit_spline(x, y, **kwargs):
""" A simple wrapper to scipy.interpolate.UnivariateSpline (remove nan, sort x) """
xf, yf = get_finite(x,y)
iisort = np.argsort(xf)
return interpolate.UnivariateSpline(xf[iisort],yf[iisort], **kwargs)
def bin_medscat(x, y, percentiles=[5,50,95], for_errorbar_plot=False, dropna=True, bins="auto", **kwargs):
"""
Histogram x into bins.
Then in those bins, take percentiles of y.
"""
if dropna: x, y = get_finite(x, y)
h, xe = np.histogram(x, bins=bins, **kwargs)
xout = (xe[1:]+xe[:-1])/2.
indices = np.digitize(x, xe)
yout = np.zeros((len(xe)-1,len(percentiles)))+np.nan
for ix in np.unique(indices):
# np.digitize returns 1-based bin indices; shift to 0-based and skip values outside the histogram range
ibin = ix - 1
if ibin < 0 or ibin >= len(yout): continue
# Percentile in this bin
ii = ix==indices
yout[ibin,:] = np.percentile(y[ii], percentiles)
if for_errorbar_plot:
e1 = yout[:,1] - yout[:,0]
e2 = yout[:,2] - yout[:,1]
return xout, yout[:,1], [e1,e2]
return xout, yout
def calculate_actions(w0,pot=gp.MilkyWayPotential(), dt=0.5, n_steps=10000, full_output=False):
""" Approximate actions following https://github.com/adrn/gala/blob/master/docs/dynamics/actionangle.rst """
assert len(w0.shape)==0
w = gp.Hamiltonian(pot).integrate_orbit(w0, dt=dt, n_steps=n_steps)
toy_potential = gd.fit_isochrone(w)
toy_actions, toy_angles, toy_freqs = toy_potential.action_angle(w)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore")
result = gd.find_actions(w, N_max=8, toy_potential=toy_potential)
if full_output: return result, w
return result["actions"]
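# Hedged end-to-end sketch (made-up star, default MilkyWayPotential): converts
# an ICRS position and velocity to a Galactocentric phase-space point with
# get_gccoo_w0() and then estimates actions with calculate_actions() above.
# Orbit integration plus torus fitting is slow, so treat this only as a sketch.
def _example_actions():
    coo = SkyCoord(ra=150.0 * u.deg, dec=-20.0 * u.deg, distance=1.2 * u.kpc,
                   pm_ra_cosdec=-5.0 * u.mas / u.yr, pm_dec=3.0 * u.mas / u.yr,
                   radial_velocity=80.0 * u.km / u.s)
    gccoo, w0 = get_gccoo_w0(coo)
    return calculate_actions(w0, dt=1.0, n_steps=5000)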
def query_and_match(coo, match_radius=1, columns=full_columns):
"""
Query gaia given coordinates
Return a table that is sorted, and an array saying which rows actually matched an object in gaia
"""
from pyia import GaiaDataNew
query = create_source_query_from(coo, columns=columns)
gaia = GaiaDataNew.from_query(query)
gcoo = SkyCoord(gaia.ra, gaia.dec)
idx, d2d, _ = coo.match_to_catalog_sky(gcoo)
iimatch = d2d.arcsec < match_radius
gtab = gaia.data[idx]
if iimatch.sum() != len(gtab):
print("Warning: only matched {}/{} stars".format(iimatch.sum(),len(gtab)))
return gtab, iimatch
def query_and_match_sourceid(source_ids, match_radius=1, columns=full_columns):
"""
Query gaia given source_ids
Return a table in the order of the source_ids
"""
from pyia import GaiaDataNew
unique_arr, indexes = np.unique(source_ids, return_inverse=True)
assert len(unique_arr) == len(source_ids), "Not all IDs are unique"
query = create_source_query_from_ids(source_ids, columns=columns)
gaia = GaiaDataNew.from_query(query)
# Sort by source id, find indices, then resort
gdat = gaia.data
gdat.sort("source_id")
assert np.all(unique_arr == gdat["source_id"])
gdat = gdat[indexes]
assert np.all(gdat["source_id"]==source_ids)
return gdat
| _make_contains_str | identifier_name |
gaiatools.py | import numpy as np
import pandas as pd
from scipy import interpolate
import warnings
import gala.integrate as gi
import gala.dynamics as gd
import gala.potential as gp
from gala.units import galactic
from astropy import coordinates as coord
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy import table
import gaia_tools.load as gload
from pyia import GaiaData
def load_tgas():
"""
Creates pyia.GaiaData object from TGAS (a subclass of pandas DataFrame)
"""
tgas = GaiaData(gload.tgas())
return tgas
##############################################
## Columns from Gaia DR2 data model
## https://www.cosmos.esa.int/documents/29201/1645651/GDR2_DataModel_draft.pdf/938f48a2-a08d-b63c-67e7-eae778c9a657
##############################################
cols_astrometry = "ra,dec,parallax,pmra,pmdec"
ecol_astrometry = "ra_error,dec_error,parallax_error,parallax_over_error,"+\
"pmra_error,pmdec_error,ra_dec_corr,ra_parallax_corr,ra_pmra_corr,"+\
"ra_pmdec_corr,dec_parallax_corr,dec_pmra_corr,dec_pmdec_corr,parallax_pmra_corr,"+\
"parallax_pmdec_corr,pmra_pmdec_corr,duplicated_source"
qual_astrometry = "astrometric_n_obs_al,astrometric_n_obs_ac,astrometric_n_good_obs_al,astrometric_n_bad_obs_al,"+\
"astrometric_gof_al,astrometric_chi2_al,astrometric_excess_noise,astrometric_excess_noise_sig,"+\
"astrometric_params_solved,astrometric_primary_flag,astrometric_weight_al,"+\
"astrometric_pseudo_colour,astrometric_pseudo_colour_error,"+\
"mean_varpi_factor_al,astrometric_matched_observations,visibility_periods_used,"+\
"astrometric_sigma5d_max,frame_rotator_object_type,matched_observations"
cols_phot = "phot_g_mean_mag,phot_bp_mean_mag,phot_rp_mean_mag,phot_variable_flag"
ecol_phot = "phot_g_mean_flux,phot_bp_mean_flux,phot_rp_mean_flux,"+\
"phot_g_mean_flux_error,phot_g_mean_flux_over_error,"+\
"phot_bp_mean_flux_error,phot_bp_mean_flux_over_error,"+\
"phot_rp_mean_flux_error,phot_rp_mean_flux_over_error"
qual_phot = "phot_g_n_obs,phot_bp_n_obs,phot_rp_n_obs,phot_bp_rp_excess_factor,phot_proc_mode"
cols_redd = "bp_rp,bp_g,g_rp,a_g_val,e_bp_min_rp_val,"+\
"a_g_percentile_lower,a_g_percentile_upper,"+\
"e_bp_min_rp_percentile_lower,e_bp_min_rp_percentile_upper"
cols_spec = "radial_velocity,radial_velocity_error"
qual_spec = "rv_template_teff,rv_template_logg,rv_template_fe_h,rv_nb_transits"
cols_star = "teff_val,radius_val,lum_val"
ecol_star = "teff_percentile_lower,teff_percentile_upper,"+\
"radius_percentile_lower,radius_percentile_upper,"+\
"lum_percentile_lower,lum_percentile_upper"
cols_rave = ""
ecol_rave = ""
all_columns = ",".join(["source_id", cols_astrometry, ecol_astrometry, qual_astrometry,
cols_phot, ecol_phot, qual_phot, cols_redd, cols_spec, qual_spec, cols_star, ecol_star])
## This is a full set of things that I think will be useful
full_columns = ",".join(["source_id", cols_astrometry, ecol_astrometry,
cols_phot, ecol_phot, cols_redd, cols_spec, qual_spec, cols_star, ecol_star])
## This is a minimal set of things that I think will be useful
default_columns = ",".join(["source_id",cols_astrometry,ecol_astrometry,
cols_phot, cols_spec, cols_star])
def create_source_query_from_ids(ids, columns=default_columns,
source="gaiaedr3.gaia_source"):
out = "SELECT {} FROM {} WHERE ".format(
columns, source)
idstrs = " or ".join(["source_id = {}".format(x) for x in ids])
out += idstrs
return out
def create_source_query_from(coords, radius=1*u.arcsec,
columns=default_columns,
source="gaiaedr3.gaia_source",
Nmax=None):
"""
Generate a query string selecting a specific list of coordinates.
Built from https://gist.github.com/mfouesneau/b6b25ed645eab9da4710153fcf9a4cb8
"""
N = len(coords)
if Nmax is None: Nmax = 2*N
out = "SELECT TOP {} {} FROM {} WHERE ".format(
Nmax, columns, source)
def _make_contains_str(c):
cstr = "CONTAINS(POINT('ICRS',{0:}.ra,{0:}.dec),CIRCLE('ICRS',{1:},{2:},{3:}))=1".format(
source, c.ra.deg, c.dec.deg, radius.to("deg").value)
return cstr
cstrs = map(_make_contains_str, coords)
out += " or ".join(cstrs)
return out
def create_samples(Nsamp,mu,cov):
Nstars,Nparams = mu.shape
assert Nstars == len(cov)
assert Nparams == cov.shape[1]
output = np.zeros((Nsamp*Nstars, Nparams))
for i in range(Nstars):
i1 = Nsamp*i
i2 = Nsamp*(i+1)
output[i1:i2,:] = np.random.multivariate_normal(mu[i,:],cov[i,:,:],Nsamp)
output = output.reshape(Nstars, Nsamp, Nparams)
return output
def get_gc_frame():
v_sun = coord.CartesianDifferential([11.1, 250, 7.25]*u.km/u.s)
#gc_frame = coord.Galactocentric(galcen_distance=8.3*u.kpc,
# z_sun=0*u.pc,
# galcen_v_sun=v_sun)
gc_frame = coord.Galactocentric()
return gc_frame
def get_gccoo_w0(coo):
gc_frame = get_gc_frame()
gccoo = coo.transform_to(gc_frame)
w0 = gd.PhaseSpacePosition(gccoo.data)
return gccoo, w0
def get_orbit_params(orbits):
N = orbits.shape[1]
pers = []
apos = []
eccs = []
for i in range(N):
orbit = orbits[:,i]
rp, ra = orbit.pericenter(), orbit.apocenter()
pers.append(rp)
apos.append(ra)
eccs.append((ra - rp) / (ra + rp))
return u.Quantity(pers), u.Quantity(apos), u.Quantity(eccs)
def get_orbit_params_fast(orbits):
try:
N = orbits.shape[1]
except IndexError:
orbit = orbits
r = np.sqrt(np.sum(orbits.xyz**2,axis=0))
rp, ra = np.min(r), np.max(r)
return u.Quantity(rp), u.Quantity(ra), u.Quantity((ra-rp)/(ra+rp))
pers = []
apos = []
eccs = []
for i in range(N):
orbit = orbits[:,i]
r = np.sqrt(np.sum(orbit.xyz**2,axis=0))
rp, ra = np.min(r), np.max(r)
pers.append(rp)
apos.append(ra)
eccs.append((ra - rp) / (ra + rp))
return u.Quantity(pers), u.Quantity(apos), u.Quantity(eccs)
def calc_vtan_error(pmra, pmdec, parallax):
d = u.kpc / parallax.value
pmra = pmra.to(u.rad/u.yr, u.dimensionless_angles())
pmdec= pmdec.to(u.rad/u.yr, u.dimensionless_angles())
vtan = d * np.sqrt(pmra**2 + pmdec**2)
vtan = vtan.to(u.km/u.s, u.dimensionless_angles())
return vtan
| stdev = np.nanstd if ignore_nan else np.std
kws = {}
if axis is not None: kws['axis'] = axis
mu = mean(x,**kws)
sig = stdev(x,**kws)
return np.vstack([mu,sig]).T
def medscat(x,sigma=2,ignore_nan=False, axis=None, for_errorbar_plot=False):
percentile = np.nanpercentile if ignore_nan else np.percentile
pdict = {1:[16,50,84],2:[5,50,95],3:[.1,50,99.9]}
assert sigma in pdict
kws = {}
if axis is not None: kws['axis'] = axis
p1,p2,p3 = percentile(x, pdict[sigma], **kws)
e1 = p1-p2
e2 = p3-p2
if for_errorbar_plot:
e1 = -e1
return p2, np.stack([e1,e2])
return np.stack([e1,p2,e2])
def modefinder(x, bins="auto", dropna=True):
"""
Estimates the mode of a sample of points.
Assumes a unimodal system.
Take a histogram of the data and return the bin with the largest value.
TODO If an initial value is specified, find the local maximum closest to that value.
"""
if dropna: x = x[np.isfinite(x)]
h,x = np.histogram(x, bins=bins)
xm = (x[1:]+x[:-1])/2.
ix = np.argmax(h)
return xm[ix]
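# Hedged usage sketch (synthetic data): summarises a skewed sample with the
# robust helpers above; medscat() returns (offset below, median, offset above)
# for the requested sigma level, and modefinder() gives a histogram-based mode.
def _example_robust_stats(seed=0):
    rng = np.random.default_rng(seed)
    sample = rng.lognormal(mean=0.0, sigma=0.5, size=5000)
    minus, med, plus = medscat(sample, sigma=1)
    mode = modefinder(sample)
    return med, (minus, plus), mode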
def get_finite(x,y):
""" Get x and y that are both finite """
finite = np.logical_and(np.isfinite(x), np.isfinite(y))
xf = x[finite]; yf = y[finite]
return xf, yf
def fit_spline(x, y, **kwargs):
""" A simple wrapper to scipy.interpolate.UnivariateSpline (remove nan, sort x) """
xf, yf = get_finite(x,y)
iisort = np.argsort(xf)
return interpolate.UnivariateSpline(xf[iisort],yf[iisort], **kwargs)
def bin_medscat(x, y, percentiles=[5,50,95], for_errorbar_plot=False, dropna=True, bins="auto", **kwargs):
"""
Histogram x into bins.
Then in those bins, take percentiles of y.
"""
if dropna: x, y = get_finite(x, y)
h, xe = np.histogram(x, bins=bins, **kwargs)
xout = (xe[1:]+xe[:-1])/2.
indices = np.digitize(x, xe)
yout = np.zeros((len(xe)-1,len(percentiles)))+np.nan
for ix in np.unique(indices):
# np.digitize returns 1-based bin indices; shift to 0-based and skip values outside the histogram range
ibin = ix - 1
if ibin < 0 or ibin >= len(yout): continue
# Percentile in this bin
ii = ix==indices
yout[ibin,:] = np.percentile(y[ii], percentiles)
if for_errorbar_plot:
e1 = yout[:,1] - yout[:,0]
e2 = yout[:,2] - yout[:,1]
return xout, yout[:,1], [e1,e2]
return xout, yout
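# Hedged usage sketch (synthetic data): bins a noisy x/y relation with
# bin_medscat() above and evaluates a smoothing spline from fit_spline() at the
# bin centres. The for_errorbar_plot output is shaped for matplotlib errorbar().
def _example_binned_trend(seed=1):
    rng = np.random.default_rng(seed)
    x = rng.uniform(0.0, 10.0, 2000)
    y = np.sin(x) + rng.normal(0.0, 0.3, x.size)
    xb, ymed, yerr = bin_medscat(x, y, bins=20, for_errorbar_plot=True)
    spline = fit_spline(x, y, s=50)
    return xb, ymed, yerr, spline(xb)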
def calculate_actions(w0,pot=gp.MilkyWayPotential(), dt=0.5, n_steps=10000, full_output=False):
""" Approximate actions following https://github.com/adrn/gala/blob/master/docs/dynamics/actionangle.rst """
assert len(w0.shape)==0
w = gp.Hamiltonian(pot).integrate_orbit(w0, dt=dt, n_steps=n_steps)
toy_potential = gd.fit_isochrone(w)
toy_actions, toy_angles, toy_freqs = toy_potential.action_angle(w)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore")
result = gd.find_actions(w, N_max=8, toy_potential=toy_potential)
if full_output: return result, w
return result["actions"]
def query_and_match(coo, match_radius=1, columns=full_columns):
"""
Query gaia given coordinates
Return a table that is sorted, and an array saying which rows actually matched an object in gaia
"""
from pyia import GaiaDataNew
query = create_source_query_from(coo, columns=columns)
gaia = GaiaDataNew.from_query(query)
gcoo = SkyCoord(gaia.ra, gaia.dec)
idx, d2d, _ = coo.match_to_catalog_sky(gcoo)
iimatch = d2d.arcsec < match_radius
gtab = gaia.data[idx]
if iimatch.sum() != len(gtab):
print("Warning: only matched {}/{} stars".format(iimatch.sum(),len(gtab)))
return gtab, iimatch
def query_and_match_sourceid(source_ids, match_radius=1, columns=full_columns):
"""
Query gaia given source_ids
Return a table in the order of the source_ids
"""
from pyia import GaiaDataNew
unique_arr, indexes = np.unique(source_ids, return_inverse=True)
assert len(unique_arr) == len(source_ids), "Not all IDs are unique"
query = create_source_query_from_ids(source_ids, columns=columns)
gaia = GaiaDataNew.from_query(query)
# Sort by source id, find indices, then resort
gdat = gaia.data
gdat.sort("source_id")
assert np.all(unique_arr == gdat["source_id"])
gdat = gdat[indexes]
assert np.all(gdat["source_id"]==source_ids)
return gdat | def avgstd(x,ignore_nan=False, axis=None):
mean = np.nanmean if ignore_nan else np.mean | random_line_split |
gaiatools.py | import numpy as np
import pandas as pd
from scipy import interpolate
import warnings
import gala.integrate as gi
import gala.dynamics as gd
import gala.potential as gp
from gala.units import galactic
from astropy import coordinates as coord
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy import table
import gaia_tools.load as gload
from pyia import GaiaData
def load_tgas():
"""
Creates pyia.GaiaData object from TGAS (a subclass of pandas DataFrame)
"""
tgas = GaiaData(gload.tgas())
return tgas
##############################################
## Columns from Gaia DR2 data model
## https://www.cosmos.esa.int/documents/29201/1645651/GDR2_DataModel_draft.pdf/938f48a2-a08d-b63c-67e7-eae778c9a657
##############################################
cols_astrometry = "ra,dec,parallax,pmra,pmdec"
ecol_astrometry = "ra_error,dec_error,parallax_error,parallax_over_error,"+\
"pmra_error,pmdec_error,ra_dec_corr,ra_parallax_corr,ra_pmra_corr,"+\
"ra_pmdec_corr,dec_parallax_corr,dec_pmra_corr,dec_pmdec_corr,parallax_pmra_corr,"+\
"parallax_pmdec_corr,pmra_pmdec_corr,duplicated_source"
qual_astrometry = "astrometric_n_obs_al,astrometric_n_obs_ac,astrometric_n_good_obs_al,astrometric_n_bad_obs_al,"+\
"astrometric_gof_al,astrometric_chi2_al,astrometric_excess_noise,astrometric_excess_noise_sig,"+\
"astrometric_params_solved,astrometric_primary_flag,astrometric_weight_al,"+\
"astrometric_pseudo_colour,astrometric_pseudo_colour_error,"+\
"mean_varpi_factor_al,astrometric_matched_observations,visibility_periods_used,"+\
"astrometric_sigma5d_max,frame_rotator_object_type,matched_observations"
cols_phot = "phot_g_mean_mag,phot_bp_mean_mag,phot_rp_mean_mag,phot_variable_flag"
ecol_phot = "phot_g_mean_flux,phot_bp_mean_flux,phot_rp_mean_flux,"+\
"phot_g_mean_flux_error,phot_g_mean_flux_over_error,"+\
"phot_bp_mean_flux_error,phot_bp_mean_flux_over_error,"+\
"phot_rp_mean_flux_error,phot_rp_mean_flux_over_error"
qual_phot = "phot_g_n_obs,phot_bp_n_obs,phot_rp_n_obs,phot_bp_rp_excess_factor,phot_proc_mode"
cols_redd = "bp_rp,bp_g,g_rp,a_g_val,e_bp_min_rp_val,"+\
"a_g_percentile_lower,a_g_percentile_upper,"+\
"e_bp_min_rp_percentile_lower,e_bp_min_rp_percentile_upper"
cols_spec = "radial_velocity,radial_velocity_error"
qual_spec = "rv_template_teff,rv_template_logg,rv_template_fe_h,rv_nb_transits"
cols_star = "teff_val,radius_val,lum_val"
ecol_star = "teff_percentile_lower,teff_percentile_upper,"+\
"radius_percentile_lower,radius_percentile_upper,"+\
"lum_percentile_lower,lum_percentile_upper"
cols_rave = ""
ecol_rave = ""
all_columns = ",".join(["source_id", cols_astrometry, ecol_astrometry, qual_astrometry,
cols_phot, ecol_phot, qual_phot, cols_redd, cols_spec, qual_spec, cols_star, ecol_star])
## This is a full set of things that I think will be useful
full_columns = ",".join(["source_id", cols_astrometry, ecol_astrometry,
cols_phot, ecol_phot, cols_redd, cols_spec, qual_spec, cols_star, ecol_star])
## This is a minimal set of things that I think will be useful
default_columns = ",".join(["source_id",cols_astrometry,ecol_astrometry,
cols_phot, cols_spec, cols_star])
def create_source_query_from_ids(ids, columns=default_columns,
source="gaiaedr3.gaia_source"):
out = "SELECT {} FROM {} WHERE ".format(
columns, source)
idstrs = " or ".join(["source_id = {}".format(x) for x in ids])
out += idstrs
return out
def create_source_query_from(coords, radius=1*u.arcsec,
columns=default_columns,
source="gaiaedr3.gaia_source",
Nmax=None):
"""
Generate a query string selecting a specific list of coordinates.
Built from https://gist.github.com/mfouesneau/b6b25ed645eab9da4710153fcf9a4cb8
"""
N = len(coords)
if Nmax is None: Nmax = 2*N
out = "SELECT TOP {} {} FROM {} WHERE ".format(
Nmax, columns, source)
def _make_contains_str(c):
cstr = "CONTAINS(POINT('ICRS',{0:}.ra,{0:}.dec),CIRCLE('ICRS',{1:},{2:},{3:}))=1".format(
source, c.ra.deg, c.dec.deg, radius.to("deg").value)
return cstr
cstrs = map(_make_contains_str, coords)
out += " or ".join(cstrs)
return out
def create_samples(Nsamp,mu,cov):
Nstars,Nparams = mu.shape
assert Nstars == len(cov)
assert Nparams == cov.shape[1]
output = np.zeros((Nsamp*Nstars, Nparams))
for i in range(Nstars):
i1 = Nsamp*i
i2 = Nsamp*(i+1)
output[i1:i2,:] = np.random.multivariate_normal(mu[i,:],cov[i,:,:],Nsamp)
output = output.reshape(Nstars, Nsamp, Nparams)
return output
def get_gc_frame():
v_sun = coord.CartesianDifferential([11.1, 250, 7.25]*u.km/u.s)
#gc_frame = coord.Galactocentric(galcen_distance=8.3*u.kpc,
# z_sun=0*u.pc,
# galcen_v_sun=v_sun)
gc_frame = coord.Galactocentric()
return gc_frame
def get_gccoo_w0(coo):
gc_frame = get_gc_frame()
gccoo = coo.transform_to(gc_frame)
w0 = gd.PhaseSpacePosition(gccoo.data)
return gccoo, w0
def get_orbit_params(orbits):
N = orbits.shape[1]
pers = []
apos = []
eccs = []
for i in range(N):
orbit = orbits[:,i]
rp, ra = orbit.pericenter(), orbit.apocenter()
pers.append(rp)
apos.append(ra)
eccs.append((ra - rp) / (ra + rp))
return u.Quantity(pers), u.Quantity(apos), u.Quantity(eccs)
def get_orbit_params_fast(orbits):
try:
N = orbits.shape[1]
except IndexError:
orbit = orbits
r = np.sqrt(np.sum(orbits.xyz**2,axis=0))
rp, ra = np.min(r), np.max(r)
return u.Quantity(rp), u.Quantity(ra), u.Quantity((ra-rp)/(ra+rp))
pers = []
apos = []
eccs = []
for i in range(N):
orbit = orbits[:,i]
r = np.sqrt(np.sum(orbit.xyz**2,axis=0))
rp, ra = np.min(r), np.max(r)
pers.append(rp)
apos.append(ra)
eccs.append((ra - rp) / (ra + rp))
return u.Quantity(pers), u.Quantity(apos), u.Quantity(eccs)
def calc_vtan_error(pmra, pmdec, parallax):
d = u.kpc / parallax.value
pmra = pmra.to(u.rad/u.yr, u.dimensionless_angles())
pmdec= pmdec.to(u.rad/u.yr, u.dimensionless_angles())
vtan = d * np.sqrt(pmra**2 + pmdec**2)
vtan = vtan.to(u.km/u.s, u.dimensionless_angles())
return vtan
def avgstd(x,ignore_nan=False, axis=None):
mean = np.nanmean if ignore_nan else np.mean
stdev = np.nanstd if ignore_nan else np.std
kws = {}
if axis is not None: kws['axis'] = axis
mu = mean(x,**kws)
sig = stdev(x,**kws)
return np.vstack([mu,sig]).T
def medscat(x,sigma=2,ignore_nan=False, axis=None, for_errorbar_plot=False):
percentile = np.nanpercentile if ignore_nan else np.percentile
pdict = {1:[16,50,84],2:[5,50,95],3:[.1,50,99.9]}
assert sigma in pdict
kws = {}
if axis is not None: kws['axis'] = axis
p1,p2,p3 = percentile(x, pdict[sigma], **kws)
e1 = p1-p2
e2 = p3-p2
if for_errorbar_plot:
e1 = -e1
return p2, np.stack([e1,e2])
return np.stack([e1,p2,e2])
def modefinder(x, bins="auto", dropna=True):
"""
Estimates the mode of a sample of points.
Assumes a unimodal system.
Take a histogram of the data and return the bin with the largest value.
TODO If an initial value is specified, find the local maximum closest to that value.
"""
if dropna: x = x[np.isfinite(x)]
h,x = np.histogram(x, bins=bins)
xm = (x[1:]+x[:-1])/2.
ix = np.argmax(h)
return xm[ix]
def get_finite(x,y):
""" Get x and y that are both finite """
finite = np.logical_and(np.isfinite(x), np.isfinite(y))
xf = x[finite]; yf = y[finite]
return xf, yf
def fit_spline(x, y, **kwargs):
""" A simple wrapper to scipy.interpolate.UnivariateSpline (remove nan, sort x) """
xf, yf = get_finite(x,y)
iisort = np.argsort(xf)
return interpolate.UnivariateSpline(xf[iisort],yf[iisort], **kwargs)
def bin_medscat(x, y, percentiles=[5,50,95], for_errorbar_plot=False, dropna=True, bins="auto", **kwargs):
"""
Histogram x into bins.
Then in those bins, take percentiles of y.
"""
if dropna: x, y = get_finite(x, y)
h, xe = np.histogram(x, bins=bins, **kwargs)
xout = (xe[1:]+xe[:-1])/2.
indices = np.digitize(x, xe)
yout = np.zeros((len(xe)-1,len(percentiles)))+np.nan
for ix in np.unique(indices):
# np.digitize returns 1-based bin indices; shift to 0-based and skip values outside the histogram range
ibin = ix - 1
if ibin < 0 or ibin >= len(yout): continue
# Percentile in this bin
ii = ix==indices
yout[ibin,:] = np.percentile(y[ii], percentiles)
if for_errorbar_plot:
e1 = yout[:,1] - yout[:,0]
e2 = yout[:,2] - yout[:,1]
return xout, yout[:,1], [e1,e2]
return xout, yout
def calculate_actions(w0,pot=gp.MilkyWayPotential(), dt=0.5, n_steps=10000, full_output=False):
""" Approximate actions following https://github.com/adrn/gala/blob/master/docs/dynamics/actionangle.rst """
assert len(w0.shape)==0
w = gp.Hamiltonian(pot).integrate_orbit(w0, dt=dt, n_steps=n_steps)
toy_potential = gd.fit_isochrone(w)
toy_actions, toy_angles, toy_freqs = toy_potential.action_angle(w)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore")
result = gd.find_actions(w, N_max=8, toy_potential=toy_potential)
if full_output: return result, w
return result["actions"]
def query_and_match(coo, match_radius=1, columns=full_columns):
"""
Query gaia given coordinates
Return a table that is sorted, and an array saying which rows actually matched an object in gaia
"""
from pyia import GaiaDataNew
query = create_source_query_from(coo, columns=columns)
gaia = GaiaDataNew.from_query(query)
gcoo = SkyCoord(gaia.ra, gaia.dec)
idx, d2d, _ = coo.match_to_catalog_sky(gcoo)
iimatch = d2d.arcsec < match_radius
gtab = gaia.data[idx]
if iimatch.sum() != len(gtab):
print("Warning: only matched {}/{} stars".format(iimatch.sum(),len(gtab)))
return gtab, iimatch
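# Hedged usage sketch (placeholder coordinates, requires network access and
# pyia): cross-matches a small target list against the Gaia archive with
# query_and_match() above and keeps only rows that matched within 1 arcsec.
def _example_crossmatch():
    targets = SkyCoord(ra=[229.64, 250.42] * u.deg, dec=[2.08, 36.46] * u.deg)
    gtab, iimatch = query_and_match(targets, match_radius=1.0)
    return gtab[iimatch]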
def query_and_match_sourceid(source_ids, match_radius=1, columns=full_columns):
| """
Query gaia given source_ids
Return a table in the order of the source_ids
"""
from pyia import GaiaDataNew
unique_arr, indexes = np.unique(source_ids, return_inverse=True)
assert len(unique_arr) == len(source_ids), "Not all IDs are unique"
query = create_source_query_from_ids(source_ids, columns=columns)
gaia = GaiaDataNew.from_query(query)
# Sort by source id, find indices, then resort
gdat = gaia.data
gdat.sort("source_id")
assert np.all(unique_arr == gdat["source_id"])
gdat = gdat[indexes]
assert np.all(gdat["source_id"]==source_ids)
return gdat | identifier_body | |
langviews.js | /**
* Langviews Analysis tool
* @file Main file for Langviews application
* @author MusikAnimal
* @copyright 2016 MusikAnimal
* @license MIT License: https://opensource.org/licenses/MIT
*/
const config = require('./config');
const siteMap = require('../shared/site_map');
const siteDomains = Object.keys(siteMap).map(key => siteMap[key]);
const Pv = require('../shared/pv');
/** Main LangViews class */
class LangViews extends Pv {
constructor() {
super(config);
}
/**
* Initialize the application.
* Called in `pv.js` after translations have loaded
* @return {null} Nothing
*/
initialize() {
this.assignDefaults();
this.setupDateRangeSelector();
this.popParams();
this.setupListeners();
this.updateInterAppLinks();
}
/**
* Copy default values over to class instance
* Use JSON stringify/parse to make a deep clone of the defaults
* @return {null} Nothing
*/
assignDefaults() {
Object.assign(this, JSON.parse(JSON.stringify(this.config.defaults.params)));
}
/**
* Add general event listeners
* @returns {null} nothing
*/
setupListeners() {
super.setupListeners();
$('#langviews_form').on('submit', e => {
e.preventDefault(); // prevent page from reloading
this.processArticle();
});
$('.another-query').on('click', () => {
this.setState('initial');
this.pushParams(true);
});
$('.sort-link').on('click', e => {
const sortType = $(e.currentTarget).data('type');
this.direction = this.sort === sortType ? -this.direction : 1;
this.sort = sortType;
this.renderData();
});
$(this.config.projectInput).on('change', () => {
this.validateProject();
this.updateInterAppLinks();
});
$('.download-csv').on('click', this.exportCSV.bind(this));
$('.download-json').on('click', this.exportJSON.bind(this));
}
/**
* Get the base project name (without language and the .org)
* @returns {String} base project name
*/
get baseProject() {
return this.project.split('.')[1];
}
/**
* @returns {Typeahead} instance
*/
get typeahead() {
return $(this.config.articleInput).data('typeahead');
}
/**
* Get all user-inputted parameters
* @param {boolean} [forCacheKey] whether or not to include the page name, and exclude sort and direction
* in the returned object. This is for the purposes of generating a unique cache key for params affecting the API queries
* @return {Object} project, platform, agent, etc.
*/
getParams(forCacheKey = false) {
let params = {
project: $(this.config.projectInput).val(),
platform: $(this.config.platformSelector).val(),
agent: $(this.config.agentSelector).val()
};
/**
* Override start and end with custom range values, if configured (set by URL params or setupDateRangeSelector)
* Valid values are those defined in this.config.specialRanges, constructed like `{range: 'last-month'}`,
* or a relative range like `{range: 'latest-N'}` where N is the number of days.
*/
if (this.specialRange && !forCacheKey) {
params.range = this.specialRange.range;
} else {
params.start = this.daterangepicker.startDate.format('YYYY-MM-DD');
params.end = this.daterangepicker.endDate.format('YYYY-MM-DD');
}
if (forCacheKey) {
params.page = $(this.config.articleInput).val();
} else {
params.sort = this.sort;
params.direction = this.direction;
}
return params;
}
/**
* Get params needed to create a permanent link of visible data
* @return {Object} hash of params
*/
getPermaLink() {
let params = this.getParams(true);
params.sort = this.sort;
params.direction = this.direction;
return params;
}
/**
* Push relevant class properties to the query string
* @param {Boolean} clear - whether to clear the query string entirely
* @return {null} nothing
*/
pushParams(clear = false) {
if (!window.history || !window.history.replaceState) return;
if (clear) {
return history.replaceState(null, document.title, location.href.split('?')[0]);
}
/** only certain characters within the page name are escaped */
const page = $(this.config.articleInput).val().score().replace(/[&%]/g, escape);
window.history.replaceState({}, document.title, `?${$.param(this.getParams())}&page=${page}`);
$('.permalink').prop('href', `/langviews?${$.param(this.getPermaLink())}`);
}
/**
* Given the badge code provided by the Wikidata API, return an image tag of the badge
* @param {String} badgeCode - as defined in this.config.badges
* @return {String} HTML markup for the image
*/
getBadgeMarkup(badgeCode) {
if (!this.config.badges[badgeCode]) return '';
const badgeImage = this.config.badges[badgeCode].image,
badgeName = this.config.badges[badgeCode].name;
return `<img class='article-badge' src='${badgeImage}' alt='${badgeName}' title='${badgeName}' />`;
}
/**
* Render list of langviews into view
* @returns {null} nothing
*/
renderData() {
/** sort ascending by current sort setting */
const sortedLangViews = this.langData.sort((a, b) => {
const before = this.getSortProperty(a, this.sort),
after = this.getSortProperty(b, this.sort);
if (before < after) {
return this.direction;
} else if (before > after) {
return -this.direction;
} else {
return 0;
}
});
$('.sort-link span').removeClass('glyphicon-sort-by-alphabet-alt glyphicon-sort-by-alphabet').addClass('glyphicon-sort');
const newSortClassName = parseInt(this.direction) === 1 ? 'glyphicon-sort-by-alphabet-alt' : 'glyphicon-sort-by-alphabet';
$(`.sort-link--${this.sort} span`).addClass(newSortClassName).removeClass('glyphicon-sort');
const totalBadgesMarkup = Object.keys(this.totals.badges).map(badge => {
return `<span class='nowrap'>${this.getBadgeMarkup(badge)} × ${this.totals.badges[badge]}</span>`;
}).join(', ');
$('.output-totals').html(
`<th scope='row'>${$.i18n('totals')}</th>
<th>${$.i18n('num-languages', this.langData.length)}</th>
<th>${$.i18n('unique-titles', this.totals.titles.length)}</th>
<th>${totalBadgesMarkup}</th>
<th>${this.n(this.totals.views)}</th>
<th>${this.n(Math.round(this.totals.views / this.numDaysInRange()))} / ${$.i18n('day')}</th>`
);
$('#lang_list').html('');
sortedLangViews.forEach((item, index) => {
let badgeMarkup = '';
if (item.badges) {
badgeMarkup = item.badges.map(this.getBadgeMarkup.bind(this)).join();
}
$('#lang_list').append(
`<tr>
<th scope='row'>${index + 1}</th>
<td>${item.lang}</td>
<td><a href="${item.url}" target="_blank">${item.pageName}</a></td>
<td>${badgeMarkup}</td>
<td><a href='${this.getPageviewsURL(item.lang, this.baseProject, item.pageName)}'>${this.n(item.views)}</a></td>
<td>${this.n(item.average)} / ${$.i18n('day')}</td>
</tr>`
);
});
this.pushParams();
this.setState('complete');
}
/**
* Get value of given langview entry for the purposes of column sorting
* @param {object} item - langview entry within this.langData
* @param {String} type - type of property to get
* @return {String|Number} - value
*/
getSortProperty(item, type) {
switch (type) {
case 'lang':
return item.lang;
case 'title':
return item.pageName;
case 'badges':
return item.badges.sort().join('');
case 'views':
return Number(item.views);
}
}
/**
* Link to /pageviews for given article and chosen daterange
* @param {String} lang - two character language code
* @param {String} project - base project without lang, e.g. wikipedia.org
* @param {String} article - page name
* @returns {String} URL
*/
// FIXME: should include agent and platform, and use special ranges as currently specified
getPageviewsURL(lang, project, article) {
let startDate = moment(this.daterangepicker.startDate),
endDate = moment(this.daterangepicker.endDate);
const platform = $(this.config.platformSelector).val();
if (endDate.diff(startDate, 'days') === 0) {
startDate.subtract(3, 'days');
endDate.add(3, 'days');
}
return `/pageviews#start=${startDate.format('YYYY-MM-DD')}` +
`&end=${endDate.format('YYYY-MM-DD')}&project=${lang}.${project}.org&platform=${platform}&pages=${article}`;
}
/**
* Loop through given interwiki data and query the pageviews API for each
* Also updates this.langData with result
* @param {Object} interWikiData - as given by the getInterwikiData promise
* @return {Deferred} - Promise resolving with data ready to be rendered to view
*/
getPageViewsData(interWikiData) {
const startDate = this.daterangepicker.startDate.startOf('day'),
endDate = this.daterangepicker.endDate.startOf('day'),
interWikiKeys = Object.keys(interWikiData);
// XXX: throttling
let dfd = $.Deferred(), promises = [], count = 0, hadFailure, failureRetries = {},
totalRequestCount = interWikiKeys.length, failedPages = [];
/** clear out existing data */
this.langData = [];
const makeRequest = dbName => {
const data = interWikiData[dbName],
uriEncodedPageName = encodeURIComponent(data.title);
const url = (
`https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/${data.lang}.${this.baseProject}` +
`/${$(this.config.platformSelector).val()}/${$(this.config.agentSelector).val()}/${uriEncodedPageName}/daily` +
`/${startDate.format(this.config.timestampFormat)}/${endDate.format(this.config.timestampFormat)}`
);
const promise = $.ajax({ url, dataType: 'json' });
promises.push(promise);
promise.done(pvData => {
const viewCounts = pvData.items.map(el => el.views),
views = viewCounts.reduce((a, b) => a + b);
this.langData.push({
badges: data.badges,
dbName,
lang: data.lang,
pageName: data.title,
views,
url: data.url,
average: Math.round(views / this.numDaysInRange())
});
/** keep track of unique badges/titles and total pageviews */
this.totals.views += views;
if (!this.totals.titles.includes(data.title)) {
this.totals.titles.push(data.title);
}
data.badges.forEach(badge => {
if (this.totals.badges[badge] === undefined) {
this.totals.badges[badge] = 1;
} else {
this.totals.badges[badge] += 1;
}
});
}).fail(errorData => {
// XXX: throttling
/** first detect if this was a Cassandra backend error, and if so, schedule a re-try */
const cassandraError = errorData.responseJSON.title === 'Error in Cassandra table storage backend',
failedPageLink = this.getPageLink(data.title, `${data.lang}.${this.baseProject}.org`);
if (cassandraError) {
if (failureRetries[dbName]) {
failureRetries[dbName]++;
} else {
failureRetries[dbName] = 1;
}
/** maximum of 3 retries */
if (failureRetries[dbName] < 3) {
totalRequestCount++;
return this.rateLimit(makeRequest, 100, this)(dbName);
}
/** retries exceeded */
failedPages.push(failedPageLink);
} else {
this.writeMessage(
`${failedPageLink}: ${$.i18n('api-error', 'Pageviews API')} - ${errorData.responseJSON.title}`
);
}
hadFailure = true; // don't treat this series of requests as being cached by server
}).always(() => {
this.updateProgressBar((++count / totalRequestCount) * 100);
// XXX: throttling, totalRequestCount can just be interWikiKeys.length
if (count === totalRequestCount) {
dfd.resolve(this.langData);
if (failedPages.length) {
this.writeMessage($.i18n(
'api-error-timeout',
'<ul>' +
failedPages.map(failedPage => `<li>${failedPage}</li>`).join('') +
'</ul>'
));
}
/**
* if there were no failures, assume the resource is now cached by the server
* and save this assumption to our own cache so we don't throttle the same requests
*/
// XXX: throttling
if (!hadFailure) {
simpleStorage.set(this.getCacheKey(), true, {TTL: 600000});
}
}
});
};
/**
* We don't want to throttle requests for cached resources. However in our case,
* we're unable to check response headers to see if the resource was cached,
* so we use simpleStorage to keep track of what the user has recently queried.
*/
// XXX: throttling
const requestFn = this.isRequestCached() ? makeRequest : this.rateLimit(makeRequest, 100, this);
interWikiKeys.forEach((dbName, index) => {
requestFn(dbName);
});
return dfd;
}
/**
* Return cache key for current params
* @return {String} key
*/
getCacheKey() {
return `lv-cache-${this.hashCode(
JSON.stringify(this.getParams(true))
)}`;
}
/**
* Check simple storage to see if a request with the current params would be cached
* @return {Boolean} cached or not
*/
isRequestCached() {
return simpleStorage.hasKey(this.getCacheKey());
}
/**
* Query Wikidata to find data about a given page across all sister projects
* @param {String} dbName - database name of source project
* @param {String} pageName - name of page we want to get data about
* @return {Deferred} - Promise resolving with interwiki data
*/
getInterwikiData(dbName, pageName) {
const dfd = $.Deferred();
const url = `https://www.wikidata.org/w/api.php?action=wbgetentities&sites=${dbName}` +
`&titles=${encodeURIComponent(pageName)}&props=sitelinks/urls|datatype&format=json&callback=?`;
$.getJSON(url).done(data => {
if (data.error) {
return dfd.reject(`${$.i18n('api-error', 'Wikidata')}: ${data.error.info}`);
} else if (data.entities['-1']) {
return dfd.reject(
`<a href='${this.getPageURL(pageName)}'>${pageName.descore()}</a> - ${$.i18n('api-error-no-data')}`
);
}
const key = Object.keys(data.entities)[0],
sitelinks = data.entities[key].sitelinks,
filteredLinks = {},
matchRegex = new RegExp(`^https://[\\w-]+\\.${this.baseProject}\\.org`);
/** restrict to selected base project (e.g. wikipedias, not wikipedias and wikivoyages) */
Object.keys(sitelinks).forEach(key => {
const siteMapKey = sitelinks[key].site.replace(/-/g, '_');
if (matchRegex.test(sitelinks[key].url) && siteMap[siteMapKey]) {
sitelinks[key].lang = siteMap[siteMapKey].replace(/\.wiki.*$/, '');
filteredLinks[key] = sitelinks[key];
}
});
return dfd.resolve(filteredLinks);
});
return dfd;
}
/**
* Parse wiki URL for the page name
* @param {String} url - full URL to a wiki page
* @return {String|null} page name
*/
getPageNameFromURL(url) {
if (url.includes('?')) {
return url.match(/\?(?:.*\b)?title=(.*?)(?:&|$)/)[1];
} else {
return url.match(/\/wiki\/(.*?)(?:\?|$)/)[1];
}
}
/**
* Parses the URL query string and sets all the inputs accordingly
* Should only be called on initial page load, until we decide to support pop states (probably never)
* @returns {null} nothing
*/
popParams() |
getState() {
const classList = $('main')[0].classList;
return this.config.formStates.filter(stateName => {
return classList.contains(stateName);
})[0];
}
/**
* Helper to set a CSS class on the `main` node,
* styling the document based on a 'state'
* @param {String} state - class to be added;
* should be one of ['initial', 'processing', 'complete']
* @returns {null} nothing
*/
setState(state) {
$('main').removeClass(this.config.formStates.join(' ')).addClass(state);
switch (state) {
case 'initial':
this.clearMessages();
this.assignDefaults();
if (this.typeahead) this.typeahead.hide();
$(this.config.articleInput).val('').focus();
break;
case 'processing':
this.processStarted();
this.clearMessages();
document.activeElement.blur();
$('.progress-bar').addClass('active');
break;
case 'complete':
this.processEnded();
/** stop hidden animation for slight performance improvement */
this.updateProgressBar(0);
$('.progress-bar').removeClass('active');
break;
case 'invalid':
break;
}
}
/**
* sets up the daterange selector and adds listeners
* @returns {null} - nothing
*/
setupDateRangeSelector() {
super.setupDateRangeSelector();
$(this.config.dateRangeSelector).on('apply.daterangepicker', (e, action) => {
if (action.chosenLabel === $.i18n('custom-range')) {
this.specialRange = null;
/** force events to re-fire since apply.daterangepicker occurs before 'change' event */
this.daterangepicker.updateElement();
}
});
}
/**
* Process the langviews for the article and options entered
* Called when submitting the form
* @return {null} nothing
*/
processArticle() {
// XXX: throttling
/** allow resubmission of queries that are cached */
if (!this.isRequestCached()) {
/** Check if user has exceeded request limit and throw error */
if (simpleStorage.hasKey('pageviews-throttle')) {
const timeRemaining = Math.round(simpleStorage.getTTL('pageviews-throttle') / 1000);
/** > 0 check to combat race conditions */
if (timeRemaining > 0) {
return this.writeMessage($.i18n(
'api-throttle-wait', `<b>${timeRemaining}</b>`,
'<a href="https://phabricator.wikimedia.org/T124314" target="_blank">phab:T124314</a>'
), true);
}
}
}
const page = $(this.config.articleInput).val();
this.setState('processing');
const dbName = Object.keys(siteMap).find(key => siteMap[key] === $(this.config.projectInput).val());
this.getInterwikiData(dbName, page).done(interWikiData => {
/**
* XXX: throttling
* At this point we know we have data to process,
* so set the throttle flag to disallow additional requests for the next 90 seconds
*/
this.setThrottle();
this.getPageViewsData(interWikiData).done(() => {
$('.langviews-page-name').text(page).prop('href', this.getPageURL(page));
$('.langviews-params').text($(this.config.dateRangeSelector).val());
this.updateProgressBar(100);
this.renderData();
/**
* XXX: throttling
* Reset throttling again; the first one was in case they aborted
*/
this.setThrottle();
});
}).fail(error => {
this.setState('initial');
/** structured error comes back as a string, otherwise we don't know what happened */
if (typeof error === 'string') {
this.writeMessage(error);
} else {
this.writeMessage($.i18n('api-error-unknown', 'Wikidata'));
}
});
}
/**
* Setup typeahead on the article input, killing the previous instance if present
* Called in validateProject, which is called in popParams when the app is first loaded
* @return {null} Nothing
*/
setupArticleInput() {
if (this.typeahead) this.typeahead.destroy();
$(this.config.articleInput).typeahead({
ajax: {
url: `https://${this.project}.org/w/api.php`,
timeout: 200,
triggerLength: 1,
method: 'get',
preDispatch: query => {
return {
action: 'query',
list: 'prefixsearch',
format: 'json',
pssearch: query
};
},
preProcess: data => {
const results = data.query.prefixsearch.map(elem => elem.title);
return results;
}
}
});
}
/**
* Set value of progress bar
* @param {Number} value - percentage as float
* @return {null} nothing
*/
updateProgressBar(value) {
$('.progress-bar').css('width', `${value.toFixed(2)}%`);
}
/**
* Validate the currently entered project. Called when the value is changed
* @return {boolean} true if validation failed
*/
validateProject() {
const project = $(this.config.projectInput).val();
if (!this.isMultilangProject()) {
this.writeMessage(
$.i18n('invalid-lang-project', `<a href='//${project}'>${project}</a>`),
true
);
this.setState('invalid');
return true;
}
this.setState('initial');
/** kill and re-init typeahead to point to new project */
this.setupArticleInput();
return false;
}
/**
* Exports current mass data to CSV format and loads it in a new tab
* With the prepended data:text/csv this should cause the browser to download the data
* @returns {null} nothing
*/
exportCSV() {
let csvContent = 'data:text/csv;charset=utf-8,Language,Title,Badges,Pageviews,Average\n';
// Add the rows to the CSV
this.langData.forEach(page => {
const pageName = '"' + page.pageName.descore().replace(/"/g, '""') + '"',
badges = '"' + page.badges.map(badge => this.config.badges[badge].name.replace(/"/g, '""')) + '"';
csvContent += [
page.lang,
pageName,
badges,
page.views,
page.average
].join(',') + '\n';
});
// Output the CSV file to the browser
const encodedUri = encodeURI(csvContent);
window.open(encodedUri);
}
/**
* Exports current mass data to JSON format and loads it in a new tab
* @returns {string} stringified JSON
*/
exportJSON() {
const jsonContent = 'data:text/json;charset=utf-8,' + JSON.stringify(this.langData),
encodedUri = encodeURI(jsonContent);
window.open(encodedUri);
return jsonContent;
}
}
$(document).ready(() => {
/** assume hash params are supposed to be query params */
if (document.location.hash && !document.location.search) {
return document.location.href = document.location.href.replace('#', '?');
} else if (document.location.hash) {
return document.location.href = document.location.href.replace(/\#.*/, '');
}
new LangViews();
});
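/**
 * Illustrative example (host name made up) of the hash-to-query rewrite above:
 *   https://tools.example.org/langviews#project=en.wikipedia.org&page=Cat
 * becomes
 *   https://tools.example.org/langviews?project=en.wikipedia.org&page=Cat
 * so popParams() can read the values through the normal query-string parser.
 */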
| {
let startDate, endDate, params = this.parseQueryString('pages');
$(this.config.projectInput).val(params.project || this.config.defaults.project);
if (this.validateProject()) return;
// FIXME: only run this when they actually submit
this.patchUsage('lv');
/**
* Check if we're using a valid range, and if so ignore any start/end dates.
* If an invalid range, throw an error and use default dates.
*/
if (params.range) {
if (!this.setSpecialRange(params.range)) {
this.addSiteNotice('danger', $.i18n('param-error-3'), $.i18n('invalid-params'), true);
this.setSpecialRange(this.config.defaults.dateRange);
}
} else if (params.start) {
startDate = moment(params.start || moment().subtract(this.config.defaults.daysAgo, 'days'));
endDate = moment(params.end || Date.now());
if (startDate < this.config.minDate || endDate < this.config.minDate) {
this.addSiteNotice('danger', $.i18n('param-error-1', `${$.i18n('july')} 2015`), $.i18n('invalid-params'), true);
return;
} else if (startDate > endDate) {
this.addSiteNotice('warning', $.i18n('param-error-2'), $.i18n('invalid-params'), true);
return;
}
this.daterangepicker.setStartDate(startDate);
this.daterangepicker.setEndDate(endDate);
} else {
this.setSpecialRange(this.config.defaults.dateRange);
}
$(this.config.platformSelector).val(params.platform || 'all-access');
$(this.config.agentSelector).val(params.agent || 'user');
this.sort = params.sort || this.config.defaults.params.sort;
this.direction = params.direction || this.config.defaults.params.direction;
/** start up processing if page name is present */
if (params.page) {
$(this.config.articleInput).val(decodeURIComponent(params.page).descore());
this.processArticle();
}
} | identifier_body |
langviews.js | /**
* Langviews Analysis tool
* @file Main file for Langviews application
* @author MusikAnimal
* @copyright 2016 MusikAnimal
* @license MIT License: https://opensource.org/licenses/MIT
*/
const config = require('./config');
const siteMap = require('../shared/site_map');
const siteDomains = Object.keys(siteMap).map(key => siteMap[key]);
const Pv = require('../shared/pv');
/** Main LangViews class */
class LangViews extends Pv {
constructor() {
super(config);
}
/**
* Initialize the application.
* Called in `pv.js` after translations have loaded
* @return {null} Nothing
*/
initialize() {
this.assignDefaults();
this.setupDateRangeSelector();
this.popParams();
this.setupListeners();
this.updateInterAppLinks();
}
/**
* Copy default values over to class instance
* Use JSON stringify/parsing so as to make a deep clone of the defaults
* @return {null} Nothing
*/
assignDefaults() {
Object.assign(this, JSON.parse(JSON.stringify(this.config.defaults.params)));
}
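/**
 * Minimal sketch (values made up) of why the JSON round-trip above is used
 * instead of a plain Object.assign: nested defaults would otherwise be shared
 * by reference.
 *
 *   const defaults = { params: { sort: 'views', direction: 1 } };
 *   const shallow = Object.assign({}, defaults);
 *   shallow.params.sort = 'lang';               // also mutates defaults.params
 *   const deep = JSON.parse(JSON.stringify(defaults));
 *   deep.params.sort = 'lang';                  // leaves defaults untouched
 */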
/**
* Add general event listeners
* @returns {null} nothing
*/
setupListeners() {
super.setupListeners();
$('#langviews_form').on('submit', e => {
e.preventDefault(); // prevent page from reloading
this.processArticle();
});
$('.another-query').on('click', () => {
this.setState('initial');
this.pushParams(true);
});
$('.sort-link').on('click', e => {
const sortType = $(e.currentTarget).data('type');
this.direction = this.sort === sortType ? -this.direction : 1;
this.sort = sortType;
this.renderData();
});
$(this.config.projectInput).on('change', () => {
this.validateProject();
this.updateInterAppLinks();
});
$('.download-csv').on('click', this.exportCSV.bind(this));
$('.download-json').on('click', this.exportJSON.bind(this));
}
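/**
 * Illustrative walk-through of the sort-link handler above (values made up):
 *   // starting with sort = 'views', direction = 1
 *   // click 'views' -> same column, direction flips to -1
 *   // click 'views' -> flips back to 1
 *   // click 'lang'  -> new column, sort = 'lang', direction resets to 1
 */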
/**
* Get the base project name (without language and the .org)
* @returns {String} project name
*/
get baseProject() {
return this.project.split('.')[1];
}
/**
* @returns {Typeahead} instance
*/
get typeahead() {
return $(this.config.articleInput).data('typeahead');
}
/**
* Get all user-inputted parameters
* @param {boolean} [forCacheKey] whether or not to include the page name, and exclude sort and direction
* in the returned object. This is for the purposes of generating a unique cache key for params affecting the API queries
* @return {Object} project, platform, agent, etc.
*/
getParams(forCacheKey = false) {
let params = {
project: $(this.config.projectInput).val(),
platform: $(this.config.platformSelector).val(),
agent: $(this.config.agentSelector).val()
};
/**
* Override start and end with custom range values, if configured (set by URL params or setupDateRangeSelector)
* Valid values are those defined in this.config.specialRanges, constructed like `{range: 'last-month'}`,
* or a relative range like `{range: 'latest-N'}` where N is the number of days.
*/
if (this.specialRange && !forCacheKey) {
params.range = this.specialRange.range;
} else {
params.start = this.daterangepicker.startDate.format('YYYY-MM-DD');
params.end = this.daterangepicker.endDate.format('YYYY-MM-DD');
}
if (forCacheKey) {
params.page = $(this.config.articleInput).val();
} else {
params.sort = this.sort;
params.direction = this.direction;
}
return params;
}
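/**
 * Illustrative shapes returned by getParams() (values made up):
 *
 *   // specialRange set, forCacheKey = false
 *   { project: 'en.wikipedia.org', platform: 'all-access', agent: 'user',
 *     range: 'latest-20', sort: 'views', direction: 1 }
 *
 *   // explicit dates, forCacheKey = true (page included, sort/direction omitted)
 *   { project: 'en.wikipedia.org', platform: 'all-access', agent: 'user',
 *     start: '2016-05-01', end: '2016-05-31', page: 'Cat' }
 */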
/**
* Get params needed to create a permanent link of visible data
* @return {Object} hash of params
*/
getPermaLink() {
let params = this.getParams(true);
params.sort = this.sort;
params.direction = this.direction;
return params;
}
/**
* Push relevant class properties to the query string
* @param {Boolean} clear - whether to clear the query string entirely
* @return {null} nothing
*/
pushParams(clear = false) {
if (!window.history || !window.history.replaceState) return;
if (clear) {
return history.replaceState(null, document.title, location.href.split('?')[0]);
}
/** only certain characters within the page name are escaped */
const page = $(this.config.articleInput).val().score().replace(/[&%]/g, escape);
window.history.replaceState({}, document.title, `?${$.param(this.getParams())}&page=${page}`);
$('.permalink').prop('href', `/langviews?${$.param(this.getPermaLink())}`);
}
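/**
 * Illustrative note on the escaping above: only '&' and '%' in the page name
 * are escaped, e.g. escape('&') === '%26', so a page named 'AT&T' appears in
 * the query string as 'AT%26T'. The .score() helper is assumed to come from
 * the shared string extensions (presumably turning spaces into underscores).
 */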
/**
* Given the badge code provided by the Wikidata API, return an image tag of the badge
* @param {String} badgeCode - as defined in this.config.badges
* @return {String} HTML markup for the image
*/
getBadgeMarkup(badgeCode) {
if (!this.config.badges[badgeCode]) return '';
const badgeImage = this.config.badges[badgeCode].image,
badgeName = this.config.badges[badgeCode].name;
return `<img class='article-badge' src='${badgeImage}' alt='${badgeName}' title='${badgeName}' />`;
}
/**
* Render list of langviews into view
* @returns {null} nothing
*/
renderData() {
/** sort ascending by current sort setting */
const sortedLangViews = this.langData.sort((a, b) => {
const before = this.getSortProperty(a, this.sort),
after = this.getSortProperty(b, this.sort);
if (before < after) {
return this.direction;
} else if (before > after) {
return -this.direction;
} else {
return 0;
}
});
$('.sort-link span').removeClass('glyphicon-sort-by-alphabet-alt glyphicon-sort-by-alphabet').addClass('glyphicon-sort');
const newSortClassName = parseInt(this.direction) === 1 ? 'glyphicon-sort-by-alphabet-alt' : 'glyphicon-sort-by-alphabet';
$(`.sort-link--${this.sort} span`).addClass(newSortClassName).removeClass('glyphicon-sort');
const totalBadgesMarkup = Object.keys(this.totals.badges).map(badge => {
return `<span class='nowrap'>${this.getBadgeMarkup(badge)} × ${this.totals.badges[badge]}</span>`;
}).join(', ');
$('.output-totals').html(
`<th scope='row'>${$.i18n('totals')}</th>
<th>${$.i18n('num-languages', this.langData.length)}</th>
<th>${$.i18n('unique-titles', this.totals.titles.length)}</th>
<th>${totalBadgesMarkup}</th>
<th>${this.n(this.totals.views)}</th>
<th>${this.n(Math.round(this.totals.views / this.numDaysInRange()))} / ${$.i18n('day')}</th>`
);
$('#lang_list').html('');
sortedLangViews.forEach((item, index) => {
let badgeMarkup = '';
if (item.badges) {
badgeMarkup = item.badges.map(this.getBadgeMarkup.bind(this)).join();
}
$('#lang_list').append(
`<tr>
<th scope='row'>${index + 1}</th>
<td>${item.lang}</td>
<td><a href="${item.url}" target="_blank">${item.pageName}</a></td>
<td>${badgeMarkup}</td>
<td><a href='${this.getPageviewsURL(item.lang, this.baseProject, item.pageName)}'>${this.n(item.views)}</a></td>
<td>${this.n(item.average)} / ${$.i18n('day')}</td>
</tr>`
);
});
this.pushParams();
this.setState('complete');
}
/**
* Get value of given langview entry for the purposes of column sorting
* @param {object} item - langview entry within this.langData
* @param {String} type - type of property to get
* @return {String|Number} - value
*/
getSortProperty(item, type) {
switch (type) {
case 'lang':
return item.lang;
case 'title':
return item.pageName;
case 'badges':
return item.badges.sort().join('');
case 'views':
return Number(item.views);
}
}
/**
* Link to /pageviews for given article and chosen daterange
* @param {String} lang - two character language code
* @param {String} project - base project without lang, e.g. wikipedia.org
* @param {String} article - page name
* @returns {String} URL
*/
// FIXME: should include agent and platform, and use special ranges as currently specified
getPageviewsURL(lang, project, article) {
let startDate = moment(this.daterangepicker.startDate),
endDate = moment(this.daterangepicker.endDate);
const platform = $(this.config.platformSelector).val();
if (endDate.diff(startDate, 'days') === 0) {
startDate.subtract(3, 'days');
endDate.add(3, 'days');
}
return `/pageviews#start=${startDate.format('YYYY-MM-DD')}` +
`&end=${endDate.format('YYYY-MM-DD')}&project=${lang}.${project}.org&platform=${platform}&pages=${article}`;
}
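/**
 * Illustrative output of getPageviewsURL (values made up):
 *   /pageviews#start=2016-05-01&end=2016-05-31&project=de.wikipedia.org&platform=all-access&pages=Hauskatze
 * For a single-day range, the method first widens the range by three days on
 * each side, presumably so the target chart has a visible range to draw.
 */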
/**
* Loop through given interwiki data and query the pageviews API for each
* Also updates this.langData with result
* @param {Object} interWikiData - as given by the getInterwikiData promise
* @return {Deferred} - Promise resolving with data ready to be rendered to view
*/
getPageViewsData(interWikiData) {
const startDate = this.daterangepicker.startDate.startOf('day'),
endDate = this.daterangepicker.endDate.startOf('day'),
interWikiKeys = Object.keys(interWikiData);
// XXX: throttling
let dfd = $.Deferred(), promises = [], count = 0, hadFailure, failureRetries = {},
totalRequestCount = interWikiKeys.length, failedPages = [];
/** clear out existing data */
this.langData = [];
const makeRequest = dbName => {
const data = interWikiData[dbName],
uriEncodedPageName = encodeURIComponent(data.title);
const url = (
`https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/${data.lang}.${this.baseProject}` +
`/${$(this.config.platformSelector).val()}/${$(this.config.agentSelector).val()}/${uriEncodedPageName}/daily` +
`/${startDate.format(this.config.timestampFormat)}/${endDate.format(this.config.timestampFormat)}`
);
const promise = $.ajax({ url, dataType: 'json' });
promises.push(promise);
promise.done(pvData => {
const viewCounts = pvData.items.map(el => el.views),
views = viewCounts.reduce((a, b) => a + b);
this.langData.push({
badges: data.badges,
dbName,
lang: data.lang,
pageName: data.title,
views,
url: data.url,
average: Math.round(views / this.numDaysInRange())
});
/** keep track of unique badges/titles and total pageviews */
this.totals.views += views;
if (!this.totals.titles.includes(data.title)) {
this.totals.titles.push(data.title);
}
data.badges.forEach(badge => {
if (this.totals.badges[badge] === undefined) {
this.totals.badges[badge] = 1;
} else {
this.totals.badges[badge] += 1;
}
});
}).fail(errorData => {
// XXX: throttling
/** first detect if this was a Cassandra backend error, and if so, schedule a re-try */
const cassandraError = errorData.responseJSON.title === 'Error in Cassandra table storage backend',
failedPageLink = this.getPageLink(data.title, `${data.lang}.${this.baseProject}.org`);
if (cassandraError) {
if (failureRetries[dbName]) {
failureRetries[dbName]++;
} else {
failureRetries[dbName] = 1;
}
/** maximum of 3 retries */
if (failureRetries[dbName] < 3) {
totalRequestCount++;
return this.rateLimit(makeRequest, 100, this)(dbName);
}
/** retries exceeded */
failedPages.push(failedPageLink);
} else {
this.writeMessage(
`${failedPageLink}: ${$.i18n('api-error', 'Pageviews API')} - ${errorData.responseJSON.title}`
);
}
hadFailure = true; // don't treat this series of requests as being cached by server
}).always(() => {
this.updateProgressBar((++count / totalRequestCount) * 100);
// XXX: throttling, totalRequestCount can just be interWikiKeys.length
if (count === totalRequestCount) {
dfd.resolve(this.langData);
if (failedPages.length) {
this.writeMessage($.i18n(
'api-error-timeout',
'<ul>' +
failedPages.map(failedPage => `<li>${failedPage}</li>`).join('') +
'</ul>'
));
}
/**
* if there were no failures, assume the resource is now cached by the server
* and save this assumption to our own cache so we don't throttle the same requests
*/
// XXX: throttling
if (!hadFailure) {
simpleStorage.set(this.getCacheKey(), true, {TTL: 600000});
}
}
});
};
/**
* We don't want to throttle requests for cached resources. However in our case,
* we're unable to check response headers to see if the resource was cached,
* so we use simpleStorage to keep track of what the user has recently queried.
*/
// XXX: throttling
const requestFn = this.isRequestCached() ? makeRequest : this.rateLimit(makeRequest, 100, this);
interWikiKeys.forEach((dbName, index) => {
requestFn(dbName);
});
return dfd;
}
/**
* Return cache key for current params
* @return {String} key
*/
getCacheKey() {
return `lv-cache-${this.hashCode(
JSON.stringify(this.getParams(true))
)}`;
}
/**
* Check simple storage to see if a request with the current params would be cached
* @return {Boolean} cached or not
*/
isRequestCached() {
return simpleStorage.hasKey(this.getCacheKey());
}
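/**
 * Illustrative flow tying the cache key to throttling, using only calls that
 * appear in this file (`hashCode` is assumed to come from the shared Pv code):
 *
 *   const key = this.getCacheKey();               // e.g. 'lv-cache-123456789' (made-up hash)
 *   simpleStorage.set(key, true, {TTL: 600000});  // remember a successful run for 10 minutes
 *   this.isRequestCached();                       // true until the TTL expires, so repeat
 *                                                 // submissions skip the throttle check
 */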
/**
* Query Wikidata to find data about a given page across all sister projects
* @param {String} dbName - database name of source project
* @param {String} pageName - name of page we want to get data about
* @return {Deferred} - Promise resolving with interwiki data
*/
getInterwikiData(dbName, pageName) {
const dfd = $.Deferred();
const url = `https://www.wikidata.org/w/api.php?action=wbgetentities&sites=${dbName}` +
`&titles=${encodeURIComponent(pageName)}&props=sitelinks/urls|datatype&format=json&callback=?`;
$.getJSON(url).done(data => {
if (data.error) {
return dfd.reject(`${$.i18n('api-error', 'Wikidata')}: ${data.error.info}`);
} else if (data.entities['-1']) {
return dfd.reject(
`<a href='${this.getPageURL(pageName)}'>${pageName.descore()}</a> - ${$.i18n('api-error-no-data')}`
);
}
const key = Object.keys(data.entities)[0],
sitelinks = data.entities[key].sitelinks,
filteredLinks = {},
matchRegex = new RegExp(`^https://[\\w-]+\\.${this.baseProject}\\.org`);
/** restrict to selected base project (e.g. wikipedias, not wikipedias and wikivoyages) */
Object.keys(sitelinks).forEach(key => {
const siteMapKey = sitelinks[key].site.replace(/-/g, '_');
if (matchRegex.test(sitelinks[key].url) && siteMap[siteMapKey]) {
sitelinks[key].lang = siteMap[siteMapKey].replace(/\.wiki.*$/, '');
filteredLinks[key] = sitelinks[key];
}
});
return dfd.resolve(filteredLinks);
});
return dfd;
}
/**
* Parse wiki URL for the page name
* @param {String} url - full URL to a wiki page
* @return {String|null} page name
*/
getPageNameFromURL(url) {
if (url.includes('?')) {
return url.match(/\?(?:.*\b)?title=(.*?)(?:&|$)/)[1];
} else {
return url.match(/\/wiki\/(.*?)(?:\?|$)/)[1];
}
}
/**
* Parses the URL query string and sets all the inputs accordingly
* Should only be called on initial page load, until we decide to support pop states (probably never)
* @returns {null} nothing
*/
popParams() {
let startDate, endDate, params = this.parseQueryString('pages');
$(this.config.projectInput).val(params.project || this.config.defaults.project);
if (this.validateProject()) return;
// FIXME: only run this when they actually submit
this.patchUsage('lv');
/**
* Check if we're using a valid range, and if so ignore any start/end dates.
* If an invalid range, throw an error and use default dates.
*/
if (params.range) {
if (!this.setSpecialRange(params.range)) {
this.addSiteNotice('danger', $.i18n('param-error-3'), $.i18n('invalid-params'), true);
this.setSpecialRange(this.config.defaults.dateRange);
}
} else if (params.start) {
startDate = moment(params.start || moment().subtract(this.config.defaults.daysAgo, 'days'));
endDate = moment(params.end || Date.now());
if (startDate < this.config.minDate || endDate < this.config.minDate) {
this.addSiteNotice('danger', $.i18n('param-error-1', `${$.i18n('july')} 2015`), $.i18n('invalid-params'), true);
return;
} else if (startDate > endDate) {
this.addSiteNotice('warning', $.i18n('param-error-2'), $.i18n('invalid-params'), true);
return;
}
this.daterangepicker.setStartDate(startDate);
this.daterangepicker.setEndDate(endDate);
} else {
this.setSpecialRange(this.config.defaults.dateRange);
}
$(this.config.platformSelector).val(params.platform || 'all-access');
$(this.config.agentSelector).val(params.agent || 'user');
this.sort = params.sort || this.config.defaults.params.sort;
this.direction = params.direction || this.config.defaults.params.direction;
/** start up processing if page name is present */
if (params.page) {
$(this.config.articleInput).val(decodeURIComponent(params.page).descore());
this.processArticle();
}
}
getState() {
const classList = $('main')[0].classList;
return this.config.formStates.filter(stateName => {
return classList.contains(stateName);
})[0];
}
/**
* Helper to set a CSS class on the `main` node,
* styling the document based on a 'state'
* @param {String} state - class to be added;
* should be one of ['initial', 'processing', 'complete']
* @returns {null} nothing
*/
setState(state) {
$('main').removeClass(this.config.formStates.join(' ')).addClass(state);
switch (state) {
case 'initial':
this.clearMessages();
this.assignDefaults();
if (this.typeahead) this.typeahead.hide();
$(this.config.articleInput).val('').focus();
break;
case 'processing':
this.processStarted();
this.clearMessages();
document.activeElement.blur();
$('.progress-bar').addClass('active');
break;
case 'complete':
this.processEnded();
/** stop hidden animation for slight performance improvement */
this.updateProgressBar(0);
$('.progress-bar').removeClass('active');
break;
case 'invalid':
break;
}
}
/**
* sets up the daterange selector and adds listeners
* @returns {null} - nothing
*/
setupDateRangeSelector() {
super.setupDateRangeSelector();
$(this.config.dateRangeSelector).on('apply.daterangepicker', (e, action) => {
if (action.chosenLabel === $.i18n('custom-range')) {
this.specialRange = null;
/** force events to re-fire since apply.daterangepicker occurs before 'change' event */
this.daterangepicker.updateElement();
}
});
}
/**
* Process the langviews for the article and options entered
* Called when submitting the form
* @return {null} nothing
*/
| () {
// XXX: throttling
/** allow resubmission of queries that are cached */
if (!this.isRequestCached()) {
/** Check if user has exceeded request limit and throw error */
if (simpleStorage.hasKey('pageviews-throttle')) {
const timeRemaining = Math.round(simpleStorage.getTTL('pageviews-throttle') / 1000);
/** > 0 check to combat race conditions */
if (timeRemaining > 0) {
return this.writeMessage($.i18n(
'api-throttle-wait', `<b>${timeRemaining}</b>`,
'<a href="https://phabricator.wikimedia.org/T124314" target="_blank">phab:T124314</a>'
), true);
}
}
}
const page = $(this.config.articleInput).val();
this.setState('processing');
const dbName = Object.keys(siteMap).find(key => siteMap[key] === $(this.config.projectInput).val());
this.getInterwikiData(dbName, page).done(interWikiData => {
/**
* XXX: throttling
* At this point we know we have data to process,
* so set the throttle flag to disallow additional requests for the next 90 seconds
*/
this.setThrottle();
this.getPageViewsData(interWikiData).done(() => {
$('.langviews-page-name').text(page).prop('href', this.getPageURL(page));
$('.langviews-params').text($(this.config.dateRangeSelector).val());
this.updateProgressBar(100);
this.renderData();
/**
* XXX: throttling
* Reset throttling again; the first one was in case they aborted
*/
this.setThrottle();
});
}).fail(error => {
this.setState('initial');
/** structured error comes back as a string, otherwise we don't know what happened */
if (typeof error === 'string') {
this.writeMessage(error);
} else {
this.writeMessage($.i18n('api-error-unknown', 'Wikidata'));
}
});
}
/**
* Setup typeahead on the article input, killing the previous instance if present
* Called in validateProject, which is called in popParams when the app is first loaded
* @return {null} Nothing
*/
setupArticleInput() {
if (this.typeahead) this.typeahead.destroy();
$(this.config.articleInput).typeahead({
ajax: {
url: `https://${this.project}.org/w/api.php`,
timeout: 200,
triggerLength: 1,
method: 'get',
preDispatch: query => {
return {
action: 'query',
list: 'prefixsearch',
format: 'json',
pssearch: query
};
},
preProcess: data => {
const results = data.query.prefixsearch.map(elem => elem.title);
return results;
}
}
});
}
/**
* Set value of progress bar
* @param {Number} value - percentage as float
* @return {null} nothing
*/
updateProgressBar(value) {
$('.progress-bar').css('width', `${value.toFixed(2)}%`);
}
/**
* Validate the currently entered project. Called when the value is changed
* @return {boolean} true if validation failed
*/
validateProject() {
const project = $(this.config.projectInput).val();
if (!this.isMultilangProject()) {
this.writeMessage(
$.i18n('invalid-lang-project', `<a href='//${project}'>${project}</a>`),
true
);
this.setState('invalid');
return true;
}
this.setState('initial');
/** kill and re-init typeahead to point to new project */
this.setupArticleInput();
return false;
}
/**
* Exports current mass data to CSV format and loads it in a new tab
* With the prepended data:text/csv this should cause the browser to download the data
* @returns {null} nothing
*/
exportCSV() {
let csvContent = 'data:text/csv;charset=utf-8,Language,Title,Badges,Pageviews,Average\n';
// Add the rows to the CSV
this.langData.forEach(page => {
const pageName = '"' + page.pageName.descore().replace(/"/g, '""') + '"',
badges = '"' + page.badges.map(badge => this.config.badges[badge].name.replace(/"/g, '""')) + '"';
csvContent += [
page.lang,
pageName,
badges,
page.views,
page.average
].join(',') + '\n';
});
// Output the CSV file to the browser
const encodedUri = encodeURI(csvContent);
window.open(encodedUri);
}
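/**
 * Illustrative note on the quoting above: titles are wrapped in double quotes
 * with inner quotes doubled, so a page called  My "quoted" page  is written as
 *   "My ""quoted"" page"
 * Badge names rely on Array's default toString (no explicit join), so multiple
 * badges end up comma-separated inside the quoted field.
 */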
/**
* Exports current mass data to JSON format and loads it in a new tab
* @returns {string} stringified JSON
*/
exportJSON() {
const jsonContent = 'data:text/json;charset=utf-8,' + JSON.stringify(this.langData),
encodedUri = encodeURI(jsonContent);
window.open(encodedUri);
return jsonContent;
}
}
$(document).ready(() => {
/** assume hash params are supposed to be query params */
if (document.location.hash && !document.location.search) {
return document.location.href = document.location.href.replace('#', '?');
} else if (document.location.hash) {
return document.location.href = document.location.href.replace(/\#.*/, '');
}
new LangViews();
});
| processArticle | identifier_name |
langviews.js | /**
* Langviews Analysis tool
* @file Main file for Langviews application
* @author MusikAnimal
* @copyright 2016 MusikAnimal
* @license MIT License: https://opensource.org/licenses/MIT
*/
const config = require('./config');
const siteMap = require('../shared/site_map');
const siteDomains = Object.keys(siteMap).map(key => siteMap[key]);
const Pv = require('../shared/pv');
/** Main LangViews class */
class LangViews extends Pv {
constructor() {
super(config);
}
/**
* Initialize the application.
* Called in `pv.js` after translations have loaded
* @return {null} Nothing
*/
initialize() {
this.assignDefaults();
this.setupDateRangeSelector();
this.popParams();
this.setupListeners();
this.updateInterAppLinks();
}
/**
* Copy default values over to class instance
* Use JSON stringify/parsing so as to make a deep clone of the defaults
* @return {null} Nothing
*/
assignDefaults() {
Object.assign(this, JSON.parse(JSON.stringify(this.config.defaults.params)));
}
/**
* Add general event listeners
* @returns {null} nothing
*/
setupListeners() {
super.setupListeners();
$('#langviews_form').on('submit', e => {
e.preventDefault(); // prevent page from reloading
this.processArticle();
});
$('.another-query').on('click', () => {
this.setState('initial');
this.pushParams(true);
});
$('.sort-link').on('click', e => {
const sortType = $(e.currentTarget).data('type');
this.direction = this.sort === sortType ? -this.direction : 1;
this.sort = sortType;
this.renderData();
});
$(this.config.projectInput).on('change', () => {
this.validateProject();
this.updateInterAppLinks();
});
$('.download-csv').on('click', this.exportCSV.bind(this));
$('.download-json').on('click', this.exportJSON.bind(this));
}
/**
* Get the base project name (without language and the .org)
* @returns {String} project name
*/
get baseProject() {
return this.project.split('.')[1];
}
/**
* @returns {Typeahead} instance
*/
get typeahead() {
return $(this.config.articleInput).data('typeahead');
}
/**
* Get all user-inputted parameters
* @param {boolean} [forCacheKey] whether or not to include the page name, and exclude sort and direction
* in the returned object. This is for the purposes of generating a unique cache key for params affecting the API queries
* @return {Object} project, platform, agent, etc.
*/
getParams(forCacheKey = false) {
let params = {
project: $(this.config.projectInput).val(),
platform: $(this.config.platformSelector).val(),
agent: $(this.config.agentSelector).val()
};
/**
* Override start and end with custom range values, if configured (set by URL params or setupDateRangeSelector)
* Valid values are those defined in this.config.specialRanges, constructed like `{range: 'last-month'}`,
* or a relative range like `{range: 'latest-N'}` where N is the number of days.
*/
if (this.specialRange && !forCacheKey) {
params.range = this.specialRange.range;
} else {
params.start = this.daterangepicker.startDate.format('YYYY-MM-DD');
params.end = this.daterangepicker.endDate.format('YYYY-MM-DD');
}
if (forCacheKey) {
params.page = $(this.config.articleInput).val();
} else {
params.sort = this.sort;
params.direction = this.direction;
}
return params;
}
/**
* Get params needed to create a permanent link of visible data
* @return {Object} hash of params
*/
getPermaLink() {
let params = this.getParams(true);
params.sort = this.sort;
params.direction = this.direction;
return params;
}
/**
* Push relevant class properties to the query string
* @param {Boolean} clear - whether to clear the query string entirely
* @return {null} nothing
*/
pushParams(clear = false) {
if (!window.history || !window.history.replaceState) return;
if (clear) {
return history.replaceState(null, document.title, location.href.split('?')[0]);
}
/** only certain characters within the page name are escaped */
const page = $(this.config.articleInput).val().score().replace(/[&%]/g, escape);
window.history.replaceState({}, document.title, `?${$.param(this.getParams())}&page=${page}`);
$('.permalink').prop('href', `/langviews?${$.param(this.getPermaLink())}`);
}
/**
* Given the badge code provided by the Wikidata API, return an image tag of the badge
* @param {String} badgeCode - as defined in this.config.badges
* @return {String} HTML markup for the image
*/
getBadgeMarkup(badgeCode) {
if (!this.config.badges[badgeCode]) return '';
const badgeImage = this.config.badges[badgeCode].image,
badgeName = this.config.badges[badgeCode].name;
return `<img class='article-badge' src='${badgeImage}' alt='${badgeName}' title='${badgeName}' />`;
}
/**
* Render list of langviews into view
* @returns {null} nothing
*/
renderData() {
/** sort ascending by current sort setting */
const sortedLangViews = this.langData.sort((a, b) => {
const before = this.getSortProperty(a, this.sort),
after = this.getSortProperty(b, this.sort);
if (before < after) {
return this.direction;
} else if (before > after) {
return -this.direction;
} else {
return 0;
}
});
$('.sort-link span').removeClass('glyphicon-sort-by-alphabet-alt glyphicon-sort-by-alphabet').addClass('glyphicon-sort');
const newSortClassName = parseInt(this.direction) === 1 ? 'glyphicon-sort-by-alphabet-alt' : 'glyphicon-sort-by-alphabet';
$(`.sort-link--${this.sort} span`).addClass(newSortClassName).removeClass('glyphicon-sort');
const totalBadgesMarkup = Object.keys(this.totals.badges).map(badge => {
return `<span class='nowrap'>${this.getBadgeMarkup(badge)} × ${this.totals.badges[badge]}</span>`;
}).join(', ');
$('.output-totals').html(
`<th scope='row'>${$.i18n('totals')}</th>
<th>${$.i18n('num-languages', this.langData.length)}</th>
<th>${$.i18n('unique-titles', this.totals.titles.length)}</th>
<th>${totalBadgesMarkup}</th>
<th>${this.n(this.totals.views)}</th>
<th>${this.n(Math.round(this.totals.views / this.numDaysInRange()))} / ${$.i18n('day')}</th>`
);
$('#lang_list').html('');
sortedLangViews.forEach((item, index) => {
let badgeMarkup = '';
if (item.badges) {
badgeMarkup = item.badges.map(this.getBadgeMarkup.bind(this)).join();
}
$('#lang_list').append(
`<tr>
<th scope='row'>${index + 1}</th>
<td>${item.lang}</td>
<td><a href="${item.url}" target="_blank">${item.pageName}</a></td>
<td>${badgeMarkup}</td>
<td><a href='${this.getPageviewsURL(item.lang, this.baseProject, item.pageName)}'>${this.n(item.views)}</a></td>
<td>${this.n(item.average)} / ${$.i18n('day')}</td>
</tr>`
);
});
this.pushParams();
this.setState('complete');
}
/**
* Get value of given langview entry for the purposes of column sorting
* @param {object} item - langview entry within this.langData
* @param {String} type - type of property to get
* @return {String|Number} - value
*/
getSortProperty(item, type) {
switch (type) {
case 'lang':
return item.lang;
case 'title':
return item.pageName;
case 'badges':
return item.badges.sort().join('');
case 'views':
return Number(item.views);
}
}
/**
* Link to /pageviews for given article and chosen daterange
* @param {String} lang - two character language code
* @param {String} project - base project without lang, e.g. wikipedia.org
* @param {String} article - page name
* @returns {String} URL
*/
// FIXME: should include agent and platform, and use special ranges as currently specified
getPageviewsURL(lang, project, article) {
let startDate = moment(this.daterangepicker.startDate),
endDate = moment(this.daterangepicker.endDate);
const platform = $(this.config.platformSelector).val();
if (endDate.diff(startDate, 'days') === 0) {
startDate.subtract(3, 'days');
endDate.add(3, 'days');
}
return `/pageviews#start=${startDate.format('YYYY-MM-DD')}` +
`&end=${endDate.format('YYYY-MM-DD')}&project=${lang}.${project}.org&platform=${platform}&pages=${article}`;
}
/**
* Loop through given interwiki data and query the pageviews API for each
* Also updates this.langData with result
* @param {Object} interWikiData - as given by the getInterwikiData promise
* @return {Deferred} - Promise resolving with data ready to be rendered to view
*/
getPageViewsData(interWikiData) {
const startDate = this.daterangepicker.startDate.startOf('day'),
endDate = this.daterangepicker.endDate.startOf('day'),
interWikiKeys = Object.keys(interWikiData);
// XXX: throttling
let dfd = $.Deferred(), promises = [], count = 0, hadFailure, failureRetries = {},
totalRequestCount = interWikiKeys.length, failedPages = [];
/** clear out existing data */
this.langData = [];
const makeRequest = dbName => {
const data = interWikiData[dbName],
uriEncodedPageName = encodeURIComponent(data.title);
const url = (
`https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/${data.lang}.${this.baseProject}` +
`/${$(this.config.platformSelector).val()}/${$(this.config.agentSelector).val()}/${uriEncodedPageName}/daily` +
`/${startDate.format(this.config.timestampFormat)}/${endDate.format(this.config.timestampFormat)}`
);
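/**
 * Illustrative expansion of the URL above (values made up, and assuming
 * config.timestampFormat is the API's YYYYMMDD00 form):
 *   https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/de.wikipedia/all-access/user/Hauskatze/daily/2016050100/2016053100
 */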
const promise = $.ajax({ url, dataType: 'json' });
promises.push(promise);
promise.done(pvData => {
const viewCounts = pvData.items.map(el => el.views),
views = viewCounts.reduce((a, b) => a + b);
this.langData.push({
badges: data.badges,
dbName,
lang: data.lang,
pageName: data.title,
views,
url: data.url,
average: Math.round(views / this.numDaysInRange())
});
/** keep track of unique badges/titles and total pageviews */
this.totals.views += views;
if (!this.totals.titles.includes(data.title)) {
this.totals.titles.push(data.title);
}
data.badges.forEach(badge => {
if (this.totals.badges[badge] === undefined) {
this.totals.badges[badge] = 1;
} else {
this.totals.badges[badge] += 1;
}
});
}).fail(errorData => {
// XXX: throttling
/** first detect if this was a Cassandra backend error, and if so, schedule a re-try */
const cassandraError = errorData.responseJSON.title === 'Error in Cassandra table storage backend',
failedPageLink = this.getPageLink(data.title, `${data.lang}.${this.baseProject}.org`);
if (cassandraError) {
if (failureRetries[dbName]) {
failureRetries[dbName]++;
} else { | totalRequestCount++;
return this.rateLimit(makeRequest, 100, this)(dbName);
}
/** retries exceeded */
failedPages.push(failedPageLink);
} else {
this.writeMessage(
`${failedPageLink}: ${$.i18n('api-error', 'Pageviews API')} - ${errorData.responseJSON.title}`
);
}
hadFailure = true; // don't treat this series of requests as being cached by server
}).always(() => {
this.updateProgressBar((++count / totalRequestCount) * 100);
// XXX: throttling, totalRequestCount can just be interWikiKeys.length
if (count === totalRequestCount) {
dfd.resolve(this.langData);
if (failedPages.length) {
this.writeMessage($.i18n(
'api-error-timeout',
'<ul>' +
failedPages.map(failedPage => `<li>${failedPage}</li>`).join('') +
'</ul>'
));
}
/**
* if there were no failures, assume the resource is now cached by the server
* and save this assumption to our own cache so we don't throttle the same requests
*/
// XXX: throttling
if (!hadFailure) {
simpleStorage.set(this.getCacheKey(), true, {TTL: 600000});
}
}
});
};
/**
* We don't want to throttle requests for cached resources. However in our case,
* we're unable to check response headers to see if the resource was cached,
* so we use simpleStorage to keep track of what the user has recently queried.
*/
// XXX: throttling
const requestFn = this.isRequestCached() ? makeRequest : this.rateLimit(makeRequest, 100, this);
interWikiKeys.forEach((dbName, index) => {
requestFn(dbName);
});
return dfd;
}
/**
* Return cache key for current params
* @return {String} key
*/
getCacheKey() {
return `lv-cache-${this.hashCode(
JSON.stringify(this.getParams(true))
)}`;
}
/**
* Check simple storage to see if a request with the current params would be cached
* @return {Boolean} cached or not
*/
isRequestCached() {
return simpleStorage.hasKey(this.getCacheKey());
}
/**
* Query Wikidata to find data about a given page across all sister projects
* @param {String} dbName - database name of source project
* @param {String} pageName - name of page we want to get data about
* @return {Deferred} - Promise resolving with interwiki data
*/
getInterwikiData(dbName, pageName) {
const dfd = $.Deferred();
const url = `https://www.wikidata.org/w/api.php?action=wbgetentities&sites=${dbName}` +
`&titles=${encodeURIComponent(pageName)}&props=sitelinks/urls|datatype&format=json&callback=?`;
$.getJSON(url).done(data => {
if (data.error) {
return dfd.reject(`${$.i18n('api-error', 'Wikidata')}: ${data.error.info}`);
} else if (data.entities['-1']) {
return dfd.reject(
`<a href='${this.getPageURL(pageName)}'>${pageName.descore()}</a> - ${$.i18n('api-error-no-data')}`
);
}
const key = Object.keys(data.entities)[0],
sitelinks = data.entities[key].sitelinks,
filteredLinks = {},
matchRegex = new RegExp(`^https://[\\w-]+\\.${this.baseProject}\\.org`);
/** restrict to selected base project (e.g. wikipedias, not wikipedias and wikivoyages) */
Object.keys(sitelinks).forEach(key => {
const siteMapKey = sitelinks[key].site.replace(/-/g, '_');
if (matchRegex.test(sitelinks[key].url) && siteMap[siteMapKey]) {
sitelinks[key].lang = siteMap[siteMapKey].replace(/\.wiki.*$/, '');
filteredLinks[key] = sitelinks[key];
}
});
return dfd.resolve(filteredLinks);
});
return dfd;
}
/**
* Parse wiki URL for the page name
* @param {String} url - full URL to a wiki page
* @return {String|null} page name
*/
getPageNameFromURL(url) {
if (url.includes('?')) {
return url.match(/\?(?:.*\b)?title=(.*?)(?:&|$)/)[1];
} else {
return url.match(/\/wiki\/(.*?)(?:\?|$)/)[1];
}
}
/**
* Parses the URL query string and sets all the inputs accordingly
* Should only be called on initial page load, until we decide to support pop states (probably never)
* @returns {null} nothing
*/
popParams() {
let startDate, endDate, params = this.parseQueryString('pages');
$(this.config.projectInput).val(params.project || this.config.defaults.project);
if (this.validateProject()) return;
// FIXME: only run this when they actually submit
this.patchUsage('lv');
/**
* Check if we're using a valid range, and if so ignore any start/end dates.
* If an invalid range, throw an error and use default dates.
*/
if (params.range) {
if (!this.setSpecialRange(params.range)) {
this.addSiteNotice('danger', $.i18n('param-error-3'), $.i18n('invalid-params'), true);
this.setSpecialRange(this.config.defaults.dateRange);
}
} else if (params.start) {
startDate = moment(params.start || moment().subtract(this.config.defaults.daysAgo, 'days'));
endDate = moment(params.end || Date.now());
if (startDate < this.config.minDate || endDate < this.config.minDate) {
this.addSiteNotice('danger', $.i18n('param-error-1', `${$.i18n('july')} 2015`), $.i18n('invalid-params'), true);
return;
} else if (startDate > endDate) {
this.addSiteNotice('warning', $.i18n('param-error-2'), $.i18n('invalid-params'), true);
return;
}
this.daterangepicker.setStartDate(startDate);
this.daterangepicker.setEndDate(endDate);
} else {
this.setSpecialRange(this.config.defaults.dateRange);
}
$(this.config.platformSelector).val(params.platform || 'all-access');
$(this.config.agentSelector).val(params.agent || 'user');
this.sort = params.sort || this.config.defaults.params.sort;
this.direction = params.direction || this.config.defaults.params.direction;
/** start up processing if page name is present */
if (params.page) {
$(this.config.articleInput).val(decodeURIComponent(params.page).descore());
this.processArticle();
}
}
getState() {
const classList = $('main')[0].classList;
return this.config.formStates.filter(stateName => {
return classList.contains(stateName);
})[0];
}
/**
* Helper to set a CSS class on the `main` node,
* styling the document based on a 'state'
* @param {String} state - class to be added;
* should be one of ['initial', 'processing', 'complete']
* @returns {null} nothing
*/
setState(state) {
$('main').removeClass(this.config.formStates.join(' ')).addClass(state);
switch (state) {
case 'initial':
this.clearMessages();
this.assignDefaults();
if (this.typeahead) this.typeahead.hide();
$(this.config.articleInput).val('').focus();
break;
case 'processing':
this.processStarted();
this.clearMessages();
document.activeElement.blur();
$('.progress-bar').addClass('active');
break;
case 'complete':
this.processEnded();
/** stop hidden animation for slight performance improvement */
this.updateProgressBar(0);
$('.progress-bar').removeClass('active');
break;
case 'invalid':
break;
}
}
/**
* sets up the daterange selector and adds listeners
* @returns {null} - nothing
*/
setupDateRangeSelector() {
super.setupDateRangeSelector();
$(this.config.dateRangeSelector).on('apply.daterangepicker', (e, action) => {
if (action.chosenLabel === $.i18n('custom-range')) {
this.specialRange = null;
/** force events to re-fire since apply.daterangepicker occurs before 'change' event */
this.daterangepicker.updateElement();
}
});
}
/**
* Process the langviews for the article and options entered
* Called when submitting the form
* @return {null} nothing
*/
processArticle() {
// XXX: throttling
/** allow resubmission of queries that are cached */
if (!this.isRequestCached()) {
/** Check if user has exceeded request limit and throw error */
if (simpleStorage.hasKey('pageviews-throttle')) {
const timeRemaining = Math.round(simpleStorage.getTTL('pageviews-throttle') / 1000);
/** > 0 check to combat race conditions */
if (timeRemaining > 0) {
return this.writeMessage($.i18n(
'api-throttle-wait', `<b>${timeRemaining}</b>`,
'<a href="https://phabricator.wikimedia.org/T124314" target="_blank">phab:T124314</a>'
), true);
}
}
}
const page = $(this.config.articleInput).val();
this.setState('processing');
const dbName = Object.keys(siteMap).find(key => siteMap[key] === $(this.config.projectInput).val());
this.getInterwikiData(dbName, page).done(interWikiData => {
/**
* XXX: throttling
* At this point we know we have data to process,
* so set the throttle flag to disallow additional requests for the next 90 seconds
*/
this.setThrottle();
this.getPageViewsData(interWikiData).done(() => {
$('.langviews-page-name').text(page).prop('href', this.getPageURL(page));
$('.langviews-params').text($(this.config.dateRangeSelector).val());
this.updateProgressBar(100);
this.renderData();
/**
* XXX: throttling
* Reset throttling again; the first one was in case they aborted
*/
this.setThrottle();
});
}).fail(error => {
this.setState('initial');
/** structured error comes back as a string, otherwise we don't know what happened */
if (typeof error === 'string') {
this.writeMessage(error);
} else {
this.writeMessage($.i18n('api-error-unknown', 'Wikidata'));
}
});
}
/**
* Setup typeahead on the article input, killing the previous instance if present
* Called in validateProject, which is called in popParams when the app is first loaded
* @return {null} Nothing
*/
setupArticleInput() {
if (this.typeahead) this.typeahead.destroy();
$(this.config.articleInput).typeahead({
ajax: {
url: `https://${this.project}.org/w/api.php`,
timeout: 200,
triggerLength: 1,
method: 'get',
preDispatch: query => {
return {
action: 'query',
list: 'prefixsearch',
format: 'json',
pssearch: query
};
},
preProcess: data => {
const results = data.query.prefixsearch.map(elem => elem.title);
return results;
}
}
});
}
/**
* Set value of progress bar
* @param {Number} value - percentage as float
* @return {null} nothing
*/
updateProgressBar(value) {
$('.progress-bar').css('width', `${value.toFixed(2)}%`);
}
/**
* Validate the currently entered project. Called when the value is changed
* @return {boolean} true if validation failed
*/
validateProject() {
const project = $(this.config.projectInput).val();
if (!this.isMultilangProject()) {
this.writeMessage(
$.i18n('invalid-lang-project', `<a href='//${project}'>${project}</a>`),
true
);
this.setState('invalid');
return true;
}
this.setState('initial');
/** kill and re-init typeahead to point to new project */
this.setupArticleInput();
return false;
}
/**
* Exports current mass data to CSV format and loads it in a new tab
* With the prepended data:text/csv this should cause the browser to download the data
* @returns {null} nothing
*/
exportCSV() {
let csvContent = 'data:text/csv;charset=utf-8,Language,Title,Badges,Pageviews,Average\n';
// Add the rows to the CSV
this.langData.forEach(page => {
const pageName = '"' + page.pageName.descore().replace(/"/g, '""') + '"',
badges = '"' + page.badges.map(badge => this.config.badges[badge].name.replace(/"/g, '""')) + '"';
csvContent += [
page.lang,
pageName,
badges,
page.views,
page.average
].join(',') + '\n';
});
// Output the CSV file to the browser
const encodedUri = encodeURI(csvContent);
window.open(encodedUri);
}
/**
* Exports current mass data to JSON format and loads it in a new tab
* @returns {string} stringified JSON
*/
exportJSON() {
const jsonContent = 'data:text/json;charset=utf-8,' + JSON.stringify(this.langData),
encodedUri = encodeURI(jsonContent);
window.open(encodedUri);
return jsonContent;
}
}
$(document).ready(() => {
/** assume hash params are supposed to be query params */
if (document.location.hash && !document.location.search) {
return document.location.href = document.location.href.replace('#', '?');
} else if (document.location.hash) {
return document.location.href = document.location.href.replace(/\#.*/, '');
}
new LangViews();
}); | failureRetries[dbName] = 1;
}
/** maximum of 3 retries */
if (failureRetries[dbName] < 3) { | random_line_split |
langviews.js | /**
* Langviews Analysis tool
* @file Main file for Langviews application
* @author MusikAnimal
* @copyright 2016 MusikAnimal
* @license MIT License: https://opensource.org/licenses/MIT
*/
const config = require('./config');
const siteMap = require('../shared/site_map');
const siteDomains = Object.keys(siteMap).map(key => siteMap[key]);
const Pv = require('../shared/pv');
/** Main LangViews class */
class LangViews extends Pv {
constructor() {
super(config);
}
/**
* Initialize the application.
* Called in `pv.js` after translations have loaded
* @return {null} Nothing
*/
initialize() {
this.assignDefaults();
this.setupDateRangeSelector();
this.popParams();
this.setupListeners();
this.updateInterAppLinks();
}
/**
* Copy default values over to class instance
* Use JSON stringify/parsing so as to make a deep clone of the defaults
* @return {null} Nothing
*/
assignDefaults() {
Object.assign(this, JSON.parse(JSON.stringify(this.config.defaults.params)));
}
/**
* Add general event listeners
* @returns {null} nothing
*/
setupListeners() {
super.setupListeners();
$('#langviews_form').on('submit', e => {
e.preventDefault(); // prevent page from reloading
this.processArticle();
});
$('.another-query').on('click', () => {
this.setState('initial');
this.pushParams(true);
});
$('.sort-link').on('click', e => {
const sortType = $(e.currentTarget).data('type');
this.direction = this.sort === sortType ? -this.direction : 1;
this.sort = sortType;
this.renderData();
});
$(this.config.projectInput).on('change', () => {
this.validateProject();
this.updateInterAppLinks();
});
$('.download-csv').on('click', this.exportCSV.bind(this));
$('.download-json').on('click', this.exportJSON.bind(this));
}
/**
* Get the base project name (without language and the .org)
* @returns {String} project name
*/
get baseProject() {
return this.project.split('.')[1];
}
/**
* @returns {Typeahead} instance
*/
get typeahead() {
return $(this.config.articleInput).data('typeahead');
}
/**
* Get all user-inputted parameters
* @param {boolean} [forCacheKey] whether or not to include the page name, and exclude sort and direction
* in the returned object. This is for the purposes of generating a unique cache key for params affecting the API queries
* @return {Object} project, platform, agent, etc.
*/
getParams(forCacheKey = false) {
let params = {
project: $(this.config.projectInput).val(),
platform: $(this.config.platformSelector).val(),
agent: $(this.config.agentSelector).val()
};
/**
* Override start and end with custom range values, if configured (set by URL params or setupDateRangeSelector)
* Valid values are those defined in this.config.specialRanges, constructed like `{range: 'last-month'}`,
* or a relative range like `{range: 'latest-N'}` where N is the number of days.
*/
if (this.specialRange && !forCacheKey) {
params.range = this.specialRange.range;
} else {
params.start = this.daterangepicker.startDate.format('YYYY-MM-DD');
params.end = this.daterangepicker.endDate.format('YYYY-MM-DD');
}
if (forCacheKey) {
params.page = $(this.config.articleInput).val();
} else {
params.sort = this.sort;
params.direction = this.direction;
}
return params;
}
/**
* Get params needed to create a permanent link of visible data
* @return {Object} hash of params
*/
getPermaLink() {
let params = this.getParams(true);
params.sort = this.sort;
params.direction = this.direction;
return params;
}
/**
* Push relevant class properties to the query string
* @param {Boolean} clear - whether to clear the query string entirely
* @return {null} nothing
*/
pushParams(clear = false) {
if (!window.history || !window.history.replaceState) return;
if (clear) {
return history.replaceState(null, document.title, location.href.split('?')[0]);
}
/** only certain characters within the page name are escaped */
const page = $(this.config.articleInput).val().score().replace(/[&%]/g, escape);
window.history.replaceState({}, document.title, `?${$.param(this.getParams())}&page=${page}`);
$('.permalink').prop('href', `/langviews?${$.param(this.getPermaLink())}`);
}
/**
* Given the badge code provided by the Wikidata API, return an image tag of the badge
* @param {String} badgeCode - as defined in this.config.badges
* @return {String} HTML markup for the image
*/
getBadgeMarkup(badgeCode) {
if (!this.config.badges[badgeCode]) return '';
const badgeImage = this.config.badges[badgeCode].image,
badgeName = this.config.badges[badgeCode].name;
return `<img class='article-badge' src='${badgeImage}' alt='${badgeName}' title='${badgeName}' />`;
}
/**
* Render list of langviews into view
* @returns {null} nothing
*/
renderData() {
/** sort ascending by current sort setting */
const sortedLangViews = this.langData.sort((a, b) => {
const before = this.getSortProperty(a, this.sort),
after = this.getSortProperty(b, this.sort);
if (before < after) {
return this.direction;
} else if (before > after) {
return -this.direction;
} else {
return 0;
}
});
$('.sort-link span').removeClass('glyphicon-sort-by-alphabet-alt glyphicon-sort-by-alphabet').addClass('glyphicon-sort');
const newSortClassName = parseInt(this.direction) === 1 ? 'glyphicon-sort-by-alphabet-alt' : 'glyphicon-sort-by-alphabet';
$(`.sort-link--${this.sort} span`).addClass(newSortClassName).removeClass('glyphicon-sort');
const totalBadgesMarkup = Object.keys(this.totals.badges).map(badge => {
return `<span class='nowrap'>${this.getBadgeMarkup(badge)} × ${this.totals.badges[badge]}</span>`;
}).join(', ');
$('.output-totals').html(
`<th scope='row'>${$.i18n('totals')}</th>
<th>${$.i18n('num-languages', this.langData.length)}</th>
<th>${$.i18n('unique-titles', this.totals.titles.length)}</th>
<th>${totalBadgesMarkup}</th>
<th>${this.n(this.totals.views)}</th>
<th>${this.n(Math.round(this.totals.views / this.numDaysInRange()))} / ${$.i18n('day')}</th>`
);
$('#lang_list').html('');
sortedLangViews.forEach((item, index) => {
let badgeMarkup = '';
if (item.badges) {
badgeMarkup = item.badges.map(this.getBadgeMarkup.bind(this)).join();
}
$('#lang_list').append(
`<tr>
<th scope='row'>${index + 1}</th>
<td>${item.lang}</td>
<td><a href="${item.url}" target="_blank">${item.pageName}</a></td>
<td>${badgeMarkup}</td>
<td><a href='${this.getPageviewsURL(item.lang, this.baseProject, item.pageName)}'>${this.n(item.views)}</a></td>
<td>${this.n(item.average)} / ${$.i18n('day')}</td>
</tr>`
);
});
this.pushParams();
this.setState('complete');
}
/**
* Get value of given langview entry for the purposes of column sorting
* @param {object} item - langview entry within this.langData
* @param {String} type - type of property to get
* @return {String|Number} - value
*/
getSortProperty(item, type) {
switch (type) {
case 'lang':
return item.lang;
case 'title':
return item.pageName;
case 'badges':
return item.badges.sort().join('');
case 'views':
return Number(item.views);
}
}
/**
* Link to /pageviews for given article and chosen daterange
* @param {String} lang - two character language code
* @param {String} project - base project without lang, e.g. wikipedia.org
* @param {String} article - page name
* @returns {String} URL
*/
// FIXME: should include agent and platform, and use special ranges as currently specified
getPageviewsURL(lang, project, article) {
let startDate = moment(this.daterangepicker.startDate),
endDate = moment(this.daterangepicker.endDate);
const platform = $(this.config.platformSelector).val();
if (endDate.diff(startDate, 'days') === 0) {
startDate.subtract(3, 'days');
endDate.add(3, 'days');
}
return `/pageviews#start=${startDate.format('YYYY-MM-DD')}` +
`&end=${endDate.format('YYYY-MM-DD')}&project=${lang}.${project}.org&platform=${platform}&pages=${article}`;
}
/**
* Loop through given interwiki data and query the pageviews API for each
* Also updates this.langData with result
* @param {Object} interWikiData - as given by the getInterwikiData promise
* @return {Deferred} - Promise resolving with data ready to be rendered to view
*/
getPageViewsData(interWikiData) {
const startDate = this.daterangepicker.startDate.startOf('day'),
endDate = this.daterangepicker.endDate.startOf('day'),
interWikiKeys = Object.keys(interWikiData);
// XXX: throttling
let dfd = $.Deferred(), promises = [], count = 0, hadFailure, failureRetries = {},
totalRequestCount = interWikiKeys.length, failedPages = [];
/** clear out existing data */
this.langData = [];
const makeRequest = dbName => {
const data = interWikiData[dbName],
uriEncodedPageName = encodeURIComponent(data.title);
const url = (
`https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article/${data.lang}.${this.baseProject}` +
`/${$(this.config.platformSelector).val()}/${$(this.config.agentSelector).val()}/${uriEncodedPageName}/daily` +
`/${startDate.format(this.config.timestampFormat)}/${endDate.format(this.config.timestampFormat)}`
);
const promise = $.ajax({ url, dataType: 'json' });
promises.push(promise);
promise.done(pvData => {
const viewCounts = pvData.items.map(el => el.views),
views = viewCounts.reduce((a, b) => a + b);
this.langData.push({
badges: data.badges,
dbName,
lang: data.lang,
pageName: data.title,
views,
url: data.url,
average: Math.round(views / this.numDaysInRange())
});
/** keep track of unique badges/titles and total pageviews */
this.totals.views += views;
if (!this.totals.titles.includes(data.title)) {
this.totals.titles.push(data.title);
}
data.badges.forEach(badge => {
if (this.totals.badges[badge] === undefined) {
this.totals.badges[badge] = 1;
} else {
this.totals.badges[badge] += 1;
}
});
}).fail(errorData => {
// XXX: throttling
/** first detect if this was a Cassandra backend error, and if so, schedule a re-try */
const cassandraError = errorData.responseJSON.title === 'Error in Cassandra table storage backend',
failedPageLink = this.getPageLink(data.title, `${data.lang}.${this.baseProject}.org`);
if (cassandraError) {
if (failureRetries[dbName]) {
failureRetries[dbName]++;
} else {
failureRetries[dbName] = 1;
}
/** maximum of 3 retries */
if (failureRetries[dbName] < 3) {
totalRequestCount++;
return this.rateLimit(makeRequest, 100, this)(dbName);
}
/** retries exceeded */
failedPages.push(failedPageLink);
} else {
this.writeMessage(
`${failedPageLink}: ${$.i18n('api-error', 'Pageviews API')} - ${errorData.responseJSON.title}`
);
}
hadFailure = true; // don't treat this series of requests as being cached by server
}).always(() => {
this.updateProgressBar((++count / totalRequestCount) * 100);
// XXX: throttling, totalRequestCount can just be interWikiKeys.length
if (count === totalRequestCount) {
dfd.resolve(this.langData);
if (failedPages.length) {
this.writeMessage($.i18n(
'api-error-timeout',
'<ul>' +
failedPages.map(failedPage => `<li>${failedPage}</li>`).join('') +
'</ul>'
));
}
/**
* if there were no failures, assume the resource is now cached by the server
* and save this assumption to our own cache so we don't throttle the same requests
*/
// XXX: throttling
if (!hadFailure) {
simpleStorage.set(this.getCacheKey(), true, {TTL: 600000});
}
}
});
};
/**
* We don't want to throttle requests for cached resources. However in our case,
* we're unable to check response headers to see if the resource was cached,
* so we use simpleStorage to keep track of what the user has recently queried.
*/
// XXX: throttling
const requestFn = this.isRequestCached() ? makeRequest : this.rateLimit(makeRequest, 100, this);
interWikiKeys.forEach((dbName, index) => {
requestFn(dbName);
});
return dfd;
}
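 /**
  * The `rateLimit` helper used above is defined elsewhere in the codebase and
  * is not shown in this excerpt. A minimal sketch of a rate-limiting wrapper,
  * assuming it only needs to space calls out by `delay` milliseconds, might
  * look like the following (illustrative only):
  *
  *   rateLimit(fn, delay, context) {
  *     let queue = [], timer = null;
  *     const processNext = () => {
  *       const args = queue.shift();
  *       if (args) fn.apply(context, args);
  *       if (!queue.length) { clearInterval(timer); timer = null; }
  *     };
  *     return (...args) => {
  *       queue.push(args);
  *       if (!timer) timer = setInterval(processNext, delay);
  *     };
  *   }
  */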
/**
* Return cache key for current params
* @return {String} key
*/
getCacheKey() {
return `lv-cache-${this.hashCode(
JSON.stringify(this.getParams(true))
)}`;
}
/**
* Check simple storage to see if a request with the current params would be cached
* @return {Boolean} cached or not
*/
isRequestCached() {
return simpleStorage.hasKey(this.getCacheKey());
}
/**
* Query Wikidata to find data about a given page across all sister projects
* @param {String} dbName - database name of source project
* @param {String} pageName - name of page we want to get data about
* @return {Deferred} - Promise resolving with interwiki data
*/
getInterwikiData(dbName, pageName) {
const dfd = $.Deferred();
const url = `https://www.wikidata.org/w/api.php?action=wbgetentities&sites=${dbName}` +
`&titles=${encodeURIComponent(pageName)}&props=sitelinks/urls|datatype&format=json&callback=?`;
$.getJSON(url).done(data => {
if (data.error) {
return dfd.reject(`${$.i18n('api-error', 'Wikidata')}: ${data.error.info}`);
} else if (data.entities['-1']) {
return dfd.reject(
`<a href='${this.getPageURL(pageName)}'>${pageName.descore()}</a> - ${$.i18n('api-error-no-data')}`
);
}
const key = Object.keys(data.entities)[0],
sitelinks = data.entities[key].sitelinks,
filteredLinks = {},
matchRegex = new RegExp(`^https://[\\w-]+\\.${this.baseProject}\\.org`);
/** restrict to selected base project (e.g. wikipedias, not wikipedias and wikivoyages) */
Object.keys(sitelinks).forEach(key => {
const siteMapKey = sitelinks[key].site.replace(/-/g, '_');
if (matchRegex.test(sitelinks[key].url) && siteMap[siteMapKey]) {
sitelinks[key].lang = siteMap[siteMapKey].replace(/\.wiki.*$/, '');
filteredLinks[key] = sitelinks[key];
}
});
return dfd.resolve(filteredLinks);
});
return dfd;
}
/**
* Parse wiki URL for the page name
* @param {String} url - full URL to a wiki page
* @return {String|null} page name
*/
getPageNameFromURL(url) {
if (url.includes('?')) {
return url.match(/\?(?:.*\b)?title=(.*?)(?:&|$)/)[1];
} else |
}
/**
* Parses the URL query string and sets all the inputs accordingly
* Should only be called on initial page load, until we decide to support pop states (probably never)
* @returns {null} nothing
*/
popParams() {
let startDate, endDate, params = this.parseQueryString('pages');
$(this.config.projectInput).val(params.project || this.config.defaults.project);
if (this.validateProject()) return;
// FIXME: only run this when they actually submit
this.patchUsage('lv');
/**
* Check if we're using a valid range, and if so ignore any start/end dates.
 * If an invalid range, throw an error and use default dates.
*/
if (params.range) {
if (!this.setSpecialRange(params.range)) {
this.addSiteNotice('danger', $.i18n('param-error-3'), $.i18n('invalid-params'), true);
this.setSpecialRange(this.config.defaults.dateRange);
}
} else if (params.start) {
startDate = moment(params.start || moment().subtract(this.config.defaults.daysAgo, 'days'));
endDate = moment(params.end || Date.now());
if (startDate < this.config.minDate || endDate < this.config.minDate) {
this.addSiteNotice('danger', $.i18n('param-error-1', `${$.i18n('july')} 2015`), $.i18n('invalid-params'), true);
return;
} else if (startDate > endDate) {
this.addSiteNotice('warning', $.i18n('param-error-2'), $.i18n('invalid-params'), true);
return;
}
this.daterangepicker.setStartDate(startDate);
this.daterangepicker.setEndDate(endDate);
} else {
this.setSpecialRange(this.config.defaults.dateRange);
}
$(this.config.platformSelector).val(params.platform || 'all-access');
$(this.config.agentSelector).val(params.agent || 'user');
this.sort = params.sort || this.config.defaults.params.sort;
this.direction = params.direction || this.config.defaults.params.direction;
/** start up processing if page name is present */
if (params.page) {
$(this.config.articleInput).val(decodeURIComponent(params.page).descore());
this.processArticle();
}
}
getState() {
const classList = $('main')[0].classList;
return this.config.formStates.filter(stateName => {
return classList.contains(stateName);
})[0];
}
/**
* Helper to set a CSS class on the `main` node,
* styling the document based on a 'state'
* @param {String} state - class to be added;
 * should be one of ['initial', 'processing', 'complete', 'invalid']
* @returns {null} nothing
*/
setState(state) {
$('main').removeClass(this.config.formStates.join(' ')).addClass(state);
switch (state) {
case 'initial':
this.clearMessages();
this.assignDefaults();
if (this.typeahead) this.typeahead.hide();
$(this.config.articleInput).val('').focus();
break;
case 'processing':
this.processStarted();
this.clearMessages();
document.activeElement.blur();
$('.progress-bar').addClass('active');
break;
case 'complete':
this.processEnded();
/** stop hidden animation for slight performance improvement */
this.updateProgressBar(0);
$('.progress-bar').removeClass('active');
break;
case 'invalid':
break;
}
}
/**
* sets up the daterange selector and adds listeners
* @returns {null} - nothing
*/
setupDateRangeSelector() {
super.setupDateRangeSelector();
$(this.config.dateRangeSelector).on('apply.daterangepicker', (e, action) => {
if (action.chosenLabel === $.i18n('custom-range')) {
this.specialRange = null;
/** force events to re-fire since apply.daterangepicker occurs before 'change' event */
this.daterangepicker.updateElement();
}
});
}
/**
* Process the langviews for the article and options entered
* Called when submitting the form
* @return {null} nothing
*/
processArticle() {
// XXX: throttling
/** allow resubmission of queries that are cached */
if (!this.isRequestCached()) {
/** Check if user has exceeded request limit and throw error */
if (simpleStorage.hasKey('pageviews-throttle')) {
const timeRemaining = Math.round(simpleStorage.getTTL('pageviews-throttle') / 1000);
/** > 0 check to combat race conditions */
if (timeRemaining > 0) {
return this.writeMessage($.i18n(
'api-throttle-wait', `<b>${timeRemaining}</b>`,
'<a href="https://phabricator.wikimedia.org/T124314" target="_blank">phab:T124314</a>'
), true);
}
}
}
const page = $(this.config.articleInput).val();
this.setState('processing');
const dbName = Object.keys(siteMap).find(key => siteMap[key] === $(this.config.projectInput).val());
this.getInterwikiData(dbName, page).done(interWikiData => {
/**
* XXX: throttling
* At this point we know we have data to process,
* so set the throttle flag to disallow additional requests for the next 90 seconds
*/
this.setThrottle();
this.getPageViewsData(interWikiData).done(() => {
$('.langviews-page-name').text(page).prop('href', this.getPageURL(page));
$('.langviews-params').text($(this.config.dateRangeSelector).val());
this.updateProgressBar(100);
this.renderData();
/**
* XXX: throttling
* Reset throttling again; the first one was in case they aborted
*/
this.setThrottle();
});
}).fail(error => {
this.setState('initial');
/** structured error comes back as a string, otherwise we don't know what happened */
if (typeof error === 'string') {
this.writeMessage(error);
} else {
this.writeMessage($.i18n('api-error-unknown', 'Wikidata'));
}
});
}
/**
 * Setup typeahead on the article input, killing the previous instance if present
* Called in validateProject, which is called in popParams when the app is first loaded
* @return {null} Nothing
*/
setupArticleInput() {
if (this.typeahead) this.typeahead.destroy();
$(this.config.articleInput).typeahead({
ajax: {
url: `https://${this.project}.org/w/api.php`,
timeout: 200,
triggerLength: 1,
method: 'get',
preDispatch: query => {
return {
action: 'query',
list: 'prefixsearch',
format: 'json',
pssearch: query
};
},
preProcess: data => {
const results = data.query.prefixsearch.map(elem => elem.title);
return results;
}
}
});
}
/**
* Set value of progress bar
* @param {Number} value - percentage as float
* @return {null} nothing
*/
updateProgressBar(value) {
$('.progress-bar').css('width', `${value.toFixed(2)}%`);
}
/**
* Validate the currently entered project. Called when the value is changed
* @return {boolean} true if validation failed
*/
validateProject() {
const project = $(this.config.projectInput).val();
if (!this.isMultilangProject()) {
this.writeMessage(
$.i18n('invalid-lang-project', `<a href='//${project}'>${project}</a>`),
true
);
this.setState('invalid');
return true;
}
this.setState('initial');
/** kill and re-init typeahead to point to new project */
this.setupArticleInput();
return false;
}
/**
* Exports current mass data to CSV format and loads it in a new tab
* With the prepended data:text/csv this should cause the browser to download the data
 * @returns {null} nothing
*/
exportCSV() {
let csvContent = 'data:text/csv;charset=utf-8,Language,Title,Badges,Pageviews,Average\n';
// Add the rows to the CSV
this.langData.forEach(page => {
const pageName = '"' + page.pageName.descore().replace(/"/g, '""') + '"',
badges = '"' + page.badges.map(badge => this.config.badges[badge].name.replace(/"/g, '""')) + '"';
csvContent += [
page.lang,
pageName,
badges,
page.views,
page.average
].join(',') + '\n';
});
// Output the CSV file to the browser
const encodedUri = encodeURI(csvContent);
window.open(encodedUri);
}
/**
* Exports current mass data to JSON format and loads it in a new tab
* @returns {string} stringified JSON
*/
exportJSON() {
const jsonContent = 'data:text/json;charset=utf-8,' + JSON.stringify(this.langData),
encodedUri = encodeURI(jsonContent);
window.open(encodedUri);
return jsonContent;
}
}
$(document).ready(() => {
/** assume hash params are supposed to be query params */
if (document.location.hash && !document.location.search) {
return document.location.href = document.location.href.replace('#', '?');
} else if (document.location.hash) {
return document.location.href = document.location.href.replace(/\#.*/, '');
}
new LangViews();
});
| {
return url.match(/\/wiki\/(.*?)(?:\?|$)/)[1];
} | conditional_block |
gaia_executor.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
#![allow(bare_trait_objects)]
extern crate futures;
extern crate grpcio;
#[macro_use]
extern crate log;
extern crate gaia_pegasus;
extern crate gs_gremlin;
extern crate log4rs;
extern crate maxgraph_common;
extern crate maxgraph_runtime;
extern crate maxgraph_server;
extern crate maxgraph_store;
extern crate pegasus;
extern crate pegasus_server;
extern crate protobuf;
extern crate structopt;
use gaia_runtime::server::init_with_rpc_service;
use gaia_runtime::server::manager::GaiaServerManager;
use grpcio::ChannelBuilder;
use grpcio::EnvBuilder;
use gs_gremlin::{InitializeJobCompiler, QueryVineyard};
use maxgraph_common::proto::data::*;
use maxgraph_common::proto::hb::*;
use maxgraph_common::proto::query_flow::*;
use maxgraph_common::util;
use maxgraph_common::util::get_local_ip;
use maxgraph_common::util::log4rs::init_log4rs;
use maxgraph_runtime::server::manager::*;
use maxgraph_runtime::server::RuntimeInfo;
use maxgraph_server::StoreContext;
use maxgraph_store::api::graph_partition::GraphPartitionManager;
use maxgraph_store::api::prelude::*;
use maxgraph_store::config::{StoreConfig, VINEYARD_GRAPH};
use pegasus_server::rpc::start_rpc_server;
use pegasus_server::service::Service;
use protobuf::Message;
use std::collections::HashMap;
use std::env;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;
fn main() {
if let Some(_) = env::args().find(|arg| arg == "--show-build-info") {
util::get_build_info();
return;
}
init_log4rs();
let mut store_config = {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 6 && args[1] == "--config" {
let mut store_config = StoreConfig::init_from_file(&args[2], &args[4]);
if args.len() == 6 {
store_config.graph_name = (&args[5]).to_string();
}
store_config
} else {
StoreConfig::init()
}
};
let (alive_id, partitions) = get_init_info(&store_config);
info!("alive_id: {:?}, partitions: {:?}", alive_id, partitions);
store_config.update_alive_id(alive_id);
info!("{:?}", store_config);
let worker_num = store_config.timely_worker_per_process;
let store_config = Arc::new(store_config);
if store_config.graph_type.to_lowercase().eq(VINEYARD_GRAPH) {
if cfg!(target_os = "linux") {
info!(
"Start executor with vineyard graph object id {:?}",
store_config.vineyard_graph_id
);
use maxgraph_runtime::store::ffi::FFIGraphStore;
let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32);
let partition_manager = ffi_store.get_partition_manager();
run_main(
store_config,
Arc::new(ffi_store),
Arc::new(partition_manager),
);
} else {
unimplemented!("Mac not support vineyard graph")
}
} else {
unimplemented!("only start vineyard graph from executor")
}
}
fn run_main<V, VI, E, EI>(
store_config: Arc<StoreConfig>,
graph: Arc<GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<GraphPartitionManager>,
) where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
let process_partition_list = partition_manager.get_process_partition_list();
info!("process_partition_list: {:?}", process_partition_list);
let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(
store_config.timely_worker_per_process,
process_partition_list,
)));
let runtime_info_clone = runtime_info.clone();
let (hb_resp_sender, hb_resp_receiver) = channel();
let signal = Arc::new(AtomicBool::new(false));
let gaia_server_manager =
GaiaServerManager::new(hb_resp_receiver, runtime_info, signal.clone());
let partition_worker_mapping = gaia_server_manager.get_partition_worker_mapping();
let worker_partition_list_mapping = gaia_server_manager.get_worker_partition_list_mapping();
let server_manager = Box::new(gaia_server_manager);
let _manager_guards = ServerManager::start_server(
server_manager,
store_config.clone(),
Box::new(recover_prepare),
)
.unwrap();
let gaia_service = GaiaService::new(
store_config.clone(),
graph.clone(),
partition_manager.clone(),
partition_worker_mapping,
worker_partition_list_mapping,
);
let (_, gaia_rpc_service_port) = gaia_service.start_rpc_service();
let store_context = StoreContext::new(graph, partition_manager);
start_hb_rpc_service(
runtime_info_clone,
store_config,
gaia_rpc_service_port,
hb_resp_sender,
store_context,
);
thread::sleep(Duration::from_secs(u64::max_value()));
}
fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String> {
::protobuf::parse_from_bytes::<QueryFlow>(prepared)
.map_err(|err| err.to_string())
.and_then(move |desc| {
info!("parse {} bytes to {:?} ", prepared.len(), desc);
Ok(desc.write_to_bytes().expect("query flow to bytes"))
})
}
fn start_hb_rpc_service<VV, VVI, EE, EEI>(
runtime_info: Arc<Mutex<RuntimeInfo>>,
store_config: Arc<StoreConfig>,
gaia_service_port: u16,
hb_resp_sender: Sender<Arc<ServerHBResp>>,
store_context: StoreContext<VV, VVI, EE, EEI>,
) where
VV: 'static + Vertex,
VVI: 'static + Iterator<Item = VV> + Send,
EE: 'static + Edge,
EEI: 'static + Iterator<Item = EE> + Send,
{
// build hb information
let mut hb_providers = Vec::new();
let mut hb_resp_senders = Vec::new();
let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| {
server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone()));
server_hb_req
.mut_endpoint()
.set_runtimCtrlAndAsyncPort(gaia_service_port as i32);
};
hb_providers.push(Box::new(hb_provider));
hb_resp_senders.push(hb_resp_sender);
let store_config_clone = store_config.clone();
init_with_rpc_service(
store_config_clone,
hb_providers,
hb_resp_senders,
store_context,
);
}
fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq {
let hb_req = runtime_info.lock().expect("Lock runtime hb req failed");
let mut runtime_req = RuntimeHBReq::new();
runtime_req.set_serverStatus(hb_req.get_server_status());
runtime_req.set_runtimePort(hb_req.get_server_port() as i32);
runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process());
runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec());
debug!("Build runtime request {:?} in heartbeat", &runtime_req);
runtime_req
}
/// return: (aliveId, partition assignments)
fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) {
use maxgraph_common::proto::data_grpc::*;
use maxgraph_common::util::ip;
use maxgraph_server::client::ZKClient;
let zk_url = format!("{}/{}", config.zk_url, config.graph_name);
let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth());
let addr = zk.get_coordinator_addr();
let channel =
ChannelBuilder::new(Arc::new(EnvBuilder::new().build())).connect(addr.to_string().as_str());
let client = ServerDataApiClient::new(channel);
let mut request = GetExecutorAliveIdRequest::new();
request.set_serverId(config.worker_id);
request.set_ip(ip::get_local_ip());
let response = client.get_executor_alive_id(&request).unwrap();
let alive_id = response.get_aliveId();
let mut request = GetPartitionAssignmentRequest::new();
request.set_serverId(config.worker_id);
let response = client.get_partition_assignment(&request).unwrap();
let partitions = response.get_partitionId().to_vec();
(alive_id, partitions)
}
pub struct GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
// mapping of partition id -> worker id
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
// mapping of worker id -> partition list
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
rpc_runtime: Runtime,
}
impl<V, VI, E, EI> GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
pub fn new(
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
) -> GaiaService<V, VI, E, EI> {
GaiaService {
store_config,
graph,
partition_manager,
partition_worker_mapping,
worker_partition_list_mapping,
rpc_runtime: Runtime::new().unwrap(),
}
}
pub fn start_rpc_service(&self) -> (String, u16) |
}
| {
let rpc_port = self.rpc_runtime.block_on(async {
let query_vineyard = QueryVineyard::new(
self.graph.clone(),
self.partition_manager.clone(),
self.partition_worker_mapping.clone(),
self.worker_partition_list_mapping.clone(),
self.store_config.worker_num as usize,
self.store_config.worker_id as u64,
);
let job_compiler = query_vineyard.initialize_job_compiler();
let service = Service::new(job_compiler);
let addr = format!("{}:{}", "0.0.0.0", self.store_config.rpc_port);
let local_addr = start_rpc_server(addr.parse().unwrap(), service, true, false)
.await
.unwrap();
local_addr.port()
});
let ip = get_local_ip();
info!("start rpc server on {} {}", ip, rpc_port);
(ip, rpc_port)
} | identifier_body |
gaia_executor.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
#![allow(bare_trait_objects)]
extern crate futures;
extern crate grpcio;
#[macro_use]
extern crate log;
extern crate gaia_pegasus;
extern crate gs_gremlin;
extern crate log4rs;
extern crate maxgraph_common;
extern crate maxgraph_runtime;
extern crate maxgraph_server;
extern crate maxgraph_store;
extern crate pegasus;
extern crate pegasus_server;
extern crate protobuf;
extern crate structopt;
use gaia_runtime::server::init_with_rpc_service;
use gaia_runtime::server::manager::GaiaServerManager;
use grpcio::ChannelBuilder;
use grpcio::EnvBuilder;
use gs_gremlin::{InitializeJobCompiler, QueryVineyard};
use maxgraph_common::proto::data::*;
use maxgraph_common::proto::hb::*;
use maxgraph_common::proto::query_flow::*;
use maxgraph_common::util;
use maxgraph_common::util::get_local_ip;
use maxgraph_common::util::log4rs::init_log4rs;
use maxgraph_runtime::server::manager::*;
use maxgraph_runtime::server::RuntimeInfo;
use maxgraph_server::StoreContext;
use maxgraph_store::api::graph_partition::GraphPartitionManager;
use maxgraph_store::api::prelude::*;
use maxgraph_store::config::{StoreConfig, VINEYARD_GRAPH};
use pegasus_server::rpc::start_rpc_server;
use pegasus_server::service::Service;
use protobuf::Message;
use std::collections::HashMap;
use std::env;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;
fn main() {
if let Some(_) = env::args().find(|arg| arg == "--show-build-info") |
init_log4rs();
let mut store_config = {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 6 && args[1] == "--config" {
let mut store_config = StoreConfig::init_from_file(&args[2], &args[4]);
if args.len() == 6 {
store_config.graph_name = (&args[5]).to_string();
}
store_config
} else {
StoreConfig::init()
}
};
let (alive_id, partitions) = get_init_info(&store_config);
info!("alive_id: {:?}, partitions: {:?}", alive_id, partitions);
store_config.update_alive_id(alive_id);
info!("{:?}", store_config);
let worker_num = store_config.timely_worker_per_process;
let store_config = Arc::new(store_config);
if store_config.graph_type.to_lowercase().eq(VINEYARD_GRAPH) {
if cfg!(target_os = "linux") {
info!(
"Start executor with vineyard graph object id {:?}",
store_config.vineyard_graph_id
);
use maxgraph_runtime::store::ffi::FFIGraphStore;
let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32);
let partition_manager = ffi_store.get_partition_manager();
run_main(
store_config,
Arc::new(ffi_store),
Arc::new(partition_manager),
);
} else {
unimplemented!("Mac not support vineyard graph")
}
} else {
unimplemented!("only start vineyard graph from executor")
}
}
fn run_main<V, VI, E, EI>(
store_config: Arc<StoreConfig>,
graph: Arc<GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<GraphPartitionManager>,
) where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
let process_partition_list = partition_manager.get_process_partition_list();
info!("process_partition_list: {:?}", process_partition_list);
let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(
store_config.timely_worker_per_process,
process_partition_list,
)));
let runtime_info_clone = runtime_info.clone();
let (hb_resp_sender, hb_resp_receiver) = channel();
let signal = Arc::new(AtomicBool::new(false));
let gaia_server_manager =
GaiaServerManager::new(hb_resp_receiver, runtime_info, signal.clone());
let partition_worker_mapping = gaia_server_manager.get_partition_worker_mapping();
let worker_partition_list_mapping = gaia_server_manager.get_worker_partition_list_mapping();
let server_manager = Box::new(gaia_server_manager);
let _manager_guards = ServerManager::start_server(
server_manager,
store_config.clone(),
Box::new(recover_prepare),
)
.unwrap();
let gaia_service = GaiaService::new(
store_config.clone(),
graph.clone(),
partition_manager.clone(),
partition_worker_mapping,
worker_partition_list_mapping,
);
let (_, gaia_rpc_service_port) = gaia_service.start_rpc_service();
let store_context = StoreContext::new(graph, partition_manager);
start_hb_rpc_service(
runtime_info_clone,
store_config,
gaia_rpc_service_port,
hb_resp_sender,
store_context,
);
thread::sleep(Duration::from_secs(u64::max_value()));
}
fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String> {
::protobuf::parse_from_bytes::<QueryFlow>(prepared)
.map_err(|err| err.to_string())
.and_then(move |desc| {
info!("parse {} bytes to {:?} ", prepared.len(), desc);
Ok(desc.write_to_bytes().expect("query flow to bytes"))
})
}
fn start_hb_rpc_service<VV, VVI, EE, EEI>(
runtime_info: Arc<Mutex<RuntimeInfo>>,
store_config: Arc<StoreConfig>,
gaia_service_port: u16,
hb_resp_sender: Sender<Arc<ServerHBResp>>,
store_context: StoreContext<VV, VVI, EE, EEI>,
) where
VV: 'static + Vertex,
VVI: 'static + Iterator<Item = VV> + Send,
EE: 'static + Edge,
EEI: 'static + Iterator<Item = EE> + Send,
{
// build hb information
let mut hb_providers = Vec::new();
let mut hb_resp_senders = Vec::new();
let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| {
server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone()));
server_hb_req
.mut_endpoint()
.set_runtimCtrlAndAsyncPort(gaia_service_port as i32);
};
hb_providers.push(Box::new(hb_provider));
hb_resp_senders.push(hb_resp_sender);
let store_config_clone = store_config.clone();
init_with_rpc_service(
store_config_clone,
hb_providers,
hb_resp_senders,
store_context,
);
}
fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq {
let hb_req = runtime_info.lock().expect("Lock runtime hb req failed");
let mut runtime_req = RuntimeHBReq::new();
runtime_req.set_serverStatus(hb_req.get_server_status());
runtime_req.set_runtimePort(hb_req.get_server_port() as i32);
runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process());
runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec());
debug!("Build runtime request {:?} in heartbeat", &runtime_req);
runtime_req
}
/// return: (aliveId, partiiton assignments)
fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) {
use maxgraph_common::proto::data_grpc::*;
use maxgraph_common::util::ip;
use maxgraph_server::client::ZKClient;
let zk_url = format!("{}/{}", config.zk_url, config.graph_name);
let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth());
let addr = zk.get_coordinator_addr();
let channel =
ChannelBuilder::new(Arc::new(EnvBuilder::new().build())).connect(addr.to_string().as_str());
let client = ServerDataApiClient::new(channel);
let mut request = GetExecutorAliveIdRequest::new();
request.set_serverId(config.worker_id);
request.set_ip(ip::get_local_ip());
let response = client.get_executor_alive_id(&request).unwrap();
let alive_id = response.get_aliveId();
let mut request = GetPartitionAssignmentRequest::new();
request.set_serverId(config.worker_id);
let response = client.get_partition_assignment(&request).unwrap();
let partitions = response.get_partitionId().to_vec();
(alive_id, partitions)
}
pub struct GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
// mapping of partition id -> worker id
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
// mapping of worker id -> partition list
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
rpc_runtime: Runtime,
}
impl<V, VI, E, EI> GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
pub fn new(
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
) -> GaiaService<V, VI, E, EI> {
GaiaService {
store_config,
graph,
partition_manager,
partition_worker_mapping,
worker_partition_list_mapping,
rpc_runtime: Runtime::new().unwrap(),
}
}
pub fn start_rpc_service(&self) -> (String, u16) {
let rpc_port = self.rpc_runtime.block_on(async {
let query_vineyard = QueryVineyard::new(
self.graph.clone(),
self.partition_manager.clone(),
self.partition_worker_mapping.clone(),
self.worker_partition_list_mapping.clone(),
self.store_config.worker_num as usize,
self.store_config.worker_id as u64,
);
let job_compiler = query_vineyard.initialize_job_compiler();
let service = Service::new(job_compiler);
let addr = format!("{}:{}", "0.0.0.0", self.store_config.rpc_port);
let local_addr = start_rpc_server(addr.parse().unwrap(), service, true, false)
.await
.unwrap();
local_addr.port()
});
let ip = get_local_ip();
info!("start rpc server on {} {}", ip, rpc_port);
(ip, rpc_port)
}
}
| {
util::get_build_info();
return;
} | conditional_block |
gaia_executor.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
#![allow(bare_trait_objects)]
extern crate futures;
extern crate grpcio;
#[macro_use]
extern crate log;
extern crate gaia_pegasus;
extern crate gs_gremlin;
extern crate log4rs;
extern crate maxgraph_common;
extern crate maxgraph_runtime;
extern crate maxgraph_server;
extern crate maxgraph_store;
extern crate pegasus;
extern crate pegasus_server;
extern crate protobuf;
extern crate structopt;
use gaia_runtime::server::init_with_rpc_service;
use gaia_runtime::server::manager::GaiaServerManager;
use grpcio::ChannelBuilder;
use grpcio::EnvBuilder;
use gs_gremlin::{InitializeJobCompiler, QueryVineyard};
use maxgraph_common::proto::data::*;
use maxgraph_common::proto::hb::*;
use maxgraph_common::proto::query_flow::*;
use maxgraph_common::util;
use maxgraph_common::util::get_local_ip;
use maxgraph_common::util::log4rs::init_log4rs;
use maxgraph_runtime::server::manager::*;
use maxgraph_runtime::server::RuntimeInfo;
use maxgraph_server::StoreContext;
use maxgraph_store::api::graph_partition::GraphPartitionManager;
use maxgraph_store::api::prelude::*;
use maxgraph_store::config::{StoreConfig, VINEYARD_GRAPH};
use pegasus_server::rpc::start_rpc_server;
use pegasus_server::service::Service;
use protobuf::Message;
use std::collections::HashMap;
use std::env;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;
fn main() {
if let Some(_) = env::args().find(|arg| arg == "--show-build-info") {
util::get_build_info();
return;
}
init_log4rs();
let mut store_config = {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 6 && args[1] == "--config" {
let mut store_config = StoreConfig::init_from_file(&args[2], &args[4]);
if args.len() == 6 {
store_config.graph_name = (&args[5]).to_string();
}
store_config
} else {
StoreConfig::init()
}
};
let (alive_id, partitions) = get_init_info(&store_config);
info!("alive_id: {:?}, partitions: {:?}", alive_id, partitions);
store_config.update_alive_id(alive_id);
info!("{:?}", store_config);
let worker_num = store_config.timely_worker_per_process;
let store_config = Arc::new(store_config);
if store_config.graph_type.to_lowercase().eq(VINEYARD_GRAPH) {
if cfg!(target_os = "linux") {
info!(
"Start executor with vineyard graph object id {:?}",
store_config.vineyard_graph_id
);
use maxgraph_runtime::store::ffi::FFIGraphStore;
let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32);
let partition_manager = ffi_store.get_partition_manager();
run_main(
store_config,
Arc::new(ffi_store),
Arc::new(partition_manager),
);
} else {
unimplemented!("Mac not support vineyard graph")
}
} else {
unimplemented!("only start vineyard graph from executor")
}
}
fn run_main<V, VI, E, EI>(
store_config: Arc<StoreConfig>,
graph: Arc<GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<GraphPartitionManager>,
) where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
let process_partition_list = partition_manager.get_process_partition_list();
info!("process_partition_list: {:?}", process_partition_list);
let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(
store_config.timely_worker_per_process,
process_partition_list,
)));
let runtime_info_clone = runtime_info.clone();
let (hb_resp_sender, hb_resp_receiver) = channel();
let signal = Arc::new(AtomicBool::new(false));
let gaia_server_manager =
GaiaServerManager::new(hb_resp_receiver, runtime_info, signal.clone());
let partition_worker_mapping = gaia_server_manager.get_partition_worker_mapping();
let worker_partition_list_mapping = gaia_server_manager.get_worker_partition_list_mapping();
let server_manager = Box::new(gaia_server_manager);
let _manager_guards = ServerManager::start_server( | server_manager,
store_config.clone(),
Box::new(recover_prepare),
)
.unwrap();
let gaia_service = GaiaService::new(
store_config.clone(),
graph.clone(),
partition_manager.clone(),
partition_worker_mapping,
worker_partition_list_mapping,
);
let (_, gaia_rpc_service_port) = gaia_service.start_rpc_service();
let store_context = StoreContext::new(graph, partition_manager);
start_hb_rpc_service(
runtime_info_clone,
store_config,
gaia_rpc_service_port,
hb_resp_sender,
store_context,
);
thread::sleep(Duration::from_secs(u64::max_value()));
}
fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String> {
::protobuf::parse_from_bytes::<QueryFlow>(prepared)
.map_err(|err| err.to_string())
.and_then(move |desc| {
info!("parse {} bytes to {:?} ", prepared.len(), desc);
Ok(desc.write_to_bytes().expect("query flow to bytes"))
})
}
fn start_hb_rpc_service<VV, VVI, EE, EEI>(
runtime_info: Arc<Mutex<RuntimeInfo>>,
store_config: Arc<StoreConfig>,
gaia_service_port: u16,
hb_resp_sender: Sender<Arc<ServerHBResp>>,
store_context: StoreContext<VV, VVI, EE, EEI>,
) where
VV: 'static + Vertex,
VVI: 'static + Iterator<Item = VV> + Send,
EE: 'static + Edge,
EEI: 'static + Iterator<Item = EE> + Send,
{
// build hb information
let mut hb_providers = Vec::new();
let mut hb_resp_senders = Vec::new();
let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| {
server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone()));
server_hb_req
.mut_endpoint()
.set_runtimCtrlAndAsyncPort(gaia_service_port as i32);
};
hb_providers.push(Box::new(hb_provider));
hb_resp_senders.push(hb_resp_sender);
let store_config_clone = store_config.clone();
init_with_rpc_service(
store_config_clone,
hb_providers,
hb_resp_senders,
store_context,
);
}
fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq {
let hb_req = runtime_info.lock().expect("Lock runtime hb req failed");
let mut runtime_req = RuntimeHBReq::new();
runtime_req.set_serverStatus(hb_req.get_server_status());
runtime_req.set_runtimePort(hb_req.get_server_port() as i32);
runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process());
runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec());
debug!("Build runtime request {:?} in heartbeat", &runtime_req);
runtime_req
}
/// return: (aliveId, partiiton assignments)
fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) {
use maxgraph_common::proto::data_grpc::*;
use maxgraph_common::util::ip;
use maxgraph_server::client::ZKClient;
let zk_url = format!("{}/{}", config.zk_url, config.graph_name);
let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth());
let addr = zk.get_coordinator_addr();
let channel =
ChannelBuilder::new(Arc::new(EnvBuilder::new().build())).connect(addr.to_string().as_str());
let client = ServerDataApiClient::new(channel);
let mut request = GetExecutorAliveIdRequest::new();
request.set_serverId(config.worker_id);
request.set_ip(ip::get_local_ip());
let response = client.get_executor_alive_id(&request).unwrap();
let alive_id = response.get_aliveId();
let mut request = GetPartitionAssignmentRequest::new();
request.set_serverId(config.worker_id);
let response = client.get_partition_assignment(&request).unwrap();
let partitions = response.get_partitionId().to_vec();
(alive_id, partitions)
}
pub struct GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
// mapping of partition id -> worker id
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
// mapping of worker id -> partition list
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
rpc_runtime: Runtime,
}
impl<V, VI, E, EI> GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
pub fn new(
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
) -> GaiaService<V, VI, E, EI> {
GaiaService {
store_config,
graph,
partition_manager,
partition_worker_mapping,
worker_partition_list_mapping,
rpc_runtime: Runtime::new().unwrap(),
}
}
pub fn start_rpc_service(&self) -> (String, u16) {
let rpc_port = self.rpc_runtime.block_on(async {
let query_vineyard = QueryVineyard::new(
self.graph.clone(),
self.partition_manager.clone(),
self.partition_worker_mapping.clone(),
self.worker_partition_list_mapping.clone(),
self.store_config.worker_num as usize,
self.store_config.worker_id as u64,
);
let job_compiler = query_vineyard.initialize_job_compiler();
let service = Service::new(job_compiler);
let addr = format!("{}:{}", "0.0.0.0", self.store_config.rpc_port);
let local_addr = start_rpc_server(addr.parse().unwrap(), service, true, false)
.await
.unwrap();
local_addr.port()
});
let ip = get_local_ip();
info!("start rpc server on {} {}", ip, rpc_port);
(ip, rpc_port)
}
} | random_line_split | |
gaia_executor.rs | //
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
#![allow(bare_trait_objects)]
extern crate futures;
extern crate grpcio;
#[macro_use]
extern crate log;
extern crate gaia_pegasus;
extern crate gs_gremlin;
extern crate log4rs;
extern crate maxgraph_common;
extern crate maxgraph_runtime;
extern crate maxgraph_server;
extern crate maxgraph_store;
extern crate pegasus;
extern crate pegasus_server;
extern crate protobuf;
extern crate structopt;
use gaia_runtime::server::init_with_rpc_service;
use gaia_runtime::server::manager::GaiaServerManager;
use grpcio::ChannelBuilder;
use grpcio::EnvBuilder;
use gs_gremlin::{InitializeJobCompiler, QueryVineyard};
use maxgraph_common::proto::data::*;
use maxgraph_common::proto::hb::*;
use maxgraph_common::proto::query_flow::*;
use maxgraph_common::util;
use maxgraph_common::util::get_local_ip;
use maxgraph_common::util::log4rs::init_log4rs;
use maxgraph_runtime::server::manager::*;
use maxgraph_runtime::server::RuntimeInfo;
use maxgraph_server::StoreContext;
use maxgraph_store::api::graph_partition::GraphPartitionManager;
use maxgraph_store::api::prelude::*;
use maxgraph_store::config::{StoreConfig, VINEYARD_GRAPH};
use pegasus_server::rpc::start_rpc_server;
use pegasus_server::service::Service;
use protobuf::Message;
use std::collections::HashMap;
use std::env;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
use std::time::Duration;
use tokio::runtime::Runtime;
fn main() {
if let Some(_) = env::args().find(|arg| arg == "--show-build-info") {
util::get_build_info();
return;
}
init_log4rs();
let mut store_config = {
let args: Vec<String> = std::env::args().collect();
if args.len() <= 6 && args[1] == "--config" {
let mut store_config = StoreConfig::init_from_file(&args[2], &args[4]);
if args.len() == 6 {
store_config.graph_name = (&args[5]).to_string();
}
store_config
} else {
StoreConfig::init()
}
};
let (alive_id, partitions) = get_init_info(&store_config);
info!("alive_id: {:?}, partitions: {:?}", alive_id, partitions);
store_config.update_alive_id(alive_id);
info!("{:?}", store_config);
let worker_num = store_config.timely_worker_per_process;
let store_config = Arc::new(store_config);
if store_config.graph_type.to_lowercase().eq(VINEYARD_GRAPH) {
if cfg!(target_os = "linux") {
info!(
"Start executor with vineyard graph object id {:?}",
store_config.vineyard_graph_id
);
use maxgraph_runtime::store::ffi::FFIGraphStore;
let ffi_store = FFIGraphStore::new(store_config.vineyard_graph_id, worker_num as i32);
let partition_manager = ffi_store.get_partition_manager();
run_main(
store_config,
Arc::new(ffi_store),
Arc::new(partition_manager),
);
} else {
unimplemented!("Mac not support vineyard graph")
}
} else {
unimplemented!("only start vineyard graph from executor")
}
}
fn | <V, VI, E, EI>(
store_config: Arc<StoreConfig>,
graph: Arc<GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<GraphPartitionManager>,
) where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
let process_partition_list = partition_manager.get_process_partition_list();
info!("process_partition_list: {:?}", process_partition_list);
let runtime_info = Arc::new(Mutex::new(RuntimeInfo::new(
store_config.timely_worker_per_process,
process_partition_list,
)));
let runtime_info_clone = runtime_info.clone();
let (hb_resp_sender, hb_resp_receiver) = channel();
let signal = Arc::new(AtomicBool::new(false));
let gaia_server_manager =
GaiaServerManager::new(hb_resp_receiver, runtime_info, signal.clone());
let partition_worker_mapping = gaia_server_manager.get_partition_worker_mapping();
let worker_partition_list_mapping = gaia_server_manager.get_worker_partition_list_mapping();
let server_manager = Box::new(gaia_server_manager);
let _manager_guards = ServerManager::start_server(
server_manager,
store_config.clone(),
Box::new(recover_prepare),
)
.unwrap();
let gaia_service = GaiaService::new(
store_config.clone(),
graph.clone(),
partition_manager.clone(),
partition_worker_mapping,
worker_partition_list_mapping,
);
let (_, gaia_rpc_service_port) = gaia_service.start_rpc_service();
let store_context = StoreContext::new(graph, partition_manager);
start_hb_rpc_service(
runtime_info_clone,
store_config,
gaia_rpc_service_port,
hb_resp_sender,
store_context,
);
thread::sleep(Duration::from_secs(u64::max_value()));
}
fn recover_prepare(prepared: &[u8]) -> Result<Vec<u8>, String> {
::protobuf::parse_from_bytes::<QueryFlow>(prepared)
.map_err(|err| err.to_string())
.and_then(move |desc| {
info!("parse {} bytes to {:?} ", prepared.len(), desc);
Ok(desc.write_to_bytes().expect("query flow to bytes"))
})
}
fn start_hb_rpc_service<VV, VVI, EE, EEI>(
runtime_info: Arc<Mutex<RuntimeInfo>>,
store_config: Arc<StoreConfig>,
gaia_service_port: u16,
hb_resp_sender: Sender<Arc<ServerHBResp>>,
store_context: StoreContext<VV, VVI, EE, EEI>,
) where
VV: 'static + Vertex,
VVI: 'static + Iterator<Item = VV> + Send,
EE: 'static + Edge,
EEI: 'static + Iterator<Item = EE> + Send,
{
// build hb information
let mut hb_providers = Vec::new();
let mut hb_resp_senders = Vec::new();
let hb_provider = move |ref mut server_hb_req: &mut ServerHBReq| {
server_hb_req.set_runtimeReq(build_runtime_req(runtime_info.clone()));
server_hb_req
.mut_endpoint()
.set_runtimCtrlAndAsyncPort(gaia_service_port as i32);
};
hb_providers.push(Box::new(hb_provider));
hb_resp_senders.push(hb_resp_sender);
let store_config_clone = store_config.clone();
init_with_rpc_service(
store_config_clone,
hb_providers,
hb_resp_senders,
store_context,
);
}
fn build_runtime_req(runtime_info: Arc<Mutex<RuntimeInfo>>) -> RuntimeHBReq {
let hb_req = runtime_info.lock().expect("Lock runtime hb req failed");
let mut runtime_req = RuntimeHBReq::new();
runtime_req.set_serverStatus(hb_req.get_server_status());
runtime_req.set_runtimePort(hb_req.get_server_port() as i32);
runtime_req.set_worker_num_per_process(hb_req.get_worker_num_per_process());
runtime_req.set_process_partition_list(hb_req.get_process_partition_list().to_vec());
debug!("Build runtime request {:?} in heartbeat", &runtime_req);
runtime_req
}
/// return: (aliveId, partiiton assignments)
fn get_init_info(config: &StoreConfig) -> (u64, Vec<PartitionId>) {
use maxgraph_common::proto::data_grpc::*;
use maxgraph_common::util::ip;
use maxgraph_server::client::ZKClient;
let zk_url = format!("{}/{}", config.zk_url, config.graph_name);
let zk = ZKClient::new(&zk_url, config.zk_timeout_ms, config.get_zk_auth());
let addr = zk.get_coordinator_addr();
let channel =
ChannelBuilder::new(Arc::new(EnvBuilder::new().build())).connect(addr.to_string().as_str());
let client = ServerDataApiClient::new(channel);
let mut request = GetExecutorAliveIdRequest::new();
request.set_serverId(config.worker_id);
request.set_ip(ip::get_local_ip());
let response = client.get_executor_alive_id(&request).unwrap();
let alive_id = response.get_aliveId();
let mut request = GetPartitionAssignmentRequest::new();
request.set_serverId(config.worker_id);
let response = client.get_partition_assignment(&request).unwrap();
let partitions = response.get_partitionId().to_vec();
(alive_id, partitions)
}
pub struct GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
// mapping of partition id -> worker id
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
// mapping of worker id -> partition list
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
rpc_runtime: Runtime,
}
impl<V, VI, E, EI> GaiaService<V, VI, E, EI>
where
V: Vertex + 'static,
VI: Iterator<Item = V> + Send + 'static,
E: Edge + 'static,
EI: Iterator<Item = E> + Send + 'static,
{
pub fn new(
store_config: Arc<StoreConfig>,
graph: Arc<dyn GlobalGraphQuery<V = V, E = E, VI = VI, EI = EI>>,
partition_manager: Arc<dyn GraphPartitionManager>,
partition_worker_mapping: Arc<RwLock<Option<HashMap<u32, u32>>>>,
worker_partition_list_mapping: Arc<RwLock<Option<HashMap<u32, Vec<u32>>>>>,
) -> GaiaService<V, VI, E, EI> {
GaiaService {
store_config,
graph,
partition_manager,
partition_worker_mapping,
worker_partition_list_mapping,
rpc_runtime: Runtime::new().unwrap(),
}
}
pub fn start_rpc_service(&self) -> (String, u16) {
let rpc_port = self.rpc_runtime.block_on(async {
let query_vineyard = QueryVineyard::new(
self.graph.clone(),
self.partition_manager.clone(),
self.partition_worker_mapping.clone(),
self.worker_partition_list_mapping.clone(),
self.store_config.worker_num as usize,
self.store_config.worker_id as u64,
);
let job_compiler = query_vineyard.initialize_job_compiler();
let service = Service::new(job_compiler);
let addr = format!("{}:{}", "0.0.0.0", self.store_config.rpc_port);
let local_addr = start_rpc_server(addr.parse().unwrap(), service, true, false)
.await
.unwrap();
local_addr.port()
});
let ip = get_local_ip();
info!("start rpc server on {} {}", ip, rpc_port);
(ip, rpc_port)
}
}
| run_main | identifier_name |
conn.go | // This code is either inspired from or taken directly from go's tls package
package noise
import (
"errors"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
)
// Conn represents a secured connection.
// It implements the net.Conn interface.
type Conn struct {
conn net.Conn
isClient bool
// handshake
config *Config // configuration passed to constructor
hs *HandshakeState
handshakeComplete bool
handshakeMutex sync.Mutex
// Authentication
isRemoteAuthenticated bool
// input/output
in, out *CipherState
inLock, outLock sync.Mutex
inputBuffer []byte
}
// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
// SetDeadline sets the read and write deadlines associated with the connection.
// A zero value for t means Read and Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
// SetReadDeadline sets the read deadline on the underlying connection.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
// SetWriteDeadline sets the write deadline on the underlying connection.
// A zero value for t means Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// Write writes data to the connection.
func (c *Conn) Write(b []byte) (int, error) {
// On one-way patterns only the client sends messages, so a server must not write
if hp := c.config.Pattern; !c.isClient && len(hp.Messages) < 2 {
return 0, errors.New("A server should not write on one-way patterns")
}
// Make sure to go through the handshake first
if err := c.Handshake(); err != nil {
return 0, err
}
// Lock the write socket
c.outLock.Lock()
defer c.outLock.Unlock()
// process the data in a loop
var n int
data := b
for len(data) > 0 {
// fragment the data
m := len(data)
if m > MaxMsgLen {
m = MaxMsgLen
}
// Encrypt
ciphertext := c.out.Encrypt(nil, nil, data[:m])
// header (length)
length := []byte{byte(len(ciphertext) >> 8), byte(len(ciphertext) % 256)}
// Send data
_, err := c.conn.Write(append(length, ciphertext...))
if err != nil {
return n, err
}
n += m
data = data[m:]
}
return n, nil
}
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *Conn) Read(b []byte) (int, error) {
var err error
// Make sure to go through the handshake first
if err = c.Handshake(); err != nil {
return 0, err
}
// Put this after Handshake, in case people were calling
// Read(nil) for the side effect of the Handshake.
if len(b) == 0 {
return 0, err
}
// If this is a one-way pattern, do some checks
	if hp := c.config.Pattern; c.isClient && len(hp.Messages) < 2 {
		return 0, errors.New("A client should not read on one-way patterns")
}
// Lock the read socket
c.inLock.Lock()
defer c.inLock.Unlock()
// read whatever there is to read in the buffer
readSoFar := 0
if len(c.inputBuffer) > 0 {
copy(b, c.inputBuffer)
if len(c.inputBuffer) >= len(b) {
c.inputBuffer = c.inputBuffer[len(b):]
return len(b), nil
}
readSoFar += len(c.inputBuffer)
c.inputBuffer = c.inputBuffer[:0]
}
// read header from socket
bufHeader, err := readBytes(c.conn, 2)
if err != nil {
return 0, err
}
length := (int(bufHeader[0]) << 8) | int(bufHeader[1])
if length > MaxMsgLen {
return 2, errors.New("Noise: Noise message received exceeds NoiseMessageLength")
}
// read noise message from socket
noiseMessage, err := readBytes(c.conn, length)
if err != nil {
return 0, err
}
// decrypt
plaintext, err := c.in.Decrypt(nil, nil, noiseMessage)
if err != nil {
return 0, err
}
// append to the input buffer
c.inputBuffer = append(c.inputBuffer, plaintext...)
// read whatever we can read
rest := len(b) - readSoFar
copy(b[readSoFar:], c.inputBuffer)
if len(c.inputBuffer) >= rest {
c.inputBuffer = c.inputBuffer[rest:]
return len(b), nil
}
// we haven't filled the buffer
readSoFar += len(c.inputBuffer)
c.inputBuffer = c.inputBuffer[:0]
return readSoFar, nil
}
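// readBytes is used by Read and Handshake but is not defined in this excerpt;
// it presumably lives elsewhere in the package. A minimal sketch, assuming it
// only needs to fill a buffer of exactly n bytes from the connection, could be:
//
//	func readBytes(r io.Reader, n int) ([]byte, error) {
//		buf := make([]byte, n)
//		if _, err := io.ReadFull(r, buf); err != nil {
//			return nil, err
//		}
//		return buf, nil
//	}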
// Close closes the connection.
func (c *Conn) Close() error {
return c.conn.Close()
}
// Noise-related functions
// Handshake runs the client or server handshake protocol if
// it has not yet been run.
// Most uses of this package need not call Handshake explicitly: | // the first Read or Write will call it automatically.
func (c *Conn) Handshake() (err error) {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
if c.handshakeComplete {
return nil
}
var remoteKeyPair *DHKey
if c.config.PeerStatic != nil {
if len(c.config.PeerStatic) != 32 {
return errors.New("noise: the provided remote key is not 32-byte")
}
remoteKeyPair = &DHKey{}
copy(remoteKeyPair.Public[:], c.config.PeerStatic)
}
c.hs, err = NewHandshakeState(*c.config)
if err != nil {
return err
}
// start handshake
var c1, c2 *CipherState
var state bool
var msg []byte
state = c.isClient
	for range c.config.Pattern.Messages {
if state {
msg, c1, c2, err = c.hs.WriteMessage(nil, nil)
if err != nil {
return err
}
// header (length)
length := []byte{byte(len(msg) >> 8), byte(len(msg) % 256)}
// write
_, err = c.conn.Write(append(length, msg...))
if err != nil {
return err
}
} else {
bufHeader, err := readBytes(c.conn, 2)
if err != nil {
return err
}
length := (int(bufHeader[0]) << 8) | int(bufHeader[1])
if length > MaxMsgLen {
return errors.New("Noise: Noise message received exceeds NoiseMessageLength")
}
msg, err = readBytes(c.conn, length)
if err != nil {
return err
}
_, c1, c2, err = c.hs.ReadMessage(nil, msg)
if err != nil {
return err
}
}
state = !state
}
if c.isClient {
c.out, c.in = c1, c2
} else {
c.out, c.in = c2, c1
}
c.handshakeComplete = true
return nil
}
// IsRemoteAuthenticated can be used to check if the remote peer has been
// properly authenticated. It serves no real purpose for the moment as the
// handshake will not go through if a peer is not properly authenticated in
// patterns where the peer needs to be authenticated.
func (c *Conn) IsRemoteAuthenticated() bool {
return c.isRemoteAuthenticated
}
// RemoteKey returns the static key of the remote peer.
// It is useful in case the static key is only transmitted during the handshake.
func (c *Conn) RemoteKey() ([]byte, error) {
if !c.handshakeComplete {
return nil, errors.New("handshake not completed")
}
return c.hs.rs, nil
}
// Server returns a new Noise server side connection
// using net.Conn as the underlying transport.
// The configuration config must be non-nil and must describe the
// handshake pattern and any key material that pattern requires.
func Server(conn net.Conn, config *Config) *Conn {
return &Conn{conn: conn, config: config, isClient: false}
}
// Client returns a new Noise client side connection
// using conn as the underlying transport.
// The config cannot be nil: it must describe the handshake pattern and
// supply any key material that pattern requires.
func Client(conn net.Conn, config *Config) *Conn {
return &Conn{conn: conn, config: config, isClient: true}
}
// A Listener implements a network Listener (net.Listener) for Noise connections.
type Listener struct {
net.Listener
config *Config
}
// Accept waits for and returns the next incoming Noise connection.
// The returned connection is of type *Conn.
func (l *Listener) Accept() (net.Conn, error) {
c, err := l.Listener.Accept()
if err != nil {
return &Conn{}, err
}
return Server(c, l.config), nil
}
// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.
func (l *Listener) Close() error {
return l.Listener.Close()
}
// Addr returns the listener's network address.
func (l *Listener) Addr() net.Addr {
return l.Listener.Addr()
}
// Listen creates a Noise Listener accepting connections on the
// given network address using net.Listen.
// The configuration config must be non-nil.
func Listen(network, laddr string, config *Config) (net.Listener, error) {
if config == nil {
return &Listener{}, errors.New("Noise: no Config set")
}
l, err := net.Listen(network, laddr)
if err != nil {
return &Listener{}, err
}
noiseListener := &Listener{}
noiseListener.Listener = l
noiseListener.config = config
return noiseListener, nil
}
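// Usage sketch (assumes a populated *Config named cfg): Listen and Accept mirror the
// net and crypto/tls APIs, so a minimal server loop looks like:
//
//	ln, err := Listen("tcp", ":7000", cfg)
//	if err != nil {
//		// handle the error
//	}
//	for {
//		conn, err := ln.Accept()
//		if err != nil {
//			continue
//		}
//		go func(c net.Conn) {
//			defer c.Close()
//			io.Copy(c, c) // echo back; the Noise handshake runs on the first Read/Write
//		}(conn)
//	}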
type timeoutError struct{}
func (timeoutError) Error() string { return "noise: DialWithDialer timed out" }
func (timeoutError) Timeout() bool { return true }
func (timeoutError) Temporary() bool { return true }
// DialWithDialer connects to the given network address using dialer.Dial and
// then initiates a Noise handshake, returning the resulting Noise connection. Any
// timeout or deadline given in the dialer apply to connection and Noise
// handshake as a whole.
func DialWithDialer(dialer *net.Dialer, network, addr, localAddr string, config *Config) (*Conn, error) {
// We want the Timeout and Deadline values from dialer to cover the
// whole process: TCP connection and Noise handshake. This means that we
// also need to start our own timers now.
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
}
// check Config
if config == nil {
return nil, errors.New("empty noise.Config")
}
// Dial the net.Conn first
var errChannel chan error
if timeout != 0 {
errChannel = make(chan error, 2)
time.AfterFunc(timeout, func() {
errChannel <- timeoutError{}
})
}
localAddrArray := strings.Split(localAddr, ":")
if len(localAddrArray) != 2 {
return nil, errors.New("invalid source address")
}
localPort, err := strconv.Atoi(localAddrArray[1])
if err != nil {
return nil, errors.New("invalid source port")
}
localAddress := net.ParseIP(localAddrArray[0])
dialer.LocalAddr = &net.TCPAddr{
IP: localAddress,
Port: localPort,
}
rawConn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}
// Create the noise.Conn
conn := Client(rawConn, config)
// Do the handshake
if timeout == 0 {
err = conn.Handshake()
} else {
go func() {
errChannel <- conn.Handshake()
}()
err = <-errChannel
}
if err != nil {
rawConn.Close()
return nil, err
}
return conn, nil
}
// Dial connects to the given network address using net.Dial
// and then initiates a Noise handshake, returning the resulting
// Noise connection.
func Dial(network, addr string, localAddr string, config *Config) (*Conn, error) {
return DialWithDialer(new(net.Dialer), network, addr, localAddr, config)
}
func readBytes(r io.Reader, n int) ([]byte, error) {
result := make([]byte, n)
offset := 0
for {
m, err := r.Read(result[offset:])
if err != nil {
return result, err
}
offset += m
if offset == n {
break
}
}
return result, nil
} | random_line_split | |
conn.go | // This code is either inspired by or taken directly from Go's tls package
package noise
import (
"errors"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
)
// Conn represents a secured connection.
// It implements the net.Conn interface.
type Conn struct {
conn net.Conn
isClient bool
// handshake
config *Config // configuration passed to constructor
hs *HandshakeState
handshakeComplete bool
handshakeMutex sync.Mutex
// Authentication
isRemoteAuthenticated bool
// input/output
in, out *CipherState
inLock, outLock sync.Mutex
inputBuffer []byte
}
// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
// SetDeadline sets the read and write deadlines associated with the connection.
// A zero value for t means Read and Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
// SetReadDeadline sets the read deadline on the underlying connection.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
// SetWriteDeadline sets the write deadline on the underlying connection.
// A zero value for t means Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// Write writes data to the connection.
func (c *Conn) Write(b []byte) (int, error) {
// One-way patterns carry messages only from the initiator, so the server never writes
if hp := c.config.Pattern; !c.isClient && len(hp.Messages) < 2 {
return 0, errors.New("A server should not write on one-way patterns")
}
// Make sure to go through the handshake first
if err := c.Handshake(); err != nil {
return 0, err
}
// Lock the write socket
c.outLock.Lock()
defer c.outLock.Unlock()
// process the data in a loop
var n int
data := b
for len(data) > 0 {
// fragment the data
m := len(data)
if m > MaxMsgLen {
m = MaxMsgLen
}
// Encrypt
ciphertext := c.out.Encrypt(nil, nil, data[:m])
// header (length)
length := []byte{byte(len(ciphertext) >> 8), byte(len(ciphertext) % 256)}
// Send data
_, err := c.conn.Write(append(length, ciphertext...))
if err != nil {
return n, err
}
n += m
data = data[m:]
}
return n, nil
}
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *Conn) Read(b []byte) (int, error) {
var err error
// Make sure to go through the handshake first
if err = c.Handshake(); err != nil {
return 0, err
}
// Put this after Handshake, in case people were calling
// Read(nil) for the side effect of the Handshake.
if len(b) == 0 {
return 0, err
}
// One-way patterns carry messages only from the initiator, so the client never reads
if hp := c.config.Pattern; c.isClient && len(hp.Messages) < 2 {
return 0, errors.New("A client should not read on one-way patterns")
}
// Lock the read socket
c.inLock.Lock()
defer c.inLock.Unlock()
// read whatever there is to read in the buffer
readSoFar := 0
if len(c.inputBuffer) > 0 {
copy(b, c.inputBuffer)
if len(c.inputBuffer) >= len(b) {
c.inputBuffer = c.inputBuffer[len(b):]
return len(b), nil
}
readSoFar += len(c.inputBuffer)
c.inputBuffer = c.inputBuffer[:0]
}
// read header from socket
bufHeader, err := readBytes(c.conn, 2)
if err != nil {
return 0, err
}
length := (int(bufHeader[0]) << 8) | int(bufHeader[1])
if length > MaxMsgLen {
return readSoFar, errors.New("Noise: message received exceeds MaxMsgLen")
}
// read noise message from socket
noiseMessage, err := readBytes(c.conn, length)
if err != nil {
return 0, err
}
// decrypt
plaintext, err := c.in.Decrypt(nil, nil, noiseMessage)
if err != nil {
return 0, err
}
// append to the input buffer
c.inputBuffer = append(c.inputBuffer, plaintext...)
// read whatever we can read
rest := len(b) - readSoFar
copy(b[readSoFar:], c.inputBuffer)
if len(c.inputBuffer) >= rest {
c.inputBuffer = c.inputBuffer[rest:]
return len(b), nil
}
// we haven't filled the buffer
readSoFar += len(c.inputBuffer)
c.inputBuffer = c.inputBuffer[:0]
return readSoFar, nil
}
// Close closes the connection.
func (c *Conn) Close() error {
return c.conn.Close()
}
// Noise-related functions
// Handshake runs the client or server handshake protocol if
// it has not yet been run.
// Most uses of this package need not call Handshake explicitly:
// the first Read or Write will call it automatically.
func (c *Conn) Handshake() (err error) {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
if c.handshakeComplete {
return nil
}
var remoteKeyPair *DHKey
if c.config.PeerStatic != nil {
if len(c.config.PeerStatic) != 32 {
return errors.New("noise: the provided remote key is not 32-byte")
}
remoteKeyPair = &DHKey{}
copy(remoteKeyPair.Public[:], c.config.PeerStatic)
}
c.hs, err = NewHandshakeState(*c.config)
if err != nil {
return err
}
// start handshake
var c1, c2 *CipherState
var state bool
var msg []byte
state = c.isClient
for range c.config.Pattern.Messages {
if state {
msg, c1, c2, err = c.hs.WriteMessage(nil, nil)
if err != nil {
return err
}
// header (length)
length := []byte{byte(len(msg) >> 8), byte(len(msg) % 256)}
// write
_, err = c.conn.Write(append(length, msg...))
if err != nil {
return err
}
} else {
bufHeader, err := readBytes(c.conn, 2)
if err != nil {
return err
}
length := (int(bufHeader[0]) << 8) | int(bufHeader[1])
if length > MaxMsgLen {
return errors.New("Noise: Noise message received exceeds NoiseMessageLength")
}
msg, err = readBytes(c.conn, length)
if err != nil {
return err
}
_, c1, c2, err = c.hs.ReadMessage(nil, msg)
if err != nil {
return err
}
}
state = !state
}
if c.isClient {
c.out, c.in = c1, c2
} else {
c.out, c.in = c2, c1
}
c.handshakeComplete = true
return nil
}
// IsRemoteAuthenticated can be used to check if the remote peer has been
// properly authenticated. It serves no real purpose for the moment as the
// handshake will not go through if a peer is not properly authenticated in
// patterns where the peer needs to be authenticated.
func (c *Conn) | () bool {
return c.isRemoteAuthenticated
}
// RemoteKey returns the static key of the remote peer.
// It is useful in case the static key is only transmitted during the handshake.
func (c *Conn) RemoteKey() ([]byte, error) {
if !c.handshakeComplete {
return nil, errors.New("handshake not completed")
}
return c.hs.rs, nil
}
// Server returns a new Noise server side connection
// using net.Conn as the underlying transport.
// The configuration config must be non-nil and must describe the
// handshake pattern and any key material that pattern requires.
func Server(conn net.Conn, config *Config) *Conn {
return &Conn{conn: conn, config: config, isClient: false}
}
// Client returns a new Noise client side connection
// using conn as the underlying transport.
// The config cannot be nil: it must describe the handshake pattern and
// supply any key material that pattern requires.
func Client(conn net.Conn, config *Config) *Conn {
return &Conn{conn: conn, config: config, isClient: true}
}
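// Usage sketch (assumes a *Config named cfg prepared for the chosen handshake pattern):
// Server and Client only wrap an existing net.Conn; the Noise handshake itself is
// deferred until Handshake, or the first Read or Write, is called.
//
//	tcpConn, err := net.Dial("tcp", "127.0.0.1:7000")
//	if err != nil {
//		// handle the error
//	}
//	nc := Client(tcpConn, cfg)
//	if err := nc.Handshake(); err != nil {
//		// handle the error
//	}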
// A Listener implements a network Listener (net.Listener) for Noise connections.
type Listener struct {
net.Listener
config *Config
}
// Accept waits for and returns the next incoming Noise connection.
// The returned connection is of type *Conn.
func (l *Listener) Accept() (net.Conn, error) {
c, err := l.Listener.Accept()
if err != nil {
return &Conn{}, err
}
return Server(c, l.config), nil
}
// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.
func (l *Listener) Close() error {
return l.Listener.Close()
}
// Addr returns the listener's network address.
func (l *Listener) Addr() net.Addr {
return l.Listener.Addr()
}
// Listen creates a Noise Listener accepting connections on the
// given network address using net.Listen.
// The configuration config must be non-nil.
func Listen(network, laddr string, config *Config) (net.Listener, error) {
if config == nil {
return &Listener{}, errors.New("Noise: no Config set")
}
l, err := net.Listen(network, laddr)
if err != nil {
return &Listener{}, err
}
noiseListener := &Listener{}
noiseListener.Listener = l
noiseListener.config = config
return noiseListener, nil
}
type timeoutError struct{}
func (timeoutError) Error() string { return "noise: DialWithDialer timed out" }
func (timeoutError) Timeout() bool { return true }
func (timeoutError) Temporary() bool { return true }
// DialWithDialer connects to the given network address using dialer.Dial and
// then initiates a Noise handshake, returning the resulting Noise connection. Any
// timeout or deadline given in the dialer apply to connection and Noise
// handshake as a whole.
func DialWithDialer(dialer *net.Dialer, network, addr, localAddr string, config *Config) (*Conn, error) {
// We want the Timeout and Deadline values from dialer to cover the
// whole process: TCP connection and Noise handshake. This means that we
// also need to start our own timers now.
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
}
// check Config
if config == nil {
return nil, errors.New("empty noise.Config")
}
// Dial the net.Conn first
var errChannel chan error
if timeout != 0 {
errChannel = make(chan error, 2)
time.AfterFunc(timeout, func() {
errChannel <- timeoutError{}
})
}
localAddrArray := strings.Split(localAddr, ":")
if len(localAddrArray) != 2 {
return nil, errors.New("invalid source address")
}
localPort, err := strconv.Atoi(localAddrArray[1])
if err != nil {
return nil, errors.New("invalid source port")
}
localAddress := net.ParseIP(localAddrArray[0])
dialer.LocalAddr = &net.TCPAddr{
IP: localAddress,
Port: localPort,
}
rawConn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}
// Create the noise.Conn
conn := Client(rawConn, config)
// Do the handshake
if timeout == 0 {
err = conn.Handshake()
} else {
go func() {
errChannel <- conn.Handshake()
}()
err = <-errChannel
}
if err != nil {
rawConn.Close()
return nil, err
}
return conn, nil
}
// Dial connects to the given network address using net.Dial
// and then initiates a Noise handshake, returning the resulting
// Noise connection.
func Dial(network, addr string, localAddr string, config *Config) (*Conn, error) {
return DialWithDialer(new(net.Dialer), network, addr, localAddr, config)
}
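// Usage sketch (hypothetical addresses): DialWithDialer expects localAddr as a literal
// "ip:port" pair, which it splits on ":", so IPv6 literals are not supported as written.
//
//	conn, err := Dial("tcp", "192.0.2.10:7000", "0.0.0.0:0", cfg)
//	if err != nil {
//		// handle the error
//	}
//	defer conn.Close()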
func readBytes(r io.Reader, n int) ([]byte, error) {
result := make([]byte, n)
offset := 0
for {
m, err := r.Read(result[offset:])
if err != nil {
return result, err
}
offset += m
if offset == n {
break
}
}
return result, nil
}
| IsRemoteAuthenticated | identifier_name |
conn.go | // This code is either inspired by or taken directly from Go's tls package
package noise
import (
"errors"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
)
// Conn represents a secured connection.
// It implements the net.Conn interface.
type Conn struct {
conn net.Conn
isClient bool
// handshake
config *Config // configuration passed to constructor
hs *HandshakeState
handshakeComplete bool
handshakeMutex sync.Mutex
// Authentication
isRemoteAuthenticated bool
// input/output
in, out *CipherState
inLock, outLock sync.Mutex
inputBuffer []byte
}
// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
// SetDeadline sets the read and write deadlines associated with the connection.
// A zero value for t means Read and Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
// SetReadDeadline sets the read deadline on the underlying connection.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
// SetWriteDeadline sets the write deadline on the underlying connection.
// A zero value for t means Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// Write writes data to the connection.
func (c *Conn) Write(b []byte) (int, error) {
// One-way patterns carry messages only from the initiator, so the server never writes
if hp := c.config.Pattern; !c.isClient && len(hp.Messages) < 2 {
return 0, errors.New("A server should not write on one-way patterns")
}
// Make sure to go through the handshake first
if err := c.Handshake(); err != nil {
return 0, err
}
// Lock the write socket
c.outLock.Lock()
defer c.outLock.Unlock()
// process the data in a loop
var n int
data := b
for len(data) > 0 {
// fragment the data
m := len(data)
if m > MaxMsgLen {
m = MaxMsgLen
}
// Encrypt
ciphertext := c.out.Encrypt(nil, nil, data[:m])
// header (length)
length := []byte{byte(len(ciphertext) >> 8), byte(len(ciphertext) % 256)}
// Send data
_, err := c.conn.Write(append(length, ciphertext...))
if err != nil {
return n, err
}
n += m
data = data[m:]
}
return n, nil
}
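// Illustrative sketch (not part of the original file): Write splits the plaintext into
// chunks of at most MaxMsgLen bytes before encrypting, so one call can emit several
// length-prefixed Noise messages. A hypothetical helper performing the same split:
func splitPlaintext(b []byte) [][]byte {
	var chunks [][]byte
	for len(b) > 0 {
		m := len(b)
		if m > MaxMsgLen {
			m = MaxMsgLen
		}
		chunks = append(chunks, b[:m])
		b = b[m:]
	}
	return chunks
}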
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *Conn) Read(b []byte) (int, error) {
var err error
// Make sure to go through the handshake first
if err = c.Handshake(); err != nil {
return 0, err
}
// Put this after Handshake, in case people were calling
// Read(nil) for the side effect of the Handshake.
if len(b) == 0 {
return 0, err
}
// One-way patterns carry messages only from the initiator, so the client never reads
if hp := c.config.Pattern; c.isClient && len(hp.Messages) < 2 {
return 0, errors.New("A client should not read on one-way patterns")
}
// Lock the read socket
c.inLock.Lock()
defer c.inLock.Unlock()
// read whatever there is to read in the buffer
readSoFar := 0
if len(c.inputBuffer) > 0 {
copy(b, c.inputBuffer)
if len(c.inputBuffer) >= len(b) {
c.inputBuffer = c.inputBuffer[len(b):]
return len(b), nil
}
readSoFar += len(c.inputBuffer)
c.inputBuffer = c.inputBuffer[:0]
}
// read header from socket
bufHeader, err := readBytes(c.conn, 2)
if err != nil {
return 0, err
}
length := (int(bufHeader[0]) << 8) | int(bufHeader[1])
if length > MaxMsgLen {
return readSoFar, errors.New("Noise: message received exceeds MaxMsgLen")
}
// read noise message from socket
noiseMessage, err := readBytes(c.conn, length)
if err != nil {
return 0, err
}
// decrypt
plaintext, err := c.in.Decrypt(nil, nil, noiseMessage)
if err != nil {
return 0, err
}
// append to the input buffer
c.inputBuffer = append(c.inputBuffer, plaintext...)
// read whatever we can read
rest := len(b) - readSoFar
copy(b[readSoFar:], c.inputBuffer)
if len(c.inputBuffer) >= rest {
c.inputBuffer = c.inputBuffer[rest:]
return len(b), nil
}
// we haven't filled the buffer
readSoFar += len(c.inputBuffer)
c.inputBuffer = c.inputBuffer[:0]
return readSoFar, nil
}
// Close closes the connection.
func (c *Conn) Close() error |
// Noise-related functions
// Handshake runs the client or server handshake protocol if
// it has not yet been run.
// Most uses of this package need not call Handshake explicitly:
// the first Read or Write will call it automatically.
func (c *Conn) Handshake() (err error) {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
if c.handshakeComplete {
return nil
}
var remoteKeyPair *DHKey
if c.config.PeerStatic != nil {
if len(c.config.PeerStatic) != 32 {
return errors.New("noise: the provided remote key is not 32-byte")
}
remoteKeyPair = &DHKey{}
copy(remoteKeyPair.Public[:], c.config.PeerStatic)
}
c.hs, err = NewHandshakeState(*c.config)
if err != nil {
return err
}
// start handshake
var c1, c2 *CipherState
var state bool
var msg []byte
state = c.isClient
for range c.config.Pattern.Messages {
if state {
msg, c1, c2, err = c.hs.WriteMessage(nil, nil)
if err != nil {
return err
}
// header (length)
length := []byte{byte(len(msg) >> 8), byte(len(msg) % 256)}
// write
_, err = c.conn.Write(append(length, msg...))
if err != nil {
return err
}
} else {
bufHeader, err := readBytes(c.conn, 2)
if err != nil {
return err
}
length := (int(bufHeader[0]) << 8) | int(bufHeader[1])
if length > MaxMsgLen {
return errors.New("Noise: Noise message received exceeds NoiseMessageLength")
}
msg, err = readBytes(c.conn, length)
if err != nil {
return err
}
_, c1, c2, err = c.hs.ReadMessage(nil, msg)
if err != nil {
return err
}
}
state = !state
}
if c.isClient {
c.out, c.in = c1, c2
} else {
c.out, c.in = c2, c1
}
c.handshakeComplete = true
return nil
}
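// Illustrative note (sketch, not normative): for an interactive pattern such as Noise XX
// the loop above alternates writer and reader each round. From the client's side:
//
//	round 0: WriteMessage, then send "-> e"
//	round 1: receive, then ReadMessage "<- e, ee, s, es"
//	round 2: WriteMessage, then send "-> s, se"
//
// After the final message both CipherStates are non-nil and become c.out and c.in.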
// IsRemoteAuthenticated can be used to check if the remote peer has been
// properly authenticated. It serves no real purpose for the moment as the
// handshake will not go through if a peer is not properly authenticated in
// patterns where the peer needs to be authenticated.
func (c *Conn) IsRemoteAuthenticated() bool {
return c.isRemoteAuthenticated
}
// RemoteKey returns the static key of the remote peer.
// It is useful in case the static key is only transmitted during the handshake.
func (c *Conn) RemoteKey() ([]byte, error) {
if !c.handshakeComplete {
return nil, errors.New("handshake not completed")
}
return c.hs.rs, nil
}
// Server returns a new Noise server side connection
// using net.Conn as the underlying transport.
// The configuration config must be non-nil and must describe the
// handshake pattern and any key material that pattern requires.
func Server(conn net.Conn, config *Config) *Conn {
return &Conn{conn: conn, config: config, isClient: false}
}
// Client returns a new Noise client side connection
// using conn as the underlying transport.
// The config cannot be nil: it must describe the handshake pattern and
// supply any key material that pattern requires.
func Client(conn net.Conn, config *Config) *Conn {
return &Conn{conn: conn, config: config, isClient: true}
}
// A Listener implements a network Listener (net.Listener) for Noise connections.
type Listener struct {
net.Listener
config *Config
}
// Accept waits for and returns the next incoming Noise connection.
// The returned connection is of type *Conn.
func (l *Listener) Accept() (net.Conn, error) {
c, err := l.Listener.Accept()
if err != nil {
return &Conn{}, err
}
return Server(c, l.config), nil
}
// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.
func (l *Listener) Close() error {
return l.Listener.Close()
}
// Addr returns the listener's network address.
func (l *Listener) Addr() net.Addr {
return l.Listener.Addr()
}
// Listen creates a Noise Listener accepting connections on the
// given network address using net.Listen.
// The configuration config must be non-nil.
func Listen(network, laddr string, config *Config) (net.Listener, error) {
if config == nil {
return &Listener{}, errors.New("Noise: no Config set")
}
l, err := net.Listen(network, laddr)
if err != nil {
return &Listener{}, err
}
noiseListener := &Listener{}
noiseListener.Listener = l
noiseListener.config = config
return noiseListener, nil
}
type timeoutError struct{}
func (timeoutError) Error() string { return "noise: DialWithDialer timed out" }
func (timeoutError) Timeout() bool { return true }
func (timeoutError) Temporary() bool { return true }
// DialWithDialer connects to the given network address using dialer.Dial and
// then initiates a Noise handshake, returning the resulting Noise connection. Any
// timeout or deadline given in the dialer apply to connection and Noise
// handshake as a whole.
func DialWithDialer(dialer *net.Dialer, network, addr, localAddr string, config *Config) (*Conn, error) {
// We want the Timeout and Deadline values from dialer to cover the
// whole process: TCP connection and Noise handshake. This means that we
// also need to start our own timers now.
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
}
// check Config
if config == nil {
return nil, errors.New("empty noise.Config")
}
// Dial the net.Conn first
var errChannel chan error
if timeout != 0 {
errChannel = make(chan error, 2)
time.AfterFunc(timeout, func() {
errChannel <- timeoutError{}
})
}
localAddrArray := strings.Split(localAddr, ":")
if len(localAddrArray) != 2 {
return nil, errors.New("invalid source address")
}
localPort, err := strconv.Atoi(localAddrArray[1])
if err != nil {
return nil, errors.New("invalid source port")
}
localAddress := net.ParseIP(localAddrArray[0])
dialer.LocalAddr = &net.TCPAddr{
IP: localAddress,
Port: localPort,
}
rawConn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}
// Create the noise.Conn
conn := Client(rawConn, config)
// Do the handshake
if timeout == 0 {
err = conn.Handshake()
} else {
go func() {
errChannel <- conn.Handshake()
}()
err = <-errChannel
}
if err != nil {
rawConn.Close()
return nil, err
}
return conn, nil
}
// Dial connects to the given network address using net.Dial
// and then initiates a Noise handshake, returning the resulting
// Noise connection.
func Dial(network, addr string, localAddr string, config *Config) (*Conn, error) {
return DialWithDialer(new(net.Dialer), network, addr, localAddr, config)
}
func readBytes(r io.Reader, n int) ([]byte, error) {
result := make([]byte, n)
offset := 0
for {
m, err := r.Read(result[offset:])
if err != nil {
return result, err
}
offset += m
if offset == n {
break
}
}
return result, nil
}
| {
return c.conn.Close()
} | identifier_body |
conn.go | // This code is either inspired by or taken directly from Go's tls package
package noise
import (
"errors"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
)
// Conn represents a secured connection.
// It implements the net.Conn interface.
type Conn struct {
conn net.Conn
isClient bool
// handshake
config *Config // configuration passed to constructor
hs *HandshakeState
handshakeComplete bool
handshakeMutex sync.Mutex
// Authentication
isRemoteAuthenticated bool
// input/output
in, out *CipherState
inLock, outLock sync.Mutex
inputBuffer []byte
}
// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
// SetDeadline sets the read and write deadlines associated with the connection.
// A zero value for t means Read and Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
// SetReadDeadline sets the read deadline on the underlying connection.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
// SetWriteDeadline sets the write deadline on the underlying connection.
// A zero value for t means Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// Write writes data to the connection.
func (c *Conn) Write(b []byte) (int, error) {
// One-way patterns carry messages only from the initiator, so the server never writes
if hp := c.config.Pattern; !c.isClient && len(hp.Messages) < 2 {
return 0, errors.New("A server should not write on one-way patterns")
}
// Make sure to go through the handshake first
if err := c.Handshake(); err != nil {
return 0, err
}
// Lock the write socket
c.outLock.Lock()
defer c.outLock.Unlock()
// process the data in a loop
var n int
data := b
for len(data) > 0 {
// fragment the data
m := len(data)
if m > MaxMsgLen {
m = MaxMsgLen
}
// Encrypt
ciphertext := c.out.Encrypt(nil, nil, data[:m])
// header (length)
length := []byte{byte(len(ciphertext) >> 8), byte(len(ciphertext) % 256)}
// Send data
_, err := c.conn.Write(append(length, ciphertext...))
if err != nil {
return n, err
}
n += m
data = data[m:]
}
return n, nil
}
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *Conn) Read(b []byte) (int, error) {
var err error
// Make sure to go through the handshake first
if err = c.Handshake(); err != nil {
return 0, err
}
// Put this after Handshake, in case people were calling
// Read(nil) for the side effect of the Handshake.
if len(b) == 0 {
return 0, err
}
// One-way patterns carry messages only from the initiator, so the client never reads
if hp := c.config.Pattern; c.isClient && len(hp.Messages) < 2 {
return 0, errors.New("A client should not read on one-way patterns")
}
// Lock the read socket
c.inLock.Lock()
defer c.inLock.Unlock()
// read whatever there is to read in the buffer
readSoFar := 0
if len(c.inputBuffer) > 0 {
copy(b, c.inputBuffer)
if len(c.inputBuffer) >= len(b) {
c.inputBuffer = c.inputBuffer[len(b):]
return len(b), nil
}
readSoFar += len(c.inputBuffer)
c.inputBuffer = c.inputBuffer[:0]
}
// read header from socket
bufHeader, err := readBytes(c.conn, 2)
if err != nil {
return 0, err
}
length := (int(bufHeader[0]) << 8) | int(bufHeader[1])
if length > MaxMsgLen {
return readSoFar, errors.New("Noise: message received exceeds MaxMsgLen")
}
// read noise message from socket
noiseMessage, err := readBytes(c.conn, length)
if err != nil {
return 0, err
}
// decrypt
plaintext, err := c.in.Decrypt(nil, nil, noiseMessage)
if err != nil {
return 0, err
}
// append to the input buffer
c.inputBuffer = append(c.inputBuffer, plaintext...)
// read whatever we can read
rest := len(b) - readSoFar
copy(b[readSoFar:], c.inputBuffer)
if len(c.inputBuffer) >= rest {
c.inputBuffer = c.inputBuffer[rest:]
return len(b), nil
}
// we haven't filled the buffer
readSoFar += len(c.inputBuffer)
c.inputBuffer = c.inputBuffer[:0]
return readSoFar, nil
}
// Close closes the connection.
func (c *Conn) Close() error {
return c.conn.Close()
}
// Noise-related functions
// Handshake runs the client or server handshake protocol if
// it has not yet been run.
// Most uses of this package need not call Handshake explicitly:
// the first Read or Write will call it automatically.
func (c *Conn) Handshake() (err error) {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
if c.handshakeComplete {
return nil
}
var remoteKeyPair *DHKey
if c.config.PeerStatic != nil {
if len(c.config.PeerStatic) != 32 {
return errors.New("noise: the provided remote key is not 32-byte")
}
remoteKeyPair = &DHKey{}
copy(remoteKeyPair.Public[:], c.config.PeerStatic)
}
c.hs, err = NewHandshakeState(*c.config)
if err != nil {
return err
}
// start handshake
var c1, c2 *CipherState
var state bool
var msg []byte
state = c.isClient
for range c.config.Pattern.Messages {
if state {
msg, c1, c2, err = c.hs.WriteMessage(nil, nil)
if err != nil {
return err
}
// header (length)
length := []byte{byte(len(msg) >> 8), byte(len(msg) % 256)}
// write
_, err = c.conn.Write(append(length, msg...))
if err != nil {
return err
}
} else {
bufHeader, err := readBytes(c.conn, 2)
if err != nil {
return err
}
length := (int(bufHeader[0]) << 8) | int(bufHeader[1])
if length > MaxMsgLen {
return errors.New("Noise: Noise message received exceeds NoiseMessageLength")
}
msg, err = readBytes(c.conn, length)
if err != nil |
_, c1, c2, err = c.hs.ReadMessage(nil, msg)
if err != nil {
return err
}
}
state = !state
}
if c.isClient {
c.out, c.in = c1, c2
} else {
c.out, c.in = c2, c1
}
c.handshakeComplete = true
return nil
}
// IsRemoteAuthenticated can be used to check if the remote peer has been
// properly authenticated. It serves no real purpose for the moment as the
// handshake will not go through if a peer is not properly authenticated in
// patterns where the peer needs to be authenticated.
func (c *Conn) IsRemoteAuthenticated() bool {
return c.isRemoteAuthenticated
}
// RemoteKey returns the static key of the remote peer.
// It is useful in case the static key is only transmitted during the handshake.
func (c *Conn) RemoteKey() ([]byte, error) {
if !c.handshakeComplete {
return nil, errors.New("handshake not completed")
}
return c.hs.rs, nil
}
// Server returns a new Noise server side connection
// using net.Conn as the underlying transport.
// The configuration config must be non-nil and must describe the
// handshake pattern and any key material that pattern requires.
func Server(conn net.Conn, config *Config) *Conn {
return &Conn{conn: conn, config: config, isClient: false}
}
// Client returns a new Noise client side connection
// using conn as the underlying transport.
// The config cannot be nil: it must describe the handshake pattern and
// supply any key material that pattern requires.
func Client(conn net.Conn, config *Config) *Conn {
return &Conn{conn: conn, config: config, isClient: true}
}
// A Listener implements a network Listener (net.Listener) for Noise connections.
type Listener struct {
net.Listener
config *Config
}
// Accept waits for and returns the next incoming Noise connection.
// The returned connection is of type *Conn.
func (l *Listener) Accept() (net.Conn, error) {
c, err := l.Listener.Accept()
if err != nil {
return &Conn{}, err
}
return Server(c, l.config), nil
}
// Close closes the listener.
// Any blocked Accept operations will be unblocked and return errors.
func (l *Listener) Close() error {
return l.Listener.Close()
}
// Addr returns the listener's network address.
func (l *Listener) Addr() net.Addr {
return l.Listener.Addr()
}
// Listen creates a Noise Listener accepting connections on the
// given network address using net.Listen.
// The configuration config must be non-nil.
func Listen(network, laddr string, config *Config) (net.Listener, error) {
if config == nil {
return &Listener{}, errors.New("Noise: no Config set")
}
l, err := net.Listen(network, laddr)
if err != nil {
return &Listener{}, err
}
noiseListener := &Listener{}
noiseListener.Listener = l
noiseListener.config = config
return noiseListener, nil
}
type timeoutError struct{}
func (timeoutError) Error() string { return "noise: DialWithDialer timed out" }
func (timeoutError) Timeout() bool { return true }
func (timeoutError) Temporary() bool { return true }
// DialWithDialer connects to the given network address using dialer.Dial and
// then initiates a Noise handshake, returning the resulting Noise connection. Any
// timeout or deadline given in the dialer apply to connection and Noise
// handshake as a whole.
func DialWithDialer(dialer *net.Dialer, network, addr, localAddr string, config *Config) (*Conn, error) {
// We want the Timeout and Deadline values from dialer to cover the
// whole process: TCP connection and Noise handshake. This means that we
// also need to start our own timers now.
timeout := dialer.Timeout
if !dialer.Deadline.IsZero() {
deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
}
// check Config
if config == nil {
return nil, errors.New("empty noise.Config")
}
// Dial the net.Conn first
var errChannel chan error
if timeout != 0 {
errChannel = make(chan error, 2)
time.AfterFunc(timeout, func() {
errChannel <- timeoutError{}
})
}
localAddrArray := strings.Split(localAddr, ":")
if len(localAddrArray) != 2 {
return nil, errors.New("invalid source address")
}
localPort, err := strconv.Atoi(localAddrArray[1])
if err != nil {
return nil, errors.New("invalid source port")
}
localAddress := net.ParseIP(localAddrArray[0])
dialer.LocalAddr = &net.TCPAddr{
IP: localAddress,
Port: localPort,
}
rawConn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}
// Create the noise.Conn
conn := Client(rawConn, config)
// Do the handshake
if timeout == 0 {
err = conn.Handshake()
} else {
go func() {
errChannel <- conn.Handshake()
}()
err = <-errChannel
}
if err != nil {
rawConn.Close()
return nil, err
}
return conn, nil
}
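// Usage sketch (hypothetical values): the dialer's Timeout or Deadline covers both the
// TCP dial and the Noise handshake, mirroring crypto/tls.DialWithDialer.
//
//	d := &net.Dialer{Timeout: 5 * time.Second}
//	conn, err := DialWithDialer(d, "tcp", "192.0.2.10:7000", "0.0.0.0:0", cfg)
//	if err != nil {
//		// err may be the "noise: DialWithDialer timed out" timeoutError
//	}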
// Dial connects to the given network address using net.Dial
// and then initiates a Noise handshake, returning the resulting
// Noise connection.
func Dial(network, addr string, localAddr string, config *Config) (*Conn, error) {
return DialWithDialer(new(net.Dialer), network, addr, localAddr, config)
}
func readBytes(r io.Reader, n int) ([]byte, error) {
result := make([]byte, n)
offset := 0
for {
m, err := r.Read(result[offset:])
if err != nil {
return result, err
}
offset += m
if offset == n {
break
}
}
return result, nil
}
| {
return err
} | conditional_block |
barnacle_vcf.py | """
barnacle_vcf.py
Created by William Li
Copyright (c) 2012 Canada's Michael Smith Genome Sciences Centre. All rights reserved.
"""
import sys, os, time, datetime, commands, re
from optparse import OptionParser
sys.path.append("/projects/transabyss/trans-ABySS/v1.2.4/code")
from parsers.candidate_group_parser import CandidateGroupParserCls
from parsers.two_bit import TwoBitFileCls
def get_CHROM(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
breakpoint1 = breakpoint1.partition(':')
breakpoint2 = breakpoint2.partition(':')
chrom1 = breakpoint1[0]
chrom1 = chrom1[3:]
chrom2 = breakpoint2[0]
chrom2 = chrom2[3:]
return chrom1, chrom2
def get_POS(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
overlap = int(member.meta_fields['ctg_overlap'])
position1 = breakpoint1.partition(':')
position2 = breakpoint2.partition(':')
position1 = position1[2]
position2 = position2[2]
position1 = position1.partition('(')[0]
position2 = position2.partition('(')[0]
if overlap > 0:
if breakpoint2.endswith('(down)'):
position2 = int(position2)-overlap
if breakpoint1.endswith('(down)'):
position1 = int(position1)-overlap
return str(position1), str(position2)
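# Worked example (hypothetical coordinates): a breakpoint string such as
# "chr12:25398284(down)" is split with partition(':'):
#   >>> "chr12:25398284(down)".partition(':')
#   ('chr12', ':', '25398284(down)')
# get_CHROM keeps "12" (dropping the leading "chr") and get_POS keeps "25398284"
# (dropping the "(up)"/"(down)" suffix), shifted left by ctg_overlap for 'down' breakpoints.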
def get_REF(member, refseq):
pos1, pos2 = get_POS(member)
chrom1, chrom2 = get_CHROM(member)
seq1 = refseq.GetSequence(chrom1, int(pos1), int(pos1))
seq2 = refseq.GetSequence(chrom2, int(pos2), int(pos2))
return seq1, seq2
def get_ALT(member, refseq):
chrom1, chrom2 = get_CHROM(member)
pos1, pos2 = get_POS(member)
base1, base2 = get_REF(member, refseq)
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
overlap = int(member.meta_fields['ctg_overlap'])
if overlap > 0:
if breakpoint1.endswith('(down)') and breakpoint2.endswith('(down)'):
pos1 = str(int(pos1) + overlap)
pos2 = str(int(pos2) + overlap)
#both breakpoints at either the start or the end of the contig region
if breakpoint1.endswith('(up)') and breakpoint2.endswith('(up)'):
if overlap > 0:
pos1 = int(pos1)+overlap
pos2 = int(pos2)+overlap
alt1 = '['+chrom2+':'+str(pos2)+'['+base1
alt2 = '['+chrom1+':'+str(pos1)+'['+base2
return alt1, alt2
elif breakpoint1.endswith('(down)') and breakpoint2.endswith('(down)'):
alt1 = base1+']'+chrom2+':'+str(pos2)+']'
alt2 = base2+']'+chrom1+':'+str(pos1)+']'
return alt1, alt2
#one breakpoint is at the start of the contig region and other breakpoint is at the end
if breakpoint1.endswith('(up)'):
alt1 = ']'+chrom2+':'+pos2+']'+base1
alt2 = base2+'['+chrom1+':'+pos1+'['
else:
alt1 = base1+'['+chrom2+':'+pos2+'['
alt2 = ']'+chrom1+':'+pos1+']'+base2
return alt1, alt2
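# Worked example (hypothetical event): for breakpoint A chr9:133729451(down) and
# breakpoint B chr22:23632600(up) with reference bases 'A' and 'G', the branch above
# yields VCF 4.1 breakend (BND) ALT strings of the form
#   alt1 = 'A[22:23632600['
#   alt2 = ']9:133729451]G'
# i.e. square-bracket notation pointing each record at its mate's position.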
def get_QUAL():
return '.'
def get_FILT():
return 'PASS'
def get_INFO(member, id1, id2):
overlap = str(member.meta_fields['ctg_overlap'])
svtype = 'FND'
dp = int(member.avg_read_to_ctg_unique)
if int(overlap) > 0:
return 'SVTYPE='+svtype+';MATEID='+str(id2)+'b;CIPOS=0,'+overlap+';SR='+str(dp)+';CTG=', 'SVTYPE='+svtype+';MATEID='+str(id1)+'a;CIPOS=0,'+overlap+';SR='+str(dp)+';CTG='
else:
return 'SVTYPE='+svtype+';MATEID='+str(id2)+'b;SR='+str(dp)+';CTG=', 'SVTYPE='+svtype+';MATEID='+str(id1)+'a;SR='+str(dp)+';CTG='
#header output method
def write_header(GIN_user, GIN_pass, LIMS_user, LIMS_pass, refseq_flag, library, filetype_flag, out_file, contig=None):
#file format
out_file.write('##fileformat=VCFv4.1\n')
#file date
out_file.write('##filedate='+time.strftime("%Y%m%d")+'\n')
#tcga version
out_file.write('##tcgaversion=1.0\n')
#genome reference; need to use URL
if refseq_flag == 'hg19':
out_file.write('##reference=<ID=hg19,Source=http://www.bcgsc.ca/downloads/genomes/Homo_sapiens/hg19/1000genomes/bwa_ind/genome/>\n')
elif refseq_flag == 'hg18':
out_file.write('##reference=<ID=hg18,Source=http://www.bcgsc.ca/downloads/genomes/Homo_sapiens/hg18/bwa_ind/genome/tcga_ref/>\n')
elif refseq_flag == 'mm9':
out_file.write('##reference=<ID=mm9,Source=/projects/transabyss/trans-ABySS/annotations/mm9/201107/genome.fa>\n')
#contig assembly tags, need to use URL
out_file.write('##assembly='+contig+'\n')
#center
out_file.write('##center="BCGSC"\n')
#phasing
out_file.write('##phasing=none\n')
info_format = {
'svtype':'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n',
'mateid':'##INFO=<ID=MATEID,Number=1,Type=String,Description="ID of mate breakends">\n',
'event':'##INFO=<ID=EVENT,Number=1,Type=String,Description="ID of breakend event">\n',
'cipos':'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">\n',
'svlen':'##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">\n',
'end':'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n',
'inv':'##ALT=<ID=INV,Description="Inversion">\n',
'del':'##ALT=<ID=DEL,Description="Deletion">\n',
'duptan':'##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">\n',
'sr':'##INFO=<ID=SR,Number=1,Type=Integer,Description="Spanning reads">\n',
'dp':'##INFO=<ID=DP,Number=1,Type=Integer,Description="Read depth">\n',
'CTG':'##INFO=<ID=CTG,Number=.,Type=String,Description="Contig ID">\n'
}
fusion = ['svtype', 'mateid', 'cipos', 'CTG', 'sr']
duplication = ['svtype', 'duptan', 'CTG', 'sr']
if filetype_flag == 'fusion':
for item in fusion:
if item in info_format:
out_file.write(info_format[item])
elif filetype_flag == 'itd' or filetype_flag == 'ptd':
for item in duplication:
if item in info_format:
out_file.write(info_format[item])
sample_info = commands.getoutput("python get_tcga_sample_info.py --username "+GIN_user+" --password "+GIN_pass+" --LIMS_user "+LIMS_user+" --LIMS_pass "+LIMS_pass+" --library "+library)
#sample info
sample_info = sample_info.split(',')
patient = sample_info[0]
sample_id = sample_info[1]
sample_desc = sample_info[2]
platform = sample_info[3]
accession = sample_info[4]
out_file.write('##SAMPLE=<ID='+sample_id+',Individual='+patient+',Description="'+sample_desc+'",Platform='+platform+',Accession='+accession+'>\n')
#pedigree
out_file.write('##PEDIGREE=<Name_0='+sample_id+'>\n')
fields_line = '#CHROM\t'+'POS\t'+'ID\t'+'REF\t'+'ALT\t'+'QUAL\t'+'FILTER\t'+'INFO\n'
out_file.write(fields_line)
def create_fusion_dict(output_dir, in_file, refseq, sequence_dict):
fusion_dict = {}
groupParser = CandidateGroupParserCls(in_file)
id1, id2 = 1, 1
key1, key2 = 1, 2
# str.strip('vcf') would remove any of the characters 'v', 'c', 'f' from both ends; slice off the extension instead
ctg_dir = output_dir[:-len('vcf')] if output_dir.endswith('.vcf') else output_dir
contigfa = open(ctg_dir+'fa', "w")
for group in groupParser:
member = group.members[0]
chrom1, chrom2 = get_CHROM(member)
pos1, pos2 = get_POS(member)
ref1, ref2 = get_REF(member, refseq)
alt1, alt2 = get_ALT(member, refseq)
qual = get_QUAL()
filt = get_FILT()
info1, info2 = get_INFO(member, id1, id2)
fusion1 = chrom1+'\t'+pos1+'\t'+str(id1)+'a'+'\t'+ref1+'\t'+alt1+'\t'+qual+'\t'+filt+'\t'+info1
fusion2 = chrom2+'\t'+pos2+'\t'+str(id2)+'b'+'\t'+ref2+'\t'+alt2+'\t'+qual+'\t'+filt+'\t'+info2
counter = 0
for m in group.members:
contig = m.contig_info.ToString()
contig = contig.replace(':', '_')
contig = contig.partition('(')[0]
if counter > 0:
fusion1 += ','+contig
fusion2 += ','+contig
else:
fusion1 += contig
fusion2 += contig
sequence = sequence_dict['>'+contig]
contigfa.write('>'+contig+'\n'+sequence+'\n')
counter += 1
fusion_dict[key1] = fusion1+'\n'
fusion_dict[key2] = fusion2+'\n'
id1 += 1
id2 += 1
key1 += 2
key2 += 2
return fusion_dict
def dup_CHROM(member):
breakpoint = member.breakpointA.ToString()
breakpoint = breakpoint.partition(':')
chrom = breakpoint[0]
chrom = chrom[3:]
return chrom
def dup_POS(member):
breakpoint = member.breakpointA.ToString()
breakpoint = breakpoint.partition(':')
position = breakpoint[2]
position = position.partition('(')[0]
return position
def | (member, refseq):
pos = dup_POS(member)
chrom = dup_CHROM(member)
seq = refseq.GetSequence(chrom, int(pos), int(pos))
return seq
def dup_ALT():
alt = '<DUP:TANDEM>'
return alt
def dup_INFO(member):
pos = int(dup_POS(member))
svtype = 'FND'
length = len(member.event_seq)
end = pos + length - 1
info = 'SVTYPE='+svtype+';END='+str(end)
return info
def ptd_POS(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
if breakpoint1.endswith('(down)'):
end = breakpoint2
start = breakpoint1.partition(':')[2]
else:
end = breakpoint1
start = breakpoint2.partition(':')[2]
start = start.partition('(')[0]
return start
def ptd_INFO(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
if breakpoint1.endswith('(down)'):
end = breakpoint2
start = breakpoint1.partition(':')[2]
else:
end = breakpoint1
start = breakpoint2.partition(':')[2]
start = start.partition('(')[0]
endpos = end.partition(':')[2]
endpos = endpos.partition('(')[0]
svtype = 'FND'
info = 'SVTYPE='+svtype+';END='+endpos
return info
def create_dup_dict(in_file, refseq, filetype_flag):
dup_dict = {}
groupParser = CandidateGroupParserCls(in_file)
id1 = 1
for group in groupParser:
member = group.members[0]
chrom = dup_CHROM(member)
qual = get_QUAL()
filt = get_FILT()
if filetype_flag == 'itd':
pos = dup_POS(member)
else:
pos = ptd_POS(member)
ref = dup_REF(member, refseq)
alt = dup_ALT()
if filetype_flag == 'itd':
info = dup_INFO(member)
else:
info = ptd_INFO(member)
dp = int(member.avg_read_to_ctg_unique)
info += ';SR='+str(dp)+';CTG='
dup = chrom+'\t'+pos+'\t.\t'+ref+'\t'+alt+'\t'+qual+'\t'+filt+'\t'+info
counter = 0
for m in group.members:
contig = m.contig_info.ToString()
contig = contig.replace(':', '_')
contig = contig.partition('(')[0]
if counter > 0:
dup += ','+contig
else:
dup += contig
counter += 1
dup_dict[id1] = dup+'\n'
id1 = id1+1
return dup_dict
#parse contig(fa) file and put reads into dictionary with contig ID as key
def parse_fa(fa_file):
sequence = {}
contig = None
for line in open(fa_file, 'r'):
if line[0] == '>':
contig = line.split()[0]
else:
sequence[contig] = line.rstrip('\n')
return sequence
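# Usage sketch (hypothetical path): parse_fa assumes each sequence sits on a single line
# after its '>' header, as in merged trans-ABySS contig files; a multi-line FASTA record
# would keep only its last line. Keys retain the leading '>':
#
#   sequences = parse_fa('/path/to/library-contigs.fa')
#   seq = sequences.get('>k52_12345')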
def out_to_VCF(filetype_flag, in_file, gene_flag, output_dir, contig, library, GIN_user, GIN_pass, LIMS_user, LIMS_pass):
#reference sequence file
if gene_flag == 'hg18':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/hg18/200909/hg18.2bit')
elif gene_flag == 'hg19':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/hg19/201110/hg19.2bit')
elif gene_flag == 'mm9':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/mm9/201107/mm9.2bit')
out_file = open(output_dir, "w")
write_header(GIN_user, GIN_pass, LIMS_user, LIMS_pass, gene_flag, library, filetype_flag, out_file, contig)
if filetype_flag == 'fusion':
sequence_dict = parse_fa(contig)
dictionary = create_fusion_dict(output_dir, in_file, refseq, sequence_dict)
elif filetype_flag == 'itd' or filetype_flag == 'ptd':
dictionary = create_dup_dict(in_file, refseq, filetype_flag)
for key in dictionary:
out_file.write(dictionary[key])
out_file.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-f",dest="fusion_filename",default=None,help="directory and name of input fusion file to convert into vcf")
parser.add_option("-i",dest="itd_filename",default=None,help="directory and name of input itd file to convert into vcf")
parser.add_option("-p",dest="ptd_filename",default=None,help="directory and name of input ptd file to convert into vcf")
parser.add_option("-d",dest="output_dir",default=None,help="directory and name of output file to write into")
parser.add_option("--username",dest="username",default=None,help="GIN username")
parser.add_option("--password",dest="password",default=None,help="GIN password")
parser.add_option("--LIMS_user",dest="lims_user",default=None,help="LIMS username")
parser.add_option("--LIMS_pass",dest="lims_pass",default=None,help="LIMS password")
parser.add_option("--library",dest="library",default=None,help="Library name")
parser.add_option("-c",dest="contigs_file",default=None,help="directory and name of contig assembly file(*.fa)")
parser.add_option("-g",dest="gene_flag",default=None,help="Specify which genome to use")
(options, args) = parser.parse_args()
if options.fusion_filename:
filetype_flag = 'fusion'
in_file = options.fusion_filename
elif options.itd_filename:
filetype_flag = 'itd'
in_file = options.itd_filename
elif options.ptd_filename:
filetype_flag = 'ptd'
in_file = options.ptd_filename
output_dir = '/projects/transabyss/workspace/jira/APA-69/barnacle_test/A05012_barnacle_fus.vcf'
library = 'A05012'
in_file = '/genesis/scratch/validations/transcriptome/AML/A05012/Assembly/current/barnacle/ver_1.2/8_predicted_events/A05012.barnacle.fus'
contig_file = '/genesis/scratch/validations/transcriptome/AML/A05012/Assembly/current/merge/A05012-contigs.fa'
out_to_VCF('fusion', in_file, 'hg19', output_dir, contig_file, library, options.username, options.password, options.lims_user, options.lims_pass)
| dup_REF | identifier_name |
barnacle_vcf.py | """
barnacle_vcf.py
Created by William Li
Copyright (c) 2012 Canada's Michael Smith Genome Sciences Centre. All rights reserved.
"""
import sys, os, time, datetime, commands, re
from optparse import OptionParser
sys.path.append("/projects/transabyss/trans-ABySS/v1.2.4/code")
from parsers.candidate_group_parser import CandidateGroupParserCls
from parsers.two_bit import TwoBitFileCls
def get_CHROM(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
breakpoint1 = breakpoint1.partition(':')
breakpoint2 = breakpoint2.partition(':')
chrom1 = breakpoint1[0]
chrom1 = chrom1[3:]
chrom2 = breakpoint2[0]
chrom2 = chrom2[3:]
return chrom1, chrom2
def get_POS(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
overlap = int(member.meta_fields['ctg_overlap'])
position1 = breakpoint1.partition(':')
position2 = breakpoint2.partition(':')
position1 = position1[2]
position2 = position2[2]
position1 = position1.partition('(')[0]
position2 = position2.partition('(')[0]
if overlap > 0:
if breakpoint2.endswith('(down)'):
position2 = int(position2)-overlap
if breakpoint1.endswith('(down)'):
position1 = int(position1)-overlap
return str(position1), str(position2)
def get_REF(member, refseq):
pos1, pos2 = get_POS(member)
chrom1, chrom2 = get_CHROM(member)
seq1 = refseq.GetSequence(chrom1, int(pos1), int(pos1))
seq2 = refseq.GetSequence(chrom2, int(pos2), int(pos2))
return seq1, seq2
def get_ALT(member, refseq):
chrom1, chrom2 = get_CHROM(member)
pos1, pos2 = get_POS(member)
base1, base2 = get_REF(member, refseq)
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
overlap = int(member.meta_fields['ctg_overlap'])
if overlap > 0:
if breakpoint1.endswith('(down)') and breakpoint2.endswith('(down)'):
pos1 = str(int(pos1) + overlap)
pos2 = str(int(pos2) + overlap)
#both breakpoints at either the start or the end of the contig region
if breakpoint1.endswith('(up)') and breakpoint2.endswith('(up)'):
if overlap > 0:
pos1 = int(pos1)+overlap
pos2 = int(pos2)+overlap
alt1 = '['+chrom2+':'+str(pos2)+'['+base1
alt2 = '['+chrom1+':'+str(pos1)+'['+base2
return alt1, alt2
elif breakpoint1.endswith('(down)') and breakpoint2.endswith('(down)'):
alt1 = base1+']'+chrom2+':'+str(pos2)+']'
alt2 = base2+']'+chrom1+':'+str(pos1)+']'
return alt1, alt2
#one breakpoint is at the start of the contig region and other breakpoint is at the end
if breakpoint1.endswith('(up)'):
alt1 = ']'+chrom2+':'+pos2+']'+base1
alt2 = base2+'['+chrom1+':'+pos1+'['
else:
alt1 = base1+'['+chrom2+':'+pos2+'['
alt2 = ']'+chrom1+':'+pos1+']'+base2
return alt1, alt2
def get_QUAL():
return '.'
def get_FILT():
return 'PASS'
def get_INFO(member, id1, id2):
overlap = str(member.meta_fields['ctg_overlap'])
svtype = 'FND'
dp = int(member.avg_read_to_ctg_unique)
if int(overlap) > 0:
return 'SVTYPE='+svtype+';MATEID='+str(id2)+'b;CIPOS=0,'+overlap+';SR='+str(dp)+';CTG=', 'SVTYPE='+svtype+';MATEID='+str(id1)+'a;CIPOS=0,'+overlap+';SR='+str(dp)+';CTG='
else:
return 'SVTYPE='+svtype+';MATEID='+str(id2)+'b;SR='+str(dp)+';CTG=', 'SVTYPE='+svtype+';MATEID='+str(id1)+'a;SR='+str(dp)+';CTG='
#header output method
def write_header(GIN_user, GIN_pass, LIMS_user, LIMS_pass, refseq_flag, library, filetype_flag, out_file, contig=None):
#file format
out_file.write('##fileformat=VCFv4.1\n')
#file date
out_file.write('##filedate='+time.strftime("%Y%m%d")+'\n')
#tcga version
out_file.write('##tcgaversion=1.0\n')
#genome reference; need to use URL
if refseq_flag == 'hg19':
out_file.write('##reference=<ID=hg19,Source=http://www.bcgsc.ca/downloads/genomes/Homo_sapiens/hg19/1000genomes/bwa_ind/genome/>\n')
elif refseq_flag == 'hg18':
out_file.write('##reference=<ID=hg18,Source=http://www.bcgsc.ca/downloads/genomes/Homo_sapiens/hg18/bwa_ind/genome/tcga_ref/>\n')
elif refseq_flag == 'mm9':
out_file.write('##reference=<ID=mm9,Source=/projects/transabyss/trans-ABySS/annotations/mm9/201107/genome.fa>\n')
#contig assembly tags, need to use URL
out_file.write('##assembly='+contig+'\n')
#center
out_file.write('##center="BCGSC"\n')
#phasing
out_file.write('##phasing=none\n')
info_format = {
'svtype':'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n',
'mateid':'##INFO=<ID=MATEID,Number=1,Type=String,Description="ID of mate breakends">\n',
'event':'##INFO=<ID=EVENT,Number=1,Type=String,Description="ID of breakend event">\n',
'cipos':'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">\n',
'svlen':'##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">\n',
'end':'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n',
'inv':'##ALT=<ID=INV,Description="Inversion">\n',
'del':'##ALT=<ID=DEL,Description="Deletion">\n',
'duptan':'##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">\n',
'sr':'##INFO=<ID=SR,Number=1,Type=Integer,Description="Spanning reads">\n',
'dp':'##INFO=<ID=DP,Number=1,Type=Integer,Description="Read depth">\n',
'CTG':'##INFO=<ID=CTG,Number=.,Type=String,Description="Contig ID">\n'
}
fusion = ['svtype', 'mateid', 'cipos', 'CTG', 'sr']
duplication = ['svtype', 'duptan', 'CTG', 'sr']
if filetype_flag == 'fusion':
for item in fusion:
if item in info_format:
out_file.write(info_format[item])
elif filetype_flag == 'itd' or filetype_flag == 'ptd':
for item in duplication:
if item in info_format:
out_file.write(info_format[item])
sample_info = commands.getoutput("python get_tcga_sample_info.py --username "+GIN_user+" --password "+GIN_pass+" --LIMS_user "+LIMS_user+" --LIMS_pass "+LIMS_pass+" --library "+library)
#sample info
sample_info = sample_info.split(',')
patient = sample_info[0]
sample_id = sample_info[1]
sample_desc = sample_info[2]
platform = sample_info[3]
accession = sample_info[4]
out_file.write('##SAMPLE=<ID='+sample_id+',Individual='+patient+',Description="'+sample_desc+'",Platform='+platform+',Accession='+accession+'>\n')
#pedigree
out_file.write('##PEDIGREE=<Name_0='+sample_id+'>\n')
fields_line = '#CHROM\t'+'POS\t'+'ID\t'+'REF\t'+'ALT\t'+'QUAL\t'+'FILTER\t'+'INFO\n'
out_file.write(fields_line)
def create_fusion_dict(output_dir, in_file, refseq, sequence_dict):
fusion_dict = {}
groupParser = CandidateGroupParserCls(in_file)
id1, id2 = 1, 1
key1, key2 = 1, 2
ctg_dir = output_dir.strip('vcf')
contigfa = open(ctg_dir+'fa', "w")
for group in groupParser:
member = group.members[0]
chrom1, chrom2 = get_CHROM(member)
pos1, pos2 = get_POS(member)
ref1, ref2 = get_REF(member, refseq)
alt1, alt2 = get_ALT(member, refseq)
qual = get_QUAL()
filt = get_FILT()
info1, info2 = get_INFO(member, id1, id2)
fusion1 = chrom1+'\t'+pos1+'\t'+str(id1)+'a'+'\t'+ref1+'\t'+alt1+'\t'+qual+'\t'+filt+'\t'+info1
fusion2 = chrom2+'\t'+pos2+'\t'+str(id2)+'b'+'\t'+ref2+'\t'+alt2+'\t'+qual+'\t'+filt+'\t'+info2
counter = 0
for m in group.members:
contig = m.contig_info.ToString()
contig = contig.replace(':', '_')
contig = contig.partition('(')[0]
if counter > 0:
fusion1 += ','+contig
fusion2 += ','+contig
else:
fusion1 += contig
fusion2 += contig
sequence = sequence_dict['>'+contig]
contigfa.write('>'+contig+'\n'+sequence+'\n')
counter += 1
fusion_dict[key1] = fusion1+'\n'
fusion_dict[key2] = fusion2+'\n'
id1 += 1
id2 += 1
key1 += 2
key2 += 2
return fusion_dict
def dup_CHROM(member):
breakpoint = member.breakpointA.ToString()
breakpoint = breakpoint.partition(':')
chrom = breakpoint[0]
chrom = chrom[3:]
return chrom
def dup_POS(member):
breakpoint = member.breakpointA.ToString()
breakpoint = breakpoint.partition(':')
position = breakpoint[2]
position = position.partition('(')[0]
return position
def dup_REF(member, refseq):
pos = dup_POS(member)
chrom = dup_CHROM(member)
seq = refseq.GetSequence(chrom, int(pos), int(pos))
return seq
def dup_ALT():
alt = '<DUP:TANDEM>'
return alt
def dup_INFO(member):
pos = int(dup_POS(member))
svtype = 'FND'
length = len(member.event_seq)
end = pos + length - 1
info = 'SVTYPE='+svtype+';END='+str(end)
return info
def ptd_POS(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
if breakpoint1.endswith('(down)'):
end = breakpoint2
start = breakpoint1.partition(':')[2]
else:
end = breakpoint1
start = breakpoint2.partition(':')[2]
start = start.partition('(')[0]
return start
def ptd_INFO(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
if breakpoint1.endswith('(down)'):
end = breakpoint2
start = breakpoint1.partition(':')[2]
else:
end = breakpoint1
start = breakpoint2.partition(':')[2]
start = start.partition('(')[0]
endpos = end.partition(':')[2]
endpos = endpos.partition('(')[0]
svtype = 'FND'
info = 'SVTYPE='+svtype+';END='+endpos
return info
def create_dup_dict(in_file, refseq, filetype_flag):
dup_dict = {}
groupParser = CandidateGroupParserCls(in_file)
id1 = 1
for group in groupParser:
member = group.members[0]
chrom = dup_CHROM(member)
qual = get_QUAL()
filt = get_FILT()
if filetype_flag == 'itd':
pos = dup_POS(member)
else:
pos = ptd_POS(member)
ref = dup_REF(member, refseq)
alt = dup_ALT()
if filetype_flag == 'itd':
info = dup_INFO(member)
else:
|
dp = int(member.avg_read_to_ctg_unique)
info += ';SR='+str(dp)+';CTG='
dup = chrom+'\t'+pos+'\t.\t'+ref+'\t'+alt+'\t'+qual+'\t'+filt+'\t'+info
counter = 0
for m in group.members:
contig = m.contig_info.ToString()
contig = contig.replace(':', '_')
contig = contig.partition('(')[0]
if counter > 0:
dup += ','+contig
else:
dup += contig
counter += 1
dup_dict[id1] = dup+'\n'
id1 = id1+1
return dup_dict
#parse contig (fa) file and put contig sequences into a dictionary keyed by contig ID
def parse_fa(fa_file):
sequence = {}
contig = None
for line in open(fa_file, 'r'):
if line[0] == '>':
contig = line.split()[0]
else:
sequence[contig] = line.rstrip('\n')
return sequence
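#Note: parse_fa assumes each contig sequence occupies a single line; for multi-line FASTA records only the last line would be kept.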
def out_to_VCF(filetype_flag, in_file, gene_flag, output_dir, contig, library, GIN_user, GIN_pass, LIMS_user, LIMS_pass):
#reference sequence file
if gene_flag == 'hg18':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/hg18/200909/hg18.2bit')
elif gene_flag == 'hg19':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/hg19/201110/hg19.2bit')
elif gene_flag == 'mm9':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/mm9/201107/mm9.2bit')
out_file = open(output_dir, "w")
write_header(GIN_user, GIN_pass, LIMS_user, LIMS_pass, gene_flag, library, filetype_flag, out_file, contig)
if filetype_flag == 'fusion':
sequence_dict = parse_fa(contig)
dictionary = create_fusion_dict(output_dir, in_file, refseq, sequence_dict)
elif filetype_flag == 'itd' or filetype_flag == 'ptd':
dictionary = create_dup_dict(in_file, refseq, filetype_flag)
for key in dictionary:
out_file.write(dictionary[key])
out_file.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-f",dest="fusion_filename",default=None,help="directory and name of input fusion file to convert into vcf")
parser.add_option("-i",dest="itd_filename",default=None,help="directory and name of input itd file to convert into vcf")
parser.add_option("-p",dest="ptd_filename",default=None,help="directory and name of input ptd file to convert into vcf")
parser.add_option("-d",dest="output_dir",default=None,help="directory and name of output file to write into")
parser.add_option("--username",dest="username",default=None,help="GIN username")
parser.add_option("--password",dest="password",default=None,help="GIN password")
parser.add_option("--LIMS_user",dest="lims_user",default=None,help="LIMS username")
parser.add_option("--LIMS_pass",dest="lims_pass",default=None,help="LIMS password")
parser.add_option("--library",dest="library",default=None,help="Library name")
parser.add_option("-c",dest="contigs_file",default=None,help="directory and name of contig assembly file(*.fa)")
parser.add_option("-g",dest="gene_flag",default=None,help="Specify which genome to use")
(options, args) = parser.parse_args()
if options.fusion_filename:
filetype_flag = 'fusion'
in_file = options.fusion_filename
elif options.itd_filename:
filetype_flag = 'itd'
in_file = options.itd_filename
elif options.ptd_filename:
filetype_flag = 'ptd'
in_file = options.ptd_filename
output_dir = '/projects/transabyss/workspace/jira/APA-69/barnacle_test/A05012_barnacle_fus.vcf'
library = 'A05012'
in_file = '/genesis/scratch/validations/transcriptome/AML/A05012/Assembly/current/barnacle/ver_1.2/8_predicted_events/A05012.barnacle.fus'
contig_file = '/genesis/scratch/validations/transcriptome/AML/A05012/Assembly/current/merge/A05012-contigs.fa'
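#Note: gin_user, gin_pass, lims_user and lims_pass are not defined in this test block; they are presumably meant to come from the parsed options (options.username, options.password, options.lims_user, options.lims_pass).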
out_to_VCF('fusion', in_file, 'hg19', output_dir, contig_file, library, gin_user, gin_pass, lims_user, lims_pass)
| info = ptd_INFO(member) | conditional_block |
barnacle_vcf.py | """
barnacle_vcf.py
Created by William Li
Copyright (c) 2012 Canada's Michael Smith Genome Sciences Centre. All rights reserved.
"""
import sys, os, time, datetime, commands, re
from optparse import OptionParser
sys.path.append("/projects/transabyss/trans-ABySS/v1.2.4/code")
from parsers.candidate_group_parser import CandidateGroupParserCls
from parsers.two_bit import TwoBitFileCls
def get_CHROM(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
breakpoint1 = breakpoint1.partition(':')
breakpoint2 = breakpoint2.partition(':')
chrom1 = breakpoint1[0]
chrom1 = chrom1[3:]
chrom2 = breakpoint2[0]
chrom2 = chrom2[3:]
return chrom1, chrom2
def get_POS(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
overlap = int(member.meta_fields['ctg_overlap'])
position1 = breakpoint1.partition(':')
position2 = breakpoint2.partition(':')
position1 = position1[2]
position2 = position2[2]
position1 = position1.partition('(')[0]
position2 = position2.partition('(')[0]
if overlap > 0:
if breakpoint2.endswith('(down)'):
position2 = int(position2)-overlap
if breakpoint1.endswith('(down)'):
position1 = int(position1)-overlap
return str(position1), str(position2)
def get_REF(member, refseq):
pos1, pos2 = get_POS(member)
chrom1, chrom2 = get_CHROM(member)
seq1 = refseq.GetSequence(chrom1, int(pos1), int(pos1))
seq2 = refseq.GetSequence(chrom2, int(pos2), int(pos2))
return seq1, seq2
def get_ALT(member, refseq):
chrom1, chrom2 = get_CHROM(member)
pos1, pos2 = get_POS(member)
base1, base2 = get_REF(member, refseq)
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
overlap = int(member.meta_fields['ctg_overlap'])
if overlap > 0:
if breakpoint1.endswith('(down)') and breakpoint2.endswith('(down)'):
pos1 = str(int(pos1) + overlap)
pos2 = str(int(pos2) + overlap)
#both breakpoints at either the start or the end of the contig region
if breakpoint1.endswith('(up)') and breakpoint2.endswith('(up)'):
if overlap > 0:
pos1 = int(pos1)+overlap
pos2 = int(pos2)+overlap
alt1 = '['+chrom2+':'+str(pos2)+'['+base1
alt2 = '['+chrom1+':'+str(pos1)+'['+base2
return alt1, alt2
elif breakpoint1.endswith('(down)') and breakpoint2.endswith('(down)'):
alt1 = base1+']'+chrom2+':'+str(pos2)+']'
alt2 = base2+']'+chrom1+':'+str(pos1)+']'
return alt1, alt2
#one breakpoint is at the start of the contig region and the other breakpoint is at the end
if breakpoint1.endswith('(up)'):
alt1 = ']'+chrom2+':'+pos2+']'+base1
alt2 = base2+'['+chrom1+':'+pos1+'['
else:
alt1 = base1+'['+chrom2+':'+pos2+'['
alt2 = ']'+chrom1+':'+pos1+']'+base2
return alt1, alt2
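#Note: the ALT strings built in get_ALT follow VCF 4.1 breakend (BND) notation; the bracket direction (t[p[ vs t]p]) encodes which side of each breakpoint joins the mate locus.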
def get_QUAL():
return '.'
def get_FILT():
return 'PASS'
def get_INFO(member, id1, id2):
overlap = str(member.meta_fields['ctg_overlap'])
svtype = 'FND'
dp = int(member.avg_read_to_ctg_unique)
if int(overlap) > 0:
return 'SVTYPE='+svtype+';MATEID='+str(id2)+'b;CIPOS=0,'+overlap+';SR='+str(dp)+';CTG=', svtype+';MATEID='+str(id1)+'a;CIPOS=0,'+overlap+';SR='+str(dp)+';CTG='
else:
return 'SVTYPE='+svtype+';MATEID='+str(id2)+'b;SR='+str(dp)+';CTG=', svtype+';MATEID='+str(id1)+'a;SR='+str(dp)+';CTG='
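#get_INFO pairs the two breakend records of a fusion via MATEID (the 'a'/'b' ID suffixes), reports contig overlap as CIPOS and spanning-read support as SR; the trailing 'CTG=' is filled with contig IDs later in create_fusion_dict.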
#header output method
def write_header(GIN_user, GIN_pass, LIMS_user, LIMS_pass, refseq_flag, library, filetype_flag, out_file, contig=None):
#file format
out_file.write('##fileformat=VCFv4.1\n')
#file date
out_file.write('##filedate='+time.strftime("%Y%m%d")+'\n')
#tcga version
out_file.write('##tcgaversion=1.0\n')
#genome reference; need to use URL
if refseq_flag == 'hg19':
out_file.write('##reference=<ID=hg19,Source=http://www.bcgsc.ca/downloads/genomes/Homo_sapiens/hg19/1000genomes/bwa_ind/genome/>\n')
elif refseq_flag == 'hg18':
out_file.write('##reference=<ID=hg18,Source=http://www.bcgsc.ca/downloads/genomes/Homo_sapiens/hg18/bwa_ind/genome/tcga_ref/>\n')
elif refseq_flag == 'mm9':
out_file.write('##reference=<ID=mm9,Source=/projects/transabyss/trans-ABySS/annotations/mm9/201107/genome.fa>\n')
#contig assembly tags, need to use URL
out_file.write('##assembly='+contig+'\n')
#center
out_file.write('##center="BCGSC"\n')
#phasing
out_file.write('##phasing=none\n')
info_format = {
'svtype':'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n',
'mateid':'##INFO=<ID=MATEID,Number=1,Type=String,Description="ID of mate breakends">\n',
'event':'##INFO=<ID=EVENT,Number=1,Type=String,Description="ID of breakend event">\n',
'cipos':'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">\n',
'svlen':'##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">\n',
'end':'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n',
'inv':'##ALT=<ID=INV,Description="Inversion">\n',
'del':'##ALT=<ID=DEL,Description="Deletion">\n',
'duptan':'##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">\n',
'sr':'##INFO=<ID=SR,Number=1,Type=Integer,Description="Spanning reads">\n',
'dp':'##INFO=<ID=DP,Number=1,Type=Integer,Description="Read depth">\n',
'CTG':'##INFO=<ID=CTG,Number=.,Type=String,Description="Contig ID">\n'
}
fusion = ['svtype', 'mateid', 'cipos', 'CTG', 'sr']
duplication = ['svtype', 'duptan', 'CTG', 'sr']
if filetype_flag == 'fusion':
for item in fusion:
if item in info_format:
out_file.write(info_format[item])
elif filetype_flag == 'itd' or filetype_flag == 'ptd':
for item in duplication:
if item in info_format:
out_file.write(info_format[item])
sample_info = commands.getoutput("python get_tcga_sample_info.py --username "+GIN_user+" --password "+GIN_pass+" --LIMS_user "+LIMS_user+" --LIMS_pass "+LIMS_pass+" --library "+library)
#sample info
sample_info = sample_info.split(',')
patient = sample_info[0]
sample_id = sample_info[1]
sample_desc = sample_info[2]
platform = sample_info[3]
accession = sample_info[4]
out_file.write('##SAMPLE=<ID='+sample_id+',Individual='+patient+',Description="'+sample_desc+'",Platform='+platform+',Accession='+accession+'>\n')
#pedigree
out_file.write('##PEDIGREE=<Name_0='+sample_id+'>\n')
fields_line = '#CHROM\t'+'POS\t'+'ID\t'+'REF\t'+'ALT\t'+'QUAL\t'+'FILTER\t'+'INFO\n'
out_file.write(fields_line)
def create_fusion_dict(output_dir, in_file, refseq, sequence_dict):
fusion_dict = {}
groupParser = CandidateGroupParserCls(in_file)
id1, id2 = 1, 1
key1, key2 = 1, 2
ctg_dir = output_dir.strip('vcf')
contigfa = open(ctg_dir+'fa', "w")
for group in groupParser:
member = group.members[0]
chrom1, chrom2 = get_CHROM(member)
pos1, pos2 = get_POS(member)
ref1, ref2 = get_REF(member, refseq)
alt1, alt2 = get_ALT(member, refseq)
qual = get_QUAL()
filt = get_FILT()
info1, info2 = get_INFO(member, id1, id2)
fusion1 = chrom1+'\t'+pos1+'\t'+str(id1)+'a'+'\t'+ref1+'\t'+alt1+'\t'+qual+'\t'+filt+'\t'+info1
fusion2 = chrom2+'\t'+pos2+'\t'+str(id2)+'b'+'\t'+ref2+'\t'+alt2+'\t'+qual+'\t'+filt+'\t'+info2
counter = 0
for m in group.members:
contig = m.contig_info.ToString()
contig = contig.replace(':', '_')
contig = contig.partition('(')[0]
if counter > 0:
fusion1 += ','+contig
fusion2 += ','+contig
else:
fusion1 += contig
fusion2 += contig
sequence = sequence_dict['>'+contig]
contigfa.write('>'+contig+'\n'+sequence+'\n')
counter += 1
fusion_dict[key1] = fusion1+'\n'
fusion_dict[key2] = fusion2+'\n'
id1 += 1
id2 += 1
key1 += 2
key2 += 2
return fusion_dict
def dup_CHROM(member):
breakpoint = member.breakpointA.ToString()
breakpoint = breakpoint.partition(':')
chrom = breakpoint[0]
chrom = chrom[3:]
return chrom
def dup_POS(member):
breakpoint = member.breakpointA.ToString()
breakpoint = breakpoint.partition(':')
position = breakpoint[2]
position = position.partition('(')[0]
return position | seq = refseq.GetSequence(chrom, int(pos), int(pos))
return seq
def dup_ALT():
alt = '<DUP:TANDEM>'
return alt
def dup_INFO(member):
pos = int(dup_POS(member))
svtype = 'FND'
length = len(member.event_seq)
end = pos + length - 1
info = 'SVTYPE='+svtype+';END='+str(end)
return info
def ptd_POS(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
if breakpoint1.endswith('(down)'):
end = breakpoint2
start = breakpoint1.partition(':')[2]
else:
end = breakpoint1
start = breakpoint2.partition(':')[2]
start = start.partition('(')[0]
return start
def ptd_INFO(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
if breakpoint1.endswith('(down)'):
end = breakpoint2
start = breakpoint1.partition(':')[2]
else:
end = breakpoint1
start = breakpoint2.partition(':')[2]
start = start.partition('(')[0]
endpos = end.partition(':')[2]
endpos = endpos.partition('(')[0]
svtype = 'FND'
info = 'SVTYPE='+svtype+';END='+endpos
return info
def create_dup_dict(in_file, refseq, filetype_flag):
dup_dict = {}
groupParser = CandidateGroupParserCls(in_file)
id1 = 1
for group in groupParser:
member = group.members[0]
chrom = dup_CHROM(member)
qual = get_QUAL()
filt = get_FILT()
if filetype_flag == 'itd':
pos = dup_POS(member)
else:
pos = ptd_POS(member)
ref = dup_REF(member, refseq)
alt = dup_ALT()
if filetype_flag == 'itd':
info = dup_INFO(member)
else:
info = ptd_INFO(member)
dp = int(member.avg_read_to_ctg_unique)
info += ';SR='+str(dp)+';CTG='
dup = chrom+'\t'+pos+'\t.\t'+ref+'\t'+alt+'\t'+qual+'\t'+filt+'\t'+info
counter = 0
for m in group.members:
contig = m.contig_info.ToString()
contig = contig.replace(':', '_')
contig = contig.partition('(')[0]
if counter > 0:
dup += ','+contig
else:
dup += contig
counter += 1
dup_dict[id1] = dup+'\n'
id1 = id1+1
return dup_dict
#parse contig (fa) file and put contig sequences into a dictionary keyed by contig ID
def parse_fa(fa_file):
sequence = {}
contig = None
for line in open(fa_file, 'r'):
if line[0] == '>':
contig = line.split()[0]
else:
sequence[contig] = line.rstrip('\n')
return sequence
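#Note: parse_fa assumes each contig sequence occupies a single line; for multi-line FASTA records only the last line would be kept.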
def out_to_VCF(filetype_flag, in_file, gene_flag, output_dir, contig, library, GIN_user, GIN_pass, LIMS_user, LIMS_pass):
#reference sequence file
if gene_flag == 'hg18':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/hg18/200909/hg18.2bit')
elif gene_flag == 'hg19':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/hg19/201110/hg19.2bit')
elif gene_flag == 'mm9':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/mm9/201107/mm9.2bit')
out_file = open(output_dir, "w")
write_header(GIN_user, GIN_pass, LIMS_user, LIMS_pass, gene_flag, library, filetype_flag, out_file, contig)
if filetype_flag == 'fusion':
sequence_dict = parse_fa(contig)
dictionary = create_fusion_dict(output_dir, in_file, refseq, sequence_dict)
elif filetype_flag == 'itd' or filetype_flag == 'ptd':
dictionary = create_dup_dict(in_file, refseq, filetype_flag)
for key in dictionary:
out_file.write(dictionary[key])
out_file.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-f",dest="fusion_filename",default=None,help="directory and name of input fusion file to convert into vcf")
parser.add_option("-i",dest="itd_filename",default=None,help="directory and name of input itd file to convert into vcf")
parser.add_option("-p",dest="ptd_filename",default=None,help="directory and name of input ptd file to convert into vcf")
parser.add_option("-d",dest="output_dir",default=None,help="directory and name of output file to write into")
parser.add_option("--username",dest="username",default=None,help="GIN username")
parser.add_option("--password",dest="password",default=None,help="GIN password")
parser.add_option("--LIMS_user",dest="lims_user",default=None,help="LIMS username")
parser.add_option("--LIMS_pass",dest="lims_pass",default=None,help="LIMS password")
parser.add_option("--library",dest="library",default=None,help="Library name")
parser.add_option("-c",dest="contigs_file",default=None,help="directory and name of contig assembly file(*.fa)")
parser.add_option("-g",dest="gene_flag",default=None,help="Specify which genome to use")
(options, args) = parser.parse_args()
if options.fusion_filename:
filetype_flag = 'fusion'
in_file = options.fusion_filename
elif options.itd_filename:
filetype_flag = 'itd'
in_file = options.itd_filename
elif options.ptd_filename:
filetype_flag = 'ptd'
in_file = options.ptd_filename
output_dir = '/projects/transabyss/workspace/jira/APA-69/barnacle_test/A05012_barnacle_fus.vcf'
library = 'A05012'
in_file = '/genesis/scratch/validations/transcriptome/AML/A05012/Assembly/current/barnacle/ver_1.2/8_predicted_events/A05012.barnacle.fus'
contig_file = '/genesis/scratch/validations/transcriptome/AML/A05012/Assembly/current/merge/A05012-contigs.fa'
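#Note: gin_user, gin_pass, lims_user and lims_pass are not defined in this test block; they are presumably meant to come from the parsed options (options.username, options.password, options.lims_user, options.lims_pass).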
out_to_VCF('fusion', in_file, 'hg19', output_dir, contig_file, library, gin_user, gin_pass, lims_user, lims_pass) |
def dup_REF(member, refseq):
pos = dup_POS(member)
chrom = dup_CHROM(member) | random_line_split |
barnacle_vcf.py | """
barnacle_vcf.py
Created by William Li
Copyright (c) 2012 Canada's Michael Smith Genome Sciences Centre. All rights reserved.
"""
import sys, os, time, datetime, commands, re
from optparse import OptionParser
sys.path.append("/projects/transabyss/trans-ABySS/v1.2.4/code")
from parsers.candidate_group_parser import CandidateGroupParserCls
from parsers.two_bit import TwoBitFileCls
def get_CHROM(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
breakpoint1 = breakpoint1.partition(':')
breakpoint2 = breakpoint2.partition(':')
chrom1 = breakpoint1[0]
chrom1 = chrom1[3:]
chrom2 = breakpoint2[0]
chrom2 = chrom2[3:]
return chrom1, chrom2
def get_POS(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
overlap = int(member.meta_fields['ctg_overlap'])
position1 = breakpoint1.partition(':')
position2 = breakpoint2.partition(':')
position1 = position1[2]
position2 = position2[2]
position1 = position1.partition('(')[0]
position2 = position2.partition('(')[0]
if overlap > 0:
if breakpoint2.endswith('(down)'):
position2 = int(position2)-overlap
if breakpoint1.endswith('(down)'):
position1 = int(position1)-overlap
return str(position1), str(position2)
def get_REF(member, refseq):
pos1, pos2 = get_POS(member)
chrom1, chrom2 = get_CHROM(member)
seq1 = refseq.GetSequence(chrom1, int(pos1), int(pos1))
seq2 = refseq.GetSequence(chrom2, int(pos2), int(pos2))
return seq1, seq2
def get_ALT(member, refseq):
chrom1, chrom2 = get_CHROM(member)
pos1, pos2 = get_POS(member)
base1, base2 = get_REF(member, refseq)
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
overlap = int(member.meta_fields['ctg_overlap'])
if overlap > 0:
if breakpoint1.endswith('(down)') and breakpoint2.endswith('(down)'):
pos1 = str(int(pos1) + overlap)
pos2 = str(int(pos2) + overlap)
#both breakpoints at either the start or the end of the contig region
if breakpoint1.endswith('(up)') and breakpoint2.endswith('(up)'):
if overlap > 0:
pos1 = int(pos1)+overlap
pos2 = int(pos2)+overlap
alt1 = '['+chrom2+':'+str(pos2)+'['+base1
alt2 = '['+chrom1+':'+str(pos1)+'['+base2
return alt1, alt2
elif breakpoint1.endswith('(down)') and breakpoint2.endswith('(down)'):
alt1 = base1+']'+chrom2+':'+str(pos2)+']'
alt2 = base2+']'+chrom1+':'+str(pos1)+']'
return alt1, alt2
#one breakpoint is at the start of the contig region and the other breakpoint is at the end
if breakpoint1.endswith('(up)'):
alt1 = ']'+chrom2+':'+pos2+']'+base1
alt2 = base2+'['+chrom1+':'+pos1+'['
else:
alt1 = base1+'['+chrom2+':'+pos2+'['
alt2 = ']'+chrom1+':'+pos1+']'+base2
return alt1, alt2
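#Note: the ALT strings built in get_ALT follow VCF 4.1 breakend (BND) notation; the bracket direction (t[p[ vs t]p]) encodes which side of each breakpoint joins the mate locus.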
def get_QUAL():
return '.'
def get_FILT():
return 'PASS'
def get_INFO(member, id1, id2):
|
#header output method
def write_header(GIN_user, GIN_pass, LIMS_user, LIMS_pass, refseq_flag, library, filetype_flag, out_file, contig=None):
#file format
out_file.write('##fileformat=VCFv4.1\n')
#file date
out_file.write('##filedate='+time.strftime("%Y%m%d")+'\n')
#tcga version
out_file.write('##tcgaversion=1.0\n')
#genome reference; need to use URL
if refseq_flag == 'hg19':
out_file.write('##reference=<ID=hg19,Source=http://www.bcgsc.ca/downloads/genomes/Homo_sapiens/hg19/1000genomes/bwa_ind/genome/>\n')
elif refseq_flag == 'hg18':
out_file.write('##reference=<ID=hg18,Source=http://www.bcgsc.ca/downloads/genomes/Homo_sapiens/hg18/bwa_ind/genome/tcga_ref/>\n')
elif refseq_flag == 'mm9':
out_file.write('##reference=<ID=mm9,Source=/projects/transabyss/trans-ABySS/annotations/mm9/201107/genome.fa>\n')
#contig assembly tags, need to use URL
out_file.write('##assembly='+contig+'\n')
#center
out_file.write('##center="BCGSC"\n')
#phasing
out_file.write('##phasing=none\n')
info_format = {
'svtype':'##INFO=<ID=SVTYPE,Number=1,Type=String,Description="Type of structural variant">\n',
'mateid':'##INFO=<ID=MATEID,Number=1,Type=String,Description="ID of mate breakends">\n',
'event':'##INFO=<ID=EVENT,Number=1,Type=String,Description="ID of breakend event">\n',
'cipos':'##INFO=<ID=CIPOS,Number=2,Type=Integer,Description="Confidence interval around POS for imprecise variants">\n',
'svlen':'##INFO=<ID=SVLEN,Number=1,Type=Integer,Description="Difference in length between REF and ALT alleles">\n',
'end':'##INFO=<ID=END,Number=1,Type=Integer,Description="End position of the variant described in this record">\n',
'inv':'##ALT=<ID=INV,Description="Inversion">\n',
'del':'##ALT=<ID=DEL,Description="Deletion">\n',
'duptan':'##ALT=<ID=DUP:TANDEM,Description="Tandem Duplication">\n',
'sr':'##INFO=<ID=SR,Number=1,Type=Integer,Description="Spanning reads">\n',
'dp':'##INFO=<ID=DP,Number=1,Type=Integer,Description="Read depth">\n',
'CTG':'##INFO=<ID=CTG,Number=.,Type=String,Description="Contig ID">\n'
}
fusion = ['svtype', 'mateid', 'cipos', 'CTG', 'sr']
duplication = ['svtype', 'duptan', 'CTG', 'sr']
if filetype_flag == 'fusion':
for item in fusion:
if item in info_format:
out_file.write(info_format[item])
elif filetype_flag == 'itd' or filetype_flag == 'ptd':
for item in duplication:
if item in info_format:
out_file.write(info_format[item])
sample_info = commands.getoutput("python get_tcga_sample_info.py --username "+GIN_user+" --password "+GIN_pass+" --LIMS_user "+LIMS_user+" --LIMS_pass "+LIMS_pass+" --library "+library)
#sample info
sample_info = sample_info.split(',')
patient = sample_info[0]
sample_id = sample_info[1]
sample_desc = sample_info[2]
platform = sample_info[3]
accession = sample_info[4]
out_file.write('##SAMPLE=<ID='+sample_id+',Individual='+patient+',Description="'+sample_desc+'",Platform='+platform+',Accession='+accession+'>\n')
#pedigree
out_file.write('##PEDIGREE=<Name_0='+sample_id+'>\n')
fields_line = '#CHROM\t'+'POS\t'+'ID\t'+'REF\t'+'ALT\t'+'QUAL\t'+'FILTER\t'+'INFO\n'
out_file.write(fields_line)
def create_fusion_dict(output_dir, in_file, refseq, sequence_dict):
fusion_dict = {}
groupParser = CandidateGroupParserCls(in_file)
id1, id2 = 1, 1
key1, key2 = 1, 2
ctg_dir = output_dir.strip('vcf')
contigfa = open(ctg_dir+'fa', "w")
for group in groupParser:
member = group.members[0]
chrom1, chrom2 = get_CHROM(member)
pos1, pos2 = get_POS(member)
ref1, ref2 = get_REF(member, refseq)
alt1, alt2 = get_ALT(member, refseq)
qual = get_QUAL()
filt = get_FILT()
info1, info2 = get_INFO(member, id1, id2)
fusion1 = chrom1+'\t'+pos1+'\t'+str(id1)+'a'+'\t'+ref1+'\t'+alt1+'\t'+qual+'\t'+filt+'\t'+info1
fusion2 = chrom2+'\t'+pos2+'\t'+str(id2)+'b'+'\t'+ref2+'\t'+alt2+'\t'+qual+'\t'+filt+'\t'+info2
counter = 0
for m in group.members:
contig = m.contig_info.ToString()
contig = contig.replace(':', '_')
contig = contig.partition('(')[0]
if counter > 0:
fusion1 += ','+contig
fusion2 += ','+contig
else:
fusion1 += contig
fusion2 += contig
sequence = sequence_dict['>'+contig]
contigfa.write('>'+contig+'\n'+sequence+'\n')
counter += 1
fusion_dict[key1] = fusion1+'\n'
fusion_dict[key2] = fusion2+'\n'
id1 += 1
id2 += 1
key1 += 2
key2 += 2
return fusion_dict
def dup_CHROM(member):
breakpoint = member.breakpointA.ToString()
breakpoint = breakpoint.partition(':')
chrom = breakpoint[0]
chrom = chrom[3:]
return chrom
def dup_POS(member):
breakpoint = member.breakpointA.ToString()
breakpoint = breakpoint.partition(':')
position = breakpoint[2]
position = position.partition('(')[0]
return position
def dup_REF(member, refseq):
pos = dup_POS(member)
chrom = dup_CHROM(member)
seq = refseq.GetSequence(chrom, int(pos), int(pos))
return seq
def dup_ALT():
alt = '<DUP:TANDEM>'
return alt
def dup_INFO(member):
pos = int(dup_POS(member))
svtype = 'FND'
length = len(member.event_seq)
end = pos + length - 1
info = 'SVTYPE='+svtype+';END='+str(end)
return info
def ptd_POS(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
if breakpoint1.endswith('(down)'):
end = breakpoint2
start = breakpoint1.partition(':')[2]
else:
end = breakpoint1
start = breakpoint2.partition(':')[2]
start = start.partition('(')[0]
return start
def ptd_INFO(member):
breakpoint1 = member.breakpointA.ToString()
breakpoint2 = member.breakpointB.ToString()
if breakpoint1.endswith('(down)'):
end = breakpoint2
start = breakpoint1.partition(':')[2]
else:
end = breakpoint1
start = breakpoint2.partition(':')[2]
start = start.partition('(')[0]
endpos = end.partition(':')[2]
endpos = endpos.partition('(')[0]
svtype = 'FND'
info = 'SVTYPE='+svtype+';END='+endpos
return info
def create_dup_dict(in_file, refseq, filetype_flag):
dup_dict = {}
groupParser = CandidateGroupParserCls(in_file)
id1 = 1
for group in groupParser:
member = group.members[0]
chrom = dup_CHROM(member)
qual = get_QUAL()
filt = get_FILT()
if filetype_flag == 'itd':
pos = dup_POS(member)
else:
pos = ptd_POS(member)
ref = dup_REF(member, refseq)
alt = dup_ALT()
if filetype_flag == 'itd':
info = dup_INFO(member)
else:
info = ptd_INFO(member)
dp = int(member.avg_read_to_ctg_unique)
info += ';SR='+str(dp)+';CTG='
dup = chrom+'\t'+pos+'\t.\t'+ref+'\t'+alt+'\t'+qual+'\t'+filt+'\t'+info
counter = 0
for m in group.members:
contig = m.contig_info.ToString()
contig = contig.replace(':', '_')
contig = contig.partition('(')[0]
if counter > 0:
dup += ','+contig
else:
dup += contig
counter += 1
dup_dict[id1] = dup+'\n'
id1 = id1+1
return dup_dict
#parse contig (fa) file and put contig sequences into a dictionary keyed by contig ID
def parse_fa(fa_file):
sequence = {}
contig = None
for line in open(fa_file, 'r'):
if line[0] == '>':
contig = line.split()[0]
else:
sequence[contig] = line.rstrip('\n')
return sequence
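#Note: parse_fa assumes each contig sequence occupies a single line; for multi-line FASTA records only the last line would be kept.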
def out_to_VCF(filetype_flag, in_file, gene_flag, output_dir, contig, library, GIN_user, GIN_pass, LIMS_user, LIMS_pass):
#reference sequence file
if gene_flag == 'hg18':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/hg18/200909/hg18.2bit')
elif gene_flag == 'hg19':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/hg19/201110/hg19.2bit')
elif gene_flag == 'mm9':
refseq = TwoBitFileCls('/projects/transabyss/trans-ABySS/annotations/mm9/201107/mm9.2bit')
out_file = open(output_dir, "w")
write_header(GIN_user, GIN_pass, LIMS_user, LIMS_pass, gene_flag, library, filetype_flag, out_file, contig)
if filetype_flag == 'fusion':
sequence_dict = parse_fa(contig)
dictionary = create_fusion_dict(output_dir, in_file, refseq, sequence_dict)
elif filetype_flag == 'itd' or filetype_flag == 'ptd':
dictionary = create_dup_dict(in_file, refseq, filetype_flag)
for key in dictionary:
out_file.write(dictionary[key])
out_file.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-f",dest="fusion_filename",default=None,help="directory and name of input fusion file to convert into vcf")
parser.add_option("-i",dest="itd_filename",default=None,help="directory and name of input itd file to convert into vcf")
parser.add_option("-p",dest="ptd_filename",default=None,help="directory and name of input ptd file to convert into vcf")
parser.add_option("-d",dest="output_dir",default=None,help="directory and name of output file to write into")
parser.add_option("--username",dest="username",default=None,help="GIN username")
parser.add_option("--password",dest="password",default=None,help="GIN password")
parser.add_option("--LIMS_user",dest="lims_user",default=None,help="LIMS username")
parser.add_option("--LIMS_pass",dest="lims_pass",default=None,help="LIMS password")
parser.add_option("--library",dest="library",default=None,help="Library name")
parser.add_option("-c",dest="contigs_file",default=None,help="directory and name of contig assembly file(*.fa)")
parser.add_option("-g",dest="gene_flag",default=None,help="Specify which genome to use")
(options, args) = parser.parse_args()
if options.fusion_filename:
filetype_flag = 'fusion'
in_file = options.fusion_filename
elif options.itd_filename:
filetype_flag = 'itd'
in_file = options.itd_filename
elif options.ptd_filename:
filetype_flag = 'ptd'
in_file = options.ptd_filename
output_dir = '/projects/transabyss/workspace/jira/APA-69/barnacle_test/A05012_barnacle_fus.vcf'
library = 'A05012'
in_file = '/genesis/scratch/validations/transcriptome/AML/A05012/Assembly/current/barnacle/ver_1.2/8_predicted_events/A05012.barnacle.fus'
contig_file = '/genesis/scratch/validations/transcriptome/AML/A05012/Assembly/current/merge/A05012-contigs.fa'
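#Note: gin_user, gin_pass, lims_user and lims_pass are not defined in this test block; they are presumably meant to come from the parsed options (options.username, options.password, options.lims_user, options.lims_pass).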
out_to_VCF('fusion', in_file, 'hg19', output_dir, contig_file, library, gin_user, gin_pass, lims_user, lims_pass)
| overlap = str(member.meta_fields['ctg_overlap'])
svtype = 'FND'
dp = int(member.avg_read_to_ctg_unique)
if int(overlap) > 0:
return 'SVTYPE='+svtype+';MATEID='+str(id2)+'b;CIPOS=0,'+overlap+';SR='+str(dp)+';CTG=', svtype+';MATEID='+str(id1)+'a;CIPOS=0,'+overlap+';SR='+str(dp)+';CTG='
else:
return 'SVTYPE='+svtype+';MATEID='+str(id2)+'b;SR='+str(dp)+';CTG=', svtype+';MATEID='+str(id1)+'a;SR='+str(dp)+';CTG=' | identifier_body |
MakeGoodGamesBot.py | # -*- coding: utf-8 -*-
#MakeGoodGamesBot
#Required libraries
from twitter import *
from collections import Counter
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler, API
from tweepy import Stream
import time
import pandas as pd
import matplotlib.pyplot as plt
import json
import random as rd
#import seaborn as sns
#Auth keys and tokens.
ds = pd.read_pickle('dogge_secret')
consumer_key = ds[0]
consumer_secret = ds[1]
access_token = ds[2]
access_token_secret = ds[3]
#Initially explored two Python APIs, sixohsix and Tweepy. Now the bot uses both for different purposes.
#Probably going to consolidate on just one soon.
#Twitter (sixohsix) client
t = Twitter(auth=OAuth(access_token, access_token_secret, consumer_key, consumer_secret))
#Tweepy client
auth_handler = OAuthHandler(consumer_key, consumer_secret)
auth_handler.set_access_token(access_token, access_token_secret)
twitter_client = API(auth_handler)
#Banwords
banwords = ['#mplusrewards','#cheats','#cheat']
#Date and data
def get_date():
today = time.localtime()
date = str(today[0])+'-'+str(today[1])+'-'+str(today[2])
return date
try:
banRT = list(pd.read_pickle('ban_RT'))
banTXT = list(pd.read_pickle('ban_TXT'))
except:
banRT,banTXT = [],[]
#Schedule for a day. Default time between RT's set to 3 minutes.
#Steps: 1) First follow or unfollow according to tit_for_tat(),
# 2) RT followers,
# 3) RT highest ratio of number_of_RT/number_of_followers of previous day Statuses.
def run_schedule(dt=get_date(),ky='#indiedev',mx=150,clean=False,folow=False):
if clean: tit_for_tat()
if folow: RT_followers(key_=ky,max_=mx)
RT_last_day(dt,key_=ky)
def twice():
run_schedule(folow=True)
run_schedule(ky='#indiegame',clean=True,folow=True)
def loop_schedule(date):
while True:
for ky in ['#indiedev','#indiegame']:
print 'Day '+str(date)+' and keyword '+str(ky)
run_schedule(dt=date,ky=ky)
d = get_date()
if date != d:
date = d
break
#Main Functions for 'run_schedule()'
#Keeps track of who doesn't follow back. If an ID appears twice in this category then it
#unfollows. Bot follows ALL followers. Filter for undesirable accounts will be implemented soon.
def | ():
print 'Tit for Tat!'
follow_me = twitter_client.followers_ids() #who follows me
follow_you = twitter_client.friends_ids() #who do I follow
erros_ids = []
fol_len = len([1 for id_ in follow_me if id_ not in follow_you])
print 'Following '+str(fol_len)+' new users.'
for id_ in follow_me:
if id_ not in follow_you:
try:
twitter_client.create_friendship(id_)
time.sleep(5)
except:
erros_ids.append(id_)
unfollow,rem_len = remember_follow()
print 'Unfollowing '+str(len(unfollow))+'. Remembering '+str(rem_len)+'.'
for id_ in follow_you:
if id_ in unfollow:
try:
twitter_client.destroy_friendship(id_)
time.sleep(5)
except:
erros_ids.append(id_)
#Take the previous day's tweets. Rank by retweets relative to follower count, favouring smaller accounts. Try to RT the underdog!
def RT_last_day(date,key_='#indiedev'):
print 'RT '+str(key_)+' most relevant tweets from yesterday!'
d = latest_tweets(date=date,key_=key_)
d = rank_sort(d)
plot_distro(d[:5000])
a = RT_this(d[:10000],sleep_t=180)
return a
#Take timelines from followers and look for a keyword (default: #indiedev). RT each follower's top tweets (default=2).
def RT_followers(key_='#indiedev',max_=150,rts_=2): #900/1500 Rate limit
print 'RT '+str(key_)+' within followers!'
clct,twtn = [],0
friends = twitter_client.followers_ids()
#Get collection of tweets of followers.
for f in friends:
c=[]
try:
c = twitter_client.user_timeline(f,count=100)
except:
#print 'Cant retweet follower.'
pass
tcl = [ci for ci in c if '#indiedev' in ci.text and ci.in_reply_to_status_id == None]
ttcl = []
for t in tcl:
keep = True
for word in banwords:
if word in t.entities['hashtags']:
keep = False
if keep == True:
ttcl.append(t)
tcl = ttcl
if len(tcl) > 0:
dc = {str(i.id):int(i.retweet_count) for i in tcl}
dfc = pd.DataFrame.from_dict(dc,orient='index')
dfc = dfc.sort(0,ascending=False)
#Final collection of RTs considers only top statuses
clct = clct + list(dfc.index[:rts_])
#After selection of most desirable RTs, we randomly RT them.
rd.shuffle(clct)
print 'Going for '+str(len(clct[:max_]))+' tweets.'
for id_ in clct:
if twtn >= max_: break
try:
twtn+=1
twitter_client.retweet(id_)
print 'Tweeted '+str(twtn)
time.sleep(120)
twitter_client.create_favorite(id_)
except:
pass
#RT statuses from a given DataFrame d.
def RT_this(d,sleep_t=60,stop_at=500,allow_like=False,checkmedia=False):
err,twts,iters=0,0,0
for tweet in d.values:
if twts == stop_at:
print 'Got '+str(stop_at)+' tweets. Stopping.'
break
try:
like,publish,i = True,True,0
if type(tweet[11]) != None:
try:
if tweet[11] in banRT: like,publish = False,False
except:
pass
i=1
if len(tweet[27]) > 14:
if tweet[27][:15] in banTXT: like,publish = False,False
i=2
like = False
if like and rd.random() < 0.05:
twitter_client.create_favorite(tweet[8])
print 'Liked '+tweet[27]
if publish == False:
time.sleep(60)
i=3
if checkmedia and publish:
publish = filter_gif(tweet)
if publish:
try:
twitter_client.retweet(tweet[8])
i,twts=4,twts+1
print 'RTed : '+str(twts)+' at '+str(time.ctime())
time.sleep(sleep_t)
i=5
if type(tweet[11]) != None:
banRT.append(tweet[11])
if len(tweet[27]) > 14:
banTXT.append(tweet[27][:15])
except:
pass
try:
u_fol,u_fri=tweet[29]['followers_count'],tweet[29]['friends_count']
if (u_fol > 500 and u_fol < 10000) or (u_fol > 1.5*u_fri):
if i==4:
time.sleep(sleep_t)
i=6
twitter_client.create_friendship(tweet[29]['id'])
except:
pass
if like and allow_like:
try:
twitter_client.create_favorite(tweet[8])
print 'Liked '+tweet[27]
time.sleep(sleep_t/3)
except:
print 'Couldnt like'
save_banDF()
iters+=1
except:
err+=1
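#RT_this reads tweets positionally from the search DataFrame (e.g. tweet[8] is the status id, tweet[11] the in_reply_to_status_id, tweet[27] the text, tweet[29] the user dict), so it depends on the column order produced by latest_tweets().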
#StreamListener: DoggoListener is not currently in use, as RTs of the previous day and of followers
#use up most of the bot's daytime.
#List of words to avoid and a possible tweet lexicon
banword = ["porn","pron","p0rn","pr0n"]
doggolex = ['*doggehyped*']
class DoggoListener(StreamListener):
def on_data(self, data):
tweet = json.loads(data)
i,like,publish = 0,True,True
try:
for word in banword:
if word in tweet['text'].lower(): like,publish = False,False
i=1
if tweet.get('lang') and tweet.get('lang') != 'en': like,publish = False,False
i=2
try:
if type(tweet['user']['description']) != None:
if 'indie' not in tweet['user']['description'] or 'dev' not in tweet['user']['description'] or 'developer' not in tweet['user']['description']:
like = False
if tweet['user']['followers_count'] < 1000: publish = False
else:
like,publish = False,False
except:
like,publish = False,False
i=3
if type(tweet['in_reply_to_status_id']) != None:
if tweet['in_reply_to_status_id'] in banRT:
like,publish = False,False
i=4
if len(tweet['text']) > 14:
if tweet['text'][:15] in banTXT:
like,publish = False,False
i=5
if like:
twitter_client.create_favorite(tweet['id'])
print 'Liked '+tweet['text']
if publish == False:
time.sleep(10)
i=6
if publish:
twitter_client.retweet(tweet['id'])
#Some console output to check if stream is RTweeting.
try:
print 'RTd: '+str(tweet['text'])
except:
print '*Woof*'
i='t'
if type(tweet['in_reply_to_status_id']) != None:
i=7
banRT.append(tweet['in_reply_to_status_id'])
if len(tweet['text']) > 14:
i=8
banTXT.append(tweet['text'][:15])
save_banDF()
time.sleep(60)
except:
print i #For debugging purposes
return True
def run_doggo_run(): #Streams the doggolistener()
if __name__ == '__main__':
listener = DoggoListener()
stream = Stream(auth_handler, listener)
stream.filter(track=['indie game', 'indie games','gamedev','game dev','#indiedev',
'#gamedev','#indiegame','#steamNewRelease','#nintendoswitch'])
#Below are other helper functions that support run_schedule and run_doggo_run.
def save_banDF():
df = pd.Series(banRT)
df.to_pickle('ban_RT')
df = pd.Series(banTXT)
df.to_pickle('ban_TXT')
#Based on sixohsix
def latest_tweets(date=get_date(),key_="#indiedev #indiegame #gamedev"):
print "Search for "+str(date)
tweets = t.search.tweets(q=key_,count=5000,until=date)
dftwt = pd.DataFrame(tweets['statuses'])
while True:
try:
tweets = t.search.tweets(q=key_,max_id=min(list(dftwt.id)),count=5000,until=date)
tempdf = pd.DataFrame(tweets['statuses'])
dftwt = dftwt.append(tempdf)
except:
break
date = str(list(dftwt.created_at)[0][:3])
print "Check for only TODAYS ONLY!!!"
dftwt = only_with_date(date,dftwt)
return dftwt
def only_with_date(str_,df):
valid = []
for date in df.created_at:
if str_ in date[:3]:
valid.append(1)
else:
valid.append(0)
df['valid'] = valid
return df[df.valid == 1]
def plot_distro(d):
for column in ['favorite_count','retweet_count']:
t = list(d[column])
t.sort()
plt.title(column)
plt.plot(t)
plt.show()
a = [i[29]['followers_count'] for i in d.values]
a.sort()
plt.title('followers_count')
plt.plot(a)
plt.show()
def rank_sort(d):
d['follow'] = [i[29]['followers_count'] for i in d.values]
da = d[d.follow > 100]
a = [float(i[23])/float(i[29]['followers_count']) for i in da.values]
da['rank'] = list(a)
dd = da.sort_values(by='rank',ascending=False)
#ban,keep=[],[]
#for i in d.values:
# if i[27][:15] in ban:
# keep.append(0)
# else:
# keep.append(1)
# ban.append(i[27][:15])
#d['ktxt'] = keep
#d = d[d.ktxt == 1]
return dd
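#rank_sort keeps tweets whose authors have more than 100 followers and scores them by retweet count divided by follower count, so widely shared tweets from smaller accounts come first.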
def remember_follow():
try:
rem = list(pd.read_pickle('remember'))
except:
rem = []
follow_me = twitter_client.followers_ids() #who follows me
follow_you = twitter_client.friends_ids() #who do I follow
remember = [id_ for id_ in follow_you if id_ not in follow_me and id_ not in rem]
unfollow = [id_ for id_ in follow_you if id_ not in follow_me and id_ in rem ]
rdf = pd.Series(remember)
rdf.to_pickle('remember')
return unfollow,len(rdf)
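#remember_follow implements the two-strike rule used by tit_for_tat: a non-follower is remembered (pickled to 'remember') on the first pass and is only returned for unfollowing if still not following on a later pass.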
def unfollow_this(ids_):
for id_ in ids_:
twitter_client.destroy_friendship(id_)
time.sleep(10)
def filter_gif(tweet,v=False):
try:
if len(tweet[4]['media']) > 0: v=True
except:
pass
return v | tit_for_tat | identifier_name |
MakeGoodGamesBot.py | # -*- coding: utf-8 -*-
#MakeGoodGamesBot
#Required libraries
from twitter import *
from collections import Counter
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler, API
from tweepy import Stream
import time
import pandas as pd
import matplotlib.pyplot as plt
import json
import random as rd
#import seaborn as sns
#Auth keys and tokens.
ds = pd.read_pickle('dogge_secret')
consumer_key = ds[0]
consumer_secret = ds[1]
access_token = ds[2]
access_token_secret = ds[3]
#Initially explored two Python APIs, sixohsix and Tweepy. Now the bot uses both for different purposes.
#Probably going to consolidate on just one soon.
#Twitter (sixohsix) client
t = Twitter(auth=OAuth(access_token, access_token_secret, consumer_key, consumer_secret))
#Tweepy client
auth_handler = OAuthHandler(consumer_key, consumer_secret)
auth_handler.set_access_token(access_token, access_token_secret)
twitter_client = API(auth_handler)
#Banwords
banwords = ['#mplusrewards','#cheats','#cheat']
#Date and data
def get_date():
today = time.localtime()
date = str(today[0])+'-'+str(today[1])+'-'+str(today[2])
return date
try:
banRT = list(pd.read_pickle('ban_RT'))
banTXT = list(pd.read_pickle('ban_TXT'))
except:
banRT,banTXT = [],[]
#Schedule for a day. Default time between RT's set to 3 minutes.
#Steps: 1) First follow or unfollow according to tit_for_tat(),
# 2) RT followers,
# 3) RT highest ratio of number_of_RT/number_of_followers of previous day Statuses.
def run_schedule(dt=get_date(),ky='#indiedev',mx=150,clean=False,folow=False):
if clean: tit_for_tat()
if folow: RT_followers(key_=ky,max_=mx)
RT_last_day(dt,key_=ky)
def twice():
|
def loop_schedule(date):
while True:
for ky in ['#indiedev','#indiegame']:
print 'Day '+str(date)+' and keyword '+str(ky)
run_schedule(dt=date,ky=ky)
d = get_date()
if date != d:
date = d
break
#Main Functions for 'run_schedule()'
#Keeps track of who doesn't follow back. If an ID appears twice in this category then it
#unfollows. Bot follows ALL followers. Filter for undesirable accounts will be implemented soon.
def tit_for_tat():
print 'Tit for Tat!'
follow_me = twitter_client.followers_ids() #who follows me
follow_you = twitter_client.friends_ids() #who do I follow
erros_ids = []
fol_len = len([1 for id_ in follow_me if id_ not in follow_you])
print 'Following '+str(fol_len)+' new users.'
for id_ in follow_me:
if id_ not in follow_you:
try:
twitter_client.create_friendship(id_)
time.sleep(5)
except:
erros_ids.append(id_)
unfollow,rem_len = remember_follow()
print 'Unfollowing '+str(len(unfollow))+'. Remembering '+str(rem_len)+'.'
for id_ in follow_you:
if id_ in unfollow:
try:
twitter_client.destroy_friendship(id_)
time.sleep(5)
except:
erros_ids.append(id_)
#Take the previous day's tweets. Rank by retweets relative to follower count, favouring smaller accounts. Try to RT the underdog!
def RT_last_day(date,key_='#indiedev'):
print 'RT '+str(key_)+' most relevant tweets from yesterday!'
d = latest_tweets(date=date,key_=key_)
d = rank_sort(d)
plot_distro(d[:5000])
a = RT_this(d[:10000],sleep_t=180)
return a
#Take timelines from followers and look for a keyword (default: #indiedev). RT each follower's top tweets (default=2).
def RT_followers(key_='#indiedev',max_=150,rts_=2): #900/1500 Rate limit
print 'RT '+str(key_)+' within followers!'
clct,twtn = [],0
friends = twitter_client.followers_ids()
#Get collection of tweets of followers.
for f in friends:
c=[]
try:
c = twitter_client.user_timeline(f,count=100)
except:
#print 'Cant retweet follower.'
pass
tcl = [ci for ci in c if '#indiedev' in ci.text and ci.in_reply_to_status_id == None]
ttcl = []
for t in tcl:
keep = True
for word in banwords:
if word in t.entities['hashtags']:
keep = False
if keep == True:
ttcl.append(t)
tcl = ttcl
if len(tcl) > 0:
dc = {str(i.id):int(i.retweet_count) for i in tcl}
dfc = pd.DataFrame.from_dict(dc,orient='index')
dfc = dfc.sort(0,ascending=False)
#Final collection of RTs considers only top statuses
clct = clct + list(dfc.index[:rts_])
#After selection of most desirable RTs, we randomly RT them.
rd.shuffle(clct)
print 'Going for '+str(len(clct[:max_]))+' tweets.'
for id_ in clct:
if twtn >= max_: break
try:
twtn+=1
twitter_client.retweet(id_)
print 'Tweeted '+str(twtn)
time.sleep(120)
twitter_client.create_favorite(id_)
except:
pass
#RT statuses from a given DataFrame d.
def RT_this(d,sleep_t=60,stop_at=500,allow_like=False,checkmedia=False):
err,twts,iters=0,0,0
for tweet in d.values:
if twts == stop_at:
print 'Got '+str(stop_at)+' tweets. Stopping.'
break
try:
like,publish,i = True,True,0
if type(tweet[11]) != None:
try:
if tweet[11] in banRT: like,publish = False,False
except:
pass
i=1
if len(tweet[27]) > 14:
if tweet[27][:15] in banTXT: like,publish = False,False
i=2
like = False
if like and rd.random() < 0.05:
twitter_client.create_favorite(tweet[8])
print 'Liked '+tweet[27]
if publish == False:
time.sleep(60)
i=3
if checkmedia and publish:
publish = filter_gif(tweet)
if publish:
try:
twitter_client.retweet(tweet[8])
i,twts=4,twts+1
print 'RTed : '+str(twts)+' at '+str(time.ctime())
time.sleep(sleep_t)
i=5
if type(tweet[11]) != None:
banRT.append(tweet[11])
if len(tweet[27]) > 14:
banTXT.append(tweet[27][:15])
except:
pass
try:
u_fol,u_fri=tweet[29]['followers_count'],tweet[29]['friends_count']
if (u_fol > 500 and u_fol < 10000) or (u_fol > 1.5*u_fri):
if i==4:
time.sleep(sleep_t)
i=6
twitter_client.create_friendship(tweet[29]['id'])
except:
pass
if like and allow_like:
try:
twitter_client.create_favorite(tweet[8])
print 'Liked '+tweet[27]
time.sleep(sleep_t/3)
except:
print 'Couldnt like'
save_banDF()
iters+=1
except:
err+=1
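#RT_this reads tweets positionally from the search DataFrame (e.g. tweet[8] is the status id, tweet[11] the in_reply_to_status_id, tweet[27] the text, tweet[29] the user dict), so it depends on the column order produced by latest_tweets().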
#StreamListener: DoggoListener is not currently in use, as RTs of the previous day and of followers
#use up most of the bot's daytime.
#List of words to avoid and a possible tweet lexicon
banword = ["porn","pron","p0rn","pr0n"]
doggolex = ['*doggehyped*']
class DoggoListener(StreamListener):
def on_data(self, data):
tweet = json.loads(data)
i,like,publish = 0,True,True
try:
for word in banword:
if word in tweet['text'].lower(): like,publish = False,False
i=1
if tweet.get('lang') and tweet.get('lang') != 'en': like,publish = False,False
i=2
try:
if type(tweet['user']['description']) != None:
if 'indie' not in tweet['user']['description'] or 'dev' not in tweet['user']['description'] or 'developer' not in tweet['user']['description']:
like = False
if tweet['user']['followers_count'] < 1000: publish = False
else:
like,publish = False,False
except:
like,publish = False,False
i=3
if type(tweet['in_reply_to_status_id']) != None:
if tweet['in_reply_to_status_id'] in banRT:
like,publish = False,False
i=4
if len(tweet['text']) > 14:
if tweet['text'][:15] in banTXT:
like,publish = False,False
i=5
if like:
twitter_client.create_favorite(tweet['id'])
print 'Liked '+tweet['text']
if publish == False:
time.sleep(10)
i=6
if publish:
twitter_client.retweet(tweet['id'])
#Some console output to check if stream is RTweeting.
try:
print 'RTd: '+str(tweet['text'])
except:
print '*Woof*'
i='t'
if type(tweet['in_reply_to_status_id']) != None:
i=7
banRT.append(tweet['in_reply_to_status_id'])
if len(tweet['text']) > 14:
i=8
banTXT.append(tweet['text'][:15])
save_banDF()
time.sleep(60)
except:
print i #For debugging purposes
return True
def run_doggo_run(): #Streams the doggolistener()
if __name__ == '__main__':
listener = DoggoListener()
stream = Stream(auth_handler, listener)
stream.filter(track=['indie game', 'indie games','gamedev','game dev','#indiedev',
'#gamedev','#indiegame','#steamNewRelease','#nintendoswitch'])
#Below are other helper functions that support run_schedule and run_doggo_run.
def save_banDF():
df = pd.Series(banRT)
df.to_pickle('ban_RT')
df = pd.Series(banTXT)
df.to_pickle('ban_TXT')
#Based on sixohsix
def latest_tweets(date=get_date(),key_="#indiedev #indiegame #gamedev"):
print "Search for "+str(date)
tweets = t.search.tweets(q=key_,count=5000,until=date)
dftwt = pd.DataFrame(tweets['statuses'])
while True:
try:
tweets = t.search.tweets(q=key_,max_id=min(list(dftwt.id)),count=5000,until=date)
tempdf = pd.DataFrame(tweets['statuses'])
dftwt = dftwt.append(tempdf)
except:
break
date = str(list(dftwt.created_at)[0][:3])
print "Check for only TODAYS ONLY!!!"
dftwt = only_with_date(date,dftwt)
return dftwt
def only_with_date(str_,df):
valid = []
for date in df.created_at:
if str_ in date[:3]:
valid.append(1)
else:
valid.append(0)
df['valid'] = valid
return df[df.valid == 1]
def plot_distro(d):
for column in ['favorite_count','retweet_count']:
t = list(d[column])
t.sort()
plt.title(column)
plt.plot(t)
plt.show()
a = [i[29]['followers_count'] for i in d.values]
a.sort()
plt.title('followers_count')
plt.plot(a)
plt.show()
def rank_sort(d):
d['follow'] = [i[29]['followers_count'] for i in d.values]
da = d[d.follow > 100]
a = [float(i[23])/float(i[29]['followers_count']) for i in da.values]
da['rank'] = list(a)
dd = da.sort_values(by='rank',ascending=False)
#ban,keep=[],[]
#for i in d.values:
# if i[27][:15] in ban:
# keep.append(0)
# else:
# keep.append(1)
# ban.append(i[27][:15])
#d['ktxt'] = keep
#d = d[d.ktxt == 1]
return dd
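#rank_sort keeps tweets whose authors have more than 100 followers and scores them by retweet count divided by follower count, so widely shared tweets from smaller accounts come first.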
def remember_follow():
try:
rem = list(pd.read_pickle('remember'))
except:
rem = []
follow_me = twitter_client.followers_ids() #who follows me
follow_you = twitter_client.friends_ids() #who do I follow
remember = [id_ for id_ in follow_you if id_ not in follow_me and id_ not in rem]
unfollow = [id_ for id_ in follow_you if id_ not in follow_me and id_ in rem ]
rdf = pd.Series(remember)
rdf.to_pickle('remember')
return unfollow,len(rdf)
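#remember_follow implements the two-strike rule used by tit_for_tat: a non-follower is remembered (pickled to 'remember') on the first pass and is only returned for unfollowing if still not following on a later pass.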
def unfollow_this(ids_):
for id_ in ids_:
twitter_client.destroy_friendship(id_)
time.sleep(10)
def filter_gif(tweet,v=False):
try:
if len(tweet[4]['media']) > 0: v=True
except:
pass
return v | run_schedule(folow=True)
run_schedule(ky='#indiegame',clean=True,folow=True) | identifier_body |
MakeGoodGamesBot.py | # -*- coding: utf-8 -*-
#MakeGoodGamesBot
#Required libraries
from twitter import *
from collections import Counter
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler, API
from tweepy import Stream
import time
import pandas as pd
import matplotlib.pyplot as plt
import json
import random as rd
#import seaborn as sns
#Auth keys and tokens.
ds = pd.read_pickle('dogge_secret')
consumer_key = ds[0]
consumer_secret = ds[1]
access_token = ds[2]
access_token_secret = ds[3]
#Initially explored two Python APIs, sixohsix and Tweepy. Now the bot uses both for different purposes.
#Probably going to consolidate on just one soon.
#Twitter (sixohsix) client
t = Twitter(auth=OAuth(access_token, access_token_secret, consumer_key, consumer_secret))
#Tweepy client
auth_handler = OAuthHandler(consumer_key, consumer_secret)
auth_handler.set_access_token(access_token, access_token_secret)
twitter_client = API(auth_handler)
#Banwords
banwords = ['#mplusrewards','#cheats','#cheat']
#Date and data
def get_date():
today = time.localtime()
date = str(today[0])+'-'+str(today[1])+'-'+str(today[2])
return date
try:
banRT = list(pd.read_pickle('ban_RT'))
banTXT = list(pd.read_pickle('ban_TXT'))
except:
banRT,banTXT = [],[]
#Schedule for a day. Default time between RT's set to 3 minutes.
#Steps: 1) First follow or unfollow according to tit_for_tat(),
# 2) RT followers,
# 3) RT highest ratio of number_of_RT/number_of_followers of previous day Statuses.
def run_schedule(dt=get_date(),ky='#indiedev',mx=150,clean=False,folow=False):
if clean: |
if folow: RT_followers(key_=ky,max_=mx)
RT_last_day(dt,key_=ky)
def twice():
run_schedule(folow=True)
run_schedule(ky='#indiegame',clean=True,folow=True)
def loop_schedule(date):
while True:
for ky in ['#indiedev','#indiegame']:
print 'Day '+str(date)+' and keyword '+str(ky)
run_schedule(dt=date,ky=ky)
d = get_date()
if date != d:
date = d
break
#Main Functions for 'run_schedule()'
#Keeps track of who doesn't follow back. If an ID appears twice in this category then it
#unfollows. Bot follows ALL followers. Filter for undesirable accounts will be implemented soon.
def tit_for_tat():
print 'Tit for Tat!'
follow_me = twitter_client.followers_ids() #who follows me
follow_you = twitter_client.friends_ids() #who do I follow
erros_ids = []
fol_len = len([1 for id_ in follow_me if id_ not in follow_you])
print 'Following '+str(fol_len)+' new users.'
for id_ in follow_me:
if id_ not in follow_you:
try:
twitter_client.create_friendship(id_)
time.sleep(5)
except:
erros_ids.append(id_)
unfollow,rem_len = remember_follow()
print 'Unfollowing '+str(len(unfollow))+'. Remembering '+str(rem_len)+'.'
for id_ in follow_you:
if id_ in unfollow:
try:
twitter_client.destroy_friendship(id_)
time.sleep(5)
except:
erros_ids.append(id_)
#Take the previous day's tweets. Rank by retweets relative to follower count, favouring smaller accounts. Try to RT the underdog!
def RT_last_day(date,key_='#indiedev'):
print 'RT '+str(key_)+' most relevant tweets from yesterday!'
d = latest_tweets(date=date,key_=key_)
d = rank_sort(d)
plot_distro(d[:5000])
a = RT_this(d[:10000],sleep_t=180)
return a
#Take timelines from followers and look for a keyword (default: #indiedev). RT each follower's top tweets (default=2).
def RT_followers(key_='#indiedev',max_=150,rts_=2): #900/1500 Rate limit
print 'RT '+str(key_)+' within followers!'
clct,twtn = [],0
friends = twitter_client.followers_ids()
#Get collection of tweets of followers.
for f in friends:
c=[]
try:
c = twitter_client.user_timeline(f,count=100)
except:
#print 'Cant retweet follower.'
pass
tcl = [ci for ci in c if '#indiedev' in ci.text and ci.in_reply_to_status_id == None]
ttcl = []
for t in tcl:
keep = True
for word in banwords:
if word in t.entities['hashtags']:
keep = False
if keep == True:
ttcl.append(t)
tcl = ttcl
if len(tcl) > 0:
dc = {str(i.id):int(i.retweet_count) for i in tcl}
dfc = pd.DataFrame.from_dict(dc,orient='index')
dfc = dfc.sort(0,ascending=False)
#Final collection of RTs considers only top statuses
clct = clct + list(dfc.index[:rts_])
#After selection of most desirable RTs, we randomly RT them.
rd.shuffle(clct)
print 'Going for '+str(len(clct[:max_]))+' tweets.'
for id_ in clct:
if twtn >= max_: break
try:
twtn+=1
twitter_client.retweet(id_)
print 'Tweeted '+str(twtn)
time.sleep(120)
twitter_client.create_favorite(id_)
except:
pass
#RT statuses from a given DataFrame d.
def RT_this(d,sleep_t=60,stop_at=500,allow_like=False,checkmedia=False):
err,twts,iters=0,0,0
for tweet in d.values:
if twts == stop_at:
print 'Got '+str(stop_at)+' tweets. Stopping.'
break
try:
like,publish,i = True,True,0
if type(tweet[11]) != None:
try:
if tweet[11] in banRT: like,publish = False,False
except:
pass
i=1
if len(tweet[27]) > 14:
if tweet[27][:15] in banTXT: like,publish = False,False
i=2
like = False
if like and rd.random() < 0.05:
twitter_client.create_favorite(tweet[8])
print 'Liked '+tweet[27]
if publish == False:
time.sleep(60)
i=3
if checkmedia and publish:
publish = filter_gif(tweet)
if publish:
try:
twitter_client.retweet(tweet[8])
i,twts=4,twts+1
print 'RTed : '+str(twts)+' at '+str(time.ctime())
time.sleep(sleep_t)
i=5
if tweet[11] is not None:
banRT.append(tweet[11])
if len(tweet[27]) > 14:
banTXT.append(tweet[27][:15])
except:
pass
try:
u_fol,u_fri=tweet[29]['followers_count'],tweet[29]['friends_count']
if (u_fol > 500 and u_fol < 10000) or (u_fol > 1.5*u_fri):
if i==4:
time.sleep(sleep_t)
i=6
twitter_client.create_friendship(tweet[29]['id'])
except:
pass
if like and allow_like:
try:
twitter_client.create_favorite(tweet[8])
print 'Liked '+tweet[27]
time.sleep(sleep_t/3)
except:
print 'Couldnt like'
save_banDF()
iters+=1
except:
err+=1
#StreamListener: DoggoListener is not currently in use, as RTs of the previous day and of followers
#use most of the bot's daytime.
#Banned words to avoid and a small tweet lexicon
banword = ["porn","pron","p0rn","pr0n"]
doggolex = ['*doggehyped*']
class DoggoListener(StreamListener):
def on_data(self, data):
tweet = json.loads(data)
i,like,publish = 0,True,True
try:
for word in banword:
if word in tweet['text'].lower(): like,publish = False,False
i=1
if tweet.get('lang') and tweet.get('lang') != 'en': like,publish = False,False
i=2
try:
if tweet['user']['description'] is not None:
if 'indie' not in tweet['user']['description'] or 'dev' not in tweet['user']['description'] or 'developer' not in tweet['user']['description']:
like = False
if tweet['user']['followers_count'] < 1000: publish = False
else:
like,publish = False,False
except:
like,publish = False,False
i=3
if tweet['in_reply_to_status_id'] is not None:
if tweet['in_reply_to_status_id'] in banRT:
like,publish = False,False
i=4
if len(tweet['text']) > 14:
if tweet['text'][:15] in banTXT:
like,publish = False,False
i=5
if like:
twitter_client.create_favorite(tweet['id'])
print 'Liked '+tweet['text']
if publish == False:
time.sleep(10)
i=6
if publish:
twitter_client.retweet(tweet['id'])
#Some console output to check if stream is RTweeting.
try:
print 'RTd: '+str(tweet['text'])
except:
print '*Woof*'
i='t'
if tweet['in_reply_to_status_id'] is not None:
i=7
banRT.append(tweet['in_reply_to_status_id'])
if len(tweet['text']) > 14:
i=8
banTXT.append(tweet['text'][:15])
save_banDF()
time.sleep(60)
except:
print i #For debugging purposes
return True
def run_doggo_run(): #Streams the doggolistener()
if __name__ == '__main__':
listener = DoggoListener()
stream = Stream(auth_handler, listener)
stream.filter(track=['indie game', 'indie games','gamedev','game dev','#indiedev',
'#gamedev','#indiegame','#steamNewRelease','#nintendoswitch'])
#From here on you will find other useful functions that support run_schedule() and run_doggo_run().
def save_banDF():
df = pd.Series(banRT)
df.to_pickle('ban_RT')
df = pd.Series(banTXT)
df.to_pickle('ban_TXT')
#Uses the sixohsix Twitter client
def latest_tweets(date=get_date(),key_="#indiedev #indiegame #gamedev"):
print "Search for "+str(date)
tweets = t.search.tweets(q=key_,count=5000,until=date)
dftwt = pd.DataFrame(tweets['statuses'])
while True:
try:
tweets = t.search.tweets(q=key_,max_id=min(list(dftwt.id)),count=5000,until=date)
tempdf = pd.DataFrame(tweets['statuses'])
dftwt = dftwt.append(tempdf)
except:
break
date = str(list(dftwt.created_at)[0][:3])
print "Check for only TODAYS ONLY!!!"
dftwt = only_with_date(date,dftwt)
return dftwt
def only_with_date(str_,df):
valid = []
for date in df.created_at:
if str_ in date[:3]:
valid.append(1)
else:
valid.append(0)
df['valid'] = valid
return df[df.valid == 1]
def plot_distro(d):
for column in ['favorite_count','retweet_count']:
t = list(d[column])
t.sort()
plt.title(column)
plt.plot(t)
plt.show()
a = [i[29]['followers_count'] for i in d.values]
a.sort()
plt.title('followers_count')
plt.plot(a)
plt.show()
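#rank_sort scores each status by retweet count divided by the author's follower count (columns 23
#and 29 of the search-result DataFrame) after dropping authors with 100 followers or fewer, so
#e.g. 30 RTs from a 300-follower account (0.10) outranks 100 RTs from a 50,000-follower account (0.002).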
def rank_sort(d):
d['follow'] = [i[29]['followers_count'] for i in d.values]
da = d[d.follow > 100]
a = [float(i[23])/float(i[29]['followers_count']) for i in da.values]
da['rank'] = list(a)
dd = da.sort_values(by='rank',ascending=False)
#ban,keep=[],[]
#for i in d.values:
# if i[27][:15] in ban:
# keep.append(0)
# else:
# keep.append(1)
# ban.append(i[27][:15])
#d['ktxt'] = keep
#d = d[d.ktxt == 1]
return dd
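#remember_follow implements a two-strikes rule: an ID we follow that does not follow back is
#'remembered' (pickled) the first time it is seen and is returned in the unfollow list if it still
#does not follow back on a later run.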
def remember_follow():
try:
rem = list(pd.read_pickle('remember'))
except:
rem = []
follow_me = twitter_client.followers_ids() #who follows me
follow_you = twitter_client.friends_ids() #who do I follow
remember = [id_ for id_ in follow_you if id_ not in follow_me and id_ not in rem]
unfollow = [id_ for id_ in follow_you if id_ not in follow_me and id_ in rem ]
rdf = pd.Series(remember)
rdf.to_pickle('remember')
return unfollow,len(rdf)
def unfollow_this(ids_):
for id_ in ids_:
twitter_client.destroy_friendship(id_)
time.sleep(10)
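#filter_gif returns True only when the tweet's entities include media, so RT_this(checkmedia=True)
#skips statuses without attached media.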
def filter_gif(tweet,v=False):
try:
if len(tweet[4]['media']) > 0: v=True
except:
pass
return v | tit_for_tat() | conditional_block |
MakeGoodGamesBot.py | # -*- coding: utf-8 -*-
#MakeGoodGamesBot
#Required libraries
from twitter import *
from collections import Counter
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler, API
from tweepy import Stream
import time
import pandas as pd
import matplotlib.pyplot as plt
import json
import random as rd
#import seaborn as sns
#Auth keys and tokens.
ds = pd.read_pickle('dogge_secret')
consumer_key = ds[0]
consumer_secret = ds[1]
access_token = ds[2]
access_token_secret = ds[3]
#Initially explored two Python APIs, sixohsix and Tweepy. Now the bot uses both for different purposes.
#Probably going to consolidate to just one soon.
#Twitter (sixohsix) client
t = Twitter(auth=OAuth(access_token, access_token_secret, consumer_key, consumer_secret))
#Tweepy client
auth_handler = OAuthHandler(consumer_key, consumer_secret)
auth_handler.set_access_token(access_token, access_token_secret)
twitter_client = API(auth_handler)
#Banwords
banwords = ['#mplusrewards','#cheats','#cheat']
#Date and data
def get_date():
today = time.localtime()
date = str(today[0])+'-'+str(today[1])+'-'+str(today[2])
return date
try:
banRT = list(pd.read_pickle('ban_RT'))
banTXT = list(pd.read_pickle('ban_TXT'))
except:
banRT,banTXT = [],[]
#Schedule for a day. Default time between RTs set to 3 minutes.
#Steps: 1) First follow or unfollow according to tit_for_tat(),
# 2) RT followers,
# 3) RT the previous day's statuses with the highest number_of_RT/number_of_followers ratio.
def run_schedule(dt=get_date(),ky='#indiedev',mx=150,clean=False,folow=False):
if clean: tit_for_tat()
if folow: RT_followers(key_=ky,max_=mx)
RT_last_day(dt,key_=ky)
def twice():
run_schedule(folow=True)
run_schedule(ky='#indiegame',clean=True,folow=True)
def loop_schedule(date):
while True:
for ky in ['#indiedev','#indiegame']:
print 'Day '+str(date)+' and keyword '+str(ky)
run_schedule(dt=date,ky=ky)
d = get_date()
if date != d:
date = d
break
#Main Functions for 'run_schedule()'
#Keeps track of who doesn't follow back. If an ID appears twice in this category then it is
#unfollowed. The bot follows ALL followers. A filter for undesirable accounts will be implemented soon.
def tit_for_tat():
print 'Tit for Tat!'
follow_me = twitter_client.followers_ids() #who follows me
follow_you = twitter_client.friends_ids() #who do I follow
erros_ids = []
fol_len = len([1 for id_ in follow_me if id_ not in follow_you])
print 'Following '+str(fol_len)+' new users.'
for id_ in follow_me:
if id_ not in follow_you:
try:
twitter_client.create_friendship(id_)
time.sleep(5)
except:
erros_ids.append(id_)
unfollow,rem_len = remember_follow()
print 'Unfollowing '+str(len(unfollow))+'. Remembering '+str(rem_len)+'.'
for id_ in follow_you:
if id_ in unfollow:
try:
twitter_client.destroy_friendship(id_)
time.sleep(5)
except:
erros_ids.append(id_)
#Take the previous day's tweets. Rank retweets from smaller accounts higher. Try to RT the underdog!
def RT_last_day(date,key_='#indiedev'):
print 'RT '+str(key_)+' most relevant tweets from yesterday!'
d = latest_tweets(date=date,key_=key_)
d = rank_sort(d)
plot_distro(d[:5000])
a = RT_this(d[:10000],sleep_t=180)
return a
#Takes timelines from followers and looks for a keyword (default: #indiedev). RTs the top tweets (default=2).
def RT_followers(key_='#indiedev',max_=150,rts_=2): #900/1500 Rate limit
print 'RT '+str(key_)+' within followers!'
clct,twtn = [],0
friends = twitter_client.followers_ids()
#Get collection of tweets of followers.
for f in friends:
c=[]
try:
c = twitter_client.user_timeline(f,count=100)
except:
#print 'Cant retweet follower.'
pass
tcl = [ci for ci in c if key_ in ci.text and ci.in_reply_to_status_id is None]
ttcl = []
for t in tcl:
keep = True
for word in banwords:
if word.lstrip('#') in [h['text'].lower() for h in t.entities['hashtags']]:
keep = False
if keep == True:
ttcl.append(t)
tcl = ttcl
if len(tcl) > 0:
dc = {str(i.id):int(i.retweet_count) for i in tcl}
dfc = pd.DataFrame.from_dict(dc,orient='index')
dfc = dfc.sort_values(by=0,ascending=False)
#Final collection of RTs considers only top statuses
clct = clct + list(dfc.index[:rts_])
#After selecting the most desirable RTs, we retweet them in random order.
rd.shuffle(clct)
print 'Going for '+str(len(clct[:max_]))+' tweets.'
for id_ in clct:
if twtn >= max_: break
try:
twtn+=1
twitter_client.retweet(id_)
print 'Tweeted '+str(twtn)
time.sleep(120)
twitter_client.create_favorite(id_)
except:
pass
#RT statuses from a given DataFrame d.
def RT_this(d,sleep_t=60,stop_at=500,allow_like=False,checkmedia=False):
err,twts,iters=0,0,0
for tweet in d.values:
if twts == stop_at:
print 'Got '+str(stop_at)+' tweets. Stopping.'
break
try:
like,publish,i = True,True,0
if tweet[11] is not None:
try:
if tweet[11] in banRT: like,publish = False,False
except:
pass
i=1
if len(tweet[27]) > 14:
if tweet[27][:15] in banTXT: like,publish = False,False
i=2
like = False
if like and rd.random() < 0.05:
twitter_client.create_favorite(tweet[8])
print 'Liked '+tweet[27]
if publish == False:
time.sleep(60)
i=3
if checkmedia and publish:
publish = filter_gif(tweet) | i,twts=4,twts+1
print 'RTed : '+str(twts)+' at '+str(time.ctime())
time.sleep(sleep_t)
i=5
if tweet[11] is not None:
banRT.append(tweet[11])
if len(tweet[27]) > 14:
banTXT.append(tweet[27][:15])
except:
pass
try:
u_fol,u_fri=tweet[29]['followers_count'],tweet[29]['friends_count']
if (u_fol > 500 and u_fol < 10000) or (u_fol > 1.5*u_fri):
if i==4:
time.sleep(sleep_t)
i=6
twitter_client.create_friendship(tweet[29]['id'])
except:
pass
if like and allow_like:
try:
twitter_client.create_favorite(tweet[8])
print 'Liked '+tweet[27]
time.sleep(sleep_t/3)
except:
print 'Couldnt like'
save_banDF()
iters+=1
except:
err+=1
#StreamListener: DoggoListener is not currently in use, as RTs of the previous day and of followers
#use most of the bot's daytime.
#Banned words to avoid and a small tweet lexicon
banword = ["porn","pron","p0rn","pr0n"]
doggolex = ['*doggehyped*']
class DoggoListener(StreamListener):
def on_data(self, data):
tweet = json.loads(data)
i,like,publish = 0,True,True
try:
for word in banword:
if word in tweet['text'].lower(): like,publish = False,False
i=1
if tweet.get('lang') and tweet.get('lang') != 'en': like,publish = False,False
i=2
try:
if tweet['user']['description'] is not None:
if 'indie' not in tweet['user']['description'] or 'dev' not in tweet['user']['description'] or 'developer' not in tweet['user']['description']:
like = False
if tweet['user']['followers_count'] < 1000: publish = False
else:
like,publish = False,False
except:
like,publish = False,False
i=3
if tweet['in_reply_to_status_id'] is not None:
if tweet['in_reply_to_status_id'] in banRT:
like,publish = False,False
i=4
if len(tweet['text']) > 14:
if tweet['text'][:15] in banTXT:
like,publish = False,False
i=5
if like:
twitter_client.create_favorite(tweet['id'])
print 'Liked '+tweet['text']
if publish == False:
time.sleep(10)
i=6
if publish:
twitter_client.retweet(tweet['id'])
#Some console output to check if stream is RTweeting.
try:
print 'RTd: '+str(tweet['text'])
except:
print '*Woof*'
i='t'
if tweet['in_reply_to_status_id'] is not None:
i=7
banRT.append(tweet['in_reply_to_status_id'])
if len(tweet['text']) > 14:
i=8
banTXT.append(tweet['text'][:15])
save_banDF()
time.sleep(60)
except:
print i #For debugging purposes
return True
def run_doggo_run(): #Streams the doggolistener()
if __name__ == '__main__':
listener = DoggoListener()
stream = Stream(auth_handler, listener)
stream.filter(track=['indie game', 'indie games','gamedev','game dev','#indiedev',
'#gamedev','#indiegame','#steamNewRelease','#nintendoswitch'])
#From here on you will find other useful functions that support run_schedule() and run_doggo_run().
def save_banDF():
df = pd.Series(banRT)
df.to_pickle('ban_RT')
df = pd.Series(banTXT)
df.to_pickle('ban_TXT')
#Uses the sixohsix Twitter client
def latest_tweets(date=get_date(),key_="#indiedev #indiegame #gamedev"):
print "Search for "+str(date)
tweets = t.search.tweets(q=key_,count=5000,until=date)
dftwt = pd.DataFrame(tweets['statuses'])
while True:
try:
tweets = t.search.tweets(q=key_,max_id=min(list(dftwt.id)),count=5000,until=date)
tempdf = pd.DataFrame(tweets['statuses'])
dftwt = dftwt.append(tempdf)
except:
break
date = str(list(dftwt.created_at)[0][:3])
print "Check for only TODAYS ONLY!!!"
dftwt = only_with_date(date,dftwt)
return dftwt
def only_with_date(str_,df):
valid = []
for date in df.created_at:
if str_ in date[:3]:
valid.append(1)
else:
valid.append(0)
df['valid'] = valid
return df[df.valid == 1]
def plot_distro(d):
for column in ['favorite_count','retweet_count']:
t = list(d[column])
t.sort()
plt.title(column)
plt.plot(t)
plt.show()
a = [i[29]['followers_count'] for i in d.values]
a.sort()
plt.title('followers_count')
plt.plot(a)
plt.show()
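#rank_sort scores each status by retweet count divided by the author's follower count (columns 23
#and 29 of the search-result DataFrame) after dropping authors with 100 followers or fewer, so
#e.g. 30 RTs from a 300-follower account (0.10) outranks 100 RTs from a 50,000-follower account (0.002).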
def rank_sort(d):
d['follow'] = [i[29]['followers_count'] for i in d.values]
da = d[d.follow > 100]
a = [float(i[23])/float(i[29]['followers_count']) for i in da.values]
da['rank'] = list(a)
dd = da.sort_values(by='rank',ascending=False)
#ban,keep=[],[]
#for i in d.values:
# if i[27][:15] in ban:
# keep.append(0)
# else:
# keep.append(1)
# ban.append(i[27][:15])
#d['ktxt'] = keep
#d = d[d.ktxt == 1]
return dd
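#remember_follow implements a two-strikes rule: an ID we follow that does not follow back is
#'remembered' (pickled) the first time it is seen and is returned in the unfollow list if it still
#does not follow back on a later run.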
def remember_follow():
try:
rem = list(pd.read_pickle('remember'))
except:
rem = []
follow_me = twitter_client.followers_ids() #who follows me
follow_you = twitter_client.friends_ids() #who do I follow
remember = [id_ for id_ in follow_you if id_ not in follow_me and id_ not in rem]
unfollow = [id_ for id_ in follow_you if id_ not in follow_me and id_ in rem ]
rdf = pd.Series(remember)
rdf.to_pickle('remember')
return unfollow,len(rdf)
def unfollow_this(ids_):
for id_ in ids_:
twitter_client.destroy_friendship(id_)
time.sleep(10)
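#filter_gif returns True only when the tweet's entities include media, so RT_this(checkmedia=True)
#skips statuses without attached media.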
def filter_gif(tweet,v=False):
try:
if len(tweet[4]['media']) > 0: v=True
except:
pass
return v | if publish:
try:
twitter_client.retweet(tweet[8]) | random_line_split |
calibrate_gripper_g1r1.py | #Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
#import Numeric as nu
import math
import os
import sys
import yaml
import m3.unit_conversion as m3u
from m3qa.calibrate import *
from m3qa.calibrate_sensors import *
from m3qa.calibrate_actuator_ec_r1 import *
import m3.actuator_ec_pb2 as aec
# ####################################################################################################
config_default_g1_j0={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
'pwm_torque':[-1000,-1000],
'joint_limits':[0,315.0]
}
}
config_default_g1_j1={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider=3V3
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
'pwm_torque':[-1000,-1000],
'joint_limits':[0,315.0]
}
}
class M3Calibrate_Gripper_G1R1(M3CalibrateActuatorEcR1):
def __init__(self):
M3CalibrateActuatorEcR1.__init__(self)
self.joint_names=['Left Digit J0',
'Right Digit J1']
self.config_default=[
config_default_g1_j0,
config_default_g1_j1]
def start(self,ctype):
if not M3CalibrateActuatorEcR1.start(self,ctype):
return False
self.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:])
self.calib_default=self.config_default[self.jid]['calib']
self.param_default=self.config_default[self.jid]['param']
self.param_internal=self.config_default[self.jid]['param_internal']
print 'Calibrating joint',self.joint_names[self.jid]
return True
def do_task(self,ct):
if ct=='ch':
self.reset_sensor('torque')
self.calibrate_torque()
self.write_config()
return True
if ct=='tt':
self.reset_sensor('theta')
self.calibrate_theta()
self.write_config()
return True
if M3CalibrateActuatorEc.do_task(self,ct):
return True
return False
def print_tasks(self):
M3CalibrateActuatorEcR1.print_tasks(self)
print 'ch: calibrate torque'
print 'tt: calibrate theta'
def display_sensors(self):
M3CalibrateActuatorEcR1.display_sensors(self) | print 'Pos: (mm) : '+'%3.3f'%pos+' Qei On '+'%d'%q_on+' Qei Period '+'%d'%q_p+' Qei Rollover '+'%d'%q_r
raw=self.comp_ec.status.adc_torque
c=self.torque.raw_2_mNm(self.comp_rt.config['calib']['torque'],raw)
mN=c/self.comp_j.config['calib']['cb_drive_radius_m']
print 'Force: (g) : '+'%3.2f'%m3u.mN2g(mN)+' (mN): '+'%3.2f'%mN+' (ADC) '+'%d'%raw
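#calibrate_torque fits a first-order polynomial mapping raw ADC counts to joint torque (mNm) from
#three samples: zero load, then a 1 kg weight hung near the slider while driving the joint in each
#direction; the weight is converted to torque through the configured cb_drive_radius_m.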
def calibrate_torque(self):
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Place digit in zero load condition'
print 'Hit enter when ready'
raw_input()
self.step()
raw_a=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_a=0
print 'Hang 1Kg weight from gripper near slider'
print 'Hit enter to move joint in first direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_b=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
print 'Was load in the opening direction [y]?'
if m3t.get_yes_no('y'):
load_b=m3u.g2mN(1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
else:
load_b=m3u.g2mN(-1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
print 'Hit enter to move joint in second direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_c=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_c=-1*load_b
log_adc_torque=[raw_a,raw_b,raw_c]
log_load_mNm=[load_a,load_b,load_c]
poly,inv_poly=self.get_polyfit_to_data(x=log_adc_torque,y=log_load_mNm,n=1)
self.write_raw_calibration({'log_adc_torque':log_adc_torque,'log_load_mNm':log_load_mNm,
'cb_torque':poly,'cb_inv_torque':inv_poly})
self.comp_rt.config['calib']['torque']['cb_torque']=poly
self.comp_rt.config['calib']['torque']['cb_inv_torque']=inv_poly
print 'Poly',poly
s=m3tc.PolyEval(poly,[raw_a,raw_b,raw_c])
m3t.mplot2(range(len(log_adc_torque)),log_load_mNm,s,xlabel='Samples',ylabel='Torque (mNm)',
y1name='load',y2name='raw')
def calibrate_theta(self):
pconfig=self.comp_ec.param.config #disable qei limits
self.comp_ec.param.config=0
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Moving joint to first limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_a=self.comp_ec.status.qei_on
q_p_a=self.comp_ec.status.qei_period
q_r_a=self.comp_ec.status.qei_rollover
print 'RawA',q_on_a
print 'Moving joint to second limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_b=self.comp_ec.status.qei_on
q_p_b=self.comp_ec.status.qei_period
q_r_b=self.comp_ec.status.qei_rollover
print 'Rawb',q_on_b
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'Did this last motion open the gripper [y]?' #At zero position
if m3t.get_yes_no('y'):
theta_b=0
theta_a=abs(theta_bs-theta_as)
else:
theta_a=0
theta_b=abs(theta_bs-theta_as)
self.comp_rt.set_mode_off()
self.comp_ec.param.config=pconfig #enable qei limits
self.step()
self.proxy.make_safe_operational(self.name_rt)
self.step()
print 'Raw',[theta_as,theta_bs]
print 'True',[theta_a,theta_b]
poly,inv_poly=self.get_polyfit_to_data([theta_as,theta_bs],[theta_a,theta_b],n=1)
self.comp_rt.config['calib']['theta']['cb_scale']=poly[0]
self.comp_rt.config['calib']['theta']['cb_bias']=poly[1]
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'New calibrated range',theta_as,theta_bs
max_q=max(theta_as,theta_bs)
min_q=min(theta_as,theta_bs)
if self.comp_j is not None:
print 'Setting joint limits to',min_q,max_q
print 'Expected joint limits of',self.param_internal['joint_limits']
self.comp_j.param.max_q=float(max_q)
self.comp_j.param.min_q=float(min_q)
else:
print 'Joint component missing. Unable to set joint limits to',min_q,max_q
#Assume 0-Ndeg, where N is defined by the encoder soft limits
self.comp_ec.config['param']['qei_min']=min(q_on_a,q_on_b)+100
self.comp_ec.config['param']['qei_max']=max(q_on_a,q_on_b)-100
self.comp_ec.param.qei_min=min(q_on_a,q_on_b)+100
self.comp_ec.param.qei_max=max(q_on_a,q_on_b)-100
print 'Setting DSP qei min/max to',self.comp_ec.config['param']['qei_min'],self.comp_ec.config['param']['qei_max'] | q_on=self.comp_ec.status.qei_on
q_p=self.comp_ec.status.qei_period
q_r=self.comp_ec.status.qei_rollover
c=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on,q_p,q_r)
pos=1000.0*math.pi*2*self.comp_j.config['calib']['cb_drive_radius_m']*c/360.0 | random_line_split |
calibrate_gripper_g1r1.py | #Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
#import Numeric as nu
import math
import os
import sys
import yaml
import m3.unit_conversion as m3u
from m3qa.calibrate import *
from m3qa.calibrate_sensors import *
from m3qa.calibrate_actuator_ec_r1 import *
import m3.actuator_ec_pb2 as aec
# ####################################################################################################
config_default_g1_j0={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
'pwm_torque':[-1000,-1000],
'joint_limits':[0,315.0]
}
}
config_default_g1_j1={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider=3V3
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
'pwm_torque':[-1000,-1000],
'joint_limits':[0,315.0]
}
}
class M3Calibrate_Gripper_G1R1(M3CalibrateActuatorEcR1):
def __init__(self):
M3CalibrateActuatorEcR1.__init__(self)
self.joint_names=['Left Digit J0',
'Right Digit J1']
self.config_default=[
config_default_g1_j0,
config_default_g1_j1]
def start(self,ctype):
if not M3CalibrateActuatorEcR1.start(self,ctype):
return False
self.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:])
self.calib_default=self.config_default[self.jid]['calib']
self.param_default=self.config_default[self.jid]['param']
self.param_internal=self.config_default[self.jid]['param_internal']
print 'Calibrating joint',self.joint_names[self.jid]
return True
def do_task(self,ct):
if ct=='ch':
|
if ct=='tt':
self.reset_sensor('theta')
self.calibrate_theta()
self.write_config()
return True
if M3CalibrateActuatorEc.do_task(self,ct):
return True
return False
def print_tasks(self):
M3CalibrateActuatorEcR1.print_tasks(self)
print 'ch: calibrate torque'
print 'tt: calibrate theta'
def display_sensors(self):
M3CalibrateActuatorEcR1.display_sensors(self)
q_on=self.comp_ec.status.qei_on
q_p=self.comp_ec.status.qei_period
q_r=self.comp_ec.status.qei_rollover
c=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on,q_p,q_r)
pos=1000.0*math.pi*2*self.comp_j.config['calib']['cb_drive_radius_m']*c/360.0
print 'Pos: (mm) : '+'%3.3f'%pos+' Qei On '+'%d'%q_on+' Qei Period '+'%d'%q_p+' Qei Rollover '+'%d'%q_r
raw=self.comp_ec.status.adc_torque
c=self.torque.raw_2_mNm(self.comp_rt.config['calib']['torque'],raw)
mN=c/self.comp_j.config['calib']['cb_drive_radius_m']
print 'Force: (g) : '+'%3.2f'%m3u.mN2g(mN)+' (mN): '+'%3.2f'%mN+' (ADC) '+'%d'%raw
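#calibrate_torque fits a first-order polynomial mapping raw ADC counts to joint torque (mNm) from
#three samples: zero load, then a 1 kg weight hung near the slider while driving the joint in each
#direction; the weight is converted to torque through the configured cb_drive_radius_m.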
def calibrate_torque(self):
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Place digit in zero load condition'
print 'Hit enter when ready'
raw_input()
self.step()
raw_a=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_a=0
print 'Hang 1Kg weight from gripper near slider'
print 'Hit enter to move joint in first direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_b=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
print 'Was load in the opening direction [y]?'
if m3t.get_yes_no('y'):
load_b=m3u.g2mN(1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
else:
load_b=m3u.g2mN(-1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
print 'Hit enter to move joint in second direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_c=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_c=-1*load_b
log_adc_torque=[raw_a,raw_b,raw_c]
log_load_mNm=[load_a,load_b,load_c]
poly,inv_poly=self.get_polyfit_to_data(x=log_adc_torque,y=log_load_mNm,n=1)
self.write_raw_calibration({'log_adc_torque':log_adc_torque,'log_load_mNm':log_load_mNm,
'cb_torque':poly,'cb_inv_torque':inv_poly})
self.comp_rt.config['calib']['torque']['cb_torque']=poly
self.comp_rt.config['calib']['torque']['cb_inv_torque']=inv_poly
print 'Poly',poly
s=m3tc.PolyEval(poly,[raw_a,raw_b,raw_c])
m3t.mplot2(range(len(log_adc_torque)),log_load_mNm,s,xlabel='Samples',ylabel='Torque (mNm)',
y1name='load',y2name='raw')
def calibrate_theta(self):
pconfig=self.comp_ec.param.config #disable qei limits
self.comp_ec.param.config=0
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Moving joint to first limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_a=self.comp_ec.status.qei_on
q_p_a=self.comp_ec.status.qei_period
q_r_a=self.comp_ec.status.qei_rollover
print 'RawA',q_on_a
print 'Moving joint to second limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_b=self.comp_ec.status.qei_on
q_p_b=self.comp_ec.status.qei_period
q_r_b=self.comp_ec.status.qei_rollover
print 'Rawb',q_on_b
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'Did this last motion open the gripper [y]?' #At zero position
if m3t.get_yes_no('y'):
theta_b=0
theta_a=abs(theta_bs-theta_as)
else:
theta_a=0
theta_b=abs(theta_bs-theta_as)
self.comp_rt.set_mode_off()
self.comp_ec.param.config=pconfig #enable qei limits
self.step()
self.proxy.make_safe_operational(self.name_rt)
self.step()
print 'Raw',[theta_as,theta_bs]
print 'True',[theta_a,theta_b]
poly,inv_poly=self.get_polyfit_to_data([theta_as,theta_bs],[theta_a,theta_b],n=1)
self.comp_rt.config['calib']['theta']['cb_scale']=poly[0]
self.comp_rt.config['calib']['theta']['cb_bias']=poly[1]
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'New calibrated range',theta_as,theta_bs
max_q=max(theta_as,theta_bs)
min_q=min(theta_as,theta_bs)
if self.comp_j is not None:
print 'Setting joint limits to',min_q,max_q
print 'Expected joint limits of',self.param_internal['joint_limits']
self.comp_j.param.max_q=float(max_q)
self.comp_j.param.min_q=float(min_q)
else:
print 'Joint component missing. Unable to set joint limits to',min_q,max_q
#Assume 0-Ndeg, where N is defined by the encoder soft limits
self.comp_ec.config['param']['qei_min']=min(q_on_a,q_on_b)+100
self.comp_ec.config['param']['qei_max']=max(q_on_a,q_on_b)-100
self.comp_ec.param.qei_min=min(q_on_a,q_on_b)+100
self.comp_ec.param.qei_max=max(q_on_a,q_on_b)-100
print 'Setting DSP qei min/max to',self.comp_ec.config['param']['qei_min'],self.comp_ec.config['param']['qei_max']
| self.reset_sensor('torque')
self.calibrate_torque()
self.write_config()
return True | conditional_block |
calibrate_gripper_g1r1.py | #Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
#import Numeric as nu
import math
import os
import sys
import yaml
import m3.unit_conversion as m3u
from m3qa.calibrate import *
from m3qa.calibrate_sensors import *
from m3qa.calibrate_actuator_ec_r1 import *
import m3.actuator_ec_pb2 as aec
# ####################################################################################################
config_default_g1_j0={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
'pwm_torque':[-1000,-1000],
'joint_limits':[0,315.0]
}
}
config_default_g1_j1={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider=3V3
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
'pwm_torque':[-1000,-1000],
'joint_limits':[0,315.0]
}
}
class M3Calibrate_Gripper_G1R1(M3CalibrateActuatorEcR1):
def __init__(self):
M3CalibrateActuatorEcR1.__init__(self)
self.joint_names=['Left Digit J0',
'Right Digit J1']
self.config_default=[
config_default_g1_j0,
config_default_g1_j1]
def start(self,ctype):
if not M3CalibrateActuatorEcR1.start(self,ctype):
return False
self.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:])
self.calib_default=self.config_default[self.jid]['calib']
self.param_default=self.config_default[self.jid]['param']
self.param_internal=self.config_default[self.jid]['param_internal']
print 'Calibrating joint',self.joint_names[self.jid]
return True
def do_task(self,ct):
|
def print_tasks(self):
M3CalibrateActuatorEcR1.print_tasks(self)
print 'ch: calibrate torque'
print 'tt: calibrate theta'
def display_sensors(self):
M3CalibrateActuatorEcR1.display_sensors(self)
q_on=self.comp_ec.status.qei_on
q_p=self.comp_ec.status.qei_period
q_r=self.comp_ec.status.qei_rollover
c=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on,q_p,q_r)
pos=1000.0*math.pi*2*self.comp_j.config['calib']['cb_drive_radius_m']*c/360.0
print 'Pos: (mm) : '+'%3.3f'%pos+' Qei On '+'%d'%q_on+' Qei Period '+'%d'%q_p+' Qei Rollover '+'%d'%q_r
raw=self.comp_ec.status.adc_torque
c=self.torque.raw_2_mNm(self.comp_rt.config['calib']['torque'],raw)
mN=c/self.comp_j.config['calib']['cb_drive_radius_m']
print 'Force: (g) : '+'%3.2f'%m3u.mN2g(mN)+' (mN): '+'%3.2f'%mN+' (ADC) '+'%d'%raw
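#calibrate_torque fits a first-order polynomial mapping raw ADC counts to joint torque (mNm) from
#three samples: zero load, then a 1 kg weight hung near the slider while driving the joint in each
#direction; the weight is converted to torque through the configured cb_drive_radius_m.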
def calibrate_torque(self):
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Place digit in zero load condition'
print 'Hit enter when ready'
raw_input()
self.step()
raw_a=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_a=0
print 'Hang 1Kg weight from gripper near slider'
print 'Hit enter to move joint in first direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_b=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
print 'Was load in the opening direction [y]?'
if m3t.get_yes_no('y'):
load_b=m3u.g2mN(1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
else:
load_b=m3u.g2mN(-1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
print 'Hit enter to move joint in second direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_c=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_c=-1*load_b
log_adc_torque=[raw_a,raw_b,raw_c]
log_load_mNm=[load_a,load_b,load_c]
poly,inv_poly=self.get_polyfit_to_data(x=log_adc_torque,y=log_load_mNm,n=1)
self.write_raw_calibration({'log_adc_torque':log_adc_torque,'log_load_mNm':log_load_mNm,
'cb_torque':poly,'cb_inv_torque':inv_poly})
self.comp_rt.config['calib']['torque']['cb_torque']=poly
self.comp_rt.config['calib']['torque']['cb_inv_torque']=inv_poly
print 'Poly',poly
s=m3tc.PolyEval(poly,[raw_a,raw_b,raw_c])
m3t.mplot2(range(len(log_adc_torque)),log_load_mNm,s,xlabel='Samples',ylabel='Torque (mNm)',
y1name='load',y2name='raw')
def calibrate_theta(self):
pconfig=self.comp_ec.param.config #disable qei limits
self.comp_ec.param.config=0
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Moving joint to first limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_a=self.comp_ec.status.qei_on
q_p_a=self.comp_ec.status.qei_period
q_r_a=self.comp_ec.status.qei_rollover
print 'RawA',q_on_a
print 'Moving joint to second limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_b=self.comp_ec.status.qei_on
q_p_b=self.comp_ec.status.qei_period
q_r_b=self.comp_ec.status.qei_rollover
print 'Rawb',q_on_b
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'Did this last motion open the gripper [y]?' #At zero position
if m3t.get_yes_no('y'):
theta_b=0
theta_a=abs(theta_bs-theta_as)
else:
theta_a=0
theta_b=abs(theta_bs-theta_as)
self.comp_rt.set_mode_off()
self.comp_ec.param.config=pconfig #enable qei limits
self.step()
self.proxy.make_safe_operational(self.name_rt)
self.step()
print 'Raw',[theta_as,theta_bs]
print 'True',[theta_a,theta_b]
poly,inv_poly=self.get_polyfit_to_data([theta_as,theta_bs],[theta_a,theta_b],n=1)
self.comp_rt.config['calib']['theta']['cb_scale']=poly[0]
self.comp_rt.config['calib']['theta']['cb_bias']=poly[1]
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'New calibrated range',theta_as,theta_bs
max_q=max(theta_as,theta_bs)
min_q=min(theta_as,theta_bs)
if self.comp_j is not None:
print 'Setting joint limits to',min_q,max_q
print 'Expected joint limits of',self.param_internal['joint_limits']
self.comp_j.param.max_q=float(max_q)
self.comp_j.param.min_q=float(min_q)
else:
print 'Joint component missing. Unable to set joint limits to',min_q,max_q
#Assume 0-Ndeg, where N is defined by the encoder soft limits
self.comp_ec.config['param']['qei_min']=min(q_on_a,q_on_b)+100
self.comp_ec.config['param']['qei_max']=max(q_on_a,q_on_b)-100
self.comp_ec.param.qei_min=min(q_on_a,q_on_b)+100
self.comp_ec.param.qei_max=max(q_on_a,q_on_b)-100
print 'Setting DSP qei min/max to',self.comp_ec.config['param']['qei_min'],self.comp_ec.config['param']['qei_max']
| if ct=='ch':
self.reset_sensor('torque')
self.calibrate_torque()
self.write_config()
return True
if ct=='tt':
self.reset_sensor('theta')
self.calibrate_theta()
self.write_config()
return True
if M3CalibrateActuatorEc.do_task(self,ct):
return True
return False | identifier_body |
calibrate_gripper_g1r1.py | #Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
#import Numeric as nu
import math
import os
import sys
import yaml
import m3.unit_conversion as m3u
from m3qa.calibrate import *
from m3qa.calibrate_sensors import *
from m3qa.calibrate_actuator_ec_r1 import *
import m3.actuator_ec_pb2 as aec
# ####################################################################################################
config_default_g1_j0={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
'pwm_torque':[-1000,-1000],
'joint_limits':[0,315.0]
}
}
config_default_g1_j1={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider=3V3
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
'pwm_torque':[-1000,-1000],
'joint_limits':[0,315.0]
}
}
class M3Calibrate_Gripper_G1R1(M3CalibrateActuatorEcR1):
def __init__(self):
M3CalibrateActuatorEcR1.__init__(self)
self.joint_names=['Left Digit J0',
'Right Digit J1']
self.config_default=[
config_default_g1_j0,
config_default_g1_j1]
def start(self,ctype):
if not M3CalibrateActuatorEcR1.start(self,ctype):
return False
self.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:])
self.calib_default=self.config_default[self.jid]['calib']
self.param_default=self.config_default[self.jid]['param']
self.param_internal=self.config_default[self.jid]['param_internal']
print 'Calibrating joint',self.joint_names[self.jid]
return True
def | (self,ct):
if ct=='ch':
self.reset_sensor('torque')
self.calibrate_torque()
self.write_config()
return True
if ct=='tt':
self.reset_sensor('theta')
self.calibrate_theta()
self.write_config()
return True
if M3CalibrateActuatorEc.do_task(self,ct):
return True
return False
def print_tasks(self):
M3CalibrateActuatorEcR1.print_tasks(self)
print 'ch: calibrate torque'
print 'tt: calibrate theta'
def display_sensors(self):
M3CalibrateActuatorEcR1.display_sensors(self)
q_on=self.comp_ec.status.qei_on
q_p=self.comp_ec.status.qei_period
q_r=self.comp_ec.status.qei_rollover
c=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on,q_p,q_r)
pos=1000.0*math.pi*2*self.comp_j.config['calib']['cb_drive_radius_m']*c/360.0
print 'Pos: (mm) : '+'%3.3f'%pos+' Qei On '+'%d'%q_on+' Qei Period '+'%d'%q_p+' Qei Rollover '+'%d'%q_r
raw=self.comp_ec.status.adc_torque
c=self.torque.raw_2_mNm(self.comp_rt.config['calib']['torque'],raw)
mN=c/self.comp_j.config['calib']['cb_drive_radius_m']
print 'Force: (g) : '+'%3.2f'%m3u.mN2g(mN)+' (mN): '+'%3.2f'%mN+' (ADC) '+'%d'%raw
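#calibrate_torque fits a first-order polynomial mapping raw ADC counts to joint torque (mNm) from
#three samples: zero load, then a 1 kg weight hung near the slider while driving the joint in each
#direction; the weight is converted to torque through the configured cb_drive_radius_m.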
def calibrate_torque(self):
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Place digit in zero load condition'
print 'Hit enter when ready'
raw_input()
self.step()
raw_a=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_a=0
print 'Hang 1Kg weight from gripper near slider'
print 'Hit enter to move joint in first direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_b=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
print 'Was load in the opening direction [y]?'
if m3t.get_yes_no('y'):
load_b=m3u.g2mN(1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
else:
load_b=m3u.g2mN(-1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
print 'Hit enter to move joint in second direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_c=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_c=-1*load_b
log_adc_torque=[raw_a,raw_b,raw_c]
log_load_mNm=[load_a,load_b,load_c]
poly,inv_poly=self.get_polyfit_to_data(x=log_adc_torque,y=log_load_mNm,n=1)
self.write_raw_calibration({'log_adc_torque':log_adc_torque,'log_load_mNm':log_load_mNm,
'cb_torque':poly,'cb_inv_torque':inv_poly})
self.comp_rt.config['calib']['torque']['cb_torque']=poly
self.comp_rt.config['calib']['torque']['cb_inv_torque']=inv_poly
print 'Poly',poly
s=m3tc.PolyEval(poly,[raw_a,raw_b,raw_c])
m3t.mplot2(range(len(log_adc_torque)),log_load_mNm,s,xlabel='Samples',ylabel='Torque (mNm)',
y1name='load',y2name='raw')
def calibrate_theta(self):
pconfig=self.comp_ec.param.config #disable qei limits
self.comp_ec.param.config=0
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Moving joint to first limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_a=self.comp_ec.status.qei_on
q_p_a=self.comp_ec.status.qei_period
q_r_a=self.comp_ec.status.qei_rollover
print 'RawA',q_on_a
print 'Moving joint to second limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_b=self.comp_ec.status.qei_on
q_p_b=self.comp_ec.status.qei_period
q_r_b=self.comp_ec.status.qei_rollover
print 'Rawb',q_on_b
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'Did this last motion open the gripper [y]?' #At zero position
if m3t.get_yes_no('y'):
theta_b=0
theta_a=abs(theta_bs-theta_as)
else:
theta_a=0
theta_b=abs(theta_bs-theta_as)
self.comp_rt.set_mode_off()
self.comp_ec.param.config=pconfig #enable qei limits
self.step()
self.proxy.make_safe_operational(self.name_rt)
self.step()
print 'Raw',[theta_as,theta_bs]
print 'True',[theta_a,theta_b]
poly,inv_poly=self.get_polyfit_to_data([theta_as,theta_bs],[theta_a,theta_b],n=1)
self.comp_rt.config['calib']['theta']['cb_scale']=poly[0]
self.comp_rt.config['calib']['theta']['cb_bias']=poly[1]
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'New calibrated range',theta_as,theta_bs
max_q=max(theta_as,theta_bs)
min_q=min(theta_as,theta_bs)
if self.comp_j is not None:
print 'Setting joint limits to',min_q,max_q
print 'Expected joint limits of',self.param_internal['joint_limits']
self.comp_j.param.max_q=float(max_q)
self.comp_j.param.min_q=float(min_q)
else:
print 'Joint component missing. Unable to set joint limits to',min_q,max_q
#Assume 0-Ndeg, where N is defined by the encoder soft limits
self.comp_ec.config['param']['qei_min']=min(q_on_a,q_on_b)+100
self.comp_ec.config['param']['qei_max']=max(q_on_a,q_on_b)-100
self.comp_ec.param.qei_min=min(q_on_a,q_on_b)+100
self.comp_ec.param.qei_max=max(q_on_a,q_on_b)-100
print 'Setting DSP qei min/max to',self.comp_ec.config['param']['qei_min'],self.comp_ec.config['param']['qei_max']
| do_task | identifier_name |
getfood_base.py | import cv2
from enum import IntEnum
from rlkit.envs.gym_minigrid.gym_minigrid.register import register
from gym import spaces
import numpy as np
from collections import defaultdict
from rlkit.envs.gym_minigrid.gym_minigrid.minigrid_absolute import MiniGridAbsoluteEnv, Food, GridAbsolute, CELL_PIXELS
class FoodEnvBase(MiniGridAbsoluteEnv):
class Actions(IntEnum):
# Absolute directions
west = 0
east = 1
north = 2
south = 3
mine = 4
def __init__(self,
agent_start_pos=(1, 1),
health_cap=50,
food_rate=4,
grid_size=8,
obs_vision=False,
reward_type='delta',
fully_observed=False,
only_partial_obs=False,
can_die=True,
one_hot_obs=True,
mixing_time_periods=[],
mixing_time_period_length=120,
**kwargs
):
self.agent_start_pos = agent_start_pos
# self.agent_start_dir = agent_start_dir
self.food_rate = food_rate
self.health_cap = health_cap
self.health = health_cap
self.last_health = self.health
self.obs_vision = obs_vision
self.reward_type = reward_type
self.fully_observed = fully_observed
self.only_partial_obs = only_partial_obs
self.can_die = can_die
self.one_hot_obs = one_hot_obs
# for conditional entropy of s' | s
self.transition_count = {}
self.prev_obs_string = ''
# for mixing time
self.mixing_time_periods = mixing_time_periods
self.max_mixing_time_period = max(mixing_time_periods) if mixing_time_periods else 0
self.mixing_time_period_length = mixing_time_period_length
self.obs_counts = []
if not hasattr(self, 'actions'):
self.actions = FoodEnvBase.Actions
super().__init__(
# Set this to True for maximum speed
see_through_walls=True,
grid_size=grid_size,
**kwargs
)
def _reward(self):
if self.reward_type == 'survival':
rwd = 1
elif self.reward_type == 'delta':
rwd = self.health - self.last_health
elif self.reward_type == 'health':
rwd = self.health
else:
assert False, "Reward type not matched"
self.last_health = self.health
return rwd
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = GridAbsolute(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal square in the bottom-right corner
# self.grid.set(width - 2, height - 2, Goal())
self.extra_gen_grid()
# Place the agent
if self.agent_start_pos is not None:
self.start_pos = self.agent_start_pos
# self.start_dir = self.agent_start_dir
else:
self.place_agent()
self.mission = None
def step(self, action, incl_health=True):
done = False
matched = super().step(action, override=True)
# subclass-defined extra actions. if not caught by that, then unknown action
if not self.extra_step(action, matched):
assert False, "unknown action %d" % action
# decrease health bar
self.decay_health()
# generate new food
self.place_items()
# generate obs after action is caught and food is placed. generate reward before death check
img = self.get_img(onehot=self.one_hot_obs)
full_img = self.get_full_img(scale=1 if self.fully_observed else 1 / 8, onehot=self.one_hot_obs)
# NOTE: below not nec due to onehot being passed into two func calls above. but leaving here for now in case.
# if self.one_hot_obs:
# # ignore second channel since redundant (due to one-to-one mapping btwn color and obj type for now)
# img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in img[:1]])
# full_img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in full_img[:1]])
rwd = self._reward()
# tick on each grid item
to_remove = []
for j in range(0, self.grid.height):
for i in range(0, self.grid.width):
cell = self.grid.get(i, j)
if cell is not None:
if not cell.step():
self.dead_obj(i, j, cell)
to_remove.append((i, j))
for idxs in to_remove:
self.grid.set(*idxs, None)
# dead.
if self.dead():
done = True
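# assemble the observation: full grid (plus agent position) when fully_observed, agent view only when only_partial_obs, otherwise both views concatenated; health is appended when incl_health is set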
if self.fully_observed:
if incl_health:
obs = np.concatenate((full_img.flatten(), np.array([self.health]), np.array(self.agent_pos)))
else:
obs = np.concatenate((full_img.flatten(), np.array(self.agent_pos)))
elif self.only_partial_obs:
if incl_health:
|
else:
obs = img.flatten()
else:
if incl_health:
obs = np.concatenate((img.flatten(), full_img.flatten(), np.array([self.health])))
else:
obs = np.concatenate((img.flatten(), full_img.flatten()))
obs_string = obs.tostring()
# transition count stuff
self.transition_count.setdefault(hash(self.prev_obs_string), {})
self.transition_count[hash(self.prev_obs_string)][hash(obs_string)] = 1 + self.transition_count[hash(self.prev_obs_string)].get(hash(obs_string), 0)
# mixing time stuff
if self.step_count % self.mixing_time_period_length == 0:
self.obs_counts.append(self.obs_count.copy())
if hasattr(self, 'obs_count') and self.mixing_time_periods and len(self.obs_counts) > self.max_mixing_time_period:
self.obs_counts = self.obs_counts[-(self.max_mixing_time_period+1):]
self.prev_obs_string = obs_string
return obs, rwd, done, {}
def reset(self, incl_health=True):
super().reset()
self.health = self.health_cap
self.extra_reset()
img = self.get_img(onehot=self.one_hot_obs)
full_img = self.get_full_img(onehot=self.one_hot_obs)
self.transition_count = {}
# if self.one_hot_obs:
# # ignore second channel since redundant (due to one-to-one mapping btwn color and obj type for now)
# img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in img[:1]])
# full_img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in full_img[:1]])
if self.fully_observed:
if incl_health:
obs = np.concatenate((full_img.flatten(), np.array([self.health]), np.array(self.agent_pos)))
else:
obs = np.concatenate((full_img.flatten(), np.array(self.agent_pos)))
elif self.only_partial_obs:
if incl_health:
obs = np.concatenate((img.flatten(), np.array([self.health])))
else:
obs = img.flatten()
else:
if incl_health:
obs = np.concatenate((img.flatten(), full_img.flatten(), np.array([self.health])))
else:
obs = np.concatenate((img.flatten(), full_img.flatten()))
self.prev_obs_string = obs.tostring()
return obs
def get_full_img(self, scale=1 / 8, onehot=False):
""" Return the whole grid view """
if self.obs_vision:
full_img = self.get_full_obs_render(scale=scale)
else:
full_img = self.grid.encode(self, onehot=onehot)
# NOTE: in case need to scale here instead of in above func call: return cv2.resize(full_img, (0, 0), fx=0.125, fy=0.125, interpolation=cv2.INTER_AREA)
return full_img
def get_img(self, onehot=False):
""" Return the agent view """
if self.obs_vision:
img = self.gen_obs(onehot=False)
img = self.get_obs_render(img, CELL_PIXELS // 4)
else:
img = self.gen_obs(onehot=onehot)
return img
def extra_step(self, action, matched):
return matched
def extra_reset(self):
pass
def place_items(self):
pass
def extra_gen_grid(self):
pass
def place_prob(self, obj, prob, top=None, size=None):
if np.random.binomial(1, prob):
pos = self.place_obj(obj, top, size)
obj.cur_pos = pos
return True
return False
def decay_health(self):
self.add_health(-1)
def add_health(self, num):
# clip health between 0 and cap after adjustment
self.health = max(0, min(self.health_cap, self.health + num))
def count_type(self, type):
count = 0
for i in range(self.grid_size):
for j in range(self.grid_size):
cell = self.grid.get(i, j)
if type is None and cell is None or cell is not None and cell.type == type:
count += 1
return count
def count_all_types(self):
counts = {}
for i in range(self.grid_size):
for j in range(self.grid_size):
cell = self.grid.get(i, j)
type = cell.type if cell is not None else ''
counts[type] = counts.get(type, 0) + 1
if hasattr(self, 'monsters'):
counts['monster'] = len(self.monsters)
return counts
def exists_type(self, type):
""" Check if object of type TYPE exists in current grid. """
for i in range(1, self.grid_size - 1):
for j in range(1, self.grid_size - 1):
obj = self.grid.get(i, j)
if obj and obj.type == type:
return True
return False
def dead(self):
return self.can_die and self.health <= 0
def dead_obj(self, i, j, obj):
""" Called when OBJ dies at position (i, j). """
pass
def __getstate__(self):
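""" Pickle support: drop the grid_render handle (likely not serializable) from the copied state. """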
d = self.__dict__.copy()
del d['grid_render']
return d
def __setstate__(self, d):
self.__dict__.update(d)
class FoodEnvEmptyFullObs(FoodEnvBase):
def __init__(self):
super().__init__(fully_observed=True)
def decay_health(self):
pass
register(
id='MiniGrid-Food-8x8-Empty-FullObs-v1',
entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEmptyFullObs'
)
| obs = np.concatenate((img.flatten(), np.array([self.health]))) | conditional_block |
getfood_base.py | import cv2
from enum import IntEnum
from rlkit.envs.gym_minigrid.gym_minigrid.register import register
from gym import spaces
import numpy as np
from collections import defaultdict
from rlkit.envs.gym_minigrid.gym_minigrid.minigrid_absolute import MiniGridAbsoluteEnv, Food, GridAbsolute, CELL_PIXELS
class FoodEnvBase(MiniGridAbsoluteEnv):
class Actions(IntEnum):
# Absolute directions
west = 0
east = 1
north = 2
south = 3
mine = 4
def __init__(self,
agent_start_pos=(1, 1),
health_cap=50,
food_rate=4,
grid_size=8,
obs_vision=False,
reward_type='delta',
fully_observed=False,
only_partial_obs=False,
can_die=True,
one_hot_obs=True,
mixing_time_periods=[],
mixing_time_period_length=120,
**kwargs
):
self.agent_start_pos = agent_start_pos
# self.agent_start_dir = agent_start_dir
self.food_rate = food_rate
self.health_cap = health_cap
self.health = health_cap
self.last_health = self.health
self.obs_vision = obs_vision
self.reward_type = reward_type
self.fully_observed = fully_observed
self.only_partial_obs = only_partial_obs
self.can_die = can_die
self.one_hot_obs = one_hot_obs
# for conditional entropy of s' | s
self.transition_count = {}
self.prev_obs_string = ''
# for mixing time
self.mixing_time_periods = mixing_time_periods
self.max_mixing_time_period = max(mixing_time_periods) if mixing_time_periods else 0
self.mixing_time_period_length = mixing_time_period_length
self.obs_counts = []
if not hasattr(self, 'actions'):
self.actions = FoodEnvBase.Actions
super().__init__(
# Set this to True for maximum speed
see_through_walls=True,
grid_size=grid_size,
**kwargs
)
def _reward(self):
if self.reward_type == 'survival':
rwd = 1
elif self.reward_type == 'delta':
rwd = self.health - self.last_health
elif self.reward_type == 'health':
rwd = self.health
else:
assert False, "Reward type not matched"
self.last_health = self.health
return rwd
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = GridAbsolute(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal square in the bottom-right corner
# self.grid.set(width - 2, height - 2, Goal())
self.extra_gen_grid()
# Place the agent
if self.agent_start_pos is not None:
self.start_pos = self.agent_start_pos
# self.start_dir = self.agent_start_dir
else:
self.place_agent()
self.mission = None
def step(self, action, incl_health=True):
done = False
matched = super().step(action, override=True)
# subclass-defined extra actions. if not caught by that, then unknown action
if not self.extra_step(action, matched):
assert False, "unknown action %d" % action
# decrease health bar
self.decay_health()
# generate new food
self.place_items()
# generate obs after action is caught and food is placed. generate reward before death check
img = self.get_img(onehot=self.one_hot_obs)
full_img = self.get_full_img(scale=1 if self.fully_observed else 1 / 8, onehot=self.one_hot_obs)
# NOTE: below not nec due to onehot being passed into two func calls above. but leaving here for now in case.
# if self.one_hot_obs:
# # ignore second channel since redundant (due to one-to-one mapping btwn color and obj type for now)
# img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in img[:1]])
# full_img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in full_img[:1]])
rwd = self._reward()
# tick on each grid item
to_remove = []
for j in range(0, self.grid.height):
for i in range(0, self.grid.width):
cell = self.grid.get(i, j)
if cell is not None:
if not cell.step():
self.dead_obj(i, j, cell)
to_remove.append((i, j))
for idxs in to_remove:
self.grid.set(*idxs, None)
# dead.
if self.dead():
done = True
if self.fully_observed:
if incl_health:
obs = np.concatenate((full_img.flatten(), np.array([self.health]), np.array(self.agent_pos)))
else:
obs = np.concatenate((full_img.flatten(), np.array(self.agent_pos)))
elif self.only_partial_obs:
if incl_health:
obs = np.concatenate((img.flatten(), np.array([self.health])))
else:
obs = img.flatten()
else:
if incl_health:
obs = np.concatenate((img.flatten(), full_img.flatten(), np.array([self.health])))
else:
obs = np.concatenate((img.flatten(), full_img.flatten()))
obs_string = obs.tostring()
# transition count stuff
self.transition_count.setdefault(hash(self.prev_obs_string), {})
self.transition_count[hash(self.prev_obs_string)][hash(obs_string)] = 1 + self.transition_count[hash(self.prev_obs_string)].get(hash(obs_string), 0)
# mixing time stuff
if self.step_count % self.mixing_time_period_length == 0:
self.obs_counts.append(self.obs_count.copy())
if hasattr(self, 'obs_count') and self.mixing_time_periods and len(self.obs_counts) > self.max_mixing_time_period:
self.obs_counts = self.obs_counts[-(self.max_mixing_time_period+1):]
self.prev_obs_string = obs_string
return obs, rwd, done, {}
def reset(self, incl_health=True):
super().reset()
self.health = self.health_cap
self.extra_reset()
img = self.get_img(onehot=self.one_hot_obs)
full_img = self.get_full_img(onehot=self.one_hot_obs)
self.transition_count = {}
# if self.one_hot_obs:
# # ignore second channel since redundant (due to one-to-one mapping btwn color and obj type for now)
# img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in img[:1]])
# full_img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in full_img[:1]])
if self.fully_observed:
if incl_health:
obs = np.concatenate((full_img.flatten(), np.array([self.health]), np.array(self.agent_pos)))
else:
obs = np.concatenate((full_img.flatten(), np.array(self.agent_pos)))
elif self.only_partial_obs:
if incl_health:
obs = np.concatenate((img.flatten(), np.array([self.health])))
else:
obs = img.flatten()
else:
if incl_health:
obs = np.concatenate((img.flatten(), full_img.flatten(), np.array([self.health])))
else:
obs = np.concatenate((img.flatten(), full_img.flatten()))
self.prev_obs_string = obs.tostring()
return obs
def get_full_img(self, scale=1 / 8, onehot=False):
""" Return the whole grid view """
if self.obs_vision:
full_img = self.get_full_obs_render(scale=scale)
else:
full_img = self.grid.encode(self, onehot=onehot)
# NOTE: in case need to scale here instead of in above func call: return cv2.resize(full_img, (0, 0), fx=0.125, fy=0.125, interpolation=cv2.INTER_AREA)
return full_img
def get_img(self, onehot=False):
""" Return the agent view """
if self.obs_vision:
img = self.gen_obs(onehot=False)
img = self.get_obs_render(img, CELL_PIXELS // 4)
else:
img = self.gen_obs(onehot=onehot)
return img
def extra_step(self, action, matched):
return matched
def extra_reset(self):
pass
def place_items(self):
pass
def extra_gen_grid(self):
pass
def place_prob(self, obj, prob, top=None, size=None):
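""" With probability PROB, place OBJ on the grid (optionally within the TOP/SIZE region), record its position, and return True if it was placed. """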
if np.random.binomial(1, prob):
pos = self.place_obj(obj, top, size)
obj.cur_pos = pos
return True
return False
def decay_health(self):
self.add_health(-1)
def add_health(self, num):
# clip health between 0 and cap after adjustment
self.health = max(0, min(self.health_cap, self.health + num))
def count_type(self, type):
count = 0
for i in range(self.grid_size):
for j in range(self.grid_size):
cell = self.grid.get(i, j)
if type is None and cell is None or cell is not None and cell.type == type:
count += 1
return count
def count_all_types(self):
counts = {}
for i in range(self.grid_size):
for j in range(self.grid_size):
cell = self.grid.get(i, j)
type = cell.type if cell is not None else ''
counts[type] = counts.get(type, 0) + 1
if hasattr(self, 'monsters'):
counts['monster'] = len(self.monsters)
return counts
def exists_type(self, type):
""" Check if object of type TYPE exists in current grid. """
for i in range(1, self.grid_size - 1):
for j in range(1, self.grid_size - 1):
obj = self.grid.get(i, j)
if obj and obj.type == type:
return True
return False
def dead(self):
return self.can_die and self.health <= 0
def dead_obj(self, i, j, obj):
""" Called when OBJ dies at position (i, j). """
pass
def __getstate__(self):
d = self.__dict__.copy()
del d['grid_render']
return d
def __setstate__(self, d):
self.__dict__.update(d)
class | (FoodEnvBase):
def __init__(self):
super().__init__(fully_observed=True)
def decay_health(self):
pass
register(
id='MiniGrid-Food-8x8-Empty-FullObs-v1',
entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEmptyFullObs'
)
| FoodEnvEmptyFullObs | identifier_name |
getfood_base.py | import cv2
from enum import IntEnum
from rlkit.envs.gym_minigrid.gym_minigrid.register import register
from gym import spaces
import numpy as np
from collections import defaultdict
from rlkit.envs.gym_minigrid.gym_minigrid.minigrid_absolute import MiniGridAbsoluteEnv, Food, GridAbsolute, CELL_PIXELS
class FoodEnvBase(MiniGridAbsoluteEnv):
class Actions(IntEnum):
# Absolute directions
west = 0
east = 1
north = 2
south = 3
mine = 4
def __init__(self,
agent_start_pos=(1, 1),
health_cap=50,
food_rate=4,
grid_size=8,
obs_vision=False,
reward_type='delta',
fully_observed=False,
only_partial_obs=False,
can_die=True,
one_hot_obs=True,
mixing_time_periods=[],
mixing_time_period_length=120,
**kwargs
):
self.agent_start_pos = agent_start_pos
# self.agent_start_dir = agent_start_dir
self.food_rate = food_rate
self.health_cap = health_cap
self.health = health_cap
self.last_health = self.health
self.obs_vision = obs_vision
self.reward_type = reward_type
self.fully_observed = fully_observed
self.only_partial_obs = only_partial_obs
self.can_die = can_die
self.one_hot_obs = one_hot_obs
# for conditional entropy of s' | s
self.transition_count = {}
self.prev_obs_string = ''
# for mixing time
self.mixing_time_periods = mixing_time_periods
self.max_mixing_time_period = max(mixing_time_periods) if mixing_time_periods else 0
self.mixing_time_period_length = mixing_time_period_length
self.obs_counts = []
if not hasattr(self, 'actions'):
self.actions = FoodEnvBase.Actions
super().__init__(
# Set this to True for maximum speed
see_through_walls=True,
grid_size=grid_size,
**kwargs
)
def _reward(self):
if self.reward_type == 'survival':
rwd = 1
elif self.reward_type == 'delta':
rwd = self.health - self.last_health
elif self.reward_type == 'health':
rwd = self.health
else:
assert False, "Reward type not matched"
self.last_health = self.health
return rwd
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = GridAbsolute(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal square in the bottom-right corner
# self.grid.set(width - 2, height - 2, Goal())
self.extra_gen_grid()
# Place the agent
if self.agent_start_pos is not None:
self.start_pos = self.agent_start_pos
# self.start_dir = self.agent_start_dir
else:
self.place_agent()
self.mission = None
def step(self, action, incl_health=True):
done = False
matched = super().step(action, override=True)
# subclass-defined extra actions. if not caught by that, then unknown action
if not self.extra_step(action, matched):
assert False, "unknown action %d" % action
# decrease health bar
self.decay_health()
# generate new food
self.place_items()
# generate obs after action is caught and food is placed. generate reward before death check
img = self.get_img(onehot=self.one_hot_obs)
full_img = self.get_full_img(scale=1 if self.fully_observed else 1 / 8, onehot=self.one_hot_obs)
# NOTE: below not nec due to onehot being passed into two func calls above. but leaving here for now in case.
# if self.one_hot_obs:
# # ignore second channel since redundant (due to one-to-one mapping btwn color and obj type for now)
# img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in img[:1]])
# full_img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in full_img[:1]])
rwd = self._reward()
# tick on each grid item
to_remove = []
for j in range(0, self.grid.height):
for i in range(0, self.grid.width):
cell = self.grid.get(i, j)
if cell is not None:
if not cell.step():
self.dead_obj(i, j, cell)
to_remove.append((i, j))
for idxs in to_remove:
self.grid.set(*idxs, None)
# dead.
if self.dead():
done = True
if self.fully_observed:
if incl_health:
obs = np.concatenate((full_img.flatten(), np.array([self.health]), np.array(self.agent_pos)))
else:
obs = np.concatenate((full_img.flatten(), np.array(self.agent_pos)))
elif self.only_partial_obs:
if incl_health:
obs = np.concatenate((img.flatten(), np.array([self.health])))
else:
obs = img.flatten()
else:
if incl_health:
obs = np.concatenate((img.flatten(), full_img.flatten(), np.array([self.health])))
else:
obs = np.concatenate((img.flatten(), full_img.flatten()))
obs_string = obs.tostring()
# transition count stuff
self.transition_count.setdefault(hash(self.prev_obs_string), {})
self.transition_count[hash(self.prev_obs_string)][hash(obs_string)] = 1 + self.transition_count[hash(self.prev_obs_string)].get(hash(obs_string), 0)
# mixing time stuff
if self.step_count % self.mixing_time_period_length == 0:
self.obs_counts.append(self.obs_count.copy())
if hasattr(self, 'obs_count') and self.mixing_time_periods and len(self.obs_counts) > self.max_mixing_time_period:
self.obs_counts = self.obs_counts[-(self.max_mixing_time_period+1):]
self.prev_obs_string = obs_string
return obs, rwd, done, {}
def reset(self, incl_health=True):
super().reset()
self.health = self.health_cap
self.extra_reset()
img = self.get_img(onehot=self.one_hot_obs)
full_img = self.get_full_img(onehot=self.one_hot_obs)
self.transition_count = {}
# if self.one_hot_obs: | if self.fully_observed:
if incl_health:
obs = np.concatenate((full_img.flatten(), np.array([self.health]), np.array(self.agent_pos)))
else:
obs = np.concatenate((full_img.flatten(), np.array(self.agent_pos)))
elif self.only_partial_obs:
if incl_health:
obs = np.concatenate((img.flatten(), np.array([self.health])))
else:
obs = img.flatten()
else:
if incl_health:
obs = np.concatenate((img.flatten(), full_img.flatten(), np.array([self.health])))
else:
obs = np.concatenate((img.flatten(), full_img.flatten()))
self.prev_obs_string = obs.tostring()
return obs
def get_full_img(self, scale=1 / 8, onehot=False):
""" Return the whole grid view """
if self.obs_vision:
full_img = self.get_full_obs_render(scale=scale)
else:
full_img = self.grid.encode(self, onehot=onehot)
# NOTE: in case need to scale here instead of in above func call: return cv2.resize(full_img, (0, 0), fx=0.125, fy=0.125, interpolation=cv2.INTER_AREA)
return full_img
def get_img(self, onehot=False):
""" Return the agent view """
if self.obs_vision:
img = self.gen_obs(onehot=False)
img = self.get_obs_render(img, CELL_PIXELS // 4)
else:
img = self.gen_obs(onehot=onehot)
return img
def extra_step(self, action, matched):
return matched
def extra_reset(self):
pass
def place_items(self):
pass
def extra_gen_grid(self):
pass
def place_prob(self, obj, prob, top=None, size=None):
if np.random.binomial(1, prob):
pos = self.place_obj(obj, top, size)
obj.cur_pos = pos
return True
return False
def decay_health(self):
self.add_health(-1)
def add_health(self, num):
# clip health between 0 and cap after adjustment
self.health = max(0, min(self.health_cap, self.health + num))
def count_type(self, type):
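""" Count grid cells holding an object of type TYPE; if TYPE is None, count empty cells instead. """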
count = 0
for i in range(self.grid_size):
for j in range(self.grid_size):
cell = self.grid.get(i, j)
if type is None and cell is None or cell is not None and cell.type == type:
count += 1
return count
def count_all_types(self):
counts = {}
for i in range(self.grid_size):
for j in range(self.grid_size):
cell = self.grid.get(i, j)
type = cell.type if cell is not None else ''
counts[type] = counts.get(type, 0) + 1
if hasattr(self, 'monsters'):
counts['monster'] = len(self.monsters)
return counts
def exists_type(self, type):
""" Check if object of type TYPE exists in current grid. """
for i in range(1, self.grid_size - 1):
for j in range(1, self.grid_size - 1):
obj = self.grid.get(i, j)
if obj and obj.type == type:
return True
return False
def dead(self):
return self.can_die and self.health <= 0
def dead_obj(self, i, j, obj):
""" Called when OBJ dies at position (i, j). """
pass
def __getstate__(self):
d = self.__dict__.copy()
del d['grid_render']
return d
def __setstate__(self, d):
self.__dict__.update(d)
class FoodEnvEmptyFullObs(FoodEnvBase):
def __init__(self):
super().__init__(fully_observed=True)
def decay_health(self):
pass
register(
id='MiniGrid-Food-8x8-Empty-FullObs-v1',
entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEmptyFullObs'
) | # # ignore second channel since redundant (due to one-to-one mapping btwn color and obj type for now)
# img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in img[:1]])
# full_img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in full_img[:1]]) | random_line_split |
getfood_base.py | import cv2
from enum import IntEnum
from rlkit.envs.gym_minigrid.gym_minigrid.register import register
from gym import spaces
import numpy as np
from collections import defaultdict
from rlkit.envs.gym_minigrid.gym_minigrid.minigrid_absolute import MiniGridAbsoluteEnv, Food, GridAbsolute, CELL_PIXELS
class FoodEnvBase(MiniGridAbsoluteEnv):
class Actions(IntEnum):
# Absolute directions
west = 0
east = 1
north = 2
south = 3
mine = 4
def __init__(self,
agent_start_pos=(1, 1),
health_cap=50,
food_rate=4,
grid_size=8,
obs_vision=False,
reward_type='delta',
fully_observed=False,
only_partial_obs=False,
can_die=True,
one_hot_obs=True,
mixing_time_periods=[],
mixing_time_period_length=120,
**kwargs
):
self.agent_start_pos = agent_start_pos
# self.agent_start_dir = agent_start_dir
self.food_rate = food_rate
self.health_cap = health_cap
self.health = health_cap
self.last_health = self.health
self.obs_vision = obs_vision
self.reward_type = reward_type
self.fully_observed = fully_observed
self.only_partial_obs = only_partial_obs
self.can_die = can_die
self.one_hot_obs = one_hot_obs
# for conditional entropy of s' | s
self.transition_count = {}
self.prev_obs_string = ''
# for mixing time
self.mixing_time_periods = mixing_time_periods
self.max_mixing_time_period = max(mixing_time_periods) if mixing_time_periods else 0
self.mixing_time_period_length = mixing_time_period_length
self.obs_counts = []
if not hasattr(self, 'actions'):
self.actions = FoodEnvBase.Actions
super().__init__(
# Set this to True for maximum speed
see_through_walls=True,
grid_size=grid_size,
**kwargs
)
def _reward(self):
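""" Compute reward per reward_type: 'survival' gives +1 per step, 'delta' gives the change in health since the last step, 'health' gives the current health. """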
if self.reward_type == 'survival':
rwd = 1
elif self.reward_type == 'delta':
rwd = self.health - self.last_health
elif self.reward_type == 'health':
rwd = self.health
else:
assert False, "Reward type not matched"
self.last_health = self.health
return rwd
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = GridAbsolute(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal square in the bottom-right corner
# self.grid.set(width - 2, height - 2, Goal())
self.extra_gen_grid()
# Place the agent
if self.agent_start_pos is not None:
self.start_pos = self.agent_start_pos
# self.start_dir = self.agent_start_dir
else:
self.place_agent()
self.mission = None
def step(self, action, incl_health=True):
done = False
matched = super().step(action, override=True)
# subclass-defined extra actions. if not caught by that, then unknown action
if not self.extra_step(action, matched):
assert False, "unknown action %d" % action
# decrease health bar
self.decay_health()
# generate new food
self.place_items()
# generate obs after action is caught and food is placed. generate reward before death check
img = self.get_img(onehot=self.one_hot_obs)
full_img = self.get_full_img(scale=1 if self.fully_observed else 1 / 8, onehot=self.one_hot_obs)
# NOTE: below not nec due to onehot being passed into two func calls above. but leaving here for now in case.
# if self.one_hot_obs:
# # ignore second channel since redundant (due to one-to-one mapping btwn color and obj type for now)
# img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in img[:1]])
# full_img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in full_img[:1]])
rwd = self._reward()
# tick on each grid item
to_remove = []
for j in range(0, self.grid.height):
for i in range(0, self.grid.width):
cell = self.grid.get(i, j)
if cell is not None:
if not cell.step():
self.dead_obj(i, j, cell)
to_remove.append((i, j))
for idxs in to_remove:
self.grid.set(*idxs, None)
# dead.
if self.dead():
done = True
if self.fully_observed:
if incl_health:
obs = np.concatenate((full_img.flatten(), np.array([self.health]), np.array(self.agent_pos)))
else:
obs = np.concatenate((full_img.flatten(), np.array(self.agent_pos)))
elif self.only_partial_obs:
if incl_health:
obs = np.concatenate((img.flatten(), np.array([self.health])))
else:
obs = img.flatten()
else:
if incl_health:
obs = np.concatenate((img.flatten(), full_img.flatten(), np.array([self.health])))
else:
obs = np.concatenate((img.flatten(), full_img.flatten()))
obs_string = obs.tostring()
# transition count stuff
self.transition_count.setdefault(hash(self.prev_obs_string), {})
self.transition_count[hash(self.prev_obs_string)][hash(obs_string)] = 1 + self.transition_count[hash(self.prev_obs_string)].get(hash(obs_string), 0)
# mixing time stuff
if self.step_count % self.mixing_time_period_length == 0:
self.obs_counts.append(self.obs_count.copy())
if hasattr(self, 'obs_count') and self.mixing_time_periods and len(self.obs_counts) > self.max_mixing_time_period:
self.obs_counts = self.obs_counts[-(self.max_mixing_time_period+1):]
self.prev_obs_string = obs_string
return obs, rwd, done, {}
def reset(self, incl_health=True):
super().reset()
self.health = self.health_cap
self.extra_reset()
img = self.get_img(onehot=self.one_hot_obs)
full_img = self.get_full_img(onehot=self.one_hot_obs)
self.transition_count = {}
# if self.one_hot_obs:
# # ignore second channel since redundant (due to one-to-one mapping btwn color and obj type for now)
# img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in img[:1]])
# full_img = np.concatenate([np.eye(len(self.object_to_idx))[ch].transpose(2, 0, 1) for ch in full_img[:1]])
if self.fully_observed:
if incl_health:
obs = np.concatenate((full_img.flatten(), np.array([self.health]), np.array(self.agent_pos)))
else:
obs = np.concatenate((full_img.flatten(), np.array(self.agent_pos)))
elif self.only_partial_obs:
if incl_health:
obs = np.concatenate((img.flatten(), np.array([self.health])))
else:
obs = img.flatten()
else:
if incl_health:
obs = np.concatenate((img.flatten(), full_img.flatten(), np.array([self.health])))
else:
obs = np.concatenate((img.flatten(), full_img.flatten()))
self.prev_obs_string = obs.tostring()
return obs
def get_full_img(self, scale=1 / 8, onehot=False):
""" Return the whole grid view """
if self.obs_vision:
full_img = self.get_full_obs_render(scale=scale)
else:
full_img = self.grid.encode(self, onehot=onehot)
# NOTE: in case need to scale here instead of in above func call: return cv2.resize(full_img, (0, 0), fx=0.125, fy=0.125, interpolation=cv2.INTER_AREA)
return full_img
def get_img(self, onehot=False):
""" Return the agent view """
if self.obs_vision:
img = self.gen_obs(onehot=False)
img = self.get_obs_render(img, CELL_PIXELS // 4)
else:
img = self.gen_obs(onehot=onehot)
return img
def extra_step(self, action, matched):
return matched
def extra_reset(self):
|
def place_items(self):
pass
def extra_gen_grid(self):
pass
def place_prob(self, obj, prob, top=None, size=None):
if np.random.binomial(1, prob):
pos = self.place_obj(obj, top, size)
obj.cur_pos = pos
return True
return False
def decay_health(self):
self.add_health(-1)
def add_health(self, num):
# clip health between 0 and cap after adjustment
self.health = max(0, min(self.health_cap, self.health + num))
def count_type(self, type):
count = 0
for i in range(self.grid_size):
for j in range(self.grid_size):
cell = self.grid.get(i, j)
if type is None and cell is None or cell is not None and cell.type == type:
count += 1
return count
def count_all_types(self):
counts = {}
for i in range(self.grid_size):
for j in range(self.grid_size):
cell = self.grid.get(i, j)
type = cell.type if cell is not None else ''
counts[type] = counts.get(type, 0) + 1
if hasattr(self, 'monsters'):
counts['monster'] = len(self.monsters)
return counts
def exists_type(self, type):
""" Check if object of type TYPE exists in current grid. """
for i in range(1, self.grid_size - 1):
for j in range(1, self.grid_size - 1):
obj = self.grid.get(i, j)
if obj and obj.type == type:
return True
return False
def dead(self):
return self.can_die and self.health <= 0
def dead_obj(self, i, j, obj):
""" Called when OBJ dies at position (i, j). """
pass
def __getstate__(self):
d = self.__dict__.copy()
del d['grid_render']
return d
def __setstate__(self, d):
self.__dict__.update(d)
class FoodEnvEmptyFullObs(FoodEnvBase):
def __init__(self):
super().__init__(fully_observed=True)
def decay_health(self):
pass
register(
id='MiniGrid-Food-8x8-Empty-FullObs-v1',
entry_point='rlkit.envs.gym_minigrid.gym_minigrid.envs:FoodEnvEmptyFullObs'
)
| pass | identifier_body |
codeHost.ts | import { trimStart } from 'lodash'
import { defer, of } from 'rxjs'
import { map } from 'rxjs/operators'
import { Omit } from 'utility-types'
import { AdjustmentDirection, PositionAdjuster } from '@sourcegraph/codeintellify'
import { NotificationType } from '@sourcegraph/shared/src/api/extension/extensionHostApi'
import { PlatformContext } from '@sourcegraph/shared/src/platform/context'
import { observeSystemIsLightTheme } from '@sourcegraph/shared/src/theme'
import {
FileSpec,
RepoSpec,
ResolvedRevisionSpec,
RevisionSpec,
toAbsoluteBlobURL,
} from '@sourcegraph/shared/src/util/url'
import { fetchBlobContentLines } from '../../repo/backend'
import { querySelectorAllOrSelf, querySelectorOrSelf } from '../../util/dom'
import { CodeHost, MountGetter } from '../shared/codeHost'
import { CodeView, toCodeViewResolver } from '../shared/codeViews'
import { createNotificationClassNameGetter } from '../shared/getNotificationClassName'
import { NativeTooltip } from '../shared/nativeTooltips'
import { getSelectionsFromHash, observeSelectionsFromHash } from '../shared/util/selections'
import { ViewResolver } from '../shared/views'
import { markdownBodyViewResolver } from './contentViews'
import { diffDomFunctions, searchCodeSnippetDOMFunctions, singleFileDOMFunctions } from './domFunctions'
import { getCommandPaletteMount } from './extensions'
import { resolveDiffFileInfo, resolveFileInfo, resolveSnippetFileInfo } from './fileInfo'
import { setElementTooltip } from './tooltip'
import { getFileContainers, parseURL } from './util'
/**
* Creates the mount element for the CodeViewToolbar on code views containing
* a `.file-actions` element, for instance:
* - A diff code view on a PR's files page, or a commit page
* - An older GHE single file code view (newer GitHub.com code views use createFileLineContainerToolbarMount)
*/
export function createFileActionsToolbarMount(codeView: HTMLElement): HTMLElement {
const className = 'github-file-actions-toolbar-mount'
const existingMount = codeView.querySelector('.' + className) as HTMLElement
if (existingMount) {
return existingMount
}
const mountElement = document.createElement('div')
mountElement.className = className
const fileActions = codeView.querySelector('.file-actions')
if (!fileActions) {
throw new Error('Could not find GitHub file actions with selector .file-actions')
}
// Add a class to the .file-actions element, so that we can reliably match it in
// stylesheets without bleeding CSS to other code hosts (GitLab also uses .file-actions elements).
fileActions.classList.add('sg-github-file-actions')
// Old GitHub Enterprise PR views have a "☑ show comments" text that we want to insert *after*
const showCommentsElement = codeView.querySelector('.show-file-notes')
if (showCommentsElement) {
showCommentsElement.after(mountElement)
} else {
fileActions.prepend(mountElement)
}
return mountElement
}
const toolbarButtonProps = {
className: 'btn btn-sm tooltipped tooltipped-s',
}
const diffCodeView: Omit<CodeView, 'element'> = {
dom: diffDomFunctions,
getToolbarMount: createFileActionsToolbarMount,
resolveFileInfo: resolveDiffFileInfo,
toolbarButtonProps,
getScrollBoundaries: codeView => {
const fileHeader = codeView.querySelector<HTMLElement>('.file-header')
if (!fileHeader) {
throw new Error('Could not find .file-header element in GitHub PR code view')
}
return [fileHeader]
},
}
const diffConversationCodeView: Omit<CodeView, 'element'> = {
...diffCodeView,
getToolbarMount: undefined,
}
const singleFileCodeView: Omit<CodeView, 'element'> = {
dom: singleFileDOMFunctions,
getToolbarMount: createFileActionsToolbarMount,
resolveFileInfo,
toolbarButtonProps,
getSelections: getSelectionsFromHash,
observeSelections: observeSelectionsFromHash,
}
/**
* Some code snippets get leading white space trimmed. This adjusts based on
* this. See an example here https://github.com/sourcegraph/browser-extensions/issues/188.
*/
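// Example (assuming the usual case where GitHub trims leading whitespace): if the blob line is
// "    foo()" but the snippet renders "foo()", the computation below yields delta = -4 when mapping
// ActualToCodeView (so character 6 becomes 2) and +4 in the opposite direction.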
const getSnippetPositionAdjuster = (
requestGraphQL: PlatformContext['requestGraphQL']
): PositionAdjuster<RepoSpec & RevisionSpec & FileSpec & ResolvedRevisionSpec> => ({ direction, codeView, position }) =>
fetchBlobContentLines({ ...position, requestGraphQL }).pipe(
map(lines => {
const codeElement = singleFileDOMFunctions.getCodeElementFromLineNumber(
codeView,
position.line,
position.part
)
if (!codeElement) {
| const actualLine = lines[position.line - 1]
const documentLine = codeElement.textContent || ''
const actualLeadingWhiteSpace = actualLine.length - trimStart(actualLine).length
const documentLeadingWhiteSpace = documentLine.length - trimStart(documentLine).length
const modifier = direction === AdjustmentDirection.ActualToCodeView ? -1 : 1
const delta = Math.abs(actualLeadingWhiteSpace - documentLeadingWhiteSpace) * modifier
return {
line: position.line,
character: position.character + delta,
}
})
)
const searchResultCodeViewResolver = toCodeViewResolver('.code-list-item', {
dom: searchCodeSnippetDOMFunctions,
getPositionAdjuster: getSnippetPositionAdjuster,
resolveFileInfo: resolveSnippetFileInfo,
toolbarButtonProps,
})
const snippetCodeView: Omit<CodeView, 'element'> = {
dom: singleFileDOMFunctions,
resolveFileInfo: resolveSnippetFileInfo,
getPositionAdjuster: getSnippetPositionAdjuster,
}
export const createFileLineContainerToolbarMount: NonNullable<CodeView['getToolbarMount']> = (
codeViewElement: HTMLElement
): HTMLElement => {
const className = 'sourcegraph-github-file-code-view-toolbar-mount'
const existingMount = codeViewElement.querySelector(`.${className}`) as HTMLElement
if (existingMount) {
return existingMount
}
const mountElement = document.createElement('div')
mountElement.style.display = 'inline-flex'
mountElement.style.verticalAlign = 'middle'
mountElement.style.alignItems = 'center'
mountElement.className = className
const rawURLLink = codeViewElement.querySelector('#raw-url')
const buttonGroup = rawURLLink?.closest('.BtnGroup')
if (!buttonGroup?.parentNode) {
throw new Error('File actions not found')
}
buttonGroup.parentNode.insertBefore(mountElement, buttonGroup)
return mountElement
}
/**
* Matches the modern single-file code view, or snippets embedded in comments.
*
*/
export const fileLineContainerResolver: ViewResolver<CodeView> = {
selector: '.js-file-line-container',
resolveView: (fileLineContainer: HTMLElement): CodeView | null => {
const embeddedBlobWrapper = fileLineContainer.closest('.blob-wrapper-embedded')
if (embeddedBlobWrapper) {
// This is a snippet embedded in a comment.
// Resolve to `.blob-wrapper-embedded`'s parent element,
// the smallest element that contains both the code and
// the HTML anchor allowing to resolve the file info.
const element = embeddedBlobWrapper.parentElement!
return {
element,
...snippetCodeView,
}
}
const { pageType } = parseURL()
if (pageType !== 'blob') {
// this is not a single-file code view
return null
}
const repositoryContent = fileLineContainer.closest('.repository-content')
if (!repositoryContent) {
throw new Error('Could not find repository content element')
}
return {
element: repositoryContent as HTMLElement,
...singleFileCodeView,
getToolbarMount: createFileLineContainerToolbarMount,
}
},
}
const genericCodeViewResolver: ViewResolver<CodeView> = {
selector: target => {
const codeViews = new Set<HTMLElement>()
// Logic to support large diffs that are loaded asynchronously:
// https://github.com/sourcegraph/sourcegraph/issues/18337
// - Don't return `.file` elements that have yet to be loaded (loading is triggered by user)
// - When the user triggers diff loading, the mutation observer will tell us about
// .js-blob-wrapper, since the actual '.file' has been in the DOM the whole time. Return
// the closest ancestor '.file'
for (const file of querySelectorAllOrSelf<HTMLElement>(target, '.file')) {
if (file.querySelectorAll('.js-diff-load-container').length === 0) {
codeViews.add(file)
}
}
for (const blobWrapper of querySelectorAllOrSelf(target, '.js-blob-wrapper')) {
const file = blobWrapper.closest('.file')
if (file instanceof HTMLElement) {
codeViews.add(file)
}
}
return [...codeViews]
},
resolveView: (element: HTMLElement): CodeView | null => {
if (element.querySelector('article.markdown-body')) {
// This code view is rendered markdown, we shouldn't add code intelligence
return null
}
// This is a suggested change on a GitHub PR
if (element.closest('.js-suggested-changes-blob')) {
return null
}
const { pageType } = parseURL()
const isSingleCodeFile =
pageType === 'blob' &&
document.querySelectorAll('.file').length === 1 &&
document.querySelectorAll('.diff-view').length === 0
if (isSingleCodeFile) {
return { element, ...singleFileCodeView }
}
if (element.closest('.discussion-item-body') || element.classList.contains('js-comment-container')) {
// This code view is embedded on a PR conversation page.
return { element, ...diffConversationCodeView }
}
return { element, ...diffCodeView }
},
}
/**
* Returns true if the current page is GitHub Enterprise.
*/
export function checkIsGitHubEnterprise(): boolean {
const ogSiteName = document.head.querySelector<HTMLMetaElement>('meta[property="og:site_name"]')
return (
!!ogSiteName &&
// GitHub Enterprise v2.14.11 has "GitHub" as og:site_name
(ogSiteName.content === 'GitHub Enterprise' || ogSiteName.content === 'GitHub') &&
document.body.classList.contains('enterprise')
)
}
/**
* Returns true if the current page is github.com.
*/
export const checkIsGitHubDotCom = (url = window.location.href): boolean => /^https?:\/\/(www\.)?github\.com/.test(url)
/**
* Returns true if the current page is either github.com or GitHub Enterprise.
*/
export const checkIsGitHub = (): boolean => checkIsGitHubDotCom() || checkIsGitHubEnterprise()
const OPEN_ON_SOURCEGRAPH_ID = 'open-on-sourcegraph'
export const createOpenOnSourcegraphIfNotExists: MountGetter = (container: HTMLElement): HTMLElement | null => {
const pageheadActions = querySelectorOrSelf(container, '.pagehead-actions')
// If ran on page that isn't under a repository namespace.
if (!pageheadActions || pageheadActions.children.length === 0) {
return null
}
// Check for existing
let mount = pageheadActions.querySelector<HTMLElement>('#' + OPEN_ON_SOURCEGRAPH_ID)
if (mount) {
return mount
}
// Create new
mount = document.createElement('li')
mount.id = OPEN_ON_SOURCEGRAPH_ID
pageheadActions.prepend(mount)
return mount
}
const nativeTooltipResolver: ViewResolver<NativeTooltip> = {
selector: '.js-tagsearch-popover',
resolveView: element => ({ element }),
}
const iconClassName = 'icon--github v-align-text-bottom'
const notificationClassNames = {
[NotificationType.Log]: 'flash',
[NotificationType.Success]: 'flash flash-success',
[NotificationType.Info]: 'flash',
[NotificationType.Warning]: 'flash flash-warn',
[NotificationType.Error]: 'flash flash-error',
}
export const githubCodeHost: CodeHost = {
type: 'github',
name: checkIsGitHubEnterprise() ? 'GitHub Enterprise' : 'GitHub',
codeViewResolvers: [genericCodeViewResolver, fileLineContainerResolver, searchResultCodeViewResolver],
contentViewResolvers: [markdownBodyViewResolver],
nativeTooltipResolvers: [nativeTooltipResolver],
getContext: () => {
const repoHeaderHasPrivateMarker =
!!document.querySelector('.repohead .private') ||
!!document.querySelector('h1 .octicon-lock ~ [itemprop="author"] ~ [itemprop="name"]') ||
!!(
document
.querySelector('h1 [itemprop="author"] ~ [itemprop="name"] ~ .Label')
?.textContent?.trim()
.toLowerCase() === 'private'
)
const parsedURL = parseURL()
return {
...parsedURL,
revision:
parsedURL.pageType === 'blob' || parsedURL.pageType === 'tree'
? resolveFileInfo().blob.revision
: undefined,
privateRepository: window.location.hostname !== 'github.com' || repoHeaderHasPrivateMarker,
}
},
isLightTheme: defer(() => {
const mode = document.documentElement.dataset.colorMode as 'auto' | 'light' | 'dark' | undefined
if (mode === 'auto') {
return observeSystemIsLightTheme()
}
return of(mode !== 'dark')
}),
getViewContextOnSourcegraphMount: createOpenOnSourcegraphIfNotExists,
viewOnSourcegraphButtonClassProps: {
className: 'btn btn-sm tooltipped tooltipped-s',
iconClassName,
},
check: checkIsGitHub,
getCommandPaletteMount,
notificationClassNames,
commandPaletteClassProps: {
buttonClassName: 'Header-link',
popoverClassName: 'Box',
formClassName: 'p-1',
inputClassName: 'form-control input-sm header-search-input jump-to-field',
listClassName: 'p-0 m-0 js-navigation-container jump-to-suggestions-results-container',
selectedListItemClassName: 'navigation-focus',
listItemClassName:
'd-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-scoped-search',
actionItemClassName:
'command-palette-action-item--github no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path p-2',
noResultsClassName: 'd-flex flex-auto flex-items-center jump-to-suggestions-path p-2',
iconClassName,
},
codeViewToolbarClassProps: {
className: 'code-view-toolbar--github',
listItemClass: 'code-view-toolbar__item--github BtnGroup',
actionItemClass: 'btn btn-sm tooltipped tooltipped-s BtnGroup-item action-item--github',
actionItemPressedClass: 'selected',
actionItemIconClass: 'icon--github v-align-text-bottom',
},
hoverOverlayClassProps: {
className: 'Box',
actionItemClassName: 'btn btn-secondary',
actionItemPressedClassName: 'active',
closeButtonClassName: 'btn-octicon p-0 hover-overlay__close-button--github',
badgeClassName: 'label hover-overlay__badge--github',
getAlertClassName: createNotificationClassNameGetter(notificationClassNames, 'flash-full'),
iconClassName,
},
setElementTooltip,
linkPreviewContentClass: 'text-small text-gray p-1 mx-1 border rounded-1 bg-gray text-gray-dark',
urlToFile: (sourcegraphURL, target, context) => {
if (target.viewState) {
// A view state means that a panel must be shown, and panels are currently only supported on
// Sourcegraph (not code hosts).
return toAbsoluteBlobURL(sourcegraphURL, target)
}
// Make sure the location is also on this github instance, return an absolute URL otherwise.
const sameCodeHost = target.rawRepoName.startsWith(window.location.hostname)
if (!sameCodeHost) {
return toAbsoluteBlobURL(sourcegraphURL, target)
}
const revision = target.revision || 'HEAD'
// If we're provided options, we can make the j2d URL more specific.
const { rawRepoName } = parseURL()
// Stay on same page in PR if possible.
// TODO to be entirely correct, this would need to compare the revision of the code view with the target revision.
const isSameRepo = rawRepoName === target.rawRepoName
if (isSameRepo && context.part !== undefined) {
const containers = getFileContainers()
for (const container of containers) {
const header = container.querySelector<HTMLElement & { dataset: { path: string; anchor: string } }>(
'.file-header[data-path][data-anchor]'
)
if (!header) {
// E.g. suggestion snippet
continue
}
const anchorPath = header.dataset.path
if (anchorPath === target.filePath) {
const anchorUrl = header.dataset.anchor
const url = new URL(window.location.href)
url.hash = anchorUrl
if (target.position) {
// GitHub uses L for the left side, R for both right side and the unchanged/white parts
url.hash += `${context.part === 'base' ? 'L' : 'R'}${target.position.line}`
}
// Only use URL if it is visible
// TODO: Expand hidden lines to reveal
if (!document.querySelector(url.hash)) {
break
}
return url.href
}
}
}
// Go to blob URL
const fragment = target.position
? `#L${target.position.line}${target.position.character ? `:${target.position.character}` : ''}`
: ''
return `https://${target.rawRepoName}/blob/${revision}/${target.filePath}${fragment}`
},
codeViewsRequireTokenization: true,
}
| throw new Error('(adjustPosition) could not find code element for line provided')
}
| conditional_block |
codeHost.ts | import { trimStart } from 'lodash'
import { defer, of } from 'rxjs'
import { map } from 'rxjs/operators'
import { Omit } from 'utility-types'
import { AdjustmentDirection, PositionAdjuster } from '@sourcegraph/codeintellify'
import { NotificationType } from '@sourcegraph/shared/src/api/extension/extensionHostApi'
import { PlatformContext } from '@sourcegraph/shared/src/platform/context'
import { observeSystemIsLightTheme } from '@sourcegraph/shared/src/theme'
import {
FileSpec,
RepoSpec,
ResolvedRevisionSpec,
RevisionSpec,
toAbsoluteBlobURL,
} from '@sourcegraph/shared/src/util/url'
import { fetchBlobContentLines } from '../../repo/backend'
import { querySelectorAllOrSelf, querySelectorOrSelf } from '../../util/dom'
import { CodeHost, MountGetter } from '../shared/codeHost'
import { CodeView, toCodeViewResolver } from '../shared/codeViews'
import { createNotificationClassNameGetter } from '../shared/getNotificationClassName'
import { NativeTooltip } from '../shared/nativeTooltips'
import { getSelectionsFromHash, observeSelectionsFromHash } from '../shared/util/selections'
import { ViewResolver } from '../shared/views'
import { markdownBodyViewResolver } from './contentViews'
import { diffDomFunctions, searchCodeSnippetDOMFunctions, singleFileDOMFunctions } from './domFunctions'
import { getCommandPaletteMount } from './extensions'
import { resolveDiffFileInfo, resolveFileInfo, resolveSnippetFileInfo } from './fileInfo'
import { setElementTooltip } from './tooltip'
import { getFileContainers, parseURL } from './util'
/**
* Creates the mount element for the CodeViewToolbar on code views containing
* a `.file-actions` element, for instance:
* - A diff code view on a PR's files page, or a commit page
* - An older GHE single file code view (newer GitHub.com code views use createFileLineContainerToolbarMount)
*/
export function createFileActionsToolbarMount(codeView: HTMLElement): HTMLElement {
const className = 'github-file-actions-toolbar-mount'
const existingMount = codeView.querySelector('.' + className) as HTMLElement
if (existingMount) {
return existingMount
}
const mountElement = document.createElement('div')
mountElement.className = className
const fileActions = codeView.querySelector('.file-actions')
if (!fileActions) {
throw new Error('Could not find GitHub file actions with selector .file-actions')
}
// Add a class to the .file-actions element, so that we can reliably match it in
// stylesheets without bleeding CSS to other code hosts (GitLab also uses .file-actions elements).
fileActions.classList.add('sg-github-file-actions')
// Old GitHub Enterprise PR views have a "☑ show comments" text that we want to insert *after*
const showCommentsElement = codeView.querySelector('.show-file-notes')
if (showCommentsElement) {
showCommentsElement.after(mountElement)
} else {
fileActions.prepend(mountElement)
}
return mountElement
}
const toolbarButtonProps = {
className: 'btn btn-sm tooltipped tooltipped-s',
}
const diffCodeView: Omit<CodeView, 'element'> = {
dom: diffDomFunctions,
getToolbarMount: createFileActionsToolbarMount,
resolveFileInfo: resolveDiffFileInfo,
toolbarButtonProps,
getScrollBoundaries: codeView => {
const fileHeader = codeView.querySelector<HTMLElement>('.file-header')
if (!fileHeader) {
throw new Error('Could not find .file-header element in GitHub PR code view')
}
return [fileHeader]
},
}
const diffConversationCodeView: Omit<CodeView, 'element'> = {
...diffCodeView,
getToolbarMount: undefined,
}
const singleFileCodeView: Omit<CodeView, 'element'> = {
dom: singleFileDOMFunctions,
getToolbarMount: createFileActionsToolbarMount,
resolveFileInfo,
toolbarButtonProps,
getSelections: getSelectionsFromHash,
observeSelections: observeSelectionsFromHash,
}
/**
* Some code snippets get leading white space trimmed. This adjusts based on
* this. See an example here https://github.com/sourcegraph/browser-extensions/issues/188.
*/
const getSnippetPositionAdjuster = (
requestGraphQL: PlatformContext['requestGraphQL']
): PositionAdjuster<RepoSpec & RevisionSpec & FileSpec & ResolvedRevisionSpec> => ({ direction, codeView, position }) =>
fetchBlobContentLines({ ...position, requestGraphQL }).pipe(
map(lines => {
const codeElement = singleFileDOMFunctions.getCodeElementFromLineNumber(
codeView,
position.line,
position.part
)
if (!codeElement) {
throw new Error('(adjustPosition) could not find code element for line provided')
}
const actualLine = lines[position.line - 1]
const documentLine = codeElement.textContent || ''
const actualLeadingWhiteSpace = actualLine.length - trimStart(actualLine).length
const documentLeadingWhiteSpace = documentLine.length - trimStart(documentLine).length
const modifier = direction === AdjustmentDirection.ActualToCodeView ? -1 : 1
const delta = Math.abs(actualLeadingWhiteSpace - documentLeadingWhiteSpace) * modifier
return {
line: position.line,
character: position.character + delta,
}
})
)
const searchResultCodeViewResolver = toCodeViewResolver('.code-list-item', {
dom: searchCodeSnippetDOMFunctions,
getPositionAdjuster: getSnippetPositionAdjuster,
resolveFileInfo: resolveSnippetFileInfo,
toolbarButtonProps,
})
const snippetCodeView: Omit<CodeView, 'element'> = {
dom: singleFileDOMFunctions,
resolveFileInfo: resolveSnippetFileInfo,
getPositionAdjuster: getSnippetPositionAdjuster,
}
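/**
 * Creates the mount element for the CodeViewToolbar on newer GitHub.com single-file code views,
 * inserting it before the "Raw" button group in the file header.
 */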
export const createFileLineContainerToolbarMount: NonNullable<CodeView['getToolbarMount']> = (
codeViewElement: HTMLElement
): HTMLElement => {
const className = 'sourcegraph-github-file-code-view-toolbar-mount'
const existingMount = codeViewElement.querySelector(`.${className}`) as HTMLElement
if (existingMount) {
return existingMount
}
const mountElement = document.createElement('div')
mountElement.style.display = 'inline-flex'
mountElement.style.verticalAlign = 'middle'
mountElement.style.alignItems = 'center'
mountElement.className = className
const rawURLLink = codeViewElement.querySelector('#raw-url')
const buttonGroup = rawURLLink?.closest('.BtnGroup')
if (!buttonGroup?.parentNode) {
throw new Error('File actions not found')
}
buttonGroup.parentNode.insertBefore(mountElement, buttonGroup)
return mountElement
}
/**
* Matches the modern single-file code view, or snippets embedded in comments.
*
*/
export const fileLineContainerResolver: ViewResolver<CodeView> = {
selector: '.js-file-line-container',
resolveView: (fileLineContainer: HTMLElement): CodeView | null => {
const embeddedBlobWrapper = fileLineContainer.closest('.blob-wrapper-embedded')
if (embeddedBlobWrapper) {
// This is a snippet embedded in a comment.
// Resolve to `.blob-wrapper-embedded`'s parent element,
// the smallest element that contains both the code and
// the HTML anchor allowing to resolve the file info.
const element = embeddedBlobWrapper.parentElement!
return {
element,
...snippetCodeView,
}
}
const { pageType } = parseURL()
if (pageType !== 'blob') {
// this is not a single-file code view
return null
}
const repositoryContent = fileLineContainer.closest('.repository-content')
if (!repositoryContent) {
throw new Error('Could not find repository content element')
}
return {
element: repositoryContent as HTMLElement,
...singleFileCodeView,
getToolbarMount: createFileLineContainerToolbarMount,
}
},
}
const genericCodeViewResolver: ViewResolver<CodeView> = {
selector: target => {
const codeViews = new Set<HTMLElement>()
// Logic to support large diffs that are loaded asynchronously:
// https://github.com/sourcegraph/sourcegraph/issues/18337
// - Don't return `.file` elements that have yet to be loaded (loading is triggered by the user)
// - When the user triggers diff loading, the mutation observer will tell us about
// .js-blob-wrapper, since the actual '.file' has been in the DOM the whole time. Return
// the closest ancestor '.file'
for (const file of querySelectorAllOrSelf<HTMLElement>(target, '.file')) {
if (file.querySelectorAll('.js-diff-load-container').length === 0) {
codeViews.add(file)
}
}
for (const blobWrapper of querySelectorAllOrSelf(target, '.js-blob-wrapper')) {
const file = blobWrapper.closest('.file')
if (file instanceof HTMLElement) {
codeViews.add(file)
}
}
return [...codeViews]
},
resolveView: (element: HTMLElement): CodeView | null => {
if (element.querySelector('article.markdown-body')) {
// This code view is rendered markdown, so we shouldn't add code intelligence
return null
}
// This is a suggested change on a GitHub PR
if (element.closest('.js-suggested-changes-blob')) {
return null
}
const { pageType } = parseURL()
const isSingleCodeFile =
pageType === 'blob' &&
document.querySelectorAll('.file').length === 1 &&
document.querySelectorAll('.diff-view').length === 0
if (isSingleCodeFile) {
return { element, ...singleFileCodeView }
}
if (element.closest('.discussion-item-body') || element.classList.contains('js-comment-container')) {
// This code view is embedded on a PR conversation page.
return { element, ...diffConversationCodeView }
}
return { element, ...diffCodeView }
},
}
/**
* Returns true if the current page is GitHub Enterprise.
*/
export function checkIsGitHubEnterprise(): boolean {
| /**
* Returns true if the current page is github.com.
*/
export const checkIsGitHubDotCom = (url = window.location.href): boolean => /^https?:\/\/(www\.)?github\.com/.test(url)
/**
* Returns true if the current page is either github.com or GitHub Enterprise.
*/
export const checkIsGitHub = (): boolean => checkIsGitHubDotCom() || checkIsGitHubEnterprise()
const OPEN_ON_SOURCEGRAPH_ID = 'open-on-sourcegraph'
export const createOpenOnSourcegraphIfNotExists: MountGetter = (container: HTMLElement): HTMLElement | null => {
const pageheadActions = querySelectorOrSelf(container, '.pagehead-actions')
// If run on a page that isn't under a repository namespace.
if (!pageheadActions || pageheadActions.children.length === 0) {
return null
}
// Check for existing
let mount = pageheadActions.querySelector<HTMLElement>('#' + OPEN_ON_SOURCEGRAPH_ID)
if (mount) {
return mount
}
// Create new
mount = document.createElement('li')
mount.id = OPEN_ON_SOURCEGRAPH_ID
pageheadActions.prepend(mount)
return mount
}
const nativeTooltipResolver: ViewResolver<NativeTooltip> = {
selector: '.js-tagsearch-popover',
resolveView: element => ({ element }),
}
const iconClassName = 'icon--github v-align-text-bottom'
const notificationClassNames = {
[NotificationType.Log]: 'flash',
[NotificationType.Success]: 'flash flash-success',
[NotificationType.Info]: 'flash',
[NotificationType.Warning]: 'flash flash-warn',
[NotificationType.Error]: 'flash flash-error',
}
export const githubCodeHost: CodeHost = {
type: 'github',
name: checkIsGitHubEnterprise() ? 'GitHub Enterprise' : 'GitHub',
codeViewResolvers: [genericCodeViewResolver, fileLineContainerResolver, searchResultCodeViewResolver],
contentViewResolvers: [markdownBodyViewResolver],
nativeTooltipResolvers: [nativeTooltipResolver],
getContext: () => {
const repoHeaderHasPrivateMarker =
!!document.querySelector('.repohead .private') ||
!!document.querySelector('h1 .octicon-lock ~ [itemprop="author"] ~ [itemprop="name"]') ||
!!(
document
.querySelector('h1 [itemprop="author"] ~ [itemprop="name"] ~ .Label')
?.textContent?.trim()
.toLowerCase() === 'private'
)
const parsedURL = parseURL()
return {
...parsedURL,
revision:
parsedURL.pageType === 'blob' || parsedURL.pageType === 'tree'
? resolveFileInfo().blob.revision
: undefined,
privateRepository: window.location.hostname !== 'github.com' || repoHeaderHasPrivateMarker,
}
},
isLightTheme: defer(() => {
const mode = document.documentElement.dataset.colorMode as 'auto' | 'light' | 'dark' | undefined
if (mode === 'auto') {
return observeSystemIsLightTheme()
}
return of(mode !== 'dark')
}),
getViewContextOnSourcegraphMount: createOpenOnSourcegraphIfNotExists,
viewOnSourcegraphButtonClassProps: {
className: 'btn btn-sm tooltipped tooltipped-s',
iconClassName,
},
check: checkIsGitHub,
getCommandPaletteMount,
notificationClassNames,
commandPaletteClassProps: {
buttonClassName: 'Header-link',
popoverClassName: 'Box',
formClassName: 'p-1',
inputClassName: 'form-control input-sm header-search-input jump-to-field',
listClassName: 'p-0 m-0 js-navigation-container jump-to-suggestions-results-container',
selectedListItemClassName: 'navigation-focus',
listItemClassName:
'd-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-scoped-search',
actionItemClassName:
'command-palette-action-item--github no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path p-2',
noResultsClassName: 'd-flex flex-auto flex-items-center jump-to-suggestions-path p-2',
iconClassName,
},
codeViewToolbarClassProps: {
className: 'code-view-toolbar--github',
listItemClass: 'code-view-toolbar__item--github BtnGroup',
actionItemClass: 'btn btn-sm tooltipped tooltipped-s BtnGroup-item action-item--github',
actionItemPressedClass: 'selected',
actionItemIconClass: 'icon--github v-align-text-bottom',
},
hoverOverlayClassProps: {
className: 'Box',
actionItemClassName: 'btn btn-secondary',
actionItemPressedClassName: 'active',
closeButtonClassName: 'btn-octicon p-0 hover-overlay__close-button--github',
badgeClassName: 'label hover-overlay__badge--github',
getAlertClassName: createNotificationClassNameGetter(notificationClassNames, 'flash-full'),
iconClassName,
},
setElementTooltip,
linkPreviewContentClass: 'text-small text-gray p-1 mx-1 border rounded-1 bg-gray text-gray-dark',
urlToFile: (sourcegraphURL, target, context) => {
if (target.viewState) {
// A view state means that a panel must be shown, and panels are currently only supported on
// Sourcegraph (not code hosts).
return toAbsoluteBlobURL(sourcegraphURL, target)
}
// Make sure the location is also on this GitHub instance; return an absolute URL otherwise.
const sameCodeHost = target.rawRepoName.startsWith(window.location.hostname)
if (!sameCodeHost) {
return toAbsoluteBlobURL(sourcegraphURL, target)
}
const revision = target.revision || 'HEAD'
// If we're provided options, we can make the j2d URL more specific.
const { rawRepoName } = parseURL()
// Stay on same page in PR if possible.
// TODO to be entirely correct, this would need to compare the revision of the code view with the target revision.
const isSameRepo = rawRepoName === target.rawRepoName
if (isSameRepo && context.part !== undefined) {
const containers = getFileContainers()
for (const container of containers) {
const header = container.querySelector<HTMLElement & { dataset: { path: string; anchor: string } }>(
'.file-header[data-path][data-anchor]'
)
if (!header) {
// E.g. suggestion snippet
continue
}
const anchorPath = header.dataset.path
if (anchorPath === target.filePath) {
const anchorUrl = header.dataset.anchor
const url = new URL(window.location.href)
url.hash = anchorUrl
if (target.position) {
// GitHub uses L for the left side, and R for both the right side and the unchanged/white parts
url.hash += `${context.part === 'base' ? 'L' : 'R'}${target.position.line}`
}
// Only use URL if it is visible
// TODO: Expand hidden lines to reveal
if (!document.querySelector(url.hash)) {
break
}
return url.href
}
}
}
// Go to blob URL
const fragment = target.position
? `#L${target.position.line}${target.position.character ? `:${target.position.character}` : ''}`
: ''
return `https://${target.rawRepoName}/blob/${revision}/${target.filePath}${fragment}`
},
codeViewsRequireTokenization: true,
}
| const ogSiteName = document.head.querySelector<HTMLMetaElement>('meta[property="og:site_name"]')
return (
!!ogSiteName &&
// GitHub Enterprise v2.14.11 has "GitHub" as og:site_name
(ogSiteName.content === 'GitHub Enterprise' || ogSiteName.content === 'GitHub') &&
document.body.classList.contains('enterprise')
)
}
| identifier_body |
codeHost.ts | import { trimStart } from 'lodash'
import { defer, of } from 'rxjs'
import { map } from 'rxjs/operators'
import { Omit } from 'utility-types'
import { AdjustmentDirection, PositionAdjuster } from '@sourcegraph/codeintellify'
import { NotificationType } from '@sourcegraph/shared/src/api/extension/extensionHostApi'
import { PlatformContext } from '@sourcegraph/shared/src/platform/context'
import { observeSystemIsLightTheme } from '@sourcegraph/shared/src/theme'
import {
FileSpec,
RepoSpec,
ResolvedRevisionSpec,
RevisionSpec,
toAbsoluteBlobURL,
} from '@sourcegraph/shared/src/util/url'
import { fetchBlobContentLines } from '../../repo/backend'
import { querySelectorAllOrSelf, querySelectorOrSelf } from '../../util/dom'
import { CodeHost, MountGetter } from '../shared/codeHost'
import { CodeView, toCodeViewResolver } from '../shared/codeViews'
import { createNotificationClassNameGetter } from '../shared/getNotificationClassName'
import { NativeTooltip } from '../shared/nativeTooltips'
import { getSelectionsFromHash, observeSelectionsFromHash } from '../shared/util/selections'
import { ViewResolver } from '../shared/views'
import { markdownBodyViewResolver } from './contentViews'
import { diffDomFunctions, searchCodeSnippetDOMFunctions, singleFileDOMFunctions } from './domFunctions'
import { getCommandPaletteMount } from './extensions'
import { resolveDiffFileInfo, resolveFileInfo, resolveSnippetFileInfo } from './fileInfo'
import { setElementTooltip } from './tooltip'
import { getFileContainers, parseURL } from './util'
/**
* Creates the mount element for the CodeViewToolbar on code views containing
* a `.file-actions` element, for instance:
* - A diff code view on a PR's files page, or a commit page
* - An older GHE single file code view (newer GitHub.com code views use createFileLineContainerToolbarMount)
*/
export function | (codeView: HTMLElement): HTMLElement {
const className = 'github-file-actions-toolbar-mount'
const existingMount = codeView.querySelector('.' + className) as HTMLElement
if (existingMount) {
return existingMount
}
const mountElement = document.createElement('div')
mountElement.className = className
const fileActions = codeView.querySelector('.file-actions')
if (!fileActions) {
throw new Error('Could not find GitHub file actions with selector .file-actions')
}
// Add a class to the .file-actions element, so that we can reliably match it in
// stylesheets without bleeding CSS to other code hosts (GitLab also uses .file-actions elements).
fileActions.classList.add('sg-github-file-actions')
// Old GitHub Enterprise PR views have a "☑ show comments" label that we want to insert the mount element *after*
const showCommentsElement = codeView.querySelector('.show-file-notes')
if (showCommentsElement) {
showCommentsElement.after(mountElement)
} else {
fileActions.prepend(mountElement)
}
return mountElement
}
const toolbarButtonProps = {
className: 'btn btn-sm tooltipped tooltipped-s',
}
const diffCodeView: Omit<CodeView, 'element'> = {
dom: diffDomFunctions,
getToolbarMount: createFileActionsToolbarMount,
resolveFileInfo: resolveDiffFileInfo,
toolbarButtonProps,
getScrollBoundaries: codeView => {
const fileHeader = codeView.querySelector<HTMLElement>('.file-header')
if (!fileHeader) {
throw new Error('Could not find .file-header element in GitHub PR code view')
}
return [fileHeader]
},
}
const diffConversationCodeView: Omit<CodeView, 'element'> = {
...diffCodeView,
getToolbarMount: undefined,
}
const singleFileCodeView: Omit<CodeView, 'element'> = {
dom: singleFileDOMFunctions,
getToolbarMount: createFileActionsToolbarMount,
resolveFileInfo,
toolbarButtonProps,
getSelections: getSelectionsFromHash,
observeSelections: observeSelectionsFromHash,
}
/**
 * Some code snippets have their leading whitespace trimmed when rendered. This
 * position adjuster compensates for that difference. See an example here:
 * https://github.com/sourcegraph/browser-extensions/issues/188.
 */
const getSnippetPositionAdjuster = (
requestGraphQL: PlatformContext['requestGraphQL']
): PositionAdjuster<RepoSpec & RevisionSpec & FileSpec & ResolvedRevisionSpec> => ({ direction, codeView, position }) =>
fetchBlobContentLines({ ...position, requestGraphQL }).pipe(
map(lines => {
const codeElement = singleFileDOMFunctions.getCodeElementFromLineNumber(
codeView,
position.line,
position.part
)
if (!codeElement) {
throw new Error('(adjustPosition) could not find code element for line provided')
}
const actualLine = lines[position.line - 1]
const documentLine = codeElement.textContent || ''
const actualLeadingWhiteSpace = actualLine.length - trimStart(actualLine).length
const documentLeadingWhiteSpace = documentLine.length - trimStart(documentLine).length
const modifier = direction === AdjustmentDirection.ActualToCodeView ? -1 : 1
const delta = Math.abs(actualLeadingWhiteSpace - documentLeadingWhiteSpace) * modifier
return {
line: position.line,
character: position.character + delta,
}
})
)
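// Illustrative sketch (not part of the original file): a made-up example of the
// whitespace-delta adjustment performed above. Assume the blob line is
// '    const x = 1' (4 leading spaces) while the rendered snippet shows 'const x = 1':
//   actualLeadingWhiteSpace   = '    const x = 1'.length - trimStart('    const x = 1').length // 4
//   documentLeadingWhiteSpace = 'const x = 1'.length - trimStart('const x = 1').length         // 0
//   delta (ActualToCodeView, modifier -1) = Math.abs(4 - 0) * -1 = -4
// so a position at character 10 in the blob maps to character 6 in the snippet.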
const searchResultCodeViewResolver = toCodeViewResolver('.code-list-item', {
dom: searchCodeSnippetDOMFunctions,
getPositionAdjuster: getSnippetPositionAdjuster,
resolveFileInfo: resolveSnippetFileInfo,
toolbarButtonProps,
})
const snippetCodeView: Omit<CodeView, 'element'> = {
dom: singleFileDOMFunctions,
resolveFileInfo: resolveSnippetFileInfo,
getPositionAdjuster: getSnippetPositionAdjuster,
}
export const createFileLineContainerToolbarMount: NonNullable<CodeView['getToolbarMount']> = (
codeViewElement: HTMLElement
): HTMLElement => {
const className = 'sourcegraph-github-file-code-view-toolbar-mount'
const existingMount = codeViewElement.querySelector(`.${className}`) as HTMLElement
if (existingMount) {
return existingMount
}
const mountElement = document.createElement('div')
mountElement.style.display = 'inline-flex'
mountElement.style.verticalAlign = 'middle'
mountElement.style.alignItems = 'center'
mountElement.className = className
const rawURLLink = codeViewElement.querySelector('#raw-url')
const buttonGroup = rawURLLink?.closest('.BtnGroup')
if (!buttonGroup?.parentNode) {
throw new Error('File actions not found')
}
buttonGroup.parentNode.insertBefore(mountElement, buttonGroup)
return mountElement
}
/**
 * Matches the modern single-file code view, or snippets embedded in comments.
 */
export const fileLineContainerResolver: ViewResolver<CodeView> = {
selector: '.js-file-line-container',
resolveView: (fileLineContainer: HTMLElement): CodeView | null => {
const embeddedBlobWrapper = fileLineContainer.closest('.blob-wrapper-embedded')
if (embeddedBlobWrapper) {
// This is a snippet embedded in a comment.
// Resolve to `.blob-wrapper-embedded`'s parent element,
// the smallest element that contains both the code and
// the HTML anchor that allows resolving the file info.
const element = embeddedBlobWrapper.parentElement!
return {
element,
...snippetCodeView,
}
}
const { pageType } = parseURL()
if (pageType !== 'blob') {
// this is not a single-file code view
return null
}
const repositoryContent = fileLineContainer.closest('.repository-content')
if (!repositoryContent) {
throw new Error('Could not find repository content element')
}
return {
element: repositoryContent as HTMLElement,
...singleFileCodeView,
getToolbarMount: createFileLineContainerToolbarMount,
}
},
}
const genericCodeViewResolver: ViewResolver<CodeView> = {
selector: target => {
const codeViews = new Set<HTMLElement>()
// Logic to support large diffs that are loaded asynchronously:
// https://github.com/sourcegraph/sourcegraph/issues/18337
// - Don't return `.file` elements that have yet to be loaded (loading is triggered by the user)
// - When the user triggers diff loading, the mutation observer will tell us about
// .js-blob-wrapper, since the actual '.file' has been in the DOM the whole time. Return
// the closest ancestor '.file'
for (const file of querySelectorAllOrSelf<HTMLElement>(target, '.file')) {
if (file.querySelectorAll('.js-diff-load-container').length === 0) {
codeViews.add(file)
}
}
for (const blobWrapper of querySelectorAllOrSelf(target, '.js-blob-wrapper')) {
const file = blobWrapper.closest('.file')
if (file instanceof HTMLElement) {
codeViews.add(file)
}
}
return [...codeViews]
},
resolveView: (element: HTMLElement): CodeView | null => {
if (element.querySelector('article.markdown-body')) {
// This code view is rendered markdown, so we shouldn't add code intelligence
return null
}
// This is a suggested change on a GitHub PR
if (element.closest('.js-suggested-changes-blob')) {
return null
}
const { pageType } = parseURL()
const isSingleCodeFile =
pageType === 'blob' &&
document.querySelectorAll('.file').length === 1 &&
document.querySelectorAll('.diff-view').length === 0
if (isSingleCodeFile) {
return { element, ...singleFileCodeView }
}
if (element.closest('.discussion-item-body') || element.classList.contains('js-comment-container')) {
// This code view is embedded on a PR conversation page.
return { element, ...diffConversationCodeView }
}
return { element, ...diffCodeView }
},
}
/**
* Returns true if the current page is GitHub Enterprise.
*/
export function checkIsGitHubEnterprise(): boolean {
const ogSiteName = document.head.querySelector<HTMLMetaElement>('meta[property="og:site_name"]')
return (
!!ogSiteName &&
// GitHub Enterprise v2.14.11 has "GitHub" as og:site_name
(ogSiteName.content === 'GitHub Enterprise' || ogSiteName.content === 'GitHub') &&
document.body.classList.contains('enterprise')
)
}
/**
* Returns true if the current page is github.com.
*/
export const checkIsGitHubDotCom = (url = window.location.href): boolean => /^https?:\/\/(www\.)?github\.com/.test(url)
/**
* Returns true if the current page is either github.com or GitHub Enterprise.
*/
export const checkIsGitHub = (): boolean => checkIsGitHubDotCom() || checkIsGitHubEnterprise()
const OPEN_ON_SOURCEGRAPH_ID = 'open-on-sourcegraph'
export const createOpenOnSourcegraphIfNotExists: MountGetter = (container: HTMLElement): HTMLElement | null => {
const pageheadActions = querySelectorOrSelf(container, '.pagehead-actions')
// If run on a page that isn't under a repository namespace.
if (!pageheadActions || pageheadActions.children.length === 0) {
return null
}
// Check for existing
let mount = pageheadActions.querySelector<HTMLElement>('#' + OPEN_ON_SOURCEGRAPH_ID)
if (mount) {
return mount
}
// Create new
mount = document.createElement('li')
mount.id = OPEN_ON_SOURCEGRAPH_ID
pageheadActions.prepend(mount)
return mount
}
const nativeTooltipResolver: ViewResolver<NativeTooltip> = {
selector: '.js-tagsearch-popover',
resolveView: element => ({ element }),
}
const iconClassName = 'icon--github v-align-text-bottom'
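// Maps Sourcegraph notification types to GitHub's "flash" banner CSS classes.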
const notificationClassNames = {
[NotificationType.Log]: 'flash',
[NotificationType.Success]: 'flash flash-success',
[NotificationType.Info]: 'flash',
[NotificationType.Warning]: 'flash flash-warn',
[NotificationType.Error]: 'flash flash-error',
}
export const githubCodeHost: CodeHost = {
type: 'github',
name: checkIsGitHubEnterprise() ? 'GitHub Enterprise' : 'GitHub',
codeViewResolvers: [genericCodeViewResolver, fileLineContainerResolver, searchResultCodeViewResolver],
contentViewResolvers: [markdownBodyViewResolver],
nativeTooltipResolvers: [nativeTooltipResolver],
getContext: () => {
const repoHeaderHasPrivateMarker =
!!document.querySelector('.repohead .private') ||
!!document.querySelector('h1 .octicon-lock ~ [itemprop="author"] ~ [itemprop="name"]') ||
!!(
document
.querySelector('h1 [itemprop="author"] ~ [itemprop="name"] ~ .Label')
?.textContent?.trim()
.toLowerCase() === 'private'
)
const parsedURL = parseURL()
return {
...parsedURL,
revision:
parsedURL.pageType === 'blob' || parsedURL.pageType === 'tree'
? resolveFileInfo().blob.revision
: undefined,
privateRepository: window.location.hostname !== 'github.com' || repoHeaderHasPrivateMarker,
}
},
isLightTheme: defer(() => {
const mode = document.documentElement.dataset.colorMode as 'auto' | 'light' | 'dark' | undefined
if (mode === 'auto') {
return observeSystemIsLightTheme()
}
return of(mode !== 'dark')
}),
getViewContextOnSourcegraphMount: createOpenOnSourcegraphIfNotExists,
viewOnSourcegraphButtonClassProps: {
className: 'btn btn-sm tooltipped tooltipped-s',
iconClassName,
},
check: checkIsGitHub,
getCommandPaletteMount,
notificationClassNames,
commandPaletteClassProps: {
buttonClassName: 'Header-link',
popoverClassName: 'Box',
formClassName: 'p-1',
inputClassName: 'form-control input-sm header-search-input jump-to-field',
listClassName: 'p-0 m-0 js-navigation-container jump-to-suggestions-results-container',
selectedListItemClassName: 'navigation-focus',
listItemClassName:
'd-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-scoped-search',
actionItemClassName:
'command-palette-action-item--github no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path p-2',
noResultsClassName: 'd-flex flex-auto flex-items-center jump-to-suggestions-path p-2',
iconClassName,
},
codeViewToolbarClassProps: {
className: 'code-view-toolbar--github',
listItemClass: 'code-view-toolbar__item--github BtnGroup',
actionItemClass: 'btn btn-sm tooltipped tooltipped-s BtnGroup-item action-item--github',
actionItemPressedClass: 'selected',
actionItemIconClass: 'icon--github v-align-text-bottom',
},
hoverOverlayClassProps: {
className: 'Box',
actionItemClassName: 'btn btn-secondary',
actionItemPressedClassName: 'active',
closeButtonClassName: 'btn-octicon p-0 hover-overlay__close-button--github',
badgeClassName: 'label hover-overlay__badge--github',
getAlertClassName: createNotificationClassNameGetter(notificationClassNames, 'flash-full'),
iconClassName,
},
setElementTooltip,
linkPreviewContentClass: 'text-small text-gray p-1 mx-1 border rounded-1 bg-gray text-gray-dark',
urlToFile: (sourcegraphURL, target, context) => {
if (target.viewState) {
// A view state means that a panel must be shown, and panels are currently only supported on
// Sourcegraph (not code hosts).
return toAbsoluteBlobURL(sourcegraphURL, target)
}
// Make sure the location is also on this GitHub instance; return an absolute URL otherwise.
const sameCodeHost = target.rawRepoName.startsWith(window.location.hostname)
if (!sameCodeHost) {
return toAbsoluteBlobURL(sourcegraphURL, target)
}
const revision = target.revision || 'HEAD'
// If we're provided options, we can make the j2d URL more specific.
const { rawRepoName } = parseURL()
// Stay on same page in PR if possible.
// TODO to be entirely correct, this would need to compare the revision of the code view with the target revision.
const isSameRepo = rawRepoName === target.rawRepoName
if (isSameRepo && context.part !== undefined) {
const containers = getFileContainers()
for (const container of containers) {
const header = container.querySelector<HTMLElement & { dataset: { path: string; anchor: string } }>(
'.file-header[data-path][data-anchor]'
)
if (!header) {
// E.g. suggestion snippet
continue
}
const anchorPath = header.dataset.path
if (anchorPath === target.filePath) {
const anchorUrl = header.dataset.anchor
const url = new URL(window.location.href)
url.hash = anchorUrl
if (target.position) {
// GitHub uses L for the left side, and R for both the right side and the unchanged/white parts
url.hash += `${context.part === 'base' ? 'L' : 'R'}${target.position.line}`
}
// Only use URL if it is visible
// TODO: Expand hidden lines to reveal
if (!document.querySelector(url.hash)) {
break
}
return url.href
}
}
}
// Go to blob URL
const fragment = target.position
? `#L${target.position.line}${target.position.character ? `:${target.position.character}` : ''}`
: ''
return `https://${target.rawRepoName}/blob/${revision}/${target.filePath}${fragment}`
},
codeViewsRequireTokenization: true,
}
| createFileActionsToolbarMount | identifier_name |
codeHost.ts | import { trimStart } from 'lodash'
import { defer, of } from 'rxjs'
import { map } from 'rxjs/operators'
import { Omit } from 'utility-types'
import { AdjustmentDirection, PositionAdjuster } from '@sourcegraph/codeintellify'
import { NotificationType } from '@sourcegraph/shared/src/api/extension/extensionHostApi'
import { PlatformContext } from '@sourcegraph/shared/src/platform/context'
import { observeSystemIsLightTheme } from '@sourcegraph/shared/src/theme'
import {
FileSpec,
RepoSpec,
ResolvedRevisionSpec,
RevisionSpec,
toAbsoluteBlobURL,
} from '@sourcegraph/shared/src/util/url'
import { fetchBlobContentLines } from '../../repo/backend'
import { querySelectorAllOrSelf, querySelectorOrSelf } from '../../util/dom'
import { CodeHost, MountGetter } from '../shared/codeHost'
import { CodeView, toCodeViewResolver } from '../shared/codeViews'
import { createNotificationClassNameGetter } from '../shared/getNotificationClassName'
import { NativeTooltip } from '../shared/nativeTooltips'
import { getSelectionsFromHash, observeSelectionsFromHash } from '../shared/util/selections'
import { ViewResolver } from '../shared/views'
import { markdownBodyViewResolver } from './contentViews'
import { diffDomFunctions, searchCodeSnippetDOMFunctions, singleFileDOMFunctions } from './domFunctions'
import { getCommandPaletteMount } from './extensions'
import { resolveDiffFileInfo, resolveFileInfo, resolveSnippetFileInfo } from './fileInfo'
import { setElementTooltip } from './tooltip'
import { getFileContainers, parseURL } from './util'
/**
* Creates the mount element for the CodeViewToolbar on code views containing
* a `.file-actions` element, for instance:
* - A diff code view on a PR's files page, or a commit page
* - An older GHE single file code view (newer GitHub.com code views use createFileLineContainerToolbarMount)
*/
export function createFileActionsToolbarMount(codeView: HTMLElement): HTMLElement {
const className = 'github-file-actions-toolbar-mount'
const existingMount = codeView.querySelector('.' + className) as HTMLElement
if (existingMount) {
return existingMount
}
const mountElement = document.createElement('div')
mountElement.className = className
const fileActions = codeView.querySelector('.file-actions')
if (!fileActions) {
throw new Error('Could not find GitHub file actions with selector .file-actions')
}
// Add a class to the .file-actions element, so that we can reliably match it in
// stylesheets without bleeding CSS to other code hosts (GitLab also uses .file-actions elements).
fileActions.classList.add('sg-github-file-actions')
// Old GitHub Enterprise PR views have a "☑ show comments" label that we want to insert the mount element *after*
const showCommentsElement = codeView.querySelector('.show-file-notes')
if (showCommentsElement) {
showCommentsElement.after(mountElement)
} else {
fileActions.prepend(mountElement)
}
return mountElement
}
const toolbarButtonProps = {
className: 'btn btn-sm tooltipped tooltipped-s',
}
const diffCodeView: Omit<CodeView, 'element'> = {
dom: diffDomFunctions,
getToolbarMount: createFileActionsToolbarMount,
resolveFileInfo: resolveDiffFileInfo,
toolbarButtonProps,
getScrollBoundaries: codeView => {
const fileHeader = codeView.querySelector<HTMLElement>('.file-header')
if (!fileHeader) {
throw new Error('Could not find .file-header element in GitHub PR code view')
}
return [fileHeader]
},
}
const diffConversationCodeView: Omit<CodeView, 'element'> = {
...diffCodeView,
getToolbarMount: undefined,
}
const singleFileCodeView: Omit<CodeView, 'element'> = {
dom: singleFileDOMFunctions,
getToolbarMount: createFileActionsToolbarMount,
resolveFileInfo,
toolbarButtonProps,
getSelections: getSelectionsFromHash,
observeSelections: observeSelectionsFromHash,
}
/**
 * Some code snippets have their leading whitespace trimmed when rendered. This
 * position adjuster compensates for that difference. See an example here:
 * https://github.com/sourcegraph/browser-extensions/issues/188.
 */
const getSnippetPositionAdjuster = (
requestGraphQL: PlatformContext['requestGraphQL']
): PositionAdjuster<RepoSpec & RevisionSpec & FileSpec & ResolvedRevisionSpec> => ({ direction, codeView, position }) =>
fetchBlobContentLines({ ...position, requestGraphQL }).pipe(
map(lines => {
const codeElement = singleFileDOMFunctions.getCodeElementFromLineNumber(
codeView,
position.line,
position.part
)
if (!codeElement) {
throw new Error('(adjustPosition) could not find code element for line provided')
}
const actualLine = lines[position.line - 1]
const documentLine = codeElement.textContent || ''
const actualLeadingWhiteSpace = actualLine.length - trimStart(actualLine).length
const documentLeadingWhiteSpace = documentLine.length - trimStart(documentLine).length
const modifier = direction === AdjustmentDirection.ActualToCodeView ? -1 : 1
const delta = Math.abs(actualLeadingWhiteSpace - documentLeadingWhiteSpace) * modifier
return {
line: position.line,
character: position.character + delta,
}
})
)
const searchResultCodeViewResolver = toCodeViewResolver('.code-list-item', {
dom: searchCodeSnippetDOMFunctions,
getPositionAdjuster: getSnippetPositionAdjuster,
resolveFileInfo: resolveSnippetFileInfo,
toolbarButtonProps,
})
const snippetCodeView: Omit<CodeView, 'element'> = {
dom: singleFileDOMFunctions,
resolveFileInfo: resolveSnippetFileInfo,
getPositionAdjuster: getSnippetPositionAdjuster,
}
export const createFileLineContainerToolbarMount: NonNullable<CodeView['getToolbarMount']> = (
codeViewElement: HTMLElement
): HTMLElement => {
const className = 'sourcegraph-github-file-code-view-toolbar-mount'
const existingMount = codeViewElement.querySelector(`.${className}`) as HTMLElement
if (existingMount) {
return existingMount
}
const mountElement = document.createElement('div')
mountElement.style.display = 'inline-flex'
mountElement.style.verticalAlign = 'middle'
mountElement.style.alignItems = 'center'
mountElement.className = className
const rawURLLink = codeViewElement.querySelector('#raw-url')
const buttonGroup = rawURLLink?.closest('.BtnGroup')
if (!buttonGroup?.parentNode) {
throw new Error('File actions not found')
}
buttonGroup.parentNode.insertBefore(mountElement, buttonGroup)
return mountElement
}
/**
 * Matches the modern single-file code view, or snippets embedded in comments.
 */
export const fileLineContainerResolver: ViewResolver<CodeView> = {
selector: '.js-file-line-container',
resolveView: (fileLineContainer: HTMLElement): CodeView | null => {
const embeddedBlobWrapper = fileLineContainer.closest('.blob-wrapper-embedded')
if (embeddedBlobWrapper) {
// This is a snippet embedded in a comment.
// Resolve to `.blob-wrapper-embedded`'s parent element,
// the smallest element that contains both the code and
// the HTML anchor that allows resolving the file info.
const element = embeddedBlobWrapper.parentElement!
return {
element,
...snippetCodeView,
}
}
const { pageType } = parseURL()
if (pageType !== 'blob') {
// this is not a single-file code view
return null
}
const repositoryContent = fileLineContainer.closest('.repository-content')
if (!repositoryContent) {
throw new Error('Could not find repository content element')
}
return {
element: repositoryContent as HTMLElement,
...singleFileCodeView,
getToolbarMount: createFileLineContainerToolbarMount,
}
},
}
const genericCodeViewResolver: ViewResolver<CodeView> = {
selector: target => {
const codeViews = new Set<HTMLElement>()
// Logic to support large diffs that are loaded asynchronously:
// https://github.com/sourcegraph/sourcegraph/issues/18337
// - Don't return `.file` elements that have yet to be loaded (loading is triggered by the user)
// - When the user triggers diff loading, the mutation observer will tell us about
// .js-blob-wrapper, since the actual '.file' has been in the DOM the whole time. Return
// the closest ancestor '.file'
for (const file of querySelectorAllOrSelf<HTMLElement>(target, '.file')) {
if (file.querySelectorAll('.js-diff-load-container').length === 0) {
codeViews.add(file)
}
}
for (const blobWrapper of querySelectorAllOrSelf(target, '.js-blob-wrapper')) {
const file = blobWrapper.closest('.file')
if (file instanceof HTMLElement) {
codeViews.add(file)
}
}
return [...codeViews]
},
resolveView: (element: HTMLElement): CodeView | null => {
if (element.querySelector('article.markdown-body')) {
// This code view is rendered markdown, so we shouldn't add code intelligence
return null
}
// This is a suggested change on a GitHub PR
if (element.closest('.js-suggested-changes-blob')) {
return null
}
const { pageType } = parseURL()
const isSingleCodeFile =
pageType === 'blob' &&
document.querySelectorAll('.file').length === 1 &&
document.querySelectorAll('.diff-view').length === 0
if (isSingleCodeFile) {
return { element, ...singleFileCodeView }
}
if (element.closest('.discussion-item-body') || element.classList.contains('js-comment-container')) {
// This code view is embedded on a PR conversation page.
return { element, ...diffConversationCodeView }
}
return { element, ...diffCodeView }
},
}
/**
* Returns true if the current page is GitHub Enterprise.
*/
export function checkIsGitHubEnterprise(): boolean {
const ogSiteName = document.head.querySelector<HTMLMetaElement>('meta[property="og:site_name"]')
return (
!!ogSiteName &&
// GitHub Enterprise v2.14.11 has "GitHub" as og:site_name
(ogSiteName.content === 'GitHub Enterprise' || ogSiteName.content === 'GitHub') &&
document.body.classList.contains('enterprise')
)
}
/**
* Returns true if the current page is github.com.
*/
export const checkIsGitHubDotCom = (url = window.location.href): boolean => /^https?:\/\/(www\.)?github\.com/.test(url)
/**
* Returns true if the current page is either github.com or GitHub Enterprise.
*/
export const checkIsGitHub = (): boolean => checkIsGitHubDotCom() || checkIsGitHubEnterprise()
const OPEN_ON_SOURCEGRAPH_ID = 'open-on-sourcegraph'
export const createOpenOnSourcegraphIfNotExists: MountGetter = (container: HTMLElement): HTMLElement | null => {
const pageheadActions = querySelectorOrSelf(container, '.pagehead-actions')
// If run on a page that isn't under a repository namespace.
if (!pageheadActions || pageheadActions.children.length === 0) {
return null
}
// Check for existing
let mount = pageheadActions.querySelector<HTMLElement>('#' + OPEN_ON_SOURCEGRAPH_ID)
if (mount) {
return mount
}
// Create new
mount = document.createElement('li')
mount.id = OPEN_ON_SOURCEGRAPH_ID
pageheadActions.prepend(mount)
return mount
}
const nativeTooltipResolver: ViewResolver<NativeTooltip> = {
selector: '.js-tagsearch-popover',
resolveView: element => ({ element }),
}
const iconClassName = 'icon--github v-align-text-bottom'
const notificationClassNames = {
[NotificationType.Log]: 'flash',
[NotificationType.Success]: 'flash flash-success',
[NotificationType.Info]: 'flash',
[NotificationType.Warning]: 'flash flash-warn',
[NotificationType.Error]: 'flash flash-error',
}
export const githubCodeHost: CodeHost = {
type: 'github',
name: checkIsGitHubEnterprise() ? 'GitHub Enterprise' : 'GitHub',
codeViewResolvers: [genericCodeViewResolver, fileLineContainerResolver, searchResultCodeViewResolver],
contentViewResolvers: [markdownBodyViewResolver],
nativeTooltipResolvers: [nativeTooltipResolver],
getContext: () => {
const repoHeaderHasPrivateMarker =
!!document.querySelector('.repohead .private') ||
!!document.querySelector('h1 .octicon-lock ~ [itemprop="author"] ~ [itemprop="name"]') ||
!!(
document
.querySelector('h1 [itemprop="author"] ~ [itemprop="name"] ~ .Label')
?.textContent?.trim()
.toLowerCase() === 'private'
)
const parsedURL = parseURL()
return {
...parsedURL,
revision:
parsedURL.pageType === 'blob' || parsedURL.pageType === 'tree'
? resolveFileInfo().blob.revision
: undefined,
privateRepository: window.location.hostname !== 'github.com' || repoHeaderHasPrivateMarker,
}
},
isLightTheme: defer(() => {
const mode = document.documentElement.dataset.colorMode as 'auto' | 'light' | 'dark' | undefined
if (mode === 'auto') {
return observeSystemIsLightTheme()
}
return of(mode !== 'dark')
}),
getViewContextOnSourcegraphMount: createOpenOnSourcegraphIfNotExists,
viewOnSourcegraphButtonClassProps: {
className: 'btn btn-sm tooltipped tooltipped-s',
iconClassName,
},
check: checkIsGitHub,
getCommandPaletteMount,
notificationClassNames,
commandPaletteClassProps: {
buttonClassName: 'Header-link',
popoverClassName: 'Box',
formClassName: 'p-1',
inputClassName: 'form-control input-sm header-search-input jump-to-field',
listClassName: 'p-0 m-0 js-navigation-container jump-to-suggestions-results-container',
selectedListItemClassName: 'navigation-focus',
listItemClassName:
'd-flex flex-justify-start flex-items-center p-0 f5 navigation-item js-navigation-item js-jump-to-scoped-search',
actionItemClassName:
'command-palette-action-item--github no-underline d-flex flex-auto flex-items-center jump-to-suggestions-path p-2',
noResultsClassName: 'd-flex flex-auto flex-items-center jump-to-suggestions-path p-2',
iconClassName,
},
codeViewToolbarClassProps: {
className: 'code-view-toolbar--github',
listItemClass: 'code-view-toolbar__item--github BtnGroup',
actionItemClass: 'btn btn-sm tooltipped tooltipped-s BtnGroup-item action-item--github',
actionItemPressedClass: 'selected',
actionItemIconClass: 'icon--github v-align-text-bottom',
},
hoverOverlayClassProps: {
className: 'Box',
actionItemClassName: 'btn btn-secondary',
actionItemPressedClassName: 'active',
closeButtonClassName: 'btn-octicon p-0 hover-overlay__close-button--github',
badgeClassName: 'label hover-overlay__badge--github',
getAlertClassName: createNotificationClassNameGetter(notificationClassNames, 'flash-full'),
iconClassName,
},
setElementTooltip,
linkPreviewContentClass: 'text-small text-gray p-1 mx-1 border rounded-1 bg-gray text-gray-dark',
urlToFile: (sourcegraphURL, target, context) => {
if (target.viewState) {
// A view state means that a panel must be shown, and panels are currently only supported on
// Sourcegraph (not code hosts).
return toAbsoluteBlobURL(sourcegraphURL, target)
}
// Make sure the location is also on this GitHub instance; return an absolute URL otherwise.
const sameCodeHost = target.rawRepoName.startsWith(window.location.hostname)
if (!sameCodeHost) {
return toAbsoluteBlobURL(sourcegraphURL, target)
}
const revision = target.revision || 'HEAD'
// If we're provided options, we can make the j2d URL more specific.
const { rawRepoName } = parseURL() | // Stay on same page in PR if possible.
// TODO to be entirely correct, this would need to compare the revision of the code view with the target revision.
const isSameRepo = rawRepoName === target.rawRepoName
if (isSameRepo && context.part !== undefined) {
const containers = getFileContainers()
for (const container of containers) {
const header = container.querySelector<HTMLElement & { dataset: { path: string; anchor: string } }>(
'.file-header[data-path][data-anchor]'
)
if (!header) {
// E.g. suggestion snippet
continue
}
const anchorPath = header.dataset.path
if (anchorPath === target.filePath) {
const anchorUrl = header.dataset.anchor
const url = new URL(window.location.href)
url.hash = anchorUrl
if (target.position) {
// GitHub uses L for the left side, and R for both the right side and the unchanged/white parts
url.hash += `${context.part === 'base' ? 'L' : 'R'}${target.position.line}`
}
// Only use URL if it is visible
// TODO: Expand hidden lines to reveal
if (!document.querySelector(url.hash)) {
break
}
return url.href
}
}
}
// Go to blob URL
const fragment = target.position
? `#L${target.position.line}${target.position.character ? `:${target.position.character}` : ''}`
: ''
return `https://${target.rawRepoName}/blob/${revision}/${target.filePath}${fragment}`
},
codeViewsRequireTokenization: true,
} | random_line_split | |
edgehub.rs | use std::{
env, fs,
future::Future,
path::{Path, PathBuf},
time::Duration as StdDuration,
};
use anyhow::{bail, Context, Result};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use futures_util::{
future::{self, Either},
FutureExt,
};
use tracing::{debug, error, info};
use mqtt_bridge::{settings::BridgeSettings, BridgeController};
use mqtt_broker::{
auth::Authorizer,
sidecar::{Sidecar, SidecarShutdownHandle},
Broker, BrokerBuilder, BrokerHandle, BrokerReady, BrokerSnapshot, FilePersistor,
MakeMqttPacketProcessor, Message, Persist, Server, ServerCertificate, SystemEvent,
VersionedFileFormat,
};
use mqtt_edgehub::{
auth::{
EdgeHubAuthenticator, EdgeHubAuthorizer, LocalAuthenticator, LocalAuthorizer,
PolicyAuthorizer,
},
command::{
AuthorizedIdentitiesCommand, BridgeUpdateCommand, CommandHandler, DisconnectCommand,
PolicyUpdateCommand,
},
connection::MakeEdgeHubPacketProcessor,
settings::Settings,
};
use super::{shutdown, Bootstrap};
const DEVICE_ID_ENV: &str = "IOTEDGE_DEVICEID";
const IOTHUB_HOSTNAME_ENV: &str = "IOTEDGE_IOTHUBHOSTNAME";
#[derive(Default)]
pub struct EdgeHubBootstrap {
broker_ready: BrokerReady,
}
#[async_trait]
impl Bootstrap for EdgeHubBootstrap {
type Settings = Settings;
fn load_config<P: AsRef<Path>>(&self, path: P) -> Result<Self::Settings> {
info!("loading settings from a file {}", path.as_ref().display());
Ok(Self::Settings::from_file(path)?)
}
type Authorizer = LocalAuthorizer<EdgeHubAuthorizer<PolicyAuthorizer>>;
async fn make_broker(
&self,
settings: &Self::Settings,
) -> Result<(Broker<Self::Authorizer>, FilePersistor<VersionedFileFormat>)> {
info!("loading state...");
let persistence_config = settings.broker().persistence();
let state_dir = persistence_config.file_path();
fs::create_dir_all(state_dir.clone())?;
let mut persistor = FilePersistor::new(state_dir, VersionedFileFormat::default());
let state = persistor.load().await?;
info!("state loaded.");
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let iothub_id = env::var(IOTHUB_HOSTNAME_ENV).context(IOTHUB_HOSTNAME_ENV)?;
let authorizer = LocalAuthorizer::new(EdgeHubAuthorizer::new(
PolicyAuthorizer::new(device_id.clone(), self.broker_ready.handle()),
device_id,
iothub_id,
self.broker_ready.handle(),
));
let broker = BrokerBuilder::default()
.with_authorizer(authorizer)
.with_state(state.unwrap_or_default())
.with_config(settings.broker().clone())
.build();
Ok((broker, persistor))
}
fn snapshot_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().persistence().time_interval()
}
fn session_expiration(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().expiration()
}
fn session_cleanup_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().cleanup_interval()
}
async fn run(
self,
config: Self::Settings,
broker: Broker<Self::Authorizer>,
) -> Result<BrokerSnapshot> {
let broker_handle = broker.handle();
let sidecars = make_sidecars(&broker_handle, &config)?;
info!("starting server...");
let server = make_server(config, broker, self.broker_ready).await?;
let shutdown_signal = shutdown_signal(&server);
let server = tokio::spawn(server.serve(shutdown_signal));
info!("starting sidecars...");
let mut shutdowns = Vec::new();
let mut sidecar_joins = Vec::new();
for sidecar in sidecars {
shutdowns.push(sidecar.shutdown_handle()?);
sidecar_joins.push(tokio::spawn(sidecar.run()));
}
let state = match future::select(server, future::select_all(sidecar_joins)).await {
// server exited first
Either::Left((snapshot, sidecars)) => {
// send shutdown event to each sidecar
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for at least one sidecar to finish
let (_res, _stopped, sidecars) = sidecars.await;
|
snapshot??
}
// one of the sidecars exited first
Either::Right(((res, stopped, sidecars), server)) => {
debug!("a sidecar has stopped. shutting down all sidecars...");
if let Err(e) = res {
error!(message = "failed waiting for sidecar shutdown", error = %e);
}
// send a shutdown event to each of the remaining sidecars
shutdowns.remove(stopped);
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for the rest to exit
future::join_all(sidecars).await;
// signal server
broker_handle.send(Message::System(SystemEvent::Shutdown))?;
server.await??
}
};
Ok(state)
}
}
async fn make_server<Z>(
config: Settings,
broker: Broker<Z>,
broker_ready: BrokerReady,
) -> Result<Server<Z, MakeEdgeHubPacketProcessor<MakeMqttPacketProcessor>>>
where
Z: Authorizer + Send + 'static,
{
let broker_handle = broker.handle();
let make_processor = MakeEdgeHubPacketProcessor::new_default(broker_handle.clone());
let mut server = Server::from_broker(broker).with_packet_processor(make_processor);
// Add system transport to allow communication between edgehub components
let authenticator = LocalAuthenticator::new();
server.with_tcp(config.listener().system().addr(), authenticator, None)?;
// Add regular MQTT over TCP transport
let authenticator = EdgeHubAuthenticator::new(config.auth().url());
if let Some(tcp) = config.listener().tcp() {
let broker_ready = Some(broker_ready.signal());
server.with_tcp(tcp.addr(), authenticator.clone(), broker_ready)?;
}
// Add regular MQTT over TLS transport
if let Some(tls) = config.listener().tls() {
let identity = if let Some(config) = tls.certificate() {
info!("loading identity from {}", config.cert_path().display());
ServerCertificate::from_pem(config.cert_path(), config.private_key_path())
.with_context(|| {
ServerCertificateLoadError::File(
config.cert_path().to_path_buf(),
config.private_key_path().to_path_buf(),
)
})?
} else {
info!("downloading identity from edgelet");
download_server_certificate()
.await
.with_context(|| ServerCertificateLoadError::Edgelet)?
};
let broker_ready = Some(broker_ready.signal());
server.with_tls(tls.addr(), identity, authenticator.clone(), broker_ready)?;
};
Ok(server)
}
fn make_sidecars(
broker_handle: &BrokerHandle,
config: &Settings,
) -> Result<Vec<Box<dyn Sidecar + Send>>> {
let mut sidecars: Vec<Box<dyn Sidecar + Send>> = Vec::new();
let system_address = config.listener().system().addr().to_string();
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let settings = BridgeSettings::new()?;
let bridge_controller =
BridgeController::new(system_address.clone(), device_id.to_owned(), settings);
let bridge_controller_handle = bridge_controller.handle();
sidecars.push(Box::new(bridge_controller));
let mut command_handler = CommandHandler::new(system_address, &device_id);
command_handler.add_command(DisconnectCommand::new(&broker_handle));
command_handler.add_command(AuthorizedIdentitiesCommand::new(&broker_handle));
command_handler.add_command(PolicyUpdateCommand::new(broker_handle));
command_handler.add_command(BridgeUpdateCommand::new(bridge_controller_handle));
sidecars.push(Box::new(command_handler));
Ok(sidecars)
}
pub const WORKLOAD_URI: &str = "IOTEDGE_WORKLOADURI";
pub const EDGE_DEVICE_HOST_NAME: &str = "EdgeDeviceHostName";
pub const MODULE_ID: &str = "IOTEDGE_MODULEID";
pub const MODULE_GENERATION_ID: &str = "IOTEDGE_MODULEGENERATIONID";
pub const CERTIFICATE_VALIDITY_DAYS: i64 = 90;
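/// Requests a new server certificate from the edgelet workload API (located via the
/// `IOTEDGE_WORKLOADURI` environment variable), valid for `CERTIFICATE_VALIDITY_DAYS`
/// from now, and turns the returned PEM pair into a `ServerCertificate`. Fails if the
/// response does not carry a usable private key.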
async fn download_server_certificate() -> Result<ServerCertificate> {
let uri = env::var(WORKLOAD_URI).context(WORKLOAD_URI)?;
let hostname = env::var(EDGE_DEVICE_HOST_NAME).context(EDGE_DEVICE_HOST_NAME)?;
let module_id = env::var(MODULE_ID).context(MODULE_ID)?;
let generation_id = env::var(MODULE_GENERATION_ID).context(MODULE_GENERATION_ID)?;
let expiration = Utc::now() + Duration::days(CERTIFICATE_VALIDITY_DAYS);
let client = edgelet_client::workload(&uri)?;
let cert = client
.create_server_cert(&module_id, &generation_id, &hostname, expiration)
.await?;
if cert.private_key().type_() != "key" {
bail!(
"unknown type of private key: {}",
cert.private_key().type_()
);
}
if let Some(private_key) = cert.private_key().bytes() {
let identity = ServerCertificate::from_pem_pair(cert.certificate(), private_key)?;
Ok(identity)
} else {
bail!("missing private key");
}
}
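/// Builds the future that resolves when the server should shut down: either the process
/// receives a shutdown signal, or (when a listener carries a TLS identity) the server
/// certificate reaches its expiry and the broker must restart to renew it.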
fn shutdown_signal<Z, P>(server: &Server<Z, P>) -> impl Future<Output = ()> {
server
.listeners()
.iter()
.find_map(|listener| listener.transport().identity())
.map_or_else(
|| Either::Left(shutdown::shutdown()),
|identity| {
let system_or_cert_expired = future::select(
Box::pin(server_certificate_renewal(identity.not_after())),
Box::pin(shutdown::shutdown()),
);
Either::Right(system_or_cert_expired.map(drop))
},
)
}
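/// Sleeps until `renew_at`, then returns so the caller can restart the broker with a
/// renewed certificate; if the certificate has already expired, logs an error and
/// returns immediately.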
async fn server_certificate_renewal(renew_at: DateTime<Utc>) {
let delay = renew_at - Utc::now();
if delay > Duration::zero() {
info!(
"scheduled server certificate renewal timer for {}",
renew_at
);
let delay = delay.to_std().expect("duration must not be negative");
crate::time::sleep(delay).await;
info!("restarting the broker to perform certificate renewal");
} else {
error!("server certificate expired at {}", renew_at);
}
}
#[derive(Debug, thiserror::Error)]
pub enum ServerCertificateLoadError {
#[error("unable to load server certificate from file {0} and private key {1}")]
File(PathBuf, PathBuf),
#[error("unable to download certificate from edgelet")]
Edgelet,
} | // wait for the rest to exit
future::join_all(sidecars).await; | random_line_split |
edgehub.rs | use std::{
env, fs,
future::Future,
path::{Path, PathBuf},
time::Duration as StdDuration,
};
use anyhow::{bail, Context, Result};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use futures_util::{
future::{self, Either},
FutureExt,
};
use tracing::{debug, error, info};
use mqtt_bridge::{settings::BridgeSettings, BridgeController};
use mqtt_broker::{
auth::Authorizer,
sidecar::{Sidecar, SidecarShutdownHandle},
Broker, BrokerBuilder, BrokerHandle, BrokerReady, BrokerSnapshot, FilePersistor,
MakeMqttPacketProcessor, Message, Persist, Server, ServerCertificate, SystemEvent,
VersionedFileFormat,
};
use mqtt_edgehub::{
auth::{
EdgeHubAuthenticator, EdgeHubAuthorizer, LocalAuthenticator, LocalAuthorizer,
PolicyAuthorizer,
},
command::{
AuthorizedIdentitiesCommand, BridgeUpdateCommand, CommandHandler, DisconnectCommand,
PolicyUpdateCommand,
},
connection::MakeEdgeHubPacketProcessor,
settings::Settings,
};
use super::{shutdown, Bootstrap};
const DEVICE_ID_ENV: &str = "IOTEDGE_DEVICEID";
const IOTHUB_HOSTNAME_ENV: &str = "IOTEDGE_IOTHUBHOSTNAME";
#[derive(Default)]
pub struct EdgeHubBootstrap {
broker_ready: BrokerReady,
}
#[async_trait]
impl Bootstrap for EdgeHubBootstrap {
type Settings = Settings;
fn load_config<P: AsRef<Path>>(&self, path: P) -> Result<Self::Settings> {
info!("loading settings from a file {}", path.as_ref().display());
Ok(Self::Settings::from_file(path)?)
}
type Authorizer = LocalAuthorizer<EdgeHubAuthorizer<PolicyAuthorizer>>;
async fn make_broker(
&self,
settings: &Self::Settings,
) -> Result<(Broker<Self::Authorizer>, FilePersistor<VersionedFileFormat>)> {
info!("loading state...");
let persistence_config = settings.broker().persistence();
let state_dir = persistence_config.file_path();
fs::create_dir_all(state_dir.clone())?;
let mut persistor = FilePersistor::new(state_dir, VersionedFileFormat::default());
let state = persistor.load().await?;
info!("state loaded.");
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let iothub_id = env::var(IOTHUB_HOSTNAME_ENV).context(IOTHUB_HOSTNAME_ENV)?;
let authorizer = LocalAuthorizer::new(EdgeHubAuthorizer::new(
PolicyAuthorizer::new(device_id.clone(), self.broker_ready.handle()),
device_id,
iothub_id,
self.broker_ready.handle(),
));
let broker = BrokerBuilder::default()
.with_authorizer(authorizer)
.with_state(state.unwrap_or_default())
.with_config(settings.broker().clone())
.build();
Ok((broker, persistor))
}
fn snapshot_interval(&self, settings: &Self::Settings) -> StdDuration |
fn session_expiration(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().expiration()
}
fn session_cleanup_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().cleanup_interval()
}
async fn run(
self,
config: Self::Settings,
broker: Broker<Self::Authorizer>,
) -> Result<BrokerSnapshot> {
let broker_handle = broker.handle();
let sidecars = make_sidecars(&broker_handle, &config)?;
info!("starting server...");
let server = make_server(config, broker, self.broker_ready).await?;
let shutdown_signal = shutdown_signal(&server);
let server = tokio::spawn(server.serve(shutdown_signal));
info!("starting sidecars...");
let mut shutdowns = Vec::new();
let mut sidecar_joins = Vec::new();
for sidecar in sidecars {
shutdowns.push(sidecar.shutdown_handle()?);
sidecar_joins.push(tokio::spawn(sidecar.run()));
}
let state = match future::select(server, future::select_all(sidecar_joins)).await {
// server exited first
Either::Left((snapshot, sidecars)) => {
// send shutdown event to each sidecar
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for at least one sidecar to finish
let (_res, _stopped, sidecars) = sidecars.await;
// wait for the rest to exit
future::join_all(sidecars).await;
snapshot??
}
// one of the sidecars exited first
Either::Right(((res, stopped, sidecars), server)) => {
debug!("a sidecar has stopped. shutting down all sidecars...");
if let Err(e) = res {
error!(message = "failed waiting for sidecar shutdown", error = %e);
}
// send a shutdown event to each of the remaining sidecars
shutdowns.remove(stopped);
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for the rest to exit
future::join_all(sidecars).await;
// signal server
broker_handle.send(Message::System(SystemEvent::Shutdown))?;
server.await??
}
};
Ok(state)
}
}
async fn make_server<Z>(
config: Settings,
broker: Broker<Z>,
broker_ready: BrokerReady,
) -> Result<Server<Z, MakeEdgeHubPacketProcessor<MakeMqttPacketProcessor>>>
where
Z: Authorizer + Send + 'static,
{
let broker_handle = broker.handle();
let make_processor = MakeEdgeHubPacketProcessor::new_default(broker_handle.clone());
let mut server = Server::from_broker(broker).with_packet_processor(make_processor);
// Add system transport to allow communication between edgehub components
let authenticator = LocalAuthenticator::new();
server.with_tcp(config.listener().system().addr(), authenticator, None)?;
// Add regular MQTT over TCP transport
let authenticator = EdgeHubAuthenticator::new(config.auth().url());
if let Some(tcp) = config.listener().tcp() {
let broker_ready = Some(broker_ready.signal());
server.with_tcp(tcp.addr(), authenticator.clone(), broker_ready)?;
}
// Add regular MQTT over TLS transport
if let Some(tls) = config.listener().tls() {
let identity = if let Some(config) = tls.certificate() {
info!("loading identity from {}", config.cert_path().display());
ServerCertificate::from_pem(config.cert_path(), config.private_key_path())
.with_context(|| {
ServerCertificateLoadError::File(
config.cert_path().to_path_buf(),
config.private_key_path().to_path_buf(),
)
})?
} else {
info!("downloading identity from edgelet");
download_server_certificate()
.await
.with_context(|| ServerCertificateLoadError::Edgelet)?
};
let broker_ready = Some(broker_ready.signal());
server.with_tls(tls.addr(), identity, authenticator.clone(), broker_ready)?;
};
Ok(server)
}
fn make_sidecars(
broker_handle: &BrokerHandle,
config: &Settings,
) -> Result<Vec<Box<dyn Sidecar + Send>>> {
let mut sidecars: Vec<Box<dyn Sidecar + Send>> = Vec::new();
let system_address = config.listener().system().addr().to_string();
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let settings = BridgeSettings::new()?;
let bridge_controller =
BridgeController::new(system_address.clone(), device_id.to_owned(), settings);
let bridge_controller_handle = bridge_controller.handle();
sidecars.push(Box::new(bridge_controller));
let mut command_handler = CommandHandler::new(system_address, &device_id);
command_handler.add_command(DisconnectCommand::new(&broker_handle));
command_handler.add_command(AuthorizedIdentitiesCommand::new(&broker_handle));
command_handler.add_command(PolicyUpdateCommand::new(broker_handle));
command_handler.add_command(BridgeUpdateCommand::new(bridge_controller_handle));
sidecars.push(Box::new(command_handler));
Ok(sidecars)
}
pub const WORKLOAD_URI: &str = "IOTEDGE_WORKLOADURI";
pub const EDGE_DEVICE_HOST_NAME: &str = "EdgeDeviceHostName";
pub const MODULE_ID: &str = "IOTEDGE_MODULEID";
pub const MODULE_GENERATION_ID: &str = "IOTEDGE_MODULEGENERATIONID";
pub const CERTIFICATE_VALIDITY_DAYS: i64 = 90;
async fn download_server_certificate() -> Result<ServerCertificate> {
let uri = env::var(WORKLOAD_URI).context(WORKLOAD_URI)?;
let hostname = env::var(EDGE_DEVICE_HOST_NAME).context(EDGE_DEVICE_HOST_NAME)?;
let module_id = env::var(MODULE_ID).context(MODULE_ID)?;
let generation_id = env::var(MODULE_GENERATION_ID).context(MODULE_GENERATION_ID)?;
let expiration = Utc::now() + Duration::days(CERTIFICATE_VALIDITY_DAYS);
let client = edgelet_client::workload(&uri)?;
let cert = client
.create_server_cert(&module_id, &generation_id, &hostname, expiration)
.await?;
if cert.private_key().type_() != "key" {
bail!(
"unknown type of private key: {}",
cert.private_key().type_()
);
}
if let Some(private_key) = cert.private_key().bytes() {
let identity = ServerCertificate::from_pem_pair(cert.certificate(), private_key)?;
Ok(identity)
} else {
bail!("missing private key");
}
}
fn shutdown_signal<Z, P>(server: &Server<Z, P>) -> impl Future<Output = ()> {
server
.listeners()
.iter()
.find_map(|listener| listener.transport().identity())
.map_or_else(
|| Either::Left(shutdown::shutdown()),
|identity| {
let system_or_cert_expired = future::select(
Box::pin(server_certificate_renewal(identity.not_after())),
Box::pin(shutdown::shutdown()),
);
Either::Right(system_or_cert_expired.map(drop))
},
)
}
async fn server_certificate_renewal(renew_at: DateTime<Utc>) {
let delay = renew_at - Utc::now();
if delay > Duration::zero() {
info!(
"scheduled server certificate renewal timer for {}",
renew_at
);
let delay = delay.to_std().expect("duration must not be negative");
crate::time::sleep(delay).await;
info!("restarting the broker to perform certificate renewal");
} else {
error!("server certificate expired at {}", renew_at);
}
}
#[derive(Debug, thiserror::Error)]
pub enum ServerCertificateLoadError {
#[error("unable to load server certificate from file {0} and private key {1}")]
File(PathBuf, PathBuf),
#[error("unable to download certificate from edgelet")]
Edgelet,
}
| {
settings.broker().persistence().time_interval()
} | identifier_body |
edgehub.rs | use std::{
env, fs,
future::Future,
path::{Path, PathBuf},
time::Duration as StdDuration,
};
use anyhow::{bail, Context, Result};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use futures_util::{
future::{self, Either},
FutureExt,
};
use tracing::{debug, error, info};
use mqtt_bridge::{settings::BridgeSettings, BridgeController};
use mqtt_broker::{
auth::Authorizer,
sidecar::{Sidecar, SidecarShutdownHandle},
Broker, BrokerBuilder, BrokerHandle, BrokerReady, BrokerSnapshot, FilePersistor,
MakeMqttPacketProcessor, Message, Persist, Server, ServerCertificate, SystemEvent,
VersionedFileFormat,
};
use mqtt_edgehub::{
auth::{
EdgeHubAuthenticator, EdgeHubAuthorizer, LocalAuthenticator, LocalAuthorizer,
PolicyAuthorizer,
},
command::{
AuthorizedIdentitiesCommand, BridgeUpdateCommand, CommandHandler, DisconnectCommand,
PolicyUpdateCommand,
},
connection::MakeEdgeHubPacketProcessor,
settings::Settings,
};
use super::{shutdown, Bootstrap};
const DEVICE_ID_ENV: &str = "IOTEDGE_DEVICEID";
const IOTHUB_HOSTNAME_ENV: &str = "IOTEDGE_IOTHUBHOSTNAME";
#[derive(Default)]
pub struct EdgeHubBootstrap {
broker_ready: BrokerReady,
}
#[async_trait]
impl Bootstrap for EdgeHubBootstrap {
type Settings = Settings;
fn load_config<P: AsRef<Path>>(&self, path: P) -> Result<Self::Settings> {
info!("loading settings from a file {}", path.as_ref().display());
Ok(Self::Settings::from_file(path)?)
}
type Authorizer = LocalAuthorizer<EdgeHubAuthorizer<PolicyAuthorizer>>;
async fn make_broker(
&self,
settings: &Self::Settings,
) -> Result<(Broker<Self::Authorizer>, FilePersistor<VersionedFileFormat>)> {
info!("loading state...");
let persistence_config = settings.broker().persistence();
let state_dir = persistence_config.file_path();
fs::create_dir_all(state_dir.clone())?;
let mut persistor = FilePersistor::new(state_dir, VersionedFileFormat::default());
let state = persistor.load().await?;
info!("state loaded.");
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let iothub_id = env::var(IOTHUB_HOSTNAME_ENV).context(IOTHUB_HOSTNAME_ENV)?;
let authorizer = LocalAuthorizer::new(EdgeHubAuthorizer::new(
PolicyAuthorizer::new(device_id.clone(), self.broker_ready.handle()),
device_id,
iothub_id,
self.broker_ready.handle(),
));
let broker = BrokerBuilder::default()
.with_authorizer(authorizer)
.with_state(state.unwrap_or_default())
.with_config(settings.broker().clone())
.build();
Ok((broker, persistor))
}
fn snapshot_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().persistence().time_interval()
}
fn session_expiration(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().expiration()
}
fn | (&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().cleanup_interval()
}
async fn run(
self,
config: Self::Settings,
broker: Broker<Self::Authorizer>,
) -> Result<BrokerSnapshot> {
let broker_handle = broker.handle();
let sidecars = make_sidecars(&broker_handle, &config)?;
info!("starting server...");
let server = make_server(config, broker, self.broker_ready).await?;
let shutdown_signal = shutdown_signal(&server);
let server = tokio::spawn(server.serve(shutdown_signal));
info!("starting sidecars...");
let mut shutdowns = Vec::new();
let mut sidecar_joins = Vec::new();
for sidecar in sidecars {
shutdowns.push(sidecar.shutdown_handle()?);
sidecar_joins.push(tokio::spawn(sidecar.run()));
}
let state = match future::select(server, future::select_all(sidecar_joins)).await {
// server exited first
Either::Left((snapshot, sidecars)) => {
// send shutdown event to each sidecar
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for at least one sidecar to finish
let (_res, _stopped, sidecars) = sidecars.await;
// wait for the rest to exit
future::join_all(sidecars).await;
snapshot??
}
// one of the sidecars exited first
Either::Right(((res, stopped, sidecars), server)) => {
debug!("a sidecar has stopped. shutting down all sidecars...");
if let Err(e) = res {
error!(message = "failed waiting for sidecar shutdown", error = %e);
}
// send a shutdown event to each of the remaining sidecars
shutdowns.remove(stopped);
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for the rest to exit
future::join_all(sidecars).await;
// signal server
broker_handle.send(Message::System(SystemEvent::Shutdown))?;
server.await??
}
};
Ok(state)
}
}
async fn make_server<Z>(
config: Settings,
broker: Broker<Z>,
broker_ready: BrokerReady,
) -> Result<Server<Z, MakeEdgeHubPacketProcessor<MakeMqttPacketProcessor>>>
where
Z: Authorizer + Send + 'static,
{
let broker_handle = broker.handle();
let make_processor = MakeEdgeHubPacketProcessor::new_default(broker_handle.clone());
let mut server = Server::from_broker(broker).with_packet_processor(make_processor);
// Add system transport to allow communication between edgehub components
let authenticator = LocalAuthenticator::new();
server.with_tcp(config.listener().system().addr(), authenticator, None)?;
// Add regular MQTT over TCP transport
let authenticator = EdgeHubAuthenticator::new(config.auth().url());
if let Some(tcp) = config.listener().tcp() {
let broker_ready = Some(broker_ready.signal());
server.with_tcp(tcp.addr(), authenticator.clone(), broker_ready)?;
}
// Add regular MQTT over TLS transport
if let Some(tls) = config.listener().tls() {
let identity = if let Some(config) = tls.certificate() {
info!("loading identity from {}", config.cert_path().display());
ServerCertificate::from_pem(config.cert_path(), config.private_key_path())
.with_context(|| {
ServerCertificateLoadError::File(
config.cert_path().to_path_buf(),
config.private_key_path().to_path_buf(),
)
})?
} else {
info!("downloading identity from edgelet");
download_server_certificate()
.await
.with_context(|| ServerCertificateLoadError::Edgelet)?
};
let broker_ready = Some(broker_ready.signal());
server.with_tls(tls.addr(), identity, authenticator.clone(), broker_ready)?;
};
Ok(server)
}
fn make_sidecars(
broker_handle: &BrokerHandle,
config: &Settings,
) -> Result<Vec<Box<dyn Sidecar + Send>>> {
let mut sidecars: Vec<Box<dyn Sidecar + Send>> = Vec::new();
let system_address = config.listener().system().addr().to_string();
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let settings = BridgeSettings::new()?;
let bridge_controller =
BridgeController::new(system_address.clone(), device_id.to_owned(), settings);
let bridge_controller_handle = bridge_controller.handle();
sidecars.push(Box::new(bridge_controller));
let mut command_handler = CommandHandler::new(system_address, &device_id);
command_handler.add_command(DisconnectCommand::new(&broker_handle));
command_handler.add_command(AuthorizedIdentitiesCommand::new(&broker_handle));
command_handler.add_command(PolicyUpdateCommand::new(broker_handle));
command_handler.add_command(BridgeUpdateCommand::new(bridge_controller_handle));
sidecars.push(Box::new(command_handler));
Ok(sidecars)
}
pub const WORKLOAD_URI: &str = "IOTEDGE_WORKLOADURI";
pub const EDGE_DEVICE_HOST_NAME: &str = "EdgeDeviceHostName";
pub const MODULE_ID: &str = "IOTEDGE_MODULEID";
pub const MODULE_GENERATION_ID: &str = "IOTEDGE_MODULEGENERATIONID";
pub const CERTIFICATE_VALIDITY_DAYS: i64 = 90;
async fn download_server_certificate() -> Result<ServerCertificate> {
let uri = env::var(WORKLOAD_URI).context(WORKLOAD_URI)?;
let hostname = env::var(EDGE_DEVICE_HOST_NAME).context(EDGE_DEVICE_HOST_NAME)?;
let module_id = env::var(MODULE_ID).context(MODULE_ID)?;
let generation_id = env::var(MODULE_GENERATION_ID).context(MODULE_GENERATION_ID)?;
let expiration = Utc::now() + Duration::days(CERTIFICATE_VALIDITY_DAYS);
let client = edgelet_client::workload(&uri)?;
let cert = client
.create_server_cert(&module_id, &generation_id, &hostname, expiration)
.await?;
if cert.private_key().type_() != "key" {
bail!(
"unknown type of private key: {}",
cert.private_key().type_()
);
}
if let Some(private_key) = cert.private_key().bytes() {
let identity = ServerCertificate::from_pem_pair(cert.certificate(), private_key)?;
Ok(identity)
} else {
bail!("missing private key");
}
}
fn shutdown_signal<Z, P>(server: &Server<Z, P>) -> impl Future<Output = ()> {
server
.listeners()
.iter()
.find_map(|listener| listener.transport().identity())
.map_or_else(
|| Either::Left(shutdown::shutdown()),
|identity| {
let system_or_cert_expired = future::select(
Box::pin(server_certificate_renewal(identity.not_after())),
Box::pin(shutdown::shutdown()),
);
Either::Right(system_or_cert_expired.map(drop))
},
)
}
async fn server_certificate_renewal(renew_at: DateTime<Utc>) {
let delay = renew_at - Utc::now();
if delay > Duration::zero() {
info!(
"scheduled server certificate renewal timer for {}",
renew_at
);
let delay = delay.to_std().expect("duration must not be negative");
crate::time::sleep(delay).await;
info!("restarting the broker to perform certificate renewal");
} else {
error!("server certificate expired at {}", renew_at);
}
}
#[derive(Debug, thiserror::Error)]
pub enum ServerCertificateLoadError {
#[error("unable to load server certificate from file {0} and private key {1}")]
File(PathBuf, PathBuf),
#[error("unable to download certificate from edgelet")]
Edgelet,
}
| session_cleanup_interval | identifier_name |
edgehub.rs | use std::{
env, fs,
future::Future,
path::{Path, PathBuf},
time::Duration as StdDuration,
};
use anyhow::{bail, Context, Result};
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc};
use futures_util::{
future::{self, Either},
FutureExt,
};
use tracing::{debug, error, info};
use mqtt_bridge::{settings::BridgeSettings, BridgeController};
use mqtt_broker::{
auth::Authorizer,
sidecar::{Sidecar, SidecarShutdownHandle},
Broker, BrokerBuilder, BrokerHandle, BrokerReady, BrokerSnapshot, FilePersistor,
MakeMqttPacketProcessor, Message, Persist, Server, ServerCertificate, SystemEvent,
VersionedFileFormat,
};
use mqtt_edgehub::{
auth::{
EdgeHubAuthenticator, EdgeHubAuthorizer, LocalAuthenticator, LocalAuthorizer,
PolicyAuthorizer,
},
command::{
AuthorizedIdentitiesCommand, BridgeUpdateCommand, CommandHandler, DisconnectCommand,
PolicyUpdateCommand,
},
connection::MakeEdgeHubPacketProcessor,
settings::Settings,
};
use super::{shutdown, Bootstrap};
const DEVICE_ID_ENV: &str = "IOTEDGE_DEVICEID";
const IOTHUB_HOSTNAME_ENV: &str = "IOTEDGE_IOTHUBHOSTNAME";
#[derive(Default)]
pub struct EdgeHubBootstrap {
broker_ready: BrokerReady,
}
#[async_trait]
impl Bootstrap for EdgeHubBootstrap {
type Settings = Settings;
fn load_config<P: AsRef<Path>>(&self, path: P) -> Result<Self::Settings> {
info!("loading settings from a file {}", path.as_ref().display());
Ok(Self::Settings::from_file(path)?)
}
type Authorizer = LocalAuthorizer<EdgeHubAuthorizer<PolicyAuthorizer>>;
async fn make_broker(
&self,
settings: &Self::Settings,
) -> Result<(Broker<Self::Authorizer>, FilePersistor<VersionedFileFormat>)> {
info!("loading state...");
let persistence_config = settings.broker().persistence();
let state_dir = persistence_config.file_path();
fs::create_dir_all(state_dir.clone())?;
let mut persistor = FilePersistor::new(state_dir, VersionedFileFormat::default());
let state = persistor.load().await?;
info!("state loaded.");
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let iothub_id = env::var(IOTHUB_HOSTNAME_ENV).context(IOTHUB_HOSTNAME_ENV)?;
let authorizer = LocalAuthorizer::new(EdgeHubAuthorizer::new(
PolicyAuthorizer::new(device_id.clone(), self.broker_ready.handle()),
device_id,
iothub_id,
self.broker_ready.handle(),
));
let broker = BrokerBuilder::default()
.with_authorizer(authorizer)
.with_state(state.unwrap_or_default())
.with_config(settings.broker().clone())
.build();
Ok((broker, persistor))
}
fn snapshot_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().persistence().time_interval()
}
fn session_expiration(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().expiration()
}
fn session_cleanup_interval(&self, settings: &Self::Settings) -> StdDuration {
settings.broker().session().cleanup_interval()
}
async fn run(
self,
config: Self::Settings,
broker: Broker<Self::Authorizer>,
) -> Result<BrokerSnapshot> {
let broker_handle = broker.handle();
let sidecars = make_sidecars(&broker_handle, &config)?;
info!("starting server...");
let server = make_server(config, broker, self.broker_ready).await?;
let shutdown_signal = shutdown_signal(&server);
let server = tokio::spawn(server.serve(shutdown_signal));
info!("starting sidecars...");
let mut shutdowns = Vec::new();
let mut sidecar_joins = Vec::new();
for sidecar in sidecars {
shutdowns.push(sidecar.shutdown_handle()?);
sidecar_joins.push(tokio::spawn(sidecar.run()));
}
let state = match future::select(server, future::select_all(sidecar_joins)).await {
// server exited first
Either::Left((snapshot, sidecars)) => {
// send shutdown event to each sidecar
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for at least one sidecar to finish
let (_res, _stopped, sidecars) = sidecars.await;
// wait for the rest to exit
future::join_all(sidecars).await;
snapshot??
}
// one of the sidecars exited first
Either::Right(((res, stopped, sidecars), server)) => |
};
Ok(state)
}
}
async fn make_server<Z>(
config: Settings,
broker: Broker<Z>,
broker_ready: BrokerReady,
) -> Result<Server<Z, MakeEdgeHubPacketProcessor<MakeMqttPacketProcessor>>>
where
Z: Authorizer + Send + 'static,
{
let broker_handle = broker.handle();
let make_processor = MakeEdgeHubPacketProcessor::new_default(broker_handle.clone());
let mut server = Server::from_broker(broker).with_packet_processor(make_processor);
// Add system transport to allow communication between edgehub components
let authenticator = LocalAuthenticator::new();
server.with_tcp(config.listener().system().addr(), authenticator, None)?;
// Add regular MQTT over TCP transport
let authenticator = EdgeHubAuthenticator::new(config.auth().url());
if let Some(tcp) = config.listener().tcp() {
let broker_ready = Some(broker_ready.signal());
server.with_tcp(tcp.addr(), authenticator.clone(), broker_ready)?;
}
// Add regular MQTT over TLS transport
if let Some(tls) = config.listener().tls() {
let identity = if let Some(config) = tls.certificate() {
info!("loading identity from {}", config.cert_path().display());
ServerCertificate::from_pem(config.cert_path(), config.private_key_path())
.with_context(|| {
ServerCertificateLoadError::File(
config.cert_path().to_path_buf(),
config.private_key_path().to_path_buf(),
)
})?
} else {
info!("downloading identity from edgelet");
download_server_certificate()
.await
.with_context(|| ServerCertificateLoadError::Edgelet)?
};
let broker_ready = Some(broker_ready.signal());
server.with_tls(tls.addr(), identity, authenticator.clone(), broker_ready)?;
};
Ok(server)
}
fn make_sidecars(
broker_handle: &BrokerHandle,
config: &Settings,
) -> Result<Vec<Box<dyn Sidecar + Send>>> {
let mut sidecars: Vec<Box<dyn Sidecar + Send>> = Vec::new();
let system_address = config.listener().system().addr().to_string();
let device_id = env::var(DEVICE_ID_ENV).context(DEVICE_ID_ENV)?;
let settings = BridgeSettings::new()?;
let bridge_controller =
BridgeController::new(system_address.clone(), device_id.to_owned(), settings);
let bridge_controller_handle = bridge_controller.handle();
sidecars.push(Box::new(bridge_controller));
let mut command_handler = CommandHandler::new(system_address, &device_id);
command_handler.add_command(DisconnectCommand::new(&broker_handle));
command_handler.add_command(AuthorizedIdentitiesCommand::new(&broker_handle));
command_handler.add_command(PolicyUpdateCommand::new(broker_handle));
command_handler.add_command(BridgeUpdateCommand::new(bridge_controller_handle));
sidecars.push(Box::new(command_handler));
Ok(sidecars)
}
pub const WORKLOAD_URI: &str = "IOTEDGE_WORKLOADURI";
pub const EDGE_DEVICE_HOST_NAME: &str = "EdgeDeviceHostName";
pub const MODULE_ID: &str = "IOTEDGE_MODULEID";
pub const MODULE_GENERATION_ID: &str = "IOTEDGE_MODULEGENERATIONID";
pub const CERTIFICATE_VALIDITY_DAYS: i64 = 90;
async fn download_server_certificate() -> Result<ServerCertificate> {
let uri = env::var(WORKLOAD_URI).context(WORKLOAD_URI)?;
let hostname = env::var(EDGE_DEVICE_HOST_NAME).context(EDGE_DEVICE_HOST_NAME)?;
let module_id = env::var(MODULE_ID).context(MODULE_ID)?;
let generation_id = env::var(MODULE_GENERATION_ID).context(MODULE_GENERATION_ID)?;
let expiration = Utc::now() + Duration::days(CERTIFICATE_VALIDITY_DAYS);
let client = edgelet_client::workload(&uri)?;
let cert = client
.create_server_cert(&module_id, &generation_id, &hostname, expiration)
.await?;
if cert.private_key().type_() != "key" {
bail!(
"unknown type of private key: {}",
cert.private_key().type_()
);
}
if let Some(private_key) = cert.private_key().bytes() {
let identity = ServerCertificate::from_pem_pair(cert.certificate(), private_key)?;
Ok(identity)
} else {
bail!("missing private key");
}
}
fn shutdown_signal<Z, P>(server: &Server<Z, P>) -> impl Future<Output = ()> {
server
.listeners()
.iter()
.find_map(|listener| listener.transport().identity())
.map_or_else(
|| Either::Left(shutdown::shutdown()),
|identity| {
let system_or_cert_expired = future::select(
Box::pin(server_certificate_renewal(identity.not_after())),
Box::pin(shutdown::shutdown()),
);
Either::Right(system_or_cert_expired.map(drop))
},
)
}
async fn server_certificate_renewal(renew_at: DateTime<Utc>) {
let delay = renew_at - Utc::now();
if delay > Duration::zero() {
info!(
"scheduled server certificate renewal timer for {}",
renew_at
);
let delay = delay.to_std().expect("duration must not be negative");
crate::time::sleep(delay).await;
info!("restarting the broker to perform certificate renewal");
} else {
error!("server certificate expired at {}", renew_at);
}
}
#[derive(Debug, thiserror::Error)]
pub enum ServerCertificateLoadError {
#[error("unable to load server certificate from file {0} and private key {1}")]
File(PathBuf, PathBuf),
#[error("unable to download certificate from edgelet")]
Edgelet,
}
| {
debug!("a sidecar has stopped. shutting down all sidecars...");
if let Err(e) = res {
error!(message = "failed waiting for sidecar shutdown", error = %e);
}
// send a shutdown event to each of the remaining sidecars
shutdowns.remove(stopped);
let shutdowns = shutdowns.into_iter().map(SidecarShutdownHandle::shutdown);
future::join_all(shutdowns).await;
// wait for the rest to exit
future::join_all(sidecars).await;
// signal server
broker_handle.send(Message::System(SystemEvent::Shutdown))?;
server.await??
} | conditional_block |
utils.ts | import {
IVertex,
Ellipse,
DrawPolygonOption,
DrawLineOption,
DrawCircleOption,
DrawEllipseOption,
DrawRectOption,
LinearGradientOption,
DrawRoundRectConfig,
} from './layer.d';
/**
* Convert a hex color value to an rgba string
* @param hexColor ("#ffffff")
* @param opacity number
*/
export function hex2rgba(hexCo | city) {
const hex = hexColor.replace('#', '');
const r = parseInt(hex.substring(0, 2), 16);
const g = parseInt(hex.substring(2, 4), 16);
const b = parseInt(hex.substring(4, 6), 16);
const result = `rgba(${r}, ${g}, ${b}, ${opacity})`;
return result;
}
/**
*
* @param target {object}
*/
export function isObject(target) {
return (
Object.prototype.toString.call(target).toLowerCase() === '[object object]'
);
}
/**
*
* @param target {string | any}
*/
export function isString(target) {
return (
Object.prototype.toString.call(target).toLowerCase() === '[object string]'
);
}
/**
*
* @param target {object}
* @param options {object}
*/
export function mergeOptions(target, options) {
if (!isObject(target) || !isObject(options)) {
throw new Error('params must be object');
}
Object.keys(options).forEach(key => {
if (options[key] && !target[key]) {
target[key] = options[key];
return;
}
if (isObject(target[key]) && isObject(options[key])) {
mergeOptions(target[key], options[key]);
} else {
const isType =
Object.prototype.toString.call(options[key]).toLowerCase() ===
Object.prototype.toString.call(target[key]).toLowerCase();
if (!isType && target[key] != undefined) {
throw new Error(`params ${key} must be ${typeof target[key]}`);
} else {
target[key] = options[key];
}
}
});
}
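A brief, hypothetical usage sketch for mergeOptions (added for illustration, not part of the original source): it shows that the helper mutates `target` in place, copies keys that are missing, recurses into nested objects, and throws when an incoming value's type conflicts with an existing one. The literal values below are invented.

```ts
// Hypothetical example: fill in defaults while keeping nested overrides.
const target = { weight: 2, style: { color: '#ff0000' } };
mergeOptions(target, { opacity: 0.5, style: { color: '#00ff00' } });
// target is now { weight: 2, style: { color: '#00ff00' }, opacity: 0.5 }
// mergeOptions(target, { weight: 'thin' }) would throw: params weight must be number
```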
/**
*
* @param ctx
* @param vertex
* @param radius
* @param options
*/
export const drawCircle = (
ctx: any,
vertex: IVertex,
radius,
options: DrawCircleOption
) => {
if (!options.hasOwnProperty('dashed') || options.dashed === false) {
ctx.setLineDash([]);
} else {
const _dashedConfig =
options.dashedConfig && options.dashedConfig.length
? options.dashedConfig
: [5, 5, 5];
ctx.setLineDash(_dashedConfig);
}
const { lineColor, weight, opacity, fillColor } = options;
ctx.beginPath();
ctx.arc(vertex.x, vertex.y, radius, 0, 2 * Math.PI, false);
ctx.fillStyle = hex2rgba(fillColor, opacity);
ctx.fill();
ctx.lineWidth = weight;
ctx.strokeStyle = lineColor;
ctx.stroke();
};
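A minimal usage sketch for drawCircle, added for illustration only; it assumes a 2D canvas context and an options object with the fields the function destructures (lineColor, weight, opacity, fillColor) plus the optional dashed/dashedConfig pair. The concrete values are made up.

```ts
// Hypothetical example: dashed circle of radius 30 centred at (100, 100).
const ctx = document.createElement('canvas').getContext('2d')!;
drawCircle(ctx, { x: 100, y: 100 }, 30, {
  lineColor: '#ff4d4f',
  fillColor: '#ffe58f',
  weight: 2,
  opacity: 0.4,
  dashed: true,
  dashedConfig: [4, 2], // forwarded to ctx.setLineDash
});
```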
/**
*
* @param ctx
* @param vertexes
* @param drawPolyOption
*/
export const drawPolygon = (
ctx: any,
vertexes: IVertex[],
drawPolyOption: DrawPolygonOption
) => {
const { lineColor, fillColor, weight, opacity } = drawPolyOption;
ctx.fillStyle = hex2rgba(fillColor, opacity);
ctx.lineWidth = weight;
ctx.strokeStyle = hex2rgba(lineColor, opacity);
ctx.beginPath();
ctx.moveTo(vertexes[0].x, vertexes[0].y);
for (let i = 1; i < vertexes.length; i++) {
const { x, y } = vertexes[i];
ctx.lineTo(x, y);
}
ctx.lineTo(vertexes[0].x, vertexes[0].y);
ctx.closePath();
ctx.fill();
ctx.stroke();
};
/**
*
* @param ctx
* @param start
* @param end
* @param drawLineOption
*/
export const drawLine = (
ctx: any,
start: IVertex,
end: IVertex,
drawLineOption: DrawLineOption
): any => {
const { color, weight, opacity, strokeStyle, noStrokeStyle } = drawLineOption;
ctx.beginPath();
ctx.moveTo(start.x, start.y);
ctx.lineTo(end.x, end.y);
if (!noStrokeStyle) {
const style = hex2rgba(color, opacity);
ctx.strokeStyle = strokeStyle ? strokeStyle : style;
}
ctx.lineWidth = weight;
ctx.stroke();
return {
start,
end
};
};
export function drawRect(
ctx: any,
rect: [IVertex, IVertex],
drawRectOption: DrawRectOption
) {
const {
lineColor,
fillColor,
weight,
opacity,
dashed,
dashedConfig
} = drawRectOption;
ctx.beginPath();
// dashed-line setup
if (dashed) {
const _dashedConfig =
dashedConfig && dashedConfig.length ? dashedConfig : [5, 5, 5];
ctx.setLineDash(_dashedConfig);
} else {
ctx.setLineDash([]);
}
ctx.lineWidth = weight;
ctx.strokeStyle = hex2rgba(lineColor, opacity);
if (fillColor) { ctx.fillStyle = hex2rgba(fillColor, opacity) };
const [{ x: startX, y: startY }, { x: endX, y: endY }] = rect;
const width = endX - startX;
const height = endY - startY;
ctx.rect(startX, startY, width, height);
fillColor && ctx.fill()
ctx.stroke();
return rect;
}
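A short, illustrative sketch for drawRect (added, not from the original file); the exact optionality of DrawRectOption lives in layer.d, so treating fillColor as omittable here is an assumption, hence the cast.

```ts
// Hypothetical example: dashed outline from (10, 10) to (110, 60), no fill.
const ctx = document.createElement('canvas').getContext('2d')!;
drawRect(ctx, [{ x: 10, y: 10 }, { x: 110, y: 60 }], {
  lineColor: '#333333',
  weight: 1,
  opacity: 1,
  dashed: true,
  dashedConfig: [5, 5],
} as DrawRectOption);
```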
export function drawEllipse(
ctx: any,
ellipse: Ellipse,
drawEllipseOption: DrawEllipseOption
) {
const {
vertex: { x, y },
radius: { minorAxis, macroAxis }
} = ellipse;
const {
fillColor,
opacity,
linearFlag,
linearStartPoint,
linearEndPoint,
startRadius,
endRadius,
colorItems
} = drawEllipseOption;
ctx.save();
let grd = null;
if (linearFlag) {
grd = ctx.createLinearGradient(
linearStartPoint.x,
linearStartPoint.y,
linearEndPoint.x,
linearEndPoint.y
);
colorItems.forEach(item => {
grd.addColorStop(item.position, item.color);
});
} else {
grd = ctx.createRadialGradient(x, y, startRadius, x, y, endRadius);
colorItems.forEach(item => {
grd.addColorStop(item.position, item.color);
});
}
ctx.fillStyle = grd;
var step = minorAxis > macroAxis ? 1 / minorAxis : 1 / macroAxis;
ctx.beginPath();
ctx.moveTo(x + minorAxis, y);
for (let i = 0; i < 2 * Math.PI; i += step) {
ctx.lineTo(x + minorAxis * Math.cos(i), y + macroAxis * Math.sin(i));
}
ctx.closePath();
ctx.fill();
}
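An illustrative call to drawEllipse (added, not original). Only the radial-gradient branch is exercised (linearFlag false); fillColor and opacity are destructured but unused on that path, so they are omitted, and whether DrawEllipseOption marks them optional is an assumption about layer.d.

```ts
// Hypothetical example: soft radial-gradient ellipse centred at (150, 80).
const ctx = document.createElement('canvas').getContext('2d')!;
drawEllipse(ctx, { vertex: { x: 150, y: 80 }, radius: { minorAxis: 60, macroAxis: 30 } }, {
  linearFlag: false,
  startRadius: 0,
  endRadius: 60,
  colorItems: [
    { position: 0, color: 'rgba(104, 205, 250, 0.8)' },
    { position: 1, color: 'rgba(104, 205, 250, 0)' },
  ],
} as DrawEllipseOption);
```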
/**
*
* @param ctx
* @param rect
*/
export const clearCanvas = (
ctx: any,
rect: { width: number; height: number }
) => {
const { width, height } = rect;
ctx.clearRect(0, 0, width, height);
};
/**
*
* @param event {mouseEvent}
*/
export function transformVertex(event, zoom = 1): IVertex {
const { offsetX: x, offsetY: y } = event;
return { x: x / zoom, y: y / zoom };
}
export function isInRect(vertex: IVertex, rect: [IVertex, IVertex]): boolean {
if (
vertex.x > rect[0].x &&
vertex.x < rect[1].x &&
vertex.y > rect[0].y &&
vertex.y < rect[1].y
) {
return true;
}
return false;
}
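A small, hypothetical hit-test sketch combining transformVertex and isInRect (added for illustration). The zoom factor and rectangle are invented, and rect is assumed to be [top-left, bottom-right], as the comparison logic implies.

```ts
// Hypothetical example: map a mouse event to layer coordinates, then hit-test.
const canvas = document.querySelector('canvas')!;
canvas.addEventListener('mousedown', (event: MouseEvent) => {
  const vertex = transformVertex(event, 2); // assumed zoom factor of 2
  const hit = isInRect(vertex, [{ x: 0, y: 0 }, { x: 200, y: 150 }]);
  console.log(hit ? 'inside' : 'outside');
});
```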
/**
* Create a linear gradient on the canvas context
* @param ctx
* @param option
*/
export const createLinearGradient = (
ctx: CanvasRenderingContext2D,
option: LinearGradientOption
) => {
const { scope, colorSteps } = option;
const [start, end] = scope;
const gradient = ctx.createLinearGradient(start.x, start.y, end.x, end.y);
colorSteps.forEach((step, index) => {
gradient.addColorStop(step.distance, step.color);
});
return gradient;
};
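An illustrative use of createLinearGradient (added): the scope/colorSteps field names come straight from the destructuring above, while the concrete coordinates and colors are invented.

```ts
// Hypothetical example: horizontal gradient used as a fill style.
const ctx = document.createElement('canvas').getContext('2d')!;
const gradient = createLinearGradient(ctx, {
  scope: [{ x: 0, y: 0 }, { x: 300, y: 0 }],
  colorSteps: [
    { distance: 0, color: '#68cdfa' },
    { distance: 1, color: '#1b6fa8' },
  ],
});
ctx.fillStyle = gradient;
ctx.fillRect(0, 0, 300, 40);
```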
/**
* Get the direction of the line connecting two points
* @param start IVertex
* @param end IVertex
*/
export function getDirection(start: IVertex, end: IVertex) {
let left: boolean = false;
let top: boolean = false;
let right: boolean = false;
let bottom: boolean = false;
if (start.x <= end.x) {
right = true;
} else {
left = true;
}
if (start.y <= end.y) {
bottom = true;
} else {
top = true;
}
return {
left,
top,
right,
bottom
};
}
/**
* @param start IVertex
* @param end IVertex
* Given two points, get the slope and the y-intercept of the line through them
*/
export function getSlopeAndB(
start: IVertex,
end: IVertex
): { slope: number; b: number } {
const { y: y1, x: x1 } = start;
const { y: y2, x: x2 } = end;
const xDistance = x1 - x2;
const yDistance = y1 - y2;
const b = y1 - x1 * (yDistance / xDistance);
return {
b,
slope: yDistance / xDistance
};
}
/**
* Compute the next point's coordinates from the line equation (slope / y-intercept), the current step index and the total number of steps
* @param start: IVertex,
* @param end: IVertex,
* @param slope: number,
* @param stepIndex: number,
* @param totalStep: number,
* @param b: number
*/
export function getNextVertex(
start: IVertex,
end: IVertex,
slope: number,
stepIndex: number,
totalStep: number,
b: number
): IVertex {
const { y: y1, x: x1 } = start;
const { y: y2, x: x2 } = end;
const xDistance = Math.abs(x1 - x2) * (stepIndex / totalStep);
const direction = getDirection(start, end);
let x = x1 + xDistance;
if (direction.left) {
x = x1 - xDistance;
}
const y = slope * x + b;
return { x, y };
}
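A worked, hypothetical example of getSlopeAndB and getNextVertex together (added for illustration). Note that both helpers assume a non-vertical segment; when start.x equals end.x the slope computation divides by zero.

```ts
// Hypothetical example: interpolate 4 evenly spaced points from (0, 0) to (100, 50).
const start = { x: 0, y: 0 };
const end = { x: 100, y: 50 };
const { slope, b } = getSlopeAndB(start, end); // slope = 0.5, b = 0
for (let step = 1; step <= 4; step++) {
  console.log(getNextVertex(start, end, slope, step, 4, b));
  // { x: 25, y: 12.5 }, { x: 50, y: 25 }, { x: 75, y: 37.5 }, { x: 100, y: 50 }
}
```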
export interface ArrowOptions {
strokeStyle: string;
colorFill: [string, string];
}
const defaultArrowOpitons: ArrowOptions = {
strokeStyle: '#68cdfa',
colorFill: ['#68cdfa', '#68cdfa']
};
/* Get the two base points used to draw the arrow head */
export function getArrowPoint(
pixelStart: IVertex,
pixelEnd: IVertex,
length: number = 15
) {
// arrow-drawing helper
// const length = 12;
const angleValue = Math.PI / 7;
const angle = angleValue; // angle between the arrow head and the main line
const r = length; // r / Math.sin(angle) is the arrow head length
let delta = 0; // slope of the main line; undefined when the line is vertical
let param = 0; // shared factor, declared here to keep the code short
let pixelTemX = 0;
let pixelTemY = 0; // temporary point coordinates
let pixelX = 0;
let pixelY = 0;
let pixelX1 = 0;
let pixelY1 = 0; // the two arrow-head points
if (pixelEnd.x - pixelStart.x === 0) {
// when the slope does not exist (the line is vertical)
pixelTemX = pixelEnd.x;
if (pixelEnd.y > pixelStart.y) {
pixelTemY = pixelEnd.y - r;
} else {
pixelTemY = pixelEnd.y + r;
}
// given two vertices of a right triangle and one of its angles, solve for the remaining vertex
pixelX = pixelTemX - r * Math.tan(angle);
pixelX1 = pixelTemX + r * Math.tan(angle);
pixelY = pixelY1 = pixelTemY;
} else {
// when the slope exists
delta = (pixelEnd.y - pixelStart.y) / (pixelEnd.x - pixelStart.x);
param = Math.sqrt(delta * delta + 1);
if (pixelEnd.x - pixelStart.x < 0) {
// second and third quadrants
pixelTemX = pixelEnd.x + r / param;
pixelTemY = pixelEnd.y + (delta * r) / param;
} else {
// first and fourth quadrants
pixelTemX = pixelEnd.x - r / param;
pixelTemY = pixelEnd.y - (delta * r) / param;
}
// given two vertices of a right triangle and one of its angles, solve for the remaining vertex
pixelX = pixelTemX + (Math.tan(angle) * r * delta) / param;
pixelY = pixelTemY - (Math.tan(angle) * r) / param;
pixelX1 = pixelTemX - (Math.tan(angle) * r * delta) / param;
pixelY1 = pixelTemY + (Math.tan(angle) * r) / param;
}
return {
leftArrowPoint: {
x: pixelX,
y: pixelY
},
rightArrowPoint: {
x: pixelX1,
y: pixelY1
}
};
}
/* Draw the arrow head from the computed points */
export function drawArrow(
ctx: CanvasRenderingContext2D,
arrowPoints: {
endPoint: IVertex;
leftArrowPoint: IVertex;
rightArrowPoint: IVertex;
},
options: ArrowOptions = defaultArrowOpitons
) {
const { endPoint, leftArrowPoint, rightArrowPoint } = arrowPoints;
// draw the first arrow line
ctx.beginPath();
ctx.strokeStyle = options.strokeStyle || defaultArrowOpitons.strokeStyle;
ctx.lineWidth = 1;
ctx.moveTo(endPoint.x, endPoint.y);
ctx.lineTo(leftArrowPoint.x, leftArrowPoint.y);
ctx.lineTo(rightArrowPoint.x, rightArrowPoint.y);
ctx.moveTo(rightArrowPoint.x, rightArrowPoint.y);
ctx.lineTo(endPoint.x, endPoint.y);
const grd = ctx.createLinearGradient(0, 0, endPoint.x, 0); // gradient fill from (0, 0) to (endPoint.x, 0), left to right
grd.addColorStop(0, options.colorFill[0] || defaultArrowOpitons.colorFill[0]); // start color
grd.addColorStop(1, options.colorFill[1] || defaultArrowOpitons.colorFill[1]); // end color
ctx.fillStyle = grd; // fill with the gradient defined above
ctx.fill(); // close the shape and paint it as a fill
ctx.stroke();
ctx.closePath();
ctx.restore();
ctx.save();
}
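A hypothetical end-to-end sketch (added, not original): draw a segment, compute the arrow-head base points with getArrowPoint, then render the head with drawArrow. Whether DrawLineOption allows omitting strokeStyle/noStrokeStyle is an assumption about layer.d, hence the cast.

```ts
// Hypothetical example: line from (20, 20) to (180, 120) with an arrow head at the end.
const ctx = document.createElement('canvas').getContext('2d')!;
const from = { x: 20, y: 20 };
const to = { x: 180, y: 120 };
drawLine(ctx, from, to, { color: '#68cdfa', weight: 1, opacity: 1 } as DrawLineOption);
const { leftArrowPoint, rightArrowPoint } = getArrowPoint(from, to, 15);
drawArrow(ctx, { endPoint: to, leftArrowPoint, rightArrowPoint });
```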
/**
* Draw a rounded rectangle
* @param ctx
* @param roundRect: DrawRoundRectConfig rounded-rectangle geometry config
* @param options : DrawRectOption rounded-rectangle style options
*/
export function drawRoundRect(
ctx:CanvasRenderingContext2D,
roundRect:DrawRoundRectConfig,
options:DrawRectOption
){
// start is the top-left corner vertex
const {
start,
width,
height,
radius,
} = roundRect;
const {x,y} = start;
ctx.beginPath();
ctx.moveTo(x, y);
// top edge
ctx.lineTo(x + width, y);
// top-right corner
ctx.arcTo(x + width + radius, y, x + width + radius, y + radius, radius);
// right edge
ctx.lineTo(x + width + radius, y + height + radius);
// bottom-right corner
ctx.arcTo(x + width + radius, y + height + 2 * radius, x + width, y + height + 2 * radius, radius);
// bottom edge
ctx.lineTo(x, y + height + 2 * radius);
// bottom-left corner
ctx.arcTo(x - radius, y + height + 2 * radius, x + -radius, y + height + radius, radius);
// left edge
ctx.lineTo(x - radius, y + radius);
// top-left corner
ctx.arcTo(x - radius, y, x, y, radius);
ctx.lineTo(x, y);
ctx.closePath();
ctx.fillStyle = options.fillColor;
ctx.lineWidth = options.weight;
ctx.strokeStyle = options.lineColor;
ctx.stroke();
ctx.fill();
}
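An illustrative call to drawRoundRect (added): start/width/height/radius mirror the destructuring above. Unlike most helpers in this file, the style values are passed to fillStyle/strokeStyle unconverted, so plain color strings are used; treating the dashed-related fields as optional is an assumption, hence the cast.

```ts
// Hypothetical example: rounded rectangle whose top-left corner is (40, 40).
const ctx = document.createElement('canvas').getContext('2d')!;
drawRoundRect(
  ctx,
  { start: { x: 40, y: 40 }, width: 120, height: 60, radius: 8 },
  { lineColor: '#333333', fillColor: '#f5f5f5', weight: 1, opacity: 1 } as DrawRectOption,
);
```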
| lor, opa | identifier_name |
utils.ts | import {
IVertex,
Ellipse,
DrawPolygonOption,
DrawLineOption,
DrawCircleOption,
DrawEllipseOption,
DrawRectOption,
LinearGradientOption,
DrawRoundRectConfig,
} from './layer.d';
/**
* Convert a hex color value to an rgba string
* @param hexColor ("#ffffff")
* @param opacity number
*/
export function hex2rgba(hexColor, opacity) {
const hex = hexColor.replace('#', '');
const r = parseInt(hex.substring(0, 2), 16);
const g = parseInt(hex.substring(2, 4), 16);
const b = parseInt(hex.substring(4, 6), 16);
const result = `rgba(${r}, ${g}, ${b}, ${opacity})`;
return result;
}
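A couple of worked, illustrative calls (added, not part of the original file): hex2rgba expects a six-digit hex string with a leading '#'; shorthand forms like '#fff' are not expanded.

```ts
// Hypothetical examples of the returned string:
hex2rgba('#ff8800', 0.5); // -> 'rgba(255, 136, 0, 0.5)'
hex2rgba('#000000', 1);   // -> 'rgba(0, 0, 0, 1)'
```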
/**
*
* @param target {object}
*/
export function isObject(target) {
return (
Object.prototype.toString.call(target).toLowerCase() === '[object object]'
);
}
/**
*
* @param target {string | any}
*/
export function isString(target) {
return (
Object.prototype.toString.call(target).toLowerCase() === '[object string]'
);
}
/**
*
* @param target {object}
* @param options {object}
*/
export function mergeOptions(target, options) {
if (!isObject(target) || !isObject(options)) {
throw new Error('params must be object');
}
Object.keys(options).forEach(key => {
if (options[key] && !target[key]) {
target[key] = options[key];
return;
}
if (isObject(target[key]) && isObject(options[key])) {
mergeOptions(target[key], options[key]);
} else {
const isType =
Object.prototype.toString.call(options[key]).toLowerCase() ===
Object.prototype.toString.call(target[key]).toLowerCase();
if (!isType && target[key] != undefined) {
throw new Error(`params ${key} must be ${typeof target[key]}`);
} else {
target[key] = options[key];
}
}
});
}
/**
*
* @param ctx
* @param vertex
* @param radius
* @param options
*/
export const drawCircle = (
ctx: any,
vertex: IVertex,
radius,
options: DrawCircleOption
) => {
if (!options.hasOwnProperty('dashed') || options.dashed === false) {
ctx.setLineDash([]);
} else {
const _dashedConfig =
options.dashedConfig && options.dashedConfig.length
? options.dashedConfig
: [5, 5, 5];
ctx.setLineDash(_dashedConfig);
}
const { lineColor, weight, opacity, fillColor } = options;
ctx.beginPath();
ctx.arc(vertex.x, vertex.y, radius, 0, 2 * Math.PI, false);
ctx.fillStyle = hex2rgba(fillColor, opacity);
ctx.fill();
ctx.lineWidth = weight;
ctx.strokeStyle = lineColor;
ctx.stroke();
};
/**
*
* @param ctx
* @param vertexes
* @param drawPolyOption
*/
export const drawPolygon = (
ctx: any,
vertexes: IVertex[],
drawPolyOption: DrawPolygonOption
) => {
const { lineColor, fillColor, weight, opacity } = drawPolyOption;
ctx.fillStyle = hex2rgba(fillColor, opacity);
ctx.lineWidth = weight;
ctx.strokeStyle = hex2rgba(lineColor, opacity);
ctx.beginPath();
ctx.moveTo(vertexes[0].x, vertexes[0].y);
for (let i = 1; i < vertexes.length; i++) {
const { x, y } = vertexes[i];
ctx.lineTo(x, y);
}
ctx.lineTo(vertexes[0].x, vertexes[0].y);
ctx.closePath();
ctx.fill();
ctx.stroke();
};
/**
*
* @param ctx
* @param start
* @param end
* @param drawLineOption
*/
export const drawLine = (
ctx: any,
start: IVertex,
end: IVertex,
drawLineOption: DrawLineOption
): any => {
const { color, weight, opacity, strokeStyle, noStrokeStyle } = drawLineOption;
ctx.beginPath();
ctx.moveTo(start.x, start.y);
ctx.lineTo(end.x, end.y);
if (!noStrokeStyle) {
const style = hex2rgba(color, opacity);
ctx.strokeStyle = strokeStyle ? strokeStyle : style;
}
ctx.lineWidth = weight;
ctx.stroke();
return {
start,
end
};
};
export function drawRect(
ctx: any,
rect: [IVertex, IVertex],
drawRectOption: DrawRectOption
) {
const {
lineColor,
fillColor,
weight,
opacity,
dashed,
dashedConfig
} = drawRectOption;
ctx.beginPath();
// dashed-line setup
if (dashed) {
const _dashedConfig =
dashedConfig && dashedConfig.length ? dashedConfig : [5, 5, 5];
ctx.setLineDash(_dashedConfig);
} else {
ctx.setLineDash([]);
}
ctx.lineWidth = weight;
ctx.strokeStyle = hex2rgba(lineColor, opacity);
if (fillColor) { ctx.fillStyle = hex2rgba(fillColor, opacity) };
const [{ x: startX, y: startY }, { x: endX, y: endY }] = rect;
const width = endX - startX;
const height = endY - startY;
ctx.rect(startX, startY, width, height);
fillColor && ctx.fill()
ctx.stroke();
return rect;
}
export function drawEllipse(
ctx: any,
ellipse: Ellipse,
drawEllipseOption: DrawEllipseOption
) {
const {
vertex: { x, y },
radius: { minorAxis, macroAxis }
} = ellipse;
const {
fillColor,
opacity,
linearFlag,
linearStartPoint,
linearEndPoint,
startRadius,
endRadius,
colorItems
} = drawEllipseOption;
ctx.save();
let grd = null;
if (linearFlag) {
grd = ctx.createLinearGradient(
linearStartPoint.x,
linearStartPoint.y,
linearEndPoint.x,
linearEndPoint.y
);
colorItems.forEach(item => {
grd.addColorStop(item.position, item.color);
});
} else {
grd = ctx.createRadialGradient(x, y, startRadius, x, y, endRadius);
colorItems.forEach(item => {
grd.addColorStop(item.position, item.color);
});
}
ctx.fillStyle = grd;
var step = minorAxis > macroAxis ? 1 / minorAxis : 1 / macroAxis;
ctx.beginPath();
ctx.moveTo(x + minorAxis, y);
for (let i = 0; i < 2 * Math.PI; i += step) {
ctx.lineTo(x + minorAxis * Math.cos(i), y + macroAxis * Math.sin(i));
}
ctx.closePath();
ctx.fill();
}
/**
*
* @param ctx
* @param rect
*/
export const clearCanvas = (
ctx: any,
rect: { width: number; height: number }
) => {
const { width, height } = rect;
ctx.clearRect(0, 0, width, height);
};
/**
*
* @param event {mouseEvent}
*/
export function transformVertex(event, zoom = 1): IVertex {
const { offsetX: x, offsetY: y } = event;
return { x: x / zoom, y: y / zoom };
}
export function isInRect(vertex: IVertex, rect: [IVertex, IVertex]): boolean {
if (
vertex.x > rect[0].x &&
vertex.x < rect[1].x &&
vertex.y > rect[0].y &&
vertex.y < rect[1].y
) {
return true;
}
return false;
}
/**
* Create a linear gradient on the canvas context
* @param ctx
* @param option
*/
export const createLinearGradient = (
ctx: CanvasRenderingContext2D,
option: LinearGradientOption
) => {
const { scope, colorSteps } = option;
const [start, end] = scope;
const gradient = ctx.createLinearGradient(start.x, start.y, end.x, end.y);
colorSteps.forEach((step, index) => {
gradient.addColorStop(step.distance, step.color);
});
return gradient;
};
/**
* Get the direction of the line connecting two points
* @param start IVetex
* @param end IVetex | export function getDirection(start: IVertex, end: IVertex) {
let left: boolean = false;
let top: boolean = false;
let right: boolean = false;
let bottom: boolean = false;
if (start.x <= end.x) {
right = true;
} else {
left = true;
}
if (start.y <= end.y) {
bottom = true;
} else {
top = true;
}
return {
left,
top,
right,
bottom
};
}
/**
* @param start IVertex
* @param end IVertex
* Given two points, get the slope and the y-intercept of the line through them
*/
export function getSlopeAndB(
start: IVertex,
end: IVertex
): { slope: number; b: number } {
const { y: y1, x: x1 } = start;
const { y: y2, x: x2 } = end;
const xDistance = x1 - x2;
const yDistance = y1 - y2;
const b = y1 - x1 * (yDistance / xDistance);
return {
b,
slope: yDistance / xDistance
};
}
/**
* Compute the next point's coordinates from the line equation (slope / y-intercept), the current step index and the total number of steps
* @param start: IVertex,
* @param end: IVertex,
* @param slope: number,
* @param stepIndex: number,
* @param totalStep: number,
* @param b: number
*/
export function getNextVertex(
start: IVertex,
end: IVertex,
slope: number,
stepIndex: number,
totalStep: number,
b: number
): IVertex {
const { y: y1, x: x1 } = start;
const { y: y2, x: x2 } = end;
const xDistance = Math.abs(x1 - x2) * (stepIndex / totalStep);
const direction = getDirection(start, end);
let x = x1 + xDistance;
if (direction.left) {
x = x1 - xDistance;
}
const y = slope * x + b;
return { x, y };
}
export interface ArrowOptions {
strokeStyle: string;
colorFill: [string, string];
}
const defaultArrowOpitons: ArrowOptions = {
strokeStyle: '#68cdfa',
colorFill: ['#68cdfa', '#68cdfa']
};
/* Get the two base points used to draw the arrow head */
export function getArrowPoint(
pixelStart: IVertex,
pixelEnd: IVertex,
length: number = 15
) {
// arrow-drawing helper
// const length = 12;
const angleValue = Math.PI / 7;
const angle = angleValue; // angle between the arrow head and the main line
const r = length; // r / Math.sin(angle) is the arrow head length
let delta = 0; // slope of the main line; undefined when the line is vertical
let param = 0; // shared factor, declared here to keep the code short
let pixelTemX = 0;
let pixelTemY = 0; // temporary point coordinates
let pixelX = 0;
let pixelY = 0;
let pixelX1 = 0;
let pixelY1 = 0; // the two arrow-head points
if (pixelEnd.x - pixelStart.x === 0) {
// when the slope does not exist (the line is vertical)
pixelTemX = pixelEnd.x;
if (pixelEnd.y > pixelStart.y) {
pixelTemY = pixelEnd.y - r;
} else {
pixelTemY = pixelEnd.y + r;
}
// given two vertices of a right triangle and one of its angles, solve for the remaining vertex
pixelX = pixelTemX - r * Math.tan(angle);
pixelX1 = pixelTemX + r * Math.tan(angle);
pixelY = pixelY1 = pixelTemY;
} else {
// when the slope exists
delta = (pixelEnd.y - pixelStart.y) / (pixelEnd.x - pixelStart.x);
param = Math.sqrt(delta * delta + 1);
if (pixelEnd.x - pixelStart.x < 0) {
// second and third quadrants
pixelTemX = pixelEnd.x + r / param;
pixelTemY = pixelEnd.y + (delta * r) / param;
} else {
// first and fourth quadrants
pixelTemX = pixelEnd.x - r / param;
pixelTemY = pixelEnd.y - (delta * r) / param;
}
// given two vertices of a right triangle and one of its angles, solve for the remaining vertex
pixelX = pixelTemX + (Math.tan(angle) * r * delta) / param;
pixelY = pixelTemY - (Math.tan(angle) * r) / param;
pixelX1 = pixelTemX - (Math.tan(angle) * r * delta) / param;
pixelY1 = pixelTemY + (Math.tan(angle) * r) / param;
}
return {
leftArrowPoint: {
x: pixelX,
y: pixelY
},
rightArrowPoint: {
x: pixelX1,
y: pixelY1
}
};
}
/* Draw the arrow head from the computed points */
export function drawArrow(
ctx: CanvasRenderingContext2D,
arrowPoints: {
endPoint: IVertex;
leftArrowPoint: IVertex;
rightArrowPoint: IVertex;
},
options: ArrowOptions = defaultArrowOpitons
) {
const { endPoint, leftArrowPoint, rightArrowPoint } = arrowPoints;
// draw the first arrow line
ctx.beginPath();
ctx.strokeStyle = options.strokeStyle || defaultArrowOpitons.strokeStyle;
ctx.lineWidth = 1;
ctx.moveTo(endPoint.x, endPoint.y);
ctx.lineTo(leftArrowPoint.x, leftArrowPoint.y);
ctx.lineTo(rightArrowPoint.x, rightArrowPoint.y);
ctx.moveTo(rightArrowPoint.x, rightArrowPoint.y);
ctx.lineTo(endPoint.x, endPoint.y);
const grd = ctx.createLinearGradient(0, 0, endPoint.x, 0); // gradient fill from (0, 0) to (endPoint.x, 0), left to right
grd.addColorStop(0, options.colorFill[0] || defaultArrowOpitons.colorFill[0]); // start color
grd.addColorStop(1, options.colorFill[1] || defaultArrowOpitons.colorFill[1]); // end color
ctx.fillStyle = grd; // fill with the gradient defined above
ctx.fill(); // close the shape and paint it as a fill
ctx.stroke();
ctx.closePath();
ctx.restore();
ctx.save();
}
/**
* Draw a rounded rectangle
* @param ctx
* @param roundRect: DrawRoundRectConfig rounded-rectangle geometry config
* @param options : DrawRectOption rounded-rectangle style options
*/
export function drawRoundRect(
ctx:CanvasRenderingContext2D,
roundRect:DrawRoundRectConfig,
options:DrawRectOption
){
// start is the top-left corner vertex
const {
start,
width,
height,
radius,
} = roundRect;
const {x,y} = start;
ctx.beginPath();
ctx.moveTo(x, y);
// top edge
ctx.lineTo(x + width, y);
// top-right corner
ctx.arcTo(x + width + radius, y, x + width + radius, y + radius, radius);
// right edge
ctx.lineTo(x + width + radius, y + height + radius);
// bottom-right corner
ctx.arcTo(x + width + radius, y + height + 2 * radius, x + width, y + height + 2 * radius, radius);
// bottom edge
ctx.lineTo(x, y + height + 2 * radius);
// bottom-left corner
ctx.arcTo(x - radius, y + height + 2 * radius, x + -radius, y + height + radius, radius);
// left edge
ctx.lineTo(x - radius, y + radius);
// top-left corner
ctx.arcTo(x - radius, y, x, y, radius);
ctx.lineTo(x, y);
ctx.closePath();
ctx.fillStyle = options.fillColor;
ctx.lineWidth = options.weight;
ctx.strokeStyle = options.lineColor;
ctx.stroke();
ctx.fill();
} | */ | random_line_split |
utils.ts | import {
IVertex,
Ellipse,
DrawPolygonOption,
DrawLineOption,
DrawCircleOption,
DrawEllipseOption,
DrawRectOption,
LinearGradientOption,
DrawRoundRectConfig,
} from './layer.d';
/**
* Convert a hex color value to an rgba string
* @param hexColor ("#ffffff")
* @param opacity number
*/
export function hex2rgba(hexColor, opacity) {
const hex = hexColor.replace('#', '');
const r = parseInt(hex.substring(0, 2), 16);
const g = parseInt(hex.substring(2, 4), 16);
const b = parseInt(hex.substring(4, 6), 16);
const result = `rgba(${r}, ${g}, ${b}, ${opacity})`;
return result;
}
/**
*
* @param target {object}
*/
export function isObject(target) {
return (
Object.prototype.toString.call(target).toLowerCase() === '[object object]'
);
}
/**
*
* @param target {string | any}
*/
export function isString(target) {
return (
Object.prototype.toString.call(target).toLowerCase() === '[object string]'
);
}
/**
*
* @param target {object}
* @param options {object}
*/
export function mergeOptions(target, options) {
if (!isObject(target) || !isObject(options)) {
throw new Error('params must be object');
}
Object.keys(options).forEach(key => {
if (options[key] && !target[key]) {
target[key] = options[key];
return;
}
if (isObject(target[key]) && isObject(options[key])) {
mergeOptions(target[key], options[key]);
} else {
const isType =
Object.prototype.toString.call(options[key]).toLowerCase() ===
Object.prototype.toString.call(target[key]).toLowerCase();
if (!isType && target[key] != undefined) {
throw new Error(`params ${key} must be ${typeof target[key]}`);
} else {
targ |
/**
*
* @param ctx
* @param vertex
* @param radius
* @param options
*/
export const drawCircle = (
ctx: any,
vertex: IVertex,
radius,
options: DrawCircleOption
) => {
if (!options.hasOwnProperty('dashed') || options.dashed === false) {
ctx.setLineDash([]);
} else {
const _dashedConfig =
options.dashedConfig && options.dashedConfig.length
? options.dashedConfig
: [5, 5, 5];
ctx.setLineDash(_dashedConfig);
}
const { lineColor, weight, opacity, fillColor } = options;
ctx.beginPath();
ctx.arc(vertex.x, vertex.y, radius, 0, 2 * Math.PI, false);
ctx.fillStyle = hex2rgba(fillColor, opacity);
ctx.fill();
ctx.lineWidth = weight;
ctx.strokeStyle = lineColor;
ctx.stroke();
};
/**
*
* @param ctx
* @param vertexes
* @param drawPolyOption
*/
export const drawPolygon = (
ctx: any,
vertexes: IVertex[],
drawPolyOption: DrawPolygonOption
) => {
const { lineColor, fillColor, weight, opacity } = drawPolyOption;
ctx.fillStyle = hex2rgba(fillColor, opacity);
ctx.lineWidth = weight;
ctx.strokeStyle = hex2rgba(lineColor, opacity);
ctx.beginPath();
ctx.moveTo(vertexes[0].x, vertexes[0].y);
for (let i = 1; i < vertexes.length; i++) {
const { x, y } = vertexes[i];
ctx.lineTo(x, y);
}
ctx.lineTo(vertexes[0].x, vertexes[0].y);
ctx.closePath();
ctx.fill();
ctx.stroke();
};
/**
*
* @param ctx
* @param start
* @param end
* @param drawLineOption
*/
export const drawLine = (
ctx: any,
start: IVertex,
end: IVertex,
drawLineOption: DrawLineOption
): any => {
const { color, weight, opacity, strokeStyle, noStrokeStyle } = drawLineOption;
ctx.beginPath();
ctx.moveTo(start.x, start.y);
ctx.lineTo(end.x, end.y);
if (!noStrokeStyle) {
const style = hex2rgba(color, opacity);
ctx.strokeStyle = strokeStyle ? strokeStyle : style;
}
ctx.lineWidth = weight;
ctx.stroke();
return {
start,
end
};
};
export function drawRect(
ctx: any,
rect: [IVertex, IVertex],
drawRectOption: DrawRectOption
) {
const {
lineColor,
fillColor,
weight,
opacity,
dashed,
dashedConfig
} = drawRectOption;
ctx.beginPath();
// dashed-line setup
if (dashed) {
const _dashedConfig =
dashedConfig && dashedConfig.length ? dashedConfig : [5, 5, 5];
ctx.setLineDash(_dashedConfig);
} else {
ctx.setLineDash([]);
}
ctx.lineWidth = weight;
ctx.strokeStyle = hex2rgba(lineColor, opacity);
if (fillColor) { ctx.fillStyle = hex2rgba(fillColor, opacity) };
const [{ x: startX, y: startY }, { x: endX, y: endY }] = rect;
const width = endX - startX;
const height = endY - startY;
ctx.rect(startX, startY, width, height);
fillColor && ctx.fill()
ctx.stroke();
return rect;
}
export function drawEllipse(
ctx: any,
ellipse: Ellipse,
drawEllipseOption: DrawEllipseOption
) {
const {
vertex: { x, y },
radius: { minorAxis, macroAxis }
} = ellipse;
const {
fillColor,
opacity,
linearFlag,
linearStartPoint,
linearEndPoint,
startRadius,
endRadius,
colorItems
} = drawEllipseOption;
ctx.save();
let grd = null;
if (linearFlag) {
grd = ctx.createLinearGradient(
linearStartPoint.x,
linearStartPoint.y,
linearEndPoint.x,
linearEndPoint.y
);
colorItems.forEach(item => {
grd.addColorStop(item.position, item.color);
});
} else {
grd = ctx.createRadialGradient(x, y, startRadius, x, y, endRadius);
colorItems.forEach(item => {
grd.addColorStop(item.position, item.color);
});
}
ctx.fillStyle = grd;
var step = minorAxis > macroAxis ? 1 / minorAxis : 1 / macroAxis;
ctx.beginPath();
ctx.moveTo(x + minorAxis, y);
for (let i = 0; i < 2 * Math.PI; i += step) {
ctx.lineTo(x + minorAxis * Math.cos(i), y + macroAxis * Math.sin(i));
}
ctx.closePath();
ctx.fill();
}
/**
*
* @param ctx
* @param rect
*/
export const clearCanvas = (
ctx: any,
rect: { width: number; height: number }
) => {
const { width, height } = rect;
ctx.clearRect(0, 0, width, height);
};
/**
*
* @param event {mouseEvent}
*/
export function transformVertex(event, zoom = 1): IVertex {
const { offsetX: x, offsetY: y } = event;
return { x: x / zoom, y: y / zoom };
}
export function isInRect(vertex: IVertex, rect: [IVertex, IVertex]): boolean {
if (
vertex.x > rect[0].x &&
vertex.x < rect[1].x &&
vertex.y > rect[0].y &&
vertex.y < rect[1].y
) {
return true;
}
return false;
}
/**
* Create a linear gradient on the canvas context
* @param ctx
* @param option
*/
export const createLinearGradient = (
ctx: CanvasRenderingContext2D,
option: LinearGradientOption
) => {
const { scope, colorSteps } = option;
const [start, end] = scope;
const gradient = ctx.createLinearGradient(start.x, start.y, end.x, end.y);
colorSteps.forEach((step, index) => {
gradient.addColorStop(step.distance, step.color);
});
return gradient;
};
/**
* Get the direction of the line connecting two points
* @param start IVertex
* @param end IVertex
*/
export function getDirection(start: IVertex, end: IVertex) {
let left: boolean = false;
let top: boolean = false;
let right: boolean = false;
let bottom: boolean = false;
if (start.x <= end.x) {
right = true;
} else {
left = true;
}
if (start.y <= end.y) {
bottom = true;
} else {
top = true;
}
return {
left,
top,
right,
bottom
};
}
/**
* @param start IVertex
* @param end IVertex
* Given two points, get the slope and the y-intercept of the line through them
*/
export function getSlopeAndB(
start: IVertex,
end: IVertex
): { slope: number; b: number } {
const { y: y1, x: x1 } = start;
const { y: y2, x: x2 } = end;
const xDistance = x1 - x2;
const yDistance = y1 - y2;
const b = y1 - x1 * (yDistance / xDistance);
return {
b,
slope: yDistance / xDistance
};
}
/**
* Compute the next point's coordinates from the line equation (slope / y-intercept), the current step index and the total number of steps
* @param start: IVertex,
* @param end: IVertex,
* @param slope: number,
* @param stepIndex: number,
* @param totalStep: number,
* @param b: number
*/
export function getNextVertex(
start: IVertex,
end: IVertex,
slope: number,
stepIndex: number,
totalStep: number,
b: number
): IVertex {
const { y: y1, x: x1 } = start;
const { y: y2, x: x2 } = end;
const xDistance = Math.abs(x1 - x2) * (stepIndex / totalStep);
const direction = getDirection(start, end);
let x = x1 + xDistance;
if (direction.left) {
x = x1 - xDistance;
}
const y = slope * x + b;
return { x, y };
}
export interface ArrowOptions {
strokeStyle: string;
colorFill: [string, string];
}
const defaultArrowOpitons: ArrowOptions = {
strokeStyle: '#68cdfa',
colorFill: ['#68cdfa', '#68cdfa']
};
/* Get the two base points used to draw the arrow head */
export function getArrowPoint(
pixelStart: IVertex,
pixelEnd: IVertex,
length: number = 15
) {
// arrow-drawing helper
// const length = 12;
const angleValue = Math.PI / 7;
const angle = angleValue; // angle between the arrow head and the main line
const r = length; // r / Math.sin(angle) is the arrow head length
let delta = 0; // slope of the main line; undefined when the line is vertical
let param = 0; // shared factor, declared here to keep the code short
let pixelTemX = 0;
let pixelTemY = 0; // temporary point coordinates
let pixelX = 0;
let pixelY = 0;
let pixelX1 = 0;
let pixelY1 = 0; // the two arrow-head points
if (pixelEnd.x - pixelStart.x === 0) {
// when the slope does not exist (the line is vertical)
pixelTemX = pixelEnd.x;
if (pixelEnd.y > pixelStart.y) {
pixelTemY = pixelEnd.y - r;
} else {
pixelTemY = pixelEnd.y + r;
}
// given two vertices of a right triangle and one of its angles, solve for the remaining vertex
pixelX = pixelTemX - r * Math.tan(angle);
pixelX1 = pixelTemX + r * Math.tan(angle);
pixelY = pixelY1 = pixelTemY;
} else {
// when the slope exists
delta = (pixelEnd.y - pixelStart.y) / (pixelEnd.x - pixelStart.x);
param = Math.sqrt(delta * delta + 1);
if (pixelEnd.x - pixelStart.x < 0) {
// second and third quadrants
pixelTemX = pixelEnd.x + r / param;
pixelTemY = pixelEnd.y + (delta * r) / param;
} else {
// first and fourth quadrants
pixelTemX = pixelEnd.x - r / param;
pixelTemY = pixelEnd.y - (delta * r) / param;
}
// given two vertices of a right triangle and one of its angles, solve for the remaining vertex
pixelX = pixelTemX + (Math.tan(angle) * r * delta) / param;
pixelY = pixelTemY - (Math.tan(angle) * r) / param;
pixelX1 = pixelTemX - (Math.tan(angle) * r * delta) / param;
pixelY1 = pixelTemY + (Math.tan(angle) * r) / param;
}
return {
leftArrowPoint: {
x: pixelX,
y: pixelY
},
rightArrowPoint: {
x: pixelX1,
y: pixelY1
}
};
}
/* Draw the arrow head from the computed points */
export function drawArrow(
ctx: CanvasRenderingContext2D,
arrowPoints: {
endPoint: IVertex;
leftArrowPoint: IVertex;
rightArrowPoint: IVertex;
},
options: ArrowOptions = defaultArrowOpitons
) {
const { endPoint, leftArrowPoint, rightArrowPoint } = arrowPoints;
// draw the first arrow line
ctx.beginPath();
ctx.strokeStyle = options.strokeStyle || defaultArrowOpitons.strokeStyle;
ctx.lineWidth = 1;
ctx.moveTo(endPoint.x, endPoint.y);
ctx.lineTo(leftArrowPoint.x, leftArrowPoint.y);
ctx.lineTo(rightArrowPoint.x, rightArrowPoint.y);
ctx.moveTo(rightArrowPoint.x, rightArrowPoint.y);
ctx.lineTo(endPoint.x, endPoint.y);
const grd = ctx.createLinearGradient(0, 0, endPoint.x, 0); // gradient fill from (0, 0) to (endPoint.x, 0), left to right
grd.addColorStop(0, options.colorFill[0] || defaultArrowOpitons.colorFill[0]); // start color
grd.addColorStop(1, options.colorFill[1] || defaultArrowOpitons.colorFill[1]); // end color
ctx.fillStyle = grd; // fill with the gradient defined above
ctx.fill(); // close the shape and paint it as a fill
ctx.stroke();
ctx.closePath();
ctx.restore();
ctx.save();
}
/**
* Draw a rounded rectangle
* @param ctx
* @param roundRect: DrawRoundRectConfig rounded-rectangle geometry config
* @param options : DrawRectOption rounded-rectangle style options
*/
export function drawRoundRect(
ctx:CanvasRenderingContext2D,
roundRect:DrawRoundRectConfig,
options:DrawRectOption
){
// start is the top-left corner vertex
const {
start,
width,
height,
radius,
} = roundRect;
const {x,y} = start;
ctx.beginPath();
ctx.moveTo(x, y);
// top edge
ctx.lineTo(x + width, y);
// top-right corner
ctx.arcTo(x + width + radius, y, x + width + radius, y + radius, radius);
// right edge
ctx.lineTo(x + width + radius, y + height + radius);
// bottom-right corner
ctx.arcTo(x + width + radius, y + height + 2 * radius, x + width, y + height + 2 * radius, radius);
// bottom edge
ctx.lineTo(x, y + height + 2 * radius);
// bottom-left corner
ctx.arcTo(x - radius, y + height + 2 * radius, x + -radius, y + height + radius, radius);
// left edge
ctx.lineTo(x - radius, y + radius);
// top-left corner
ctx.arcTo(x - radius, y, x, y, radius);
ctx.lineTo(x, y);
ctx.closePath();
ctx.fillStyle = options.fillColor;
ctx.lineWidth = options.weight;
ctx.strokeStyle = options.lineColor;
ctx.stroke();
ctx.fill();
}
| et[key] = options[key];
}
}
});
} | conditional_block |
utils.ts | import {
IVertex,
Ellipse,
DrawPolygonOption,
DrawLineOption,
DrawCircleOption,
DrawEllipseOption,
DrawRectOption,
LinearGradientOption,
DrawRoundRectConfig,
} from './layer.d';
/**
* Convert a hex color value to an rgba string
* @param hexColor ("#ffffff")
* @param opacity number
*/
export function hex2rgba(hexColor, opacity) {
const hex = hexColor.replace('#', '');
const r = parseInt(hex.substring(0, 2), 16);
const g = parseInt(hex.substring(2, 4), 16);
const b = parseInt(hex.substring(4, 6), 16);
const result = `rgba(${r}, ${g}, ${b}, ${opacity})`;
return result;
}
/**
*
* @param target {object}
*/
export function isObject(target) {
return (
Object.prototype.toString.call(target).toLowerCase() === '[object object]'
);
}
/**
*
* @param target {string | any}
*/
export function isString(target) {
return (
Object.prototype.toString.call(target).toLowerCase() === '[object string]'
);
}
/**
*
* @param target {object}
* @param options {object}
*/
export function mergeOptions(target, options) {
if (!isObject(target) || !isObject(options)) {
throw new Error('params must be object');
}
Object.keys(options).forEach(key => {
if (options[key] && !target[key]) {
target[key] = options[key];
return;
}
if (isObject(target[key]) && isObject(options[key])) {
mergeOptions(target[key], options[key]);
} else {
const isType =
Object.prototype.toString.call(options[key]).toLowerCase() ===
Object.prototype.toString.call(target[key]).toLowerCase();
if (!isType && target[key] != undefined) {
throw new Error(`params ${key} must be ${typeof target[key]}`);
} else {
target[key] = options[key];
}
}
});
}
/**
*
* @param ctx
* @param vertex
* @param radius
* @param options
*/
export const drawCircle = (
ctx: any,
vertex: IVertex,
radius,
options: DrawCircleOption
) => {
if (!options.hasOwnProperty('dashed') || options.dashed === false) {
ctx.setLineDash([]);
} else {
const _dashedConfig =
options.dashedConfig && options.dashedConfig.length
? options.dashedConfig
: [5, 5, 5];
ctx.setLineDash(_dashedConfig);
}
const { lineColor, weight, opacity, fillColor } = options;
ctx.beginPath();
ctx.arc(vertex.x, vertex.y, radius, 0, 2 * Math.PI, false);
ctx.fillStyle = hex2rgba(fillColor, opacity);
ctx.fill();
ctx.lineWidth = weight;
ctx.strokeStyle = lineColor;
ctx.stroke();
};
/**
*
* @param ctx
* @param vertexes
* @param drawPolyOption
*/
export const drawPolygon = (
ctx: any,
vertexes: IVertex[],
drawPolyOption: DrawPolygonOption
) => {
const { lineColor, fillColor, weight, opacity } = drawPolyOption;
ctx.fillStyle = hex2rgba(fillColor, opacity);
ctx.lineWidth = weight;
ctx.strokeStyle = hex2rgba(lineColor, opacity);
ctx.beginPath();
ctx.moveTo(vertexes[0].x, vertexes[0].y);
for (let i = 1; i < vertexes.length; i++) {
const { x, y } = vertexes[i];
ctx.lineTo(x, y);
}
ctx.lineTo(vertexes[0].x, vertexes[0].y);
ctx.closePath();
ctx.fill();
ctx.stroke();
};
/**
*
* @param ctx
* @param start
* @param end
* @param drawLineOption
*/
export const drawLine = (
ctx: any,
start: IVertex,
end: IVertex,
drawLineOption: DrawLineOption
): any => {
const { color, weight, opacity, strokeStyle, noStrokeStyle } = drawLineOption;
ctx.beginPath();
ctx.moveTo(start.x, start.y);
ctx.lineTo(end.x, end.y);
if (!noStrokeStyle) {
const style = hex2rgba(color, opacity);
ctx.strokeStyle = strokeStyle ? strokeStyle : style;
}
ctx.lineWidth = weight;
ctx.stroke();
return {
start,
end
};
};
export function drawRect(
ctx: any,
rect: [IVertex, IVertex],
drawRectOption: DrawRectOption
) {
const {
lineColor,
fillColor,
weight,
opacity,
dashed,
dashedConfig
} = drawRectOption;
ctx.beginPath();
// dashed-line settings
if (dashed) {
const _dashedConfig =
dashedConfig && dashedConfig.length ? dashedConfig : [5, 5, 5];
ctx.setLineDash(_dashedConfig);
} else {
ctx.setLineDash([]);
}
ctx.lineWidth = weight;
ctx.strokeStyle = hex2rgba(lineColor, opacity);
if (fillColor) { ctx.fillStyle = hex2rgba(fillColor, opacity) };
const [{ x: startX, y: startY }, { x: endX, y: endY }] = rect;
const width = endX - startX;
const height = endY - startY;
ctx.rect(startX, startY, width, height);
fillColor && ctx.fill()
ctx.stroke();
return rect;
}
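// Illustrative usage (assumed values): drawRect(ctx, [{ x: 10, y: 10 }, { x: 110, y: 60 }], opts)
// strokes (and, when opts.fillColor is set, fills) a 100 x 50 rectangle anchored at (10, 10).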
export function drawEllipse(
ctx: any,
ellipse: Ellipse,
drawEllipseOption: DrawEllipseOption
) {
const {
vertex: { x, y },
radius: { minorAxis, macroAxis }
} = ellipse;
const {
fillColor,
opacity,
linearFlag,
linearStartPoint,
linearEndPoint,
startRadius,
endRadius,
colorItems
} = drawEllipseOption;
ctx.save();
let grd = null;
if (linearFlag) {
grd = ctx.createLinearGradient(
linearStartPoint.x,
linearStartPoint.y,
linearEndPoint.x,
linearEndPoint.y
);
colorItems.forEach(item => {
grd.addColorStop(item.position, item.color);
});
} else {
grd = ctx.createRadialGradient(x, y, startRadius, x, y, endRadius);
colorItems.forEach(item => {
grd.addColorStop(item.position, item.color);
});
}
ctx.fillStyle = grd;
const step = minorAxis > macroAxis ? 1 / minorAxis : 1 / macroAxis;
ctx.beginPath();
ctx.moveTo(x + minorAxis, y);
for (let i = 0; i < 2 * Math.PI; i += step) {
ctx.lineTo(x + minorAxis * Math.cos(i), y + macroAxis * Math.sin(i));
}
ctx.closePath();
ctx.fill();
}
/**
 * Clear the full drawing surface
* @param ctx
* @param rect
*/
export const clearCanvas = (
ctx: any,
rect: { width: number; height: number }
) => {
const { width, height } = rect;
ctx.clearRect(0, 0, width, height);
};
/**
 * Convert a mouse event's offset coordinates into a vertex, scaled by zoom
 * @param event {mouseEvent}
 * @param zoom {number}
*/
export function transformVertex(event, zoom = 1): IVertex {
const { offsetX: x | Rect(vertex: IVertex, rect: [IVertex, IVertex]): boolean {
if (
vertex.x > rect[0].x &&
vertex.x < rect[1].x &&
vertex.y > rect[0].y &&
vertex.y < rect[1].y
) {
return true;
}
return false;
}
/**
 * Create a new linear-gradient region on the canvas context
* @param ctx
* @param option
*/
export const createLinearGradient = (
ctx: CanvasRenderingContext2D,
option: LinearGradientOption
) => {
const { scope, colorSteps } = option;
const [start, end] = scope;
const gradient = ctx.createLinearGradient(start.x, start.y, end.x, end.y);
colorSteps.forEach((step, index) => {
gradient.addColorStop(step.distance, step.color);
});
return gradient;
};
/**
 * Get the direction of the line connecting two points
 * @param start IVertex
 * @param end IVertex
*/
export function getDirection(start: IVertex, end: IVertex) {
let left: boolean = false;
let top: boolean = false;
let right: boolean = false;
let bottom: boolean = false;
if (start.x <= end.x) {
right = true;
} else {
left = true;
}
if (start.y <= end.y) {
bottom = true;
} else {
top = true;
}
return {
left,
top,
right,
bottom
};
}
/**
* @param start IVertex
* @param end IVertex
 * Given two points, get the slope of the line through them and its y-axis intercept
*/
export function getSlopeAndB(
start: IVertex,
end: IVertex
): { slope: number; b: number } {
const { y: y1, x: x1 } = start;
const { y: y2, x: x2 } = end;
const xDistance = x1 - x2;
const yDistance = y1 - y2;
const b = y1 - x1 * (yDistance / xDistance);
return {
b,
slope: yDistance / xDistance
};
}
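// Worked example (illustrative only): for start (0, 0) and end (2, 4) this returns
// { slope: 2, b: 0 }, i.e. the line y = 2x.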
/**
 * Compute the next point's coordinates from the slope equation (slope / y-intercept), the current step index and the total number of steps
* @param start: IVertex,
* @param end: IVertex,
* @param slope: number,
* @param stepIndex: number,
* @param totalStep: number,
* @param b: number
*/
export function getNextVertex(
start: IVertex,
end: IVertex,
slope: number,
stepIndex: number,
totalStep: number,
b: number
): IVertex {
const { y: y1, x: x1 } = start;
const { y: y2, x: x2 } = end;
const xDistance = Math.abs(x1 - x2) * (stepIndex / totalStep);
const direction = getDirection(start, end);
let x = x1 + xDistance;
if (direction.left) {
x = x1 - xDistance;
}
const y = slope * x + b;
return { x, y };
}
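// Worked example (illustrative only): stepping 2 of 10 along the segment from (0, 0)
// to (10, 10) with slope 1 and b 0 yields { x: 2, y: 2 }.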
export interface ArrowOptions {
strokeStyle: string;
colorFill: [string, string];
}
const defaultArrowOpitons: ArrowOptions = {
strokeStyle: '#68cdfa',
colorFill: ['#68cdfa', '#68cdfa']
};
/* Get the two points on either side of the arrow head */
export function getArrowPoint(
pixelStart: IVertex,
pixelEnd: IVertex,
length: number = 15
) {
// function for drawing the arrow head
// const length = 12;
const angleValue = Math.PI / 7;
const angle = angleValue; // angle between the arrow head and the main line
const r = length; // r / Math.sin(angle) represents the arrow-head length
let delta = 0; // slope of the main line; undefined when the line is vertical
let param = 0; // shared factor, kept separate for code brevity
let pixelTemX = 0;
let pixelTemY = 0; // temporary point coordinates
let pixelX = 0;
let pixelY = 0;
let pixelX1 = 0;
let pixelY1 = 0; // the two arrow-head points
if (pixelEnd.x - pixelStart.x === 0) {
// when the slope is undefined (vertical line)
pixelTemX = pixelEnd.x;
if (pixelEnd.y > pixelStart.y) {
pixelTemY = pixelEnd.y - r;
} else {
pixelTemY = pixelEnd.y + r;
}
// given two vertices of a right triangle and one of its angles, compute the remaining vertex
pixelX = pixelTemX - r * Math.tan(angle);
pixelX1 = pixelTemX + r * Math.tan(angle);
pixelY = pixelY1 = pixelTemY;
} else {
// when the slope exists
delta = (pixelEnd.y - pixelStart.y) / (pixelEnd.x - pixelStart.x);
param = Math.sqrt(delta * delta + 1);
if (pixelEnd.x - pixelStart.x < 0) {
// second and third quadrants
pixelTemX = pixelEnd.x + r / param;
pixelTemY = pixelEnd.y + (delta * r) / param;
} else {
// first and fourth quadrants
pixelTemX = pixelEnd.x - r / param;
pixelTemY = pixelEnd.y - (delta * r) / param;
}
// given two vertices of a right triangle and one of its angles, compute the remaining vertex
pixelX = pixelTemX + (Math.tan(angle) * r * delta) / param;
pixelY = pixelTemY - (Math.tan(angle) * r) / param;
pixelX1 = pixelTemX - (Math.tan(angle) * r * delta) / param;
pixelY1 = pixelTemY + (Math.tan(angle) * r) / param;
}
return {
leftArrowPoint: {
x: pixelX,
y: pixelY
},
rightArrowPoint: {
x: pixelX1,
y: pixelY1
}
};
}
/* Draw the arrow head from the computed arrow points */
export function drawArrow(
ctx: CanvasRenderingContext2D,
arrowPoints: {
endPoint: IVertex;
leftArrowPoint: IVertex;
rightArrowPoint: IVertex;
},
options: ArrowOptions = defaultArrowOpitons
) {
const { endPoint, leftArrowPoint, rightArrowPoint } = arrowPoints;
// draw the first arrow line
ctx.beginPath();
ctx.strokeStyle = options.strokeStyle || defaultArrowOpitons.strokeStyle;
ctx.lineWidth = 1;
ctx.moveTo(endPoint.x, endPoint.y);
ctx.lineTo(leftArrowPoint.x, leftArrowPoint.y);
ctx.lineTo(rightArrowPoint.x, rightArrowPoint.y);
ctx.moveTo(rightArrowPoint.x, rightArrowPoint.y);
ctx.lineTo(endPoint.x, endPoint.y);
const grd = ctx.createLinearGradient(0, 0, endPoint.x, 0); // fill with a gradient from (0, 0) to (endPoint.x, 0), left to right
grd.addColorStop(0, options.colorFill[0] || defaultArrowOpitons.colorFill[0]); // start color
grd.addColorStop(1, options.colorFill[1] || defaultArrowOpitons.colorFill[1]); // end color
ctx.fillStyle = grd; // fill with the gradient defined above
ctx.fill(); // close the shape and draw it filled
ctx.stroke();
ctx.closePath();
ctx.restore();
ctx.save();
}
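// Hypothetical usage sketch combining the two helpers above (variable names are assumptions):
//   const { leftArrowPoint, rightArrowPoint } = getArrowPoint(start, end);
//   drawArrow(ctx, { endPoint: end, leftArrowPoint, rightArrowPoint });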
/**
 * Draw a rounded rectangle
 * @param ctx
 * @param roundRect: DrawRoundRectConfig rounded-rectangle geometry config
 * @param options : DrawRectOption rounded-rectangle style
*/
export function drawRoundRect(
ctx:CanvasRenderingContext2D,
roundRect:DrawRoundRectConfig,
options:DrawRectOption
){
// start is the coordinate of the top-left vertex
const {
start,
width,
height,
radius,
} = roundRect;
const {x,y} = start;
ctx.beginPath();
ctx.moveTo(x, y);
// top edge
ctx.lineTo(x + width, y);
// top-right rounded corner
ctx.arcTo(x + width + radius, y, x + width + radius, y + radius, radius);
// right edge
ctx.lineTo(x + width + radius, y + height + radius);
// bottom-right rounded corner
ctx.arcTo(x + width + radius, y + height + 2 * radius, x + width, y + height + 2 * radius, radius);
// bottom edge
ctx.lineTo(x, y + height + 2 * radius);
// bottom-left rounded corner
ctx.arcTo(x - radius, y + height + 2 * radius, x - radius, y + height + radius, radius);
// left edge
ctx.lineTo(x - radius, y + radius);
// top-left rounded corner
ctx.arcTo(x - radius, y, x, y, radius);
ctx.lineTo(x, y);
ctx.closePath();
ctx.fillStyle = options.fillColor;
ctx.lineWidth = options.weight;
ctx.strokeStyle = options.lineColor;
ctx.stroke();
ctx.fill();
}
| , offsetY: y } = event;
return { x: x / zoom, y: y / zoom };
}
export function isIn | identifier_body |
v4-signature.go | /*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package signature4 implements helper functions to validate AWS
// Signature Version '4' authorization header.
//
// This package provides comprehensive helpers for following signature
// types.
// - Based on Authorization header.
// - Based on Query parameters.
// - Based on Form POST policy.
package signature4
import (
"bytes"
"encoding/hex"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256" |
// Sign - local variables
type Sign struct {
accessKeyID string
secretAccessKey string
region string
httpRequest *http.Request
extractedSignedHeaders http.Header
}
// AWS Signature Version '4' constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102"
)
// New - initializes a new authorization checker.
func New(accessKeyID, secretAccessKey, region string) (*Sign, *probe.Error) {
if !isValidAccessKey.MatchString(accessKeyID) {
return nil, ErrInvalidAccessKeyID("Invalid access key id.", accessKeyID).Trace(accessKeyID)
}
if !isValidSecretKey.MatchString(secretAccessKey) {
return nil, ErrInvalidAccessKeyID("Invalid secret key.", secretAccessKey).Trace(secretAccessKey)
}
if region == "" {
return nil, ErrRegionISEmpty("Region is empty.").Trace()
}
signature := &Sign{
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
region: region,
}
return signature, nil
}
// SetHTTPRequestToVerify - sets the http request which needs to be verified.
func (s *Sign) SetHTTPRequestToVerify(r *http.Request) *Sign {
// Do not set the http request if it is 'nil'.
if r == nil {
return s
}
s.httpRequest = r
return s
}
// getCanonicalHeaders generate a list of request headers with their values
func (s Sign) getCanonicalHeaders(signedHeaders http.Header) string {
var headers []string
vals := make(http.Header)
for k, vv := range signedHeaders {
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(s.httpRequest.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (s Sign) getSignedHeaders(signedHeaders http.Header) string {
var headers []string
for k := range signedHeaders {
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (s *Sign) getCanonicalRequest() string {
payload := s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
s.httpRequest.URL.RawQuery = strings.Replace(s.httpRequest.URL.Query().Encode(), "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
s.httpRequest.Method,
encodedPath,
s.httpRequest.URL.RawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders),
s.getSignedHeaders(s.extractedSignedHeaders),
payload,
}, "\n")
return canonicalRequest
}
// getPresignedCanonicalRequest generates a presigned canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (s Sign) getPresignedCanonicalRequest(presignedQuery string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
s.httpRequest.Method,
encodedPath,
rawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders),
s.getSignedHeaders(s.extractedSignedHeaders),
"UNSIGNED-PAYLOAD",
}, "\n")
return canonicalRequest
}
// getScope generate a string of a specific date, an AWS region, and a service.
func (s Sign) getScope(t time.Time) string {
scope := strings.Join([]string{
t.Format(yyyymmdd),
s.region,
"s3",
"aws4_request",
}, "/")
return scope
}
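// Illustrative example (not part of the original source): for 15 Sep 2015 and
// region "us-east-1" the scope string is "20150915/us-east-1/s3/aws4_request".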
// getStringToSign a string based on selected query values.
func (s Sign) getStringToSign(canonicalRequest string, t time.Time) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + s.getScope(t) + "\n"
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
return stringToSign
}
// getSigningKey hmac seed to calculate final signature.
func (s Sign) getSigningKey(t time.Time) []byte {
secret := s.secretAccessKey
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
region := sumHMAC(date, []byte(s.region))
service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey
}
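// Illustrative summary of the derivation above (restating the code, no new behaviour):
//   signingKey = HMAC(HMAC(HMAC(HMAC("AWS4"+secret, yyyymmdd), region), "s3"), "aws4_request")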
// getSignature final signature in hexadecimal form.
func (s Sign) getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
// DoesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPolicySignatureMatch(formValues map[string]string) (bool, *probe.Error) {
// Parse credential tag.
credential, err := parseCredential("Credential=" + formValues["X-Amz-Credential"])
if err != nil {
return false, err.Trace(formValues["X-Amz-Credential"])
}
// Verify if the access key id matches.
if credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", credential.accessKeyID).Trace(credential.accessKeyID)
}
// Verify if the region is valid.
reqRegion := credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Parse date string.
t, e := time.Parse(iso8601Format, formValues["X-Amz-Date"])
if e != nil {
return false, probe.NewError(e)
}
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, formValues["Policy"])
if newSignature != formValues["X-Amz-Signature"] {
return false, nil
}
return true, nil
}
// DoesPresignedSignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPresignedSignatureMatch() (bool, *probe.Error) {
// Parse request query string.
preSignValues, err := parsePreSignV4(s.httpRequest.URL.Query())
if err != nil {
return false, err.Trace(s.httpRequest.URL.String())
}
// Verify if the access key id matches.
if preSignValues.Credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", preSignValues.Credential.accessKeyID).Trace(preSignValues.Credential.accessKeyID)
}
// Verify if region is valid.
reqRegion := preSignValues.Credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(preSignValues.SignedHeaders, s.httpRequest.Header)
// Construct new query.
query := make(url.Values)
query.Set("X-Amz-Algorithm", signV4Algorithm)
if time.Now().UTC().Sub(preSignValues.Date) > time.Duration(preSignValues.Expires) {
return false, ErrExpiredPresignRequest("Presigned request already expired, please initiate a new request.")
}
// Save the date and expires.
t := preSignValues.Date
expireSeconds := int(time.Duration(preSignValues.Expires) / time.Second)
// Construct the query.
query.Set("X-Amz-Date", t.Format(iso8601Format))
query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
query.Set("X-Amz-SignedHeaders", s.getSignedHeaders(s.extractedSignedHeaders))
query.Set("X-Amz-Credential", s.accessKeyID+"/"+s.getScope(t))
// Save other headers available in the request parameters.
for k, v := range s.httpRequest.URL.Query() {
if strings.HasPrefix(strings.ToLower(k), "x-amz") {
continue
}
query[k] = v
}
// Get the encoded query.
encodedQuery := query.Encode()
// Verify if date query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
return false, nil
}
// Verify if expires query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
return false, nil
}
// Verify if signed headers query is same.
if s.httpRequest.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
return false, nil
}
// Verify if credential query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
return false, nil
}
// Verify finally if signature is same.
newSignature := s.getSignature(s.getSigningKey(t), s.getStringToSign(s.getPresignedCanonicalRequest(encodedQuery), t))
if s.httpRequest.URL.Query().Get("X-Amz-Signature") != newSignature {
return false, nil
}
return true, nil
}
// DoesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
// Save authorization header.
v4Auth := s.httpRequest.Header.Get("Authorization")
// Parse signature version '4' header.
signV4Values, err := parseSignV4(v4Auth)
if err != nil {
return false, err.Trace(v4Auth)
}
// Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(signV4Values.SignedHeaders, s.httpRequest.Header)
// Verify if the access key id matches.
if signV4Values.Credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", signV4Values.Credential.accessKeyID).Trace(signV4Values.Credential.accessKeyID)
}
// Verify if region is valid.
reqRegion := signV4Values.Credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Set input payload.
s.httpRequest.Header.Set("X-Amz-Content-Sha256", hashedPayload)
// Extract date, if not present throw error.
var date string
if date = s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = s.httpRequest.Header.Get("Date"); date == "" {
return false, ErrMissingDateHeader("Date header is missing from the request.").Trace()
}
}
// Parse date header.
t, e := time.Parse(iso8601Format, date)
if e != nil {
return false, probe.NewError(e)
}
// Signature version '4'.
canonicalRequest := s.getCanonicalRequest()
stringToSign := s.getStringToSign(canonicalRequest, t)
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, stringToSign)
// Verify if signature match.
if newSignature != signV4Values.Signature {
return false, nil
}
return true, nil
} | "github.com/minio/minio/pkg/probe"
) | random_line_split |
v4-signature.go | /*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package signature4 implements helper functions to validate AWS
// Signature Version '4' authorization header.
//
// This package provides comprehensive helpers for following signature
// types.
// - Based on Authorization header.
// - Based on Query parameters.
// - Based on Form POST policy.
package signature4
import (
"bytes"
"encoding/hex"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
)
// Sign - local variables
type Sign struct {
accessKeyID string
secretAccessKey string
region string
httpRequest *http.Request
extractedSignedHeaders http.Header
}
// AWS Signature Version '4' constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102"
)
// New - initializes a new authorization checker.
func New(accessKeyID, secretAccessKey, region string) (*Sign, *probe.Error) {
if !isValidAccessKey.MatchString(accessKeyID) {
return nil, ErrInvalidAccessKeyID("Invalid access key id.", accessKeyID).Trace(accessKeyID)
}
if !isValidSecretKey.MatchString(secretAccessKey) {
return nil, ErrInvalidAccessKeyID("Invalid secret key.", secretAccessKey).Trace(secretAccessKey)
}
if region == "" {
return nil, ErrRegionISEmpty("Region is empty.").Trace()
}
signature := &Sign{
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
region: region,
}
return signature, nil
}
// SetHTTPRequestToVerify - sets the http request which needs to be verified.
func (s *Sign) SetHTTPRequestToVerify(r *http.Request) *Sign {
// Do not set the http request if it is 'nil'.
if r == nil {
return s
}
s.httpRequest = r
return s
}
// getCanonicalHeaders generate a list of request headers with their values
func (s Sign) getCanonicalHeaders(signedHeaders http.Header) string {
var headers []string
vals := make(http.Header)
for k, vv := range signedHeaders {
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(s.httpRequest.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (s Sign) getSignedHeaders(signedHeaders http.Header) string {
var headers []string
for k := range signedHeaders {
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (s *Sign) getCanonicalRequest() string {
payload := s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
s.httpRequest.URL.RawQuery = strings.Replace(s.httpRequest.URL.Query().Encode(), "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
s.httpRequest.Method,
encodedPath,
s.httpRequest.URL.RawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders),
s.getSignedHeaders(s.extractedSignedHeaders),
payload,
}, "\n")
return canonicalRequest
}
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (s Sign) | (presignedQuery string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
s.httpRequest.Method,
encodedPath,
rawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders),
s.getSignedHeaders(s.extractedSignedHeaders),
"UNSIGNED-PAYLOAD",
}, "\n")
return canonicalRequest
}
// getScope generate a string of a specific date, an AWS region, and a service.
func (s Sign) getScope(t time.Time) string {
scope := strings.Join([]string{
t.Format(yyyymmdd),
s.region,
"s3",
"aws4_request",
}, "/")
return scope
}
// getStringToSign a string based on selected query values.
func (s Sign) getStringToSign(canonicalRequest string, t time.Time) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + s.getScope(t) + "\n"
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
return stringToSign
}
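// Illustrative layout of the string to sign (restating the code above):
//   AWS4-HMAC-SHA256\n<ISO8601 timestamp>\n<scope>\n<hex(sha256(canonicalRequest))>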
// getSigningKey hmac seed to calculate final signature.
func (s Sign) getSigningKey(t time.Time) []byte {
secret := s.secretAccessKey
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
region := sumHMAC(date, []byte(s.region))
service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey
}
// getSignature final signature in hexadecimal form.
func (s Sign) getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
// DoesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPolicySignatureMatch(formValues map[string]string) (bool, *probe.Error) {
// Parse credential tag.
credential, err := parseCredential("Credential=" + formValues["X-Amz-Credential"])
if err != nil {
return false, err.Trace(formValues["X-Amz-Credential"])
}
// Verify if the access key id matches.
if credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", credential.accessKeyID).Trace(credential.accessKeyID)
}
// Verify if the region is valid.
reqRegion := credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Parse date string.
t, e := time.Parse(iso8601Format, formValues["X-Amz-Date"])
if e != nil {
return false, probe.NewError(e)
}
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, formValues["Policy"])
if newSignature != formValues["X-Amz-Signature"] {
return false, nil
}
return true, nil
}
// DoesPresignedSignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPresignedSignatureMatch() (bool, *probe.Error) {
// Parse request query string.
preSignValues, err := parsePreSignV4(s.httpRequest.URL.Query())
if err != nil {
return false, err.Trace(s.httpRequest.URL.String())
}
// Verify if the access key id matches.
if preSignValues.Credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", preSignValues.Credential.accessKeyID).Trace(preSignValues.Credential.accessKeyID)
}
// Verify if region is valid.
reqRegion := preSignValues.Credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(preSignValues.SignedHeaders, s.httpRequest.Header)
// Construct new query.
query := make(url.Values)
query.Set("X-Amz-Algorithm", signV4Algorithm)
if time.Now().UTC().Sub(preSignValues.Date) > time.Duration(preSignValues.Expires) {
return false, ErrExpiredPresignRequest("Presigned request already expired, please initiate a new request.")
}
// Save the date and expires.
t := preSignValues.Date
expireSeconds := int(time.Duration(preSignValues.Expires) / time.Second)
// Construct the query.
query.Set("X-Amz-Date", t.Format(iso8601Format))
query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
query.Set("X-Amz-SignedHeaders", s.getSignedHeaders(s.extractedSignedHeaders))
query.Set("X-Amz-Credential", s.accessKeyID+"/"+s.getScope(t))
// Save other headers available in the request parameters.
for k, v := range s.httpRequest.URL.Query() {
if strings.HasPrefix(strings.ToLower(k), "x-amz") {
continue
}
query[k] = v
}
// Get the encoded query.
encodedQuery := query.Encode()
// Verify if date query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
return false, nil
}
// Verify if expires query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
return false, nil
}
// Verify if signed headers query is same.
if s.httpRequest.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
return false, nil
}
// Verify if credential query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
return false, nil
}
// Verify finally if signature is same.
newSignature := s.getSignature(s.getSigningKey(t), s.getStringToSign(s.getPresignedCanonicalRequest(encodedQuery), t))
if s.httpRequest.URL.Query().Get("X-Amz-Signature") != newSignature {
return false, nil
}
return true, nil
}
// DoesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
// Save authorization header.
v4Auth := s.httpRequest.Header.Get("Authorization")
// Parse signature version '4' header.
signV4Values, err := parseSignV4(v4Auth)
if err != nil {
return false, err.Trace(v4Auth)
}
// Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(signV4Values.SignedHeaders, s.httpRequest.Header)
// Verify if the access key id matches.
if signV4Values.Credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", signV4Values.Credential.accessKeyID).Trace(signV4Values.Credential.accessKeyID)
}
// Verify if region is valid.
reqRegion := signV4Values.Credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Set input payload.
s.httpRequest.Header.Set("X-Amz-Content-Sha256", hashedPayload)
// Extract date, if not present throw error.
var date string
if date = s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = s.httpRequest.Header.Get("Date"); date == "" {
return false, ErrMissingDateHeader("Date header is missing from the request.").Trace()
}
}
// Parse date header.
t, e := time.Parse(iso8601Format, date)
if e != nil {
return false, probe.NewError(e)
}
// Signature version '4'.
canonicalRequest := s.getCanonicalRequest()
stringToSign := s.getStringToSign(canonicalRequest, t)
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, stringToSign)
// Verify if signature match.
if newSignature != signV4Values.Signature {
return false, nil
}
return true, nil
}
| getPresignedCanonicalRequest | identifier_name |
v4-signature.go | /*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package signature4 implements helper functions to validate AWS
// Signature Version '4' authorization header.
//
// This package provides comprehensive helpers for following signature
// types.
// - Based on Authorization header.
// - Based on Query parameters.
// - Based on Form POST policy.
package signature4
import (
"bytes"
"encoding/hex"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
)
// Sign - local variables
type Sign struct {
accessKeyID string
secretAccessKey string
region string
httpRequest *http.Request
extractedSignedHeaders http.Header
}
// AWS Signature Version '4' constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102"
)
// New - initializes a new authorization checker.
func New(accessKeyID, secretAccessKey, region string) (*Sign, *probe.Error) {
if !isValidAccessKey.MatchString(accessKeyID) {
return nil, ErrInvalidAccessKeyID("Invalid access key id.", accessKeyID).Trace(accessKeyID)
}
if !isValidSecretKey.MatchString(secretAccessKey) {
return nil, ErrInvalidAccessKeyID("Invalid secret key.", secretAccessKey).Trace(secretAccessKey)
}
if region == "" {
return nil, ErrRegionISEmpty("Region is empty.").Trace()
}
signature := &Sign{
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
region: region,
}
return signature, nil
}
// SetHTTPRequestToVerify - sets the http request which needs to be verified.
func (s *Sign) SetHTTPRequestToVerify(r *http.Request) *Sign {
// Do not set the http request if it is 'nil'.
if r == nil {
return s
}
s.httpRequest = r
return s
}
// getCanonicalHeaders generate a list of request headers with their values
func (s Sign) getCanonicalHeaders(signedHeaders http.Header) string {
var headers []string
vals := make(http.Header)
for k, vv := range signedHeaders {
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(s.httpRequest.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (s Sign) getSignedHeaders(signedHeaders http.Header) string {
var headers []string
for k := range signedHeaders {
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (s *Sign) getCanonicalRequest() string {
payload := s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
s.httpRequest.URL.RawQuery = strings.Replace(s.httpRequest.URL.Query().Encode(), "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
s.httpRequest.Method,
encodedPath,
s.httpRequest.URL.RawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders),
s.getSignedHeaders(s.extractedSignedHeaders),
payload,
}, "\n")
return canonicalRequest
}
// getPresignedCanonicalRequest generates a presigned canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (s Sign) getPresignedCanonicalRequest(presignedQuery string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
s.httpRequest.Method,
encodedPath,
rawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders),
s.getSignedHeaders(s.extractedSignedHeaders),
"UNSIGNED-PAYLOAD",
}, "\n")
return canonicalRequest
}
// getScope generate a string of a specific date, an AWS region, and a service.
func (s Sign) getScope(t time.Time) string {
scope := strings.Join([]string{
t.Format(yyyymmdd),
s.region,
"s3",
"aws4_request",
}, "/")
return scope
}
// getStringToSign a string based on selected query values.
func (s Sign) getStringToSign(canonicalRequest string, t time.Time) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + s.getScope(t) + "\n"
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
return stringToSign
}
// getSigningKey hmac seed to calculate final signature.
func (s Sign) getSigningKey(t time.Time) []byte {
secret := s.secretAccessKey
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
region := sumHMAC(date, []byte(s.region))
service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey
}
// getSignature final signature in hexadecimal form.
func (s Sign) getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
// DoesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPolicySignatureMatch(formValues map[string]string) (bool, *probe.Error) {
// Parse credential tag.
credential, err := parseCredential("Credential=" + formValues["X-Amz-Credential"])
if err != nil {
return false, err.Trace(formValues["X-Amz-Credential"])
}
// Verify if the access key id matches.
if credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", credential.accessKeyID).Trace(credential.accessKeyID)
}
// Verify if the region is valid.
reqRegion := credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Parse date string.
t, e := time.Parse(iso8601Format, formValues["X-Amz-Date"])
if e != nil {
return false, probe.NewError(e)
}
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, formValues["Policy"])
if newSignature != formValues["X-Amz-Signature"] {
return false, nil
}
return true, nil
}
// DoesPresignedSignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPresignedSignatureMatch() (bool, *probe.Error) {
// Parse request query string.
preSignValues, err := parsePreSignV4(s.httpRequest.URL.Query())
if err != nil {
return false, err.Trace(s.httpRequest.URL.String())
}
// Verify if the access key id matches.
if preSignValues.Credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", preSignValues.Credential.accessKeyID).Trace(preSignValues.Credential.accessKeyID)
}
// Verify if region is valid.
reqRegion := preSignValues.Credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(preSignValues.SignedHeaders, s.httpRequest.Header)
// Construct new query.
query := make(url.Values)
query.Set("X-Amz-Algorithm", signV4Algorithm)
if time.Now().UTC().Sub(preSignValues.Date) > time.Duration(preSignValues.Expires) {
return false, ErrExpiredPresignRequest("Presigned request already expired, please initiate a new request.")
}
// Save the date and expires.
t := preSignValues.Date
expireSeconds := int(time.Duration(preSignValues.Expires) / time.Second)
// Construct the query.
query.Set("X-Amz-Date", t.Format(iso8601Format))
query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
query.Set("X-Amz-SignedHeaders", s.getSignedHeaders(s.extractedSignedHeaders))
query.Set("X-Amz-Credential", s.accessKeyID+"/"+s.getScope(t))
// Save other headers available in the request parameters.
for k, v := range s.httpRequest.URL.Query() {
if strings.HasPrefix(strings.ToLower(k), "x-amz") |
query[k] = v
}
// Get the encoded query.
encodedQuery := query.Encode()
// Verify if date query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
return false, nil
}
// Verify if expires query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
return false, nil
}
// Verify if signed headers query is same.
if s.httpRequest.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
return false, nil
}
// Verify if credential query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
return false, nil
}
// Verify finally if signature is same.
newSignature := s.getSignature(s.getSigningKey(t), s.getStringToSign(s.getPresignedCanonicalRequest(encodedQuery), t))
if s.httpRequest.URL.Query().Get("X-Amz-Signature") != newSignature {
return false, nil
}
return true, nil
}
// DoesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
// Save authorization header.
v4Auth := s.httpRequest.Header.Get("Authorization")
// Parse signature version '4' header.
signV4Values, err := parseSignV4(v4Auth)
if err != nil {
return false, err.Trace(v4Auth)
}
// Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(signV4Values.SignedHeaders, s.httpRequest.Header)
// Verify if the access key id matches.
if signV4Values.Credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", signV4Values.Credential.accessKeyID).Trace(signV4Values.Credential.accessKeyID)
}
// Verify if region is valid.
reqRegion := signV4Values.Credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Set input payload.
s.httpRequest.Header.Set("X-Amz-Content-Sha256", hashedPayload)
// Extract date, if not present throw error.
var date string
if date = s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = s.httpRequest.Header.Get("Date"); date == "" {
return false, ErrMissingDateHeader("Date header is missing from the request.").Trace()
}
}
// Parse date header.
t, e := time.Parse(iso8601Format, date)
if e != nil {
return false, probe.NewError(e)
}
// Signature version '4'.
canonicalRequest := s.getCanonicalRequest()
stringToSign := s.getStringToSign(canonicalRequest, t)
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, stringToSign)
// Verify if signature match.
if newSignature != signV4Values.Signature {
return false, nil
}
return true, nil
}
| {
continue
} | conditional_block |
v4-signature.go | /*
* Minio Cloud Storage, (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package signature4 implements helper functions to validate AWS
// Signature Version '4' authorization header.
//
// This package provides comprehensive helpers for following signature
// types.
// - Based on Authorization header.
// - Based on Query parameters.
// - Based on Form POST policy.
package signature4
import (
"bytes"
"encoding/hex"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio/pkg/crypto/sha256"
"github.com/minio/minio/pkg/probe"
)
// Sign - local variables
type Sign struct {
accessKeyID string
secretAccessKey string
region string
httpRequest *http.Request
extractedSignedHeaders http.Header
}
// AWS Signature Version '4' constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601Format = "20060102T150405Z"
yyyymmdd = "20060102"
)
// New - initializes a new authorization checker.
func New(accessKeyID, secretAccessKey, region string) (*Sign, *probe.Error) |
// SetHTTPRequestToVerify - sets the http request which needs to be verified.
func (s *Sign) SetHTTPRequestToVerify(r *http.Request) *Sign {
// Do not set the http request if it is 'nil'.
if r == nil {
return s
}
s.httpRequest = r
return s
}
// getCanonicalHeaders generate a list of request headers with their values
func (s Sign) getCanonicalHeaders(signedHeaders http.Header) string {
var headers []string
vals := make(http.Header)
for k, vv := range signedHeaders {
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(s.httpRequest.Host)
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(v)
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names
func (s Sign) getSignedHeaders(signedHeaders http.Header) string {
var headers []string
for k := range signedHeaders {
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
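// Illustrative example (not from the original source): for signed headers
// "X-Amz-Date" and "X-Amz-Content-Sha256" this returns
// "host;x-amz-content-sha256;x-amz-date".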
// getCanonicalRequest generate a canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (s *Sign) getCanonicalRequest() string {
payload := s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-content-sha256"))
s.httpRequest.URL.RawQuery = strings.Replace(s.httpRequest.URL.Query().Encode(), "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
s.httpRequest.Method,
encodedPath,
s.httpRequest.URL.RawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders),
s.getSignedHeaders(s.extractedSignedHeaders),
payload,
}, "\n")
return canonicalRequest
}
// getPresignedCanonicalRequest generates a presigned canonical request of style
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
//
func (s Sign) getPresignedCanonicalRequest(presignedQuery string) string {
rawQuery := strings.Replace(presignedQuery, "+", "%20", -1)
encodedPath := getURLEncodedName(s.httpRequest.URL.Path)
// Convert any space strings back to "+".
encodedPath = strings.Replace(encodedPath, "+", "%20", -1)
canonicalRequest := strings.Join([]string{
s.httpRequest.Method,
encodedPath,
rawQuery,
s.getCanonicalHeaders(s.extractedSignedHeaders),
s.getSignedHeaders(s.extractedSignedHeaders),
"UNSIGNED-PAYLOAD",
}, "\n")
return canonicalRequest
}
// getScope generate a string of a specific date, an AWS region, and a service.
func (s Sign) getScope(t time.Time) string {
scope := strings.Join([]string{
t.Format(yyyymmdd),
s.region,
"s3",
"aws4_request",
}, "/")
return scope
}
// getStringToSign a string based on selected query values.
func (s Sign) getStringToSign(canonicalRequest string, t time.Time) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + s.getScope(t) + "\n"
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
return stringToSign
}
// getSigningKey hmac seed to calculate final signature.
func (s Sign) getSigningKey(t time.Time) []byte {
secret := s.secretAccessKey
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
region := sumHMAC(date, []byte(s.region))
service := sumHMAC(region, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey
}
// getSignature final signature in hexadecimal form.
func (s Sign) getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
// DoesPolicySignatureMatch - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPolicySignatureMatch(formValues map[string]string) (bool, *probe.Error) {
// Parse credential tag.
credential, err := parseCredential("Credential=" + formValues["X-Amz-Credential"])
if err != nil {
return false, err.Trace(formValues["X-Amz-Credential"])
}
// Verify if the access key id matches.
if credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", credential.accessKeyID).Trace(credential.accessKeyID)
}
// Verify if the region is valid.
reqRegion := credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Parse date string.
t, e := time.Parse(iso8601Format, formValues["X-Amz-Date"])
if e != nil {
return false, probe.NewError(e)
}
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, formValues["Policy"])
if newSignature != formValues["X-Amz-Signature"] {
return false, nil
}
return true, nil
}
// DoesPresignedSignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesPresignedSignatureMatch() (bool, *probe.Error) {
// Parse request query string.
preSignValues, err := parsePreSignV4(s.httpRequest.URL.Query())
if err != nil {
return false, err.Trace(s.httpRequest.URL.String())
}
// Verify if the access key id matches.
if preSignValues.Credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", preSignValues.Credential.accessKeyID).Trace(preSignValues.Credential.accessKeyID)
}
// Verify if region is valid.
reqRegion := preSignValues.Credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(preSignValues.SignedHeaders, s.httpRequest.Header)
// Construct new query.
query := make(url.Values)
query.Set("X-Amz-Algorithm", signV4Algorithm)
if time.Now().UTC().Sub(preSignValues.Date) > time.Duration(preSignValues.Expires) {
return false, ErrExpiredPresignRequest("Presigned request already expired, please initiate a new request.")
}
// Save the date and expires.
t := preSignValues.Date
expireSeconds := int(time.Duration(preSignValues.Expires) / time.Second)
// Construct the query.
query.Set("X-Amz-Date", t.Format(iso8601Format))
query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds))
query.Set("X-Amz-SignedHeaders", s.getSignedHeaders(s.extractedSignedHeaders))
query.Set("X-Amz-Credential", s.accessKeyID+"/"+s.getScope(t))
// Save other headers available in the request parameters.
for k, v := range s.httpRequest.URL.Query() {
if strings.HasPrefix(strings.ToLower(k), "x-amz") {
continue
}
query[k] = v
}
// Get the encoded query.
encodedQuery := query.Encode()
// Verify if date query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
return false, nil
}
// Verify if expires query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
return false, nil
}
// Verify if signed headers query is same.
if s.httpRequest.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
return false, nil
}
// Verify if credential query is same.
if s.httpRequest.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
return false, nil
}
// Verify finally if signature is same.
newSignature := s.getSignature(s.getSigningKey(t), s.getStringToSign(s.getPresignedCanonicalRequest(encodedQuery), t))
if s.httpRequest.URL.Query().Get("X-Amz-Signature") != newSignature {
return false, nil
}
return true, nil
}
// DoesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func (s *Sign) DoesSignatureMatch(hashedPayload string) (bool, *probe.Error) {
// Save authorization header.
v4Auth := s.httpRequest.Header.Get("Authorization")
// Parse signature version '4' header.
signV4Values, err := parseSignV4(v4Auth)
if err != nil {
return false, err.Trace(v4Auth)
}
// Extract all the signed headers along with its values.
s.extractedSignedHeaders = extractSignedHeaders(signV4Values.SignedHeaders, s.httpRequest.Header)
// Verify if the access key id matches.
if signV4Values.Credential.accessKeyID != s.accessKeyID {
return false, ErrInvalidAccessKeyID("Access key id does not match with our records.", signV4Values.Credential.accessKeyID).Trace(signV4Values.Credential.accessKeyID)
}
// Verify if region is valid.
reqRegion := signV4Values.Credential.scope.region
if !isValidRegion(reqRegion, s.region) {
return false, ErrInvalidRegion("Requested region is not recognized.", reqRegion).Trace(reqRegion)
}
// Save region.
s.region = reqRegion
// Set input payload.
s.httpRequest.Header.Set("X-Amz-Content-Sha256", hashedPayload)
// Extract date, if not present throw error.
var date string
if date = s.httpRequest.Header.Get(http.CanonicalHeaderKey("x-amz-date")); date == "" {
if date = s.httpRequest.Header.Get("Date"); date == "" {
return false, ErrMissingDateHeader("Date header is missing from the request.").Trace()
}
}
// Parse date header.
t, e := time.Parse(iso8601Format, date)
if e != nil {
return false, probe.NewError(e)
}
// Signature version '4'.
canonicalRequest := s.getCanonicalRequest()
stringToSign := s.getStringToSign(canonicalRequest, t)
signingKey := s.getSigningKey(t)
newSignature := s.getSignature(signingKey, stringToSign)
// Verify if signature match.
if newSignature != signV4Values.Signature {
return false, nil
}
return true, nil
}
| {
if !isValidAccessKey.MatchString(accessKeyID) {
return nil, ErrInvalidAccessKeyID("Invalid access key id.", accessKeyID).Trace(accessKeyID)
}
if !isValidSecretKey.MatchString(secretAccessKey) {
return nil, ErrInvalidAccessKeyID("Invalid secret key.", secretAccessKey).Trace(secretAccessKey)
}
if region == "" {
return nil, ErrRegionISEmpty("Region is empty.").Trace()
}
signature := &Sign{
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
region: region,
}
return signature, nil
} | identifier_body |
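The presigned-request check above leans on getSigningKey, getStringToSign and getSignature, which sit outside this excerpt. The sketch below shows the standard AWS Signature Version 4 key-derivation chain such helpers typically implement; the function names, the hard-coded "s3" service and the date layout are assumptions, not this package's actual API.

package sigv4sketch

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"time"
)

// sumHMAC returns the HMAC-SHA256 of data keyed with key.
func sumHMAC(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// deriveSigningKey chains HMACs over date, region and service, starting from
// "AWS4" + secret key, as described in the SigV4 documentation.
func deriveSigningKey(secretKey, region string, t time.Time) []byte {
	dateKey := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format("20060102")))
	regionKey := sumHMAC(dateKey, []byte(region))
	serviceKey := sumHMAC(regionKey, []byte("s3"))
	return sumHMAC(serviceKey, []byte("aws4_request"))
}

// sign hex-encodes the HMAC of the string to sign under the derived key.
func sign(signingKey []byte, stringToSign string) string {
	return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}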
policy_row.ts | // Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/js/action_link.js';
import './policy_conflict.js';
import './strings.m.js';
import {CustomElement} from 'chrome://resources/js/custom_element.js';
import {loadTimeData} from 'chrome://resources/js/load_time_data.js';
import {Conflict, PolicyConflictElement} from './policy_conflict.js';
import {getTemplate} from './policy_row.html.js';
export interface Policy {
ignored?: boolean;
name: string;
level: string;
link?: string;
scope: string;
source: string;
error: string;
warning: string;
info: string;
value: any;
deprecated?: boolean;
future?: boolean;
allSourcesMerged?: boolean;
conflicts?: Conflict[];
superseded?: Conflict[];
forSigninScreen: boolean;
isExtension: boolean;
}
export class | extends CustomElement {
static override get template() {
return getTemplate();
}
policy: Policy;
private unset_: boolean;
private hasErrors_: boolean;
private hasWarnings_: boolean;
private hasInfos_: boolean;
private hasConflicts_: boolean;
private hasSuperseded_: boolean;
private isMergedValue_: boolean;
private deprecated_: boolean;
private future_: boolean;
connectedCallback() {
const toggle = this.shadowRoot!.querySelector('.policy.row .toggle');
toggle!.addEventListener('click', () => this.toggleExpanded());
const copy = this.shadowRoot!.querySelector('.copy-value');
copy!.addEventListener('click', () => this.copyValue_());
this.setAttribute('role', 'rowgroup');
this.classList.add('policy-data');
}
initialize(policy: Policy) {
this.policy = policy;
this.unset_ = policy.value === undefined;
this.hasErrors_ = !!policy.error;
this.hasWarnings_ = !!policy.warning;
this.hasInfos_ = !!policy.info;
this.hasConflicts_ = !!policy.conflicts;
this.hasSuperseded_ = !!policy.superseded;
this.isMergedValue_ = !!policy.allSourcesMerged;
this.deprecated_ = !!policy.deprecated;
this.future_ = !!policy.future;
// Populate the name column.
const nameDisplay = this.shadowRoot!.querySelector('.name .link span');
nameDisplay!.textContent = policy.name;
if (policy.link) {
const link =
this.shadowRoot!.querySelector('.name .link') as HTMLAnchorElement;
link.href = policy.link;
link.title = loadTimeData.getStringF('policyLearnMore', policy.name);
this.toggleAttribute('no-help-link', false);
} else {
this.toggleAttribute('no-help-link', true);
}
// Populate the remaining columns with policy scope, level and value if a
// value has been set. Otherwise, leave them blank.
if (!this.unset_) {
const scopeDisplay = this.shadowRoot!.querySelector('.scope');
let scope = 'scopeDevice';
if (policy.scope === 'user') {
scope = 'scopeUser';
} else if (policy.scope === 'allUsers') {
scope = 'scopeAllUsers';
}
scopeDisplay!.textContent = loadTimeData.getString(scope);
// Display scope and level as rows instead of columns on space-constrained
// devices.
// <if expr="is_android or is_ios">
const scopeRowContentDisplay =
this.shadowRoot!.querySelector('.scope.row .value');
scopeRowContentDisplay!.textContent = loadTimeData.getString(scope);
const levelRowContentDisplay =
this.shadowRoot!.querySelector('.level.row .value');
levelRowContentDisplay!.textContent = loadTimeData.getString(
policy.level === 'recommended' ? 'levelRecommended' :
'levelMandatory');
// </if>
const levelDisplay = this.shadowRoot!.querySelector('.level');
levelDisplay!.textContent = loadTimeData.getString(
policy.level === 'recommended' ? 'levelRecommended' :
'levelMandatory');
const sourceDisplay = this.shadowRoot!.querySelector('.source');
sourceDisplay!.textContent = loadTimeData.getString(policy.source);
// Reduces load on the DOM for long values;
const convertValue = (value: string, format?: boolean) => {
// Skip 'string' policy to avoid unnecessary conversions.
if (typeof value == 'string') {
return value;
}
if (format) {
return JSON.stringify(value, null, 2);
} else {
return JSON.stringify(value, null);
}
};
// If value is longer than 256 characters, truncate and add ellipsis.
const policyValueStr = convertValue(policy.value);
const truncatedValue = policyValueStr.length > 256 ?
`${policyValueStr.substring(0, 256)}\u2026` :
policyValueStr;
const valueDisplay = this.shadowRoot!.querySelector('.value');
valueDisplay!.textContent = truncatedValue;
const copyLink =
this.shadowRoot!.querySelector('.copy .link') as HTMLElement;
copyLink!.title = loadTimeData.getStringF('policyCopyValue', policy.name);
const valueRowContentDisplay =
this.shadowRoot!.querySelector('.value.row .value');
// Expanded policy value is formatted.
valueRowContentDisplay!.textContent =
convertValue(policy.value, /*format=*/ true);
const errorRowContentDisplay =
this.shadowRoot!.querySelector('.errors.row .value');
errorRowContentDisplay!.textContent = policy.error;
const warningRowContentDisplay =
this.shadowRoot!.querySelector('.warnings.row .value');
warningRowContentDisplay!.textContent = policy.warning;
const infoRowContentDisplay =
this.shadowRoot!.querySelector('.infos.row .value');
infoRowContentDisplay!.textContent = policy.info;
const messagesDisplay = this.shadowRoot!.querySelector('.messages');
const errorsNotice =
this.hasErrors_ ? loadTimeData.getString('error') : '';
const deprecationNotice =
this.deprecated_ ? loadTimeData.getString('deprecated') : '';
const futureNotice = this.future_ ? loadTimeData.getString('future') : '';
const warningsNotice =
this.hasWarnings_ ? loadTimeData.getString('warning') : '';
const conflictsNotice = this.hasConflicts_ && !this.isMergedValue_ ?
loadTimeData.getString('conflict') :
'';
const ignoredNotice =
this.policy.ignored ? loadTimeData.getString('ignored') : '';
let notice =
[
errorsNotice,
deprecationNotice,
futureNotice,
warningsNotice,
ignoredNotice,
conflictsNotice,
].filter(x => !!x)
.join(', ') ||
loadTimeData.getString('ok');
const supersededNotice = this.hasSuperseded_ && !this.isMergedValue_ ?
loadTimeData.getString('superseding') :
'';
if (supersededNotice) {
// Include superseded notice regardless of other notices
notice += `, ${supersededNotice}`;
}
messagesDisplay!.textContent = notice;
if (policy.conflicts) {
policy.conflicts.forEach(conflict => {
const row = document.createElement('policy-conflict') as
PolicyConflictElement;
row.initialize(conflict, 'conflictValue');
row.classList!.add('policy-conflict-data');
this.shadowRoot!.appendChild(row);
});
}
if (policy.superseded) {
policy.superseded.forEach(superseded => {
const row = document.createElement('policy-conflict') as
PolicyConflictElement;
row.initialize(superseded, 'supersededValue');
row.classList!.add('policy-superseded-data');
this.shadowRoot!.appendChild(row);
});
}
} else {
const messagesDisplay = this.shadowRoot!.querySelector('.messages');
messagesDisplay!.textContent = loadTimeData.getString('unset');
}
}
// Copies the policy's value to the clipboard.
private copyValue_() {
const policyValueDisplay =
this.shadowRoot!.querySelector('.value.row .value');
// Select the text that will be copied.
const selection = window.getSelection();
const range = window.document.createRange();
range.selectNodeContents(policyValueDisplay as Node);
selection!.removeAllRanges();
selection!.addRange(range);
// Copy the policy value to the clipboard.
navigator.clipboard
.writeText((policyValueDisplay as CustomElement)!.innerText)
.catch(error => {
console.error('Unable to copy policy value to clipboard:', error);
});
}
// Toggle the visibility of an additional row containing the complete text.
private toggleExpanded() {
const warningRowDisplay =
this.shadowRoot!.querySelector('.warnings.row') as CustomElement;
const errorRowDisplay =
this.shadowRoot!.querySelector('.errors.row') as CustomElement;
const infoRowDisplay =
this.shadowRoot!.querySelector('.infos.row') as CustomElement;
const valueRowDisplay =
this.shadowRoot!.querySelector('.value.row') as CustomElement;
// <if expr="is_android or is_ios">
const scopeRowDisplay =
this.shadowRoot!.querySelector('.scope.row') as CustomElement;
scopeRowDisplay.hidden = !scopeRowDisplay.hidden;
const levelRowDisplay =
this.shadowRoot!.querySelector('.level.row') as CustomElement;
levelRowDisplay.hidden = !levelRowDisplay.hidden;
// </if>
valueRowDisplay.hidden = !valueRowDisplay.hidden;
this.classList!.toggle('expanded', !valueRowDisplay.hidden);
this.shadowRoot!.querySelector<CustomElement>('.show-more')!.hidden =
!valueRowDisplay.hidden;
this.shadowRoot!.querySelector<CustomElement>('.show-less')!.hidden =
valueRowDisplay!.hidden;
if (this.hasWarnings_) {
warningRowDisplay!.hidden = !warningRowDisplay.hidden;
}
if (this.hasErrors_) {
errorRowDisplay!.hidden = !errorRowDisplay.hidden;
}
if (this.hasInfos_) {
infoRowDisplay!.hidden = !infoRowDisplay.hidden;
}
this.shadowRoot!.querySelectorAll<HTMLElement>('.policy-conflict-data')!
.forEach(row => row!.hidden = !row.hidden);
this.shadowRoot!.querySelectorAll<HTMLElement>('.policy-superseded-data')!
.forEach(row => row!.hidden = !row.hidden);
}
}
declare global {
interface HTMLElementTagNameMap {
'policy-row': PolicyRowElement;
}
}
customElements.define('policy-row', PolicyRowElement);
| PolicyRowElement | identifier_name |
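As a usage sketch for the class above: a row can be created and populated the same way this file itself creates policy-conflict rows. The import path and the '.policy-table' container are assumptions, and the Policy payload is illustrative, filling only the required fields.

import {Policy, PolicyRowElement} from './policy_row.js';

// Illustrative policy payload; values are placeholders, not real policy data.
const examplePolicy: Policy = {
  name: 'HomepageLocation',
  level: 'mandatory',
  scope: 'user',
  source: 'cloud',
  error: '',
  warning: '',
  info: '',
  value: 'https://example.com',
  forSigninScreen: false,
  isExtension: false,
};

const row = document.createElement('policy-row') as PolicyRowElement;
row.initialize(examplePolicy);
// Attach the row to whatever table container the page uses (assumed selector).
document.querySelector('.policy-table')?.appendChild(row);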
policy_row.ts | // Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/js/action_link.js';
import './policy_conflict.js';
import './strings.m.js';
import {CustomElement} from 'chrome://resources/js/custom_element.js';
import {loadTimeData} from 'chrome://resources/js/load_time_data.js';
import {Conflict, PolicyConflictElement} from './policy_conflict.js';
import {getTemplate} from './policy_row.html.js';
export interface Policy {
ignored?: boolean;
name: string;
level: string;
link?: string;
scope: string;
source: string;
error: string;
warning: string;
info: string;
value: any;
deprecated?: boolean;
future?: boolean;
allSourcesMerged?: boolean;
conflicts?: Conflict[];
superseded?: Conflict[];
forSigninScreen: boolean;
isExtension: boolean;
}
export class PolicyRowElement extends CustomElement {
static override get template() {
return getTemplate();
}
policy: Policy;
private unset_: boolean;
private hasErrors_: boolean;
private hasWarnings_: boolean;
private hasInfos_: boolean;
private hasConflicts_: boolean;
private hasSuperseded_: boolean;
private isMergedValue_: boolean;
private deprecated_: boolean;
private future_: boolean;
connectedCallback() {
const toggle = this.shadowRoot!.querySelector('.policy.row .toggle');
toggle!.addEventListener('click', () => this.toggleExpanded());
const copy = this.shadowRoot!.querySelector('.copy-value');
copy!.addEventListener('click', () => this.copyValue_());
this.setAttribute('role', 'rowgroup');
this.classList.add('policy-data');
}
initialize(policy: Policy) {
this.policy = policy;
this.unset_ = policy.value === undefined;
this.hasErrors_ = !!policy.error;
this.hasWarnings_ = !!policy.warning;
this.hasInfos_ = !!policy.info;
this.hasConflicts_ = !!policy.conflicts;
this.hasSuperseded_ = !!policy.superseded;
this.isMergedValue_ = !!policy.allSourcesMerged;
this.deprecated_ = !!policy.deprecated;
this.future_ = !!policy.future;
// Populate the name column.
const nameDisplay = this.shadowRoot!.querySelector('.name .link span');
nameDisplay!.textContent = policy.name;
if (policy.link) {
const link =
this.shadowRoot!.querySelector('.name .link') as HTMLAnchorElement;
link.href = policy.link;
link.title = loadTimeData.getStringF('policyLearnMore', policy.name);
this.toggleAttribute('no-help-link', false);
} else {
this.toggleAttribute('no-help-link', true);
}
// Populate the remaining columns with policy scope, level and value if a
// value has been set. Otherwise, leave them blank.
if (!this.unset_) {
const scopeDisplay = this.shadowRoot!.querySelector('.scope');
let scope = 'scopeDevice';
if (policy.scope === 'user') {
scope = 'scopeUser';
} else if (policy.scope === 'allUsers') {
scope = 'scopeAllUsers';
}
scopeDisplay!.textContent = loadTimeData.getString(scope);
// Display scope and level as rows instead of columns on space-constrained
// devices.
// <if expr="is_android or is_ios">
const scopeRowContentDisplay =
this.shadowRoot!.querySelector('.scope.row .value');
scopeRowContentDisplay!.textContent = loadTimeData.getString(scope);
const levelRowContentDisplay =
this.shadowRoot!.querySelector('.level.row .value');
levelRowContentDisplay!.textContent = loadTimeData.getString(
policy.level === 'recommended' ? 'levelRecommended' :
'levelMandatory');
// </if>
const levelDisplay = this.shadowRoot!.querySelector('.level');
levelDisplay!.textContent = loadTimeData.getString(
policy.level === 'recommended' ? 'levelRecommended' :
'levelMandatory');
const sourceDisplay = this.shadowRoot!.querySelector('.source');
sourceDisplay!.textContent = loadTimeData.getString(policy.source);
// Reduces load on the DOM for long values;
const convertValue = (value: string, format?: boolean) => {
// Skip 'string' policy to avoid unnecessary conversions.
if (typeof value == 'string') |
if (format) {
return JSON.stringify(value, null, 2);
} else {
return JSON.stringify(value, null);
}
};
// If value is longer than 256 characters, truncate and add ellipsis.
const policyValueStr = convertValue(policy.value);
const truncatedValue = policyValueStr.length > 256 ?
`${policyValueStr.substring(0, 256)}\u2026` :
policyValueStr;
const valueDisplay = this.shadowRoot!.querySelector('.value');
valueDisplay!.textContent = truncatedValue;
const copyLink =
this.shadowRoot!.querySelector('.copy .link') as HTMLElement;
copyLink!.title = loadTimeData.getStringF('policyCopyValue', policy.name);
const valueRowContentDisplay =
this.shadowRoot!.querySelector('.value.row .value');
// Expanded policy value is formatted.
valueRowContentDisplay!.textContent =
convertValue(policy.value, /*format=*/ true);
const errorRowContentDisplay =
this.shadowRoot!.querySelector('.errors.row .value');
errorRowContentDisplay!.textContent = policy.error;
const warningRowContentDisplay =
this.shadowRoot!.querySelector('.warnings.row .value');
warningRowContentDisplay!.textContent = policy.warning;
const infoRowContentDisplay =
this.shadowRoot!.querySelector('.infos.row .value');
infoRowContentDisplay!.textContent = policy.info;
const messagesDisplay = this.shadowRoot!.querySelector('.messages');
const errorsNotice =
this.hasErrors_ ? loadTimeData.getString('error') : '';
const deprecationNotice =
this.deprecated_ ? loadTimeData.getString('deprecated') : '';
const futureNotice = this.future_ ? loadTimeData.getString('future') : '';
const warningsNotice =
this.hasWarnings_ ? loadTimeData.getString('warning') : '';
const conflictsNotice = this.hasConflicts_ && !this.isMergedValue_ ?
loadTimeData.getString('conflict') :
'';
const ignoredNotice =
this.policy.ignored ? loadTimeData.getString('ignored') : '';
let notice =
[
errorsNotice,
deprecationNotice,
futureNotice,
warningsNotice,
ignoredNotice,
conflictsNotice,
].filter(x => !!x)
.join(', ') ||
loadTimeData.getString('ok');
const supersededNotice = this.hasSuperseded_ && !this.isMergedValue_ ?
loadTimeData.getString('superseding') :
'';
if (supersededNotice) {
// Include superseded notice regardless of other notices
notice += `, ${supersededNotice}`;
}
messagesDisplay!.textContent = notice;
if (policy.conflicts) {
policy.conflicts.forEach(conflict => {
const row = document.createElement('policy-conflict') as
PolicyConflictElement;
row.initialize(conflict, 'conflictValue');
row.classList!.add('policy-conflict-data');
this.shadowRoot!.appendChild(row);
});
}
if (policy.superseded) {
policy.superseded.forEach(superseded => {
const row = document.createElement('policy-conflict') as
PolicyConflictElement;
row.initialize(superseded, 'supersededValue');
row.classList!.add('policy-superseded-data');
this.shadowRoot!.appendChild(row);
});
}
} else {
const messagesDisplay = this.shadowRoot!.querySelector('.messages');
messagesDisplay!.textContent = loadTimeData.getString('unset');
}
}
// Copies the policy's value to the clipboard.
private copyValue_() {
const policyValueDisplay =
this.shadowRoot!.querySelector('.value.row .value');
// Select the text that will be copied.
const selection = window.getSelection();
const range = window.document.createRange();
range.selectNodeContents(policyValueDisplay as Node);
selection!.removeAllRanges();
selection!.addRange(range);
// Copy the policy value to the clipboard.
navigator.clipboard
.writeText((policyValueDisplay as CustomElement)!.innerText)
.catch(error => {
console.error('Unable to copy policy value to clipboard:', error);
});
}
// Toggle the visibility of an additional row containing the complete text.
private toggleExpanded() {
const warningRowDisplay =
this.shadowRoot!.querySelector('.warnings.row') as CustomElement;
const errorRowDisplay =
this.shadowRoot!.querySelector('.errors.row') as CustomElement;
const infoRowDisplay =
this.shadowRoot!.querySelector('.infos.row') as CustomElement;
const valueRowDisplay =
this.shadowRoot!.querySelector('.value.row') as CustomElement;
// <if expr="is_android or is_ios">
const scopeRowDisplay =
this.shadowRoot!.querySelector('.scope.row') as CustomElement;
scopeRowDisplay.hidden = !scopeRowDisplay.hidden;
const levelRowDisplay =
this.shadowRoot!.querySelector('.level.row') as CustomElement;
levelRowDisplay.hidden = !levelRowDisplay.hidden;
// </if>
valueRowDisplay.hidden = !valueRowDisplay.hidden;
this.classList!.toggle('expanded', !valueRowDisplay.hidden);
this.shadowRoot!.querySelector<CustomElement>('.show-more')!.hidden =
!valueRowDisplay.hidden;
this.shadowRoot!.querySelector<CustomElement>('.show-less')!.hidden =
valueRowDisplay!.hidden;
if (this.hasWarnings_) {
warningRowDisplay!.hidden = !warningRowDisplay.hidden;
}
if (this.hasErrors_) {
errorRowDisplay!.hidden = !errorRowDisplay.hidden;
}
if (this.hasInfos_) {
infoRowDisplay!.hidden = !infoRowDisplay.hidden;
}
this.shadowRoot!.querySelectorAll<HTMLElement>('.policy-conflict-data')!
.forEach(row => row!.hidden = !row.hidden);
this.shadowRoot!.querySelectorAll<HTMLElement>('.policy-superseded-data')!
.forEach(row => row!.hidden = !row.hidden);
}
}
declare global {
interface HTMLElementTagNameMap {
'policy-row': PolicyRowElement;
}
}
customElements.define('policy-row', PolicyRowElement);
| {
return value;
} | conditional_block |
policy_row.ts | // Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/js/action_link.js';
import './policy_conflict.js';
import './strings.m.js';
import {CustomElement} from 'chrome://resources/js/custom_element.js';
import {loadTimeData} from 'chrome://resources/js/load_time_data.js';
import {Conflict, PolicyConflictElement} from './policy_conflict.js';
import {getTemplate} from './policy_row.html.js';
export interface Policy {
ignored?: boolean;
name: string;
level: string;
link?: string;
scope: string;
source: string;
error: string;
warning: string;
info: string;
value: any;
deprecated?: boolean;
future?: boolean;
allSourcesMerged?: boolean;
conflicts?: Conflict[];
superseded?: Conflict[];
forSigninScreen: boolean;
isExtension: boolean;
}
export class PolicyRowElement extends CustomElement {
static override get template() |
policy: Policy;
private unset_: boolean;
private hasErrors_: boolean;
private hasWarnings_: boolean;
private hasInfos_: boolean;
private hasConflicts_: boolean;
private hasSuperseded_: boolean;
private isMergedValue_: boolean;
private deprecated_: boolean;
private future_: boolean;
connectedCallback() {
const toggle = this.shadowRoot!.querySelector('.policy.row .toggle');
toggle!.addEventListener('click', () => this.toggleExpanded());
const copy = this.shadowRoot!.querySelector('.copy-value');
copy!.addEventListener('click', () => this.copyValue_());
this.setAttribute('role', 'rowgroup');
this.classList.add('policy-data');
}
initialize(policy: Policy) {
this.policy = policy;
this.unset_ = policy.value === undefined;
this.hasErrors_ = !!policy.error;
this.hasWarnings_ = !!policy.warning;
this.hasInfos_ = !!policy.info;
this.hasConflicts_ = !!policy.conflicts;
this.hasSuperseded_ = !!policy.superseded;
this.isMergedValue_ = !!policy.allSourcesMerged;
this.deprecated_ = !!policy.deprecated;
this.future_ = !!policy.future;
// Populate the name column.
const nameDisplay = this.shadowRoot!.querySelector('.name .link span');
nameDisplay!.textContent = policy.name;
if (policy.link) {
const link =
this.shadowRoot!.querySelector('.name .link') as HTMLAnchorElement;
link.href = policy.link;
link.title = loadTimeData.getStringF('policyLearnMore', policy.name);
this.toggleAttribute('no-help-link', false);
} else {
this.toggleAttribute('no-help-link', true);
}
// Populate the remaining columns with policy scope, level and value if a
// value has been set. Otherwise, leave them blank.
if (!this.unset_) {
const scopeDisplay = this.shadowRoot!.querySelector('.scope');
let scope = 'scopeDevice';
if (policy.scope === 'user') {
scope = 'scopeUser';
} else if (policy.scope === 'allUsers') {
scope = 'scopeAllUsers';
}
scopeDisplay!.textContent = loadTimeData.getString(scope);
// Display scope and level as rows instead of columns on space-constrained
// devices.
// <if expr="is_android or is_ios">
const scopeRowContentDisplay =
this.shadowRoot!.querySelector('.scope.row .value');
scopeRowContentDisplay!.textContent = loadTimeData.getString(scope);
const levelRowContentDisplay =
this.shadowRoot!.querySelector('.level.row .value');
levelRowContentDisplay!.textContent = loadTimeData.getString(
policy.level === 'recommended' ? 'levelRecommended' :
'levelMandatory');
// </if>
const levelDisplay = this.shadowRoot!.querySelector('.level');
levelDisplay!.textContent = loadTimeData.getString(
policy.level === 'recommended' ? 'levelRecommended' :
'levelMandatory');
const sourceDisplay = this.shadowRoot!.querySelector('.source');
sourceDisplay!.textContent = loadTimeData.getString(policy.source);
// Reduces load on the DOM for long values;
const convertValue = (value: string, format?: boolean) => {
// Skip 'string' policy to avoid unnecessary conversions.
if (typeof value == 'string') {
return value;
}
if (format) {
return JSON.stringify(value, null, 2);
} else {
return JSON.stringify(value, null);
}
};
// If value is longer than 256 characters, truncate and add ellipsis.
const policyValueStr = convertValue(policy.value);
const truncatedValue = policyValueStr.length > 256 ?
`${policyValueStr.substring(0, 256)}\u2026` :
policyValueStr;
const valueDisplay = this.shadowRoot!.querySelector('.value');
valueDisplay!.textContent = truncatedValue;
const copyLink =
this.shadowRoot!.querySelector('.copy .link') as HTMLElement;
copyLink!.title = loadTimeData.getStringF('policyCopyValue', policy.name);
const valueRowContentDisplay =
this.shadowRoot!.querySelector('.value.row .value');
// Expanded policy value is formatted.
valueRowContentDisplay!.textContent =
convertValue(policy.value, /*format=*/ true);
const errorRowContentDisplay =
this.shadowRoot!.querySelector('.errors.row .value');
errorRowContentDisplay!.textContent = policy.error;
const warningRowContentDisplay =
this.shadowRoot!.querySelector('.warnings.row .value');
warningRowContentDisplay!.textContent = policy.warning;
const infoRowContentDisplay =
this.shadowRoot!.querySelector('.infos.row .value');
infoRowContentDisplay!.textContent = policy.info;
const messagesDisplay = this.shadowRoot!.querySelector('.messages');
const errorsNotice =
this.hasErrors_ ? loadTimeData.getString('error') : '';
const deprecationNotice =
this.deprecated_ ? loadTimeData.getString('deprecated') : '';
const futureNotice = this.future_ ? loadTimeData.getString('future') : '';
const warningsNotice =
this.hasWarnings_ ? loadTimeData.getString('warning') : '';
const conflictsNotice = this.hasConflicts_ && !this.isMergedValue_ ?
loadTimeData.getString('conflict') :
'';
const ignoredNotice =
this.policy.ignored ? loadTimeData.getString('ignored') : '';
let notice =
[
errorsNotice,
deprecationNotice,
futureNotice,
warningsNotice,
ignoredNotice,
conflictsNotice,
].filter(x => !!x)
.join(', ') ||
loadTimeData.getString('ok');
const supersededNotice = this.hasSuperseded_ && !this.isMergedValue_ ?
loadTimeData.getString('superseding') :
'';
if (supersededNotice) {
// Include superseded notice regardless of other notices
notice += `, ${supersededNotice}`;
}
messagesDisplay!.textContent = notice;
if (policy.conflicts) {
policy.conflicts.forEach(conflict => {
const row = document.createElement('policy-conflict') as
PolicyConflictElement;
row.initialize(conflict, 'conflictValue');
row.classList!.add('policy-conflict-data');
this.shadowRoot!.appendChild(row);
});
}
if (policy.superseded) {
policy.superseded.forEach(superseded => {
const row = document.createElement('policy-conflict') as
PolicyConflictElement;
row.initialize(superseded, 'supersededValue');
row.classList!.add('policy-superseded-data');
this.shadowRoot!.appendChild(row);
});
}
} else {
const messagesDisplay = this.shadowRoot!.querySelector('.messages');
messagesDisplay!.textContent = loadTimeData.getString('unset');
}
}
// Copies the policy's value to the clipboard.
private copyValue_() {
const policyValueDisplay =
this.shadowRoot!.querySelector('.value.row .value');
// Select the text that will be copied.
const selection = window.getSelection();
const range = window.document.createRange();
range.selectNodeContents(policyValueDisplay as Node);
selection!.removeAllRanges();
selection!.addRange(range);
// Copy the policy value to the clipboard.
navigator.clipboard
.writeText((policyValueDisplay as CustomElement)!.innerText)
.catch(error => {
console.error('Unable to copy policy value to clipboard:', error);
});
}
// Toggle the visibility of an additional row containing the complete text.
private toggleExpanded() {
const warningRowDisplay =
this.shadowRoot!.querySelector('.warnings.row') as CustomElement;
const errorRowDisplay =
this.shadowRoot!.querySelector('.errors.row') as CustomElement;
const infoRowDisplay =
this.shadowRoot!.querySelector('.infos.row') as CustomElement;
const valueRowDisplay =
this.shadowRoot!.querySelector('.value.row') as CustomElement;
// <if expr="is_android or is_ios">
const scopeRowDisplay =
this.shadowRoot!.querySelector('.scope.row') as CustomElement;
scopeRowDisplay.hidden = !scopeRowDisplay.hidden;
const levelRowDisplay =
this.shadowRoot!.querySelector('.level.row') as CustomElement;
levelRowDisplay.hidden = !levelRowDisplay.hidden;
// </if>
valueRowDisplay.hidden = !valueRowDisplay.hidden;
this.classList!.toggle('expanded', !valueRowDisplay.hidden);
this.shadowRoot!.querySelector<CustomElement>('.show-more')!.hidden =
!valueRowDisplay.hidden;
this.shadowRoot!.querySelector<CustomElement>('.show-less')!.hidden =
valueRowDisplay!.hidden;
if (this.hasWarnings_) {
warningRowDisplay!.hidden = !warningRowDisplay.hidden;
}
if (this.hasErrors_) {
errorRowDisplay!.hidden = !errorRowDisplay.hidden;
}
if (this.hasInfos_) {
infoRowDisplay!.hidden = !infoRowDisplay.hidden;
}
this.shadowRoot!.querySelectorAll<HTMLElement>('.policy-conflict-data')!
.forEach(row => row!.hidden = !row.hidden);
this.shadowRoot!.querySelectorAll<HTMLElement>('.policy-superseded-data')!
.forEach(row => row!.hidden = !row.hidden);
}
}
declare global {
interface HTMLElementTagNameMap {
'policy-row': PolicyRowElement;
}
}
customElements.define('policy-row', PolicyRowElement);
| {
return getTemplate();
} | identifier_body |
policy_row.ts | // Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'chrome://resources/js/action_link.js';
import './policy_conflict.js';
import './strings.m.js';
import {CustomElement} from 'chrome://resources/js/custom_element.js';
import {loadTimeData} from 'chrome://resources/js/load_time_data.js';
import {Conflict, PolicyConflictElement} from './policy_conflict.js';
import {getTemplate} from './policy_row.html.js';
export interface Policy {
ignored?: boolean;
name: string;
level: string;
link?: string;
scope: string;
source: string;
error: string;
warning: string;
info: string;
value: any;
deprecated?: boolean;
future?: boolean;
allSourcesMerged?: boolean;
conflicts?: Conflict[];
superseded?: Conflict[];
forSigninScreen: boolean;
isExtension: boolean;
}
export class PolicyRowElement extends CustomElement {
static override get template() {
return getTemplate();
}
policy: Policy;
private unset_: boolean;
private hasErrors_: boolean;
private hasWarnings_: boolean;
private hasInfos_: boolean;
private hasConflicts_: boolean;
private hasSuperseded_: boolean;
private isMergedValue_: boolean;
private deprecated_: boolean;
private future_: boolean;
connectedCallback() {
const toggle = this.shadowRoot!.querySelector('.policy.row .toggle');
toggle!.addEventListener('click', () => this.toggleExpanded());
const copy = this.shadowRoot!.querySelector('.copy-value');
copy!.addEventListener('click', () => this.copyValue_());
this.setAttribute('role', 'rowgroup');
this.classList.add('policy-data');
}
initialize(policy: Policy) {
this.policy = policy;
this.unset_ = policy.value === undefined;
this.hasErrors_ = !!policy.error;
this.hasWarnings_ = !!policy.warning;
this.hasInfos_ = !!policy.info;
this.hasConflicts_ = !!policy.conflicts;
this.hasSuperseded_ = !!policy.superseded;
this.isMergedValue_ = !!policy.allSourcesMerged;
this.deprecated_ = !!policy.deprecated;
this.future_ = !!policy.future;
// Populate the name column.
const nameDisplay = this.shadowRoot!.querySelector('.name .link span');
nameDisplay!.textContent = policy.name;
if (policy.link) {
const link =
this.shadowRoot!.querySelector('.name .link') as HTMLAnchorElement;
link.href = policy.link;
link.title = loadTimeData.getStringF('policyLearnMore', policy.name);
this.toggleAttribute('no-help-link', false);
} else {
this.toggleAttribute('no-help-link', true);
}
// Populate the remaining columns with policy scope, level and value if a
// value has been set. Otherwise, leave them blank.
if (!this.unset_) {
const scopeDisplay = this.shadowRoot!.querySelector('.scope');
let scope = 'scopeDevice';
if (policy.scope === 'user') {
scope = 'scopeUser';
} else if (policy.scope === 'allUsers') {
scope = 'scopeAllUsers';
}
scopeDisplay!.textContent = loadTimeData.getString(scope);
// Display scope and level as rows instead of columns on space-constrained
// devices.
// <if expr="is_android or is_ios">
const scopeRowContentDisplay =
this.shadowRoot!.querySelector('.scope.row .value');
scopeRowContentDisplay!.textContent = loadTimeData.getString(scope);
const levelRowContentDisplay =
this.shadowRoot!.querySelector('.level.row .value');
levelRowContentDisplay!.textContent = loadTimeData.getString(
policy.level === 'recommended' ? 'levelRecommended' :
'levelMandatory');
// </if>
const levelDisplay = this.shadowRoot!.querySelector('.level');
levelDisplay!.textContent = loadTimeData.getString(
policy.level === 'recommended' ? 'levelRecommended' :
'levelMandatory');
const sourceDisplay = this.shadowRoot!.querySelector('.source');
sourceDisplay!.textContent = loadTimeData.getString(policy.source);
// Reduces load on the DOM for long values;
const convertValue = (value: string, format?: boolean) => { | }
if (format) {
return JSON.stringify(value, null, 2);
} else {
return JSON.stringify(value, null);
}
};
// If value is longer than 256 characters, truncate and add ellipsis.
const policyValueStr = convertValue(policy.value);
const truncatedValue = policyValueStr.length > 256 ?
`${policyValueStr.substring(0, 256)}\u2026` :
policyValueStr;
const valueDisplay = this.shadowRoot!.querySelector('.value');
valueDisplay!.textContent = truncatedValue;
const copyLink =
this.shadowRoot!.querySelector('.copy .link') as HTMLElement;
copyLink!.title = loadTimeData.getStringF('policyCopyValue', policy.name);
const valueRowContentDisplay =
this.shadowRoot!.querySelector('.value.row .value');
// Expanded policy value is formatted.
valueRowContentDisplay!.textContent =
convertValue(policy.value, /*format=*/ true);
const errorRowContentDisplay =
this.shadowRoot!.querySelector('.errors.row .value');
errorRowContentDisplay!.textContent = policy.error;
const warningRowContentDisplay =
this.shadowRoot!.querySelector('.warnings.row .value');
warningRowContentDisplay!.textContent = policy.warning;
const infoRowContentDisplay =
this.shadowRoot!.querySelector('.infos.row .value');
infoRowContentDisplay!.textContent = policy.info;
const messagesDisplay = this.shadowRoot!.querySelector('.messages');
const errorsNotice =
this.hasErrors_ ? loadTimeData.getString('error') : '';
const deprecationNotice =
this.deprecated_ ? loadTimeData.getString('deprecated') : '';
const futureNotice = this.future_ ? loadTimeData.getString('future') : '';
const warningsNotice =
this.hasWarnings_ ? loadTimeData.getString('warning') : '';
const conflictsNotice = this.hasConflicts_ && !this.isMergedValue_ ?
loadTimeData.getString('conflict') :
'';
const ignoredNotice =
this.policy.ignored ? loadTimeData.getString('ignored') : '';
let notice =
[
errorsNotice,
deprecationNotice,
futureNotice,
warningsNotice,
ignoredNotice,
conflictsNotice,
].filter(x => !!x)
.join(', ') ||
loadTimeData.getString('ok');
const supersededNotice = this.hasSuperseded_ && !this.isMergedValue_ ?
loadTimeData.getString('superseding') :
'';
if (supersededNotice) {
// Include superseded notice regardless of other notices
notice += `, ${supersededNotice}`;
}
messagesDisplay!.textContent = notice;
if (policy.conflicts) {
policy.conflicts.forEach(conflict => {
const row = document.createElement('policy-conflict') as
PolicyConflictElement;
row.initialize(conflict, 'conflictValue');
row.classList!.add('policy-conflict-data');
this.shadowRoot!.appendChild(row);
});
}
if (policy.superseded) {
policy.superseded.forEach(superseded => {
const row = document.createElement('policy-conflict') as
PolicyConflictElement;
row.initialize(superseded, 'supersededValue');
row.classList!.add('policy-superseded-data');
this.shadowRoot!.appendChild(row);
});
}
} else {
const messagesDisplay = this.shadowRoot!.querySelector('.messages');
messagesDisplay!.textContent = loadTimeData.getString('unset');
}
}
// Copies the policy's value to the clipboard.
private copyValue_() {
const policyValueDisplay =
this.shadowRoot!.querySelector('.value.row .value');
// Select the text that will be copied.
const selection = window.getSelection();
const range = window.document.createRange();
range.selectNodeContents(policyValueDisplay as Node);
selection!.removeAllRanges();
selection!.addRange(range);
// Copy the policy value to the clipboard.
navigator.clipboard
.writeText((policyValueDisplay as CustomElement)!.innerText)
.catch(error => {
console.error('Unable to copy policy value to clipboard:', error);
});
}
// Toggle the visibility of an additional row containing the complete text.
private toggleExpanded() {
const warningRowDisplay =
this.shadowRoot!.querySelector('.warnings.row') as CustomElement;
const errorRowDisplay =
this.shadowRoot!.querySelector('.errors.row') as CustomElement;
const infoRowDisplay =
this.shadowRoot!.querySelector('.infos.row') as CustomElement;
const valueRowDisplay =
this.shadowRoot!.querySelector('.value.row') as CustomElement;
// <if expr="is_android or is_ios">
const scopeRowDisplay =
this.shadowRoot!.querySelector('.scope.row') as CustomElement;
scopeRowDisplay.hidden = !scopeRowDisplay.hidden;
const levelRowDisplay =
this.shadowRoot!.querySelector('.level.row') as CustomElement;
levelRowDisplay.hidden = !levelRowDisplay.hidden;
// </if>
valueRowDisplay.hidden = !valueRowDisplay.hidden;
this.classList!.toggle('expanded', !valueRowDisplay.hidden);
this.shadowRoot!.querySelector<CustomElement>('.show-more')!.hidden =
!valueRowDisplay.hidden;
this.shadowRoot!.querySelector<CustomElement>('.show-less')!.hidden =
valueRowDisplay!.hidden;
if (this.hasWarnings_) {
warningRowDisplay!.hidden = !warningRowDisplay.hidden;
}
if (this.hasErrors_) {
errorRowDisplay!.hidden = !errorRowDisplay.hidden;
}
if (this.hasInfos_) {
infoRowDisplay!.hidden = !infoRowDisplay.hidden;
}
this.shadowRoot!.querySelectorAll<HTMLElement>('.policy-conflict-data')!
.forEach(row => row!.hidden = !row.hidden);
this.shadowRoot!.querySelectorAll<HTMLElement>('.policy-superseded-data')!
.forEach(row => row!.hidden = !row.hidden);
}
}
declare global {
interface HTMLElementTagNameMap {
'policy-row': PolicyRowElement;
}
}
customElements.define('policy-row', PolicyRowElement); | // Skip 'string' policy to avoid unnecessary conversions.
if (typeof value == 'string') {
return value; | random_line_split |
server.go | package dynamiclistener
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"log"
"net"
"net/http"
"reflect"
"strconv"
"strings"
"sync"
"time"
cert "github.com/rancher/dynamiclistener/cert"
"github.com/sirupsen/logrus"
)
type server struct {
sync.Mutex
userConfig UserConfig
listenConfigStorage ListenerConfigStorage
tlsCert *tls.Certificate
ips map[string]bool
domains map[string]bool
cn string
listeners []net.Listener
servers []*http.Server
activeCA *x509.Certificate
activeCAKey crypto.Signer
}
func NewServer(listenConfigStorage ListenerConfigStorage, config UserConfig) (ServerInterface, error) {
s := &server{
userConfig: config,
listenConfigStorage: listenConfigStorage,
cn: "cattle",
}
s.ips = map[string]bool{}
s.domains = map[string]bool{}
if err := s.userConfigure(); err != nil {
return nil, err
}
lc, err := listenConfigStorage.Get()
if err != nil {
return nil, err
}
return s, s.Update(lc)
}
func (s *server) CACert() (string, error) {
if s.userConfig.NoCACerts {
return "", nil
}
if s.userConfig.CACerts != "" {
return s.userConfig.CACerts, nil
}
return "", fmt.Errorf("ca cert not found")
}
func marshalPrivateKey(privateKey crypto.Signer) (string, []byte, error) {
var (
keyType string
bytes []byte
err error
)
if key, ok := privateKey.(*ecdsa.PrivateKey); ok {
keyType = cert.ECPrivateKeyBlockType
bytes, err = x509.MarshalECPrivateKey(key)
} else if key, ok := privateKey.(*rsa.PrivateKey); ok {
keyType = cert.RSAPrivateKeyBlockType
bytes = x509.MarshalPKCS1PrivateKey(key)
} else {
keyType = cert.PrivateKeyBlockType
bytes, err = x509.MarshalPKCS8PrivateKey(privateKey)
}
if err != nil {
logrus.Errorf("Unable to marshal private key: %v", err)
}
return keyType, bytes, err
}
func newPrivateKey() (crypto.Signer, error) {
caKeyBytes, err := cert.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, err
}
caKeyIFace, err := cert.ParsePrivateKeyPEM(caKeyBytes)
if err != nil {
return nil, err
}
return caKeyIFace.(crypto.Signer), nil
}
func (s *server) save() (_err error) {
defer func() {
if _err != nil {
logrus.Errorf("Saving cert error: %s", _err)
}
}()
certStr, err := certToString(s.tlsCert)
if err != nil {
return err
}
cfg, err := s.listenConfigStorage.Get()
if err != nil {
return err
}
cfg.GeneratedCerts = map[string]string{s.cn: certStr}
_, err = s.listenConfigStorage.Set(cfg)
return err
}
func (s *server) userConfigure() error {
if s.userConfig.HTTPSPort == 0 {
s.userConfig.HTTPSPort = 8443
}
for _, d := range s.userConfig.Domains {
s.domains[d] = true
}
for _, ip := range s.userConfig.KnownIPs {
if netIP := net.ParseIP(ip); netIP != nil {
s.ips[ip] = true
}
}
if bindAddress := net.ParseIP(s.userConfig.BindAddress); bindAddress != nil {
s.ips[s.userConfig.BindAddress] = true
}
if s.activeCA == nil && s.activeCAKey == nil {
if s.userConfig.CACerts != "" && s.userConfig.CAKey != "" {
ca, err := cert.ParseCertsPEM([]byte(s.userConfig.CACerts))
if err != nil {
return err
}
key, err := cert.ParsePrivateKeyPEM([]byte(s.userConfig.CAKey))
if err != nil {
return err
}
s.activeCA = ca[0]
s.activeCAKey = key.(crypto.Signer)
} else {
ca, key, err := genCA()
if err != nil {
return err
}
s.activeCA = ca
s.activeCAKey = key
}
}
return nil
}
func genCA() (*x509.Certificate, crypto.Signer, error) {
caKey, err := newPrivateKey()
if err != nil {
return nil, nil, err
}
caCert, err := cert.NewSelfSignedCACert(cert.Config{
CommonName: "k3s-ca",
Organization: []string{"k3s-org"},
}, caKey)
if err != nil {
return nil, nil, err
}
return caCert, caKey, nil
}
func (s *server) Update(status *ListenerStatus) (_err error) {
s.Lock()
defer func() {
s.Unlock()
if _err != nil {
logrus.Errorf("Update cert error: %s", _err)
}
if s.tlsCert == nil {
s.getCertificate(&tls.ClientHelloInfo{ServerName: "localhost"})
}
}()
certString := status.GeneratedCerts[s.cn]
tlsCert, err := stringToCert(certString)
if err != nil {
logrus.Errorf("Update cert unable to convert string to cert: %s", err)
s.tlsCert = nil
}
if tlsCert != nil {
s.tlsCert = tlsCert
for i, certBytes := range tlsCert.Certificate {
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
logrus.Errorf("Update cert %d parse error: %s", i, err)
s.tlsCert = nil
break
}
ips := map[string]bool{}
for _, ip := range cert.IPAddresses {
ips[ip.String()] = true
}
domains := map[string]bool{}
for _, domain := range cert.DNSNames {
domains[domain] = true
}
if !(reflect.DeepEqual(ips, s.ips) && reflect.DeepEqual(domains, s.domains)) {
subset := true
for ip := range s.ips {
if !ips[ip] {
subset = false
break
}
}
if subset {
for domain := range s.domains {
if !domains[domain] {
subset = false
break
}
}
}
if !subset {
s.tlsCert = nil
}
for ip := range ips {
s.ips[ip] = true
}
for domain := range domains {
s.domains[domain] = true
}
}
}
}
return s.reload()
}
func (s *server) shutdown() error {
for _, listener := range s.listeners {
if err := listener.Close(); err != nil {
return err
}
}
s.listeners = nil
for _, server := range s.servers {
go server.Shutdown(context.Background())
}
s.servers = nil
return nil
}
func (s *server) reload() error {
if len(s.listeners) > 0 {
return nil
}
if err := s.shutdown(); err != nil |
if err := s.serveHTTPS(); err != nil {
return err
}
return nil
}
func (s *server) getCertificate(hello *tls.ClientHelloInfo) (_servingCert *tls.Certificate, _err error) {
s.Lock()
changed := false
defer func() {
defer s.Unlock()
if _err != nil {
logrus.Errorf("Get certificate error: %s", _err)
return
}
if changed {
s.save()
}
}()
if hello.ServerName != "" && !s.domains[hello.ServerName] {
s.tlsCert = nil
s.domains[hello.ServerName] = true
}
if s.tlsCert != nil {
return s.tlsCert, nil
}
ips := []net.IP{}
for ipStr := range s.ips {
if ip := net.ParseIP(ipStr); ip != nil {
ips = append(ips, ip)
}
}
dnsNames := []string{}
for domain := range s.domains {
dnsNames = append(dnsNames, domain)
}
cfg := cert.Config{
CommonName: s.cn,
Organization: s.activeCA.Subject.Organization,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
AltNames: cert.AltNames{
DNSNames: dnsNames,
IPs: ips,
},
}
key, err := newPrivateKey()
if err != nil {
return nil, err
}
cert, err := cert.NewSignedCert(cfg, key, s.activeCA, s.activeCAKey)
if err != nil {
return nil, err
}
tlsCert := &tls.Certificate{
Certificate: [][]byte{
cert.Raw,
},
PrivateKey: key,
}
changed = true
s.tlsCert = tlsCert
return tlsCert, nil
}
func (s *server) cacheHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
h, _, err := net.SplitHostPort(req.Host)
if err != nil {
h = req.Host
}
s.Lock()
if ip := net.ParseIP(h); ip != nil {
if !s.ips[h] {
s.ips[h] = true
s.tlsCert = nil
}
} else {
if !s.domains[h] {
s.domains[h] = true
s.tlsCert = nil
}
}
s.Unlock()
handler.ServeHTTP(resp, req)
})
}
func (s *server) serveHTTPS() error {
conf := &tls.Config{
ClientAuth: tls.RequestClientCert,
GetCertificate: s.getCertificate,
PreferServerCipherSuites: true,
}
listener, err := s.newListener(s.userConfig.BindAddress, s.userConfig.HTTPSPort, conf)
if err != nil {
return err
}
logger := logrus.StandardLogger()
server := &http.Server{
Handler: s.cacheHandler(s.Handler()),
ErrorLog: log.New(logger.WriterLevel(logrus.DebugLevel), "", log.LstdFlags),
}
s.servers = append(s.servers, server)
s.startServer(listener, server)
if s.userConfig.HTTPPort > 0 {
httpListener, err := s.newListener(s.userConfig.BindAddress, s.userConfig.HTTPPort, nil)
if err != nil {
return err
}
httpServer := &http.Server{
Handler: s.cacheHandler(httpRedirect(s.Handler())),
ErrorLog: log.New(logger.WriterLevel(logrus.DebugLevel), "", log.LstdFlags),
}
s.servers = append(s.servers, httpServer)
s.startServer(httpListener, httpServer)
}
return nil
}
// Approach taken from letsencrypt, except manglePort is specific to us
func httpRedirect(next http.Handler) http.Handler {
return http.HandlerFunc(
func(rw http.ResponseWriter, r *http.Request) {
if r.Header.Get("x-Forwarded-Proto") == "https" ||
strings.HasPrefix(r.URL.Path, "/ping") ||
strings.HasPrefix(r.URL.Path, "/health") {
next.ServeHTTP(rw, r)
return
}
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(rw, "Use HTTPS", http.StatusBadRequest)
return
}
target := "https://" + manglePort(r.Host) + r.URL.RequestURI()
http.Redirect(rw, r, target, http.StatusFound)
})
}
func manglePort(hostport string) string {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return hostport
}
portInt, err := strconv.Atoi(port)
if err != nil {
return hostport
}
portInt = ((portInt / 1000) * 1000) + 443
return net.JoinHostPort(host, strconv.Itoa(portInt))
}
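// Worked example for manglePort (illustrative values): with Go's integer
// division, ((8080/1000)*1000)+443 == 8443 and ((9345/1000)*1000)+443 == 9443,
// so manglePort("example.com:8080") yields "example.com:8443" and
// manglePort("example.com:9345") yields "example.com:9443"; the redirect
// always lands on the 443-style port within the same thousand-block.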
func (s *server) startServer(listener net.Listener, server *http.Server) {
go func() {
if err := server.Serve(listener); err != nil {
logrus.Errorf("server on %v returned err: %v", listener.Addr(), err)
}
}()
}
func (s *server) Handler() http.Handler {
return s.userConfig.Handler
}
func (s *server) newListener(ip string, port int, config *tls.Config) (net.Listener, error) {
addr := fmt.Sprintf("%s:%d", ip, port)
l, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
l = tcpKeepAliveListener{l.(*net.TCPListener)}
if config != nil {
l = tls.NewListener(l, config)
}
s.listeners = append(s.listeners, l)
logrus.Info("Listening on ", addr)
return l, nil
}
func stringToCert(certString string) (*tls.Certificate, error) {
parts := strings.Split(certString, "#")
if len(parts) != 2 {
return nil, errors.New("Unable to split cert into two parts")
}
certPart, keyPart := parts[0], parts[1]
keyBytes, err := base64.StdEncoding.DecodeString(keyPart)
if err != nil {
return nil, err
}
key, err := cert.ParsePrivateKeyPEM(keyBytes)
if err != nil {
return nil, err
}
certBytes, err := base64.StdEncoding.DecodeString(certPart)
if err != nil {
return nil, err
}
return &tls.Certificate{
Certificate: [][]byte{certBytes},
PrivateKey: key,
}, nil
}
func certToString(cert *tls.Certificate) (string, error) {
keyType, keyBytes, err := marshalPrivateKey(cert.PrivateKey.(crypto.Signer))
if err != nil {
return "", err
}
privateKeyPemBlock := &pem.Block{
Type: keyType,
Bytes: keyBytes,
}
pemBytes := pem.EncodeToMemory(privateKeyPemBlock)
certString := base64.StdEncoding.EncodeToString(cert.Certificate[0])
keyString := base64.StdEncoding.EncodeToString(pemBytes)
return certString + "#" + keyString, nil
}
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
| {
return err
} | conditional_block |
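A minimal caller sketch for the server above. The in-memory ListenerConfigStorage and the exact shapes of ListenerStatus and UserConfig beyond the fields referenced in this excerpt are assumptions; treat it as an illustration of how NewServer is wired up, not as the package's documented API.

package main

import (
	"net/http"

	"github.com/rancher/dynamiclistener"
)

// memStorage is a hypothetical in-memory ListenerConfigStorage.
type memStorage struct {
	status *dynamiclistener.ListenerStatus
}

func (m *memStorage) Get() (*dynamiclistener.ListenerStatus, error) {
	return m.status, nil
}

func (m *memStorage) Set(status *dynamiclistener.ListenerStatus) (*dynamiclistener.ListenerStatus, error) {
	m.status = status
	return status, nil
}

func main() {
	cfg := dynamiclistener.UserConfig{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		}),
		HTTPSPort:   8443,
		HTTPPort:    8080,
		BindAddress: "127.0.0.1",
		Domains:     []string{"example.local"},
		KnownIPs:    []string{"127.0.0.1"},
	}
	storage := &memStorage{status: &dynamiclistener.ListenerStatus{}}
	if _, err := dynamiclistener.NewServer(storage, cfg); err != nil {
		panic(err)
	}
	select {} // keep the process alive while the listeners serve
}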
server.go | package dynamiclistener
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"log"
"net"
"net/http"
"reflect"
"strconv"
"strings"
"sync"
"time"
cert "github.com/rancher/dynamiclistener/cert"
"github.com/sirupsen/logrus"
)
type server struct {
sync.Mutex
userConfig UserConfig
listenConfigStorage ListenerConfigStorage
tlsCert *tls.Certificate
ips map[string]bool
domains map[string]bool
cn string
listeners []net.Listener
servers []*http.Server
activeCA *x509.Certificate
activeCAKey crypto.Signer
}
func NewServer(listenConfigStorage ListenerConfigStorage, config UserConfig) (ServerInterface, error) {
s := &server{
userConfig: config,
listenConfigStorage: listenConfigStorage,
cn: "cattle",
}
| if err := s.userConfigure(); err != nil {
return nil, err
}
lc, err := listenConfigStorage.Get()
if err != nil {
return nil, err
}
return s, s.Update(lc)
}
func (s *server) CACert() (string, error) {
if s.userConfig.NoCACerts {
return "", nil
}
if s.userConfig.CACerts != "" {
return s.userConfig.CACerts, nil
}
return "", fmt.Errorf("ca cert not found")
}
func marshalPrivateKey(privateKey crypto.Signer) (string, []byte, error) {
var (
keyType string
bytes []byte
err error
)
if key, ok := privateKey.(*ecdsa.PrivateKey); ok {
keyType = cert.ECPrivateKeyBlockType
bytes, err = x509.MarshalECPrivateKey(key)
} else if key, ok := privateKey.(*rsa.PrivateKey); ok {
keyType = cert.RSAPrivateKeyBlockType
bytes = x509.MarshalPKCS1PrivateKey(key)
} else {
keyType = cert.PrivateKeyBlockType
bytes, err = x509.MarshalPKCS8PrivateKey(privateKey)
}
if err != nil {
logrus.Errorf("Unable to marshal private key: %v", err)
}
return keyType, bytes, err
}
func newPrivateKey() (crypto.Signer, error) {
caKeyBytes, err := cert.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, err
}
caKeyIFace, err := cert.ParsePrivateKeyPEM(caKeyBytes)
if err != nil {
return nil, err
}
return caKeyIFace.(crypto.Signer), nil
}
func (s *server) save() (_err error) {
defer func() {
if _err != nil {
logrus.Errorf("Saving cert error: %s", _err)
}
}()
certStr, err := certToString(s.tlsCert)
if err != nil {
return err
}
cfg, err := s.listenConfigStorage.Get()
if err != nil {
return err
}
cfg.GeneratedCerts = map[string]string{s.cn: certStr}
_, err = s.listenConfigStorage.Set(cfg)
return err
}
func (s *server) userConfigure() error {
if s.userConfig.HTTPSPort == 0 {
s.userConfig.HTTPSPort = 8443
}
for _, d := range s.userConfig.Domains {
s.domains[d] = true
}
for _, ip := range s.userConfig.KnownIPs {
if netIP := net.ParseIP(ip); netIP != nil {
s.ips[ip] = true
}
}
if bindAddress := net.ParseIP(s.userConfig.BindAddress); bindAddress != nil {
s.ips[s.userConfig.BindAddress] = true
}
if s.activeCA == nil && s.activeCAKey == nil {
if s.userConfig.CACerts != "" && s.userConfig.CAKey != "" {
ca, err := cert.ParseCertsPEM([]byte(s.userConfig.CACerts))
if err != nil {
return err
}
key, err := cert.ParsePrivateKeyPEM([]byte(s.userConfig.CAKey))
if err != nil {
return err
}
s.activeCA = ca[0]
s.activeCAKey = key.(crypto.Signer)
} else {
ca, key, err := genCA()
if err != nil {
return err
}
s.activeCA = ca
s.activeCAKey = key
}
}
return nil
}
func genCA() (*x509.Certificate, crypto.Signer, error) {
caKey, err := newPrivateKey()
if err != nil {
return nil, nil, err
}
caCert, err := cert.NewSelfSignedCACert(cert.Config{
CommonName: "k3s-ca",
Organization: []string{"k3s-org"},
}, caKey)
if err != nil {
return nil, nil, err
}
return caCert, caKey, nil
}
func (s *server) Update(status *ListenerStatus) (_err error) {
s.Lock()
defer func() {
s.Unlock()
if _err != nil {
logrus.Errorf("Update cert error: %s", _err)
}
if s.tlsCert == nil {
s.getCertificate(&tls.ClientHelloInfo{ServerName: "localhost"})
}
}()
certString := status.GeneratedCerts[s.cn]
tlsCert, err := stringToCert(certString)
if err != nil {
logrus.Errorf("Update cert unable to convert string to cert: %s", err)
s.tlsCert = nil
}
if tlsCert != nil {
s.tlsCert = tlsCert
for i, certBytes := range tlsCert.Certificate {
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
logrus.Errorf("Update cert %d parse error: %s", i, err)
s.tlsCert = nil
break
}
ips := map[string]bool{}
for _, ip := range cert.IPAddresses {
ips[ip.String()] = true
}
domains := map[string]bool{}
for _, domain := range cert.DNSNames {
domains[domain] = true
}
if !(reflect.DeepEqual(ips, s.ips) && reflect.DeepEqual(domains, s.domains)) {
subset := true
for ip := range s.ips {
if !ips[ip] {
subset = false
break
}
}
if subset {
for domain := range s.domains {
if !domains[domain] {
subset = false
break
}
}
}
if !subset {
s.tlsCert = nil
}
for ip := range ips {
s.ips[ip] = true
}
for domain := range domains {
s.domains[domain] = true
}
}
}
}
return s.reload()
}
func (s *server) shutdown() error {
for _, listener := range s.listeners {
if err := listener.Close(); err != nil {
return err
}
}
s.listeners = nil
for _, server := range s.servers {
go server.Shutdown(context.Background())
}
s.servers = nil
return nil
}
func (s *server) reload() error {
if len(s.listeners) > 0 {
return nil
}
if err := s.shutdown(); err != nil {
return err
}
if err := s.serveHTTPS(); err != nil {
return err
}
return nil
}
func (s *server) getCertificate(hello *tls.ClientHelloInfo) (_servingCert *tls.Certificate, _err error) {
s.Lock()
changed := false
defer func() {
defer s.Unlock()
if _err != nil {
logrus.Errorf("Get certificate error: %s", _err)
return
}
if changed {
s.save()
}
}()
if hello.ServerName != "" && !s.domains[hello.ServerName] {
s.tlsCert = nil
s.domains[hello.ServerName] = true
}
if s.tlsCert != nil {
return s.tlsCert, nil
}
ips := []net.IP{}
for ipStr := range s.ips {
if ip := net.ParseIP(ipStr); ip != nil {
ips = append(ips, ip)
}
}
dnsNames := []string{}
for domain := range s.domains {
dnsNames = append(dnsNames, domain)
}
cfg := cert.Config{
CommonName: s.cn,
Organization: s.activeCA.Subject.Organization,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
AltNames: cert.AltNames{
DNSNames: dnsNames,
IPs: ips,
},
}
key, err := newPrivateKey()
if err != nil {
return nil, err
}
cert, err := cert.NewSignedCert(cfg, key, s.activeCA, s.activeCAKey)
if err != nil {
return nil, err
}
tlsCert := &tls.Certificate{
Certificate: [][]byte{
cert.Raw,
},
PrivateKey: key,
}
changed = true
s.tlsCert = tlsCert
return tlsCert, nil
}
func (s *server) cacheHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
h, _, err := net.SplitHostPort(req.Host)
if err != nil {
h = req.Host
}
s.Lock()
if ip := net.ParseIP(h); ip != nil {
if !s.ips[h] {
s.ips[h] = true
s.tlsCert = nil
}
} else {
if !s.domains[h] {
s.domains[h] = true
s.tlsCert = nil
}
}
s.Unlock()
handler.ServeHTTP(resp, req)
})
}
func (s *server) serveHTTPS() error {
conf := &tls.Config{
ClientAuth: tls.RequestClientCert,
GetCertificate: s.getCertificate,
PreferServerCipherSuites: true,
}
listener, err := s.newListener(s.userConfig.BindAddress, s.userConfig.HTTPSPort, conf)
if err != nil {
return err
}
logger := logrus.StandardLogger()
server := &http.Server{
Handler: s.cacheHandler(s.Handler()),
ErrorLog: log.New(logger.WriterLevel(logrus.DebugLevel), "", log.LstdFlags),
}
s.servers = append(s.servers, server)
s.startServer(listener, server)
if s.userConfig.HTTPPort > 0 {
httpListener, err := s.newListener(s.userConfig.BindAddress, s.userConfig.HTTPPort, nil)
if err != nil {
return err
}
httpServer := &http.Server{
Handler: s.cacheHandler(httpRedirect(s.Handler())),
ErrorLog: log.New(logger.WriterLevel(logrus.DebugLevel), "", log.LstdFlags),
}
s.servers = append(s.servers, httpServer)
s.startServer(httpListener, httpServer)
}
return nil
}
// Approach taken from letsencrypt, except manglePort is specific to us
func httpRedirect(next http.Handler) http.Handler {
return http.HandlerFunc(
func(rw http.ResponseWriter, r *http.Request) {
if r.Header.Get("x-Forwarded-Proto") == "https" ||
strings.HasPrefix(r.URL.Path, "/ping") ||
strings.HasPrefix(r.URL.Path, "/health") {
next.ServeHTTP(rw, r)
return
}
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(rw, "Use HTTPS", http.StatusBadRequest)
return
}
target := "https://" + manglePort(r.Host) + r.URL.RequestURI()
http.Redirect(rw, r, target, http.StatusFound)
})
}
func manglePort(hostport string) string {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return hostport
}
portInt, err := strconv.Atoi(port)
if err != nil {
return hostport
}
portInt = ((portInt / 1000) * 1000) + 443
return net.JoinHostPort(host, strconv.Itoa(portInt))
}
func (s *server) startServer(listener net.Listener, server *http.Server) {
go func() {
if err := server.Serve(listener); err != nil {
logrus.Errorf("server on %v returned err: %v", listener.Addr(), err)
}
}()
}
func (s *server) Handler() http.Handler {
return s.userConfig.Handler
}
func (s *server) newListener(ip string, port int, config *tls.Config) (net.Listener, error) {
addr := fmt.Sprintf("%s:%d", ip, port)
l, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
l = tcpKeepAliveListener{l.(*net.TCPListener)}
if config != nil {
l = tls.NewListener(l, config)
}
s.listeners = append(s.listeners, l)
logrus.Info("Listening on ", addr)
return l, nil
}
func stringToCert(certString string) (*tls.Certificate, error) {
parts := strings.Split(certString, "#")
if len(parts) != 2 {
return nil, errors.New("Unable to split cert into two parts")
}
certPart, keyPart := parts[0], parts[1]
keyBytes, err := base64.StdEncoding.DecodeString(keyPart)
if err != nil {
return nil, err
}
key, err := cert.ParsePrivateKeyPEM(keyBytes)
if err != nil {
return nil, err
}
certBytes, err := base64.StdEncoding.DecodeString(certPart)
if err != nil {
return nil, err
}
return &tls.Certificate{
Certificate: [][]byte{certBytes},
PrivateKey: key,
}, nil
}
func certToString(cert *tls.Certificate) (string, error) {
keyType, keyBytes, err := marshalPrivateKey(cert.PrivateKey.(crypto.Signer))
if err != nil {
return "", err
}
privateKeyPemBlock := &pem.Block{
Type: keyType,
Bytes: keyBytes,
}
pemBytes := pem.EncodeToMemory(privateKeyPemBlock)
certString := base64.StdEncoding.EncodeToString(cert.Certificate[0])
keyString := base64.StdEncoding.EncodeToString(pemBytes)
return certString + "#" + keyString, nil
}
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
} | s.ips = map[string]bool{}
s.domains = map[string]bool{}
| random_line_split |
server.go | package dynamiclistener
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"log"
"net"
"net/http"
"reflect"
"strconv"
"strings"
"sync"
"time"
cert "github.com/rancher/dynamiclistener/cert"
"github.com/sirupsen/logrus"
)
type server struct {
sync.Mutex
userConfig UserConfig
listenConfigStorage ListenerConfigStorage
tlsCert *tls.Certificate
ips map[string]bool
domains map[string]bool
cn string
listeners []net.Listener
servers []*http.Server
activeCA *x509.Certificate
activeCAKey crypto.Signer
}
func NewServer(listenConfigStorage ListenerConfigStorage, config UserConfig) (ServerInterface, error) {
s := &server{
userConfig: config,
listenConfigStorage: listenConfigStorage,
cn: "cattle",
}
s.ips = map[string]bool{}
s.domains = map[string]bool{}
if err := s.userConfigure(); err != nil {
return nil, err
}
lc, err := listenConfigStorage.Get()
if err != nil {
return nil, err
}
return s, s.Update(lc)
}
func (s *server) CACert() (string, error) {
if s.userConfig.NoCACerts {
return "", nil
}
if s.userConfig.CACerts != "" {
return s.userConfig.CACerts, nil
}
return "", fmt.Errorf("ca cert not found")
}
func marshalPrivateKey(privateKey crypto.Signer) (string, []byte, error) {
var (
keyType string
bytes []byte
err error
)
if key, ok := privateKey.(*ecdsa.PrivateKey); ok {
keyType = cert.ECPrivateKeyBlockType
bytes, err = x509.MarshalECPrivateKey(key)
} else if key, ok := privateKey.(*rsa.PrivateKey); ok {
keyType = cert.RSAPrivateKeyBlockType
bytes = x509.MarshalPKCS1PrivateKey(key)
} else {
keyType = cert.PrivateKeyBlockType
bytes, err = x509.MarshalPKCS8PrivateKey(privateKey)
}
if err != nil {
logrus.Errorf("Unable to marshal private key: %v", err)
}
return keyType, bytes, err
}
func newPrivateKey() (crypto.Signer, error) {
caKeyBytes, err := cert.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, err
}
caKeyIFace, err := cert.ParsePrivateKeyPEM(caKeyBytes)
if err != nil {
return nil, err
}
return caKeyIFace.(crypto.Signer), nil
}
func (s *server) save() (_err error) {
defer func() {
if _err != nil {
logrus.Errorf("Saving cert error: %s", _err)
}
}()
certStr, err := certToString(s.tlsCert)
if err != nil {
return err
}
cfg, err := s.listenConfigStorage.Get()
if err != nil {
return err
}
cfg.GeneratedCerts = map[string]string{s.cn: certStr}
_, err = s.listenConfigStorage.Set(cfg)
return err
}
func (s *server) | () error {
if s.userConfig.HTTPSPort == 0 {
s.userConfig.HTTPSPort = 8443
}
for _, d := range s.userConfig.Domains {
s.domains[d] = true
}
for _, ip := range s.userConfig.KnownIPs {
if netIP := net.ParseIP(ip); netIP != nil {
s.ips[ip] = true
}
}
if bindAddress := net.ParseIP(s.userConfig.BindAddress); bindAddress != nil {
s.ips[s.userConfig.BindAddress] = true
}
if s.activeCA == nil && s.activeCAKey == nil {
if s.userConfig.CACerts != "" && s.userConfig.CAKey != "" {
ca, err := cert.ParseCertsPEM([]byte(s.userConfig.CACerts))
if err != nil {
return err
}
key, err := cert.ParsePrivateKeyPEM([]byte(s.userConfig.CAKey))
if err != nil {
return err
}
s.activeCA = ca[0]
s.activeCAKey = key.(crypto.Signer)
} else {
ca, key, err := genCA()
if err != nil {
return err
}
s.activeCA = ca
s.activeCAKey = key
}
}
return nil
}
func genCA() (*x509.Certificate, crypto.Signer, error) {
caKey, err := newPrivateKey()
if err != nil {
return nil, nil, err
}
caCert, err := cert.NewSelfSignedCACert(cert.Config{
CommonName: "k3s-ca",
Organization: []string{"k3s-org"},
}, caKey)
if err != nil {
return nil, nil, err
}
return caCert, caKey, nil
}
func (s *server) Update(status *ListenerStatus) (_err error) {
s.Lock()
defer func() {
s.Unlock()
if _err != nil {
logrus.Errorf("Update cert error: %s", _err)
}
if s.tlsCert == nil {
s.getCertificate(&tls.ClientHelloInfo{ServerName: "localhost"})
}
}()
certString := status.GeneratedCerts[s.cn]
tlsCert, err := stringToCert(certString)
if err != nil {
logrus.Errorf("Update cert unable to convert string to cert: %s", err)
s.tlsCert = nil
}
if tlsCert != nil {
s.tlsCert = tlsCert
for i, certBytes := range tlsCert.Certificate {
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
logrus.Errorf("Update cert %d parse error: %s", i, err)
s.tlsCert = nil
break
}
ips := map[string]bool{}
for _, ip := range cert.IPAddresses {
ips[ip.String()] = true
}
domains := map[string]bool{}
for _, domain := range cert.DNSNames {
domains[domain] = true
}
if !(reflect.DeepEqual(ips, s.ips) && reflect.DeepEqual(domains, s.domains)) {
subset := true
for ip := range s.ips {
if !ips[ip] {
subset = false
break
}
}
if subset {
for domain := range s.domains {
if !domains[domain] {
subset = false
break
}
}
}
if !subset {
s.tlsCert = nil
}
for ip := range ips {
s.ips[ip] = true
}
for domain := range domains {
s.domains[domain] = true
}
}
}
}
return s.reload()
}
func (s *server) shutdown() error {
for _, listener := range s.listeners {
if err := listener.Close(); err != nil {
return err
}
}
s.listeners = nil
for _, server := range s.servers {
go server.Shutdown(context.Background())
}
s.servers = nil
return nil
}
func (s *server) reload() error {
if len(s.listeners) > 0 {
return nil
}
if err := s.shutdown(); err != nil {
return err
}
if err := s.serveHTTPS(); err != nil {
return err
}
return nil
}
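// Added explanatory note (not part of the original row): getCertificate below is where the
// listener earns the "dynamic" in its name. On each TLS handshake it records any previously
// unseen SNI name, drops the cached certificate whenever the name set grows, and re-signs a
// serving certificate covering all accumulated DNS names and IPs with the active CA; when a
// new certificate was minted, it is persisted through save().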
func (s *server) getCertificate(hello *tls.ClientHelloInfo) (_servingCert *tls.Certificate, _err error) {
s.Lock()
changed := false
defer func() {
defer s.Unlock()
if _err != nil {
logrus.Errorf("Get certificate error: %s", _err)
return
}
if changed {
s.save()
}
}()
if hello.ServerName != "" && !s.domains[hello.ServerName] {
s.tlsCert = nil
s.domains[hello.ServerName] = true
}
if s.tlsCert != nil {
return s.tlsCert, nil
}
ips := []net.IP{}
for ipStr := range s.ips {
if ip := net.ParseIP(ipStr); ip != nil {
ips = append(ips, ip)
}
}
dnsNames := []string{}
for domain := range s.domains {
dnsNames = append(dnsNames, domain)
}
cfg := cert.Config{
CommonName: s.cn,
Organization: s.activeCA.Subject.Organization,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
AltNames: cert.AltNames{
DNSNames: dnsNames,
IPs: ips,
},
}
key, err := newPrivateKey()
if err != nil {
return nil, err
}
cert, err := cert.NewSignedCert(cfg, key, s.activeCA, s.activeCAKey)
if err != nil {
return nil, err
}
tlsCert := &tls.Certificate{
Certificate: [][]byte{
cert.Raw,
},
PrivateKey: key,
}
changed = true
s.tlsCert = tlsCert
return tlsCert, nil
}
func (s *server) cacheHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
h, _, err := net.SplitHostPort(req.Host)
if err != nil {
h = req.Host
}
s.Lock()
if ip := net.ParseIP(h); ip != nil {
if !s.ips[h] {
s.ips[h] = true
s.tlsCert = nil
}
} else {
if !s.domains[h] {
s.domains[h] = true
s.tlsCert = nil
}
}
s.Unlock()
handler.ServeHTTP(resp, req)
})
}
func (s *server) serveHTTPS() error {
conf := &tls.Config{
ClientAuth: tls.RequestClientCert,
GetCertificate: s.getCertificate,
PreferServerCipherSuites: true,
}
listener, err := s.newListener(s.userConfig.BindAddress, s.userConfig.HTTPSPort, conf)
if err != nil {
return err
}
logger := logrus.StandardLogger()
server := &http.Server{
Handler: s.cacheHandler(s.Handler()),
ErrorLog: log.New(logger.WriterLevel(logrus.DebugLevel), "", log.LstdFlags),
}
s.servers = append(s.servers, server)
s.startServer(listener, server)
if s.userConfig.HTTPPort > 0 {
httpListener, err := s.newListener(s.userConfig.BindAddress, s.userConfig.HTTPPort, nil)
if err != nil {
return err
}
httpServer := &http.Server{
Handler: s.cacheHandler(httpRedirect(s.Handler())),
ErrorLog: log.New(logger.WriterLevel(logrus.DebugLevel), "", log.LstdFlags),
}
s.servers = append(s.servers, httpServer)
s.startServer(httpListener, httpServer)
}
return nil
}
// Approach taken from letsencrypt, except manglePort is specific to us
func httpRedirect(next http.Handler) http.Handler {
return http.HandlerFunc(
func(rw http.ResponseWriter, r *http.Request) {
if r.Header.Get("x-Forwarded-Proto") == "https" ||
strings.HasPrefix(r.URL.Path, "/ping") ||
strings.HasPrefix(r.URL.Path, "/health") {
next.ServeHTTP(rw, r)
return
}
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(rw, "Use HTTPS", http.StatusBadRequest)
return
}
target := "https://" + manglePort(r.Host) + r.URL.RequestURI()
http.Redirect(rw, r, target, http.StatusFound)
})
}
func manglePort(hostport string) string {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return hostport
}
portInt, err := strconv.Atoi(port)
if err != nil {
return hostport
}
portInt = ((portInt / 1000) * 1000) + 443
return net.JoinHostPort(host, strconv.Itoa(portInt))
}
func (s *server) startServer(listener net.Listener, server *http.Server) {
go func() {
if err := server.Serve(listener); err != nil {
logrus.Errorf("server on %v returned err: %v", listener.Addr(), err)
}
}()
}
func (s *server) Handler() http.Handler {
return s.userConfig.Handler
}
func (s *server) newListener(ip string, port int, config *tls.Config) (net.Listener, error) {
addr := fmt.Sprintf("%s:%d", ip, port)
l, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
l = tcpKeepAliveListener{l.(*net.TCPListener)}
if config != nil {
l = tls.NewListener(l, config)
}
s.listeners = append(s.listeners, l)
logrus.Info("Listening on ", addr)
return l, nil
}
func stringToCert(certString string) (*tls.Certificate, error) {
parts := strings.Split(certString, "#")
if len(parts) != 2 {
return nil, errors.New("Unable to split cert into two parts")
}
certPart, keyPart := parts[0], parts[1]
keyBytes, err := base64.StdEncoding.DecodeString(keyPart)
if err != nil {
return nil, err
}
key, err := cert.ParsePrivateKeyPEM(keyBytes)
if err != nil {
return nil, err
}
certBytes, err := base64.StdEncoding.DecodeString(certPart)
if err != nil {
return nil, err
}
return &tls.Certificate{
Certificate: [][]byte{certBytes},
PrivateKey: key,
}, nil
}
func certToString(cert *tls.Certificate) (string, error) {
keyType, keyBytes, err := marshalPrivateKey(cert.PrivateKey.(crypto.Signer))
if err != nil {
return "", err
}
privateKeyPemBlock := &pem.Block{
Type: keyType,
Bytes: keyBytes,
}
pemBytes := pem.EncodeToMemory(privateKeyPemBlock)
certString := base64.StdEncoding.EncodeToString(cert.Certificate[0])
keyString := base64.StdEncoding.EncodeToString(pemBytes)
return certString + "#" + keyString, nil
}
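// Added sketch (not part of the original row): the "base64(cert)#base64(key PEM)" string
// produced by certToString round-trips through stringToCert. Only the leaf certificate and
// the private key survive the encoding; any extra chain certificates are dropped.
func exampleCertRoundTrip(tlsCert *tls.Certificate) error {
	encoded, err := certToString(tlsCert)
	if err != nil {
		return err
	}
	decoded, err := stringToCert(encoded)
	if err != nil {
		return err
	}
	if !reflect.DeepEqual(decoded.Certificate[0], tlsCert.Certificate[0]) {
		return errors.New("round trip did not preserve the leaf certificate")
	}
	return nil
}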
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
| userConfigure | identifier_name |
server.go | package dynamiclistener
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"log"
"net"
"net/http"
"reflect"
"strconv"
"strings"
"sync"
"time"
cert "github.com/rancher/dynamiclistener/cert"
"github.com/sirupsen/logrus"
)
type server struct {
sync.Mutex
userConfig UserConfig
listenConfigStorage ListenerConfigStorage
tlsCert *tls.Certificate
ips map[string]bool
domains map[string]bool
cn string
listeners []net.Listener
servers []*http.Server
activeCA *x509.Certificate
activeCAKey crypto.Signer
}
func NewServer(listenConfigStorage ListenerConfigStorage, config UserConfig) (ServerInterface, error) {
s := &server{
userConfig: config,
listenConfigStorage: listenConfigStorage,
cn: "cattle",
}
s.ips = map[string]bool{}
s.domains = map[string]bool{}
if err := s.userConfigure(); err != nil {
return nil, err
}
lc, err := listenConfigStorage.Get()
if err != nil {
return nil, err
}
return s, s.Update(lc)
}
func (s *server) CACert() (string, error) {
if s.userConfig.NoCACerts {
return "", nil
}
if s.userConfig.CACerts != "" {
return s.userConfig.CACerts, nil
}
return "", fmt.Errorf("ca cert not found")
}
func marshalPrivateKey(privateKey crypto.Signer) (string, []byte, error) {
var (
keyType string
bytes []byte
err error
)
if key, ok := privateKey.(*ecdsa.PrivateKey); ok {
keyType = cert.ECPrivateKeyBlockType
bytes, err = x509.MarshalECPrivateKey(key)
} else if key, ok := privateKey.(*rsa.PrivateKey); ok {
keyType = cert.RSAPrivateKeyBlockType
bytes = x509.MarshalPKCS1PrivateKey(key)
} else {
keyType = cert.PrivateKeyBlockType
bytes, err = x509.MarshalPKCS8PrivateKey(privateKey)
}
if err != nil {
logrus.Errorf("Unable to marshal private key: %v", err)
}
return keyType, bytes, err
}
func newPrivateKey() (crypto.Signer, error) {
caKeyBytes, err := cert.MakeEllipticPrivateKeyPEM()
if err != nil {
return nil, err
}
caKeyIFace, err := cert.ParsePrivateKeyPEM(caKeyBytes)
if err != nil {
return nil, err
}
return caKeyIFace.(crypto.Signer), nil
}
func (s *server) save() (_err error) {
defer func() {
if _err != nil {
logrus.Errorf("Saving cert error: %s", _err)
}
}()
certStr, err := certToString(s.tlsCert)
if err != nil {
return err
}
cfg, err := s.listenConfigStorage.Get()
if err != nil {
return err
}
cfg.GeneratedCerts = map[string]string{s.cn: certStr}
_, err = s.listenConfigStorage.Set(cfg)
return err
}
func (s *server) userConfigure() error {
if s.userConfig.HTTPSPort == 0 {
s.userConfig.HTTPSPort = 8443
}
for _, d := range s.userConfig.Domains {
s.domains[d] = true
}
for _, ip := range s.userConfig.KnownIPs {
if netIP := net.ParseIP(ip); netIP != nil {
s.ips[ip] = true
}
}
if bindAddress := net.ParseIP(s.userConfig.BindAddress); bindAddress != nil {
s.ips[s.userConfig.BindAddress] = true
}
if s.activeCA == nil && s.activeCAKey == nil {
if s.userConfig.CACerts != "" && s.userConfig.CAKey != "" {
ca, err := cert.ParseCertsPEM([]byte(s.userConfig.CACerts))
if err != nil {
return err
}
key, err := cert.ParsePrivateKeyPEM([]byte(s.userConfig.CAKey))
if err != nil {
return err
}
s.activeCA = ca[0]
s.activeCAKey = key.(crypto.Signer)
} else {
ca, key, err := genCA()
if err != nil {
return err
}
s.activeCA = ca
s.activeCAKey = key
}
}
return nil
}
func genCA() (*x509.Certificate, crypto.Signer, error) {
caKey, err := newPrivateKey()
if err != nil {
return nil, nil, err
}
caCert, err := cert.NewSelfSignedCACert(cert.Config{
CommonName: "k3s-ca",
Organization: []string{"k3s-org"},
}, caKey)
if err != nil {
return nil, nil, err
}
return caCert, caKey, nil
}
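// Added sketch (not part of the original row): how the helpers above combine when no CA
// material is supplied — generate a self-signed CA, then mint a serving certificate for an
// illustrative host name ("example.local" is made up for this example).
func exampleMintServingCert() (*tls.Certificate, error) {
	ca, caKey, err := genCA()
	if err != nil {
		return nil, err
	}
	key, err := newPrivateKey()
	if err != nil {
		return nil, err
	}
	signed, err := cert.NewSignedCert(cert.Config{
		CommonName: "cattle",
		Usages:     []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		AltNames:   cert.AltNames{DNSNames: []string{"example.local"}},
	}, key, ca, caKey)
	if err != nil {
		return nil, err
	}
	return &tls.Certificate{Certificate: [][]byte{signed.Raw}, PrivateKey: key}, nil
}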
func (s *server) Update(status *ListenerStatus) (_err error) {
s.Lock()
defer func() {
s.Unlock()
if _err != nil {
logrus.Errorf("Update cert error: %s", _err)
}
if s.tlsCert == nil {
s.getCertificate(&tls.ClientHelloInfo{ServerName: "localhost"})
}
}()
certString := status.GeneratedCerts[s.cn]
tlsCert, err := stringToCert(certString)
if err != nil {
logrus.Errorf("Update cert unable to convert string to cert: %s", err)
s.tlsCert = nil
}
if tlsCert != nil {
s.tlsCert = tlsCert
for i, certBytes := range tlsCert.Certificate {
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
logrus.Errorf("Update cert %d parse error: %s", i, err)
s.tlsCert = nil
break
}
ips := map[string]bool{}
for _, ip := range cert.IPAddresses {
ips[ip.String()] = true
}
domains := map[string]bool{}
for _, domain := range cert.DNSNames {
domains[domain] = true
}
if !(reflect.DeepEqual(ips, s.ips) && reflect.DeepEqual(domains, s.domains)) {
subset := true
for ip := range s.ips {
if !ips[ip] {
subset = false
break
}
}
if subset {
for domain := range s.domains {
if !domains[domain] {
subset = false
break
}
}
}
if !subset {
s.tlsCert = nil
}
for ip := range ips {
s.ips[ip] = true
}
for domain := range domains {
s.domains[domain] = true
}
}
}
}
return s.reload()
}
func (s *server) shutdown() error {
for _, listener := range s.listeners {
if err := listener.Close(); err != nil {
return err
}
}
s.listeners = nil
for _, server := range s.servers {
go server.Shutdown(context.Background())
}
s.servers = nil
return nil
}
func (s *server) reload() error |
func (s *server) getCertificate(hello *tls.ClientHelloInfo) (_servingCert *tls.Certificate, _err error) {
s.Lock()
changed := false
defer func() {
defer s.Unlock()
if _err != nil {
logrus.Errorf("Get certificate error: %s", _err)
return
}
if changed {
s.save()
}
}()
if hello.ServerName != "" && !s.domains[hello.ServerName] {
s.tlsCert = nil
s.domains[hello.ServerName] = true
}
if s.tlsCert != nil {
return s.tlsCert, nil
}
ips := []net.IP{}
for ipStr := range s.ips {
if ip := net.ParseIP(ipStr); ip != nil {
ips = append(ips, ip)
}
}
dnsNames := []string{}
for domain := range s.domains {
dnsNames = append(dnsNames, domain)
}
cfg := cert.Config{
CommonName: s.cn,
Organization: s.activeCA.Subject.Organization,
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
AltNames: cert.AltNames{
DNSNames: dnsNames,
IPs: ips,
},
}
key, err := newPrivateKey()
if err != nil {
return nil, err
}
cert, err := cert.NewSignedCert(cfg, key, s.activeCA, s.activeCAKey)
if err != nil {
return nil, err
}
tlsCert := &tls.Certificate{
Certificate: [][]byte{
cert.Raw,
},
PrivateKey: key,
}
changed = true
s.tlsCert = tlsCert
return tlsCert, nil
}
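// Added explanatory note (not part of the original row): cacheHandler below records the host
// each request actually arrived on (IP or domain taken from the Host header). Seeing a new
// one clears the cached certificate so the next handshake re-issues it with the additional
// SAN entry.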
func (s *server) cacheHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
h, _, err := net.SplitHostPort(req.Host)
if err != nil {
h = req.Host
}
s.Lock()
if ip := net.ParseIP(h); ip != nil {
if !s.ips[h] {
s.ips[h] = true
s.tlsCert = nil
}
} else {
if !s.domains[h] {
s.domains[h] = true
s.tlsCert = nil
}
}
s.Unlock()
handler.ServeHTTP(resp, req)
})
}
func (s *server) serveHTTPS() error {
conf := &tls.Config{
ClientAuth: tls.RequestClientCert,
GetCertificate: s.getCertificate,
PreferServerCipherSuites: true,
}
listener, err := s.newListener(s.userConfig.BindAddress, s.userConfig.HTTPSPort, conf)
if err != nil {
return err
}
logger := logrus.StandardLogger()
server := &http.Server{
Handler: s.cacheHandler(s.Handler()),
ErrorLog: log.New(logger.WriterLevel(logrus.DebugLevel), "", log.LstdFlags),
}
s.servers = append(s.servers, server)
s.startServer(listener, server)
if s.userConfig.HTTPPort > 0 {
httpListener, err := s.newListener(s.userConfig.BindAddress, s.userConfig.HTTPPort, nil)
if err != nil {
return err
}
httpServer := &http.Server{
Handler: s.cacheHandler(httpRedirect(s.Handler())),
ErrorLog: log.New(logger.WriterLevel(logrus.DebugLevel), "", log.LstdFlags),
}
s.servers = append(s.servers, httpServer)
s.startServer(httpListener, httpServer)
}
return nil
}
// Approach taken from letsencrypt, except manglePort is specific to us
func httpRedirect(next http.Handler) http.Handler {
return http.HandlerFunc(
func(rw http.ResponseWriter, r *http.Request) {
if r.Header.Get("x-Forwarded-Proto") == "https" ||
strings.HasPrefix(r.URL.Path, "/ping") ||
strings.HasPrefix(r.URL.Path, "/health") {
next.ServeHTTP(rw, r)
return
}
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(rw, "Use HTTPS", http.StatusBadRequest)
return
}
target := "https://" + manglePort(r.Host) + r.URL.RequestURI()
http.Redirect(rw, r, target, http.StatusFound)
})
}
func manglePort(hostport string) string {
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return hostport
}
portInt, err := strconv.Atoi(port)
if err != nil {
return hostport
}
portInt = ((portInt / 1000) * 1000) + 443
return net.JoinHostPort(host, strconv.Itoa(portInt))
}
func (s *server) startServer(listener net.Listener, server *http.Server) {
go func() {
if err := server.Serve(listener); err != nil {
logrus.Errorf("server on %v returned err: %v", listener.Addr(), err)
}
}()
}
func (s *server) Handler() http.Handler {
return s.userConfig.Handler
}
func (s *server) newListener(ip string, port int, config *tls.Config) (net.Listener, error) {
addr := fmt.Sprintf("%s:%d", ip, port)
l, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
l = tcpKeepAliveListener{l.(*net.TCPListener)}
if config != nil {
l = tls.NewListener(l, config)
}
s.listeners = append(s.listeners, l)
logrus.Info("Listening on ", addr)
return l, nil
}
func stringToCert(certString string) (*tls.Certificate, error) {
parts := strings.Split(certString, "#")
if len(parts) != 2 {
return nil, errors.New("Unable to split cert into two parts")
}
certPart, keyPart := parts[0], parts[1]
keyBytes, err := base64.StdEncoding.DecodeString(keyPart)
if err != nil {
return nil, err
}
key, err := cert.ParsePrivateKeyPEM(keyBytes)
if err != nil {
return nil, err
}
certBytes, err := base64.StdEncoding.DecodeString(certPart)
if err != nil {
return nil, err
}
return &tls.Certificate{
Certificate: [][]byte{certBytes},
PrivateKey: key,
}, nil
}
func certToString(cert *tls.Certificate) (string, error) {
keyType, keyBytes, err := marshalPrivateKey(cert.PrivateKey.(crypto.Signer))
if err != nil {
return "", err
}
privateKeyPemBlock := &pem.Block{
Type: keyType,
Bytes: keyBytes,
}
pemBytes := pem.EncodeToMemory(privateKeyPemBlock)
certString := base64.StdEncoding.EncodeToString(cert.Certificate[0])
keyString := base64.StdEncoding.EncodeToString(pemBytes)
return certString + "#" + keyString, nil
}
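// Added explanatory note (not part of the original row): tcpKeepAliveListener below mirrors
// the unexported helper of the same name in net/http — it switches TCP keep-alives on for
// accepted connections so that connections to vanished peers are eventually torn down.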
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
| {
if len(s.listeners) > 0 {
return nil
}
if err := s.shutdown(); err != nil {
return err
}
if err := s.serveHTTPS(); err != nil {
return err
}
return nil
} | identifier_body |
cli.rs | // Copyright 2015 Axel Rasmussen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::*;
use errno;
use libc::{self, c_int};
use log::debug;
use std::fmt;
use std::io::{self, Read, Write};
use std::mem::MaybeUninit;
/// An alias for std::io::Result.
pub type IoResult<T> = io::Result<T>;
fn to_io_result(ret: c_int) -> IoResult<()> {
match ret {
0 => Ok(()),
_ => Err(io::Error::last_os_error()),
}
}
/// This enum describes high-level terminal flags, in an OS-agnostic way.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum TerminalFlag {
/// A flag indicating that typed characters should be echoed.
Echo,
/// A flag indicating that newlines, specifically, should be echoed.
EchoNewlines,
}
impl TerminalFlag {
fn to_value(&self) -> libc::tcflag_t {
match *self {
TerminalFlag::Echo => libc::ECHO,
TerminalFlag::EchoNewlines => libc::ECHONL,
}
}
}
/// This trait describes an abstract type which represents the attributes of a
/// terminal.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `TerminalAttributes` defined below.
pub trait AbstractTerminalAttributes {
/// Enable a flag in this set of attributes.
fn enable(&mut self, flag: TerminalFlag);
/// Disable a flag in this set of attributes.
fn disable(&mut self, flag: TerminalFlag);
}
/// This is an opaque structure which encapsulates the state / attributes of an
/// interactive terminal. The contents of this structure are OS-specific.
pub struct TerminalAttributes {
inner: libc::termios,
}
impl TerminalAttributes {
fn new(fd: c_int) -> IoResult<Self> {
let mut attrs = MaybeUninit::uninit();
to_io_result(unsafe { libc::tcgetattr(fd, attrs.as_mut_ptr()) })?;
Ok(TerminalAttributes {
inner: unsafe { attrs.assume_init() },
})
}
/// Create a new TerminalAttributes, with an "empty" state (no flags
/// enabled).
pub fn new_empty() -> Self {
TerminalAttributes {
inner: unsafe { MaybeUninit::zeroed().assume_init() },
}
}
fn apply(&self, fd: c_int) -> IoResult<()> {
to_io_result(unsafe { libc::tcsetattr(fd, libc::TCSANOW, &self.inner) })
}
/// Test whether or not the given `TerminalFlag` is currently enabled.
pub fn is_enabled(&self, flag: TerminalFlag) -> bool {
self.inner.c_lflag & flag.to_value() != 0
}
}
impl PartialEq for TerminalAttributes {
fn eq(&self, other: &Self) -> bool {
self.inner.c_iflag == other.inner.c_iflag
&& self.inner.c_oflag == other.inner.c_oflag
&& self.inner.c_cflag == other.inner.c_cflag
&& self.inner.c_lflag == other.inner.c_lflag
&& self.inner.c_line == other.inner.c_line
&& self.inner.c_cc == other.inner.c_cc
&& self.inner.c_ispeed == other.inner.c_ispeed
&& self.inner.c_ospeed == other.inner.c_ospeed
}
}
impl Eq for TerminalAttributes {}
fn debug_format_flag_field(
v: libc::tcflag_t,
fs: &'static [(&'static str, libc::tcflag_t)],
) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
let mut remaining_v: libc::tcflag_t = v;
let mut s = String::new();
for &(fname, fvalue) in fs {
if (v & fvalue) != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}",
match was_empty {
true => "",
false => " | ",
},
fname
)?;
remaining_v &= !v;
}
}
if remaining_v != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}(extra: {:x})",
match was_empty {
true => "",
false => " ",
},
remaining_v
)?;
}
Ok(s)
}
fn debug_format_c_cc_field(c_cc: &[libc::cc_t; 32]) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
const INDICES: &'static [(&'static str, usize)] = &[
("VDISCARD", libc::VDISCARD),
("VEOF", libc::VEOF),
("VEOL", libc::VEOL),
("VEOL2", libc::VEOL2),
("VERASE", libc::VERASE),
("VINTR", libc::VINTR),
("VKILL", libc::VKILL),
("VLNEXT", libc::VLNEXT),
("VMIN", libc::VMIN),
("VQUIT", libc::VQUIT),
("VREPRINT", libc::VREPRINT),
("VSTART", libc::VSTART),
("VSTOP", libc::VSTOP),
("VSUSP", libc::VSUSP),
("VSWTC", libc::VSWTC),
("VTIME", libc::VTIME),
("VWERASE", libc::VWERASE),
];
let mut s = String::new();
for &(name, idx) in INDICES {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}:{}",
match was_empty {
true => "",
false => ", ",
},
name,
c_cc[idx]
)?;
}
Ok(s)
}
impl fmt::Debug for TerminalAttributes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TerminalAttributes")
.field(
"c_iflag",
&debug_format_flag_field(
self.inner.c_iflag,
&[
("IGNBRK", libc::IGNBRK),
("BRKINT", libc::BRKINT),
("IGNPAR", libc::IGNPAR),
("PARMRK", libc::PARMRK),
("INPCK", libc::INPCK),
("ISTRIP", libc::ISTRIP),
("INLCR", libc::INLCR),
("IGNCR", libc::IGNCR),
("ICRNL", libc::ICRNL),
("IXON", libc::IXON),
("IXANY", libc::IXANY),
("IXOFF", libc::IXOFF),
("IMAXBEL", libc::IMAXBEL),
("IUTF8", libc::IUTF8),
],
)?,
)
.field(
"c_oflag",
&debug_format_flag_field(
self.inner.c_oflag,
&[
("OPOST", libc::OPOST),
("OLCUC", libc::OLCUC),
("ONLCR", libc::ONLCR),
("ONOCR", libc::ONOCR),
("ONLRET", libc::ONLRET),
("OFILL", libc::OFILL),
("OFDEL", libc::OFDEL),
("NLDLY", libc::NLDLY),
("CRDLY", libc::CRDLY),
("TABDLY", libc::TABDLY),
("BSDLY", libc::BSDLY),
("VTDLY", libc::VTDLY),
("FFDLY", libc::FFDLY),
],
)?,
)
.field(
"c_cflag",
&debug_format_flag_field(
self.inner.c_cflag,
&[
("CBAUD", libc::CBAUD),
("CBAUDEX", libc::CBAUDEX),
("CSIZE", libc::CSIZE),
("CSTOPB", libc::CSTOPB),
("CREAD", libc::CREAD),
("PARENB", libc::PARENB),
("PARODD", libc::PARODD),
("HUPCL", libc::HUPCL),
("CLOCAL", libc::CLOCAL),
("CIBAUD", libc::CIBAUD),
("CMSPAR", libc::CMSPAR),
("CRTSCTS", libc::CRTSCTS),
],
)?,
)
.field(
"c_lflag",
&debug_format_flag_field(
self.inner.c_lflag,
&[
("ISIG", libc::ISIG),
("ICANON", libc::ICANON),
("ECHO", libc::ECHO),
("ECHOE", libc::ECHOE),
("ECHOK", libc::ECHOK),
("ECHONL", libc::ECHONL),
("ECHOCTL", libc::ECHOCTL),
("ECHOPRT", libc::ECHOPRT),
("ECHOKE", libc::ECHOKE),
("FLUSHO", libc::FLUSHO),
("NOFLSH", libc::NOFLSH),
("TOSTOP", libc::TOSTOP),
("PENDIN", libc::PENDIN),
("IEXTEN", libc::IEXTEN),
],
)?,
)
.field("c_cc", &debug_format_c_cc_field(&self.inner.c_cc)?)
.field("c_ispeed", unsafe { &libc::cfgetispeed(&self.inner) })
.field("c_ospeed", unsafe { &libc::cfgetospeed(&self.inner) })
.finish()
}
}
impl AbstractTerminalAttributes for TerminalAttributes {
fn enable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag |= flag.to_value();
}
fn disable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag &= !flag.to_value();
}
}
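// Added illustration (not part of the original cli.rs row): a hedged sketch of how the flag
// helpers compose on an empty attribute set. This only exercises the in-memory bitflags;
// nothing is applied to a real terminal.
#[cfg(test)]
mod terminal_flag_example {
    use super::*;

    #[test]
    fn enable_then_disable_echo() {
        let mut attrs = TerminalAttributes::new_empty();
        assert!(!attrs.is_enabled(TerminalFlag::Echo));
        attrs.enable(TerminalFlag::Echo);
        assert!(attrs.is_enabled(TerminalFlag::Echo));
        attrs.disable(TerminalFlag::Echo);
        assert!(!attrs.is_enabled(TerminalFlag::Echo));
    }
}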
/// This trait describes an abstract input or output stream.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `Stream` defined below.
pub trait AbstractStream {
/// A type which describes the attributes of this stream / terminal.
type Attributes: AbstractTerminalAttributes + fmt::Debug;
/// Returns whether or not this stream refers to an interactive terminal (a
/// TTY), as opposed to, for example, a pipe.
fn isatty(&self) -> bool;
/// Retrieve the current attributes of this stream / terminal.
fn get_attributes(&self) -> IoResult<Self::Attributes>;
/// Modify this stream's / terminal's attributes to match the given state.
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()>;
/// Return a `Read` for this stream, if reading is supported.
fn as_reader(&self) -> Option<Box<dyn Read>>;
/// Return a `Write` for this stream, if writing is supported.
fn as_writer(&self) -> Option<Box<dyn Write>>;
}
/// Standard input / output streams.
#[derive(Debug)]
pub enum Stream {
/// Standard output.
Stdout,
/// Standard error.
Stderr,
/// Standard input.
Stdin,
}
impl Stream {
fn to_fd(&self) -> c_int |
}
impl AbstractStream for Stream {
type Attributes = TerminalAttributes;
fn isatty(&self) -> bool {
let ret = unsafe { libc::isatty(self.to_fd()) };
let error: i32 = errno::errno().into();
match ret {
1 => true,
0 => match error {
libc::EBADF => false,
libc::ENOTTY => false,
_ => {
debug!(
"Unrecognized isatty errno: {}; assuming {:?} is not a TTY",
error, *self
);
false
}
},
_ => {
debug!(
"Unrecognized isatty return code: {}; assuming {:?} is not a TTY",
ret, *self
);
false
}
}
}
fn get_attributes(&self) -> IoResult<Self::Attributes> {
TerminalAttributes::new(self.to_fd())
}
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()> {
let ret = attributes.apply(self.to_fd());
debug_assert!(ret.is_err() || *attributes == Self::Attributes::new(self.to_fd()).unwrap());
ret
}
fn as_reader(&self) -> Option<Box<dyn Read>> {
match *self {
Stream::Stdin => Some(Box::new(io::stdin())),
_ => None,
}
}
fn as_writer(&self) -> Option<Box<dyn Write>> {
match *self {
Stream::Stdout => Some(Box::new(io::stdout())),
Stream::Stderr => Some(Box::new(io::stderr())),
_ => None,
}
}
}
/// This structure handles a) disabling the echoing of characters typed to
/// `Stdin`, and b) remembering to reset the terminal attributes afterwards
/// (via `Drop`).
struct DisableEcho<'s, S: AbstractStream> {
stream: &'s mut S,
initial_attributes: S::Attributes,
}
impl<'s, S: AbstractStream> DisableEcho<'s, S> {
fn new(stream: &'s mut S) -> Result<Self> {
let initial_attributes = stream.get_attributes()?;
debug!("Initial stream attributes: {:#?}", initial_attributes);
let mut attributes = stream.get_attributes()?;
// Don't echo characters typed to stdin.
attributes.disable(TerminalFlag::Echo);
// But, *do* echo the newline when the user hits ENTER.
attributes.enable(TerminalFlag::EchoNewlines);
debug!("Setting attributes to: {:#?}", attributes);
stream.set_attributes(&attributes)?;
Ok(DisableEcho {
stream: stream,
initial_attributes: initial_attributes,
})
}
}
impl<'s, S: AbstractStream> Drop for DisableEcho<'s, S> {
fn drop(&mut self) {
self.stream
.set_attributes(&self.initial_attributes)
.unwrap();
}
}
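// Added explanatory note (not part of the original row): DisableEcho is an RAII guard.
// Constructing it turns character echo off (while keeping newline echo on, so ENTER still
// advances the line), and dropping it restores the attributes captured at construction, even
// on early return. The unwrap() in drop() means a failure to restore the terminal panics
// rather than leaving it silently misconfigured.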
fn require_isatty<S: AbstractStream>(s: &mut S) -> Result<()> {
if !s.isatty() {
Err(Error::Precondition(format!(
"cannot prompt interactively when the I/O streams are not TTYs"
)))
} else {
Ok(())
}
}
fn build_input_reader<IS: AbstractStream>(
input_stream: &mut IS,
) -> Result<io::BufReader<Box<dyn Read>>> {
require_isatty(input_stream)?;
Ok(io::BufReader::new(match input_stream.as_reader() {
None => {
return Err(Error::Precondition(format!(
"the given input stream must support `Read`"
)))
}
Some(r) => r,
}))
}
fn remove_newline(mut s: String) -> Result<String> {
// Remove the trailing newline (if any - not finding one is an error).
if !s.ends_with('\n') {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected end of input").into());
}
s.pop();
// If this is windows and so there's also a \r, remove that too.
if s.ends_with('\r') {
s.pop();
}
Ok(s)
}
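// Added illustration (not part of the original row): expected behavior of remove_newline on
// typical line input, including the Windows CRLF case.
#[cfg(test)]
mod remove_newline_example {
    use super::*;

    #[test]
    fn strips_trailing_line_endings() {
        assert_eq!(remove_newline("secret\n".to_owned()).unwrap(), "secret");
        assert_eq!(remove_newline("secret\r\n".to_owned()).unwrap(), "secret");
        // No trailing newline means the read hit EOF unexpectedly, which is an error.
        assert!(remove_newline("secret".to_owned()).is_err());
    }
}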
fn prompt_for_string_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
// We have to take the reader as a parameter, since it must be "global",
// even if this function is e.g. called in a loop. Otherwise, because it's
// buffered, we might buffer some input and then discard it.
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
use io::BufRead;
require_isatty(output_stream)?;
// It's fine to construct a separate writer, potentially on each loop
// iteration or whatever, because we flush immediately, and don't do any
// buffering.
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "{}", prompt)?;
// We have to flush so the user sees the prompt immediately.
writer.flush()?;
Ok({
let _disable_echo = match is_sensitive {
false => None,
true => Some(DisableEcho::new(input_stream)?),
};
let mut ret = String::new();
input_reader.read_line(&mut ret)?;
remove_newline(ret)?
})
}
/// Prompt the user for a string (read from the given input stream) using the
/// given output stream (typically standard output or standard error) to display
/// the given prompt message.
///
/// If `is_sensitive` is true, then the user's characters will not be echoed back
/// (e.g. this will behave like a password prompt).
///
/// Note that there are various requirements for the given streams, and this
/// function will return an error if any of them are not met:
///
/// - Both `input_stream` and `output_stream` must be TTYs.
/// - `input_stream` must return a valid `Read` instance.
/// - `output_stream` must return a valid `Write` instance.
pub fn prompt_for_string<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
fn prompt_for_string_confirm_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
loop {
let string = prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
prompt,
is_sensitive,
)?;
if string
== prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
"Confirm: ",
is_sensitive,
)?
{
return Ok(string);
}
}
}
/// Prompt for a string as per `prompt_for_string`, but additionally have the
/// user enter the value again to confirm we get the same answer twice. This is
/// useful for e.g. password entry.
pub fn prompt_for_string_confirm<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
/// MaybePromptedString is a wrapper for getting user input interactively, while
/// also allowing the value to be specified at call time. This is useful e.g.
/// when we want to prompt users interactively, but want to predefine the values
/// in unit tests, or when users can specify a value either interactively or via
/// flags.
pub struct MaybePromptedString {
value: String,
was_provided: bool,
}
impl MaybePromptedString {
/// Construct a new MaybePromptedString, either using the given value or
/// prompting the user interactively with the given options.
pub fn new<IS: AbstractStream, OS: AbstractStream>(
provided: Option<&str>,
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
confirm: bool,
) -> Result<Self> {
let mut input_reader = build_input_reader(&mut input_stream)?;
let prompted: Option<String> = match provided {
None => Some(match confirm {
false => prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)?,
true => prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)?,
}),
Some(_) => None,
};
let was_provided = provided.is_some();
let value = provided.map_or_else(|| prompted.unwrap(), |s| s.to_owned());
Ok(MaybePromptedString {
value: value,
was_provided: was_provided,
})
}
/// Returns true if this string was provided, or false if it is the result
/// of an interactive prompt.
pub fn was_provided(&self) -> bool {
self.was_provided
}
/// "Unwraps" this structure into its underlying string.
pub fn into_inner(self) -> String {
self.value
}
}
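// Added usage sketch (not part of the original row): a value supplied via a hypothetical
// command-line flag suppresses the interactive prompt; otherwise the user is asked twice
// (prompt + confirm) with echo disabled. The prompt strings are illustrative only.
fn example_key_from_flag_or_prompt(flag_value: Option<&str>) -> Result<String> {
    let key = MaybePromptedString::new(
        flag_value,
        Stream::Stdin,
        Stream::Stderr,
        "Master key: ",
        /*is_sensitive=*/ true,
        /*confirm=*/ true,
    )?;
    Ok(key.into_inner())
}

// Added usage sketch (not part of the original row): gating a destructive operation on the
// continue_confirmation helper defined below.
fn example_confirm_overwrite() -> Result<bool> {
    continue_confirmation(
        Stream::Stdin,
        Stream::Stderr,
        "This will overwrite the existing key store. ",
    )
}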
/// Display a "<description> Continue?" confirmation. Returns true if the user
/// replies "yes" (or similar), or false otherwise.
pub fn continue_confirmation<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
description: &str,
) -> Result<bool> {
let mut input_reader = build_input_reader(&mut input_stream)?;
let prompt = format!("{}Continue? [Yes/No] ", description);
loop {
let original_response = prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt.as_str(),
/*is_sensitive=*/ false,
)?;
let response = original_response.trim().to_lowercase();
if response == "y" || response == "yes" {
return Ok(true);
} else if response == "n" || response == "no" {
return Ok(false);
} else {
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "Invalid response '{}'.\n", original_response)?;
// We have to flush so the user sees the prompt immediately.
writer.flush()?;
}
}
}
| {
match *self {
Stream::Stdout => libc::STDOUT_FILENO,
Stream::Stderr => libc::STDERR_FILENO,
Stream::Stdin => libc::STDIN_FILENO,
}
} | identifier_body |
cli.rs | // Copyright 2015 Axel Rasmussen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::*;
use errno;
use libc::{self, c_int};
use log::debug;
use std::fmt;
use std::io::{self, Read, Write};
use std::mem::MaybeUninit;
/// An alias for std::io::Result.
pub type IoResult<T> = io::Result<T>;
fn to_io_result(ret: c_int) -> IoResult<()> {
match ret {
0 => Ok(()),
_ => Err(io::Error::last_os_error()),
}
}
/// This enum describes high-level terminal flags, in an OS-agnostic way.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum TerminalFlag {
/// A flag indicating that typed characters should be echoed.
Echo,
/// A flag indicating that newlines, specifically, should be echoed.
EchoNewlines,
}
impl TerminalFlag {
fn to_value(&self) -> libc::tcflag_t {
match *self {
TerminalFlag::Echo => libc::ECHO,
TerminalFlag::EchoNewlines => libc::ECHONL,
}
}
}
/// This trait describes an abstract type which represents the attributes of a
/// terminal.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `TerminalAttributes` defined below.
pub trait AbstractTerminalAttributes {
/// Enable a flag in this set of attributes.
fn enable(&mut self, flag: TerminalFlag);
/// Disable a flag in this set of attributes.
fn disable(&mut self, flag: TerminalFlag);
}
/// This is an opaque structure which encapsulates the state / attributes of an
/// interactive terminal. The contents of this structure are OS-specific.
pub struct TerminalAttributes {
inner: libc::termios,
}
impl TerminalAttributes {
fn new(fd: c_int) -> IoResult<Self> {
let mut attrs = MaybeUninit::uninit();
to_io_result(unsafe { libc::tcgetattr(fd, attrs.as_mut_ptr()) })?;
Ok(TerminalAttributes {
inner: unsafe { attrs.assume_init() },
})
}
/// Create a new TerminalAttributes, with an "empty" state (no flags
/// enabled).
pub fn new_empty() -> Self {
TerminalAttributes {
inner: unsafe { MaybeUninit::zeroed().assume_init() },
}
}
fn apply(&self, fd: c_int) -> IoResult<()> {
to_io_result(unsafe { libc::tcsetattr(fd, libc::TCSANOW, &self.inner) })
}
/// Test whether or not the given `TerminalFlag` is currently enabled.
pub fn is_enabled(&self, flag: TerminalFlag) -> bool {
self.inner.c_lflag & flag.to_value() != 0
}
}
impl PartialEq for TerminalAttributes {
fn eq(&self, other: &Self) -> bool {
self.inner.c_iflag == other.inner.c_iflag
&& self.inner.c_oflag == other.inner.c_oflag
&& self.inner.c_cflag == other.inner.c_cflag
&& self.inner.c_lflag == other.inner.c_lflag
&& self.inner.c_line == other.inner.c_line
&& self.inner.c_cc == other.inner.c_cc
&& self.inner.c_ispeed == other.inner.c_ispeed
&& self.inner.c_ospeed == other.inner.c_ospeed
}
}
impl Eq for TerminalAttributes {}
fn debug_format_flag_field(
v: libc::tcflag_t,
fs: &'static [(&'static str, libc::tcflag_t)],
) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
let mut remaining_v: libc::tcflag_t = v;
let mut s = String::new();
for &(fname, fvalue) in fs {
if (v & fvalue) != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}",
match was_empty {
true => "",
false => " | ",
},
fname
)?;
remaining_v &= !v;
}
}
if remaining_v != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}(extra: {:x})",
match was_empty {
true => "",
false => " ",
},
remaining_v
)?;
}
Ok(s)
}
fn debug_format_c_cc_field(c_cc: &[libc::cc_t; 32]) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
const INDICES: &'static [(&'static str, usize)] = &[
("VDISCARD", libc::VDISCARD),
("VEOF", libc::VEOF),
("VEOL", libc::VEOL),
("VEOL2", libc::VEOL2),
("VERASE", libc::VERASE),
("VINTR", libc::VINTR),
("VKILL", libc::VKILL),
("VLNEXT", libc::VLNEXT),
("VMIN", libc::VMIN),
("VQUIT", libc::VQUIT),
("VREPRINT", libc::VREPRINT),
("VSTART", libc::VSTART),
("VSTOP", libc::VSTOP),
("VSUSP", libc::VSUSP),
("VSWTC", libc::VSWTC),
("VTIME", libc::VTIME),
("VWERASE", libc::VWERASE),
];
let mut s = String::new();
for &(name, idx) in INDICES {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}:{}",
match was_empty {
true => "",
false => ", ",
},
name,
c_cc[idx]
)?;
}
Ok(s)
}
impl fmt::Debug for TerminalAttributes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TerminalAttributes")
.field(
"c_iflag",
&debug_format_flag_field(
self.inner.c_iflag,
&[
("IGNBRK", libc::IGNBRK),
("BRKINT", libc::BRKINT),
("IGNPAR", libc::IGNPAR),
("PARMRK", libc::PARMRK),
("INPCK", libc::INPCK),
("ISTRIP", libc::ISTRIP),
("INLCR", libc::INLCR),
("IGNCR", libc::IGNCR),
("ICRNL", libc::ICRNL),
("IXON", libc::IXON),
("IXANY", libc::IXANY),
("IXOFF", libc::IXOFF),
("IMAXBEL", libc::IMAXBEL),
("IUTF8", libc::IUTF8),
],
)?,
)
.field(
"c_oflag",
&debug_format_flag_field(
self.inner.c_oflag,
&[
("OPOST", libc::OPOST),
("OLCUC", libc::OLCUC),
("ONLCR", libc::ONLCR),
("ONOCR", libc::ONOCR),
("ONLRET", libc::ONLRET),
("OFILL", libc::OFILL),
("OFDEL", libc::OFDEL),
("NLDLY", libc::NLDLY),
("CRDLY", libc::CRDLY),
("TABDLY", libc::TABDLY),
("BSDLY", libc::BSDLY),
("VTDLY", libc::VTDLY),
("FFDLY", libc::FFDLY),
],
)?,
)
.field(
"c_cflag",
&debug_format_flag_field(
self.inner.c_cflag,
&[
("CBAUD", libc::CBAUD),
("CBAUDEX", libc::CBAUDEX),
("CSIZE", libc::CSIZE),
("CSTOPB", libc::CSTOPB),
("CREAD", libc::CREAD),
("PARENB", libc::PARENB),
("PARODD", libc::PARODD),
("HUPCL", libc::HUPCL),
("CLOCAL", libc::CLOCAL),
("CIBAUD", libc::CIBAUD),
("CMSPAR", libc::CMSPAR),
("CRTSCTS", libc::CRTSCTS),
],
)?,
)
.field(
"c_lflag",
&debug_format_flag_field(
self.inner.c_lflag,
&[
("ISIG", libc::ISIG),
("ICANON", libc::ICANON),
("ECHO", libc::ECHO),
("ECHOE", libc::ECHOE),
("ECHOK", libc::ECHOK),
("ECHONL", libc::ECHONL),
("ECHOCTL", libc::ECHOCTL),
("ECHOPRT", libc::ECHOPRT),
("ECHOKE", libc::ECHOKE),
("FLUSHO", libc::FLUSHO),
("NOFLSH", libc::NOFLSH),
("TOSTOP", libc::TOSTOP),
("PENDIN", libc::PENDIN),
("IEXTEN", libc::IEXTEN),
],
)?,
)
.field("c_cc", &debug_format_c_cc_field(&self.inner.c_cc)?)
.field("c_ispeed", unsafe { &libc::cfgetispeed(&self.inner) })
.field("c_ospeed", unsafe { &libc::cfgetospeed(&self.inner) })
.finish()
}
}
impl AbstractTerminalAttributes for TerminalAttributes {
fn enable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag |= flag.to_value();
}
fn disable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag &= !flag.to_value();
}
}
/// This trait describes an abstract input or output stream.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `Stream` defined below.
pub trait AbstractStream {
/// A type which describes the attributes of this stream / terminal.
type Attributes: AbstractTerminalAttributes + fmt::Debug;
/// Returns whether or not this stream refers to an interactive terminal (a
/// TTY), as opposed to, for example, a pipe.
fn isatty(&self) -> bool;
/// Retrieve the current attributes of this stream / terminal.
fn get_attributes(&self) -> IoResult<Self::Attributes>;
/// Modify this stream's / terminal's attributes to match the given state.
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()>;
/// Return a `Read` for this stream, if reading is supported.
fn as_reader(&self) -> Option<Box<dyn Read>>;
/// Return a `Write` for this stream, if writing is supported.
fn as_writer(&self) -> Option<Box<dyn Write>>;
}
/// Standard input / output streams.
#[derive(Debug)]
pub enum Stream {
/// Standard output.
Stdout,
/// Standard error.
Stderr,
/// Standard input.
Stdin,
}
impl Stream {
fn to_fd(&self) -> c_int {
match *self {
Stream::Stdout => libc::STDOUT_FILENO,
Stream::Stderr => libc::STDERR_FILENO,
Stream::Stdin => libc::STDIN_FILENO,
}
}
}
impl AbstractStream for Stream {
type Attributes = TerminalAttributes;
fn isatty(&self) -> bool {
let ret = unsafe { libc::isatty(self.to_fd()) };
let error: i32 = errno::errno().into();
match ret {
1 => true,
0 => match error {
libc::EBADF => false,
libc::ENOTTY => false,
_ => {
debug!(
"Unrecognized isatty errno: {}; assuming {:?} is not a TTY",
error, *self
);
false
}
},
_ => {
debug!(
"Unrecognized isatty return code: {}; assuming {:?} is not a TTY",
ret, *self
);
false
}
}
}
fn get_attributes(&self) -> IoResult<Self::Attributes> {
TerminalAttributes::new(self.to_fd())
}
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()> {
let ret = attributes.apply(self.to_fd());
debug_assert!(ret.is_err() || *attributes == Self::Attributes::new(self.to_fd()).unwrap());
ret
}
fn as_reader(&self) -> Option<Box<dyn Read>> {
match *self {
Stream::Stdin => Some(Box::new(io::stdin())),
_ => None,
}
}
fn as_writer(&self) -> Option<Box<dyn Write>> {
match *self {
Stream::Stdout => Some(Box::new(io::stdout())),
Stream::Stderr => Some(Box::new(io::stderr())),
_ => None,
}
}
}
/// This structure handles a) disabling the echoing of characters typed to
/// `Stdin`, and b) remembering to reset the terminal attributes afterwards
/// (via `Drop`).
struct DisableEcho<'s, S: AbstractStream> {
stream: &'s mut S,
initial_attributes: S::Attributes,
}
impl<'s, S: AbstractStream> DisableEcho<'s, S> {
fn new(stream: &'s mut S) -> Result<Self> {
let initial_attributes = stream.get_attributes()?;
debug!("Initial stream attributes: {:#?}", initial_attributes);
let mut attributes = stream.get_attributes()?;
// Don't echo characters typed to stdin.
attributes.disable(TerminalFlag::Echo);
// But, *do* echo the newline when the user hits ENTER.
attributes.enable(TerminalFlag::EchoNewlines);
debug!("Setting attributes to: {:#?}", attributes);
stream.set_attributes(&attributes)?;
Ok(DisableEcho {
stream: stream,
initial_attributes: initial_attributes,
})
}
}
impl<'s, S: AbstractStream> Drop for DisableEcho<'s, S> {
fn drop(&mut self) {
self.stream
.set_attributes(&self.initial_attributes)
.unwrap();
}
}
fn require_isatty<S: AbstractStream>(s: &mut S) -> Result<()> {
if !s.isatty() {
Err(Error::Precondition(format!(
"cannot prompt interactively when the I/O streams are not TTYs"
)))
} else {
Ok(())
}
}
fn build_input_reader<IS: AbstractStream>(
input_stream: &mut IS,
) -> Result<io::BufReader<Box<dyn Read>>> {
require_isatty(input_stream)?;
Ok(io::BufReader::new(match input_stream.as_reader() {
None => {
return Err(Error::Precondition(format!(
"the given input stream must support `Read`"
)))
}
Some(r) => r,
}))
}
fn remove_newline(mut s: String) -> Result<String> {
// Remove the trailing newline (if any - not finding one is an error).
if !s.ends_with('\n') {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected end of input").into());
}
s.pop();
// If this is windows and so there's also a \r, remove that too.
if s.ends_with('\r') {
s.pop();
}
Ok(s)
}
fn prompt_for_string_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
// We have to take the reader as a parameter, since it must be "global",
// even if this function is e.g. called in a loop. Otherwise, because it's
// buffered, we might buffer some input and then discard it.
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
use io::BufRead;
require_isatty(output_stream)?;
// It's fine to construct a separate writer, potentially on each loop
// iteration or whatever, because we flush immediately, and don't do any
// buffering.
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "{}", prompt)?;
// We have to flush so the user sees the prompt immediately.
writer.flush()?;
Ok({
let _disable_echo = match is_sensitive {
false => None,
true => Some(DisableEcho::new(input_stream)?),
};
let mut ret = String::new();
input_reader.read_line(&mut ret)?;
remove_newline(ret)?
})
}
/// Prompt the user for a string (read from the given input stream) using the
/// given output stream (typically standard output or standard error) to display
/// the given prompt message.
///
/// If `is_sensitive` is true, then the user's characters will not be echoed back
/// (e.g. this will behave like a password prompt).
///
/// Note that there are various requirements for the given streams, and this
/// function will return an error if any of them are not met:
///
/// - Both `input_stream` and `output_stream` must be TTYs.
/// - `input_stream` must return a valid `Read` instance.
/// - `output_stream` must return a valid `Write` instance.
pub fn | <IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
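// Added usage sketch (not part of the original row): a sensitive prompt on the standard
// streams — read from stdin, write the prompt to stderr, and suppress echo. The prompt text
// is illustrative only.
fn example_ask_for_password() -> Result<String> {
    prompt_for_string(Stream::Stdin, Stream::Stderr, "Password: ", /*is_sensitive=*/ true)
}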
fn prompt_for_string_confirm_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
loop {
let string = prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
prompt,
is_sensitive,
)?;
if string
== prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
"Confirm: ",
is_sensitive,
)?
{
return Ok(string);
}
}
}
/// Prompt for a string as per `prompt_for_string`, but additionally have the
/// user enter the value again to confirm we get the same answer twice. This is
/// useful for e.g. password entry.
pub fn prompt_for_string_confirm<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
/// MaybePromptedString is a wrapper for getting user input interactively, while
/// also allowing the value to be specified at call time. This is useful e.g.
/// when we want to prompt users interactively, but want to predefine the values
/// in unit tests, or when users can specify a value either interactively or via
/// flags.
pub struct MaybePromptedString {
value: String,
was_provided: bool,
}
impl MaybePromptedString {
/// Construct a new MaybePromptedString, either using the given value or
/// prompting the user interactively with the given options.
pub fn new<IS: AbstractStream, OS: AbstractStream>(
provided: Option<&str>,
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
confirm: bool,
) -> Result<Self> {
let mut input_reader = build_input_reader(&mut input_stream)?;
let prompted: Option<String> = match provided {
None => Some(match confirm {
false => prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)?,
true => prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)?,
}),
Some(_) => None,
};
let was_provided = provided.is_some();
let value = provided.map_or_else(|| prompted.unwrap(), |s| s.to_owned());
Ok(MaybePromptedString {
value: value,
was_provided: was_provided,
})
}
/// Returns true if this string was provided, or false if it is the result
/// of an interactive prompt.
pub fn was_provided(&self) -> bool {
self.was_provided
}
/// "Unwraps" this structure into its underlying string.
pub fn into_inner(self) -> String {
self.value
}
}
/// Display a "<description> Continue?" confirmation. Returns true if the user
/// replies "yes" (or similar), or false otherwise.
pub fn continue_confirmation<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
description: &str,
) -> Result<bool> {
let mut input_reader = build_input_reader(&mut input_stream)?;
let prompt = format!("{}Continue? [Yes/No] ", description);
loop {
let original_response = prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt.as_str(),
/*is_sensitive=*/ false,
)?;
let response = original_response.trim().to_lowercase();
if response == "y" || response == "yes" {
return Ok(true);
} else if response == "n" || response == "no" {
return Ok(false);
} else {
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "Invalid response '{}'.\n", original_response)?;
// We have to flush so the user sees the message immediately.
writer.flush()?;
}
}
}
| prompt_for_string | identifier_name |
cli.rs | // Copyright 2015 Axel Rasmussen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::*;
use errno;
use libc::{self, c_int};
use log::debug;
use std::fmt;
use std::io::{self, Read, Write};
use std::mem::MaybeUninit;
/// An alias for std::io::Result.
pub type IoResult<T> = io::Result<T>;
fn to_io_result(ret: c_int) -> IoResult<()> {
match ret {
0 => Ok(()),
_ => Err(io::Error::last_os_error()),
}
}
/// This enum describes high-level terminal flags, in an OS-agnostic way.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum TerminalFlag {
/// A flag indicating that typed characters should be echoed.
Echo,
/// A flag indicating that newlines, specifically, should be echoed.
EchoNewlines,
}
impl TerminalFlag {
fn to_value(&self) -> libc::tcflag_t {
match *self {
TerminalFlag::Echo => libc::ECHO,
TerminalFlag::EchoNewlines => libc::ECHONL,
}
}
}
/// This trait describes an abstract type which describes the attributes of a
/// terminal.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `Stream` defined below.
pub trait AbstractTerminalAttributes {
/// Enable a flag in this set of attributes.
fn enable(&mut self, flag: TerminalFlag);
/// Disable a flag in this set of attributes.
fn disable(&mut self, flag: TerminalFlag);
}
/// This is an opaque structure which encapsulates the state / attributes of an
/// interactive terminal. The contents of this structure are OS-specific.
pub struct TerminalAttributes {
inner: libc::termios,
}
impl TerminalAttributes {
fn new(fd: c_int) -> IoResult<Self> {
let mut attrs = MaybeUninit::uninit();
to_io_result(unsafe { libc::tcgetattr(fd, attrs.as_mut_ptr()) })?;
Ok(TerminalAttributes {
inner: unsafe { attrs.assume_init() },
})
}
/// Create a new TerminalAttributes, with an "empty" state (no flags
/// enabled).
pub fn new_empty() -> Self {
TerminalAttributes {
inner: unsafe { MaybeUninit::zeroed().assume_init() },
}
}
fn apply(&self, fd: c_int) -> IoResult<()> {
to_io_result(unsafe { libc::tcsetattr(fd, libc::TCSANOW, &self.inner) })
}
/// Test whether or not the given `TerminalFlag` is currently enabled.
pub fn is_enabled(&self, flag: TerminalFlag) -> bool {
self.inner.c_lflag & flag.to_value() != 0
}
}
impl PartialEq for TerminalAttributes {
fn eq(&self, other: &Self) -> bool {
self.inner.c_iflag == other.inner.c_iflag | && self.inner.c_oflag == other.inner.c_oflag
&& self.inner.c_cflag == other.inner.c_cflag
&& self.inner.c_lflag == other.inner.c_lflag
&& self.inner.c_line == other.inner.c_line
&& self.inner.c_cc == other.inner.c_cc
&& self.inner.c_ispeed == other.inner.c_ispeed
&& self.inner.c_ospeed == other.inner.c_ospeed
}
}
impl Eq for TerminalAttributes {}
fn debug_format_flag_field(
v: libc::tcflag_t,
fs: &'static [(&'static str, libc::tcflag_t)],
) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
let mut remaining_v: libc::tcflag_t = v;
let mut s = String::new();
for &(fname, fvalue) in fs {
if (v & fvalue) != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}",
match was_empty {
true => "",
false => " | ",
},
fname
)?;
remaining_v &= !fvalue;
}
}
if remaining_v != 0 {
let was_empty = s.is_empty();
write!(
&mut s,
"{}(extra: {:x})",
match was_empty {
true => "",
false => " ",
},
remaining_v
)?;
}
Ok(s)
}
fn debug_format_c_cc_field(c_cc: &[libc::cc_t; 32]) -> std::result::Result<String, fmt::Error> {
use fmt::Write;
const INDICES: &'static [(&'static str, usize)] = &[
("VDISCARD", libc::VDISCARD),
("VEOF", libc::VEOF),
("VEOL", libc::VEOL),
("VEOL2", libc::VEOL2),
("VERASE", libc::VERASE),
("VINTR", libc::VINTR),
("VKILL", libc::VKILL),
("VLNEXT", libc::VLNEXT),
("VMIN", libc::VMIN),
("VQUIT", libc::VQUIT),
("VREPRINT", libc::VREPRINT),
("VSTART", libc::VSTART),
("VSTOP", libc::VSTOP),
("VSUSP", libc::VSUSP),
("VSWTC", libc::VSWTC),
("VTIME", libc::VTIME),
("VWERASE", libc::VWERASE),
];
let mut s = String::new();
for &(name, idx) in INDICES {
let was_empty = s.is_empty();
write!(
&mut s,
"{}{}:{}",
match was_empty {
true => "",
false => ", ",
},
name,
c_cc[idx]
)?;
}
Ok(s)
}
impl fmt::Debug for TerminalAttributes {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TerminalAttributes")
.field(
"c_iflag",
&debug_format_flag_field(
self.inner.c_iflag,
&[
("IGNBRK", libc::IGNBRK),
("BRKINT", libc::BRKINT),
("IGNPAR", libc::IGNPAR),
("PARMRK", libc::PARMRK),
("INPCK", libc::INPCK),
("ISTRIP", libc::ISTRIP),
("INLCR", libc::INLCR),
("IGNCR", libc::IGNCR),
("ICRNL", libc::ICRNL),
("IXON", libc::IXON),
("IXANY", libc::IXANY),
("IXOFF", libc::IXOFF),
("IMAXBEL", libc::IMAXBEL),
("IUTF8", libc::IUTF8),
],
)?,
)
.field(
"c_oflag",
&debug_format_flag_field(
self.inner.c_oflag,
&[
("OPOST", libc::OPOST),
("OLCUC", libc::OLCUC),
("ONLCR", libc::ONLCR),
("ONOCR", libc::ONOCR),
("ONLRET", libc::ONLRET),
("OFILL", libc::OFILL),
("OFDEL", libc::OFDEL),
("NLDLY", libc::NLDLY),
("CRDLY", libc::CRDLY),
("TABDLY", libc::TABDLY),
("BSDLY", libc::BSDLY),
("VTDLY", libc::VTDLY),
("FFDLY", libc::FFDLY),
],
)?,
)
.field(
"c_cflag",
&debug_format_flag_field(
self.inner.c_cflag,
&[
("CBAUD", libc::CBAUD),
("CBAUDEX", libc::CBAUDEX),
("CSIZE", libc::CSIZE),
("CSTOPB", libc::CSTOPB),
("CREAD", libc::CREAD),
("PARENB", libc::PARENB),
("PARODD", libc::PARODD),
("HUPCL", libc::HUPCL),
("CLOCAL", libc::CLOCAL),
("CIBAUD", libc::CIBAUD),
("CMSPAR", libc::CMSPAR),
("CRTSCTS", libc::CRTSCTS),
],
)?,
)
.field(
"c_lflag",
&debug_format_flag_field(
self.inner.c_lflag,
&[
("ISIG", libc::ISIG),
("ICANON", libc::ICANON),
("ECHO", libc::ECHO),
("ECHOE", libc::ECHOE),
("ECHOK", libc::ECHOK),
("ECHONL", libc::ECHONL),
("ECHOCTL", libc::ECHOCTL),
("ECHOPRT", libc::ECHOPRT),
("ECHOKE", libc::ECHOKE),
("FLUSHO", libc::FLUSHO),
("NOFLSH", libc::NOFLSH),
("TOSTOP", libc::TOSTOP),
("PENDIN", libc::PENDIN),
("IEXTEN", libc::IEXTEN),
],
)?,
)
.field("c_cc", &debug_format_c_cc_field(&self.inner.c_cc)?)
.field("c_ispeed", unsafe { &libc::cfgetispeed(&self.inner) })
.field("c_ospeed", unsafe { &libc::cfgetospeed(&self.inner) })
.finish()
}
}
impl AbstractTerminalAttributes for TerminalAttributes {
fn enable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag |= flag.to_value();
}
fn disable(&mut self, flag: TerminalFlag) {
self.inner.c_lflag &= !flag.to_value();
}
}
/// This trait describes an abstract input or output stream.
///
/// This trait primarily exists for testing purposes. In almost all cases, users
/// will instead just use the concrete type `Stream` defined below.
pub trait AbstractStream {
/// A type which describes the attributes of this stream / terminal.
type Attributes: AbstractTerminalAttributes + fmt::Debug;
/// Returns whether or not this stream refers to an interactive terminal (a
/// TTY), as opposed to, for example, a pipe.
fn isatty(&self) -> bool;
/// Retrieve the current attributes of this stream / terminal.
fn get_attributes(&self) -> IoResult<Self::Attributes>;
/// Modify this stream's / terminal's attributes to match the given state.
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()>;
/// Return a `Read` for this stream, if reading is supported.
fn as_reader(&self) -> Option<Box<dyn Read>>;
/// Return a `Write` for this stream, if writing is supported.
fn as_writer(&self) -> Option<Box<dyn Write>>;
}
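// Editor's note: the following is an illustrative sketch, not part of the
// original module. It shows the kind of in-memory test double this trait is
// meant to enable; the type name and fields are assumptions for demonstration.
#[allow(dead_code)]
struct ScriptedStream {
    // Bytes that `as_reader` will serve up, simulating typed input.
    input: Vec<u8>,
}
impl AbstractStream for ScriptedStream {
    type Attributes = TerminalAttributes;
    fn isatty(&self) -> bool {
        // Pretend to be a TTY so the prompting helpers accept this stream.
        true
    }
    fn get_attributes(&self) -> IoResult<Self::Attributes> {
        Ok(TerminalAttributes::new_empty())
    }
    fn set_attributes(&mut self, _attributes: &Self::Attributes) -> IoResult<()> {
        Ok(())
    }
    fn as_reader(&self) -> Option<Box<dyn Read>> {
        Some(Box::new(io::Cursor::new(self.input.clone())))
    }
    fn as_writer(&self) -> Option<Box<dyn Write>> {
        // Discard output; a real test double might capture it instead.
        Some(Box::new(io::sink()))
    }
}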
/// Standard input / output streams.
#[derive(Debug)]
pub enum Stream {
/// Standard output.
Stdout,
/// Standard error.
Stderr,
/// Standard input.
Stdin,
}
impl Stream {
fn to_fd(&self) -> c_int {
match *self {
Stream::Stdout => libc::STDOUT_FILENO,
Stream::Stderr => libc::STDERR_FILENO,
Stream::Stdin => libc::STDIN_FILENO,
}
}
}
impl AbstractStream for Stream {
type Attributes = TerminalAttributes;
fn isatty(&self) -> bool {
let ret = unsafe { libc::isatty(self.to_fd()) };
let error: i32 = errno::errno().into();
match ret {
1 => true,
0 => match error {
libc::EBADF => false,
libc::ENOTTY => false,
_ => {
debug!(
"Unrecognized isatty errno: {}; assuming {:?} is not a TTY",
error, *self
);
false
}
},
_ => {
debug!(
"Unrecognized isatty return code: {}; assuming {:?} is not a TTY",
ret, *self
);
false
}
}
}
fn get_attributes(&self) -> IoResult<Self::Attributes> {
TerminalAttributes::new(self.to_fd())
}
fn set_attributes(&mut self, attributes: &Self::Attributes) -> IoResult<()> {
let ret = attributes.apply(self.to_fd());
debug_assert!(ret.is_err() || *attributes == Self::Attributes::new(self.to_fd()).unwrap());
ret
}
fn as_reader(&self) -> Option<Box<dyn Read>> {
match *self {
Stream::Stdin => Some(Box::new(io::stdin())),
_ => None,
}
}
fn as_writer(&self) -> Option<Box<dyn Write>> {
match *self {
Stream::Stdout => Some(Box::new(io::stdout())),
Stream::Stderr => Some(Box::new(io::stderr())),
_ => None,
}
}
}
/// This structure handles a) disabling the echoing of characters typed to
/// `Stdin`, and b) remembering to reset the terminal attributes afterwards
/// (via `Drop`).
struct DisableEcho<'s, S: AbstractStream> {
stream: &'s mut S,
initial_attributes: S::Attributes,
}
impl<'s, S: AbstractStream> DisableEcho<'s, S> {
fn new(stream: &'s mut S) -> Result<Self> {
let initial_attributes = stream.get_attributes()?;
debug!("Initial stream attributes: {:#?}", initial_attributes);
let mut attributes = stream.get_attributes()?;
// Don't echo characters typed to stdin.
attributes.disable(TerminalFlag::Echo);
// But, *do* echo the newline when the user hits ENTER.
attributes.enable(TerminalFlag::EchoNewlines);
debug!("Setting attributes to: {:#?}", attributes);
stream.set_attributes(&attributes)?;
Ok(DisableEcho {
stream: stream,
initial_attributes: initial_attributes,
})
}
}
impl<'s, S: AbstractStream> Drop for DisableEcho<'s, S> {
fn drop(&mut self) {
self.stream
.set_attributes(&self.initial_attributes)
.unwrap();
}
}
fn require_isatty<S: AbstractStream>(s: &mut S) -> Result<()> {
if !s.isatty() {
Err(Error::Precondition(format!(
"cannot prompt interactively when the I/O streams are not TTYs"
)))
} else {
Ok(())
}
}
fn build_input_reader<IS: AbstractStream>(
input_stream: &mut IS,
) -> Result<io::BufReader<Box<dyn Read>>> {
require_isatty(input_stream)?;
Ok(io::BufReader::new(match input_stream.as_reader() {
None => {
return Err(Error::Precondition(format!(
"the given input stream must support `Read`"
)))
}
Some(r) => r,
}))
}
fn remove_newline(mut s: String) -> Result<String> {
// Remove the trailing newline (if any - not finding one is an error).
if !s.ends_with('\n') {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "unexpected end of input").into());
}
s.pop();
// If this is Windows (and so there's also a \r), remove that too.
if s.ends_with('\r') {
s.pop();
}
Ok(s)
}
fn prompt_for_string_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
// We have to take the reader as a parameter, since it must be "global",
// even if this function is e.g. called in a loop. Otherwise, because it's
// buffered, we might buffer some input and then discard it.
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
use io::BufRead;
require_isatty(output_stream)?;
// It's fine to construct a separate writer, potentially on each loop
// iteration or whatever, because we flush immediately, and don't do any
// buffering.
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "{}", prompt)?;
// We have to flush so the user sees the prompt immediately.
writer.flush()?;
Ok({
let _disable_echo = match is_sensitive {
false => None,
true => Some(DisableEcho::new(input_stream)?),
};
let mut ret = String::new();
input_reader.read_line(&mut ret)?;
remove_newline(ret)?
})
}
/// Prompt the user for a string (read from the given input stream) using the
/// given output stream (typically standard output or standard error) to display
/// the given prompt message.
///
/// If `is_sensitive` is true, then the user's characters will not be echoed back
/// (e.g. this will behave like a password prompt).
///
/// Note that there are various requirements for the given streams, and this
/// function will return an error if any of them are not met:
///
/// - Both `input_stream` and `output_stream` must be TTYs.
/// - `input_stream` must return a valid `Read` instance.
/// - `output_stream` must return a valid `Write` instance.
pub fn prompt_for_string<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
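// Editor's note: illustrative usage sketch, not part of the original module.
// Assuming the process is attached to a terminal, a caller might read a
// password (with echo disabled) like this; the function name is hypothetical.
#[allow(dead_code)]
fn example_prompt_for_password() -> Result<String> {
    prompt_for_string(
        Stream::Stdin,
        Stream::Stderr,
        "Password: ",
        /*is_sensitive=*/ true,
    )
}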
fn prompt_for_string_confirm_impl<IS: AbstractStream, OS: AbstractStream>(
input_stream: &mut IS,
input_reader: &mut io::BufReader<Box<dyn Read>>,
output_stream: &mut OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
loop {
let string = prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
prompt,
is_sensitive,
)?;
if string
== prompt_for_string_impl(
input_stream,
input_reader,
output_stream,
"Confirm: ",
is_sensitive,
)?
{
return Ok(string);
}
}
}
/// Prompt for a string as per `prompt_for_string`, but additionally have the
/// user enter the value again to confirm we get the same answer twice. This is
/// useful for e.g. password entry.
pub fn prompt_for_string_confirm<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
) -> Result<String> {
let mut input_reader = build_input_reader(&mut input_stream)?;
prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)
}
/// MaybePromptedString is a wrapper for getting user input interactively, while
/// also allowing the value to be specified at call time. This is useful e.g.
/// when we want to prompt users interactively, but want to predefine the values
/// in unit tests, or when users can specify a value either interactively or via
/// flags.
pub struct MaybePromptedString {
value: String,
was_provided: bool,
}
impl MaybePromptedString {
/// Construct a new MaybePromptedString, either using the given value or
/// prompting the user interactively with the given options.
pub fn new<IS: AbstractStream, OS: AbstractStream>(
provided: Option<&str>,
mut input_stream: IS,
mut output_stream: OS,
prompt: &str,
is_sensitive: bool,
confirm: bool,
) -> Result<Self> {
let mut input_reader = build_input_reader(&mut input_stream)?;
let prompted: Option<String> = match provided {
None => Some(match confirm {
false => prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)?,
true => prompt_for_string_confirm_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt,
is_sensitive,
)?,
}),
Some(_) => None,
};
let was_provided = provided.is_some();
let value = provided.map_or_else(|| prompted.unwrap(), |s| s.to_owned());
Ok(MaybePromptedString {
value: value,
was_provided: was_provided,
})
}
/// Returns true if this string was provided, or false if it is the result
/// of an interactive prompt.
pub fn was_provided(&self) -> bool {
self.was_provided
}
/// "Unwraps" this structure into its underlying string.
pub fn into_inner(self) -> String {
self.value
}
}
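// Editor's note: illustrative usage sketch, not part of the original module.
// A command might accept an optional flag value and fall back to a confirmed
// interactive prompt; the function and flag names here are hypothetical.
#[allow(dead_code)]
fn example_password_from_flag_or_prompt(flag_value: Option<&str>) -> Result<String> {
    let password = MaybePromptedString::new(
        flag_value,
        Stream::Stdin,
        Stream::Stderr,
        "Password: ",
        /*is_sensitive=*/ true,
        /*confirm=*/ true,
    )?;
    Ok(password.into_inner())
}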
/// Display a "<description> Continue?" confirmation. Returns true if the user
/// replies "yes" (or similar), or false otherwise.
pub fn continue_confirmation<IS: AbstractStream, OS: AbstractStream>(
mut input_stream: IS,
mut output_stream: OS,
description: &str,
) -> Result<bool> {
let mut input_reader = build_input_reader(&mut input_stream)?;
let prompt = format!("{}Continue? [Yes/No] ", description);
loop {
let original_response = prompt_for_string_impl(
&mut input_stream,
&mut input_reader,
&mut output_stream,
prompt.as_str(),
/*is_sensitive=*/ false,
)?;
let response = original_response.trim().to_lowercase();
if response == "y" || response == "yes" {
return Ok(true);
} else if response == "n" || response == "no" {
return Ok(false);
} else {
let mut writer = match output_stream.as_writer() {
None => {
return Err(Error::Precondition(format!(
"the given output stream must support `Write`"
)))
}
Some(w) => w,
};
write!(writer, "Invalid response '{}'.\n", original_response)?;
// We have to flush so the user sees the message immediately.
writer.flush()?;
}
}
} | random_line_split | |
helmrepoutils.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"archive/tar"
"compress/gzip"
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
appv1alpha1 "github.com/IBM/multicloud-operators-subscription-release/pkg/apis/app/v1alpha1"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)
var log = logf.Log.WithName("utils")
//GetHelmRepoClient returns an *http.Client to access the helm repo
func GetHelmRepoClient(parentNamespace string, configMap *corev1.ConfigMap) (*http.Client, error) {
srLogger := log.WithValues("package", "utils", "method", "GetHelmRepoClient")
httpClient := http.DefaultClient
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: false,
},
}
if configMap != nil {
configData := configMap.Data
srLogger.Info("ConfigRef retrieved", "configMap.Data", configData)
if configData["insecureSkipVerify"] != "" {
b, err := strconv.ParseBool(configData["insecureSkipVerify"])
if err != nil {
if errors.IsNotFound(err) {
return nil, nil
}
srLogger.Error(err, "Unable to parse", "insecureSkipVerify", configData["insecureSkipVerify"])
return nil, err
}
srLogger.Info("Set InsecureSkipVerify", "insecureSkipVerify", b)
transport.TLSClientConfig.InsecureSkipVerify = b
} else {
srLogger.Info("insecureSkipVerify is not specified")
}
} else {
srLogger.Info("configMap is nil")
}
httpClient.Transport = transport
srLogger.Info("InsecureSkipVerify equal", "InsecureSkipVerify", transport.TLSClientConfig.InsecureSkipVerify)
return httpClient, nil
}
//GetConfigMap searches for the config map containing the helm repo client configuration.
func GetConfigMap(client client.Client, parentNamespace string, configMapRef *corev1.ObjectReference) (configMap *corev1.ConfigMap, err error) {
srLogger := log.WithValues("package", "utils", "method", "getConfigMap")
if configMapRef != nil {
srLogger.Info("Retrieve configMap ", "parentNamespace", parentNamespace, "configMapRef.Name", configMapRef.Name)
ns := configMapRef.Namespace
if ns == "" {
ns = parentNamespace
}
configMap = &corev1.ConfigMap{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: configMapRef.Name}, configMap)
if err != nil {
if errors.IsNotFound(err) {
srLogger.Error(err, "ConfigMap not found ", "Name:", configMapRef.Name, " on namespace: ", ns)
return nil, nil
}
srLogger.Error(err, "Failed to get configMap ", "Name:", configMapRef.Name, " on namespace: ", ns)
return nil, err
}
srLogger.Info("ConfigMap found ", "Name:", configMapRef.Name, " on namespace: ", ns)
} else {
srLogger.Info("no configMapRef defined ", "parentNamespace", parentNamespace)
}
return configMap, err
}
//GetSecret returns the secret to access the helm-repo
func | (client client.Client, parentNamespace string, secretRef *corev1.ObjectReference) (secret *corev1.Secret, err error) {
srLogger := log.WithValues("package", "utils", "method", "getSecret")
if secretRef != nil {
srLogger.Info("Retreive secret", "parentNamespace", parentNamespace, "secretRef", secretRef)
ns := secretRef.Namespace
if ns == "" {
ns = parentNamespace
}
secret = &corev1.Secret{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: secretRef.Name}, secret)
if err != nil {
srLogger.Error(err, "Failed to get secret ", "Name:", secretRef.Name, " on namespace: ", secretRef.Namespace)
return nil, err
}
srLogger.Info("Secret found ", "Name:", secretRef.Name, " on namespace: ", secretRef.Namespace)
} else {
srLogger.Info("No secret defined", "parentNamespace", parentNamespace)
}
return secret, err
}
//DownloadChart downloads a chart into the chartsDir, from either a Helm repo or GitHub depending on the source type.
func DownloadChart(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
switch strings.ToLower(string(s.Spec.Source.SourceType)) {
case string(appv1alpha1.HelmRepoSourceType):
return DownloadChartFromHelmRepo(configMap, secret, chartsDir, s)
case string(appv1alpha1.GitHubSourceType):
return DownloadChartFromGitHub(configMap, secret, chartsDir, s)
default:
return "", fmt.Errorf("SourceType '%s' unsupported", s.Spec.Source.SourceType)
}
}
//DownloadChartFromGitHub downloads a chart into the chartsDir
func DownloadChartFromGitHub(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
srLogger := log.WithValues("HelmRelease.Namespace", s.Namespace, "SubscriptionRelease.Name", s.Name)
if s.Spec.Source.GitHub == nil {
err := fmt.Errorf("GitHub type but Spec.GitHub is not defined")
return "", err
}
if _, err := os.Stat(chartsDir); os.IsNotExist(err) {
err := os.MkdirAll(chartsDir, 0755)
if err != nil {
srLogger.Error(err, "Unable to create chartDir: ", "chartsDir", chartsDir)
return "", err
}
}
destRepo := filepath.Join(chartsDir, s.Spec.ReleaseName, s.Namespace, s.Spec.ChartName)
for _, url := range s.Spec.Source.GitHub.Urls {
options := &git.CloneOptions{
URL: url,
Depth: 1,
SingleBranch: true,
RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
}
if secret != nil && secret.Data != nil {
srLogger.Info("Add credentials")
options.Auth = &githttp.BasicAuth{
Username: string(secret.Data["user"]),
Password: string(secret.Data["password"]),
}
}
if s.Spec.Source.GitHub.Branch == "" {
options.ReferenceName = plumbing.Master
} else {
options.ReferenceName = plumbing.ReferenceName(s.Spec.Source.GitHub.Branch)
}
os.RemoveAll(chartDir)
_, err = git.PlainClone(destRepo, false, options)
if err != nil {
os.RemoveAll(destRepo)
srLogger.Error(err, "Clone failed", "url", url)
continue
}
}
if err != nil {
srLogger.Error(err, "All urls failed")
}
chartDir = filepath.Join(destRepo, s.Spec.Source.GitHub.ChartPath)
return chartDir, err
}
//DownloadChartFromHelmRepo downloads a chart into the chartsDir
func DownloadChartFromHelmRepo(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
srLogger := log.WithValues("HelmRelease.Namespace", s.Namespace, "SubscriptionRelease.Name", s.Name)
if s.Spec.Source.HelmRepo == nil {
err := fmt.Errorf("HelmRepo type but Spec.HelmRepo is not defined")
return "", err
}
if _, err := os.Stat(chartsDir); os.IsNotExist(err) {
err := os.MkdirAll(chartsDir, 0755)
if err != nil {
srLogger.Error(err, "Unable to create chartDir: ", "chartsDir", chartsDir)
return "", err
}
}
httpClient, err := GetHelmRepoClient(s.Namespace, configMap)
if err != nil {
srLogger.Error(err, "Failed to create httpClient ", "sr.Spec.SecretRef.Name", s.Spec.SecretRef.Name)
return "", err
}
var downloadErr error
for _, urlelem := range s.Spec.Source.HelmRepo.Urls {
var URLP *url.URL
URLP, downloadErr = url.Parse(urlelem)
if downloadErr != nil {
srLogger.Error(downloadErr, "url", urlelem)
continue
}
fileName := filepath.Base(URLP.Path)
// Create the file
chartZip := filepath.Join(chartsDir, fileName)
if _, err := os.Stat(chartZip); os.IsNotExist(err) {
var req *http.Request
req, downloadErr = http.NewRequest(http.MethodGet, urlelem, nil)
if downloadErr != nil {
srLogger.Error(downloadErr, "Can not build request: ", "urlelem", urlelem)
continue
}
if secret != nil && secret.Data != nil {
req.SetBasicAuth(string(secret.Data["user"]), string(secret.Data["password"]))
}
var resp *http.Response
resp, downloadErr = httpClient.Do(req)
if downloadErr != nil {
srLogger.Error(downloadErr, "Http request failed: ", "urlelem", urlelem)
continue
}
srLogger.Info("Get suceeded: ", "urlelem", urlelem)
defer resp.Body.Close()
var out *os.File
out, downloadErr = os.Create(chartZip)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to create: ", "chartZip", chartZip)
continue
}
defer out.Close()
// Write the body to file
_, downloadErr = io.Copy(out, resp.Body)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to copy body: ", "chartZip", chartZip)
continue
}
}
var r *os.File
r, downloadErr = os.Open(chartZip)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to open: ", "chartZip", chartZip)
continue
}
chartDirUnzip := filepath.Join(chartsDir, s.Spec.ReleaseName, s.Namespace)
chartDir = filepath.Join(chartDirUnzip, s.Spec.ChartName)
//Clean before untar
os.RemoveAll(chartDirUnzip)
downloadErr = Untar(chartDirUnzip, r)
if downloadErr != nil {
//Remove zip because failed to untar and so probably corrupted
os.RemoveAll(chartZip)
srLogger.Error(downloadErr, "Failed to unzip: ", "chartZip", chartZip)
continue
}
}
return chartDir, downloadErr
}
//Untar untars the reader into the dst directory
func Untar(dst string, r io.Reader) error {
srLogger := log.WithValues("destination", dst)
gzr, err := gzip.NewReader(r)
if err != nil {
srLogger.Error(err, "")
return err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
switch {
// if no more files are found return
case err == io.EOF:
return nil
// return any other error
case err != nil:
srLogger.Error(err, "")
return err
// if the header is nil, just skip it (not sure how this happens)
case header == nil:
continue
}
// the target location where the dir/file should be created
target := filepath.Join(dst, header.Name)
// the following switch could also be done using fi.Mode(); not sure if there is
// a benefit of using one vs. the other.
// fi := header.FileInfo()
// check the file type
switch header.Typeflag {
// if it's a dir and it doesn't exist, create it
case tar.TypeDir:
if _, err := os.Stat(target); err != nil {
if err := os.MkdirAll(target, 0755); err != nil {
srLogger.Error(err, "")
return err
}
}
// if it's a file create it
case tar.TypeReg:
dir := filepath.Dir(target)
if _, err := os.Stat(dir); err != nil {
if err := os.MkdirAll(dir, 0755); err != nil {
srLogger.Error(err, "")
return err
}
}
f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
if err != nil {
srLogger.Error(err, "")
return err
}
// copy over contents
if _, err := io.Copy(f, tr); err != nil {
srLogger.Error(err, "")
return err
}
// manually close here after each file operation; deferring would cause each file close
// to wait until all operations have completed.
f.Close()
}
}
}
| GetSecret | identifier_name |
helmrepoutils.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"archive/tar"
"compress/gzip"
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
appv1alpha1 "github.com/IBM/multicloud-operators-subscription-release/pkg/apis/app/v1alpha1"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)
var log = logf.Log.WithName("utils")
//GetHelmRepoClient returns an *http.Client to access the helm repo
func GetHelmRepoClient(parentNamespace string, configMap *corev1.ConfigMap) (*http.Client, error) {
srLogger := log.WithValues("package", "utils", "method", "GetHelmRepoClient")
httpClient := http.DefaultClient
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: false,
},
}
if configMap != nil {
configData := configMap.Data
srLogger.Info("ConfigRef retrieved", "configMap.Data", configData)
if configData["insecureSkipVerify"] != "" {
b, err := strconv.ParseBool(configData["insecureSkipVerify"])
if err != nil {
if errors.IsNotFound(err) {
return nil, nil
}
srLogger.Error(err, "Unable to parse", "insecureSkipVerify", configData["insecureSkipVerify"])
return nil, err
}
srLogger.Info("Set InsecureSkipVerify", "insecureSkipVerify", b)
transport.TLSClientConfig.InsecureSkipVerify = b
} else {
srLogger.Info("insecureSkipVerify is not specified")
}
} else {
srLogger.Info("configMap is nil")
}
httpClient.Transport = transport
srLogger.Info("InsecureSkipVerify equal", "InsecureSkipVerify", transport.TLSClientConfig.InsecureSkipVerify)
return httpClient, nil
}
//GetConfigMap searches for the config map containing the helm repo client configuration.
func GetConfigMap(client client.Client, parentNamespace string, configMapRef *corev1.ObjectReference) (configMap *corev1.ConfigMap, err error) {
srLogger := log.WithValues("package", "utils", "method", "getConfigMap")
if configMapRef != nil {
srLogger.Info("Retrieve configMap ", "parentNamespace", parentNamespace, "configMapRef.Name", configMapRef.Name)
ns := configMapRef.Namespace
if ns == "" {
ns = parentNamespace
}
configMap = &corev1.ConfigMap{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: configMapRef.Name}, configMap)
if err != nil {
if errors.IsNotFound(err) {
srLogger.Error(err, "ConfigMap not found ", "Name:", configMapRef.Name, " on namespace: ", ns)
return nil, nil
}
srLogger.Error(err, "Failed to get configMap ", "Name:", configMapRef.Name, " on namespace: ", ns)
return nil, err
}
srLogger.Info("ConfigMap found ", "Name:", configMapRef.Name, " on namespace: ", ns)
} else {
srLogger.Info("no configMapRef defined ", "parentNamespace", parentNamespace)
}
return configMap, err
}
//GetSecret returns the secret to access the helm-repo
func GetSecret(client client.Client, parentNamespace string, secretRef *corev1.ObjectReference) (secret *corev1.Secret, err error) {
srLogger := log.WithValues("package", "utils", "method", "getSecret")
if secretRef != nil {
srLogger.Info("Retreive secret", "parentNamespace", parentNamespace, "secretRef", secretRef)
ns := secretRef.Namespace
if ns == "" {
ns = parentNamespace
}
secret = &corev1.Secret{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: secretRef.Name}, secret)
if err != nil {
srLogger.Error(err, "Failed to get secret ", "Name:", secretRef.Name, " on namespace: ", secretRef.Namespace)
return nil, err
}
srLogger.Info("Secret found ", "Name:", secretRef.Name, " on namespace: ", secretRef.Namespace)
} else {
srLogger.Info("No secret defined", "parentNamespace", parentNamespace)
}
return secret, err
}
//DownloadChart downloads a chart into the chartsDir, from either a Helm repo or GitHub depending on the source type.
func DownloadChart(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
switch strings.ToLower(string(s.Spec.Source.SourceType)) {
case string(appv1alpha1.HelmRepoSourceType):
return DownloadChartFromHelmRepo(configMap, secret, chartsDir, s)
case string(appv1alpha1.GitHubSourceType):
return DownloadChartFromGitHub(configMap, secret, chartsDir, s)
default:
return "", fmt.Errorf("SourceType '%s' unsupported", s.Spec.Source.SourceType)
}
}
//DownloadChartFromGitHub downloads a chart into the chartsDir
func DownloadChartFromGitHub(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
srLogger := log.WithValues("HelmRelease.Namespace", s.Namespace, "SubscriptionRelease.Name", s.Name)
if s.Spec.Source.GitHub == nil {
err := fmt.Errorf("GitHub type but Spec.GitHub is not defined")
return "", err
}
if _, err := os.Stat(chartsDir); os.IsNotExist(err) {
err := os.MkdirAll(chartsDir, 0755)
if err != nil {
srLogger.Error(err, "Unable to create chartDir: ", "chartsDir", chartsDir)
return "", err
}
}
destRepo := filepath.Join(chartsDir, s.Spec.ReleaseName, s.Namespace, s.Spec.ChartName)
for _, url := range s.Spec.Source.GitHub.Urls {
options := &git.CloneOptions{
URL: url,
Depth: 1,
SingleBranch: true,
RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
}
if secret != nil && secret.Data != nil {
srLogger.Info("Add credentials")
options.Auth = &githttp.BasicAuth{
Username: string(secret.Data["user"]),
Password: string(secret.Data["password"]),
}
}
if s.Spec.Source.GitHub.Branch == "" {
options.ReferenceName = plumbing.Master
} else {
options.ReferenceName = plumbing.ReferenceName(s.Spec.Source.GitHub.Branch)
}
os.RemoveAll(chartDir)
_, err = git.PlainClone(destRepo, false, options)
if err != nil {
os.RemoveAll(destRepo)
srLogger.Error(err, "Clone failed", "url", url)
continue
}
}
if err != nil {
srLogger.Error(err, "All urls failed")
}
chartDir = filepath.Join(destRepo, s.Spec.Source.GitHub.ChartPath)
return chartDir, err
}
//DownloadChartFromHelmRepo downloads a chart into the charsDir | srLogger := log.WithValues("HelmRelease.Namespace", s.Namespace, "SubscrptionRelease.Name", s.Name)
if s.Spec.Source.HelmRepo == nil {
err := fmt.Errorf("HelmRepo type but Spec.HelmRepo is not defined")
return "", err
}
if _, err := os.Stat(chartsDir); os.IsNotExist(err) {
err := os.MkdirAll(chartsDir, 0755)
if err != nil {
srLogger.Error(err, "Unable to create chartDir: ", "chartsDir", chartsDir)
return "", err
}
}
httpClient, err := GetHelmRepoClient(s.Namespace, configMap)
if err != nil {
srLogger.Error(err, "Failed to create httpClient ", "sr.Spec.SecretRef.Name", s.Spec.SecretRef.Name)
return "", err
}
var downloadErr error
for _, urlelem := range s.Spec.Source.HelmRepo.Urls {
var URLP *url.URL
URLP, downloadErr = url.Parse(urlelem)
if downloadErr != nil {
srLogger.Error(downloadErr, "url", urlelem)
continue
}
fileName := filepath.Base(URLP.Path)
// Create the file
chartZip := filepath.Join(chartsDir, fileName)
if _, err := os.Stat(chartZip); os.IsNotExist(err) {
var req *http.Request
req, downloadErr = http.NewRequest(http.MethodGet, urlelem, nil)
if downloadErr != nil {
srLogger.Error(downloadErr, "Can not build request: ", "urlelem", urlelem)
continue
}
if secret != nil && secret.Data != nil {
req.SetBasicAuth(string(secret.Data["user"]), string(secret.Data["password"]))
}
var resp *http.Response
resp, downloadErr = httpClient.Do(req)
if downloadErr != nil {
srLogger.Error(downloadErr, "Http request failed: ", "urlelem", urlelem)
continue
}
srLogger.Info("Get suceeded: ", "urlelem", urlelem)
defer resp.Body.Close()
var out *os.File
out, downloadErr = os.Create(chartZip)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to create: ", "chartZip", chartZip)
continue
}
defer out.Close()
// Write the body to file
_, downloadErr = io.Copy(out, resp.Body)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to copy body: ", "chartZip", chartZip)
continue
}
}
var r *os.File
r, downloadErr = os.Open(chartZip)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to open: ", "chartZip", chartZip)
continue
}
chartDirUnzip := filepath.Join(chartsDir, s.Spec.ReleaseName, s.Namespace)
chartDir = filepath.Join(chartDirUnzip, s.Spec.ChartName)
//Clean before untar
os.RemoveAll(chartDirUnzip)
downloadErr = Untar(chartDirUnzip, r)
if downloadErr != nil {
//Remove zip because failed to untar and so probably corrupted
os.RemoveAll(chartZip)
srLogger.Error(downloadErr, "Failed to unzip: ", "chartZip", chartZip)
continue
}
}
return chartDir, downloadErr
}
//Untar untars the reader into the dst directory
func Untar(dst string, r io.Reader) error {
srLogger := log.WithValues("destination", dst)
gzr, err := gzip.NewReader(r)
if err != nil {
srLogger.Error(err, "")
return err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
switch {
// if no more files are found return
case err == io.EOF:
return nil
// return any other error
case err != nil:
srLogger.Error(err, "")
return err
// if the header is nil, just skip it (not sure how this happens)
case header == nil:
continue
}
// the target location where the dir/file should be created
target := filepath.Join(dst, header.Name)
// the following switch could also be done using fi.Mode(); not sure if there is
// a benefit of using one vs. the other.
// fi := header.FileInfo()
// check the file type
switch header.Typeflag {
// if it's a dir and it doesn't exist, create it
case tar.TypeDir:
if _, err := os.Stat(target); err != nil {
if err := os.MkdirAll(target, 0755); err != nil {
srLogger.Error(err, "")
return err
}
}
// if it's a file create it
case tar.TypeReg:
dir := filepath.Dir(target)
if _, err := os.Stat(dir); err != nil {
if err := os.MkdirAll(dir, 0755); err != nil {
srLogger.Error(err, "")
return err
}
}
f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
if err != nil {
srLogger.Error(err, "")
return err
}
// copy over contents
if _, err := io.Copy(f, tr); err != nil {
srLogger.Error(err, "")
return err
}
// manually close here after each file operation; deferring would cause each file close
// to wait until all operations have completed.
f.Close()
}
}
} | func DownloadChartFromHelmRepo(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) { | random_line_split |
helmrepoutils.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"archive/tar"
"compress/gzip"
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
appv1alpha1 "github.com/IBM/multicloud-operators-subscription-release/pkg/apis/app/v1alpha1"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)
var log = logf.Log.WithName("utils")
//GetHelmRepoClient returns an *http.Client to access the helm repo
func GetHelmRepoClient(parentNamespace string, configMap *corev1.ConfigMap) (*http.Client, error) {
srLogger := log.WithValues("package", "utils", "method", "GetHelmRepoClient")
httpClient := http.DefaultClient
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: false,
},
}
if configMap != nil {
configData := configMap.Data
srLogger.Info("ConfigRef retrieved", "configMap.Data", configData)
if configData["insecureSkipVerify"] != "" {
b, err := strconv.ParseBool(configData["insecureSkipVerify"])
if err != nil {
if errors.IsNotFound(err) {
return nil, nil
}
srLogger.Error(err, "Unable to parse", "insecureSkipVerify", configData["insecureSkipVerify"])
return nil, err
}
srLogger.Info("Set InsecureSkipVerify", "insecureSkipVerify", b)
transport.TLSClientConfig.InsecureSkipVerify = b
} else {
srLogger.Info("insecureSkipVerify is not specified")
}
} else {
srLogger.Info("configMap is nil")
}
httpClient.Transport = transport
srLogger.Info("InsecureSkipVerify equal", "InsecureSkipVerify", transport.TLSClientConfig.InsecureSkipVerify)
return httpClient, nil
}
//GetConfigMap searches for the config map containing the helm repo client configuration.
func GetConfigMap(client client.Client, parentNamespace string, configMapRef *corev1.ObjectReference) (configMap *corev1.ConfigMap, err error) {
srLogger := log.WithValues("package", "utils", "method", "getConfigMap")
if configMapRef != nil {
srLogger.Info("Retrieve configMap ", "parentNamespace", parentNamespace, "configMapRef.Name", configMapRef.Name)
ns := configMapRef.Namespace
if ns == "" {
ns = parentNamespace
}
configMap = &corev1.ConfigMap{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: configMapRef.Name}, configMap)
if err != nil {
if errors.IsNotFound(err) {
srLogger.Error(err, "ConfigMap not found ", "Name:", configMapRef.Name, " on namespace: ", ns)
return nil, nil
}
srLogger.Error(err, "Failed to get configMap ", "Name:", configMapRef.Name, " on namespace: ", ns)
return nil, err
}
srLogger.Info("ConfigMap found ", "Name:", configMapRef.Name, " on namespace: ", ns)
} else {
srLogger.Info("no configMapRef defined ", "parentNamespace", parentNamespace)
}
return configMap, err
}
//GetSecret returns the secret to access the helm-repo
func GetSecret(client client.Client, parentNamespace string, secretRef *corev1.ObjectReference) (secret *corev1.Secret, err error) {
srLogger := log.WithValues("package", "utils", "method", "getSecret")
if secretRef != nil {
srLogger.Info("Retreive secret", "parentNamespace", parentNamespace, "secretRef", secretRef)
ns := secretRef.Namespace
if ns == "" {
ns = parentNamespace
}
secret = &corev1.Secret{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: secretRef.Name}, secret)
if err != nil {
srLogger.Error(err, "Failed to get secret ", "Name:", secretRef.Name, " on namespace: ", secretRef.Namespace)
return nil, err
}
srLogger.Info("Secret found ", "Name:", secretRef.Name, " on namespace: ", secretRef.Namespace)
} else {
srLogger.Info("No secret defined", "parentNamespace", parentNamespace)
}
return secret, err
}
//DownloadChart downloads a chart into the chartsDir, from either a Helm repo or GitHub depending on the source type.
func DownloadChart(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
switch strings.ToLower(string(s.Spec.Source.SourceType)) {
case string(appv1alpha1.HelmRepoSourceType):
return DownloadChartFromHelmRepo(configMap, secret, chartsDir, s)
case string(appv1alpha1.GitHubSourceType):
return DownloadChartFromGitHub(configMap, secret, chartsDir, s)
default:
return "", fmt.Errorf("SourceType '%s' unsupported", s.Spec.Source.SourceType)
}
}
//DownloadChartFromGitHub downloads a chart into the chartsDir
func DownloadChartFromGitHub(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
srLogger := log.WithValues("HelmRelease.Namespace", s.Namespace, "SubscriptionRelease.Name", s.Name)
if s.Spec.Source.GitHub == nil {
err := fmt.Errorf("GitHub type but Spec.GitHub is not defined")
return "", err
}
if _, err := os.Stat(chartsDir); os.IsNotExist(err) {
err := os.MkdirAll(chartsDir, 0755)
if err != nil {
srLogger.Error(err, "Unable to create chartDir: ", "chartsDir", chartsDir)
return "", err
}
}
destRepo := filepath.Join(chartsDir, s.Spec.ReleaseName, s.Namespace, s.Spec.ChartName)
for _, url := range s.Spec.Source.GitHub.Urls {
options := &git.CloneOptions{
URL: url,
Depth: 1,
SingleBranch: true,
RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
}
if secret != nil && secret.Data != nil {
srLogger.Info("Add credentials")
options.Auth = &githttp.BasicAuth{
Username: string(secret.Data["user"]),
Password: string(secret.Data["password"]),
}
}
if s.Spec.Source.GitHub.Branch == "" {
options.ReferenceName = plumbing.Master
} else {
options.ReferenceName = plumbing.ReferenceName(s.Spec.Source.GitHub.Branch)
}
os.RemoveAll(chartDir)
_, err = git.PlainClone(destRepo, false, options)
if err != nil {
os.RemoveAll(destRepo)
srLogger.Error(err, "Clone failed", "url", url)
continue
}
}
if err != nil {
srLogger.Error(err, "All urls failed")
}
chartDir = filepath.Join(destRepo, s.Spec.Source.GitHub.ChartPath)
return chartDir, err
}
//DownloadChartFromHelmRepo downloads a chart into the chartsDir
func DownloadChartFromHelmRepo(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
srLogger := log.WithValues("HelmRelease.Namespace", s.Namespace, "SubscriptionRelease.Name", s.Name)
if s.Spec.Source.HelmRepo == nil {
err := fmt.Errorf("HelmRepo type but Spec.HelmRepo is not defined")
return "", err
}
if _, err := os.Stat(chartsDir); os.IsNotExist(err) {
err := os.MkdirAll(chartsDir, 0755)
if err != nil {
srLogger.Error(err, "Unable to create chartDir: ", "chartsDir", chartsDir)
return "", err
}
}
httpClient, err := GetHelmRepoClient(s.Namespace, configMap)
if err != nil {
srLogger.Error(err, "Failed to create httpClient ", "sr.Spec.SecretRef.Name", s.Spec.SecretRef.Name)
return "", err
}
var downloadErr error
for _, urlelem := range s.Spec.Source.HelmRepo.Urls {
var URLP *url.URL
URLP, downloadErr = url.Parse(urlelem)
if downloadErr != nil {
srLogger.Error(downloadErr, "url", urlelem)
continue
}
fileName := filepath.Base(URLP.Path)
// Create the file
chartZip := filepath.Join(chartsDir, fileName)
if _, err := os.Stat(chartZip); os.IsNotExist(err) {
var req *http.Request
req, downloadErr = http.NewRequest(http.MethodGet, urlelem, nil)
if downloadErr != nil {
srLogger.Error(downloadErr, "Can not build request: ", "urlelem", urlelem)
continue
}
if secret != nil && secret.Data != nil {
req.SetBasicAuth(string(secret.Data["user"]), string(secret.Data["password"]))
}
var resp *http.Response
resp, downloadErr = httpClient.Do(req)
if downloadErr != nil {
srLogger.Error(downloadErr, "Http request failed: ", "urlelem", urlelem)
continue
}
srLogger.Info("Get suceeded: ", "urlelem", urlelem)
defer resp.Body.Close()
var out *os.File
out, downloadErr = os.Create(chartZip)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to create: ", "chartZip", chartZip)
continue
}
defer out.Close()
// Write the body to file
_, downloadErr = io.Copy(out, resp.Body)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to copy body: ", "chartZip", chartZip)
continue
}
}
var r *os.File
r, downloadErr = os.Open(chartZip)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to open: ", "chartZip", chartZip)
continue
}
chartDirUnzip := filepath.Join(chartsDir, s.Spec.ReleaseName, s.Namespace)
chartDir = filepath.Join(chartDirUnzip, s.Spec.ChartName)
//Clean before untar
os.RemoveAll(chartDirUnzip)
downloadErr = Untar(chartDirUnzip, r)
if downloadErr != nil {
//Remove zip because failed to untar and so probably corrupted
os.RemoveAll(chartZip)
srLogger.Error(downloadErr, "Failed to unzip: ", "chartZip", chartZip)
continue
}
}
return chartDir, downloadErr
}
//Untar untars the reader into the dst directory
func Untar(dst string, r io.Reader) error {
srLogger := log.WithValues("destination", dst)
gzr, err := gzip.NewReader(r)
if err != nil {
srLogger.Error(err, "")
return err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
switch {
// if no more files are found return
case err == io.EOF:
return nil
// return any other error
case err != nil:
srLogger.Error(err, "")
return err
// if the header is nil, just skip it (not sure how this happens)
case header == nil:
continue
}
// the target location where the dir/file should be created
target := filepath.Join(dst, header.Name)
// the following switch could also be done using fi.Mode(); not sure if there is
// a benefit of using one vs. the other.
// fi := header.FileInfo()
// check the file type
switch header.Typeflag {
// if it's a dir and it doesn't exist, create it
case tar.TypeDir:
if _, err := os.Stat(target); err != nil {
if err := os.MkdirAll(target, 0755); err != nil |
}
// if it's a file create it
case tar.TypeReg:
dir := filepath.Dir(target)
if _, err := os.Stat(dir); err != nil {
if err := os.MkdirAll(dir, 0755); err != nil {
srLogger.Error(err, "")
return err
}
}
f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
if err != nil {
srLogger.Error(err, "")
return err
}
// copy over contents
if _, err := io.Copy(f, tr); err != nil {
srLogger.Error(err, "")
return err
}
// manually close here after each file operation; deferring would cause each file close
// to wait until all operations have completed.
f.Close()
}
}
}
| {
srLogger.Error(err, "")
return err
} | conditional_block |
helmrepoutils.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"archive/tar"
"compress/gzip"
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
appv1alpha1 "github.com/IBM/multicloud-operators-subscription-release/pkg/apis/app/v1alpha1"
"gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
githttp "gopkg.in/src-d/go-git.v4/plumbing/transport/http"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)
var log = logf.Log.WithName("utils")
//GetHelmRepoClient returns an *http.Client to access the helm repo
func GetHelmRepoClient(parentNamespace string, configMap *corev1.ConfigMap) (*http.Client, error) {
srLogger := log.WithValues("package", "utils", "method", "GetHelmRepoClient")
httpClient := http.DefaultClient
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: false,
},
}
if configMap != nil {
configData := configMap.Data
srLogger.Info("ConfigRef retrieved", "configMap.Data", configData)
if configData["insecureSkipVerify"] != "" {
b, err := strconv.ParseBool(configData["insecureSkipVerify"])
if err != nil {
if errors.IsNotFound(err) {
return nil, nil
}
srLogger.Error(err, "Unable to parse", "insecureSkipVerify", configData["insecureSkipVerify"])
return nil, err
}
srLogger.Info("Set InsecureSkipVerify", "insecureSkipVerify", b)
transport.TLSClientConfig.InsecureSkipVerify = b
} else {
srLogger.Info("insecureSkipVerify is not specified")
}
} else {
srLogger.Info("configMap is nil")
}
httpClient.Transport = transport
srLogger.Info("InsecureSkipVerify equal", "InsecureSkipVerify", transport.TLSClientConfig.InsecureSkipVerify)
return httpClient, nil
}
//GetConfigMap searches for the config map containing the helm repo client configuration.
func GetConfigMap(client client.Client, parentNamespace string, configMapRef *corev1.ObjectReference) (configMap *corev1.ConfigMap, err error) {
srLogger := log.WithValues("package", "utils", "method", "getConfigMap")
if configMapRef != nil {
srLogger.Info("Retrieve configMap ", "parentNamespace", parentNamespace, "configMapRef.Name", configMapRef.Name)
ns := configMapRef.Namespace
if ns == "" {
ns = parentNamespace
}
configMap = &corev1.ConfigMap{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: configMapRef.Name}, configMap)
if err != nil {
if errors.IsNotFound(err) {
srLogger.Error(err, "ConfigMap not found ", "Name:", configMapRef.Name, " on namespace: ", ns)
return nil, nil
}
srLogger.Error(err, "Failed to get configMap ", "Name:", configMapRef.Name, " on namespace: ", ns)
return nil, err
}
srLogger.Info("ConfigMap found ", "Name:", configMapRef.Name, " on namespace: ", ns)
} else {
srLogger.Info("no configMapRef defined ", "parentNamespace", parentNamespace)
}
return configMap, err
}
//GetSecret returns the secret to access the helm-repo
func GetSecret(client client.Client, parentNamespace string, secretRef *corev1.ObjectReference) (secret *corev1.Secret, err error) {
srLogger := log.WithValues("package", "utils", "method", "getSecret")
if secretRef != nil {
srLogger.Info("Retreive secret", "parentNamespace", parentNamespace, "secretRef", secretRef)
ns := secretRef.Namespace
if ns == "" {
ns = parentNamespace
}
secret = &corev1.Secret{}
err = client.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: secretRef.Name}, secret)
if err != nil {
srLogger.Error(err, "Failed to get secret ", "Name:", secretRef.Name, " on namespace: ", secretRef.Namespace)
return nil, err
}
srLogger.Info("Secret found ", "Name:", secretRef.Name, " on namespace: ", secretRef.Namespace)
} else {
srLogger.Info("No secret defined", "parentNamespace", parentNamespace)
}
return secret, err
}
//DownloadChart downloads a chart into the chartsDir, from either a Helm repo or GitHub depending on the source type.
func DownloadChart(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
switch strings.ToLower(string(s.Spec.Source.SourceType)) {
case string(appv1alpha1.HelmRepoSourceType):
return DownloadChartFromHelmRepo(configMap, secret, chartsDir, s)
case string(appv1alpha1.GitHubSourceType):
return DownloadChartFromGitHub(configMap, secret, chartsDir, s)
default:
return "", fmt.Errorf("SourceType '%s' unsupported", s.Spec.Source.SourceType)
}
}
//DownloadChartFromGitHub downloads a chart into the chartsDir
func DownloadChartFromGitHub(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) |
//DownloadChartFromHelmRepo downloads a chart into the chartsDir
func DownloadChartFromHelmRepo(configMap *corev1.ConfigMap, secret *corev1.Secret, chartsDir string, s *appv1alpha1.HelmRelease) (chartDir string, err error) {
srLogger := log.WithValues("HelmRelease.Namespace", s.Namespace, "SubscriptionRelease.Name", s.Name)
if s.Spec.Source.HelmRepo == nil {
err := fmt.Errorf("HelmRepo type but Spec.HelmRepo is not defined")
return "", err
}
if _, err := os.Stat(chartsDir); os.IsNotExist(err) {
err := os.MkdirAll(chartsDir, 0755)
if err != nil {
srLogger.Error(err, "Unable to create chartDir: ", "chartsDir", chartsDir)
return "", err
}
}
httpClient, err := GetHelmRepoClient(s.Namespace, configMap)
if err != nil {
srLogger.Error(err, "Failed to create httpClient ", "sr.Spec.SecretRef.Name", s.Spec.SecretRef.Name)
return "", err
}
var downloadErr error
for _, urlelem := range s.Spec.Source.HelmRepo.Urls {
var URLP *url.URL
URLP, downloadErr = url.Parse(urlelem)
if downloadErr != nil {
srLogger.Error(downloadErr, "url", urlelem)
continue
}
fileName := filepath.Base(URLP.Path)
// Create the file
chartZip := filepath.Join(chartsDir, fileName)
if _, err := os.Stat(chartZip); os.IsNotExist(err) {
var req *http.Request
req, downloadErr = http.NewRequest(http.MethodGet, urlelem, nil)
if downloadErr != nil {
srLogger.Error(downloadErr, "Can not build request: ", "urlelem", urlelem)
continue
}
if secret != nil && secret.Data != nil {
req.SetBasicAuth(string(secret.Data["user"]), string(secret.Data["password"]))
}
var resp *http.Response
resp, downloadErr = httpClient.Do(req)
if downloadErr != nil {
srLogger.Error(downloadErr, "Http request failed: ", "urlelem", urlelem)
continue
}
srLogger.Info("Get suceeded: ", "urlelem", urlelem)
defer resp.Body.Close()
var out *os.File
out, downloadErr = os.Create(chartZip)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to create: ", "chartZip", chartZip)
continue
}
defer out.Close()
// Write the body to file
_, downloadErr = io.Copy(out, resp.Body)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to copy body: ", "chartZip", chartZip)
continue
}
}
var r *os.File
r, downloadErr = os.Open(chartZip)
if downloadErr != nil {
srLogger.Error(downloadErr, "Failed to open: ", "chartZip", chartZip)
continue
}
chartDirUnzip := filepath.Join(chartsDir, s.Spec.ReleaseName, s.Namespace)
chartDir = filepath.Join(chartDirUnzip, s.Spec.ChartName)
//Clean before untar
os.RemoveAll(chartDirUnzip)
downloadErr = Untar(chartDirUnzip, r)
if downloadErr != nil {
//Remove the zip because untar failed, so the archive is probably corrupted
os.RemoveAll(chartZip)
srLogger.Error(downloadErr, "Failed to untar: ", "chartZip", chartZip)
continue
}
}
return chartDir, downloadErr
}
//Untar untars the reader into the dst directory
func Untar(dst string, r io.Reader) error {
srLogger := log.WithValues("destination", dst)
gzr, err := gzip.NewReader(r)
if err != nil {
srLogger.Error(err, "")
return err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
switch {
// if no more files are found return
case err == io.EOF:
return nil
// return any other error
case err != nil:
srLogger.Error(err, "")
return err
// if the header is nil, just skip it (not sure how this happens)
case header == nil:
continue
}
// the target location where the dir/file should be created
target := filepath.Join(dst, header.Name)
// the following switch could also be done using fi.Mode(), not sure if there
// a benefit of using one vs. the other.
// fi := header.FileInfo()
// check the file type
switch header.Typeflag {
// if its a dir and it doesn't exist create it
case tar.TypeDir:
if _, err := os.Stat(target); err != nil {
if err := os.MkdirAll(target, 0755); err != nil {
srLogger.Error(err, "")
return err
}
}
// if it's a file create it
case tar.TypeReg:
dir := filepath.Dir(target)
if _, err := os.Stat(dir); err != nil {
if err := os.MkdirAll(dir, 0755); err != nil {
srLogger.Error(err, "")
return err
}
}
f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
if err != nil {
srLogger.Error(err, "")
return err
}
// copy over contents
if _, err := io.Copy(f, tr); err != nil {
srLogger.Error(err, "")
return err
}
// manually close here after each file operation; defering would cause each file close
// to wait until all operations have completed.
f.Close()
}
}
}
| {
srLogger := log.WithValues("HelmRelease.Namespace", s.Namespace, "SubscriptionRelease.Name", s.Name)
if s.Spec.Source.GitHub == nil {
err := fmt.Errorf("GitHub type but Spec.GitHub is not defined")
return "", err
}
if _, err := os.Stat(chartsDir); os.IsNotExist(err) {
err := os.MkdirAll(chartsDir, 0755)
if err != nil {
srLogger.Error(err, "Unable to create chartDir: ", "chartsDir", chartsDir)
return "", err
}
}
destRepo := filepath.Join(chartsDir, s.Spec.ReleaseName, s.Namespace, s.Spec.ChartName)
for _, url := range s.Spec.Source.GitHub.Urls {
options := &git.CloneOptions{
URL: url,
Depth: 1,
SingleBranch: true,
RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
}
if secret != nil && secret.Data != nil {
srLogger.Info("Add credentials")
options.Auth = &githttp.BasicAuth{
Username: string(secret.Data["user"]),
Password: string(secret.Data["password"]),
}
}
if s.Spec.Source.GitHub.Branch == "" {
options.ReferenceName = plumbing.Master
} else {
options.ReferenceName = plumbing.ReferenceName(s.Spec.Source.GitHub.Branch)
}
//Clean any previous clone before cloning into destRepo
os.RemoveAll(destRepo)
_, err = git.PlainClone(destRepo, false, options)
if err != nil {
os.RemoveAll(destRepo)
srLogger.Error(err, "Clone failed", "url", url)
continue
}
//Stop at the first url that cloned successfully
break
}
if err != nil {
srLogger.Error(err, "All urls failed")
}
chartDir = filepath.Join(destRepo, s.Spec.Source.GitHub.ChartPath)
return chartDir, err
} | identifier_body |
ResNet50_augmentation.py | import pathlib, random, cv2
import tensorflow as tf
import numpy as np
import tensorflow.keras.backend as K
import albumentations as A
from matplotlib import pyplot as plt
from functools import partial
from sklearn.model_selection import train_test_split
# GPU setup
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 1:
try:
print("Activate Multi GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
except RuntimeError as e:
print(e)
else:
try:
print("Activate Sigle GPU")
tf.config.experimental.set_memory_growth(gpus[0], True)
strategy = tf.distribute.experimental.CentralStorageStrategy()
except RuntimeError as e:
print(e)
def preprocess_image(images):
image = tf.io.read_file(images)
image = tf.image.decode_jpeg(image, channels=3)
# image = tf.cast(image, tf.float32) / 255.0
# image = (tf.cast(image, tf.float32) / 127.5) - 1
# image = tf.image.per_image_standardization(image)
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
return image
def get_dataset(ds_path):
ds_path = pathlib.Path(ds_path)
images = list(ds_path.glob('*/*.jpg'))
images = [str(path) for path in images]
total_images = len(images)
labels = sorted(item.name for item in ds_path.glob('*/') if item.is_dir())
classes = labels
labels = dict((name, index) for index, name in enumerate(labels))
labels = [labels[pathlib.Path(path).parent.name] for path in images]
labels = tf.keras.utils.to_categorical(labels, num_classes=len(classes), dtype='float32')
return images, labels, classes
def aug_fn(image):
data = {"image":image}
aug_data = transforms(**data)
aug_img = aug_data["image"]
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
# aug_img = tf.keras.applications.resnet.preprocess_input(aug_img)
return aug_img
def process_data(image, label):
aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label
def make_tf_data(images, labels, augmentation):
images = tf.data.Dataset.from_tensor_slices(images)
images = images.map(preprocess_image, num_parallel_calls=AUTOTUNE)
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.repeat()
if augmentation: |
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride, name=name+'_0_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name+'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
return x
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name='conv1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name='predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
@tf.function
def validation(model, images, labels):
y_pred = model(images, training=False)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
val_acc.update_state(labels, y_pred)
val_loss.update_state(labels, y_pred)
def lrfn():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
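# Worked example (assuming a single replica, so LR_MAX = 0.005): with the defaults set
# below (LR_START = 0.001, LR_RAMPUP_EPOCHS = 5, LR_SUSTAIN_EPOCHS = 0, LR_EXP_DECAY = 0.8,
# LR_MIN = 0.001), lrfn() ramps linearly from 0.001 to 0.005 over epochs 0..5 and then
# decays as 0.001 + 0.004 * 0.8 ** (epoch - 5).
# Note that lrfn is currently unused: the optimizer below is built with a fixed learning
# rate and the lrfn-based Adam call is commented out.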
def tf_data_visualize(augmentation_element, name):
row, col, idx = 5, 4, 0
row = min(row, BATCH_SIZE // col)
for (image, label) in augmentation_element:
print(image.shape, label.shape)
image = image / 255.0
plt.figure(figsize=(15, int(15 * row / col)))
for j in range(row * col):
plt.subplot(row, col, j + 1)
plt.axis('off')
plt.imshow(image[j, ])
# plt.savefig(f'{SAVED_PATH}/{LOG_TIME}/{name}_{idx}.jpg')
plt.show()
idx += 1
if idx == 3:
break
if __name__ == "__main__":
# hyper parameters
AUTOTUNE = tf.data.experimental.AUTOTUNE
IMG_SIZE = 224
INPUT_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
BATCH_SIZE = 32
EPOCHS = 1000
# learning rate scheduler
LR_START = 0.001
LR_MAX = 0.005 * strategy.num_replicas_in_sync
LR_MIN = 0.001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .8
# early stopping
PATIENCE = 3
EARLY_STOPPING = True
minimum_loss = float(2147000000)
total_images, total_labels, CLASSES = get_dataset('/home/v100/tf_workspace/datasets/natural_images/natural_images')
n_classes = len(CLASSES)
train_images, valid_images, train_labels, valid_labels = train_test_split(total_images, total_labels, test_size=.3, shuffle=True, random_state=777)
TRAIN_STEPS_PER_EPOCH = int(tf.math.ceil(len(train_images) / BATCH_SIZE).numpy())
VALID_STEP_PER_EPOCH = int(tf.math.ceil(len(valid_images) / BATCH_SIZE).numpy())
cost_fn = tf.keras.losses.CategoricalCrossentropy()
# optimizer = tf.keras.optimizers.Adam(learning_rate=lrfn)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
inputs = tf.keras.Input(shape=(INPUT_SHAPE))
model = ResNet50()
model(inputs=inputs)
model.summary()
# tf.keras.utils.plot_model(model, show_shapes=True)
train_acc = tf.metrics.CategoricalAccuracy()
train_loss = tf.metrics.CategoricalCrossentropy()
val_acc = tf.metrics.CategoricalAccuracy()
val_loss = tf.metrics.CategoricalCrossentropy()
transforms = A.Compose([
# A.Resize(IMG_SIZE, IMG_SIZE, 3, p=1),
A.OneOf([
A.HorizontalFlip(p=0.6),
A.VerticalFlip(p=0.6),
], p=0.7),
# A.Cutout(num_holes=15, max_h_size=4, max_w_size=4, fill_value=[0, 0, 0], p=0.7),
A.OneOf([
A.RandomRotate90(p=0.6),
A.ShiftScaleRotate(p=0.6, border_mode=1)
], p=0.7),
# A.RandomBrightness(limit=0.1, p=0.5),
# A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=0.5),
# A.RandomContrast(limit=0.2, p=0.5),
])
# tf_data_visualize(make_tf_data(train_images, train_labels, True), 'train')
stateful_matrices = ['train_acc', 'train_loss', 'valid_acc', 'valid_loss']
print()
print('Learning started. It takes some time.')
for epoch in range(EPOCHS):
print("Current Learning Rate : ", optimizer._decayed_lr('float32').numpy())
tf.print("Epoch {}/{}".format(epoch + 1, EPOCHS))
prog_bar = tf.keras.utils.Progbar(target=TRAIN_STEPS_PER_EPOCH, stateful_metrics=stateful_matrices)
train_acc.reset_states()
train_loss.reset_states()
val_acc.reset_states()
val_loss.reset_states()
for idx, (images, labels) in enumerate(make_tf_data(train_images, train_labels, True)):
train(model, images, labels)
values=[('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy())]
prog_bar.update(idx, values=values)
if idx+1 >= TRAIN_STEPS_PER_EPOCH:
break
for idx, (images, labels) in enumerate(make_tf_data(valid_images, valid_labels, True)):
validation(model, images, labels)
if idx+1 >= VALID_STEP_PER_EPOCH:
break
values = [('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy()), ('valid_loss', val_loss.result().numpy()), ('valid_acc', val_acc.result().numpy())]
prog_bar.update(TRAIN_STEPS_PER_EPOCH, values=values, finalize=True)
if EARLY_STOPPING:
tmp_loss = (val_loss.result().numpy())
if tmp_loss < minimum_loss:
minimum_loss = tmp_loss
PATIENCE = 3
else:
PATIENCE -= 1
if PATIENCE == 0:
break
print('Learning Finished')
model.save('/home/v100/tf_workspace/model/resnet50_adam_he_l2_aug.h5') | dataset = dataset.map(partial(process_data), num_parallel_calls=AUTOTUNE) | random_line_split |
ResNet50_augmentation.py | import pathlib, random, cv2
import tensorflow as tf
import numpy as np
import tensorflow.keras.backend as K
import albumentations as A
from matplotlib import pyplot as plt
from functools import partial
from sklearn.model_selection import train_test_split
# GPU setup
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 1:
try:
print("Activate Multi GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
except RuntimeError as e:
print(e)
else:
try:
print("Activate Sigle GPU")
tf.config.experimental.set_memory_growth(gpus[0], True)
strategy = tf.distribute.experimental.CentralStorageStrategy()
except RuntimeError as e:
print(e)
def preprocess_image(images):
image = tf.io.read_file(images)
image = tf.image.decode_jpeg(image, channels=3)
# image = tf.cast(image, tf.float32) / 255.0
# image = (tf.cast(image, tf.float32) / 127.5) - 1
# image = tf.image.per_image_standardization(image)
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
return image
def get_dataset(ds_path):
ds_path = pathlib.Path(ds_path)
images = list(ds_path.glob('*/*.jpg'))
images = [str(path) for path in images]
total_images = len(images)
labels = sorted(item.name for item in ds_path.glob('*/') if item.is_dir())
classes = labels
labels = dict((name, index) for index, name in enumerate(labels))
labels = [labels[pathlib.Path(path).parent.name] for path in images]
labels = tf.keras.utils.to_categorical(labels, num_classes=len(classes), dtype='float32')
return images, labels, classes
def aug_fn(image):
data = {"image":image}
aug_data = transforms(**data)
aug_img = aug_data["image"]
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
# aug_img = tf.keras.applications.resnet.preprocess_input(aug_img)
return aug_img
def process_data(image, label):
|
def make_tf_data(images, labels, augmentation):
images = tf.data.Dataset.from_tensor_slices(images)
images = images.map(preprocess_image, num_parallel_calls=AUTOTUNE)
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.repeat()
if augmentation:
dataset = dataset.map(partial(process_data), num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride, name=name+'_0_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name+'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
return x
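# Depth sanity check: ResNet50() below stacks (3, 4, 6, 3) bottleneck blocks, each with
# 3 convolutions, plus the stem convolution and the final dense layer:
# (3 + 4 + 6 + 3) * 3 + 1 + 1 = 50 weighted layers, which is where the ResNet50 name comes from.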
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name='conv1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name='predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
@tf.function
def validation(model, images, labels):
y_pred = model(images, training=False)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
val_acc.update_state(labels, y_pred)
val_loss.update_state(labels, y_pred)
def lrfn():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
def tf_data_visualize(augmentation_element, name):
row, col, idx = 5, 4, 0
row = min(row, BATCH_SIZE // col)
for (image, label) in augmentation_element:
print(image.shape, label.shape)
image = image / 255.0
plt.figure(figsize=(15, int(15 * row / col)))
for j in range(row * col):
plt.subplot(row, col, j + 1)
plt.axis('off')
plt.imshow(image[j, ])
# plt.savefig(f'{SAVED_PATH}/{LOG_TIME}/{name}_{idx}.jpg')
plt.show()
idx += 1
if idx == 3:
break
if __name__ == "__main__":
# hyper parameters
AUTOTUNE = tf.data.experimental.AUTOTUNE
IMG_SIZE = 224
INPUT_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
BATCH_SIZE = 32
EPOCHS = 1000
# learning rate scheduler
LR_START = 0.001
LR_MAX = 0.005 * strategy.num_replicas_in_sync
LR_MIN = 0.001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .8
# early stopping
PATIENCE = 3
EARLY_STOPPING = True
minimum_loss = float(2147000000)
total_images, total_labels, CLASSES = get_dataset('/home/v100/tf_workspace/datasets/natural_images/natural_images')
n_classes = len(CLASSES)
train_images, valid_images, train_labels, valid_labels = train_test_split(total_images, total_labels, test_size=.3, shuffle=True, random_state=777)
TRAIN_STEPS_PER_EPOCH = int(tf.math.ceil(len(train_images) / BATCH_SIZE).numpy())
VALID_STEP_PER_EPOCH = int(tf.math.ceil(len(valid_images) / BATCH_SIZE).numpy())
cost_fn = tf.keras.losses.CategoricalCrossentropy()
# optimizer = tf.keras.optimizers.Adam(learning_rate=lrfn)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
inputs = tf.keras.Input(shape=(INPUT_SHAPE))
model = ResNet50()
model(inputs=inputs)
model.summary()
# tf.keras.utils.plot_model(model, show_shapes=True)
train_acc = tf.metrics.CategoricalAccuracy()
train_loss = tf.metrics.CategoricalCrossentropy()
val_acc = tf.metrics.CategoricalAccuracy()
val_loss = tf.metrics.CategoricalCrossentropy()
transforms = A.Compose([
# A.Resize(IMG_SIZE, IMG_SIZE, 3, p=1),
A.OneOf([
A.HorizontalFlip(p=0.6),
A.VerticalFlip(p=0.6),
], p=0.7),
# A.Cutout(num_holes=15, max_h_size=4, max_w_size=4, fill_value=[0, 0, 0], p=0.7),
A.OneOf([
A.RandomRotate90(p=0.6),
A.ShiftScaleRotate(p=0.6, border_mode=1)
], p=0.7),
# A.RandomBrightness(limit=0.1, p=0.5),
# A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=0.5),
# A.RandomContrast(limit=0.2, p=0.5),
])
# tf_data_visualize(make_tf_data(train_images, train_labels, True), 'train')
stateful_matrices = ['train_acc', 'train_loss', 'valid_acc', 'valid_loss']
print()
print('Learning started. It takes some time.')
for epoch in range(EPOCHS):
print("Current Learning Rate : ", optimizer._decayed_lr('float32').numpy())
tf.print("Epoch {}/{}".format(epoch + 1, EPOCHS))
prog_bar = tf.keras.utils.Progbar(target=TRAIN_STEPS_PER_EPOCH, stateful_metrics=stateful_matrices)
train_acc.reset_states()
train_loss.reset_states()
val_acc.reset_states()
val_loss.reset_states()
for idx, (images, labels) in enumerate(make_tf_data(train_images, train_labels, True)):
train(model, images, labels)
values=[('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy())]
prog_bar.update(idx, values=values)
if idx+1 >= TRAIN_STEPS_PER_EPOCH:
break
for idx, (images, labels) in enumerate(make_tf_data(valid_images, valid_labels, True)):
validation(model, images, labels)
if idx+1 >= VALID_STEP_PER_EPOCH:
break
values = [('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy()), ('valid_loss', val_loss.result().numpy()), ('valid_acc', val_acc.result().numpy())]
prog_bar.update(TRAIN_STEPS_PER_EPOCH, values=values, finalize=True)
if EARLY_STOPPING:
tmp_loss = (val_loss.result().numpy())
if tmp_loss < minimum_loss:
minimum_loss = tmp_loss
PATIENCE = 3
else:
PATIENCE -= 1
if PATIENCE == 0:
break
print('Learning Finished')
model.save('/home/v100/tf_workspace/model/resnet50_adam_he_l2_aug.h5') | aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label | identifier_body |
ResNet50_augmentation.py | import pathlib, random, cv2
import tensorflow as tf
import numpy as np
import tensorflow.keras.backend as K
import albumentations as A
from matplotlib import pyplot as plt
from functools import partial
from sklearn.model_selection import train_test_split
# GPU setup
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 1:
try:
print("Activate Multi GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
except RuntimeError as e:
print(e)
else:
try:
print("Activate Sigle GPU")
tf.config.experimental.set_memory_growth(gpus[0], True)
strategy = tf.distribute.experimental.CentralStorageStrategy()
except RuntimeError as e:
print(e)
def preprocess_image(images):
image = tf.io.read_file(images)
image = tf.image.decode_jpeg(image, channels=3)
# image = tf.cast(image, tf.float32) / 255.0
# image = (tf.cast(image, tf.float32) / 127.5) - 1
# image = tf.image.per_image_standardization(image)
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
return image
def get_dataset(ds_path):
ds_path = pathlib.Path(ds_path)
images = list(ds_path.glob('*/*.jpg'))
images = [str(path) for path in images]
total_images = len(images)
labels = sorted(item.name for item in ds_path.glob('*/') if item.is_dir())
classes = labels
labels = dict((name, index) for index, name in enumerate(labels))
labels = [labels[pathlib.Path(path).parent.name] for path in images]
labels = tf.keras.utils.to_categorical(labels, num_classes=len(classes), dtype='float32')
return images, labels, classes
def aug_fn(image):
data = {"image":image}
aug_data = transforms(**data)
aug_img = aug_data["image"]
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
# aug_img = tf.keras.applications.resnet.preprocess_input(aug_img)
return aug_img
def process_data(image, label):
aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label
def make_tf_data(images, labels, augmentation):
images = tf.data.Dataset.from_tensor_slices(images)
images = images.map(preprocess_image, num_parallel_calls=AUTOTUNE)
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.repeat()
if augmentation:
dataset = dataset.map(partial(process_data), num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
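# Note: tf.numpy_function (used by process_data) erases static shape information, so
# batches leaving this pipeline only carry runtime shapes. If a later layer ever needs
# static shapes, one option (not done here) is an extra map step that calls
# aug_img.set_shape([IMG_SIZE, IMG_SIZE, 3]) before batching.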
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride, name=name+'_0_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name+'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
return x
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name='conv1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name='predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
@tf.function
def validation(model, images, labels):
y_pred = model(images, training=False)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
val_acc.update_state(labels, y_pred)
val_loss.update_state(labels, y_pred)
def | ():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
def tf_data_visualize(augmentation_element, name):
row, col, idx = 5, 4, 0
row = min(row, BATCH_SIZE // col)
for (image, label) in augmentation_element:
print(image.shape, label.shape)
image = image / 255.0
plt.figure(figsize=(15, int(15 * row / col)))
for j in range(row * col):
plt.subplot(row, col, j + 1)
plt.axis('off')
plt.imshow(image[j, ])
# plt.savefig(f'{SAVED_PATH}/{LOG_TIME}/{name}_{idx}.jpg')
plt.show()
idx += 1
if idx == 3:
break
if __name__ == "__main__":
# hyper parameters
AUTOTUNE = tf.data.experimental.AUTOTUNE
IMG_SIZE = 224
INPUT_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
BATCH_SIZE = 32
EPOCHS = 1000
# learning rate scheduler
LR_START = 0.001
LR_MAX = 0.005 * strategy.num_replicas_in_sync
LR_MIN = 0.001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .8
# early stopping
PATIENCE = 3
EARLY_STOPPING = True
minimum_loss = float(2147000000)
total_images, total_labels, CLASSES = get_dataset('/home/v100/tf_workspace/datasets/natural_images/natural_images')
n_classes = len(CLASSES)
train_images, valid_images, train_labels, valid_labels = train_test_split(total_images, total_labels, test_size=.3, shuffle=True, random_state=777)
TRAIN_STEPS_PER_EPOCH = int(tf.math.ceil(len(train_images) / BATCH_SIZE).numpy())
VALID_STEP_PER_EPOCH = int(tf.math.ceil(len(valid_images) / BATCH_SIZE).numpy())
cost_fn = tf.keras.losses.CategoricalCrossentropy()
# optimizer = tf.keras.optimizers.Adam(learning_rate=lrfn)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
inputs = tf.keras.Input(shape=(INPUT_SHAPE))
model = ResNet50()
model(inputs=inputs)
model.summary()
# tf.keras.utils.plot_model(model, show_shapes=True)
train_acc = tf.metrics.CategoricalAccuracy()
train_loss = tf.metrics.CategoricalCrossentropy()
val_acc = tf.metrics.CategoricalAccuracy()
val_loss = tf.metrics.CategoricalCrossentropy()
transforms = A.Compose([
# A.Resize(IMG_SIZE, IMG_SIZE, 3, p=1),
A.OneOf([
A.HorizontalFlip(p=0.6),
A.VerticalFlip(p=0.6),
], p=0.7),
# A.Cutout(num_holes=15, max_h_size=4, max_w_size=4, fill_value=[0, 0, 0], p=0.7),
A.OneOf([
A.RandomRotate90(p=0.6),
A.ShiftScaleRotate(p=0.6, border_mode=1)
], p=0.7),
# A.RandomBrightness(limit=0.1, p=0.5),
# A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=0.5),
# A.RandomContrast(limit=0.2, p=0.5),
])
# tf_data_visualize(make_tf_data(train_images, train_labels, True), 'train')
stateful_matrices = ['train_acc', 'train_loss', 'valid_acc', 'valid_loss']
print()
print('Learning started. It takes some time.')
for epoch in range(EPOCHS):
print("Current Learning Rate : ", optimizer._decayed_lr('float32').numpy())
tf.print("Epoch {}/{}".format(epoch + 1, EPOCHS))
prog_bar = tf.keras.utils.Progbar(target=TRAIN_STEPS_PER_EPOCH, stateful_metrics=stateful_matrices)
train_acc.reset_states()
train_loss.reset_states()
val_acc.reset_states()
val_loss.reset_states()
for idx, (images, labels) in enumerate(make_tf_data(train_images, train_labels, True)):
train(model, images, labels)
values=[('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy())]
prog_bar.update(idx, values=values)
if idx+1 >= TRAIN_STEPS_PER_EPOCH:
break
for idx, (images, labels) in enumerate(make_tf_data(valid_images, valid_labels, True)):
validation(model, images, labels)
if idx+1 >= VALID_STEP_PER_EPOCH:
break
values = [('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy()), ('valid_loss', val_loss.result().numpy()), ('valid_acc', val_acc.result().numpy())]
prog_bar.update(TRAIN_STEPS_PER_EPOCH, values=values, finalize=True)
if EARLY_STOPPING:
tmp_loss = (val_loss.result().numpy())
if tmp_loss < minimum_loss:
minimum_loss = tmp_loss
PATIENCE = 3
else:
PATIENCE -= 1
if PATIENCE == 0:
break
print('Learning Finished')
model.save('/home/v100/tf_workspace/model/resnet50_adam_he_l2_aug.h5') | lrfn | identifier_name |
ResNet50_augmentation.py | import pathlib, random, cv2
import tensorflow as tf
import numpy as np
import tensorflow.keras.backend as K
import albumentations as A
from matplotlib import pyplot as plt
from functools import partial
from sklearn.model_selection import train_test_split
# GPU setup
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 1:
try:
print("Activate Multi GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
except RuntimeError as e:
print(e)
else:
try:
print("Activate Sigle GPU")
tf.config.experimental.set_memory_growth(gpus[0], True)
strategy = tf.distribute.experimental.CentralStorageStrategy()
except RuntimeError as e:
print(e)
def preprocess_image(images):
image = tf.io.read_file(images)
image = tf.image.decode_jpeg(image, channels=3)
# image = tf.cast(image, tf.float32) / 255.0
# image = (tf.cast(image, tf.float32) / 127.5) - 1
# image = tf.image.per_image_standardization(image)
image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
return image
def get_dataset(ds_path):
ds_path = pathlib.Path(ds_path)
images = list(ds_path.glob('*/*.jpg'))
images = [str(path) for path in images]
total_images = len(images)
labels = sorted(item.name for item in ds_path.glob('*/') if item.is_dir())
classes = labels
labels = dict((name, index) for index, name in enumerate(labels))
labels = [labels[pathlib.Path(path).parent.name] for path in images]
labels = tf.keras.utils.to_categorical(labels, num_classes=len(classes), dtype='float32')
return images, labels, classes
def aug_fn(image):
data = {"image":image}
aug_data = transforms(**data)
aug_img = aug_data["image"]
aug_img = tf.cast(aug_img, tf.float32) / 255.0
aug_img = tf.image.per_image_standardization(aug_img)
# aug_img = tf.keras.applications.resnet.preprocess_input(aug_img)
return aug_img
def process_data(image, label):
aug_img = tf.numpy_function(func=aug_fn, inp=[image], Tout=tf.float32)
return aug_img, label
def make_tf_data(images, labels, augmentation):
images = tf.data.Dataset.from_tensor_slices(images)
images = images.map(preprocess_image, num_parallel_calls=AUTOTUNE)
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.repeat()
if augmentation:
dataset = dataset.map(partial(process_data), num_parallel_calls=AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def residual_block(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
if conv_shortcut:
shortcut = tf.keras.layers.Conv2D(4 * filters, 1, strides=stride, name=name+'_0_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
shortcut = tf.keras.layers.BatchNormalization(axis=3, name=name+'_0_bn')(shortcut)
else:
shortcut = x
x = tf.keras.layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_1_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = tf.keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_2_bn')(x)
x = tf.keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = tf.keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name=name + '_3_bn')(x)
x = tf.keras.layers.Add(name=name + '_add')([shortcut, x])
x = tf.keras.layers.Activation('relu', name=name + '_out')(x)
return x
def residual_stack(x, filters, blocks, stride1=2, name=None):
x = residual_block(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = residual_block(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
return x
def ResNet50():
inputs = tf.keras.layers.Input(shape=INPUT_SHAPE)
x = tf.keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(inputs)
x = tf.keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name='conv1_conv', kernel_initializer='he_uniform', bias_initializer='he_uniform', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(x)
x = tf.keras.layers.BatchNormalization(axis=3, name='conv1_bn')(x)
x = tf.keras.layers.Activation('relu', name='conv1_relu')(x)
x = tf.keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = tf.keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = residual_stack(x, 64, 3, stride1=1, name='conv2')
x = residual_stack(x, 128, 4, name='conv3')
x = residual_stack(x, 256, 6, name='conv4')
x = residual_stack(x, 512, 3, name='conv5')
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
outputs = tf.keras.layers.Dense(n_classes, activation='softmax', name='predictions')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
@tf.function
def train(model, images, labels):
with tf.GradientTape() as tape:
y_pred = model(images, training=True)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
train_acc.update_state(labels, y_pred)
train_loss.update_state(labels, y_pred)
@tf.function
def validation(model, images, labels):
y_pred = model(images, training=False)
loss = tf.reduce_mean(cost_fn(labels, y_pred))
val_acc.update_state(labels, y_pred)
val_loss.update_state(labels, y_pred)
def lrfn():
if epoch < LR_RAMPUP_EPOCHS:
lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
return lr
def tf_data_visualize(augmentation_element, name):
row, col, idx = 5, 4, 0
row = min(row, BATCH_SIZE // col)
for (image, label) in augmentation_element:
print(image.shape, label.shape)
image = image / 255.0
plt.figure(figsize=(15, int(15 * row / col)))
for j in range(row * col):
plt.subplot(row, col, j + 1)
plt.axis('off')
plt.imshow(image[j, ])
# plt.savefig(f'{SAVED_PATH}/{LOG_TIME}/{name}_{idx}.jpg')
plt.show()
idx += 1
if idx == 3:
break
if __name__ == "__main__":
# hyper parameters
AUTOTUNE = tf.data.experimental.AUTOTUNE
IMG_SIZE = 224
INPUT_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
BATCH_SIZE = 32
EPOCHS = 1000
# learning rate scheduler
LR_START = 0.001
LR_MAX = 0.005 * strategy.num_replicas_in_sync
LR_MIN = 0.001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .8
# early stopping
PATIENCE = 3
EARLY_STOPPING = True
minimum_loss = float(2147000000)
total_images, total_labels, CLASSES = get_dataset('/home/v100/tf_workspace/datasets/natural_images/natural_images')
n_classes = len(CLASSES)
train_images, valid_images, train_labels, valid_labels = train_test_split(total_images, total_labels, test_size=.3, shuffle=True, random_state=777)
TRAIN_STEPS_PER_EPOCH = int(tf.math.ceil(len(train_images) / BATCH_SIZE).numpy())
VALID_STEP_PER_EPOCH = int(tf.math.ceil(len(valid_images) / BATCH_SIZE).numpy())
cost_fn = tf.keras.losses.CategoricalCrossentropy()
# optimizer = tf.keras.optimizers.Adam(learning_rate=lrfn)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.00001)
inputs = tf.keras.Input(shape=(INPUT_SHAPE))
model = ResNet50()
model(inputs=inputs)
model.summary()
# tf.keras.utils.plot_model(model, show_shapes=True)
train_acc = tf.metrics.CategoricalAccuracy()
train_loss = tf.metrics.CategoricalCrossentropy()
val_acc = tf.metrics.CategoricalAccuracy()
val_loss = tf.metrics.CategoricalCrossentropy()
transforms = A.Compose([
# A.Resize(IMG_SIZE, IMG_SIZE, 3, p=1),
A.OneOf([
A.HorizontalFlip(p=0.6),
A.VerticalFlip(p=0.6),
], p=0.7),
# A.Cutout(num_holes=15, max_h_size=4, max_w_size=4, fill_value=[0, 0, 0], p=0.7),
A.OneOf([
A.RandomRotate90(p=0.6),
A.ShiftScaleRotate(p=0.6, border_mode=1)
], p=0.7),
# A.RandomBrightness(limit=0.1, p=0.5),
# A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=0.5),
# A.RandomContrast(limit=0.2, p=0.5),
])
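# Roughly: each image gets a 70% chance of one flip (horizontal or vertical chosen with
# equal weight, since A.OneOf renormalizes the inner p values) and, independently, a 70%
# chance of either a 90-degree rotation or a shift/scale/rotate.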
# tf_data_visualize(make_tf_data(train_images, train_labels, True), 'train')
stateful_matrices = ['train_acc', 'train_loss', 'valid_acc', 'valid_loss']
print()
print('Learning started. It takes some time.')
for epoch in range(EPOCHS):
print("Current Learning Rate : ", optimizer._decayed_lr('float32').numpy())
tf.print("Epoch {}/{}".format(epoch + 1, EPOCHS))
prog_bar = tf.keras.utils.Progbar(target=TRAIN_STEPS_PER_EPOCH, stateful_metrics=stateful_matrices)
train_acc.reset_states()
train_loss.reset_states()
val_acc.reset_states()
val_loss.reset_states()
for idx, (images, labels) in enumerate(make_tf_data(train_images, train_labels, True)):
train(model, images, labels)
values=[('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy())]
prog_bar.update(idx, values=values)
if idx+1 >= TRAIN_STEPS_PER_EPOCH:
break
for idx, (images, labels) in enumerate(make_tf_data(valid_images, valid_labels, True)):
validation(model, images, labels)
if idx+1 >= VALID_STEP_PER_EPOCH:
break
values = [('train_loss', train_loss.result().numpy()), ('train_acc', train_acc.result().numpy()), ('valid_loss', val_loss.result().numpy()), ('valid_acc', val_acc.result().numpy())]
prog_bar.update(TRAIN_STEPS_PER_EPOCH, values=values, finalize=True)
if EARLY_STOPPING:
tmp_loss = (val_loss.result().numpy())
if tmp_loss < minimum_loss:
minimum_loss = tmp_loss
PATIENCE = 3
else:
|
print('Learning Finished')
model.save('/home/v100/tf_workspace/model/resnet50_adam_he_l2_aug.h5') | PATIENCE -= 1
if PATIENCE == 0:
break | conditional_block |
sriov.go | // Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"runtime"
"strconv"
"strings"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ip"
"github.com/containernetworking/plugins/pkg/ipam"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/vishvananda/netlink"
)
type NetConf struct {
types.NetConf
VlanId int `json:"vlanId"`
MTU int `json:"mtu,omitempty"`
}
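// Illustrative only: a netconf this plugin could be fed (all values and the ipam section
// are placeholders; only "vlanId" and "mtu" are parsed in this file, the rest is handled
// by the standard CNI types and the delegated IPAM plugin):
// {
//   "cniVersion": "0.3.1",
//   "name": "sriov-net",
//   "type": "sriov",
//   "vlanId": 100,
//   "mtu": 1500,
//   "ipam": { "type": "host-local", "subnet": "10.56.217.0/24" }
// }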
func init() {
// this ensures that main runs only on main thread (thread group leader).
// since namespace ops (unshare, setns) are done for a single thread, we
// must ensure that the goroutine does not jump from OS thread to thread
runtime.LockOSThread()
}
func loadConf(bytes []byte) (*NetConf, string, error) {
n := &NetConf{}
if err := json.Unmarshal(bytes, n); err != nil {
return nil, "", fmt.Errorf("failed to load netconf: %v", err)
}
if n.VlanId < 0 || n.VlanId > 4094 {
return nil, "", fmt.Errorf(`invalid VLAN ID %d (must be between 0 and 4095 inclusive)`, n.VlanId)
}
return n, n.CNIVersion, nil
}
func | (conf *NetConf, ifName string, netns ns.NetNS) (*current.Interface, error) {
vf := ¤t.Interface{}
// Allocate one available Virtual Function
m, vfIdx, vfDevName, err := allocFreeVF()
if err != nil {
return nil, err
}
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV 成功申请%v网卡的第%v个VF, 名称为: %v\n", m.Attrs().Name, vfIdx, vfDevName)
vfDev, err := netlink.LinkByName(vfDevName)
if err != nil {
return nil, fmt.Errorf("failed to lookup vf device %q: %v", vfDevName, err)
}
if conf.MTU <= 0 {
conf.MTU = m.Attrs().MTU
}
if err = netlink.LinkSetVfHardwareAddr(m, vfIdx, vfDev.Attrs().HardwareAddr); err != nil {
return nil, fmt.Errorf("failed to set vf %d macaddress: %v", vfIdx, err)
}
if err = netlink.LinkSetVfVlan(m, vfIdx, conf.VlanId); err != nil {
return nil, fmt.Errorf("failed to set vf %d vlan: %v", vfIdx, err)
}
if err = netlink.LinkSetUp(vfDev); err != nil {
return nil, fmt.Errorf("failed to setup vf %d device: %v", vfIdx, err)
}
// move VF device to ns
if err = netlink.LinkSetNsFd(vfDev, int(netns.Fd())); err != nil {
return nil, fmt.Errorf("failed to move vf %d to netns: %v", vfIdx, err)
}
err = netns.Do(func(_ ns.NetNS) error {
err := ip.RenameLink(vfDevName, ifName)
if err != nil {
return fmt.Errorf("failed to rename vlan to %q: %v", ifName, err)
}
vf.Name = ifName
// Re-fetch interface to get all properties/attributes
contVF, err := netlink.LinkByName(vf.Name)
if err != nil {
return fmt.Errorf("failed to refetch vlan %q: %v", vf.Name, err)
}
vf.Mac = contVF.Attrs().HardwareAddr.String()
vf.Sandbox = netns.Path()
return nil
})
if err != nil {
return nil, err
}
return vf, nil
}
func allocFreeVF() (netlink.Link, int, string, error) {
vfIdx := -1
devName := ""
// Get the list of physical NICs (PFs) available on this machine
links, err := netlink.LinkList()
if err != nil {
return nil, -1, "", fmt.Errorf("获取可用物理网卡失败: %v", err)
}
for _, link := range links {
if link.Type() == "device" && link.Attrs().OperState == netlink.OperUp {
master := link.Attrs().Name
sriovFile := fmt.Sprintf("/sys/class/net/%s/device/sriov_numvfs", master)
if _, err := os.Lstat(sriovFile); err != nil {
return nil, -1, "", fmt.Errorf("failed to open the sriov_numfs of device %q: %v", master, err)
}
data, err := ioutil.ReadFile(sriovFile)
if err != nil {
return nil, -1, "", fmt.Errorf("failed to read the sriov_numfs of device %q: %v", master, err)
}
if len(data) == 0 {
return nil, -1, "", fmt.Errorf("no data in the file %q", sriovFile)
}
sriovNumfs := strings.TrimSpace(string(data))
vfTotal, err := strconv.Atoi(sriovNumfs)
if err != nil {
return nil, -1, "", fmt.Errorf("failed to convert sriov_numfs(byte value) to int of device %q: %v", master, err)
}
if vfTotal <= 0 {
return nil, -1, "", fmt.Errorf("no virtual function in the device %q: %v", master)
}
for vf := 0; vf < vfTotal; vf++ {
devName, err = getVFDeviceName(master, vf)
// got a free vf
if err == nil {
vfIdx = vf
break
}
}
if vfIdx == -1 {
return nil, -1, "", fmt.Errorf("can not get a free virtual function in directory %s", master)
}
return link, vfIdx, devName, nil
}
}
return nil, vfIdx, devName, fmt.Errorf("no available physical NIC on this host")
}
func getVFDeviceName(master string, vf int) (string, error) {
vfDir := fmt.Sprintf("/sys/class/net/%s/device/virtfn%d/net", master, vf)
if _, err := os.Lstat(vfDir); err != nil {
return "", fmt.Errorf("failed to open the virtfn%d dir of the device %q: %v", vf, master, err)
}
infos, err := ioutil.ReadDir(vfDir)
if err != nil {
return "", fmt.Errorf("failed to read the virtfn%d dir of the device %q: %v", vf, master, err)
}
if len(infos) != 1 {
return "", fmt.Errorf("no network device in directory %s", vfDir)
}
return infos[0].Name(), nil
}
func releaseVF(conf *NetConf, ifName string, netns ns.NetNS) error {
initns, err := ns.GetCurrentNS()
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV releaseVF initns = %v\n", initns)
if err != nil {
return fmt.Errorf("failed to get init netns: %v", err)
}
// for IPAM in cmdDel
return netns.Do(func(_ ns.NetNS) error {
// get VF device
vfDev, err := netlink.LinkByName(ifName)
if err != nil {
return fmt.Errorf("failed to lookup device %s: %v", ifName, err)
}
// device name in init netns
index := vfDev.Attrs().Index
devName := fmt.Sprintf("dev%d", index)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV releaseVF index = %v devName = %v vfDev = %v\n", index, devName, vfDev)
// shutdown VF device
if err = netlink.LinkSetDown(vfDev); err != nil {
return fmt.Errorf("failed to down device: %v", err)
}
// rename VF device
err = ip.RenameLink(ifName, devName)
if err != nil {
return fmt.Errorf("failed to rename device %s to %s: %v", ifName, devName, err)
}
// move VF device to init netns
if err = netlink.LinkSetNsFd(vfDev, int(initns.Fd())); err != nil {
return fmt.Errorf("failed to move device %s to init netns: %v", ifName, err)
}
return nil
})
}
func cmdAdd(args *skel.CmdArgs) error {
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.ContainerID = %v\n", args.ContainerID)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Netns = %v\n", args.Netns)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.IfName = %v\n", args.IfName)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Args = %v\n", args.Args)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Path = %v\n", args.Path)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.StdinData = %v\n", string(args.StdinData))
n, cniVersion, err := loadConf(args.StdinData)
if err != nil {
return err
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
}
defer netns.Close()
vfInterface, err := setupVF(n, args.IfName, netns)
if err != nil {
return err
}
// run the IPAM plugin and get back the config to apply
r, err := ipam.ExecAdd(n.IPAM.Type, args.StdinData)
if err != nil {
return err
}
// Convert whatever the IPAM result was into the current Result type
result, err := current.NewResultFromResult(r)
if err != nil {
return err
}
if len(result.IPs) == 0 {
return errors.New("IPAM plugin returned missing IP config")
}
for _, ipc := range result.IPs {
// All addresses belong to the vlan interface
ipc.Interface = current.Int(0)
}
result.Interfaces = []*current.Interface{vfInterface}
err = netns.Do(func(_ ns.NetNS) error {
return ipam.ConfigureIface(args.IfName, result)
})
if err != nil {
return err
}
result.DNS = n.DNS
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd result = %v\n", result)
return types.PrintResult(result, cniVersion)
}
func cmdDel(args *skel.CmdArgs) error {
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.ContainerID = %v\n", args.ContainerID)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Netns = %v\n", args.Netns)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.IfName = %v\n", args.IfName)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Args = %v\n", args.Args)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Path = %v\n", args.Path)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.StdinData = %v\n", string(args.StdinData))
n, _, err := loadConf(args.StdinData)
if err != nil {
return err
}
if args.Netns == "" {
return nil
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
}
defer netns.Close()
if err = releaseVF(n, args.IfName, netns); err != nil {
return err
}
err = ipam.ExecDel(n.IPAM.Type, args.StdinData)
if err != nil {
return err
}
// err = ns.WithNetNSPath(args.Netns, func(_ ns.NetNS) error {
// // get VF device
// vfDev, err := netlink.LinkByName(ifName)
// if err != nil {
// return fmt.Errorf("failed to lookup device %s: %v", ifName, err)
// }
// // device name in init netns
// index := vfDev.Attrs().Index
// devName := fmt.Sprintf("%s_%d", n.Master, index)
// // shutdown VF device
// if err = netlink.LinkSetDown(vfDev); err != nil {
// return fmt.Errorf("failed to down device: %v", err)
// }
// // rename VF device
// err = ip.RenameLink(ifName, devName)
// if err != nil {
// return fmt.Errorf("failed to rename device %s to %s: %v", ifName, devName, err)
// }
// // move VF device to init netns
// // if err = netlink.LinkSetNsFd(vfDev, int(ns.Fd())); err != nil {
// // return fmt.Errorf("failed to move device %s to init netns: %v", ifName, err)
// // }
// _, err = ip.DelLinkByNameAddr(ifName, netlink.FAMILY_V4)
// // FIXME: use ip.ErrLinkNotFound when cni is revendored
// if err != nil && err.Error() == "Link not found" {
// return nil
// }
// return err
// })
return err
}
func main() {
skel.PluginMain(cmdAdd, cmdDel, version.All)
}
 | setupVF | identifier_name
sriov.go | // Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"runtime"
"strconv"
"strings"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ip"
"github.com/containernetworking/plugins/pkg/ipam"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/vishvananda/netlink"
)
type NetConf struct {
types.NetConf
VlanId int `json:"vlanId"`
MTU int `json:"mtu,omitempty"`
}
func init() {
// this ensures that main runs only on main thread (thread group leader).
// since namespace ops (unshare, setns) are done for a single thread, we
// must ensure that the goroutine does not jump from OS thread to thread
runtime.LockOSThread()
}
func loadConf(bytes []byte) (*NetConf, string, error) {
n := &NetConf{}
if err := json.Unmarshal(bytes, n); err != nil {
return nil, "", fmt.Errorf("failed to load netconf: %v", err)
}
if n.VlanId < 0 || n.VlanId > 4094 {
return nil, "", fmt.Errorf(`invalid VLAN ID %d (must be between 0 and 4094 inclusive)`, n.VlanId)
}
return n, n.CNIVersion, nil
}
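// NOTE (added for illustration, not part of the original plugin): a minimal CNI network
// configuration that loadConf above would accept might look like the following. The "ipam"
// section is an assumption (any IPAM plugin understood by ipam.ExecAdd/ExecDel would do);
// "vlanId" and "mtu" map onto the NetConf struct defined above.
//
//   {
//     "cniVersion": "0.3.1",
//     "name": "sriov-net",
//     "type": "sriov",
//     "vlanId": 100,
//     "mtu": 1500,
//     "ipam": { "type": "host-local", "subnet": "10.56.217.0/24" }
//   }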
func setupVF(conf *NetConf, ifName string, netns ns.NetNS) (*current.Interface, error) {
vf := &current.Interface{}
// Allocate an available Virtual Function (VF)
m, vfIdx, vfDevName, err := allocFreeVF()
if err != nil {
return nil, err
}
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV allocated from NIC %v the VF at index %v, device name: %v\n", m.Attrs().Name, vfIdx, vfDevName)
vfDev, err := netlink.LinkByName(vfDevName)
if err != nil {
return nil, fmt.Errorf("failed to lookup vf device %q: %v", vfDevName, err)
}
if conf.MTU <= 0 {
conf.MTU = m.Attrs().MTU
}
if err = netlink.LinkSetVfHardwareAddr(m, vfIdx, vfDev.Attrs().HardwareAddr); err != nil {
return nil, fmt.Errorf("failed to set vf %d macaddress: %v", vfIdx, err)
}
if err = netlink.LinkSetVfVlan(m, vfIdx, conf.VlanId); err != nil {
return nil, fmt.Errorf("failed to set vf %d vlan: %v", vfIdx, err)
}
if err = netlink.LinkSetUp(vfDev); err != nil {
return nil, fmt.Errorf("failed to setup vf %d device: %v", vfIdx, err)
}
// move VF device to ns
if err = netlink.LinkSetNsFd(vfDev, int(netns.Fd())); err != nil {
return nil, fmt.Errorf("failed to move vf %d to netns: %v", vfIdx, err)
}
err = netns.Do(func(_ ns.NetNS) error {
err := ip.RenameLink(vfDevName, ifName)
if err != nil {
return fmt.Errorf("failed to rename vlan to %q: %v", ifName, err)
}
vf.Name = ifName
// Re-fetch interface to get all properties/attributes
contVF, err := netlink.LinkByName(vf.Name)
if err != nil {
return fmt.Errorf("failed to refetch vlan %q: %v", vf.Name, err)
}
vf.Mac = contVF.Attrs().HardwareAddr.String()
vf.Sandbox = netns.Path()
return nil
})
if err != nil {
return nil, err
}
return vf, nil
}
func allocFreeVF() (netlink.Link, int, string, error) {
vfIdx := -1
devName := ""
// Get the list of physical NICs (PFs) available on this machine
links, err := netlink.LinkList()
if err != nil {
return nil, -1, "", fmt.Errorf("failed to list available physical NICs: %v", err)
}
for _, link := range links {
if link.Type() == "device" && link.Attrs().OperState == netlink.OperUp {
master := link.Attrs().Name
sriovFile := fmt.Sprintf("/sys/class/net/%s/device/sriov_numvfs", master)
if _, err := os.Lstat(sriovFile); err != nil {
return nil, -1, "", fmt.Errorf("failed to open the sriov_numfs of device %q: %v", master, err)
}
data, err := ioutil.ReadFile(sriovFile)
if err != nil {
return nil, -1, "", fmt.Errorf("failed to read the sriov_numfs of device %q: %v", master, err)
}
if len(data) == 0 {
return nil, -1, "", fmt.Errorf("no data in the file %q", sriovFile)
}
sriovNumfs := strings.TrimSpace(string(data))
vfTotal, err := strconv.Atoi(sriovNumfs)
if err != nil {
return nil, -1, "", fmt.Errorf("failed to convert sriov_numfs(byte value) to int of device %q: %v", master, err)
}
if vfTotal <= 0 {
return nil, -1, "", fmt.Errorf("no virtual function in the device %q", master)
}
for vf := 0; vf < vfTotal; vf++ {
devName, err = getVFDeviceName(master, vf)
// got a free vf
if err == nil {
vfIdx = vf
break
}
}
if vfIdx == -1 {
return nil, -1, "", fmt.Errorf("can not get a free virtual function in directory %s", master)
}
return link, vfIdx, devName, nil
}
}
return nil, vfIdx, devName, fmt.Errorf("no available physical NIC (PF) found on this host")
}
func getVFDeviceName(master string, vf int) (string, error) {
vfDir := fmt.Sprintf("/sys/class/net/%s/device/virtfn%d/net", master, vf)
if _, err := os.Lstat(vfDir); err != nil {
return "", fmt.Errorf("failed to open the virtfn%d dir of the device %q: %v", vf, master, err)
}
infos, err := ioutil.ReadDir(vfDir)
if err != nil {
return "", fmt.Errorf("failed to read the virtfn%d dir of the device %q: %v", vf, master, err)
}
if len(infos) != 1 {
return "", fmt.Errorf("no network device in directory %s", vfDir)
}
return infos[0].Name(), nil
}
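// NOTE (illustrative, assumed layout): allocFreeVF and getVFDeviceName above rely on the
// standard Linux SR-IOV sysfs tree. For a PF named eth0 with VFs enabled, the kernel typically
// exposes something like:
//
//   /sys/class/net/eth0/device/sriov_numvfs        -> "8"
//   /sys/class/net/eth0/device/virtfn0/net/eth0v0  -> VF 0 still in the host netns
//   /sys/class/net/eth0/device/virtfn1/net/        -> empty once VF 1 was moved to a pod netns
//
// A VF whose virtfnN/net directory still contains exactly one entry is treated as free;
// the interface names (eth0, eth0v0) are examples only.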
func releaseVF(conf *NetConf, ifName string, netns ns.NetNS) error {
initns, err := ns.GetCurrentNS()
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV releaseVF initns = %v\n", initns)
if err != nil {
return fmt.Errorf("failed to get init netns: %v", err)
}
// for IPAM in cmdDel
return netns.Do(func(_ ns.NetNS) error {
// get VF device
vfDev, err := netlink.LinkByName(ifName)
if err != nil {
return fmt.Errorf("failed to lookup device %s: %v", ifName, err)
}
// device name in init netns
index := vfDev.Attrs().Index
devName := fmt.Sprintf("dev%d", index)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV releaseVF index = %v devName = %v vfDev = %v\n", index, devName, vfDev)
// shutdown VF device
if err = netlink.LinkSetDown(vfDev); err != nil {
return fmt.Errorf("failed to down device: %v", err)
}
// rename VF device
err = ip.RenameLink(ifName, devName)
if err != nil {
return fmt.Errorf("failed to rename device %s to %s: %v", ifName, devName, err)
}
// move VF device to init netns
if err = netlink.LinkSetNsFd(vfDev, int(initns.Fd())); err != nil {
return fmt.Errorf("failed to move device %s to init netns: %v", ifName, err)
}
return nil
})
}
func cmdAdd(args *skel.CmdArgs) error {
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.ContainerID = %v\n", args.ContainerID)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Netns = %v\n", args.Netns)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.IfName = %v\n", args.IfName)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Args = %v\n", args.Args)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Path = %v\n", args.Path)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.StdinData = %v\n", string(args.StdinData))
n, cniVersion, err := loadConf(args.StdinData)
if err != nil {
return err
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
}
defer netns.Close()
vfInterface, err := setupVF(n, args.IfName, netns)
if err != nil {
return err
}
// run the IPAM plugin and get back the config to apply
r, err := ipam.ExecAdd(n.IPAM.Type, args.StdinData)
if err != nil {
return err
}
// Convert whatever the IPAM result was into the current Result type
result, err := current.NewResultFromResult(r)
if err != nil {
return err
}
if len(result.IPs) == 0 {
return errors.New("IPAM plugin returned missing IP config")
}
for _, ipc := range result.IPs {
// All addresses belong to the vlan interface
ipc.Interface = current.Int(0)
}
result.Interfaces = []*current.Interface{vfInterface}
err = netns.Do(func(_ ns.NetNS) error {
return ipam.ConfigureIface(args.IfName, result)
})
if err != nil {
return err
}
| }
func cmdDel(args *skel.CmdArgs) error {
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.ContainerID = %v\n", args.ContainerID)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Netns = %v\n", args.Netns)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.IfName = %v\n", args.IfName)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Args = %v\n", args.Args)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Path = %v\n", args.Path)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.StdinData = %v\n", string(args.StdinData))
n, _, err := loadConf(args.StdinData)
if err != nil {
return err
}
if args.Netns == "" {
return nil
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
}
defer netns.Close()
if err = releaseVF(n, args.IfName, netns); err != nil {
return err
}
err = ipam.ExecDel(n.IPAM.Type, args.StdinData)
if err != nil {
return err
}
// err = ns.WithNetNSPath(args.Netns, func(_ ns.NetNS) error {
// // get VF device
// vfDev, err := netlink.LinkByName(ifName)
// if err != nil {
// return fmt.Errorf("failed to lookup device %s: %v", ifName, err)
// }
// // device name in init netns
// index := vfDev.Attrs().Index
// devName := fmt.Sprintf("%s_%d", n.Master, index)
// // shutdown VF device
// if err = netlink.LinkSetDown(vfDev); err != nil {
// return fmt.Errorf("failed to down device: %v", err)
// }
// // rename VF device
// err = ip.RenameLink(ifName, devName)
// if err != nil {
// return fmt.Errorf("failed to rename device %s to %s: %v", ifName, devName, err)
// }
// // move VF device to init netns
// // if err = netlink.LinkSetNsFd(vfDev, int(ns.Fd())); err != nil {
// // return fmt.Errorf("failed to move device %s to init netns: %v", ifName, err)
// // }
// _, err = ip.DelLinkByNameAddr(ifName, netlink.FAMILY_V4)
// // FIXME: use ip.ErrLinkNotFound when cni is revendored
// if err != nil && err.Error() == "Link not found" {
// return nil
// }
// return err
// })
return err
}
func main() {
skel.PluginMain(cmdAdd, cmdDel, version.All)
} | result.DNS = n.DNS
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd result = %v\n", result)
return types.PrintResult(result, cniVersion) | random_line_split |
sriov.go | // Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"runtime"
"strconv"
"strings"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ip"
"github.com/containernetworking/plugins/pkg/ipam"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/vishvananda/netlink"
)
type NetConf struct {
types.NetConf
VlanId int `json:"vlanId"`
MTU int `json:"mtu,omitempty"`
}
func init() {
// this ensures that main runs only on main thread (thread group leader).
// since namespace ops (unshare, setns) are done for a single thread, we
// must ensure that the goroutine does not jump from OS thread to thread
runtime.LockOSThread()
}
func loadConf(bytes []byte) (*NetConf, string, error) {
n := &NetConf{}
if err := json.Unmarshal(bytes, n); err != nil {
return nil, "", fmt.Errorf("failed to load netconf: %v", err)
}
if n.VlanId < 0 || n.VlanId > 4094 {
return nil, "", fmt.Errorf(`invalid VLAN ID %d (must be between 0 and 4094 inclusive)`, n.VlanId)
}
return n, n.CNIVersion, nil
}
func setupVF(conf *NetConf, ifName string, netns ns.NetNS) (*current.Interface, error) {
vf := &current.Interface{}
// Allocate an available Virtual Function (VF)
m, vfIdx, vfDevName, err := allocFreeVF()
if err != nil {
return nil, err
}
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV allocated from NIC %v the VF at index %v, device name: %v\n", m.Attrs().Name, vfIdx, vfDevName)
vfDev, err := netlink.LinkByName(vfDevName)
if err != nil {
return nil, fmt.Errorf("failed to lookup vf device %q: %v", vfDevName, err)
}
if conf.MTU <= 0 {
conf.MTU = m.Attrs().MTU
}
if err = netlink.LinkSetVfHardwareAddr(m, vfIdx, vfDev.Attrs().HardwareAddr); err != nil {
return nil, fmt.Errorf("failed to set vf %d macaddress: %v", vfIdx, err)
}
if err = netlink.LinkSetVfVlan(m, vfIdx, conf.VlanId); err != nil {
return nil, fmt.Errorf("failed to set vf %d vlan: %v", vfIdx, err)
}
if err = netlink.LinkSetUp(vfDev); err != nil {
return nil, fmt.Errorf("failed to setup vf %d device: %v", vfIdx, err)
}
// move VF device to ns
if err = netlink.LinkSetNsFd(vfDev, int(netns.Fd())); err != nil {
return nil, fmt.Errorf("failed to move vf %d to netns: %v", vfIdx, err)
}
err = netns.Do(func(_ ns.NetNS) error {
err := ip.RenameLink(vfDevName, ifName)
if err != nil {
return fmt.Errorf("failed to rename vlan to %q: %v", ifName, err)
}
vf.Name = ifName
// Re-fetch interface to get all properties/attributes
contVF, err := netlink.LinkByName(vf.Name)
if err != nil {
return fmt.Errorf("failed to refetch vlan %q: %v", vf.Name, err)
}
vf.Mac = contVF.Attrs().HardwareAddr.String()
vf.Sandbox = netns.Path()
return nil
})
if err != nil {
return nil, err
}
return vf, nil
}
func allocFreeVF() (netlink.Link, int, string, error) {
vfIdx := -1
devName := ""
// Get the list of physical NICs (PFs) available on this machine
links, err := netlink.LinkList()
if err != nil {
return nil, -1, "", fmt.Errorf("failed to list available physical NICs: %v", err)
}
for _, link := range links {
if link.Type() == "device" && link.Attrs().OperState == netlink.OperUp {
master := link.Attrs().Name
sriovFile := fmt.Sprintf("/sys/class/net/%s/device/sriov_numvfs", master)
if _, err := os.Lstat(sriovFile); err != nil {
return nil, -1, "", fmt.Errorf("failed to open the sriov_numfs of device %q: %v", master, err)
}
data, err := ioutil.ReadFile(sriovFile)
if err != nil {
return nil, -1, "", fmt.Errorf("failed to read the sriov_numfs of device %q: %v", master, err)
}
if len(data) == 0 {
return nil, -1, "", fmt.Errorf("no data in the file %q", sriovFile)
}
sriovNumfs := strings.TrimSpace(string(data))
vfTotal, err := strconv.Atoi(sriovNumfs)
if err != nil {
return nil, -1, "", fmt.Errorf("failed to convert sriov_numfs(byte value) to int of device %q: %v", master, err)
}
if vfTotal <= 0 {
return nil, -1, "", fmt.Errorf("no virtual function in the device %q", master)
}
for vf := 0; vf < vfTotal; vf++ {
devName, err = getVFDeviceName(master, vf)
// got a free vf
if err == nil {
vfIdx = vf
break
}
}
if vfIdx == -1 {
return nil, -1, "", fmt.Errorf("can not get a free virtual function in directory %s", master)
}
return link, vfIdx, devName, nil
}
}
return nil, vfIdx, devName, fmt.Errorf("no available physical NIC (PF) found on this host")
}
func getVFDeviceName(master string, vf int) (string, error) {
vfDir := fmt.Sprintf("/sys/class/net/%s/device/virtfn%d/net", master, vf)
if _, err := os.Lstat(vfDir); err != nil {
return "", fmt.Errorf("failed to open the virtfn%d dir of the device %q: %v", vf, master, err)
}
infos, err := ioutil.ReadDir(vfDir)
if err != nil {
return "", fmt.Errorf("failed to read the virtfn%d dir of the device %q: %v", vf, master, err)
}
if len(infos) != 1 {
return "", fmt.Errorf("no network device in directory %s", vfDir)
}
return infos[0].Name(), nil
}
func releaseVF(conf *NetConf, ifName string, netns ns.NetNS) error {
initns, err := ns.GetCurrentNS()
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV releaseVF initns = %v\n", initns)
if err != nil {
return fmt.Errorf("failed to get init netns: %v", err)
}
// for IPAM in cmdDel
return netns.Do(func(_ ns.NetNS) error {
// get VF device
vfDev, err := netlink.LinkByName(ifName)
if err != nil {
return fmt.Errorf("failed to lookup device %s: %v", ifName, err)
}
// device name in init netns
index := vfDev.Attrs().Index
devName := fmt.Sprintf("dev%d", index)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV releaseVF index = %v devName = %v vfDev = %v\n", index, devName, vfDev)
// shutdown VF device
if err = netlink.LinkSetDown(vfDev); err != nil {
return fmt.Errorf("failed to down device: %v", err)
}
// rename VF device
err = ip.RenameLink(ifName, devName)
if err != nil {
return fmt.Errorf("failed to rename device %s to %s: %v", ifName, devName, err)
}
// move VF device to init netns
if err = netlink.LinkSetNsFd(vfDev, int(initns.Fd())); err != nil {
return fmt.Errorf("failed to move device %s to init netns: %v", ifName, err)
}
return nil
})
}
func cmdAdd(args *skel.CmdArgs) error {
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.ContainerID = %v\n", args.ContainerID)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Netns = %v\n", args.Netns)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.IfName = %v\n", args.IfName)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Args = %v\n", args.Args)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Path = %v\n", args.Path)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.StdinData = %v\n", string(args.StdinData))
n, cniVersion, err := loadConf(args.StdinData)
if err != nil {
return err
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
}
defer netns.Close()
vfInterface, err := setupVF(n, args.IfName, netns)
if err != nil {
return err
}
// run the IPAM plugin and get back the config to apply
r, err := ipam.ExecAdd(n. | tdinData)
if err != nil {
return err
}
// Convert whatever the IPAM result was into the current Result type
result, err := current.NewResultFromResult(r)
if err != nil {
return err
}
if len(result.IPs) == 0 {
return errors.New("IPAM plugin returned missing IP config")
}
for _, ipc := range result.IPs {
// All addresses belong to the vlan interface
ipc.Interface = current.Int(0)
}
result.Interfaces = []*current.Interface{vfInterface}
err = netns.Do(func(_ ns.NetNS) error {
return ipam.ConfigureIface(args.IfName, result)
})
if err != nil {
return err
}
result.DNS = n.DNS
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd result = %v\n", result)
return types.PrintResult(result, cniVersion)
}
func cmdDel(args *skel.CmdArgs) error {
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.ContainerID = %v\n", args.ContainerID)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Netns = %v\n", args.Netns)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.IfName = %v\n", args.IfName)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Args = %v\n", args.Args)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Path = %v\n", args.Path)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.StdinData = %v\n", string(args.StdinData))
n, _, err := loadConf(args.StdinData)
if err != nil {
return err
}
if args.Netns == "" {
return nil
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
}
defer netns.Close()
if err = releaseVF(n, args.IfName, netns); err != nil {
return err
}
err = ipam.ExecDel(n.IPAM.Type, args.StdinData)
if err != nil {
return err
}
// err = ns.WithNetNSPath(args.Netns, func(_ ns.NetNS) error {
// // get VF device
// vfDev, err := netlink.LinkByName(ifName)
// if err != nil {
// return fmt.Errorf("failed to lookup device %s: %v", ifName, err)
// }
// // device name in init netns
// index := vfDev.Attrs().Index
// devName := fmt.Sprintf("%s_%d", n.Master, index)
// // shutdown VF device
// if err = netlink.LinkSetDown(vfDev); err != nil {
// return fmt.Errorf("failed to down device: %v", err)
// }
// // rename VF device
// err = ip.RenameLink(ifName, devName)
// if err != nil {
// return fmt.Errorf("failed to rename device %s to %s: %v", ifName, devName, err)
// }
// // move VF device to init netns
// // if err = netlink.LinkSetNsFd(vfDev, int(ns.Fd())); err != nil {
// // return fmt.Errorf("failed to move device %s to init netns: %v", ifName, err)
// // }
// _, err = ip.DelLinkByNameAddr(ifName, netlink.FAMILY_V4)
// // FIXME: use ip.ErrLinkNotFound when cni is revendored
// if err != nil && err.Error() == "Link not found" {
// return nil
// }
// return err
// })
return err
}
func main() {
skel.PluginMain(cmdAdd, cmdDel, version.All)
}
| IPAM.Type, args.S | conditional_block |
sriov.go | // Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"runtime"
"strconv"
"strings"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
"github.com/containernetworking/plugins/pkg/ip"
"github.com/containernetworking/plugins/pkg/ipam"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/vishvananda/netlink"
)
type NetConf struct {
types.NetConf
VlanId int `json:"vlanId"`
MTU int `json:"mtu,omitempty"`
}
func init() {
// this ensures that main runs only on main thread (thread group leader).
// since namespace ops (unshare, setns) are done for a single thread, we
// must ensure that the goroutine does not jump from OS thread to thread
runtime.LockOSThread()
}
func loadConf(bytes []byte) (*NetConf, string, error) {
n := &NetConf{}
if err := json.Unmarshal(bytes, n); err != nil {
return nil, "", fmt.Errorf("failed to load netconf: %v", err)
}
if n.VlanId < 0 || n.VlanId > 4094 {
return nil, "", fmt.Errorf(`invalid VLAN ID %d (must be between 0 and 4094 inclusive)`, n.VlanId)
}
return n, n.CNIVersion, nil
}
func setupVF(conf *NetConf, ifName string, netns ns.NetNS) (*current.Interface, error) {
vf := &current.Interface{}
// Allocate an available Virtual Function (VF)
m, vfIdx, vfDevName, err := allocFreeVF()
if err != nil {
return nil, err
}
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV allocated from NIC %v the VF at index %v, device name: %v\n", m.Attrs().Name, vfIdx, vfDevName)
vfDev, err := netlink.LinkByName(vfDevName)
if err != nil {
return nil, fmt.Errorf("failed to lookup vf device %q: %v", vfDevName, err)
}
if conf.MTU <= 0 {
conf.MTU = m.Attrs().MTU
}
if err = netlink.LinkSetVfHardwareAddr(m, vfIdx, vfDev.Attrs().HardwareAddr); err != nil {
return nil, fmt.Errorf("failed to set vf %d macaddress: %v", vfIdx, err)
}
if err = netlink.LinkSetVfVlan(m, vfIdx, conf.VlanId); err != nil {
return nil, fmt.Errorf("failed to set vf %d vlan: %v", vfIdx, err)
}
if err = netlink.LinkSetUp(vfDev); err != nil {
return nil, fmt.Errorf("failed to setup vf %d device: %v", vfIdx, err)
}
// move VF device to ns
if err = netlink.LinkSetNsFd(vfDev, int(netns.Fd())); err != nil {
return nil, fmt.Errorf("failed to move vf %d to netns: %v", vfIdx, err)
}
err = netns.Do(func(_ ns.NetNS) error {
err := ip.RenameLink(vfDevName, ifName)
if err != nil {
return fmt.Errorf("failed to rename vlan to %q: %v", ifName, err)
}
vf.Name = ifName
// Re-fetch interface to get all properties/attributes
contVF, err := netlink.LinkByName(vf.Name)
if err != nil {
return fmt.Errorf("failed to refetch vlan %q: %v", vf.Name, err)
}
vf.Mac = contVF.Attrs().HardwareAddr.String()
vf.Sandbox = netns.Path()
return nil
})
if err != nil {
return nil, err
}
return vf, nil
}
func allocFreeVF() (netlink.Link, int, string, error) {
vfIdx := -1
devName := ""
// Get the list of physical NICs (PFs) available on this machine
links, err := netlink.LinkList()
if err != nil {
return nil, -1, "", fmt.Errorf("failed to list available physical NICs: %v", err)
}
for _, link := range links {
if link.Type() == "device" && link.Attrs().OperState == netlink.OperUp {
master := link.Attrs().Name
sriovFile := fmt.Sprintf("/sys/class/net/%s/device/sriov_numvfs", master)
if _, err := os.Lstat(sriovFile); err != nil {
return nil, -1, "", fmt.Errorf("failed to open the sriov_numfs of device %q: %v", master, err)
}
data, err := ioutil.ReadFile(sriovFile)
if err != nil {
return nil, -1, "", fmt.Errorf("failed to read the sriov_numfs of device %q: %v", master, err)
}
if len(data) == 0 {
return nil, -1, "", fmt.Errorf("no data in the file %q", sriovFile)
}
sriovNumfs := strings.TrimSpace(string(data))
vfTotal, err := strconv.Atoi(sriovNumfs)
if err != nil {
return nil, -1, "", fmt.Errorf("failed to convert sriov_numfs(byte value) to int of device %q: %v", master, err)
}
if vfTotal <= 0 {
return nil, -1, "", fmt.Errorf("no virtual function in the device %q", master)
}
for vf := 0; vf < vfTotal; vf++ {
devName, err = getVFDeviceName(master, vf)
// got a free vf
if err == nil {
vfIdx = vf
break
}
}
if vfIdx == -1 {
return nil, -1, "", fmt.Errorf("can not get a free virtual function in directory %s", master)
}
return link, vfIdx, devName, nil
}
}
return nil, vfIdx, devName, fmt.Errorf("no available physical NIC (PF) found on this host")
}
func getVFDeviceName(master string, vf int) (string, error) {
vfDir := fmt.Sprintf("/sys/class/net/%s/device/virtfn%d/net", master, vf)
if _, err := os.Lstat(vf | ()
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV releaseVF initns = %v\n", initns)
if err != nil {
return fmt.Errorf("failed to get init netns: %v", err)
}
// for IPAM in cmdDel
return netns.Do(func(_ ns.NetNS) error {
// get VF device
vfDev, err := netlink.LinkByName(ifName)
if err != nil {
return fmt.Errorf("failed to lookup device %s: %v", ifName, err)
}
// device name in init netns
index := vfDev.Attrs().Index
devName := fmt.Sprintf("dev%d", index)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV releaseVF index = %v devName = %v vfDev = %v\n", index, devName, vfDev)
// shutdown VF device
if err = netlink.LinkSetDown(vfDev); err != nil {
return fmt.Errorf("failed to down device: %v", err)
}
// rename VF device
err = ip.RenameLink(ifName, devName)
if err != nil {
return fmt.Errorf("failed to rename device %s to %s: %v", ifName, devName, err)
}
// move VF device to init netns
if err = netlink.LinkSetNsFd(vfDev, int(initns.Fd())); err != nil {
return fmt.Errorf("failed to move device %s to init netns: %v", ifName, err)
}
return nil
})
}
func cmdAdd(args *skel.CmdArgs) error {
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.ContainerID = %v\n", args.ContainerID)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Netns = %v\n", args.Netns)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.IfName = %v\n", args.IfName)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Args = %v\n", args.Args)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.Path = %v\n", args.Path)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd args.StdinData = %v\n", string(args.StdinData))
n, cniVersion, err := loadConf(args.StdinData)
if err != nil {
return err
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
}
defer netns.Close()
vfInterface, err := setupVF(n, args.IfName, netns)
if err != nil {
return err
}
// run the IPAM plugin and get back the config to apply
r, err := ipam.ExecAdd(n.IPAM.Type, args.StdinData)
if err != nil {
return err
}
// Convert whatever the IPAM result was into the current Result type
result, err := current.NewResultFromResult(r)
if err != nil {
return err
}
if len(result.IPs) == 0 {
return errors.New("IPAM plugin returned missing IP config")
}
for _, ipc := range result.IPs {
// All addresses belong to the vlan interface
ipc.Interface = current.Int(0)
}
result.Interfaces = []*current.Interface{vfInterface}
err = netns.Do(func(_ ns.NetNS) error {
return ipam.ConfigureIface(args.IfName, result)
})
if err != nil {
return err
}
result.DNS = n.DNS
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdAdd result = %v\n", result)
return types.PrintResult(result, cniVersion)
}
func cmdDel(args *skel.CmdArgs) error {
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.ContainerID = %v\n", args.ContainerID)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Netns = %v\n", args.Netns)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.IfName = %v\n", args.IfName)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Args = %v\n", args.Args)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.Path = %v\n", args.Path)
fmt.Fprintf(os.Stderr, "***********CNI SR-IOV cmdDel args.StdinData = %v\n", string(args.StdinData))
n, _, err := loadConf(args.StdinData)
if err != nil {
return err
}
if args.Netns == "" {
return nil
}
netns, err := ns.GetNS(args.Netns)
if err != nil {
return fmt.Errorf("failed to open netns %q: %v", args.Netns, err)
}
defer netns.Close()
if err = releaseVF(n, args.IfName, netns); err != nil {
return err
}
err = ipam.ExecDel(n.IPAM.Type, args.StdinData)
if err != nil {
return err
}
// err = ns.WithNetNSPath(args.Netns, func(_ ns.NetNS) error {
// // get VF device
// vfDev, err := netlink.LinkByName(ifName)
// if err != nil {
// return fmt.Errorf("failed to lookup device %s: %v", ifName, err)
// }
// // device name in init netns
// index := vfDev.Attrs().Index
// devName := fmt.Sprintf("%s_%d", n.Master, index)
// // shutdown VF device
// if err = netlink.LinkSetDown(vfDev); err != nil {
// return fmt.Errorf("failed to down device: %v", err)
// }
// // rename VF device
// err = ip.RenameLink(ifName, devName)
// if err != nil {
// return fmt.Errorf("failed to rename device %s to %s: %v", ifName, devName, err)
// }
// // move VF device to init netns
// // if err = netlink.LinkSetNsFd(vfDev, int(ns.Fd())); err != nil {
// // return fmt.Errorf("failed to move device %s to init netns: %v", ifName, err)
// // }
// _, err = ip.DelLinkByNameAddr(ifName, netlink.FAMILY_V4)
// // FIXME: use ip.ErrLinkNotFound when cni is revendored
// if err != nil && err.Error() == "Link not found" {
// return nil
// }
// return err
// })
return err
}
func main() {
skel.PluginMain(cmdAdd, cmdDel, version.All)
}
| Dir); err != nil {
return "", fmt.Errorf("failed to open the virtfn%d dir of the device %q: %v", vf, master, err)
}
infos, err := ioutil.ReadDir(vfDir)
if err != nil {
return "", fmt.Errorf("failed to read the virtfn%d dir of the device %q: %v", vf, master, err)
}
if len(infos) != 1 {
return "", fmt.Errorf("no network device in directory %s", vfDir)
}
return infos[0].Name(), nil
}
func releaseVF(conf *NetConf, ifName string, netns ns.NetNS) error {
initns, err := ns.GetCurrentNS | identifier_body |
ngx-grid-table.component.ts | import { ColumnApi, GridApi, GridOptions, GridReadyEvent, IServerSideGetRowsParams, RowNode } from '@ag-grid-community/core';
import { CellRange } from '@ag-grid-community/core/dist/cjs/interfaces/iRangeController';
import { IServerSideDatasource, IServerSideGetRowsRequest } from '@ag-grid-community/core/dist/cjs/interfaces/iServerSideDatasource';
import { AllModules, ExcelCell } from '@ag-grid-enterprise/all-modules';
import { Component, EventEmitter, Input, OnDestroy, OnInit, Output, TemplateRef, ViewChild } from '@angular/core';
import { Column } from '@ag-grid-community/core/dist/cjs/entities/column';
import { ColumnVO } from '@ag-grid-community/core/dist/cjs/interfaces/iColumnVO';
import { ActivatedRoute, Router } from '@angular/router';
import { ReuseTabService } from '@delon/abc/reuse-tab';
import { ACLService } from '@delon/acl';
import { SFSchema } from '@delon/form';
import { _HttpClient } from '@delon/theme';
import { TranslateService } from '@ngx-translate/core';
import { NzMessageService } from 'ng-zorro-antd/message';
import { NzModalService } from 'ng-zorro-antd/modal';
import { Observable, of, range, Subject } from 'rxjs';
import { catchError, filter, map, merge, mergeMap, pluck, reduce, skip, take, takeUntil, tap } from 'rxjs/operators';
import { Console } from '../../../utils/console';
import { IFilter } from '../../filter-input/filter.types';
// Do not use "import {Console} from '@shared'" here, to avoid a circular reference
import {
buildColACL,
buildFrameworkComponents,
buildMenus,
buildOptionField,
buildResizable,
buildSideBar,
buildStatusBar,
buildTreeDataCfg,
clientSideAsRowQuery,
initGridOptions,
repairRowModeType,
reuseTabFix,
serverSideAsRowQuery,
} from '../ngx-grid-functions';
import { NgxGridTableConstants } from '../ngx-grid-table-constants';
import {
ApiGetter,
GridStatistics,
IGridDataSource,
IPage,
IRowQuery,
MenuItem,
PaginationCfg,
TreeDataCfg,
} from '../ngx-grid-table-model';
import { SfQueryFormComponent } from '../sf-query-form/sf-query-form.component';
/**
* The table supports two data-loading modes: paged and infinite.
* Paged: handled with the client-side row model.
* Infinite: handled with the server-side row model.
*
*/
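// Hypothetical usage sketch (not part of the original source): based on the @Input()/@Output()
// bindings declared below, a host page might embed the table roughly like this. The selector
// comes from the decorator below; `userDataSource`, `userSchema` and `onReady` are made-up
// names for the host page's own members.
//
//   <ngx-grid-table
//     [gridOptions]="gridOptions"
//     [dataSource]="userDataSource"
//     [searchSchema]="userSchema"
//     [dataLoadModel]="'pageable'"
//     (gridReady)="onReady($event)">
//   </ngx-grid-table>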
@Component({
selector: 'ngx-grid-table',
templateUrl: './ngx-grid-table.component.html',
styleUrls: ['./ngx-grid-table.component.scss'],
})
export class NgxGridTableComponent implements OnInit, OnDestroy {
// =============================== Internal table state =========================
__show__ = true;
/** Extra context-menu items */
private additionMenu: Array<MenuItem> = [];
/** Destroy notifier */
private destroy$ = new Subject();
/** Whether the data is tree-structured */
private treeData: false | TreeDataCfg = false;
allModules = AllModules;
/** Grid api object available after the grid is initialized, used to control grid behaviour */
api!: GridApi;
/** ColumnApi object available after the grid is initialized, used to control column behaviour */
columnApi!: ColumnApi;
/** Whether data is currently being loaded */
dataLoading = false;
/** Loading progress bar value */
loadProgressPercent = 0;
/** Statistics data */
statistics!: Array<GridStatistics>;
/** Current page object */
cur_page!: IPage<any>;
/** URL of the page that hosts this table */
currentUrl: string;
private haveInit = false;
// ================================== Basic configuration (external) =============================
/** Whether the table is in fullscreen mode */
@Input() fullscreen = false;
/** Current page number */
@Input() pageIndex = 1;
/** Page size */
@Input() pageSize = 20;
/** Base grid configuration */
@Input() gridOptions!: GridOptions;
/** Column ACL template */
@Input() colACLTmpl!: string;
/** Grid theme */
@Input() gridTheme = 'ag-theme-balham';
/** Grid CSS classes */
@Input() gridTableClass = [];
/** Data table style */
@Input() gridTableStyle: { [key: string]: any } = { width: '100%', height: '70%' };
/** Whether to run a query immediately after the grid is initialized */
@Input() initLoadData = true;
/** Paged or infinite loading */
@Input() dataLoadModel: 'pageable' | 'infinite' = 'pageable';
/** Whether to show the pagination control */
@Input() showPagination: false | PaginationCfg = {};
/** Whether to show the default status bar, which displays stats for the user's selection */
@Input() defaultStatusBar = false;
/** Single-row or multi-row selection */
@Input() rowSelection: undefined | 'single' | 'multiple' = undefined;
/** Whether to show statistics */
@Input() showStatistics = false;
// /** Whether to show the delete menu */
// @Input() deleteMenu = false;
/** Whether column widths are resizable by default */
@Input() resizable = false;
/** When grouping, whether a group checkbox also selects its children */
@Input() groupSelectsChildren = true;
/** Template for the action (option) column */
@Input() optionCell!: TemplateRef<any>;
/** Data source */
@Input() dataSource!: IGridDataSource<any>;
/** Search form schema */
@Input() searchSchema!: SFSchema;
/** Initial form data */
@Input() initFormData!: any;
@Input() customPageView!: TemplateRef<any>;
@Input() filterHand!: (filters: IFilter[], form: SfQueryFormComponent) => IFilter[];
@Input() topToolPanel!: TemplateRef<any>;
@Input() bottomToolPanel!: TemplateRef<any>;
// ============================== Events ============================
@Output() fullscreenChange = new EventEmitter<boolean>();
@Output() pageIndexChange = new EventEmitter<number>();
@Output() pageSizeChange = new EventEmitter<number>();
/** Grid ready event */
@Output() gridReady = new EventEmitter<{ event: GridReadyEvent; gridTable: NgxGridTableComponent }>();
@Output() gridReLoadReady = new EventEmitter<{ event: GridReadyEvent; gridTable: NgxGridTableComponent }>();
/** Delete event */
@Output() deleted = new EventEmitter<any>();
@Output() dataLoadingChange = new EventEmitter<boolean>();
@Output() dataLoadModelChange = new EventEmitter<'pageable' | 'infinite'>();
// ============================= Components =======================
@ViewChild(SfQueryFormComponent) form!: SfQueryFormComponent;
@ViewChild('defaultPageTmpl') defaultPageTmpl!: TemplateRef<any>;
@ViewChild('progressTmpl') progressTmpl!: TemplateRef<any>;
constructor(
private translateService: TranslateService,
private msg: NzMessageService,
private modal: NzModalService,
private reuseTabService: ReuseTabService,
private activatedRoute: ActivatedRoute,
private router: Router,
private http: _HttpClient,
private aclService: ACLService,
) {
this.currentUrl = this.router.url;
}
ngOnInit(): void {
if (!this.haveInit) {
this.initGridOptions();
}
}
private initGridOptions() {
// api getter, used to hand the grid api objects to the helper functions
const apiGetter: ApiGetter = { get: () => ({ api: this.api, columnApi: this.columnApi }) };
buildFrameworkComponents(this.gridOptions);
// Build the context menus
buildMenus(
this.gridOptions,
this.translateService,
this.aclService,
this.additionMenu,
// this.deleteMenu,
this.destroy$,
apiGetter,
() => this.getSelectionData(),
// () => this.doDelete(),
);
buildTreeDataCfg(this.gridOptions, this.treeData);
buildStatusBar(this.gridOptions, this.defaultStatusBar);
buildSideBar(this.gridOptions);
buildResizable(this.gridOptions, this.resizable);
buildOptionField(this.gridOptions, this.optionCell);
reuseTabFix(this.router, this.currentUrl, this.destroy$, apiGetter);
repairRowModeType(this.gridOptions, this.dataLoadModel);
buildColACL(this.gridOptions, this.aclService, this.colACLTmpl);
if (this.showPagination !== false) {
this.showPagination = {
...NgxGridTableConstants.DEFAULT_PAGINATION,
...this.showPagination,
};
}
this.gridOptions = initGridOptions(this.gridOptions, this.rowSelection!, (event) => this.onGridReady(event));
}
private onGridReady(event: GridReadyEvent): void {
this.api = event.api;
this.columnApi = event.columnApi;
if (this.dataLoadModel === 'infinite') {
this.api.setServerSideDatasource(this.infiniteDataSource());
}
if (this.haveInit) {
this.gridReLoadReady.emit({ event, gridTable: this });
} else {
this.gridReady.emit({ event, gridTable: this });
this.haveInit = true;
}
// When the grid data is ready
// this.api.addEventListener('firstDataRendered', () => {
// this.firstDataRenderedTime = new Date().getTime();
// if (cellActionColumnDefs.length) {
// this.columnApi.autoSizeColumn('_action');
// }
// });
if (this.initLoadData) {
this.refresh();
}
}
private filters(): IFilter[] {
let filters = [] as IFilter[];
if (this.form) {
filters = [...this.form.filter];
}
if (this.filterHand) {
filters = [...this.filterHand(filters, this.form)];
}
return filters;
}
private infiniteDataSource(): IServerSideDatasource {
const getRows = (params: IServerSideGetRowsParams) => {
const rowQuery = serverSideAsRowQuery(params, this.treeData, this.filters());
Console.collapse('grid-table.component getData', 'indigoBg', 'queryParams', 'indigoOutline');
console.log(rowQuery);
console.groupEnd();
this.setDataLoading(true);
this.api.showLoadingOverlay();
this.dataSource(rowQuery)
.pipe(
takeUntil(this.destroy$),
catchError((err) => {
Console.collapse('grid-table.component RefreshRowsData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
return of({} as IPage<any>);
}),
)
.subscribe((resultPage) => {
if (resultPage.records) {
params.successCallback(resultPage.records, resultPage.total);
this.statistics = resultPage.statistics || [];
this.api.hideOverlay();
} else {
this.api.showNoRowsOverlay();
params.failCallback();
}
this.setDataLoading(false);
});
};
return { getRows };
}
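// Hypothetical example (not part of the original file): both infiniteDataSource above and
// query() below simply call `this.dataSource(rowQuery)` and expect an IPage back, so a host
// component could wire in an HTTP-backed data source roughly like this. The endpoint
// '/api/demo/page' and the response envelope are assumptions for illustration only.
//
//   const dataSource: IGridDataSource<any> = (rowQuery: IRowQuery) =>
//     this.http.post<IPage<any>>('/api/demo/page', rowQuery); // records / total / size / current / statistics expected on IPage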
// private doDelete(): void {
// const data: any[] = this.getCheckedData();
// if (!data.length) {
// return;
// }
// let mapper: (v: any) => string;
// if (typeof this.gridOptions.getRowNodeId === 'undefined') {
// if (typeof data[0].id !== 'undefined') {
// mapper = (v) => v.id;
// } else {
// console.warn(
// 'the delete operation cannot determine row keys; by default the id field is used as the key. If there is no id field, or you do not want to use id as the delete key, configure gridOptions.getRowNodeId',
// );
// return;
// }
// } else {
// mapper = this.gridOptions.getRowNodeId;
// }
//
// const ids: string[] = data.map(mapper);
//
// this.http.delete(`/api/${this.currentUrl}/deleteByKeys`, { keys: ids }).subscribe((value) => {
// this.deleted.emit(value);
// });
// }
/**
* Run a query
*/
query(pageNum: number, pageSize: number): void {
this.api.clearRangeSelection();
this.api.deselectAll();
if (this.dataLoadModel !== 'pageable') {
console.warn('paged queries are only available in pageable mode');
return;
}
const rowQuery: IRowQuery = clientSideAsRowQuery(this.api, this.columnApi, pageNum, pageSize, this.filters());
Console.collapse('grid-table.component getData', 'indigoBg', 'queryParams', 'indigoOutline');
console.log(rowQuery);
console.groupEnd();
this.api.showLoadingOverlay();
this.setDataLoading(true);
this.dataSource(rowQuery)
.pipe(
takeUntil(this.destroy$),
catchError((err) => {
Console.collapse('grid-table.component RefreshRowsData', 'redBg', 'ERROR', 'redOutline');
console.log(err);
console.groupEnd();
return of({
total: 0,
records: [],
size: rowQuery.pageSize,
current: rowQuery.pageNum,
statistics: [],
} as IPage<any>);
}),
)
.subscribe((resultPage) => {
this.cur_page = resultPage;
this.api.setRowData(resultPage.records);
this.statistics = resultPage.statistics || [];
if (!resultPage.records.length) {
this.api.showNoRowsOverlay();
} else {
this.api.hideOverlay();
}
this.setDataLoading(false);
});
}
onPageSizeChange(size: number): void {
this.setPageSize(size, true);
this.query(1, size);
}
onPageIndexChange(idx: number): void {
this.setPageIndex(idx, true);
this.query(idx, this.pageSize);
}
/**
* Export every page of data to Excel.
* @param maxRequestCount maximum number of concurrent requests
* @param pageSize number of rows fetched per request, defaults to the current page size
*/
exportAllPageData(maxRequestCount: number = 3, pageSize?: number): void {
const initPageNum = 1;
pageSize = pageSize || this.pageSize;
if (this.dataLoadModel !== 'pageable') {
// For now only client-side (pageable) mode supports exporting the full data set, because in server-side mode setRowData is ineffective due to the cache size limit
console.warn('front-end export of the full data set is only available in pageable mode!');
// Export the currently cached data instead
this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter() });
return;
}
const rowQuery: IRowQuery = clientSideAsRowQuery(this.api, this.columnApi, initPageNum, pageSize, this.filters());
const params = { percent: 0, status: 'active' };
const confirm = this.modal.confirm({
nzContent: this.progressTmpl,
nzTitle: this.translateService.instant('grid.export.confirm'),
nzComponentParams: params,
nzOkText: this.translateService.instant('grid.export.start'),
nzOnOk: () => {
params.percent = 0.01;
params.status = 'active';
confirm.updateConfig({
nzTitle: undefined,
nzContent: this.progressTmpl,
nzComponentParams: params,
nzOkText: null,
nzClosable: false,
nzCloseIcon: undefined,
nzIconType: undefined,
nzMaskClosable: false,
nzCancelText: null,
});
let statisticsFooter: Array<GridStatistics> = [];
return this.dataSource(rowQuery)
.pipe(
mergeMap((page: IPage<any>) => {
const { total, size, current, records, statistics } = page;
statisticsFooter = statistics || [];
const totalPage = Math.ceil(total / size);
if (totalPage > current) {
const step = parseFloat((100 / totalPage).toFixed(2));
params.percent = step;
return range(current + 1, totalPage).pipe(
mergeMap((index: number) => {
return this.dataSource(Object.assign({}, rowQuery, { pageNum: index }));
}, maxRequestCount),
pluck('records'),
tap((next) => {
params.percent = parseFloat((params.percent + step).toFixed(2));
}),
reduce((acc, val) => acc.concat(val || []), records),
);
} else {
return of([]);
}
}),
)
.toPromise()
.then((next) => {
params.status = '';
params.percent = 100;
this.api.setRowData(next);
this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter(statisticsFooter) });
this.refresh();
return true;
})
.catch((err) => {
Console.collapse('grid-table.component ExportAllPageData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
params.status = 'exception';
confirm.updateConfig({
nzCancelText: undefined,
nzOkText: this.translateService.instant('grid.export.retry'),
});
return false;
});
},
});
}
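// Hypothetical usage note (not in the original source): a toolbar button could trigger the full
// export with a custom fan-out, e.g. `table.exportAllPageData(5, 500)` to fetch pages of 500 rows
// with at most 5 requests in flight; with no arguments it falls back to 3 concurrent requests and
// the current page size, as implemented above.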
private exportStatisticsFooter(statistics?: Array<GridStatistics>): ExcelCell[][] {
const footers: Array<Array<ExcelCell>> = [[]];
statistics = statistics || this.statistics;
if (this.showStatistics && statistics && statistics.length) {
const footer = statistics
.filter((items) => items.skipExport !== true)
.map((items) => {
const rows: Array<ExcelCell> = [];
rows.push({
styleId: 'bigHeader',
data: {
type: 'String',
value: items.label || '',
},
});
items.fields.forEach((item) => {
rows.push({
styleId: 'bigHeader',
data: {
type: 'String',
value: `${item.label}:${item.value}`,
},
} as ExcelCell);
});
return rows;
});
footers.push(...footer);
}
return footers;
}
ngOnDestroy(): void {
this.destroy$.next();
this.destroy$.complete();
}
public setData(data: IPage<any>): void {
if (this.dataLoadModel === 'pageable') {
this.cur_page = data;
this.setPageIndex(this.cur_page.current, false);
this.setPageSize(this.cur_page.size, false);
this.api.setRowData(this.cur_page.records);
} else {
console.warn("values can only be set directly when dataLoadModel === 'pageable'");
}
}
// Add a context-menu item
public addMenu(item: MenuItem): void {
this.additionMenu.push(item);
}
/**
* Refresh the table
*/
public refresh(): Observable<void> {
if (this.dataLoadModel === 'pageable') {
this.query(this.pageIndex, this.pageSize);
} else {
this.api.purgeServerSideCache();
}
// Once the loading state flips back to false the current data load can be considered finished; return this Observable so other business code can subscribe to it
return this.dataLoadingChange.asObservable().pipe(
filter((status) => !status),
take(1),
map(() => {}),
);
}
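// Hypothetical caller-side note (not in the original source): because refresh() returns an
// Observable that emits once the next load completes, a host page could chain work on it:
//
//   this.gridTable.refresh().subscribe(() => this.msg.success('reloaded'));
//
// where `gridTable` is a ViewChild reference to this component and `msg` is the host's own
// NzMessageService instance.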
/**
* Get the row data of all range-selected rows
*/
public getSelectionData<U>(): U[] {
return this.getSelection((s) => s.data);
}
/**
* Get all range-selected rows and transform each one with the given function
*/
public getSelection<U>(hand: (value: RowNode) => U): U[] {
const range: CellRange = this.api.getCellRanges()[0];
const nodes: RowNode[] = this.api.getRenderedNodes();
if (range && range.startRow && range.endRow) {
const r = [];
for (let i = range.startRow.rowIndex; i <= range.endRow.rowIndex; i++) {
r.push(hand(nodes[i]));
}
return r;
} else {
return [];
}
}
/**
* Get the row data of all checkbox-selected rows
*/
public getCheckedData<U>(): U[] {
return this.getChecked((s) => s.data);
}
/**
* Get all checkbox-selected rows and transform each one with the given function
*/
public getChecked<U>(hand: (value: RowNode) => U): U[] {
const nodes: RowNode[] = this.api.getSelectedNodes();
return nodes.map(hand);
}
/**
* Reset the search form
*/
resetForm(): void {
this.form.reset();
}
get pageViewTmpl(): TemplateRef<any> {
return this.customPageView ? this.customPageView : this.defaultPageTmpl;
}
// ================================== Data binding ====================================
toggleFullscreen(): void {
this.setFullscreen(!this.fullscreen);
}
setFullscreen(fullscreen: boolean): void {
this.fullscreen = fullscreen;
this.fullscreenChange.emit(this.fullscreen);
}
setPageIndex(pageIndex: number, emit: boolean): void {
this.pageIndex = pageIndex;
if (emit) {
this.pageIndexChange.emit(pageIndex);
}
}
setPageSize(pageSize: number, emit: boolean): void {
this.pageSize = pageSize;
if (emit) {
this.pageSizeChange.emit(pageSize);
}
}
private setDataLoading(loading: boolean): void {
this.dataLoading = loading;
this.dataLoadingChange.emit(this.dataLoading);
}
toggleDataModel(): void {
if ('pageable' === this.dataLoadModel) {
this.setDataMode('infinite');
} else {
this.setDataMode('pageable');
}
}
setDataMode(model: 'pageable' | 'infinite'): void {
this.dataLoadModel = model;
repairRowModeType(this.gridOptions, this.dataLoadModel);
// TODO refresh the table
this.repaint();
this.dataLoadModelChange.emit(this.dataLoadModel);
}
repaint(): void {
this.__show__ = false;
setTimeout(() => (this.__show__ = true), 200);
}
}
| conditional_block | ||
ngx-grid-table.component.ts | import { ColumnApi, GridApi, GridOptions, GridReadyEvent, IServerSideGetRowsParams, RowNode } from '@ag-grid-community/core';
import { CellRange } from '@ag-grid-community/core/dist/cjs/interfaces/iRangeController';
import { IServerSideDatasource, IServerSideGetRowsRequest } from '@ag-grid-community/core/dist/cjs/interfaces/iServerSideDatasource';
import { AllModules, ExcelCell } from '@ag-grid-enterprise/all-modules';
import { Component, EventEmitter, Input, OnDestroy, OnInit, Output, TemplateRef, ViewChild } from '@angular/core';
import { Column } from '@ag-grid-community/core/dist/cjs/entities/column';
import { ColumnVO } from '@ag-grid-community/core/dist/cjs/interfaces/iColumnVO';
import { ActivatedRoute, Router } from '@angular/router';
import { ReuseTabService } from '@delon/abc/reuse-tab';
import { ACLService } from '@delon/acl';
import { SFSchema } from '@delon/form';
import { _HttpClient } from '@delon/theme';
import { TranslateService } from '@ngx-translate/core';
import { NzMessageService } from 'ng-zorro-antd/message';
import { NzModalService } from 'ng-zorro-antd/modal';
import { Observable, of, range, Subject } from 'rxjs';
import { catchError, filter, map, merge, mergeMap, pluck, reduce, skip, take, takeUntil, tap } from 'rxjs/operators';
import { Console } from '../../../utils/console';
import { IFilter } from '../../filter-input/filter.types';
// Do not use "import {Console} from '@shared'" here, to avoid a circular reference
import {
buildColACL,
buildFrameworkComponents,
buildMenus,
buildOptionField,
buildResizable,
buildSideBar,
buildStatusBar,
buildTreeDataCfg,
clientSideAsRowQuery,
initGridOptions,
repairRowModeType,
reuseTabFix,
serverSideAsRowQuery,
} from '../ngx-grid-functions';
import { NgxGridTableConstants } from '../ngx-grid-table-constants';
import {
ApiGetter,
GridStatistics,
IGridDataSource,
IPage,
IRowQuery,
MenuItem,
PaginationCfg,
TreeDataCfg,
} from '../ngx-grid-table-model';
import { SfQueryFormComponent } from '../sf-query-form/sf-query-form.component';
/**
* The table supports two data-loading modes: paged and infinite.
* Paged: handled with the client-side row model.
* Infinite: handled with the server-side row model.
*
*/
@Component({
selector: 'ngx-grid-table',
templateUrl: './ngx-grid-table.component.html',
styleUrls: ['./ngx-grid-table.component.scss'],
})
export class NgxGridTableComponent implements OnInit, OnDestroy {
// =============================== Internal table state =========================
__show__ = true;
/** Extra context-menu items */
private additionMenu: Array<MenuItem> = [];
/** Destroy notifier */
private destroy$ = new Subject();
/** Whether the data is tree-structured */
private treeData: false | TreeDataCfg = false;
allModules = AllModules;
/** Grid api object available after the grid is initialized, used to control grid behaviour */
api!: GridApi;
/** ColumnApi object available after the grid is initialized, used to control column behaviour */
columnApi!: ColumnApi;
/** Whether data is currently being loaded */
dataLoading = false;
/** Loading progress bar value */
loadProgressPercent = 0;
/** Statistics data */
statistics!: Array<GridStatistics>;
/** Current page object */
cur_page!: IPage<any>;
/** URL of the page that hosts this table */
currentUrl: string;
private haveInit = false;
// ================================== Basic configuration (external) =============================
/** Whether the table is in fullscreen mode */
@Input() fullscreen = false;
/** Current page number */
@Input() pageIndex = 1;
/** Page size */
@Input() pageSize = 20;
/** Base grid configuration */
@Input() gridOptions!: GridOptions;
/** Column ACL template */
@Input() colACLTmpl!: string;
/** Grid theme */
@Input() gridTheme = 'ag-theme-balham';
/** Grid CSS classes */
@Input() gridTableClass = [];
/** Data table style */
@Input() gridTableStyle: { [key: string]: any } = { width: '100%', height: '70%' };
/** Whether to run a query immediately after the grid is initialized */
@Input() initLoadData = true;
/** Paged or infinite loading */
@Input() dataLoadModel: 'pageable' | 'infinite' = 'pageable';
/** Whether to show the pagination control */
@Input() showPagination: false | PaginationCfg = {};
/** Whether to show the default status bar, which displays stats for the user's selection */
@Input() defaultStatusBar = false;
/** Single-row or multi-row selection */
@Input() rowSelection: undefined | 'single' | 'multiple' = undefined;
/** Whether to show statistics */
@Input() showStatistics = false;
// /** Whether to show the delete menu */
// @Input() deleteMenu = false;
/** Whether column widths are resizable by default */
@Input() resizable = false;
/** When grouping, whether a group checkbox also selects its children */
@Input() groupSelectsChildren = true;
/** Template for the action (option) column */
@Input() optionCell!: TemplateRef<any>;
/** Data source */
@Input() dataSource!: IGridDataSource<any>;
/** Search form schema */
@Input() searchSchema!: SFSchema;
/** Initial form data */
@Input() initFormData!: any;
@Input() customPageView!: TemplateRef<any>;
@Input() filterHand!: (filters: IFilter[], form: SfQueryFormComponent) => IFilter[];
@Input() topToolPanel!: TemplateRef<any>;
@Input() bottomToolPanel!: TemplateRef<any>;
// ============================== Events ============================
@Output() fullscreenChange = new EventEmitter<boolean>();
@Output() pageIndexChange = new EventEmitter<number>();
@Output() pageSizeChange = new EventEmitter<number>();
/** Grid ready event */
@Output() gridReady = new EventEmitter<{ event: GridReadyEvent; gridTable: NgxGridTableComponent }>();
@Output() gridReLoadReady = new EventEmitter<{ event: GridReadyEvent; gridTable: NgxGridTableComponent }>();
/** Delete event */
@Output() deleted = new EventEmitter<any>();
@Output() dataLoadingChange = new EventEmitter<boolean>();
@Output() dataLoadModelChange = new EventEmitter<'pageable' | 'infinite'>();
// ============================= Components =======================
@ViewChild(SfQueryFormComponent) form!: SfQueryFormComponent;
@ViewChild('defaultPageTmpl') defaultPageTmpl!: TemplateRef<any>;
@ViewChild('progressTmpl') progressTmpl!: TemplateRef<any>;
constructor(
private translateService: TranslateService,
private msg: NzMessageService,
private modal: NzModalService,
private reuseTabService: ReuseTabService,
private activatedRoute: ActivatedRoute,
private router: Router,
private http: _HttpClient,
private aclService: ACLService,
) {
this.currentUrl = this.router.url;
}
ngOnInit(): void {
if (!this.haveInit) {
this.initGridOptions();
}
}
private initGridOptions() {
// api getter, used to hand the grid api objects to the helper functions
const apiGetter: ApiGetter = { get: () => ({ api: this.api, columnApi: this.columnApi }) };
buildFrameworkComponents(this.gridOptions);
// Build the context menus
buildMenus(
this.gridOptions,
this.translateService,
this.aclService,
this.additionMenu,
// this.deleteMenu,
this.destroy$,
apiGetter,
() => this.getSelectionData(),
// () => this.doDelete(),
);
buildTreeDataCfg(this.gridOptions, this.treeData);
buildStatusBar(this.gridOptions, this.defaultStatusBar);
buildSideBar(this.gridOptions);
buildResizable(this.gridOptions, this.resizable);
buildOptionField(this.gridOptions, this.optionCell);
reuseTabFix(this.router, this.currentUrl, this.destroy$, apiGetter);
repairRowModeType(this.gridOptions, this.dataLoadModel);
buildColACL(this.gridOptions, this.aclService, this.colACLTmpl);
if (this.showPagination !== false) {
this.showPagination = {
...NgxGridTableConstants.DEFAULT_PAGINATION,
...this.showPagination,
};
}
this.gridOptions = initGridOptions(this.gridOptions, this.rowSelection!, (event) => this.onGridReady(event));
}
private onGridReady(event: GridReadyEvent): void {
this.api = event.api;
this.columnApi = event.columnApi;
if (this.dataLoadModel === 'infinite') {
this.api.setServerSideDatasource(this.infiniteDataSource());
}
if (this.haveInit) {
this.gridReLoadReady.emit({ event, gridTable: this });
} else {
this.gridReady.emit({ event, gridTable: this });
this.haveInit = true;
}
// When the grid data is ready
// this.api.addEventListener('firstDataRendered', () => {
// this.firstDataRenderedTime = new Date().getTime();
// if (cellActionColumnDefs.length) {
// this.columnApi.autoSizeColumn('_action');
// }
// });
if (this.initLoadData) {
this.refresh();
}
}
private filters(): IFilter[] {
let filters = [] as IFilter[];
if (this.form) {
filters = [...this.form.filter];
}
if (this.filterHand) {
filters = [...this.filterHand(filters, this.form)];
}
return filters;
}
private infiniteDataSource(): IServerSideDatasource {
const getRows = (params: IServerSideGetRowsParams) => {
const rowQuery = serverSideAsRowQuery(params, this.treeData, this.filters());
Console.collapse('grid-table.component getData', 'indigoBg', 'queryParams', 'indigoOutline');
console.log(rowQuery);
console.groupEnd();
this.setDataLoading(true);
this.api.showLoadingOverlay();
this.dataSource(rowQuery)
.pipe(
takeUntil(this.destroy$),
catchError((err) => {
Console.collapse('grid-table.component RefreshRowsData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
return of({} as IPage<any>);
}),
)
.subscribe((resultPage) => {
if (resultPage.records) {
params.successCallback(resultPage.records, resultPage.total);
this.statistics = resultPage.statistics || [];
this.api.hideOverlay();
} else {
this.api.showNoRowsOverlay();
params.failCallback();
}
this.setDataLoading(false);
});
};
return { getRows };
}
// private doDelete(): void {
// const data: any[] = this.getCheckedData();
// if (!data.length) {
// return;
// }
// let mapper: (v: any) => string;
// if (typeof this.gridOptions.getRowNodeId === 'undefined') {
// if (typeof data[0].id !== 'undefined') {
// mapper = (v) => v.id;
// } else {
// console.warn(
// 'the delete operation cannot determine row keys; by default the id field is used as the key. If there is no id field, or you do not want to use id as the delete key, configure gridOptions.getRowNodeId',
// );
// return;
// }
// } else {
// mapper = this.gridOptions.getRowNodeId;
// }
//
// const ids: string[] = data.map(mapper);
//
// this.http.delete(`/api/${this.currentUrl}/deleteByKeys`, { keys: ids }).subscribe((value) => {
// this.deleted.emit(value);
// });
// }
/**
* Run a query
*/
query(pageNum: number, pageSize: number): void {
this.api.clearRangeSelection();
this.api.deselectAll();
if (this.dataLoadModel !== 'pageable') {
console.warn('paged queries are only available in pageable mode');
return;
}
const rowQuery: IRowQuery = clientSideAsRowQuery(this.api, this.columnApi, pageNum, pageSize, this.filters());
Console.collapse('grid-table.component getData', 'indigoBg', 'queryParams', 'indigoOutline');
console.log(rowQuery);
console.groupEnd();
this.api.showLoadingOverlay();
this.setDataLoading(true);
this.dataSource(rowQuery)
.pipe(
takeUntil(this.destroy$),
catchError((err) => {
Console.collapse('grid-table.component RefreshRowsData', 'redBg', 'ERROR', 'redOutline');
console.log(err);
console.groupEnd();
return of({
total: 0,
records: [],
size: rowQuery.pageSize,
current: rowQuery.pageNum,
statistics: [],
} as IPage<any>);
}),
)
.subscribe((resultPage) => {
this.cur_page = resultPage;
this.api.setRowData(resultPage.records);
this.statistics = resultPage.statistics || [];
if (!resultPage.records.length) {
this.api.showNoRowsOverlay();
} else {
this.api.hideOverlay();
}
this.setDataLoading(false);
});
}
onPageSizeChange(size: number): void {
this.setPageSize(size, true);
this.query(1, size);
}
onPageIndexChange(idx: number): void {
this.setPageIndex(idx, true);
this.query(idx, this.pageSize);
}
/**
* Export every page of data to Excel.
* @param maxRequestCount maximum number of concurrent requests
* @param pageSize number of rows fetched per request, defaults to the current page size
*/
exportAllPageData(maxRequestCount: number = 3, pageSize?: number): void {
const initPageNum = 1;
pageSize = pageSize || this.pageSize;
if (this.dataLoadModel !== 'pageable') {
// For now only client-side (pageable) mode supports exporting the full data set, because in server-side mode setRowData is ineffective due to the cache size limit
console.warn('front-end export of the full data set is only available in pageable mode!');
// Export the currently cached data instead
this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter() });
return;
}
const rowQuery: IRowQuery = clientSideAsRowQuery(this.api, this.columnApi, initPageNum, pageSize, this.filters());
const params = { percent: 0, status: 'active' };
const confirm = this.modal.confirm({
nzContent: this.progressTmpl,
nzTitle: this.translateService.instant('grid.export.confirm'),
nzComponentParams: params,
nzOkText: this.translateService.instant('grid.export.start'),
nzOnOk: () => {
params.percent = 0.01;
params.status = 'active';
confirm.updateConfig({
nzTitle: undefined,
nzContent: this.progressTmpl,
nzComponentParams: params,
nzOkText: null,
nzClosable: false,
nzCloseIcon: undefined,
nzIconType: undefined,
nzMaskClosable: false,
nzCancelText: null,
});
let statisticsFooter: Array<GridStatistics> = [];
return this.dataSource(rowQuery)
.pipe(
mergeMap((page: IPage<any>) => {
const { total, size, current, records, statistics } = page;
statisticsFooter = statistics || [];
const totalPage = Math.ceil(total / size);
if (totalPage > current) {
const step = parseFloat((100 / totalPage).toFixed(2));
params.percent = step;
return range(current + 1, totalPage - current).pipe(
mergeMap((index: number) => {
return this.dataSource(Object.assign({}, rowQuery, { pageNum: index }));
}, maxRequestCount),
pluck('records'),
tap((next) => {
params.percent = parseFloat((params.percent + step).toFixed(2));
}),
reduce((acc, val) => acc.concat(val || []), records),
);
} else {
return of([]);
}
}),
)
.toPromise()
.then((next) => {
params.status = '';
params.percent = 100;
this.api.setRowData(next);
this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter(statisticsFooter) });
this.refresh();
return true;
})
.catch((err) => {
Console.collapse('grid-table.component ExportAllPageData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
params.status = 'exception';
confirm.updateConfig({
nzCancelText: undefined,
nzOkText: this.translateService.instant('grid.export.retry'),
});
return false;
});
},
});
}
private exportStatisticsFooter(statistics?: Array<GridStatistics>): ExcelCell[][] {
const footers: Array<Array<ExcelCell>> = [[]];
statistics = statistics || this.statistics;
if (this.showStatistics && statistics && statistics.length) {
const footer = statistics
.filter((items) => items.skipExport !== true)
.map((items) => {
const rows: Array<ExcelCell> = [];
rows.push({
styleId: 'bigHeader',
data: {
type: 'String',
value: items.label || '',
},
});
items.fields.forEach((item) => {
rows.push({
styleId: 'bigHeader', | type: 'String',
value: `${item.label}:${item.value}`,
},
} as ExcelCell);
});
return rows;
});
footers.push(...footer);
}
return footers;
}
ngOnDestroy(): void {
this.destroy$.next();
this.destroy$.complete();
}
public setData(data: IPage<any>): void {
if (this.dataLoadModel === 'pageable') {
this.cur_page = data;
this.setPageIndex(this.cur_page.current, false);
this.setPageSize(this.cur_page.size, false);
this.api.setRowData(this.cur_page.records);
} else {
console.warn("Data can only be set directly when tableModel === 'pageable'");
}
}
// Add a context-menu item
public addMenu(item: MenuItem): void {
this.additionMenu.push(item);
}
/**
 * Refresh the table.
*/
public refresh(): Observable<void> {
if (this.dataLoadModel === 'pageable') {
this.query(this.pageIndex, this.pageSize);
} else {
this.api.purgeServerSideCache();
}
// Once the loading flag changes back to false this data load is considered complete; return the Observable so other logic can subscribe to it.
return this.dataLoadingChange.asObservable().pipe(
filter((status) => !status),
take(1),
map(() => {}),
);
}
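  // --- Illustrative sketch (not part of the original component) -----------------------------
  // Chaining work after a reload completes, relying on the Observable returned by refresh().
  // The method name and the log message are assumptions for illustration.
  private exampleRefreshThenLog(): void {
    this.refresh().subscribe(() => console.log('grid data reloaded'));
  }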
/**
 * Get the row data of all range-selected rows.
*/
public getSelectionData<U>(): U[] {
return this.getSelection((s) => s.data);
}
/**
 * Get all range-selected rows and map each one through the given function.
*/
public getSelection<U>(hand: (value: RowNode) => U): U[] {
const range: CellRange = this.api.getCellRanges()[0];
const nodes: RowNode[] = this.api.getRenderedNodes();
if (range && range.startRow && range.endRow) {
const r = [];
for (let i = range.startRow.rowIndex; i <= range.endRow.rowIndex; i++) {
r.push(hand(nodes[i]));
}
return r;
} else {
return [];
}
}
/**
 * Get the row data of all checkbox-selected rows.
*/
public getCheckedData<U>(): U[] {
return this.getChecked((s) => s.data);
}
/**
 * Get all checkbox-selected rows and map each one through the given function.
*/
public getChecked<U>(hand: (value: RowNode) => U): U[] {
const nodes: RowNode[] = this.api.getSelectedNodes();
return nodes.map(hand);
}
/**
 * Reset the search form.
*/
resetForm(): void {
this.form.reset();
}
get pageViewTmpl(): TemplateRef<any> {
return this.customPageView ? this.customPageView : this.defaultPageTmpl;
}
// ================================== Data binding ====================================
toggleFullscreen(): void {
this.setFullscreen(!this.fullscreen);
}
setFullscreen(fullscreen: boolean): void {
this.fullscreen = fullscreen;
this.fullscreenChange.emit(this.fullscreen);
}
setPageIndex(pageIndex: number, emit: boolean): void {
this.pageIndex = pageIndex;
if (emit) {
this.pageIndexChange.emit(pageIndex);
}
}
setPageSize(pageSize: number, emit: boolean): void {
this.pageSize = pageSize;
if (emit) {
this.pageSizeChange.emit(pageSize);
}
}
private setDataLoading(loading: boolean): void {
this.dataLoading = loading;
this.dataLoadingChange.emit(this.dataLoading);
}
toggleDataModel(): void {
if ('pageable' === this.dataLoadModel) {
this.setDataMode('infinite');
} else {
this.setDataMode('pageable');
}
}
setDataMode(model: 'pageable' | 'infinite'): void {
this.dataLoadModel = model;
repairRowModeType(this.gridOptions, this.dataLoadModel);
// TODO refresh the table
this.repaint();
this.dataLoadModelChange.emit(this.dataLoadModel);
}
repaint(): void {
this.__show__ = false;
setTimeout(() => (this.__show__ = true), 200);
}
} | data: { | random_line_split |
ngx-grid-table.component.ts | import { ColumnApi, GridApi, GridOptions, GridReadyEvent, IServerSideGetRowsParams, RowNode } from '@ag-grid-community/core';
import { CellRange } from '@ag-grid-community/core/dist/cjs/interfaces/iRangeController';
import { IServerSideDatasource, IServerSideGetRowsRequest } from '@ag-grid-community/core/dist/cjs/interfaces/iServerSideDatasource';
import { AllModules, ExcelCell } from '@ag-grid-enterprise/all-modules';
import { Component, EventEmitter, Input, OnDestroy, OnInit, Output, TemplateRef, ViewChild } from '@angular/core';
import { Column } from '@ag-grid-community/core/dist/cjs/entities/column';
import { ColumnVO } from '@ag-grid-community/core/dist/cjs/interfaces/iColumnVO';
import { ActivatedRoute, Router } from '@angular/router';
import { ReuseTabService } from '@delon/abc/reuse-tab';
import { ACLService } from '@delon/acl';
import { SFSchema } from '@delon/form';
import { _HttpClient } from '@delon/theme';
import { TranslateService } from '@ngx-translate/core';
import { NzMessageService } from 'ng-zorro-antd/message';
import { NzModalService } from 'ng-zorro-antd/modal';
import { Observable, of, range, Subject } from 'rxjs';
import { catchError, filter, map, merge, mergeMap, pluck, reduce, skip, take, takeUntil, tap } from 'rxjs/operators';
import { Console } from '../../../utils/console';
import { IFilter } from '../../filter-input/filter.types';
// Do not use import {Console} from '@shared'; it would create a circular reference
import {
buildColACL,
buildFrameworkComponents,
buildMenus,
buildOptionField,
buildResizable,
buildSideBar,
buildStatusBar,
buildTreeDataCfg,
clientSideAsRowQuery,
initGridOptions,
repairRowModeType,
reuseTabFix,
serverSideAsRowQuery,
} from '../ngx-grid-functions';
import { NgxGridTableConstants } from '../ngx-grid-table-constants';
import {
ApiGetter,
GridStatistics,
IGridDataSource,
IPage,
IRowQuery,
MenuItem,
PaginationCfg,
TreeDataCfg,
} from '../ngx-grid-table-model';
import { SfQueryFormComponent } from '../sf-query-form/sf-query-form.component';
/**
 * The table supports two modes: pageable and infinite.
 * Pageable: handled with the ag-Grid client-side row model.
 * Infinite: handled with the ag-Grid server-side row model.
*
*/
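// Usage sketch (hypothetical host template) showing how a host selects the mode:
//   <ngx-grid-table [gridOptions]="gridOptions" [dataSource]="dataSource"
//                   dataLoadModel="pageable" (gridReady)="onGridReady($event)"></ngx-grid-table>
// The selector and bindings are taken from the component below; the host component is assumed.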
@Component({
selector: 'ngx-grid-table',
templateUrl: './ngx-grid-table.component.html',
styleUrls: ['./ngx-grid-table.component.scss'],
})
export class NgxGridTableComponent implements OnInit, OnDestroy {
// =============================== Internal table state =========================
__show__ = true;
/** Additional context-menu items */
private additionMenu: Array<MenuItem> = [];
/** Teardown notifier */
private destroy$ = new Subject();
/** Whether the data is tree data */
private treeData: false | TreeDataCfg = false;
allModules = AllModules;
/** GridApi instance available after grid init, used to control grid behaviour */
api!: GridApi;
/** ColumnApi instance available after grid init, used to control column behaviour */
columnApi!: ColumnApi;
/** Whether data is currently being loaded */
dataLoading = false;
/** Loading progress (percent) */
loadProgressPercent = 0;
/** Statistics */
statistics!: Array<GridStatistics>;
/** Current page object */
cur_page!: IPage<any>;
/** URL of the page this table lives on */
currentUrl: string;
private haveInit = false;
// ================================== Basic configuration (external) =============================
/** Whether the table is fullscreen */
@Input() fullscreen = false;
/** Current page number */
@Input() pageIndex = 1;
/** Page size */
@Input() pageSize = 20;
/** Base grid options */
@Input() gridOptions!: GridOptions;
/** Column ACL template */
@Input() colACLTmpl!: string;
/** Grid theme */
@Input() gridTheme = 'ag-theme-balham';
/** Grid CSS classes */
@Input() gridTableClass = [];
/** Grid container style */
@Input() gridTableStyle: { [key: string]: any } = { width: '100%', height: '70%' };
/** Whether to run a query immediately after the grid initializes */
@Input() initLoadData = true;
/** Pageable or infinite */
@Input() dataLoadModel: 'pageable' | 'infinite' = 'pageable';
/** Whether to show the pagination control */
@Input() showPagination: false | PaginationCfg = {};
/** Whether to show the default status bar with selection statistics */
@Input() defaultStatusBar = false;
/** Single or multiple row selection */
@Input() rowSelection: undefined | 'single' | 'multiple' = undefined;
/** Whether to show statistics */
@Input() showStatistics = false;
// /** Whether to show the delete menu */
// @Input() deleteMenu = false;
/** Whether columns are resizable by default */
@Input() resizable = false;
/** Whether a group checkbox also selects its children */
@Input() groupSelectsChildren = true;
/** Template for the action column */
@Input() optionCell!: TemplateRef<any>;
/** Data source */
@Input() dataSource!: IGridDataSource<any>;
/** Search form schema */
@Input() searchSchema!: SFSchema;
/** Initial form data */
@Input() initFormData!: any;
@Input() customPageView!: TemplateRef<any>;
@Input() filterHand!: (filters: IFilter[], form: SfQueryFormComponent) => IFilter[];
@Input() topToolPanel!: TemplateRef<any>;
@Input() bottomToolPanel!: TemplateRef<any>;
// ============================== Events ============================
@Output() fullscreenChange = new EventEmitter<boolean>();
@Output() pageIndexChange = new EventEmitter<number>();
@Output() pageSizeChange = new EventEmitter<number>();
/** Grid ready event */
@Output() gridReady = new EventEmitter<{ event: GridReadyEvent; gridTable: NgxGridTableComponent }>();
@Output() gridReLoadReady = new EventEmitter<{ event: GridReadyEvent; gridTable: NgxGridTableComponent }>();
/** Delete event */
@Output() deleted = new EventEmitter<any>();
@Output() dataLoadingChange = new EventEmitter<boolean>();
@Output() dataLoadModelChange = new EventEmitter<'pageable' | 'infinite'>();
// ============================= View children =======================
@ViewChild(SfQueryFormComponent) form!: SfQueryFormComponent;
@ViewChild('defaultPageTmpl') defaultPageTmpl!: TemplateRef<any>;
@ViewChild('progressTmpl') progressTmpl!: TemplateRef<any>;
constructor(
private translateService: TranslateService,
private msg: NzMessageService,
private modal: NzModalService,
private reuseTabService: ReuseTabService,
private activatedRoute: ActivatedRoute,
private router: Router,
private http: _HttpClient,
private aclService: ACLService,
) {
this.currentUrl = this.router.url;
}
ngOnInit(): void {
if (!this.haveInit) {
this.initGridOptions();
}
}
private initGridOptions() {
// Getter used to pass the api objects through to the helper functions
const apiGetter: ApiGetter = { get: () => ({ api: this.api, columnApi: this.columnApi }) };
buildFrameworkComponents(this.gridOptions);
// Build the context menus
buildMenus(
this.gridOptions,
this.translateService,
this.aclService,
this.additionMenu,
// this.deleteMenu,
this.destroy$,
apiGetter,
() => this.getSelectionData(),
// () => this.doDelete(),
);
buildTreeDataCfg(this.gridOptions, this.treeData);
buildStatusBar(this.gridOptions, this.defaultStatusBar);
buildSideBar(this.gridOptions);
buildResizable(this.gridOptions, this.resizable);
buildOptionField(this.gridOptions, this.optionCell);
reuseTabFix(this.router, this.currentUrl, this.destroy$, apiGetter);
repairRowModeType(this.gridOptions, this.dataLoadModel);
buildColACL(this.gridOptions, this.aclService, this.colACLTmpl);
if (this.showPagination !== false) {
this.showPagination = {
...NgxGridTableConstants.DEFAULT_PAGINATION,
...this.showPagination,
};
}
this.gridOptions = initGridOptions(this.gridOptions, this.rowSelection!, (event) => this.onGridReady(event));
}
private onGridReady(event: GridReadyEvent): void {
this.api = event.api;
this.columnApi = event.columnApi;
if (this.dataLoadModel === 'infinite') {
this.api.setServerSideDatasource(this.infiniteDataSource());
}
if (this.haveInit) {
this.gridReLoadReady.emit({ event, gridTable: this });
} else {
this.gridReady.emit({ event, gridTable: this });
this.haveInit = true;
}
// When the grid data is ready
// this.api.addEventListener('firstDataRendered', () => {
// this.firstDataRenderedTime = new Date().getTime();
// if (cellActionColumnDefs.length) {
// this.columnApi.autoSizeColumn('_action');
// }
// });
if (this.initLoadData) {
this.refresh();
}
}
private filters(): IFilter[] {
let filters = [] as IFilter[];
if (this.form) {
filters = [...this.form.filter];
}
if (this.filterHand) {
filters = [...this.filterHand(filters, this.form)];
}
return filters;
}
private infiniteDataSource(): IServerSideDatasource {
const getRows = (params: IServerSideGetRowsParams) => {
const rowQuery = serverSideAsRowQuery(params, this.treeData, this.filters());
Console.collapse('grid-table.component getData', 'indigoBg', 'queryParams', 'indigoOutline');
console.log(rowQuery);
console.groupEnd();
this.setDataLoading(true);
this.api.showLoadingOverlay();
this.dataSource(rowQuery)
.pipe(
takeUntil(this.destroy$),
catchError((err) => {
Console.collapse('grid-table.component RefreshRowsData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
return of({} as IPage<any>);
}),
)
.subscribe((resultPage) => {
if (resultPage.records) {
params.successCallback(resultPage.records, resultPage.total);
this.statistics = resultPage.statistics || [];
this.api.hideOverlay();
} else {
this.api.showNoRowsOverlay();
params.failCallback();
}
this.setDataLoading(false);
});
};
return { getRows };
}
// private doDelete(): void {
// const data: any[] = this.getCheckedData();
// if (!data.length) {
// return;
// }
// let mapper: (v: any) => string;
// if (typeof this.gridOptions.getRowNodeId === 'undefined') {
// if (typeof data[0].id !== 'undefined') {
// mapper = (v) => v.id;
// } else {
// console.warn(
//           'Delete cannot resolve the row keys. By default the id field is used as the key; if there is no id field, or you do not want id as the delete key, configure gridOptions.getRowNodeId',
// );
// return;
// }
// } else {
// mapper = this.gridOptions.getRowNodeId;
// }
//
// const ids: string[] = data.map(mapper);
//
// this.http.delete(`/api/${this.currentUrl}/deleteByKeys`, { keys: ids }).subscribe((value) => {
// this.deleted.emit(value);
// });
// }
/**
 * Query a page of data (pageable mode only).
*/
query(pageNum: number, pageSize: number): void {
this.api.clearRangeSelection();
this.api.deselectAll();
if (this.dataLoadModel !== 'pageable') {
console.warn('Paged queries are only available in pageable mode');
return;
}
const rowQuery: IRowQuery = clientSideAsRowQuery(this.api, this.columnApi, pageNum, pageSize, this.filters());
Console.collapse('grid-table.component getData', 'indigoBg', 'queryParams', 'indigoOutline');
console.log(rowQuery);
console.groupEnd();
this.api.showLoadingOverlay();
this.setDataLoading(true);
this.dataSource(rowQuery)
.pipe(
takeUntil(this.destroy$),
catchError((err) => {
Console.collapse('grid-table.component RefreshRowsData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
return of({
total: 0,
records: [],
size: rowQuery.pageSize,
current: rowQuery.pageNum,
statistics: [],
} as IPage<any>);
}),
)
.subscribe((resultPage) => {
this.cur_page = resultPage;
this.api.setRowData(resultPage.records);
this.statistics = resultPage.statistics || [];
if (!resultPage.records.length) {
this.api.showNoRowsOverlay();
} else {
this.api.hideOverlay();
}
this.setDataLoading(false);
});
}
onPageSizeChange(size: number): void {
this.setPageSize(size, true);
this.query(1, size);
}
onPageIndexChange(idx: number): void {
this.setPageIndex(idx, true);
this.query(idx, this.pageSize);
}
/**
 * Export all pages of data to an Excel file.
 * @param maxRequestCount maximum number of concurrent requests
 * @param pageSize rows fetched per request; defaults to the current page size
*/
exportAllPageData(maxRequestCount: number = 3, pageSize?: number): void {
const initPageNum = 1;
pageSize = pageSize || this.pageSize;
if (this.dataLoadModel !== 'pageable') {
// For now, exporting all data is only supported in client-side (pageable) mode; in server-side
// mode setRowData is ineffective due to the block cache size.
console.warn('Front-end export is only available in pageable mode!');
// Export whatever data is currently cached.
this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter() });
return;
}
const rowQuery: IRowQuery = clientSideAsRowQuery(this.api, this.columnApi, initPageNum, pageSize, this.filters());
const params = { percent: 0, status: 'active' };
const confirm = this.modal.confirm({
nzContent: this.progressTmpl,
nzTitle: this.translateService.instant('grid.export.confirm'),
nzComponentParams: params,
nzOkText: this.translateService.instant('grid.export.start'),
nzOnOk: () => {
params.percent = 0.01;
params.status = 'active';
confirm.updateConfig({
nzTitle: undefined,
nzContent: this.progressTmpl,
nzComponentParams: params,
nzOkText: null,
nzClosable: false,
nzCloseIcon: undefined,
nzIconType: undefined,
nzMaskClosable: false,
nzCancelText: null,
});
let statisticsFooter: Array<GridStatistics> = [];
return this.dataSource(rowQuery)
.pipe(
mergeMap((page: IPage<any>) => {
const { total, size, current, records, statistics } = page;
statisticsFooter = statistics || [];
const totalPage = Math.ceil(total / size);
if (totalPage > current) {
const step = parseFloat((100 / totalPage).toFixed(2));
params.percent = step;
return range(current + 1, totalPage - current).pipe(
mergeMap((index: number) => {
return this.dataSource(Object.assign({}, rowQuery, { pageNum: index }));
}, maxRequestCount),
pluck('records'),
tap((next) => {
params.percent = parseFloat((params.percent + step).toFixed(2));
}),
reduce((acc, val) => acc.concat(val || []), records),
);
} else {
return of([]);
}
}),
)
.toPromise()
.then((next) => {
params.status = '';
params.percent = 100;
this.api.setRowData(next);
this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter(statisticsFooter) });
this.refresh();
return true;
})
.catch((err) => {
Console.collapse('grid-table.component ExportAllPageData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
params.status = 'exception';
confirm.updateConfig({
nzCancelText: undefined,
nzOkText: this.translateService.instant('grid.export.retry'),
});
return false;
});
},
});
}
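  // --- Illustrative sketch (not part of the original component) -----------------------------
  // Exporting with explicit limits: at most 5 concurrent requests, 500 rows per request.
  // The method name and the chosen numbers are assumptions, not defaults from this file.
  private exampleExportWithLimits(): void {
    this.exportAllPageData(5, 500);
  }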
private exportStatisticsFooter(statistics?: Array<GridStatistics>): ExcelCell[][] {
const footers: Array<Array<ExcelCell>> = [[]];
statistics = statistics || this.statistics;
if (this.showStatistics && statistics && statistics.length) {
const footer = statistics
.filter((items) => items.skipExport !== true)
.map((items) => {
const rows: Array<ExcelCell> = [];
rows.push({
styleId: 'bigHeader',
data: {
type: 'String',
value: items.label || '',
},
});
items.fields.forEach((item) => {
rows.push({
styleId: 'bigHeader',
data: {
type: 'String',
value: `${item.label}:${item.value}`,
},
} as ExcelCell);
});
return rows;
});
footers.push(...footer);
}
return footers;
}
ngOnDestroy(): void {
this.destroy$.next();
this.destroy$.complete();
}
public setData(data: IPage<any>): void {
if (this.dataLoadModel === 'pageable') {
this.cur_page = data;
this.setPageIndex(this.cur_page.current, false);
this.setPageSize(this.cur_page.size, false);
this.api.setRowData(this.cur_page.records);
} else {
console.warn("Data can only be set directly when tableModel === 'pageable'");
}
}
// Add a context-menu item
public addMenu(item: MenuItem): void {
this.additionMenu.push(item);
}
/**
 * Refresh the table.
*/
public refresh(): Observable<void> {
if (this.dataLoadModel === 'pageable') {
this.query(this.pageIndex, this.pageSize);
} else {
this.api.purgeServerSideCache();
}
// Once the loading flag changes back to false this data load is considered complete; return the Observable so other logic can subscribe to it.
return this.dataLoadingChange.asObservable().pipe(
filter((status) => !status),
take(1),
map(() => {}),
);
}
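  // --- Illustrative sketch (not part of the original component) -----------------------------
  // Waiting for a reload to finish before exporting the freshly loaded rows; relies on the
  // Observable contract documented above. The method name is an assumption.
  private exampleRefreshThenExport(): void {
    this.refresh().subscribe(() => this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter() }));
  }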
/**
 * Get the row data of all range-selected rows.
*/
public getSelectionData<U>(): U[] {
return this.getSelection((s) => s.data);
}
/**
 * Get all range-selected rows and map each one through the given function.
*/
public getSelection<U>(hand: (value: RowNode) => U): U[] {
const range: CellRange = this.api.getCellRanges()[0];
const nodes: RowNode[] = this.api.getRenderedNodes();
if (range && range.startRow && range.endRow) {
const r = [];
for (let i = range.startRow.rowIndex; i <= range.endRow.rowIndex; i++) {
r.push(hand(nodes[i]));
}
return r;
} else {
return [];
}
}
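  // --- Illustrative sketch (not part of the original component) -----------------------------
  // A typical projection over the current range selection, e.g. collecting row ids. It assumes
  // each row object carries an `id` field, which the component itself does not require.
  private exampleSelectedIds(): any[] {
    return this.getSelection((node) => node.data && node.data.id);
  }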
/**
 * Get the row data of all checkbox-selected rows.
*/
public getCheckedData<U>(): U[] {
return this.getChecked((s) => s.data);
}
/**
 * Get all checkbox-selected rows and map each one through the given function.
*/
public getChecked<U>(hand: (value: RowNode) => U): U[] {
const nodes: RowNode[] = this.api.getSelectedNodes();
return nodes.map(hand);
}
/**
 * Reset the search form.
*/
resetForm(): void {
this.form.reset();
}
get pageViewTmpl(): TemplateRef<any> {
return this.customPageView ? this.customPageView : this.defaultPageTmpl;
}
// ================================== Data binding ====================================
toggleFullscreen(): void {
this.setFullscreen(!this.fullscreen);
}
setFullscreen(fullscreen: boolean): void {
this.fullscreen = fullscreen;
this.fullscreenChange.emit(this.fullscreen);
}
setPageIndex(pageIndex: number, emit: boolean): void {
this.pageIndex = pageIndex;
if (emit) {
this.pageIndexChange.emit(pageIndex);
}
}
setPageSize(pageSize: number, emit: boolean): void {
this.pageSize = pageSize;
if (emit) {
this.pageSizeChange.emit(pageSize);
}
}
private setDataLoading(loading: boolean): void {
| oading = loading;
this.dataLoadingChange.emit(this.dataLoading);
}
toggleDataModel(): void {
if ('pageable' === this.dataLoadModel) {
this.setDataMode('infinite');
} else {
this.setDataMode('pageable');
}
}
setDataMode(model: 'pageable' | 'infinite'): void {
this.dataLoadModel = model;
repairRowModeType(this.gridOptions, this.dataLoadModel);
// TODO refresh the table
this.repaint();
this.dataLoadModelChange.emit(this.dataLoadModel);
}
repaint(): void {
this.__show__ = false;
setTimeout(() => (this.__show__ = true), 200);
}
}
| this.dataL | identifier_name |
ngx-grid-table.component.ts | import { ColumnApi, GridApi, GridOptions, GridReadyEvent, IServerSideGetRowsParams, RowNode } from '@ag-grid-community/core';
import { CellRange } from '@ag-grid-community/core/dist/cjs/interfaces/iRangeController';
import { IServerSideDatasource, IServerSideGetRowsRequest } from '@ag-grid-community/core/dist/cjs/interfaces/iServerSideDatasource';
import { AllModules, ExcelCell } from '@ag-grid-enterprise/all-modules';
import { Component, EventEmitter, Input, OnDestroy, OnInit, Output, TemplateRef, ViewChild } from '@angular/core';
import { Column } from '@ag-grid-community/core/dist/cjs/entities/column';
import { ColumnVO } from '@ag-grid-community/core/dist/cjs/interfaces/iColumnVO';
import { ActivatedRoute, Router } from '@angular/router';
import { ReuseTabService } from '@delon/abc/reuse-tab';
import { ACLService } from '@delon/acl';
import { SFSchema } from '@delon/form';
import { _HttpClient } from '@delon/theme';
import { TranslateService } from '@ngx-translate/core';
import { NzMessageService } from 'ng-zorro-antd/message';
import { NzModalService } from 'ng-zorro-antd/modal';
import { Observable, of, range, Subject } from 'rxjs';
import { catchError, filter, map, merge, mergeMap, pluck, reduce, skip, take, takeUntil, tap } from 'rxjs/operators';
import { Console } from '../../../utils/console';
import { IFilter } from '../../filter-input/filter.types';
// Do not use import {Console} from '@shared'; it would create a circular reference
import {
buildColACL,
buildFrameworkComponents,
buildMenus,
buildOptionField,
buildResizable,
buildSideBar,
buildStatusBar,
buildTreeDataCfg,
clientSideAsRowQuery,
initGridOptions,
repairRowModeType,
reuseTabFix,
serverSideAsRowQuery,
} from '../ngx-grid-functions';
import { NgxGridTableConstants } from '../ngx-grid-table-constants';
import {
ApiGetter,
GridStatistics,
IGridDataSource,
IPage,
IRowQuery,
MenuItem,
PaginationCfg,
TreeDataCfg,
} from '../ngx-grid-table-model';
import { SfQueryFormComponent } from '../sf-query-form/sf-query-form.component';
/**
 * The table supports two modes: pageable and infinite.
 * Pageable: handled with the ag-Grid client-side row model.
 * Infinite: handled with the ag-Grid server-side row model.
*
*/
@Component({
selector: 'ngx-grid-table',
templateUrl: './ngx-grid-table.component.html',
styleUrls: ['./ngx-grid-table.component.scss'],
})
export class NgxGridTableComponent implements OnInit, OnDestroy {
// =============================== Internal table state =========================
__show__ = true;
/** Additional context-menu items */
private additionMenu: Array<MenuItem> = [];
/** Teardown notifier */
private destroy$ = new Subject();
/** Whether the data is tree data */
private treeData: false | TreeDataCfg = false;
allModules = AllModules;
/** GridApi instance available after grid init, used to control grid behaviour */
api!: GridApi;
/** ColumnApi instance available after grid init, used to control column behaviour */
columnApi!: ColumnApi;
/** Whether data is currently being loaded */
dataLoading = false;
/** Loading progress (percent) */
loadProgressPercent = 0;
/** Statistics */
statistics!: Array<GridStatistics>;
/** Current page object */
cur_page!: IPage<any>;
/** URL of the page this table lives on */
currentUrl: string;
private haveInit = false;
// ================================== Basic configuration (external) =============================
/** Whether the table is fullscreen */
@Input() fullscreen = false;
/** Current page number */
@Input() pageIndex = 1;
/** Page size */
@Input() pageSize = 20;
/** Base grid options */
@Input() gridOptions!: GridOptions;
/** Column ACL template */
@Input() colACLTmpl!: string;
/** Grid theme */
@Input() gridTheme = 'ag-theme-balham';
/** Grid CSS classes */
@Input() gridTableClass = [];
/** Grid container style */
@Input() gridTableStyle: { [key: string]: any } = { width: '100%', height: '70%' };
/** Whether to run a query immediately after the grid initializes */
@Input() initLoadData = true;
/** Pageable or infinite */
@Input() dataLoadModel: 'pageable' | 'infinite' = 'pageable';
/** Whether to show the pagination control */
@Input() showPagination: false | PaginationCfg = {};
/** Whether to show the default status bar with selection statistics */
@Input() defaultStatusBar = false;
/** Single or multiple row selection */
@Input() rowSelection: undefined | 'single' | 'multiple' = undefined;
/** Whether to show statistics */
@Input() showStatistics = false;
// /** Whether to show the delete menu */
// @Input() deleteMenu = false;
/** Whether columns are resizable by default */
@Input() resizable = false;
/** Whether a group checkbox also selects its children */
@Input() groupSelectsChildren = true;
/** Template for the action column */
@Input() optionCell!: TemplateRef<any>;
/** Data source */
@Input() dataSource!: IGridDataSource<any>;
/** Search form schema */
@Input() searchSchema!: SFSchema;
/** Initial form data */
@Input() initFormData!: any;
@Input() customPageView!: TemplateRef<any>;
@Input() filterHand!: (filters: IFilter[], form: SfQueryFormComponent) => IFilter[];
@Input() topToolPanel!: TemplateRef<any>;
@Input() bottomToolPanel!: TemplateRef<any>;
// ============================== Events ============================
@Output() fullscreenChange = new EventEmitter<boolean>();
@Output() pageIndexChange = new EventEmitter<number>();
@Output() pageSizeChange = new EventEmitter<number>();
/** Grid ready event */
@Output() gridReady = new EventEmitter<{ event: GridReadyEvent; gridTable: NgxGridTableComponent }>();
@Output() gridReLoadReady = new EventEmitter<{ event: GridReadyEvent; gridTable: NgxGridTableComponent }>();
/** Delete event */
@Output() deleted = new EventEmitter<any>();
@Output() dataLoadingChange = new EventEmitter<boolean>();
@Output() dataLoadModelChange = new EventEmitter<'pageable' | 'infinite'>();
// ============================= View children =======================
@ViewChild(SfQueryFormComponent) form!: SfQueryFormComponent;
@ViewChild('defaultPageTmpl') defaultPageTmpl!: TemplateRef<any>;
@ViewChild('progressTmpl') progressTmpl!: TemplateRef<any>;
constructor(
private translateService: TranslateService,
private msg: NzMessageService,
private modal: NzModalService,
private reuseTabService: ReuseTabService,
private activatedRoute: ActivatedRoute,
private router: Router,
private http: _HttpClient,
private aclService: ACLService,
) {
this.currentUrl = this.router.url;
}
ngOnInit(): void {
if (!this.haveInit) {
this.initGridOptions();
}
}
private initGridOptions() {
// Getter used to pass the api objects through to the helper functions
const apiGetter: ApiGetter = { get: () => ({ api: this.api, columnApi: this.columnApi }) };
buildFrameworkComponents(this.gridOptions);
// Build the context menus
buildMenus(
this.gridOptions,
this.translateService,
this.aclService,
this.additionMenu,
// this.deleteMenu,
this.destroy$,
apiGetter,
() => this.getSelectionData(),
// () => this.doDelete(),
);
buildTreeDataCfg(this.gridOptions, this.treeData);
buildStatusBar(this.gridOptions, this.defaultStatusBar);
buildSideBar(this.gridOptions) | i.autoSizeColumn('_action');
// }
// });
if (this.initLoadData) {
this.refresh();
}
}
private filters(): IFilter[] {
let filters = [] as IFilter[];
if (this.form) {
filters = [...this.form.filter];
}
if (this.filterHand) {
filters = [...this.filterHand(filters, this.form)];
}
return filters;
}
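  // Sketch of a host-supplied [filterHand] hook as consumed above — it can append or rewrite
  // the filters before every query (hypothetical host code, shown here as a comment):
  //   filterHand = (filters: IFilter[], form: SfQueryFormComponent): IFilter[] => [...filters];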
private infiniteDataSource(): IServerSideDatasource {
const getRows = (params: IServerSideGetRowsParams) => {
const rowQuery = serverSideAsRowQuery(params, this.treeData, this.filters());
Console.collapse('grid-table.component getData', 'indigoBg', 'queryParams', 'indigoOutline');
console.log(rowQuery);
console.groupEnd();
this.setDataLoading(true);
this.api.showLoadingOverlay();
this.dataSource(rowQuery)
.pipe(
takeUntil(this.destroy$),
catchError((err) => {
Console.collapse('grid-table.component RefreshRowsData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
return of({} as IPage<any>);
}),
)
.subscribe((resultPage) => {
if (resultPage.records) {
params.successCallback(resultPage.records, resultPage.total);
this.statistics = resultPage.statistics || [];
this.api.hideOverlay();
} else {
this.api.showNoRowsOverlay();
params.failCallback();
}
this.setDataLoading(false);
});
};
return { getRows };
}
// private doDelete(): void {
// const data: any[] = this.getCheckedData();
// if (!data.length) {
// return;
// }
// let mapper: (v: any) => string;
// if (typeof this.gridOptions.getRowNodeId === 'undefined') {
// if (typeof data[0].id !== 'undefined') {
// mapper = (v) => v.id;
// } else {
// console.warn(
//           'Delete cannot resolve the row keys. By default the id field is used as the key; if there is no id field, or you do not want id as the delete key, configure gridOptions.getRowNodeId',
// );
// return;
// }
// } else {
// mapper = this.gridOptions.getRowNodeId;
// }
//
// const ids: string[] = data.map(mapper);
//
// this.http.delete(`/api/${this.currentUrl}/deleteByKeys`, { keys: ids }).subscribe((value) => {
// this.deleted.emit(value);
// });
// }
/**
 * Query a page of data (pageable mode only).
*/
query(pageNum: number, pageSize: number): void {
this.api.clearRangeSelection();
this.api.deselectAll();
if (this.dataLoadModel !== 'pageable') {
console.warn('Paged queries are only available in pageable mode');
return;
}
const rowQuery: IRowQuery = clientSideAsRowQuery(this.api, this.columnApi, pageNum, pageSize, this.filters());
Console.collapse('grid-table.component getData', 'indigoBg', 'queryParams', 'indigoOutline');
console.log(rowQuery);
console.groupEnd();
this.api.showLoadingOverlay();
this.setDataLoading(true);
this.dataSource(rowQuery)
.pipe(
takeUntil(this.destroy$),
catchError((err) => {
Console.collapse('grid-table.component RefreshRowsData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
return of({
total: 0,
records: [],
size: rowQuery.pageSize,
current: rowQuery.pageNum,
statistics: [],
} as IPage<any>);
}),
)
.subscribe((resultPage) => {
this.cur_page = resultPage;
this.api.setRowData(resultPage.records);
this.statistics = resultPage.statistics || [];
if (!resultPage.records.length) {
this.api.showNoRowsOverlay();
} else {
this.api.hideOverlay();
}
this.setDataLoading(false);
});
}
onPageSizeChange(size: number): void {
this.setPageSize(size, true);
this.query(1, size);
}
onPageIndexChange(idx: number): void {
this.setPageIndex(idx, true);
this.query(idx, this.pageSize);
}
/**
 * Export all pages of data to an Excel file.
 * @param maxRequestCount maximum number of concurrent requests
 * @param pageSize rows fetched per request; defaults to the current page size
*/
exportAllPageData(maxRequestCount: number = 3, pageSize?: number): void {
const initPageNum = 1;
pageSize = pageSize || this.pageSize;
if (this.dataLoadModel !== 'pageable') {
// For now, exporting all data is only supported in client-side (pageable) mode; in server-side
// mode setRowData is ineffective due to the block cache size.
console.warn('Front-end export is only available in pageable mode!');
// Export whatever data is currently cached.
this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter() });
return;
}
const rowQuery: IRowQuery = clientSideAsRowQuery(this.api, this.columnApi, initPageNum, pageSize, this.filters());
const params = { percent: 0, status: 'active' };
const confirm = this.modal.confirm({
nzContent: this.progressTmpl,
nzTitle: this.translateService.instant('grid.export.confirm'),
nzComponentParams: params,
nzOkText: this.translateService.instant('grid.export.start'),
nzOnOk: () => {
params.percent = 0.01;
params.status = 'active';
confirm.updateConfig({
nzTitle: undefined,
nzContent: this.progressTmpl,
nzComponentParams: params,
nzOkText: null,
nzClosable: false,
nzCloseIcon: undefined,
nzIconType: undefined,
nzMaskClosable: false,
nzCancelText: null,
});
let statisticsFooter: Array<GridStatistics> = [];
return this.dataSource(rowQuery)
.pipe(
mergeMap((page: IPage<any>) => {
const { total, size, current, records, statistics } = page;
statisticsFooter = statistics || [];
const totalPage = Math.ceil(total / size);
if (totalPage > current) {
const step = parseFloat((100 / totalPage).toFixed(2));
params.percent = step;
return range(current + 1, totalPage - current).pipe(
mergeMap((index: number) => {
return this.dataSource(Object.assign({}, rowQuery, { pageNum: index }));
}, maxRequestCount),
pluck('records'),
tap((next) => {
params.percent = parseFloat((params.percent + step).toFixed(2));
}),
reduce((acc, val) => acc.concat(val || []), records),
);
} else {
return of([]);
}
}),
)
.toPromise()
.then((next) => {
params.status = '';
params.percent = 100;
this.api.setRowData(next);
this.api.exportDataAsExcel({ customFooter: this.exportStatisticsFooter(statisticsFooter) });
this.refresh();
return true;
})
.catch((err) => {
Console.collapse('grid-table.component ExportAllPageData', 'redBg', 'ERROR', 'redOutline');
console.error(err);
console.groupEnd();
params.status = 'exception';
confirm.updateConfig({
nzCancelText: undefined,
nzOkText: this.translateService.instant('grid.export.retry'),
});
return false;
});
},
});
}
private exportStatisticsFooter(statistics?: Array<GridStatistics>): ExcelCell[][] {
const footers: Array<Array<ExcelCell>> = [[]];
statistics = statistics || this.statistics;
if (this.showStatistics && statistics && statistics.length) {
const footer = statistics
.filter((items) => items.skipExport !== true)
.map((items) => {
const rows: Array<ExcelCell> = [];
rows.push({
styleId: 'bigHeader',
data: {
type: 'String',
value: items.label || '',
},
});
items.fields.forEach((item) => {
rows.push({
styleId: 'bigHeader',
data: {
type: 'String',
value: `${item.label}:${item.value}`,
},
} as ExcelCell);
});
return rows;
});
footers.push(...footer);
}
return footers;
}
ngOnDestroy(): void {
this.destroy$.next();
this.destroy$.complete();
}
public setData(data: IPage<any>): void {
if (this.dataLoadModel === 'pageable') {
this.cur_page = data;
this.setPageIndex(this.cur_page.current, false);
this.setPageSize(this.cur_page.size, false);
this.api.setRowData(this.cur_page.records);
} else {
console.warn("Data can only be set directly when tableModel === 'pageable'");
}
}
// Add a context-menu item
public addMenu(item: MenuItem): void {
this.additionMenu.push(item);
}
/**
 * Refresh the table.
*/
public refresh(): Observable<void> {
if (this.dataLoadModel === 'pageable') {
this.query(this.pageIndex, this.pageSize);
} else {
this.api.purgeServerSideCache();
}
// Once the loading flag changes back to false this data load is considered complete; return the Observable so other logic can subscribe to it.
return this.dataLoadingChange.asObservable().pipe(
filter((status) => !status),
take(1),
map(() => {}),
);
}
/**
 * Get the row data of all range-selected rows.
*/
public getSelectionData<U>(): U[] {
return this.getSelection((s) => s.data);
}
/**
 * Get all range-selected rows and map each one through the given function.
*/
public getSelection<U>(hand: (value: RowNode) => U): U[] {
const range: CellRange = this.api.getCellRanges()[0];
const nodes: RowNode[] = this.api.getRenderedNodes();
if (range && range.startRow && range.endRow) {
const r = [];
for (let i = range.startRow.rowIndex; i <= range.endRow.rowIndex; i++) {
r.push(hand(nodes[i]));
}
return r;
} else {
return [];
}
}
/**
 * Get the row data of all checkbox-selected rows.
*/
public getCheckedData<U>(): U[] {
return this.getChecked((s) => s.data);
}
/**
 * Get all checkbox-selected rows and map each one through the given function.
*/
public getChecked<U>(hand: (value: RowNode) => U): U[] {
const nodes: RowNode[] = this.api.getSelectedNodes();
return nodes.map(hand);
}
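  // --- Illustrative sketch (not part of the original component) -----------------------------
  // Projecting the checkbox selection to ids, using the getChecked/getCheckedData pair above.
  // It assumes rows expose an `id` field; the method name is an assumption.
  private exampleCheckedIds(): any[] {
    return this.getChecked((node) => node.data && node.data.id);
  }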
/**
 * Reset the search form.
*/
resetForm(): void {
this.form.reset();
}
get pageViewTmpl(): TemplateRef<any> {
return this.customPageView ? this.customPageView : this.defaultPageTmpl;
}
// ================================== Data binding ====================================
toggleFullscreen(): void {
this.setFullscreen(!this.fullscreen);
}
setFullscreen(fullscreen: boolean): void {
this.fullscreen = fullscreen;
this.fullscreenChange.emit(this.fullscreen);
}
setPageIndex(pageIndex: number, emit: boolean): void {
this.pageIndex = pageIndex;
if (emit) {
this.pageIndexChange.emit(pageIndex);
}
}
setPageSize(pageSize: number, emit: boolean): void {
this.pageSize = pageSize;
if (emit) {
this.pageSizeChange.emit(pageSize);
}
}
private setDataLoading(loading: boolean): void {
this.dataLoading = loading;
this.dataLoadingChange.emit(this.dataLoading);
}
toggleDataModel(): void {
if ('pageable' === this.dataLoadModel) {
this.setDataMode('infinite');
} else {
this.setDataMode('pageable');
}
}
setDataMode(model: 'pageable' | 'infinite'): void {
this.dataLoadModel = model;
repairRowModeType(this.gridOptions, this.dataLoadModel);
// TODO refresh the table
this.repaint();
this.dataLoadModelChange.emit(this.dataLoadModel);
}
repaint(): void {
this.__show__ = false;
setTimeout(() => (this.__show__ = true), 200);
}
}
| ;
buildResizable(this.gridOptions, this.resizable);
buildOptionField(this.gridOptions, this.optionCell);
reuseTabFix(this.router, this.currentUrl, this.destroy$, apiGetter);
repairRowModeType(this.gridOptions, this.dataLoadModel);
buildColACL(this.gridOptions, this.aclService, this.colACLTmpl);
if (this.showPagination !== false) {
this.showPagination = {
...NgxGridTableConstants.DEFAULT_PAGINATION,
...this.showPagination,
};
}
this.gridOptions = initGridOptions(this.gridOptions, this.rowSelection!, (event) => this.onGridReady(event));
}
private onGridReady(event: GridReadyEvent): void {
this.api = event.api;
this.columnApi = event.columnApi;
if (this.dataLoadModel === 'infinite') {
this.api.setServerSideDatasource(this.infiniteDataSource());
}
if (this.haveInit) {
this.gridReLoadReady.emit({ event, gridTable: this });
} else {
this.gridReady.emit({ event, gridTable: this });
this.haveInit = true;
}
// When the grid data is ready
// this.api.addEventListener('firstDataRendered', () => {
// this.firstDataRenderedTime = new Date().getTime();
// if (cellActionColumnDefs.length) {
// this.columnAp | identifier_body |
normalizations.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layers."""
from typing import List, Optional, Tuple
import jax
from jax import numpy as jnp
from lingvo.jax import base_layer
from lingvo.jax import py_utils
from lingvo.jax import pytypes
NestedMap = py_utils.NestedMap
WeightInit = py_utils.WeightInit
weight_params = py_utils.weight_params
InstantiableParams = py_utils.InstantiableParams
JTensor = pytypes.JTensor
def compute_moments(
inputs: JTensor,
padding: JTensor,
reduce_over_dims: List[int],
enable_cross_replica_sum_on_tpu: bool = False,
keepdims: bool = False,
) -> Tuple[JTensor, JTensor]:
"""Computes mean and variance over the valid data points in inputs."""
assert inputs.ndim == padding.ndim
rank = inputs.ndim
assert all([0 <= dim < rank for dim in reduce_over_dims])
mask = 1.0 - padding
sum_v = jnp.sum(inputs * mask, axis=reduce_over_dims, keepdims=keepdims)
count_v = jnp.sum(
jnp.ones_like(inputs) * mask, axis=reduce_over_dims, keepdims=keepdims)
if enable_cross_replica_sum_on_tpu:
# TODO(shafey, yonghui): Fetch axis_name from globals.
sum_v = jax.lax.psum(sum_v, axis_name='batch')
count_v = jax.lax.psum(count_v, axis_name='batch')
count_v = jnp.maximum(count_v, 1.0)
mean = sum_v / count_v
sum_vv = jnp.sum(
(inputs - mean) * (inputs - mean) * mask,
axis=reduce_over_dims,
keepdims=keepdims)
if enable_cross_replica_sum_on_tpu:
# TODO(shafey, yonghui): Fetch axis_name from globals.
sum_vv = jax.lax.psum(sum_vv, axis_name='batch')
variance = sum_vv / count_v
return mean, variance
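# --- Illustrative sketch (not part of the original library) ---------------------------------
# A tiny sanity check of compute_moments on a toy [batch, time, dim] input where the last
# frame is padded out. With reduce_over_dims=[0, 1] the padded value 100.0 is ignored, giving
# mean 2.0 and variance 1.0. The function name and values are assumptions for illustration.
def _example_masked_moments() -> Tuple[JTensor, JTensor]:
  inputs = jnp.array([[[1.0], [3.0], [100.0]]])   # shape [1, 3, 1]
  padding = jnp.array([[[0.0], [0.0], [1.0]]])    # last step is padding
  return compute_moments(inputs, padding, reduce_over_dims=[0, 1])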
class BatchNorm(base_layer.BaseLayer):
"""Batch normalization layer."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('dim', 0, 'Depth of the input/output.')
p.Define(
'decay', 0.999,
'Decay in updating the mean and variance moving average used in'
' batch normalization.')
p.Define(
'enable_cross_replica_sum_on_tpu', False,
'If true, computes global mean and variance across all replicas.'
'Only effective for tpu.')
p.Define(
'use_moving_avg_in_training', False,
'If True, use global moving avg (mean, variance) during training'
' to avoid mismatch between train and eval, which then'
' essentially acts as an adaptive normalization step. When this is'
' set to True, it also disables the use of beta and gamma variables.')
p.Define('set_padded_output_to_zero', True,
'If True, sets the padded outputs to zero.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
self._epsilon = 0.001
self._decay = p.decay
def _get_weight_shape(self) -> List[int]:
return [self.params.dim]
def create_layer_variables(self) -> None:
p = self.params
beta_pc = weight_params(
shape=self._get_weight_shape(),
init=WeightInit.Constant(0.0),
dtype=p.dtype)
self.create_variable('beta', beta_pc)
# gamma = theta.gamma + 1.0
gamma_pc = weight_params(
shape=self._get_weight_shape(),
init=WeightInit.Constant(0.0),
dtype=p.dtype)
self.create_variable('gamma', gamma_pc)
mva = weight_params(
shape=[p.dim],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[base_layer.REQUIRES_MEAN_SYNC])
self.create_variable('moving_mean', mva, trainable=False)
mvv = weight_params(
shape=[p.dim],
init=WeightInit.Constant(1.0),
dtype=p.dtype,
collections=[base_layer.REQUIRES_MEAN_SYNC])
self.create_variable('moving_variance', mvv, trainable=False)
def _get_default_paddings(self, inputs: JTensor) -> JTensor:
"""Gets the default paddings for an input."""
in_shape = list(inputs.shape)
assert len(in_shape) > 1
in_shape[-1] = 1
return jnp.zeros(in_shape, dtype=inputs.dtype)
def _get_beta_gamma(self, theta: NestedMap) -> Tuple[JTensor, JTensor]:
p = self.params
if p.use_moving_avg_in_training:
beta = 0.0
gamma = 1.0
else:
beta = theta.beta
gamma = theta.gamma + 1.0
return beta, gamma
def compute_and_update_moments(
self, theta: NestedMap, inputs: JTensor,
paddings: JTensor) -> Tuple[JTensor, JTensor, JTensor, JTensor]:
"""Computes moments and updates state.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs JTensor. Shaped [..., dim].
paddings: The paddings JTensor. Shaped [..., 1], with the same rank as
the input JTensor.
Returns:
Tuple of (mean, variance, beta, gamma).
"""
p = self.params
if self.do_eval:
# The mean and variance used for normalization.
norm_mean, norm_variance = theta.moving_mean, theta.moving_variance
base_layer.add_summary('moving_mean', theta.moving_mean)
base_layer.add_summary('moving_variance', theta.moving_variance)
else:
rank = inputs.ndim
reduce_over_dims = list(range(0, rank - 1))
mean, variance = compute_moments(
inputs,
paddings,
reduce_over_dims,
enable_cross_replica_sum_on_tpu=p.enable_cross_replica_sum_on_tpu,
keepdims=True)
new_moving_mean = theta.moving_mean * p.decay + mean * (1.0 - p.decay)
self.forward_update_var('moving_mean', new_moving_mean)
new_moving_variance = (
theta.moving_variance * p.decay + variance * (1.0 - p.decay))
self.forward_update_var('moving_variance', new_moving_variance)
# Add some summaries for visualization.
base_layer.add_summary('mean', mean)
base_layer.add_summary('variance', variance)
base_layer.add_summary('moving_mean', theta.moving_mean)
base_layer.add_summary('moving_variance', theta.moving_variance)
if p.use_moving_avg_in_training:
# Use the global statistics for normalization.
|
else:
# Use the batch statistics for normalization.
norm_mean = mean
norm_variance = variance
beta, gamma = self._get_beta_gamma(theta)
return norm_mean, norm_variance, beta, gamma
def fprop(self,
theta: NestedMap,
inputs: JTensor,
paddings: Optional[JTensor] = None) -> JTensor:
"""Apply batch normalization.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs JTensor. Shaped [..., dim].
paddings: The paddings JTensor. Shaped [..., 1].
Returns:
Output after applying batch normalization, with the same shape as
'inputs'.
"""
p = self.params
inputs, paddings = self._cast_to_fprop_dtype((inputs, paddings))
if paddings is None:
paddings = self._get_default_paddings(inputs)
assert inputs.ndim == paddings.ndim
assert paddings.shape[-1] == 1
norm_mean, norm_variance, beta, gamma = self.compute_and_update_moments(
theta, inputs, paddings)
inv = gamma / jnp.sqrt(norm_variance + self._epsilon)
bn_output = (inputs - norm_mean) * inv + beta
if p.set_padded_output_to_zero:
bn_output *= 1.0 - paddings
return bn_output
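# --- Illustrative sketch (not part of the original library) ---------------------------------
# The affine transform applied inside BatchNorm.fprop above, written as a standalone helper:
# out = (x - mean) * gamma / sqrt(var + eps) + beta. `eps` mirrors self._epsilon (0.001).
def _example_batch_norm_inference(x: JTensor, mean: JTensor, variance: JTensor, beta: JTensor,
                                  gamma: JTensor, eps: float = 0.001) -> JTensor:
  inv = gamma / jnp.sqrt(variance + eps)
  return (x - mean) * inv + beta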
class LayerNorm(base_layer.BaseLayer):
"""Layer normalization."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('input_dims', 0, 'Depth of the input to the network.')
p.Define('epsilon', 1e-6, 'Tiny value to guard rsqrt.')
p.Define('scale', True, 'Whether to use a learned scaling.')
p.Define('bias', True, 'Whether to use bias.')
return p
def create_layer_variables(self) -> None:
super().create_layer_variables()
p = self.params
wp = p.weight_split_dims_mapping
wp_scale = wp.wt
if p.device_mesh is not None and wp.wt is None:
# Simply replicate the weights.
wp_scale = [-1]
if p.scale:
self.create_variable(
'scale',
weight_params(
shape=[p.input_dims],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
device_mesh=p.device_mesh,
tensor_split_dims_mapping=wp_scale))
if p.bias:
wp_bias = wp_scale # bias should use the same sharding as scale.
self.create_variable(
'bias',
weight_params(
shape=[p.input_dims],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
device_mesh=p.device_mesh,
tensor_split_dims_mapping=wp_bias))
def fprop(self, theta: NestedMap, inputs: JTensor) -> JTensor:
"""Apply layer norm to inputs.
Args:
theta: A NestedMap object containing weights' values of this layer and its
children layers.
inputs: The inputs JTensor. Shaped [..., input_dims].
Returns:
Layer normalized input.
"""
p = self.params
mean = jnp.mean(inputs, axis=[-1], keepdims=True)
var = jnp.mean(jnp.square(inputs - mean), axis=[-1], keepdims=True)
normed_inputs = (inputs - mean) * jax.lax.rsqrt(var + self.params.epsilon)
if p.scale:
normed_inputs *= (1 + theta.scale)
if p.bias:
normed_inputs += theta.bias
return normed_inputs
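# --- Illustrative sketch (not part of the original library) ---------------------------------
# The core of LayerNorm.fprop as a pure function: normalize the last axis, then apply the
# learned (1 + scale) and bias, matching the variables created in create_layer_variables.
def _example_layer_norm(x: JTensor, scale: JTensor, bias: JTensor, epsilon: float = 1e-6) -> JTensor:
  mean = jnp.mean(x, axis=-1, keepdims=True)
  var = jnp.mean(jnp.square(x - mean), axis=-1, keepdims=True)
  return (x - mean) * jax.lax.rsqrt(var + epsilon) * (1.0 + scale) + bias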
| norm_mean = theta.moving_mean
norm_variance = theta.moving_variance | conditional_block |
normalizations.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layers."""
from typing import List, Optional, Tuple
import jax
from jax import numpy as jnp
from lingvo.jax import base_layer
from lingvo.jax import py_utils
from lingvo.jax import pytypes
NestedMap = py_utils.NestedMap
WeightInit = py_utils.WeightInit
weight_params = py_utils.weight_params
InstantiableParams = py_utils.InstantiableParams
JTensor = pytypes.JTensor
def compute_moments(
inputs: JTensor,
padding: JTensor,
reduce_over_dims: List[int],
enable_cross_replica_sum_on_tpu: bool = False,
keepdims: bool = False,
) -> Tuple[JTensor, JTensor]:
|
class BatchNorm(base_layer.BaseLayer):
"""Batch normalization layer."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('dim', 0, 'Depth of the input/output.')
p.Define(
'decay', 0.999,
'Decay in updating the mean and variance moving average used in'
' batch normalization.')
p.Define(
'enable_cross_replica_sum_on_tpu', False,
'If true, computes global mean and variance across all replicas.'
'Only effective for tpu.')
p.Define(
'use_moving_avg_in_training', False,
'If True, use global moving avg (mean, variance) during training'
' to avoid mismatch between train and eval, which then'
' essentially acts as an adaptive normalization step. When this is'
' set to True, it also disables the use of beta and gamma variables.')
p.Define('set_padded_output_to_zero', True,
'If True, sets the padded outputs to zero.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
self._epsilon = 0.001
self._decay = p.decay
def _get_weight_shape(self) -> List[int]:
return [self.params.dim]
def create_layer_variables(self) -> None:
p = self.params
beta_pc = weight_params(
shape=self._get_weight_shape(),
init=WeightInit.Constant(0.0),
dtype=p.dtype)
self.create_variable('beta', beta_pc)
# gamma = theta.gamma + 1.0
gamma_pc = weight_params(
shape=self._get_weight_shape(),
init=WeightInit.Constant(0.0),
dtype=p.dtype)
self.create_variable('gamma', gamma_pc)
mva = weight_params(
shape=[p.dim],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[base_layer.REQUIRES_MEAN_SYNC])
self.create_variable('moving_mean', mva, trainable=False)
mvv = weight_params(
shape=[p.dim],
init=WeightInit.Constant(1.0),
dtype=p.dtype,
collections=[base_layer.REQUIRES_MEAN_SYNC])
self.create_variable('moving_variance', mvv, trainable=False)
def _get_default_paddings(self, inputs: JTensor) -> JTensor:
"""Gets the default paddings for an input."""
in_shape = list(inputs.shape)
assert len(in_shape) > 1
in_shape[-1] = 1
return jnp.zeros(in_shape, dtype=inputs.dtype)
def _get_beta_gamma(self, theta: NestedMap) -> Tuple[JTensor, JTensor]:
p = self.params
if p.use_moving_avg_in_training:
beta = 0.0
gamma = 1.0
else:
beta = theta.beta
gamma = theta.gamma + 1.0
return beta, gamma
def compute_and_update_moments(
self, theta: NestedMap, inputs: JTensor,
paddings: JTensor) -> Tuple[JTensor, JTensor, JTensor, JTensor]:
"""Computes moments and updates state.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs JTensor. Shaped [..., dim].
paddings: The paddings JTensor. Shaped [..., 1], with the same rank as
the input JTensor.
Returns:
Tuple of (mean, variance, beta, gamma).
"""
p = self.params
if self.do_eval:
# The mean and variance used for normalization.
norm_mean, norm_variance = theta.moving_mean, theta.moving_variance
base_layer.add_summary('moving_mean', theta.moving_mean)
base_layer.add_summary('moving_variance', theta.moving_variance)
else:
rank = inputs.ndim
reduce_over_dims = list(range(0, rank - 1))
mean, variance = compute_moments(
inputs,
paddings,
reduce_over_dims,
enable_cross_replica_sum_on_tpu=p.enable_cross_replica_sum_on_tpu,
keepdims=True)
new_moving_mean = theta.moving_mean * p.decay + mean * (1.0 - p.decay)
self.forward_update_var('moving_mean', new_moving_mean)
new_moving_variance = (
theta.moving_variance * p.decay + variance * (1.0 - p.decay))
self.forward_update_var('moving_variance', new_moving_variance)
# Add some summaries for visualization.
base_layer.add_summary('mean', mean)
base_layer.add_summary('variance', variance)
base_layer.add_summary('moving_mean', theta.moving_mean)
base_layer.add_summary('moving_variance', theta.moving_variance)
if p.use_moving_avg_in_training:
# Use the global statistics for normalization.
norm_mean = theta.moving_mean
norm_variance = theta.moving_variance
else:
# Use the batch statistics for normalization.
norm_mean = mean
norm_variance = variance
beta, gamma = self._get_beta_gamma(theta)
return norm_mean, norm_variance, beta, gamma
def fprop(self,
theta: NestedMap,
inputs: JTensor,
paddings: Optional[JTensor] = None) -> JTensor:
"""Apply batch normalization.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs JTensor. Shaped [..., dim].
paddings: The paddings JTensor. Shaped [..., 1].
Returns:
Output after applying batch normalization, with the same shape as
'inputs'.
"""
p = self.params
inputs, paddings = self._cast_to_fprop_dtype((inputs, paddings))
if paddings is None:
paddings = self._get_default_paddings(inputs)
assert inputs.ndim == paddings.ndim
assert paddings.shape[-1] == 1
norm_mean, norm_variance, beta, gamma = self.compute_and_update_moments(
theta, inputs, paddings)
inv = gamma / jnp.sqrt(norm_variance + self._epsilon)
bn_output = (inputs - norm_mean) * inv + beta
if p.set_padded_output_to_zero:
bn_output *= 1.0 - paddings
return bn_output
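# --- Illustrative sketch (not part of the original library) ---------------------------------
# The exponential-moving-average update performed in compute_and_update_moments above:
# new = old * decay + batch_stat * (1 - decay), with decay defaulting to p.decay (0.999).
def _example_ema_update(old: JTensor, batch_stat: JTensor, decay: float = 0.999) -> JTensor:
  return old * decay + batch_stat * (1.0 - decay)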
class LayerNorm(base_layer.BaseLayer):
"""Layer normalization."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('input_dims', 0, 'Depth of the input to the network.')
p.Define('epsilon', 1e-6, 'Tiny value to guard rsqrt.')
p.Define('scale', True, 'Whether to use a learned scaling.')
p.Define('bias', True, 'Whether to use bias.')
return p
def create_layer_variables(self) -> None:
super().create_layer_variables()
p = self.params
wp = p.weight_split_dims_mapping
wp_scale = wp.wt
if p.device_mesh is not None and wp.wt is None:
# Simply replicate the weights.
wp_scale = [-1]
if p.scale:
self.create_variable(
'scale',
weight_params(
shape=[p.input_dims],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
device_mesh=p.device_mesh,
tensor_split_dims_mapping=wp_scale))
if p.bias:
wp_bias = wp_scale # bias should use the same sharding as scale.
self.create_variable(
'bias',
weight_params(
shape=[p.input_dims],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
device_mesh=p.device_mesh,
tensor_split_dims_mapping=wp_bias))
def fprop(self, theta: NestedMap, inputs: JTensor) -> JTensor:
"""Apply layer norm to inputs.
Args:
theta: A NestedMap object containing weights' values of this layer and its
children layers.
inputs: The inputs JTensor. Shaped [..., input_dims].
Returns:
Layer normalized input.
"""
p = self.params
mean = jnp.mean(inputs, axis=[-1], keepdims=True)
var = jnp.mean(jnp.square(inputs - mean), axis=[-1], keepdims=True)
normed_inputs = (inputs - mean) * jax.lax.rsqrt(var + self.params.epsilon)
if p.scale:
normed_inputs *= (1 + theta.scale)
if p.bias:
normed_inputs += theta.bias
return normed_inputs
| """Computes mean and variance over the valid data points in inputs."""
assert inputs.ndim == padding.ndim
rank = inputs.ndim
assert all([0 <= dim < rank for dim in reduce_over_dims])
mask = 1.0 - padding
sum_v = jnp.sum(inputs * mask, axis=reduce_over_dims, keepdims=keepdims)
count_v = jnp.sum(
jnp.ones_like(inputs) * mask, axis=reduce_over_dims, keepdims=keepdims)
if enable_cross_replica_sum_on_tpu:
# TODO(shafey, yonghui): Fetch axis_name from globals.
sum_v = jax.lax.psum(sum_v, axis_name='batch')
count_v = jax.lax.psum(count_v, axis_name='batch')
count_v = jnp.maximum(count_v, 1.0)
mean = sum_v / count_v
sum_vv = jnp.sum(
(inputs - mean) * (inputs - mean) * mask,
axis=reduce_over_dims,
keepdims=keepdims)
if enable_cross_replica_sum_on_tpu:
# TODO(shafey, yonghui): Fetch axis_name from globals.
sum_vv = jax.lax.psum(sum_vv, axis_name='batch')
variance = sum_vv / count_v
return mean, variance | identifier_body |
normalizations.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layers."""
from typing import List, Optional, Tuple
import jax
from jax import numpy as jnp
from lingvo.jax import base_layer
from lingvo.jax import py_utils
from lingvo.jax import pytypes
NestedMap = py_utils.NestedMap
WeightInit = py_utils.WeightInit
weight_params = py_utils.weight_params
InstantiableParams = py_utils.InstantiableParams
JTensor = pytypes.JTensor
def compute_moments(
inputs: JTensor,
padding: JTensor,
reduce_over_dims: List[int],
enable_cross_replica_sum_on_tpu: bool = False,
keepdims: bool = False,
) -> Tuple[JTensor, JTensor]:
"""Computes mean and variance over the valid data points in inputs."""
assert inputs.ndim == padding.ndim
rank = inputs.ndim
assert all([0 <= dim < rank for dim in reduce_over_dims])
mask = 1.0 - padding
sum_v = jnp.sum(inputs * mask, axis=reduce_over_dims, keepdims=keepdims)
count_v = jnp.sum(
jnp.ones_like(inputs) * mask, axis=reduce_over_dims, keepdims=keepdims)
if enable_cross_replica_sum_on_tpu:
# TODO(shafey, yonghui): Fetch axis_name from globals.
sum_v = jax.lax.psum(sum_v, axis_name='batch')
count_v = jax.lax.psum(count_v, axis_name='batch')
count_v = jnp.maximum(count_v, 1.0)
mean = sum_v / count_v
sum_vv = jnp.sum(
(inputs - mean) * (inputs - mean) * mask,
axis=reduce_over_dims,
keepdims=keepdims)
if enable_cross_replica_sum_on_tpu:
# TODO(shafey, yonghui): Fetch axis_name from globals.
sum_vv = jax.lax.psum(sum_vv, axis_name='batch')
variance = sum_vv / count_v
return mean, variance
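For readers unfamiliar with masked moments, a short hedged usage sketch of compute_moments follows; the shapes and values are invented for illustration and assume the function defined above is in scope.

# Assumes compute_moments from this module is importable / in scope.
from jax import numpy as jnp

inputs = jnp.arange(24, dtype=jnp.float32).reshape(2, 3, 4)  # [batch, time, dims]
paddings = jnp.array([[[0.], [0.], [1.]],
                      [[0.], [0.], [1.]]])                    # last step is padding
mean, variance = compute_moments(
    inputs, paddings, reduce_over_dims=[0, 1], keepdims=True)
# mean and variance have shape [1, 1, 4]; padded steps are excluded from both.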
class BatchNorm(base_layer.BaseLayer):
"""Batch normalization layer."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('dim', 0, 'Depth of the input/output.')
p.Define(
'decay', 0.999,
'Decay in updating the mean and variance moving average used in'
' batch normalization.')
p.Define(
'enable_cross_replica_sum_on_tpu', False,
'If true, computes global mean and variance across all replicas.'
' Only effective on TPU.')
p.Define(
'use_moving_avg_in_training', False,
'If True, use global moving avg (mean, variance) during training'
' to avoid mismatch between train and eval, which then'
' essentially acts as an adaptive normalization step. When this is'
' set to True, it also disables the use of beta and gamma variables.')
p.Define('set_padded_output_to_zero', True,
'If True, sets the padded outputs to zero.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
self._epsilon = 0.001
self._decay = p.decay
def _get_weight_shape(self) -> List[int]:
return [self.params.dim]
def create_layer_variables(self) -> None:
p = self.params
beta_pc = weight_params(
shape=self._get_weight_shape(),
init=WeightInit.Constant(0.0),
dtype=p.dtype)
self.create_variable('beta', beta_pc)
# gamma = theta.gamma + 1.0
gamma_pc = weight_params(
shape=self._get_weight_shape(),
init=WeightInit.Constant(0.0),
dtype=p.dtype)
self.create_variable('gamma', gamma_pc)
mva = weight_params(
shape=[p.dim],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[base_layer.REQUIRES_MEAN_SYNC])
self.create_variable('moving_mean', mva, trainable=False)
mvv = weight_params(
shape=[p.dim],
init=WeightInit.Constant(1.0),
dtype=p.dtype,
collections=[base_layer.REQUIRES_MEAN_SYNC])
self.create_variable('moving_variance', mvv, trainable=False)
def _get_default_paddings(self, inputs: JTensor) -> JTensor:
"""Gets the default paddings for an input."""
in_shape = list(inputs.shape)
assert len(in_shape) > 1
in_shape[-1] = 1
return jnp.zeros(in_shape, dtype=inputs.dtype)
def _get_beta_gamma(self, theta: NestedMap) -> Tuple[JTensor, JTensor]:
p = self.params
if p.use_moving_avg_in_training:
beta = 0.0
gamma = 1.0
else:
beta = theta.beta
gamma = theta.gamma + 1.0
return beta, gamma
def compute_and_update_moments(
self, theta: NestedMap, inputs: JTensor,
paddings: JTensor) -> Tuple[JTensor, JTensor, JTensor, JTensor]:
"""Computes moments and updates state.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs JTensor. Shaped [..., dim].
paddings: The paddings JTensor. Shaped [..., 1], with the same rank as
the input JTensor.
Returns:
Tuple of (mean, variance, beta, gamma).
"""
p = self.params
if self.do_eval:
# The mean and variance used for normalization.
norm_mean, norm_variance = theta.moving_mean, theta.moving_variance
base_layer.add_summary('moving_mean', theta.moving_mean)
base_layer.add_summary('moving_variance', theta.moving_variance)
else:
rank = inputs.ndim
reduce_over_dims = list(range(0, rank - 1))
mean, variance = compute_moments(
inputs,
paddings,
reduce_over_dims,
enable_cross_replica_sum_on_tpu=p.enable_cross_replica_sum_on_tpu,
keepdims=True)
new_moving_mean = theta.moving_mean * p.decay + mean * (1.0 - p.decay)
self.forward_update_var('moving_mean', new_moving_mean)
new_moving_variance = (
theta.moving_variance * p.decay + variance * (1.0 - p.decay))
self.forward_update_var('moving_variance', new_moving_variance)
# Add some summaries for visualization.
base_layer.add_summary('mean', mean)
base_layer.add_summary('variance', variance)
base_layer.add_summary('moving_mean', theta.moving_mean)
base_layer.add_summary('moving_variance', theta.moving_variance)
if p.use_moving_avg_in_training:
# Use the global statistics for normalization.
norm_mean = theta.moving_mean
norm_variance = theta.moving_variance
else:
# Use the batch statistics for normalization.
norm_mean = mean
norm_variance = variance
beta, gamma = self._get_beta_gamma(theta)
return norm_mean, norm_variance, beta, gamma
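The moving_mean / moving_variance updates above are a standard exponential moving average controlled by p.decay. A minimal sketch of that rule, with illustrative names:

from jax import numpy as jnp

def ema_update(moving_stat, batch_stat, decay=0.999):
  # With decay close to 1 the moving statistic changes slowly, mirroring
  # the new_moving_mean / new_moving_variance updates above.
  return moving_stat * decay + batch_stat * (1.0 - decay)

# ema_update(jnp.zeros(4), jnp.ones(4)) -> [0.001, 0.001, 0.001, 0.001]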
def fprop(self,
theta: NestedMap,
inputs: JTensor,
paddings: Optional[JTensor] = None) -> JTensor:
"""Apply batch normalization.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs JTensor. Shaped [..., dim]. | """
p = self.params
inputs, paddings = self._cast_to_fprop_dtype((inputs, paddings))
if paddings is None:
paddings = self._get_default_paddings(inputs)
assert inputs.ndim == paddings.ndim
assert paddings.shape[-1] == 1
norm_mean, norm_variance, beta, gamma = self.compute_and_update_moments(
theta, inputs, paddings)
inv = gamma / jnp.sqrt(norm_variance + self._epsilon)
bn_output = (inputs - norm_mean) * inv + beta
if p.set_padded_output_to_zero:
bn_output *= 1.0 - paddings
return bn_output
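The final step of fprop is the usual affine batch-norm transform followed by optional zeroing of padded positions. A hedged standalone sketch with illustrative names, using the layer's default epsilon of 0.001:

from jax import numpy as jnp

def apply_batch_norm(x, mean, variance, beta, gamma, paddings, epsilon=1e-3):
  inv = gamma / jnp.sqrt(variance + epsilon)
  out = (x - mean) * inv + beta
  # Mirror set_padded_output_to_zero by masking padded positions.
  return out * (1.0 - paddings)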
class LayerNorm(base_layer.BaseLayer):
"""Layer normalization."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('input_dims', 0, 'Depth of the input to the network.')
p.Define('epsilon', 1e-6, 'Tiny value to guard rsqrt.')
p.Define('scale', True, 'Whether to use a learned scaling.')
p.Define('bias', True, 'Whether to use bias.')
return p
def create_layer_variables(self) -> None:
super().create_layer_variables()
p = self.params
wp = p.weight_split_dims_mapping
wp_scale = wp.wt
if p.device_mesh is not None and wp.wt is None:
# Simply replicate the weights.
wp_scale = [-1]
if p.scale:
self.create_variable(
'scale',
weight_params(
shape=[p.input_dims],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
device_mesh=p.device_mesh,
tensor_split_dims_mapping=wp_scale))
if p.bias:
wp_bias = wp_scale # bias should use the same sharding as scale.
self.create_variable(
'bias',
weight_params(
shape=[p.input_dims],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
device_mesh=p.device_mesh,
tensor_split_dims_mapping=wp_bias))
def fprop(self, theta: NestedMap, inputs: JTensor) -> JTensor:
"""Apply layer norm to inputs.
Args:
theta: A NestedMap object containing weights' values of this layer and its
children layers.
inputs: The inputs JTensor. Shaped [..., input_dims].
Returns:
Layer normalized input.
"""
p = self.params
mean = jnp.mean(inputs, axis=[-1], keepdims=True)
var = jnp.mean(jnp.square(inputs - mean), axis=[-1], keepdims=True)
normed_inputs = (inputs - mean) * jax.lax.rsqrt(var + self.params.epsilon)
if p.scale:
normed_inputs *= (1 + theta.scale)
if p.bias:
normed_inputs += theta.bias
return normed_inputs | paddings: The paddings JTensor. Shaped [..., 1].
Returns:
Output after applying batch normalization, with the same shape as
'inputs'. | random_line_split |