file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ExportGltf.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Bentley Systems, Incorporated. All rights reserved.
* See LICENSE.md in the project root for license terms and full copyright notice.
*--------------------------------------------------------------------------------------------*/
import * as fs from "fs";
import * as path from "path";
import * as yargs from "yargs";
import { DbResult, Id64Array, Id64String, Logger, LogLevel } from "@bentley/bentleyjs-core";
import { Angle, Geometry, Matrix3d } from "@bentley/geometry-core";
import {
ECSqlStatement, ExportGraphics, ExportGraphicsInfo, ExportGraphicsLines, ExportGraphicsMesh, ExportLinesInfo, ExportPartInfo,
ExportPartInstanceInfo, ExportPartLinesInfo, IModelHost, SnapshotDb, Texture,
} from "@bentley/imodeljs-backend";
import { ColorDef, ImageSourceFormat } from "@bentley/imodeljs-common";
const CHORD_TOL = 0.001;
const ANGLE_TOL = Angle.degreesToRadians(45);
const MIN_BREP_SIZE = 0.01;
class GltfGlobals {
public static iModel: SnapshotDb;
public static gltf: Gltf;
public static binFile: number;
public static texturesDir: string;
public static binBytesWritten: number;
public static colorToMaterialMap: Map<number, number>;
public static textureToMaterialMap: Map<Id64String, number>;
public static initialize(iModelName: string, gltfName: string) {
GltfGlobals.iModel = SnapshotDb.openFile(iModelName);
process.stdout.write(`Opened ${iModelName} successfully...\n`);
const gltfPathParts = path.parse(gltfName);
const binName = `${gltfPathParts.name}.bin`;
GltfGlobals.binFile = fs.openSync(path.join(gltfPathParts.dir, binName), "w");
GltfGlobals.texturesDir = gltfPathParts.dir;
process.stdout.write(`Writing to ${gltfName} and ${binName}...\n`);
GltfGlobals.gltf = {
accessors: [],
asset: {
generator: "iModel.js export-gltf",
version: "2.0",
},
buffers: [{ uri: binName, byteLength: 0 }],
bufferViews: [],
materials: [],
meshes: [],
nodes: [],
scenes: [{ nodes: [] }],
};
GltfGlobals.binBytesWritten = 0;
GltfGlobals.colorToMaterialMap = new Map<number, number>();
GltfGlobals.textureToMaterialMap = new Map<Id64String, number>();
}
}
function findOrAddMaterialIndexForTexture(textureId: Id64String): number {
let result = GltfGlobals.textureToMaterialMap.get(textureId);
if (result !== undefined) return result;
// glTF-Validator complains if textures/images are defined but empty - wait for texture to define.
if (GltfGlobals.gltf.textures === undefined) {
GltfGlobals.gltf.textures = [];
GltfGlobals.gltf.images = [];
GltfGlobals.gltf.samplers = [{}]; // Just use default sampler values
}
const textureInfo = GltfGlobals.iModel.elements.getElement<Texture>(textureId);
const textureName = textureId + (textureInfo.format === ImageSourceFormat.Jpeg ? ".jpg" : ".png");
const texturePath = path.join(GltfGlobals.texturesDir, textureName);
fs.writeFile(texturePath, textureInfo.data, () => { }); // async is fine
const texture: GltfTexture = { source: GltfGlobals.gltf.images!.length, sampler: 0 };
GltfGlobals.gltf.textures.push(texture);
GltfGlobals.gltf.images!.push({ uri: textureName });
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorTexture: { index: GltfGlobals.gltf.textures.length - 1 },
baseColorFactor: [1, 1, 1, 1],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.textureToMaterialMap.set(textureId, result);
return result;
}
function findOrAddMaterialIndexForColor(color: number): number {
let result = GltfGlobals.colorToMaterialMap.get(color);
if (result !== undefined) return result;
const rgb = ColorDef.getColors(color);
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorFactor: [rgb.r / 255, rgb.g / 255, rgb.b / 255, (255 - rgb.t) / 255],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
if (rgb.t > 10) material.alphaMode = "BLEND";
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.colorToMaterialMap.set(color, result);
return result;
}
function addMeshIndices(indices: Int32Array) {
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length,
byteOffset: 0,
componentType: AccessorComponentType.UInt32,
count: indices.length,
type: "SCALAR",
});
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ElementArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: indices.byteLength,
});
GltfGlobals.binBytesWritten += indices.byteLength;
fs.writeSync(GltfGlobals.binFile, indices);
}
function addMeshPointsAndNormals(points: Float64Array, normals: Float32Array) {
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(points.length);
for (let i = 0; i < points.length; i += 3)
convertPoint(outPoints, i, points[i], points[i + 1], points[i + 2]);
const outNormals = new Float32Array(normals.length);
for (let i = 0; i < normals.length; i += 3)
convertPoint(outNormals, i, normals[i], normals[i + 1], normals[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength + outNormals.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
fs.writeSync(GltfGlobals.binFile, outNormals);
GltfGlobals.binBytesWritten += outPoints.byteLength + outNormals.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: outPoints.byteLength,
componentType: AccessorComponentType.Float,
count: outNormals.length / 3,
type: "VEC3",
});
}
function addMeshParams(params: Float32Array) {
const outParams = new Float32Array(params.length);
for (let i = 0; i < params.length; i += 2) {
outParams[i] = params[i];
outParams[i + 1] = 1 - params[i + 1]; // Flip to match GLTF spec
}
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outParams.byteLength,
byteStride: 8,
});
fs.writeSync(GltfGlobals.binFile, outParams);
GltfGlobals.binBytesWritten += outParams.byteLength;
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outParams.length / 2,
type: "VEC2",
});
}
function addMesh(mesh: ExportGraphicsMesh, color: number, textureId?: Id64String) {
const material = textureId !== undefined ? findOrAddMaterialIndexForTexture(textureId) :
findOrAddMaterialIndexForColor(color);
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlTriangles,
material,
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
// eslint-disable-next-line @typescript-eslint/naming-convention
NORMAL: GltfGlobals.gltf.accessors.length + 2,
},
};
if (textureId !== undefined)
primitive.attributes.TEXCOORD_0 = GltfGlobals.gltf.accessors.length + 3;
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(mesh.indices);
addMeshPointsAndNormals(mesh.points, mesh.normals);
if (textureId !== undefined) addMeshParams(mesh.params);
}
function addMeshNode(name: string) {
GltfGlobals.gltf.scenes[0].nodes.push(GltfGlobals.gltf.nodes.length);
GltfGlobals.gltf.nodes.push({ name, mesh: GltfGlobals.gltf.meshes.length });
}
function addLines(lines: ExportGraphicsLines, color: number) {
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlLines,
material: findOrAddMaterialIndexForColor(color),
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
},
};
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(lines.indices);
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(lines.points.length);
for (let i = 0; i < outPoints.length; i += 3)
convertPoint(outPoints, i, lines.points[i], lines.points[i + 1], lines.points[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
GltfGlobals.binBytesWritten += outPoints.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
}
function exportElements(elementIdArray: Id64Array, partInstanceArray: ExportPartInstanceInfo[]) {
const onGraphics = (info: ExportGraphicsInfo) => {
addMeshNode(info.elementId);
addMesh(info.mesh, info.color, info.textureId);
};
const onLineGraphics = (info: ExportLinesInfo) => {
addMeshNode(info.elementId);
addLines(info.lines, info.color);
};
GltfGlobals.iModel.exportGraphics({
chordTol: CHORD_TOL,
angleTol: ANGLE_TOL,
minBRepFeatureSize: MIN_BREP_SIZE,
onGraphics,
onLineGraphics,
elementIdArray,
partInstanceArray,
});
}
function getInstancesByPart(instances: ExportPartInstanceInfo[]): Map<Id64String, ExportPartInstanceInfo[]> {
const partMap = new Map<Id64String, ExportPartInstanceInfo[]>();
for (const instance of instances) {
const instancesForThisPart = partMap.get(instance.partId);
if (instancesForThisPart !== undefined) instancesForThisPart.push(instance);
else partMap.set(instance.partId, [instance]);
}
return partMap;
}
function almostEqual(testValue: number, ...arrayValues: number[]): boolean {
for (const val of arrayValues) {
if (!Geometry.isAlmostEqualNumber(testValue, val)) return false;
}
return true;
}
// translation, rotation, scale only defined if different from GLTF default transforms
class | {
public readonly translation?: number[];
public readonly rotation?: number[];
public readonly scale?: number[];
constructor(xform?: Float64Array) {
if (xform === undefined) return;
if (!almostEqual(0, xform[3], xform[7], xform[11]))
this.translation = [xform[3], xform[11], -xform[7]]; // GLTF = RHS Y-up, iModel.js = RHS Z-up
// Uniform and positive scale guaranteed by exportGraphics
const xColumnMagnitude = Geometry.hypotenuseXYZ(xform[0], xform[4], xform[8]);
if (!almostEqual(1, xColumnMagnitude))
this.scale = [xColumnMagnitude, xColumnMagnitude, xColumnMagnitude];
const invScale = 1.0 / xColumnMagnitude;
const matrix = Matrix3d.createRowValues(
xform[0] * invScale, xform[1] * invScale, xform[2] * invScale,
xform[4] * invScale, xform[5] * invScale, xform[6] * invScale,
xform[8] * invScale, xform[9] * invScale, xform[10] * invScale);
if (!matrix.isIdentity) {
const q = matrix.toQuaternion();
this.rotation = [q.x, q.z, -q.y, -q.w]; // GLTF = RHS Y-up, iModel.js = RHS Z-up
}
}
}
function exportInstances(partInstanceArray: ExportPartInstanceInfo[]) {
const partMap: Map<Id64String, ExportPartInstanceInfo[]> = getInstancesByPart(partInstanceArray);
process.stdout.write(`Found ${partInstanceArray.length} instances for ${partMap.size} parts...\n`);
const onPartLineGraphics = (meshIndices: number[]) => (info: ExportPartLinesInfo) => {
meshIndices.push(GltfGlobals.gltf.meshes.length);
addLines(info.lines, info.color);
};
const onPartGraphics = (meshIndices: number[]) => (info: ExportPartInfo) => {
meshIndices.push(GltfGlobals.gltf.meshes.length);
addMesh(info.mesh, info.color, info.textureId);
};
const nodes: GltfNode[] = GltfGlobals.gltf.nodes;
const nodeIndices: number[] = GltfGlobals.gltf.scenes[0].nodes;
for (const instanceList of partMap.values()) {
const meshIndices: number[] = [];
const baseDisplayProps = instanceList[0].displayProps;
GltfGlobals.iModel.exportPartGraphics({
elementId: instanceList[0].partId,
displayProps: instanceList[0].displayProps,
onPartGraphics: onPartGraphics(meshIndices),
onPartLineGraphics: onPartLineGraphics(meshIndices),
chordTol: CHORD_TOL,
angleTol: ANGLE_TOL,
minBRepFeatureSize: MIN_BREP_SIZE,
});
for (const instance of instanceList) {
// It is legal for different GeometryPartInstances of the same GeometryPart to have different
// display properties. This can lead to different colors, materials or textures so an exporter
// that is concerned about matching the appearance of the original iModel should not reuse a
// GeometryPart exported with different display properties.
if (!ExportGraphics.arePartDisplayInfosEqual(baseDisplayProps, instance.displayProps))
process.stdout.write("Warning: GeometryPartInstances found using different display properties.\n");
const trs = new TranslationRotationScale(instance.transform);
for (const meshIndex of meshIndices) {
nodeIndices.push(nodes.length);
nodes.push({
mesh: meshIndex,
name: instance.partInstanceId,
rotation: trs.rotation,
scale: trs.scale,
translation: trs.translation,
});
}
}
}
}
interface ExportGltfArgs {
input: string;
output: string;
}
const exportGltfArgs: yargs.Arguments<ExportGltfArgs> = yargs
.usage("Usage: $0 --input [Snapshot iModel] --output [GLTF file]")
.string("input")
.alias("input", "i")
.demandOption(["input"])
.describe("input", "Path to the Snapshot iModel")
.string("output")
.alias("output", "o")
.demandOption(["output"])
.describe("output", "Path to the GLTF file that will be created")
.argv;
(async () => {
await IModelHost.startup();
Logger.initializeToConsole();
Logger.setLevelDefault(LogLevel.Warning);
GltfGlobals.initialize(exportGltfArgs.input, exportGltfArgs.output);
const elementIdArray: Id64Array = [];
const sql = "SELECT ECInstanceId FROM bis.GeometricElement3d";
GltfGlobals.iModel.withPreparedStatement(sql, (stmt: ECSqlStatement) => {
while (stmt.step() === DbResult.BE_SQLITE_ROW)
elementIdArray.push(stmt.getValue(0).getId());
});
process.stdout.write(`Found ${elementIdArray.length} 3D elements...\n`);
if (elementIdArray.length === 0) return;
const partInstanceArray: ExportPartInstanceInfo[] = [];
exportElements(elementIdArray, partInstanceArray);
exportInstances(partInstanceArray);
GltfGlobals.gltf.buffers[0].byteLength = GltfGlobals.binBytesWritten;
fs.writeFileSync(exportGltfArgs.output, JSON.stringify(GltfGlobals.gltf));
fs.closeSync(GltfGlobals.binFile);
process.stdout.write(`Export successful, wrote ${GltfGlobals.binBytesWritten} bytes.\n`);
})().catch((error) => {
process.stdout.write(`${error.message}\n${error.stack}\n`);
});
| TranslationRotationScale | identifier_name |
ExportGltf.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Bentley Systems, Incorporated. All rights reserved.
* See LICENSE.md in the project root for license terms and full copyright notice.
*--------------------------------------------------------------------------------------------*/
import * as fs from "fs";
import * as path from "path";
import * as yargs from "yargs";
import { DbResult, Id64Array, Id64String, Logger, LogLevel } from "@bentley/bentleyjs-core";
import { Angle, Geometry, Matrix3d } from "@bentley/geometry-core";
import {
ECSqlStatement, ExportGraphics, ExportGraphicsInfo, ExportGraphicsLines, ExportGraphicsMesh, ExportLinesInfo, ExportPartInfo,
ExportPartInstanceInfo, ExportPartLinesInfo, IModelHost, SnapshotDb, Texture,
} from "@bentley/imodeljs-backend";
import { ColorDef, ImageSourceFormat } from "@bentley/imodeljs-common";
const CHORD_TOL = 0.001;
const ANGLE_TOL = Angle.degreesToRadians(45);
const MIN_BREP_SIZE = 0.01;
class GltfGlobals {
public static iModel: SnapshotDb;
public static gltf: Gltf;
public static binFile: number;
public static texturesDir: string;
public static binBytesWritten: number;
public static colorToMaterialMap: Map<number, number>;
public static textureToMaterialMap: Map<Id64String, number>;
public static initialize(iModelName: string, gltfName: string) {
GltfGlobals.iModel = SnapshotDb.openFile(iModelName);
process.stdout.write(`Opened ${iModelName} successfully...\n`);
const gltfPathParts = path.parse(gltfName);
const binName = `${gltfPathParts.name}.bin`;
GltfGlobals.binFile = fs.openSync(path.join(gltfPathParts.dir, binName), "w");
GltfGlobals.texturesDir = gltfPathParts.dir;
process.stdout.write(`Writing to ${gltfName} and ${binName}...\n`);
GltfGlobals.gltf = {
accessors: [],
asset: {
generator: "iModel.js export-gltf",
version: "2.0",
},
buffers: [{ uri: binName, byteLength: 0 }],
bufferViews: [],
materials: [],
meshes: [],
nodes: [],
scenes: [{ nodes: [] }],
};
GltfGlobals.binBytesWritten = 0;
GltfGlobals.colorToMaterialMap = new Map<number, number>();
GltfGlobals.textureToMaterialMap = new Map<Id64String, number>();
}
}
function findOrAddMaterialIndexForTexture(textureId: Id64String): number |
function findOrAddMaterialIndexForColor(color: number): number {
let result = GltfGlobals.colorToMaterialMap.get(color);
if (result !== undefined) return result;
const rgb = ColorDef.getColors(color);
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorFactor: [rgb.r / 255, rgb.g / 255, rgb.b / 255, (255 - rgb.t) / 255],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
if (rgb.t > 10) material.alphaMode = "BLEND";
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.colorToMaterialMap.set(color, result);
return result;
}
function addMeshIndices(indices: Int32Array) {
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length,
byteOffset: 0,
componentType: AccessorComponentType.UInt32,
count: indices.length,
type: "SCALAR",
});
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ElementArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: indices.byteLength,
});
GltfGlobals.binBytesWritten += indices.byteLength;
fs.writeSync(GltfGlobals.binFile, indices);
}
function addMeshPointsAndNormals(points: Float64Array, normals: Float32Array) {
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(points.length);
for (let i = 0; i < points.length; i += 3)
convertPoint(outPoints, i, points[i], points[i + 1], points[i + 2]);
const outNormals = new Float32Array(normals.length);
for (let i = 0; i < normals.length; i += 3)
convertPoint(outNormals, i, normals[i], normals[i + 1], normals[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength + outNormals.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
fs.writeSync(GltfGlobals.binFile, outNormals);
GltfGlobals.binBytesWritten += outPoints.byteLength + outNormals.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: outPoints.byteLength,
componentType: AccessorComponentType.Float,
count: outNormals.length / 3,
type: "VEC3",
});
}
function addMeshParams(params: Float32Array) {
const outParams = new Float32Array(params.length);
for (let i = 0; i < params.length; i += 2) {
outParams[i] = params[i];
outParams[i + 1] = 1 - params[i + 1]; // Flip to match GLTF spec
}
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outParams.byteLength,
byteStride: 8,
});
fs.writeSync(GltfGlobals.binFile, outParams);
GltfGlobals.binBytesWritten += outParams.byteLength;
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outParams.length / 2,
type: "VEC2",
});
}
function addMesh(mesh: ExportGraphicsMesh, color: number, textureId?: Id64String) {
const material = textureId !== undefined ? findOrAddMaterialIndexForTexture(textureId) :
findOrAddMaterialIndexForColor(color);
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlTriangles,
material,
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
// eslint-disable-next-line @typescript-eslint/naming-convention
NORMAL: GltfGlobals.gltf.accessors.length + 2,
},
};
if (textureId !== undefined)
primitive.attributes.TEXCOORD_0 = GltfGlobals.gltf.accessors.length + 3;
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(mesh.indices);
addMeshPointsAndNormals(mesh.points, mesh.normals);
if (textureId !== undefined) addMeshParams(mesh.params);
}
function addMeshNode(name: string) {
GltfGlobals.gltf.scenes[0].nodes.push(GltfGlobals.gltf.nodes.length);
GltfGlobals.gltf.nodes.push({ name, mesh: GltfGlobals.gltf.meshes.length });
}
function addLines(lines: ExportGraphicsLines, color: number) {
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlLines,
material: findOrAddMaterialIndexForColor(color),
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
},
};
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(lines.indices);
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(lines.points.length);
for (let i = 0; i < outPoints.length; i += 3)
convertPoint(outPoints, i, lines.points[i], lines.points[i + 1], lines.points[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
GltfGlobals.binBytesWritten += outPoints.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
}
function exportElements(elementIdArray: Id64Array, partInstanceArray: ExportPartInstanceInfo[]) {
const onGraphics = (info: ExportGraphicsInfo) => {
addMeshNode(info.elementId);
addMesh(info.mesh, info.color, info.textureId);
};
const onLineGraphics = (info: ExportLinesInfo) => {
addMeshNode(info.elementId);
addLines(info.lines, info.color);
};
GltfGlobals.iModel.exportGraphics({
chordTol: CHORD_TOL,
angleTol: ANGLE_TOL,
minBRepFeatureSize: MIN_BREP_SIZE,
onGraphics,
onLineGraphics,
elementIdArray,
partInstanceArray,
});
}
function getInstancesByPart(instances: ExportPartInstanceInfo[]): Map<Id64String, ExportPartInstanceInfo[]> {
const partMap = new Map<Id64String, ExportPartInstanceInfo[]>();
for (const instance of instances) {
const instancesForThisPart = partMap.get(instance.partId);
if (instancesForThisPart !== undefined) instancesForThisPart.push(instance);
else partMap.set(instance.partId, [instance]);
}
return partMap;
}
function almostEqual(testValue: number, ...arrayValues: number[]): boolean {
for (const val of arrayValues) {
if (!Geometry.isAlmostEqualNumber(testValue, val)) return false;
}
return true;
}
// translation, rotation, scale only defined if different from GLTF default transforms
class TranslationRotationScale {
public readonly translation?: number[];
public readonly rotation?: number[];
public readonly scale?: number[];
constructor(xform?: Float64Array) {
if (xform === undefined) return;
if (!almostEqual(0, xform[3], xform[7], xform[11]))
this.translation = [xform[3], xform[11], -xform[7]]; // GLTF = RHS Y-up, iModel.js = RHS Z-up
// Uniform and positive scale guaranteed by exportGraphics
const xColumnMagnitude = Geometry.hypotenuseXYZ(xform[0], xform[4], xform[8]);
if (!almostEqual(1, xColumnMagnitude))
this.scale = [xColumnMagnitude, xColumnMagnitude, xColumnMagnitude];
const invScale = 1.0 / xColumnMagnitude;
const matrix = Matrix3d.createRowValues(
xform[0] * invScale, xform[1] * invScale, xform[2] * invScale,
xform[4] * invScale, xform[5] * invScale, xform[6] * invScale,
xform[8] * invScale, xform[9] * invScale, xform[10] * invScale);
if (!matrix.isIdentity) {
const q = matrix.toQuaternion();
this.rotation = [q.x, q.z, -q.y, -q.w]; // GLTF = RHS Y-up, iModel.js = RHS Z-up
}
}
}
function exportInstances(partInstanceArray: ExportPartInstanceInfo[]) {
const partMap: Map<Id64String, ExportPartInstanceInfo[]> = getInstancesByPart(partInstanceArray);
process.stdout.write(`Found ${partInstanceArray.length} instances for ${partMap.size} parts...\n`);
const onPartLineGraphics = (meshIndices: number[]) => (info: ExportPartLinesInfo) => {
meshIndices.push(GltfGlobals.gltf.meshes.length);
addLines(info.lines, info.color);
};
const onPartGraphics = (meshIndices: number[]) => (info: ExportPartInfo) => {
meshIndices.push(GltfGlobals.gltf.meshes.length);
addMesh(info.mesh, info.color, info.textureId);
};
const nodes: GltfNode[] = GltfGlobals.gltf.nodes;
const nodeIndices: number[] = GltfGlobals.gltf.scenes[0].nodes;
for (const instanceList of partMap.values()) {
const meshIndices: number[] = [];
const baseDisplayProps = instanceList[0].displayProps;
GltfGlobals.iModel.exportPartGraphics({
elementId: instanceList[0].partId,
displayProps: instanceList[0].displayProps,
onPartGraphics: onPartGraphics(meshIndices),
onPartLineGraphics: onPartLineGraphics(meshIndices),
chordTol: CHORD_TOL,
angleTol: ANGLE_TOL,
minBRepFeatureSize: MIN_BREP_SIZE,
});
for (const instance of instanceList) {
// It is legal for different GeometryPartInstances of the same GeometryPart to have different
// display properties. This can lead to different colors, materials or textures so an exporter
// that is concerned about matching the appearance of the original iModel should not reuse a
// GeometryPart exported with different display properties.
if (!ExportGraphics.arePartDisplayInfosEqual(baseDisplayProps, instance.displayProps))
process.stdout.write("Warning: GeometryPartInstances found using different display properties.\n");
const trs = new TranslationRotationScale(instance.transform);
for (const meshIndex of meshIndices) {
nodeIndices.push(nodes.length);
nodes.push({
mesh: meshIndex,
name: instance.partInstanceId,
rotation: trs.rotation,
scale: trs.scale,
translation: trs.translation,
});
}
}
}
}
interface ExportGltfArgs {
input: string;
output: string;
}
const exportGltfArgs: yargs.Arguments<ExportGltfArgs> = yargs
.usage("Usage: $0 --input [Snapshot iModel] --output [GLTF file]")
.string("input")
.alias("input", "i")
.demandOption(["input"])
.describe("input", "Path to the Snapshot iModel")
.string("output")
.alias("output", "o")
.demandOption(["output"])
.describe("output", "Path to the GLTF file that will be created")
.argv;
(async () => {
await IModelHost.startup();
Logger.initializeToConsole();
Logger.setLevelDefault(LogLevel.Warning);
GltfGlobals.initialize(exportGltfArgs.input, exportGltfArgs.output);
const elementIdArray: Id64Array = [];
const sql = "SELECT ECInstanceId FROM bis.GeometricElement3d";
GltfGlobals.iModel.withPreparedStatement(sql, (stmt: ECSqlStatement) => {
while (stmt.step() === DbResult.BE_SQLITE_ROW)
elementIdArray.push(stmt.getValue(0).getId());
});
process.stdout.write(`Found ${elementIdArray.length} 3D elements...\n`);
if (elementIdArray.length === 0) return;
const partInstanceArray: ExportPartInstanceInfo[] = [];
exportElements(elementIdArray, partInstanceArray);
exportInstances(partInstanceArray);
GltfGlobals.gltf.buffers[0].byteLength = GltfGlobals.binBytesWritten;
fs.writeFileSync(exportGltfArgs.output, JSON.stringify(GltfGlobals.gltf));
fs.closeSync(GltfGlobals.binFile);
process.stdout.write(`Export successful, wrote ${GltfGlobals.binBytesWritten} bytes.\n`);
})().catch((error) => {
process.stdout.write(`${error.message}\n${error.stack}\n`);
});
| {
let result = GltfGlobals.textureToMaterialMap.get(textureId);
if (result !== undefined) return result;
// glTF-Validator complains if textures/images are defined but empty - wait for texture to define.
if (GltfGlobals.gltf.textures === undefined) {
GltfGlobals.gltf.textures = [];
GltfGlobals.gltf.images = [];
GltfGlobals.gltf.samplers = [{}]; // Just use default sampler values
}
const textureInfo = GltfGlobals.iModel.elements.getElement<Texture>(textureId);
const textureName = textureId + (textureInfo.format === ImageSourceFormat.Jpeg ? ".jpg" : ".png");
const texturePath = path.join(GltfGlobals.texturesDir, textureName);
fs.writeFile(texturePath, textureInfo.data, () => { }); // async is fine
const texture: GltfTexture = { source: GltfGlobals.gltf.images!.length, sampler: 0 };
GltfGlobals.gltf.textures.push(texture);
GltfGlobals.gltf.images!.push({ uri: textureName });
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorTexture: { index: GltfGlobals.gltf.textures.length - 1 },
baseColorFactor: [1, 1, 1, 1],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.textureToMaterialMap.set(textureId, result);
return result;
} | identifier_body |
ExportGltf.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Bentley Systems, Incorporated. All rights reserved.
* See LICENSE.md in the project root for license terms and full copyright notice.
*--------------------------------------------------------------------------------------------*/
import * as fs from "fs";
import * as path from "path";
import * as yargs from "yargs";
import { DbResult, Id64Array, Id64String, Logger, LogLevel } from "@bentley/bentleyjs-core";
import { Angle, Geometry, Matrix3d } from "@bentley/geometry-core";
import {
ECSqlStatement, ExportGraphics, ExportGraphicsInfo, ExportGraphicsLines, ExportGraphicsMesh, ExportLinesInfo, ExportPartInfo,
ExportPartInstanceInfo, ExportPartLinesInfo, IModelHost, SnapshotDb, Texture,
} from "@bentley/imodeljs-backend";
import { ColorDef, ImageSourceFormat } from "@bentley/imodeljs-common";
const CHORD_TOL = 0.001;
const ANGLE_TOL = Angle.degreesToRadians(45);
const MIN_BREP_SIZE = 0.01;
class GltfGlobals {
public static iModel: SnapshotDb;
public static gltf: Gltf;
public static binFile: number;
public static texturesDir: string;
public static binBytesWritten: number;
public static colorToMaterialMap: Map<number, number>;
public static textureToMaterialMap: Map<Id64String, number>;
public static initialize(iModelName: string, gltfName: string) {
GltfGlobals.iModel = SnapshotDb.openFile(iModelName);
process.stdout.write(`Opened ${iModelName} successfully...\n`);
const gltfPathParts = path.parse(gltfName);
const binName = `${gltfPathParts.name}.bin`;
GltfGlobals.binFile = fs.openSync(path.join(gltfPathParts.dir, binName), "w");
GltfGlobals.texturesDir = gltfPathParts.dir;
process.stdout.write(`Writing to ${gltfName} and ${binName}...\n`);
GltfGlobals.gltf = {
accessors: [],
asset: {
generator: "iModel.js export-gltf",
version: "2.0",
},
buffers: [{ uri: binName, byteLength: 0 }],
bufferViews: [],
materials: [],
meshes: [],
nodes: [],
scenes: [{ nodes: [] }],
};
GltfGlobals.binBytesWritten = 0;
GltfGlobals.colorToMaterialMap = new Map<number, number>();
GltfGlobals.textureToMaterialMap = new Map<Id64String, number>();
}
}
function findOrAddMaterialIndexForTexture(textureId: Id64String): number {
let result = GltfGlobals.textureToMaterialMap.get(textureId);
if (result !== undefined) return result;
// glTF-Validator complains if textures/images are defined but empty - wait for texture to define.
if (GltfGlobals.gltf.textures === undefined) {
GltfGlobals.gltf.textures = [];
GltfGlobals.gltf.images = [];
GltfGlobals.gltf.samplers = [{}]; // Just use default sampler values
}
const textureInfo = GltfGlobals.iModel.elements.getElement<Texture>(textureId);
const textureName = textureId + (textureInfo.format === ImageSourceFormat.Jpeg ? ".jpg" : ".png");
const texturePath = path.join(GltfGlobals.texturesDir, textureName);
fs.writeFile(texturePath, textureInfo.data, () => { }); // async is fine
const texture: GltfTexture = { source: GltfGlobals.gltf.images!.length, sampler: 0 };
GltfGlobals.gltf.textures.push(texture);
GltfGlobals.gltf.images!.push({ uri: textureName });
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorTexture: { index: GltfGlobals.gltf.textures.length - 1 },
baseColorFactor: [1, 1, 1, 1],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.textureToMaterialMap.set(textureId, result);
return result;
}
function findOrAddMaterialIndexForColor(color: number): number {
let result = GltfGlobals.colorToMaterialMap.get(color);
if (result !== undefined) return result;
const rgb = ColorDef.getColors(color);
const pbrMetallicRoughness: GltfMaterialPbrMetallicRoughness = {
baseColorFactor: [rgb.r / 255, rgb.g / 255, rgb.b / 255, (255 - rgb.t) / 255],
metallicFactor: 0,
roughnessFactor: 1,
};
const material: GltfMaterial = ({ pbrMetallicRoughness, doubleSided: true });
if (rgb.t > 10) material.alphaMode = "BLEND";
result = GltfGlobals.gltf.materials.length;
GltfGlobals.gltf.materials.push(material);
GltfGlobals.colorToMaterialMap.set(color, result);
return result;
}
function addMeshIndices(indices: Int32Array) {
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length,
byteOffset: 0,
componentType: AccessorComponentType.UInt32,
count: indices.length,
type: "SCALAR",
});
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ElementArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: indices.byteLength,
});
GltfGlobals.binBytesWritten += indices.byteLength;
fs.writeSync(GltfGlobals.binFile, indices);
}
function addMeshPointsAndNormals(points: Float64Array, normals: Float32Array) {
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(points.length);
for (let i = 0; i < points.length; i += 3)
convertPoint(outPoints, i, points[i], points[i + 1], points[i + 2]);
const outNormals = new Float32Array(normals.length);
for (let i = 0; i < normals.length; i += 3)
convertPoint(outNormals, i, normals[i], normals[i + 1], normals[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength + outNormals.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
fs.writeSync(GltfGlobals.binFile, outNormals);
GltfGlobals.binBytesWritten += outPoints.byteLength + outNormals.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: outPoints.byteLength,
componentType: AccessorComponentType.Float,
count: outNormals.length / 3,
type: "VEC3",
});
}
function addMeshParams(params: Float32Array) {
const outParams = new Float32Array(params.length);
for (let i = 0; i < params.length; i += 2) {
outParams[i] = params[i];
outParams[i + 1] = 1 - params[i + 1]; // Flip to match GLTF spec
}
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outParams.byteLength,
byteStride: 8,
});
fs.writeSync(GltfGlobals.binFile, outParams);
GltfGlobals.binBytesWritten += outParams.byteLength;
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outParams.length / 2,
type: "VEC2",
});
}
| findOrAddMaterialIndexForColor(color);
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlTriangles,
material,
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
// eslint-disable-next-line @typescript-eslint/naming-convention
NORMAL: GltfGlobals.gltf.accessors.length + 2,
},
};
if (textureId !== undefined)
primitive.attributes.TEXCOORD_0 = GltfGlobals.gltf.accessors.length + 3;
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(mesh.indices);
addMeshPointsAndNormals(mesh.points, mesh.normals);
if (textureId !== undefined) addMeshParams(mesh.params);
}
function addMeshNode(name: string) {
GltfGlobals.gltf.scenes[0].nodes.push(GltfGlobals.gltf.nodes.length);
GltfGlobals.gltf.nodes.push({ name, mesh: GltfGlobals.gltf.meshes.length });
}
function addLines(lines: ExportGraphicsLines, color: number) {
const primitive: GltfMeshPrimitive = {
mode: MeshPrimitiveMode.GlLines,
material: findOrAddMaterialIndexForColor(color),
indices: GltfGlobals.gltf.accessors.length,
attributes: {
// eslint-disable-next-line @typescript-eslint/naming-convention
POSITION: GltfGlobals.gltf.accessors.length + 1,
},
};
GltfGlobals.gltf.meshes.push({ primitives: [primitive] });
addMeshIndices(lines.indices);
// GLTF is RHS with Y-up, iModel.js is RHS with Z-up
const convertPoint = (outArray: Float32Array, outIndex: number, x: number, y: number, z: number) => {
outArray[outIndex] = x;
outArray[outIndex + 1] = z;
outArray[outIndex + 2] = -y;
};
const outPoints = new Float32Array(lines.points.length);
for (let i = 0; i < outPoints.length; i += 3)
convertPoint(outPoints, i, lines.points[i], lines.points[i + 1], lines.points[i + 2]);
GltfGlobals.gltf.bufferViews.push({
buffer: 0,
target: BufferViewTarget.ArrayBuffer,
byteOffset: GltfGlobals.binBytesWritten,
byteLength: outPoints.byteLength,
byteStride: 12,
});
fs.writeSync(GltfGlobals.binFile, outPoints);
GltfGlobals.binBytesWritten += outPoints.byteLength;
const minPos = [outPoints[0], outPoints[1], outPoints[2]];
const maxPos = Array.from(minPos);
for (let i = 0; i < outPoints.length; i += 3) {
for (let j = 0; j < 3; ++j) {
minPos[j] = Math.min(minPos[j], outPoints[i + j]);
maxPos[j] = Math.max(maxPos[j], outPoints[i + j]);
}
}
GltfGlobals.gltf.accessors.push({
bufferView: GltfGlobals.gltf.bufferViews.length - 1,
byteOffset: 0,
componentType: AccessorComponentType.Float,
count: outPoints.length / 3,
type: "VEC3",
max: maxPos,
min: minPos,
});
}
function exportElements(elementIdArray: Id64Array, partInstanceArray: ExportPartInstanceInfo[]) {
const onGraphics = (info: ExportGraphicsInfo) => {
addMeshNode(info.elementId);
addMesh(info.mesh, info.color, info.textureId);
};
const onLineGraphics = (info: ExportLinesInfo) => {
addMeshNode(info.elementId);
addLines(info.lines, info.color);
};
GltfGlobals.iModel.exportGraphics({
chordTol: CHORD_TOL,
angleTol: ANGLE_TOL,
minBRepFeatureSize: MIN_BREP_SIZE,
onGraphics,
onLineGraphics,
elementIdArray,
partInstanceArray,
});
}
function getInstancesByPart(instances: ExportPartInstanceInfo[]): Map<Id64String, ExportPartInstanceInfo[]> {
const partMap = new Map<Id64String, ExportPartInstanceInfo[]>();
for (const instance of instances) {
const instancesForThisPart = partMap.get(instance.partId);
if (instancesForThisPart !== undefined) instancesForThisPart.push(instance);
else partMap.set(instance.partId, [instance]);
}
return partMap;
}
function almostEqual(testValue: number, ...arrayValues: number[]): boolean {
for (const val of arrayValues) {
if (!Geometry.isAlmostEqualNumber(testValue, val)) return false;
}
return true;
}
// translation, rotation, scale only defined if different from GLTF default transforms
class TranslationRotationScale {
public readonly translation?: number[];
public readonly rotation?: number[];
public readonly scale?: number[];
constructor(xform?: Float64Array) {
if (xform === undefined) return;
if (!almostEqual(0, xform[3], xform[7], xform[11]))
this.translation = [xform[3], xform[11], -xform[7]]; // GLTF = RHS Y-up, iModel.js = RHS Z-up
// Uniform and positive scale guaranteed by exportGraphics
const xColumnMagnitude = Geometry.hypotenuseXYZ(xform[0], xform[4], xform[8]);
if (!almostEqual(1, xColumnMagnitude))
this.scale = [xColumnMagnitude, xColumnMagnitude, xColumnMagnitude];
const invScale = 1.0 / xColumnMagnitude;
const matrix = Matrix3d.createRowValues(
xform[0] * invScale, xform[1] * invScale, xform[2] * invScale,
xform[4] * invScale, xform[5] * invScale, xform[6] * invScale,
xform[8] * invScale, xform[9] * invScale, xform[10] * invScale);
if (!matrix.isIdentity) {
const q = matrix.toQuaternion();
this.rotation = [q.x, q.z, -q.y, -q.w]; // GLTF = RHS Y-up, iModel.js = RHS Z-up
}
}
}
function exportInstances(partInstanceArray: ExportPartInstanceInfo[]) {
const partMap: Map<Id64String, ExportPartInstanceInfo[]> = getInstancesByPart(partInstanceArray);
process.stdout.write(`Found ${partInstanceArray.length} instances for ${partMap.size} parts...\n`);
const onPartLineGraphics = (meshIndices: number[]) => (info: ExportPartLinesInfo) => {
meshIndices.push(GltfGlobals.gltf.meshes.length);
addLines(info.lines, info.color);
};
const onPartGraphics = (meshIndices: number[]) => (info: ExportPartInfo) => {
meshIndices.push(GltfGlobals.gltf.meshes.length);
addMesh(info.mesh, info.color, info.textureId);
};
const nodes: GltfNode[] = GltfGlobals.gltf.nodes;
const nodeIndices: number[] = GltfGlobals.gltf.scenes[0].nodes;
for (const instanceList of partMap.values()) {
const meshIndices: number[] = [];
const baseDisplayProps = instanceList[0].displayProps;
GltfGlobals.iModel.exportPartGraphics({
elementId: instanceList[0].partId,
displayProps: instanceList[0].displayProps,
onPartGraphics: onPartGraphics(meshIndices),
onPartLineGraphics: onPartLineGraphics(meshIndices),
chordTol: CHORD_TOL,
angleTol: ANGLE_TOL,
minBRepFeatureSize: MIN_BREP_SIZE,
});
for (const instance of instanceList) {
// It is legal for different GeometryPartInstances of the same GeometryPart to have different
// display properties. This can lead to different colors, materials or textures so an exporter
// that is concerned about matching the appearance of the original iModel should not reuse a
// GeometryPart exported with different display properties.
if (!ExportGraphics.arePartDisplayInfosEqual(baseDisplayProps, instance.displayProps))
process.stdout.write("Warning: GeometryPartInstances found using different display properties.\n");
const trs = new TranslationRotationScale(instance.transform);
for (const meshIndex of meshIndices) {
nodeIndices.push(nodes.length);
nodes.push({
mesh: meshIndex,
name: instance.partInstanceId,
rotation: trs.rotation,
scale: trs.scale,
translation: trs.translation,
});
}
}
}
}
interface ExportGltfArgs {
input: string;
output: string;
}
const exportGltfArgs: yargs.Arguments<ExportGltfArgs> = yargs
.usage("Usage: $0 --input [Snapshot iModel] --output [GLTF file]")
.string("input")
.alias("input", "i")
.demandOption(["input"])
.describe("input", "Path to the Snapshot iModel")
.string("output")
.alias("output", "o")
.demandOption(["output"])
.describe("output", "Path to the GLTF file that will be created")
.argv;
(async () => {
await IModelHost.startup();
Logger.initializeToConsole();
Logger.setLevelDefault(LogLevel.Warning);
GltfGlobals.initialize(exportGltfArgs.input, exportGltfArgs.output);
const elementIdArray: Id64Array = [];
const sql = "SELECT ECInstanceId FROM bis.GeometricElement3d";
GltfGlobals.iModel.withPreparedStatement(sql, (stmt: ECSqlStatement) => {
while (stmt.step() === DbResult.BE_SQLITE_ROW)
elementIdArray.push(stmt.getValue(0).getId());
});
process.stdout.write(`Found ${elementIdArray.length} 3D elements...\n`);
if (elementIdArray.length === 0) return;
const partInstanceArray: ExportPartInstanceInfo[] = [];
exportElements(elementIdArray, partInstanceArray);
exportInstances(partInstanceArray);
GltfGlobals.gltf.buffers[0].byteLength = GltfGlobals.binBytesWritten;
fs.writeFileSync(exportGltfArgs.output, JSON.stringify(GltfGlobals.gltf));
fs.closeSync(GltfGlobals.binFile);
process.stdout.write(`Export successful, wrote ${GltfGlobals.binBytesWritten} bytes.\n`);
})().catch((error) => {
process.stdout.write(`${error.message}\n${error.stack}\n`);
}); | function addMesh(mesh: ExportGraphicsMesh, color: number, textureId?: Id64String) {
const material = textureId !== undefined ? findOrAddMaterialIndexForTexture(textureId) :
| random_line_split |
v2.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'prof_dialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.4 | from PyQt4.QtGui import *
from PyQt4.QtCore import *
import txt2csv as t2csv
import glob, os, re
from measurements import perform_filter
OWNER = 'rn'
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_profileDialog(object):
def setupUi(self, profileDialog):
profileDialog.setObjectName(_fromUtf8("profileDialog"))
profileDialog.resize(492, 428)
profileDialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
profileDialog.setAutoFillBackground(True)
self.buttonBox = QtGui.QDialogButtonBox(profileDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 220, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.p1text = QtGui.QPlainTextEdit(profileDialog)
self.p1text.setGeometry(QtCore.QRect(50, 50, 104, 21))
self.p1text.setPlainText(_fromUtf8(""))
self.p1text.setObjectName(_fromUtf8("p1text"))
self.profilebtn1 = QtGui.QPushButton(profileDialog)
self.profilebtn1.setGeometry(QtCore.QRect(330, 50, 98, 27))
self.profilebtn1.setObjectName(_fromUtf8("profilebtn1"))
self.choice_icmp = QtGui.QRadioButton(profileDialog)
self.choice_icmp.setGeometry(QtCore.QRect(330, 80, 198, 27))
self.choice_icmp.setObjectName(_fromUtf8("choice_icmp"))
self.choice_icmp.setChecked(True)
self.choice_tcp = QtGui.QRadioButton(profileDialog)
self.choice_tcp.setGeometry(QtCore.QRect(330, 100, 198, 27))
self.choice_tcp.setObjectName(_fromUtf8("choice_tcp"))
self.label = QtGui.QLabel(profileDialog)
self.label.setGeometry(QtCore.QRect(0, 200, 81, 21))
self.label.setObjectName(_fromUtf8("label"))
self.statusLabel = QtGui.QLabel(profileDialog)
self.statusLabel.setGeometry(QtCore.QRect(60, 100, 281, 61))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.label_2 = QtGui.QLabel(profileDialog)
self.label_2.setGeometry(QtCore.QRect(60, 10, 221, 31))
self.label_2.setObjectName(_fromUtf8("label_2"))
# Train button
self.pushButton_2 = QtGui.QPushButton(profileDialog)
self.pushButton_2.setGeometry(QtCore.QRect(90, 270, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.setEnabled(True)
self.hostText = QtGui.QTextEdit(profileDialog)
self.hostText.setGeometry(QtCore.QRect(80, 190, 101, 31))
self.hostText.setObjectName(_fromUtf8("hostText"))
# Dropdown Menu
self.cmbUsage = QtGui.QComboBox(profileDialog)
self.cmbUsage.setGeometry(QtCore.QRect(80, 50, 151, 27))
self.cmbUsage.setObjectName(_fromUtf8("cmbUsage"))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.retranslateUi(profileDialog)
self.pushButton_2.clicked.connect(self.trainbutton)
QtCore.QObject.connect(self.profilebtn1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.msgbtn)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), profileDialog.close)
# QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), profileDialog.close)
QtCore.QMetaObject.connectSlotsByName(profileDialog)
def retranslateUi(self, profileDialog):
profileDialog.setWindowTitle(_translate("profileDialog", "Dialog", None))
self.profilebtn1.setText(_translate("profileDialog", "profile", None))
self.label.setText(_translate("profileDialog", "Host Name", None))
self.statusLabel.setText(_translate("profileDialog", "Enter the host name", None))
self.label_2.setText(_translate("profileDialog", "Enter usage % of device", None))
self.pushButton_2.setText(_translate("profileDialog", "Train", None)) # Train button
# Set DropDown Menu
self.cmbUsage.setItemText(0, _translate("profileDialog", "15% Usage 100ns", None))
self.cmbUsage.setItemText(1, _translate("profileDialog", "30% Usage 40ns", None))
self.cmbUsage.setItemText(2, _translate("profileDialog", "50% Usage 30ns", None))
self.cmbUsage.setItemText(3, _translate("profileDialog", "70% Usage 20ns", None))
self.cmbUsage.setItemText(4, _translate("profileDialog", "99% Usage(stuxnet) 99ns", None))
self.choice_icmp.setText(_translate("MainWindow", "ICMP Profile", None))
self.choice_tcp.setText(_translate("MainWindow", "TCP Profile", None))
def assign_details(self, txt):
if txt == 0 or txt == 15:
return "15;100"
elif txt == 1 or txt == 30:
return "30;40"
elif txt == 2 or txt == 50:
return "50;30"
elif txt == 3 or txt == 70:
return "70;20"
elif txt == 4 or txt == 99:
return "99;99"
def msgbtn(self):
self.pushButton_2.setEnabled(True)
txt_index = self.cmbUsage.currentIndex()
txt = self.assign_details(txt_index)
host = self.hostText.toPlainText()
usage = txt.split(';')[0]
task_cycle = txt.split(';')[1]
print usage
if host != "" and usage != "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("The Profiling will take approxmiately 2 minutes,")
msg.setInformativeText("Click OK to start profiling, Popup will let you know once it is done!")
msg.setWindowTitle("Profiler")
msg.setDetailedText(
"Due to our Machine Learning algorithm requiring a large dataset, we require that you profile the SCADA"
" at specific usage for 2 minutes to get most accurate reuslts during detection")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
if self.choice_tcp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
if self.choice_icmp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 --icmp &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
d = QDialog()
b1 = QPushButton("Profiling Done!, Click X", d)
b1.move(175, 75)
d.setWindowTitle("Completion")
d.setWindowModality(Qt.ApplicationModal)
d.exec_()
else:
print "\n Enter host! / or usage "
def retreive_pcaps(self):
files = os.listdir('pcap')
# Crappy regex TODO: regex implementation ; Function retrieves list of all training pcap ;111 excluded
# Reason being 111 -> test data set
filtered = [f for f in files if '.pcap' in f]
filtered = [x for x in filtered if not ('capture_111' in x)]
return filtered
def trainbutton(self):
# Filter data & save frame delta response time to txt file for further calculations
if self.choice_icmp.isChecked():
flag = 'icmp'
# TODO ; fix this import issue
training_pcap_list = self.retreive_pcaps()
for i, item in enumerate(training_pcap_list):
perform_filter(item, 'icmp')
t2csv.ignite(trainer='icmp')
if self.choice_tcp.isChecked():
os.system("chown " + OWNER + ":" + OWNER + " pcap/*;su -c 'bash filter_tcp.sh' -s /bin/sh rn")
t2csv.ignite(trainer='tcp')
self.statusLabel.setText("Dumping Frametime delta done \n Calculating Features Now...")
flabel = open("label_train", "w") # Open file to write labels into
fdata = open("data_train", "w") # Open file to consolidate all training data from profiles
for file in glob.glob("csv/capture_*.csv"):
# find CPU usage % from file name for label in classifier
# result = re.search('ip_(.*)_RespTimeDeltaOut.csv',file)
# usage = result.group(1) #Usage percentage with Regular exp
usage = file.split('_')[1]
if usage == 111:
break
no_of_labels = file_len(file) # Determine number of labels
if usage == "stux" or usage == "st":
usage = str(99)
f = open(file, "r")
fdata.write(f.read().replace(',', '')) # write csv files into master training file
flabel.write(str(usage + ' ') * no_of_labels)
f.close()
flabel.close()
fdata.close()
print "\n Done writing Data set and labels"
self.statusLabel.setText("All Training data saved into files csv/data_train !")
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
profileDialog = QtGui.QDialog()
ui = Ui_profileDialog()
ui.setupUi(profileDialog)
profileDialog.show()
sys.exit(app.exec_()) | #
# WARNING! All changes made in this file will be lost!
import os, sys
from PyQt4 import QtCore, QtGui | random_line_split |
v2.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'prof_dialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
import os, sys
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import txt2csv as t2csv
import glob, os, re
from measurements import perform_filter
OWNER = 'rn'
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_profileDialog(object):
def setupUi(self, profileDialog):
profileDialog.setObjectName(_fromUtf8("profileDialog"))
profileDialog.resize(492, 428)
profileDialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
profileDialog.setAutoFillBackground(True)
self.buttonBox = QtGui.QDialogButtonBox(profileDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 220, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.p1text = QtGui.QPlainTextEdit(profileDialog)
self.p1text.setGeometry(QtCore.QRect(50, 50, 104, 21))
self.p1text.setPlainText(_fromUtf8(""))
self.p1text.setObjectName(_fromUtf8("p1text"))
self.profilebtn1 = QtGui.QPushButton(profileDialog)
self.profilebtn1.setGeometry(QtCore.QRect(330, 50, 98, 27))
self.profilebtn1.setObjectName(_fromUtf8("profilebtn1"))
self.choice_icmp = QtGui.QRadioButton(profileDialog)
self.choice_icmp.setGeometry(QtCore.QRect(330, 80, 198, 27))
self.choice_icmp.setObjectName(_fromUtf8("choice_icmp"))
self.choice_icmp.setChecked(True)
self.choice_tcp = QtGui.QRadioButton(profileDialog)
self.choice_tcp.setGeometry(QtCore.QRect(330, 100, 198, 27))
self.choice_tcp.setObjectName(_fromUtf8("choice_tcp"))
self.label = QtGui.QLabel(profileDialog)
self.label.setGeometry(QtCore.QRect(0, 200, 81, 21))
self.label.setObjectName(_fromUtf8("label"))
self.statusLabel = QtGui.QLabel(profileDialog)
self.statusLabel.setGeometry(QtCore.QRect(60, 100, 281, 61))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.label_2 = QtGui.QLabel(profileDialog)
self.label_2.setGeometry(QtCore.QRect(60, 10, 221, 31))
self.label_2.setObjectName(_fromUtf8("label_2"))
# Train button
self.pushButton_2 = QtGui.QPushButton(profileDialog)
self.pushButton_2.setGeometry(QtCore.QRect(90, 270, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.setEnabled(True)
self.hostText = QtGui.QTextEdit(profileDialog)
self.hostText.setGeometry(QtCore.QRect(80, 190, 101, 31))
self.hostText.setObjectName(_fromUtf8("hostText"))
# Dropdown Menu
self.cmbUsage = QtGui.QComboBox(profileDialog)
self.cmbUsage.setGeometry(QtCore.QRect(80, 50, 151, 27))
self.cmbUsage.setObjectName(_fromUtf8("cmbUsage"))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.retranslateUi(profileDialog)
self.pushButton_2.clicked.connect(self.trainbutton)
QtCore.QObject.connect(self.profilebtn1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.msgbtn)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), profileDialog.close)
# QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), profileDialog.close)
QtCore.QMetaObject.connectSlotsByName(profileDialog)
def | (self, profileDialog):
profileDialog.setWindowTitle(_translate("profileDialog", "Dialog", None))
self.profilebtn1.setText(_translate("profileDialog", "profile", None))
self.label.setText(_translate("profileDialog", "Host Name", None))
self.statusLabel.setText(_translate("profileDialog", "Enter the host name", None))
self.label_2.setText(_translate("profileDialog", "Enter usage % of device", None))
self.pushButton_2.setText(_translate("profileDialog", "Train", None)) # Train button
# Set DropDown Menu
self.cmbUsage.setItemText(0, _translate("profileDialog", "15% Usage 100ns", None))
self.cmbUsage.setItemText(1, _translate("profileDialog", "30% Usage 40ns", None))
self.cmbUsage.setItemText(2, _translate("profileDialog", "50% Usage 30ns", None))
self.cmbUsage.setItemText(3, _translate("profileDialog", "70% Usage 20ns", None))
self.cmbUsage.setItemText(4, _translate("profileDialog", "99% Usage(stuxnet) 99ns", None))
self.choice_icmp.setText(_translate("MainWindow", "ICMP Profile", None))
self.choice_tcp.setText(_translate("MainWindow", "TCP Profile", None))
def assign_details(self, txt):
if txt == 0 or txt == 15:
return "15;100"
elif txt == 1 or txt == 30:
return "30;40"
elif txt == 2 or txt == 50:
return "50;30"
elif txt == 3 or txt == 70:
return "70;20"
elif txt == 4 or txt == 99:
return "99;99"
def msgbtn(self):
self.pushButton_2.setEnabled(True)
txt_index = self.cmbUsage.currentIndex()
txt = self.assign_details(txt_index)
host = self.hostText.toPlainText()
usage = txt.split(';')[0]
task_cycle = txt.split(';')[1]
print usage
if host != "" and usage != "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("The Profiling will take approxmiately 2 minutes,")
msg.setInformativeText("Click OK to start profiling, Popup will let you know once it is done!")
msg.setWindowTitle("Profiler")
msg.setDetailedText(
"Due to our Machine Learning algorithm requiring a large dataset, we require that you profile the SCADA"
" at specific usage for 2 minutes to get most accurate reuslts during detection")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
if self.choice_tcp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
if self.choice_icmp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 --icmp &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
d = QDialog()
b1 = QPushButton("Profiling Done!, Click X", d)
b1.move(175, 75)
d.setWindowTitle("Completion")
d.setWindowModality(Qt.ApplicationModal)
d.exec_()
else:
print "\n Enter host! / or usage "
def retreive_pcaps(self):
files = os.listdir('pcap')
# Crappy regex TODO: regex implementation ; Function retrieves list of all training pcap ;111 excluded
# Reason being 111 -> test data set
filtered = [f for f in files if '.pcap' in f]
filtered = [x for x in filtered if not ('capture_111' in x)]
return filtered
def trainbutton(self):
# Filter data & save frame delta response time to txt file for further calculations
if self.choice_icmp.isChecked():
flag = 'icmp'
# TODO ; fix this import issue
training_pcap_list = self.retreive_pcaps()
for i, item in enumerate(training_pcap_list):
perform_filter(item, 'icmp')
t2csv.ignite(trainer='icmp')
if self.choice_tcp.isChecked():
os.system("chown " + OWNER + ":" + OWNER + " pcap/*;su -c 'bash filter_tcp.sh' -s /bin/sh rn")
t2csv.ignite(trainer='tcp')
self.statusLabel.setText("Dumping Frametime delta done \n Calculating Features Now...")
flabel = open("label_train", "w") # Open file to write labels into
fdata = open("data_train", "w") # Open file to consolidate all training data from profiles
for file in glob.glob("csv/capture_*.csv"):
# find CPU usage % from file name for label in classifier
# result = re.search('ip_(.*)_RespTimeDeltaOut.csv',file)
# usage = result.group(1) #Usage percentage with Regular exp
usage = file.split('_')[1]
if usage == 111:
break
no_of_labels = file_len(file) # Determine number of labels
if usage == "stux" or usage == "st":
usage = str(99)
f = open(file, "r")
fdata.write(f.read().replace(',', '')) # write csv files into master training file
flabel.write(str(usage + ' ') * no_of_labels)
f.close()
flabel.close()
fdata.close()
print "\n Done writing Data set and labels"
self.statusLabel.setText("All Training data saved into files csv/data_train !")
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
profileDialog = QtGui.QDialog()
ui = Ui_profileDialog()
ui.setupUi(profileDialog)
profileDialog.show()
sys.exit(app.exec_())
| retranslateUi | identifier_name |
v2.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'prof_dialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
import os, sys
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import txt2csv as t2csv
import glob, os, re
from measurements import perform_filter
OWNER = 'rn'
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_profileDialog(object):
def setupUi(self, profileDialog):
|
def retranslateUi(self, profileDialog):
profileDialog.setWindowTitle(_translate("profileDialog", "Dialog", None))
self.profilebtn1.setText(_translate("profileDialog", "profile", None))
self.label.setText(_translate("profileDialog", "Host Name", None))
self.statusLabel.setText(_translate("profileDialog", "Enter the host name", None))
self.label_2.setText(_translate("profileDialog", "Enter usage % of device", None))
self.pushButton_2.setText(_translate("profileDialog", "Train", None)) # Train button
# Set DropDown Menu
self.cmbUsage.setItemText(0, _translate("profileDialog", "15% Usage 100ns", None))
self.cmbUsage.setItemText(1, _translate("profileDialog", "30% Usage 40ns", None))
self.cmbUsage.setItemText(2, _translate("profileDialog", "50% Usage 30ns", None))
self.cmbUsage.setItemText(3, _translate("profileDialog", "70% Usage 20ns", None))
self.cmbUsage.setItemText(4, _translate("profileDialog", "99% Usage(stuxnet) 99ns", None))
self.choice_icmp.setText(_translate("MainWindow", "ICMP Profile", None))
self.choice_tcp.setText(_translate("MainWindow", "TCP Profile", None))
def assign_details(self, txt):
if txt == 0 or txt == 15:
return "15;100"
elif txt == 1 or txt == 30:
return "30;40"
elif txt == 2 or txt == 50:
return "50;30"
elif txt == 3 or txt == 70:
return "70;20"
elif txt == 4 or txt == 99:
return "99;99"
def msgbtn(self):
self.pushButton_2.setEnabled(True)
txt_index = self.cmbUsage.currentIndex()
txt = self.assign_details(txt_index)
host = self.hostText.toPlainText()
usage = txt.split(';')[0]
task_cycle = txt.split(';')[1]
print usage
if host != "" and usage != "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("The Profiling will take approxmiately 2 minutes,")
msg.setInformativeText("Click OK to start profiling, Popup will let you know once it is done!")
msg.setWindowTitle("Profiler")
msg.setDetailedText(
"Due to our Machine Learning algorithm requiring a large dataset, we require that you profile the SCADA"
" at specific usage for 2 minutes to get most accurate reuslts during detection")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
if self.choice_tcp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
if self.choice_icmp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 --icmp &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
d = QDialog()
b1 = QPushButton("Profiling Done!, Click X", d)
b1.move(175, 75)
d.setWindowTitle("Completion")
d.setWindowModality(Qt.ApplicationModal)
d.exec_()
else:
print "\n Enter host! / or usage "
def retreive_pcaps(self):
files = os.listdir('pcap')
# Crappy regex TODO: regex implementation ; Function retrieves list of all training pcap ;111 excluded
# Reason being 111 -> test data set
filtered = [f for f in files if '.pcap' in f]
filtered = [x for x in filtered if not ('capture_111' in x)]
return filtered
def trainbutton(self):
# Filter data & save frame delta response time to txt file for further calculations
if self.choice_icmp.isChecked():
flag = 'icmp'
# TODO ; fix this import issue
training_pcap_list = self.retreive_pcaps()
for i, item in enumerate(training_pcap_list):
perform_filter(item, 'icmp')
t2csv.ignite(trainer='icmp')
if self.choice_tcp.isChecked():
os.system("chown " + OWNER + ":" + OWNER + " pcap/*;su -c 'bash filter_tcp.sh' -s /bin/sh rn")
t2csv.ignite(trainer='tcp')
self.statusLabel.setText("Dumping Frametime delta done \n Calculating Features Now...")
flabel = open("label_train", "w") # Open file to write labels into
fdata = open("data_train", "w") # Open file to consolidate all training data from profiles
for file in glob.glob("csv/capture_*.csv"):
# find CPU usage % from file name for label in classifier
# result = re.search('ip_(.*)_RespTimeDeltaOut.csv',file)
# usage = result.group(1) #Usage percentage with Regular exp
usage = file.split('_')[1]
if usage == 111:
break
no_of_labels = file_len(file) # Determine number of labels
if usage == "stux" or usage == "st":
usage = str(99)
f = open(file, "r")
fdata.write(f.read().replace(',', '')) # write csv files into master training file
flabel.write(str(usage + ' ') * no_of_labels)
f.close()
flabel.close()
fdata.close()
print "\n Done writing Data set and labels"
self.statusLabel.setText("All Training data saved into files csv/data_train !")
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
profileDialog = QtGui.QDialog()
ui = Ui_profileDialog()
ui.setupUi(profileDialog)
profileDialog.show()
sys.exit(app.exec_())
| profileDialog.setObjectName(_fromUtf8("profileDialog"))
profileDialog.resize(492, 428)
profileDialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
profileDialog.setAutoFillBackground(True)
self.buttonBox = QtGui.QDialogButtonBox(profileDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 220, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.p1text = QtGui.QPlainTextEdit(profileDialog)
self.p1text.setGeometry(QtCore.QRect(50, 50, 104, 21))
self.p1text.setPlainText(_fromUtf8(""))
self.p1text.setObjectName(_fromUtf8("p1text"))
self.profilebtn1 = QtGui.QPushButton(profileDialog)
self.profilebtn1.setGeometry(QtCore.QRect(330, 50, 98, 27))
self.profilebtn1.setObjectName(_fromUtf8("profilebtn1"))
self.choice_icmp = QtGui.QRadioButton(profileDialog)
self.choice_icmp.setGeometry(QtCore.QRect(330, 80, 198, 27))
self.choice_icmp.setObjectName(_fromUtf8("choice_icmp"))
self.choice_icmp.setChecked(True)
self.choice_tcp = QtGui.QRadioButton(profileDialog)
self.choice_tcp.setGeometry(QtCore.QRect(330, 100, 198, 27))
self.choice_tcp.setObjectName(_fromUtf8("choice_tcp"))
self.label = QtGui.QLabel(profileDialog)
self.label.setGeometry(QtCore.QRect(0, 200, 81, 21))
self.label.setObjectName(_fromUtf8("label"))
self.statusLabel = QtGui.QLabel(profileDialog)
self.statusLabel.setGeometry(QtCore.QRect(60, 100, 281, 61))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.label_2 = QtGui.QLabel(profileDialog)
self.label_2.setGeometry(QtCore.QRect(60, 10, 221, 31))
self.label_2.setObjectName(_fromUtf8("label_2"))
# Train button
self.pushButton_2 = QtGui.QPushButton(profileDialog)
self.pushButton_2.setGeometry(QtCore.QRect(90, 270, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.setEnabled(True)
self.hostText = QtGui.QTextEdit(profileDialog)
self.hostText.setGeometry(QtCore.QRect(80, 190, 101, 31))
self.hostText.setObjectName(_fromUtf8("hostText"))
# Dropdown Menu
self.cmbUsage = QtGui.QComboBox(profileDialog)
self.cmbUsage.setGeometry(QtCore.QRect(80, 50, 151, 27))
self.cmbUsage.setObjectName(_fromUtf8("cmbUsage"))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.retranslateUi(profileDialog)
self.pushButton_2.clicked.connect(self.trainbutton)
QtCore.QObject.connect(self.profilebtn1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.msgbtn)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), profileDialog.close)
# QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), profileDialog.close)
QtCore.QMetaObject.connectSlotsByName(profileDialog) | identifier_body |
v2.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'prof_dialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
import os, sys
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import txt2csv as t2csv
import glob, os, re
from measurements import perform_filter
OWNER = 'rn'
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_profileDialog(object):
def setupUi(self, profileDialog):
profileDialog.setObjectName(_fromUtf8("profileDialog"))
profileDialog.resize(492, 428)
profileDialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
profileDialog.setAutoFillBackground(True)
self.buttonBox = QtGui.QDialogButtonBox(profileDialog)
self.buttonBox.setGeometry(QtCore.QRect(30, 220, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.p1text = QtGui.QPlainTextEdit(profileDialog)
self.p1text.setGeometry(QtCore.QRect(50, 50, 104, 21))
self.p1text.setPlainText(_fromUtf8(""))
self.p1text.setObjectName(_fromUtf8("p1text"))
self.profilebtn1 = QtGui.QPushButton(profileDialog)
self.profilebtn1.setGeometry(QtCore.QRect(330, 50, 98, 27))
self.profilebtn1.setObjectName(_fromUtf8("profilebtn1"))
self.choice_icmp = QtGui.QRadioButton(profileDialog)
self.choice_icmp.setGeometry(QtCore.QRect(330, 80, 198, 27))
self.choice_icmp.setObjectName(_fromUtf8("choice_icmp"))
self.choice_icmp.setChecked(True)
self.choice_tcp = QtGui.QRadioButton(profileDialog)
self.choice_tcp.setGeometry(QtCore.QRect(330, 100, 198, 27))
self.choice_tcp.setObjectName(_fromUtf8("choice_tcp"))
self.label = QtGui.QLabel(profileDialog)
self.label.setGeometry(QtCore.QRect(0, 200, 81, 21))
self.label.setObjectName(_fromUtf8("label"))
self.statusLabel = QtGui.QLabel(profileDialog)
self.statusLabel.setGeometry(QtCore.QRect(60, 100, 281, 61))
self.statusLabel.setObjectName(_fromUtf8("statusLabel"))
self.label_2 = QtGui.QLabel(profileDialog)
self.label_2.setGeometry(QtCore.QRect(60, 10, 221, 31))
self.label_2.setObjectName(_fromUtf8("label_2"))
# Train button
self.pushButton_2 = QtGui.QPushButton(profileDialog)
self.pushButton_2.setGeometry(QtCore.QRect(90, 270, 98, 27))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.setEnabled(True)
self.hostText = QtGui.QTextEdit(profileDialog)
self.hostText.setGeometry(QtCore.QRect(80, 190, 101, 31))
self.hostText.setObjectName(_fromUtf8("hostText"))
# Dropdown Menu
self.cmbUsage = QtGui.QComboBox(profileDialog)
self.cmbUsage.setGeometry(QtCore.QRect(80, 50, 151, 27))
self.cmbUsage.setObjectName(_fromUtf8("cmbUsage"))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.cmbUsage.addItem(_fromUtf8(""))
self.retranslateUi(profileDialog)
self.pushButton_2.clicked.connect(self.trainbutton)
QtCore.QObject.connect(self.profilebtn1, QtCore.SIGNAL(_fromUtf8("clicked(bool)")), self.msgbtn)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), profileDialog.close)
# QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), profileDialog.close)
QtCore.QMetaObject.connectSlotsByName(profileDialog)
def retranslateUi(self, profileDialog):
profileDialog.setWindowTitle(_translate("profileDialog", "Dialog", None))
self.profilebtn1.setText(_translate("profileDialog", "profile", None))
self.label.setText(_translate("profileDialog", "Host Name", None))
self.statusLabel.setText(_translate("profileDialog", "Enter the host name", None))
self.label_2.setText(_translate("profileDialog", "Enter usage % of device", None))
self.pushButton_2.setText(_translate("profileDialog", "Train", None)) # Train button
# Set DropDown Menu
self.cmbUsage.setItemText(0, _translate("profileDialog", "15% Usage 100ns", None))
self.cmbUsage.setItemText(1, _translate("profileDialog", "30% Usage 40ns", None))
self.cmbUsage.setItemText(2, _translate("profileDialog", "50% Usage 30ns", None))
self.cmbUsage.setItemText(3, _translate("profileDialog", "70% Usage 20ns", None))
self.cmbUsage.setItemText(4, _translate("profileDialog", "99% Usage(stuxnet) 99ns", None))
self.choice_icmp.setText(_translate("MainWindow", "ICMP Profile", None))
self.choice_tcp.setText(_translate("MainWindow", "TCP Profile", None))
def assign_details(self, txt):
if txt == 0 or txt == 15:
return "15;100"
elif txt == 1 or txt == 30:
return "30;40"
elif txt == 2 or txt == 50:
return "50;30"
elif txt == 3 or txt == 70:
|
elif txt == 4 or txt == 99:
return "99;99"
def msgbtn(self):
self.pushButton_2.setEnabled(True)
txt_index = self.cmbUsage.currentIndex()
txt = self.assign_details(txt_index)
host = self.hostText.toPlainText()
usage = txt.split(';')[0]
task_cycle = txt.split(';')[1]
print usage
if host != "" and usage != "":
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("The Profiling will take approxmiately 2 minutes,")
msg.setInformativeText("Click OK to start profiling, Popup will let you know once it is done!")
msg.setWindowTitle("Profiler")
msg.setDetailedText(
"Due to our Machine Learning algorithm requiring a large dataset, we require that you profile the SCADA"
" at specific usage for 2 minutes to get most accurate reuslts during detection")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
if self.choice_tcp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
if self.choice_icmp.isChecked():
os.system('hping3 -c 10000 %s -i u10000 --icmp &' % host) ##REAL HPING command
os.system(
'timeout 5m tcpdump -i enp0s3 -w pcap/capture_%s_%s.pcap' % (usage, host)) # REAL tcpdump command
d = QDialog()
b1 = QPushButton("Profiling Done!, Click X", d)
b1.move(175, 75)
d.setWindowTitle("Completion")
d.setWindowModality(Qt.ApplicationModal)
d.exec_()
else:
print "\n Enter host! / or usage "
def retreive_pcaps(self):
files = os.listdir('pcap')
# Crappy regex TODO: regex implementation ; Function retrieves list of all training pcap ;111 excluded
# Reason being 111 -> test data set
filtered = [f for f in files if '.pcap' in f]
filtered = [x for x in filtered if not ('capture_111' in x)]
return filtered
def trainbutton(self):
# Filter data & save frame delta response time to txt file for further calculations
if self.choice_icmp.isChecked():
flag = 'icmp'
# TODO ; fix this import issue
training_pcap_list = self.retreive_pcaps()
for i, item in enumerate(training_pcap_list):
perform_filter(item, 'icmp')
t2csv.ignite(trainer='icmp')
if self.choice_tcp.isChecked():
os.system("chown " + OWNER + ":" + OWNER + " pcap/*;su -c 'bash filter_tcp.sh' -s /bin/sh rn")
t2csv.ignite(trainer='tcp')
self.statusLabel.setText("Dumping Frametime delta done \n Calculating Features Now...")
flabel = open("label_train", "w") # Open file to write labels into
fdata = open("data_train", "w") # Open file to consolidate all training data from profiles
for file in glob.glob("csv/capture_*.csv"):
# find CPU usage % from file name for label in classifier
# result = re.search('ip_(.*)_RespTimeDeltaOut.csv',file)
# usage = result.group(1) #Usage percentage with Regular exp
usage = file.split('_')[1]
if usage == 111:
break
no_of_labels = file_len(file) # Determine number of labels
if usage == "stux" or usage == "st":
usage = str(99)
f = open(file, "r")
fdata.write(f.read().replace(',', '')) # write csv files into master training file
flabel.write(str(usage + ' ') * no_of_labels)
f.close()
flabel.close()
fdata.close()
print "\n Done writing Data set and labels"
self.statusLabel.setText("All Training data saved into files csv/data_train !")
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
profileDialog = QtGui.QDialog()
ui = Ui_profileDialog()
ui.setupUi(profileDialog)
profileDialog.show()
sys.exit(app.exec_())
| return "70;20" | conditional_block |
forminput.js | var definitions = function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/test",
success: function(data) {
document.getElementById("definitions").innerHTML = data;
}
});
}
//when loading the website validate rangeout of slider and appearance of checkbox
$(document).ready(function() {
document.getElementById("rangeout").value = "[" + document.getElementById("numberrange").value + "]";
var box = document.getElementById("check-chart");
if (document.getElementById("customControlValidation3").checked == true) {
box.style.display = 'block';
$('label[for="check-chart"]').show();
} else {
box.style.display = 'none';
$('label[for="check-chart"]').hide();
}
});
//change appearance of checkbox
function resetradio(checkbox) {
var box = document.getElementById("check-chart");
var radio = document.getElementById("customControlValidation3");
if (radio.checked == true) {
box.style.display = 'block';
$('label[for="check-chart"]').show();
} else {
box.style.display = 'none';
$('label[for="check-chart"]').hide();
}
}
//after clicking show select parameter data and send it to server
var server = function() {
//get input from user in form
var show = document.getElementById("customControlValidation2").checked;
var show2 = document.getElementById("customControlValidation3").checked;
var documents = [0, 0, 0, 0];
documents[0] = document.getElementById("check-34").checked;
documents[1] = document.getElementById("check-71").checked;
documents[2] = document.getElementById("check-125").checked;
documents[3] = document.getElementById("check-213").checked;
//check if obligatory input is filled
if ((!show & !show2) || (!documents[0] & !documents[1] & !documents[3] & !documents[4])) {
alert("Check your input!");
}
else {
var checkChart = false;
if (!show) {
checkChart = document.getElementById("check-chart").checked;
}
var count = document.getElementById("numberrange").value;
var words = [0, 0, 0];
words[0] = document.getElementById("check-nouns").checked;
words[1] = document.getElementById("check-verbs").checked;
words[2] = document.getElementById("check-adjectives").checked;
var data = [0, 0, 0, 0, 0];
data[0] = show;
data[1] = checkChart;
data[2] = documents;
data[3] = count;
data[4] = words;
//send parameter to server
$.ajax({
url: "http://localhost:3000/",
type: "POST",
data: JSON.stringify(data),
dataType: "json",
contentType: "application/json",
success: function(data) {
$("#pic").html("");
$("#pic").append(
"<div class='loader'>" +
"</div>"
);
console.log('finished ajax request');
}
})
//check if preprocessing script is already finished
var check = setInterval(function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/",
success: function(data) {
//select what should be shown
if (data != false) {
if (show == true) { graphShow(data) } else { chartShow(data) }
//stop checking if preprocessing script is finished
clearInterval(check);
return;
}
}
});
}, 2000);
}
}
//show graph with d3
var graphShow = function(jsonString) {
$("#pic").html("");
$("#pic").append(
"<svg style='width:100%; height:100%;'>" +
"</svg>"
);
var zoom = d3.zoom();
//defining display window
var svg = d3.select("svg"),
width = document.getElementById("pic").clientWidth,
height = document.getElementById("pic").clientHeight
//make svg zoomable
transform = d3.zoomIdentity;
//select svg to container for better zooming functionality
var container = svg.append("g")
.attr("class", "container");
//function for generating different colors depending on the word cluster
var color = d3.scaleOrdinal(d3.schemeCategory20c);
//defining the standard radius of the nodes
var radius = d3.scaleSqrt()
.range([0, 6]);
//simulation of the nodes and links: What kind of forces exists between them; force of attraction or the colliding
var simulation = d3.forceSimulation()
.force("link",
d3.forceLink().id(function(d) { return d.word; })
.distance(function(d) { return radius(d.source.quantity / 2) + radius(d.target.quantity / 2); })
.strength(function(d) { return 0.2; })
)
.force("charge", d3.forceManyBody().strength(-500))
.force("center", d3.forceCenter(width / 3 * 2, height / 3 * 2))
.force("collide", d3.forceCollide(function(d) { return d.quantity * 2 }));
//reading the JSON file that inludes the nodes and links
graph = JSON.parse(jsonString);
//defining a link
var link = container.append("g")
.attr("class", "links")
.selectAll("path")
.data(graph.links)
.enter().append("svg:path")
//defining the style of a link
link.style('fill', 'none')
.style('stroke', 'gray')
.style("stroke-width", function(d) { return d.strength; })
//defining a node
var node = container.append("g")
.attr("class", "nodes")
.selectAll("g")
.data(graph.nodes)
.enter().append("g")
.style('transform-origin', '20% 20%')
.on("mouseover", function(d) { mouseover_node(d); })
.on("mouseout", function(d) { mouseout_node(d) })
//defining which function run if a node is dragged
.call(d3.drag()
.on("start", dragstarted)
.on("drag", dragged)
.on("end", dragended));
//assign the attribute quantity(JSON) to the radius of the node
var circles = node.append("circle")
.attr("r", function(d) { return radius(d.quantity / 2); })
.attr("fill", function(d) { return color(d.cluster); })
.attr("transperancy", "50%");
var labels = node.append("text")
.attr("dy", ".35em")
.attr("text-anchor", "middle")
//define the text that is displayed (word out of the JSON file)
.text(function(d) { return d.word; })
//define the color of the text (cluster out of the JSON file)
.attr("fill", "black");
simulation
.nodes(graph.nodes)
.on("tick", ticked);
simulation.force("link")
.links(graph.links);
//select what is standard zoom and what to do on zoom
svg.call(d3.zoom()
.scaleExtent([1 / 8, 8])
.on("zoom", zoomed));
//Legende
var margin = { top: 10, right: 10, bottom: 10, left: 10 };
var divWidth = document.getElementById("pic").offsetWidth;
var legendHolder = container.append('g')
.attr('transform', "translate(10,30)")
var legend = legendHolder.selectAll(".legend")
.data(color.domain())
.enter().append("g")
.attr("class", "legend")
.attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; });
legend.append("circle")
.attr("cx", 0)
.attr("cy", 0)
.attr("r", 9)
.style("fill", color);
legend.append("text")
.attr("x", 12)
.attr("y", 0)
.attr("dy", ".35em")
.attr("stroke", "black")
.style("text-anchor", "start")
.text(function(d) {
if (d == "nn") {
return "noun, singular"
} else if (d == "nns") {
return "noun, plural"
} else if (d == "vbg") {
return "verb, gerund"
} else if (d == "vbz") {
return "verb, present tense, third person singular"
} else if (d == "vbn") {
return "verb past participle"
} else if (d == "vbp") {
return "verb, present tense, not third person singular"
} else if (d == "jjr") {
return "adjective, comparative"
} else if (d == "md") {
return "modal"
} else if (d == "prp") {
return "personal pronoun"
} else if (d == "rbr") {
return "adverb, comparative"
} else if (d == "rb") {
return "adverb"
} else if (d == "pdt") {
return "predeterminer"
} else if (d == "jj") {
return "adjective"
} else if (d == "vbd") {
return "verb, past tense"
} else if (d == "fw") {
return "foreign word"
} else if (d == "vb") {
return "verb"
} else if (d == "jjs") {
return "adjectiv, superlative"
} else if (d == "cc") {
return "coordinating conjunction"
} else if (d == "dt") {
return "determiner"
} else if (d == "rp") {
return "particle"
} else if (d == "in") {
return "preposition/subordinating conjunction"
} else if (d == "cd") {
return "cardinal digit"
} else return d
});
function zoomed() {
var g = d3.selectAll(".container");
g.attr("transform", d3.event.transform);
}
function ticked() {
link.attr("d", function(d) {
var dx = d.target.x - d.source.x,
dy = d.target.y - d.source.y,
dr = Math.sqrt(dx * dx + dy * dy);
return "M" +
d.source.x + "," +
d.source.y + "A" +
dr + "," + dr + " 0 0,1 " +
d.target.x + "," +
d.target.y;
})
.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node
.attr("transform", function(d) { return "translate(" + d.x + ", " + d.y + ")"; });
edgepaths.attr('d', function(d) {
return 'M ' + d.source.x + ' ' + d.source.y + ' L ' + d.target.x + ' ' + d.target.y;
});
}
function dragstarted(d) {
if (!d3.event.active) simulation.alphaTarget(0.3).restart()
d.fx = d.x;
d.fy = d.y;
}
function dragged(d) |
function dragended(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = null;
d.fy = null;
}
var mouseover_node = function(z) {
var neighbors = {};
neighbors[z.index] = true;
link.filter(function(d) {
if (d.source == z) {
neighbors[d.target.index] = true
return true
} else if (d.target == z) {
neighbors[d.source.index] = true
return true
} else {
return false
}
})
.style("stroke-opacity", 1);
node.filter(function(d) { return neighbors[d.index] })
.style("stroke-width", 3);
label.filter(function(d) { return !neighbors[d.index] })
.style("fill-opacity", 0.2);
label.filter(function(d) { return neighbors[d.index] })
.attr("font-size", 16)
};
var mouseout_node = function(z) {
link
.style("stroke-opacity", 0.2);
node
.style("stroke-width", 1)
label
.attr("font-size", 10)
.style("fill-opacity", 1)
};
window.scrollTo(($(document).width() - $(window).width()) / 2, 0);
}
var chartShow = function(jsonString) {
//load the data
jsonData = JSON.parse(jsonString);
var data = jsonData.nodes;
var length = Object.keys(data).length;
var margin = { top: 50, right: 100, bottom: 100, left: 200 },
width = document.getElementById("pic").clientWidth - margin.left - margin.right,
height = document.getElementById("pic").clientHeight * (length / 18) - margin.top - margin.bottom;
$("#pic").html("");
//design x-Axis
var x = d3.scaleLinear()
.range([0, width]);
//design y-Axis
var y = d3.scaleBand()
.rangeRound([0, height])
.padding(.1)
.paddingOuter(.1)
//set distance in percent between y axis and first bar --maybe do it not in percent but in px or something in the future?
.align(0.1);
var xAxis = d3
.axisTop(x)
var yAxis = d3
.axisLeft(y)
//select div in which svg should be created
d3.select("#pic").attr("style", "overflow-y: scroll; margin-top:15px;");
//design svg
var svg = d3.select("#pic").append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
//map data
Object.keys(data).forEach(function(d) {
d.word = d.word;
d.quantity = +d.quantity;
});
x.domain([0, d3.max(data, function(d) { return d.quantity; })]);
y.domain(data.map(function(d) { return d.word; }));
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0,0)")
.call(xAxis)
.append("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", "-.55em")
.attr("transform", "rotate(-180)");
svg.append("g")
.attr("class", "y axis")
.call(yAxis)
.append("text")
.attr("transform", "rotate(-90)")
.style("text-anchor", "end")
.text("quantity");
svg.append('g')
.attr('class', 'grid')
.attr('transform', 'translate(0, ${height})')
.call(d3.axisBottom()
.scale(x)
.tickSize(height, 0, 0)
.tickFormat(''))
const barGroups = svg.selectAll()
.data(data)
.enter()
.append('g')
barGroups
.append('rect')
.attr('class', 'bar')
.attr('y', function(d) { return y(d.word); })
.attr('x', 0)
.attr('height', y.bandwidth())
.attr('width', function(d) { return x(d.quantity); })
.on('mouseenter', function(actual, i) {
d3.selectAll('.quantity')
.attr('opacity', 0)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 0.6)
.attr('y', (d) => y(d.word) - 2)
.attr('height', y.bandwidth() + 4)
})
.on('mouseleave', function() {
d3.selectAll('.quantity')
.attr('opacity', 1)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 1)
.attr('y', (d) => y(d.word))
.attr('height', y.bandwidth())
svg.selectAll('#limit').remove()
})
barGroups
.append('text')
.attr('class', 'value')
.attr('y', (d) => y(d.word) + y.bandwidth() / 2)
.attr('x', (d) => x(d.quantity + 0.2))
.attr('text-anchor', 'start')
.text((d) => d.quantity);
//labels
svg.append('text')
.attr('class', 'title')
.attr('x', -margin.left + 20)
.attr('y', -margin.top + 20)
.attr('text-anchor', 'start')
.text('Number of occurences per word')
svg.append('text')
.attr('x', -height / 2)
.attr('y', -margin.left + 30)
.attr('transform', 'rotate(-90)')
.attr('text-anchor', 'middle')
.text('Words')
svg.append('text')
.attr('x', width / 2)
.attr('y', -margin.top + 20)
.attr('text-anchor', 'middle')
.text('Nummber of occurences')
d3.select("input").on("change", change);
var sortTimeout = setTimeout(function() {
d3.select("input").property("checked", true).each(change);
}, 2000);
//sorting chart after creating it
function change() {
clearTimeout(sortTimeout);
// Copy-on-write since tweens are evaluated after a delay.
var y0 = y.domain(data.sort(this.checked ?
function(a, b) { return b.quantity - a.quantity; } :
function(a, b) { return d3.ascending(a.word, b.word); })
.map(function(d) { return d.word; }))
.copy();
svg.selectAll(".bar")
.sort(function(a, b) { return y0(a.word) - y0(b.word); });
svg.selectAll(".value")
.sort(function(a, b) { return y0(a.quantity) - y0(b.quantity); });
var transition = svg.transition().duration(750),
delay = function(d, i) { return i * 50; };
transition.selectAll(".bar")
.delay(delay)
.attr("y", function(d) { return y0(d.word); });
transition.selectAll(".value")
.delay(delay)
.attr("y", function(d) { return y0(d.word) + 18; });
transition.select(".y.axis")
.call(yAxis)
.selectAll("g")
.delay(delay);
}
} | {
d.fx = d3.event.x;
d.fy = d3.event.y;
} | identifier_body |
forminput.js | var definitions = function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/test",
success: function(data) {
document.getElementById("definitions").innerHTML = data;
}
});
}
//when loading the website validate rangeout of slider and appearance of checkbox
$(document).ready(function() {
document.getElementById("rangeout").value = "[" + document.getElementById("numberrange").value + "]";
var box = document.getElementById("check-chart");
if (document.getElementById("customControlValidation3").checked == true) {
box.style.display = 'block';
$('label[for="check-chart"]').show();
} else {
box.style.display = 'none';
$('label[for="check-chart"]').hide();
}
});
//change appearance of checkbox
function resetradio(checkbox) {
var box = document.getElementById("check-chart");
var radio = document.getElementById("customControlValidation3");
if (radio.checked == true) {
box.style.display = 'block';
$('label[for="check-chart"]').show();
} else {
box.style.display = 'none';
$('label[for="check-chart"]').hide();
}
}
//after clicking show select parameter data and send it to server
var server = function() {
//get input from user in form
var show = document.getElementById("customControlValidation2").checked;
var show2 = document.getElementById("customControlValidation3").checked;
var documents = [0, 0, 0, 0];
documents[0] = document.getElementById("check-34").checked;
documents[1] = document.getElementById("check-71").checked;
documents[2] = document.getElementById("check-125").checked;
documents[3] = document.getElementById("check-213").checked;
//check if obligatory input is filled
if ((!show & !show2) || (!documents[0] & !documents[1] & !documents[3] & !documents[4])) {
alert("Check your input!");
}
else {
var checkChart = false;
if (!show) {
checkChart = document.getElementById("check-chart").checked;
}
var count = document.getElementById("numberrange").value;
var words = [0, 0, 0];
words[0] = document.getElementById("check-nouns").checked;
words[1] = document.getElementById("check-verbs").checked;
words[2] = document.getElementById("check-adjectives").checked;
var data = [0, 0, 0, 0, 0];
data[0] = show;
data[1] = checkChart;
data[2] = documents;
data[3] = count;
data[4] = words;
//send parameter to server
$.ajax({
url: "http://localhost:3000/",
type: "POST",
data: JSON.stringify(data),
dataType: "json",
contentType: "application/json",
success: function(data) {
$("#pic").html("");
$("#pic").append(
"<div class='loader'>" +
"</div>"
);
console.log('finished ajax request');
}
})
//check if preprocessing script is already finished
var check = setInterval(function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/",
success: function(data) {
//select what should be shown
if (data != false) {
if (show == true) { graphShow(data) } else { chartShow(data) }
//stop checking if preprocessing script is finished
clearInterval(check);
return;
}
}
});
}, 2000);
}
}
//show graph with d3
var graphShow = function(jsonString) {
$("#pic").html("");
$("#pic").append(
"<svg style='width:100%; height:100%;'>" +
"</svg>"
);
var zoom = d3.zoom();
//defining display window
var svg = d3.select("svg"),
width = document.getElementById("pic").clientWidth,
height = document.getElementById("pic").clientHeight
//make svg zoomable
transform = d3.zoomIdentity;
//select svg to container for better zooming functionality
var container = svg.append("g")
.attr("class", "container");
//function for generating different colors depending on the word cluster
var color = d3.scaleOrdinal(d3.schemeCategory20c);
//defining the standard radius of the nodes
var radius = d3.scaleSqrt()
.range([0, 6]);
//simulation of the nodes and links: What kind of forces exists between them; force of attraction or the colliding
var simulation = d3.forceSimulation()
.force("link",
d3.forceLink().id(function(d) { return d.word; })
.distance(function(d) { return radius(d.source.quantity / 2) + radius(d.target.quantity / 2); })
.strength(function(d) { return 0.2; })
)
.force("charge", d3.forceManyBody().strength(-500))
.force("center", d3.forceCenter(width / 3 * 2, height / 3 * 2))
.force("collide", d3.forceCollide(function(d) { return d.quantity * 2 }));
//reading the JSON file that inludes the nodes and links
graph = JSON.parse(jsonString);
//defining a link
var link = container.append("g")
.attr("class", "links")
.selectAll("path")
.data(graph.links)
.enter().append("svg:path")
//defining the style of a link
link.style('fill', 'none')
.style('stroke', 'gray')
.style("stroke-width", function(d) { return d.strength; })
//defining a node
var node = container.append("g")
.attr("class", "nodes")
.selectAll("g")
.data(graph.nodes)
.enter().append("g")
.style('transform-origin', '20% 20%')
.on("mouseover", function(d) { mouseover_node(d); })
.on("mouseout", function(d) { mouseout_node(d) })
//defining which function run if a node is dragged
.call(d3.drag()
.on("start", dragstarted)
.on("drag", dragged)
.on("end", dragended));
//assign the attribute quantity(JSON) to the radius of the node
var circles = node.append("circle")
.attr("r", function(d) { return radius(d.quantity / 2); })
.attr("fill", function(d) { return color(d.cluster); })
.attr("transperancy", "50%");
var labels = node.append("text")
.attr("dy", ".35em")
.attr("text-anchor", "middle")
//define the text that is displayed (word out of the JSON file)
.text(function(d) { return d.word; })
//define the color of the text (cluster out of the JSON file)
.attr("fill", "black");
simulation
.nodes(graph.nodes)
.on("tick", ticked);
simulation.force("link")
.links(graph.links);
//select what is standard zoom and what to do on zoom
svg.call(d3.zoom()
.scaleExtent([1 / 8, 8])
.on("zoom", zoomed));
//Legende
var margin = { top: 10, right: 10, bottom: 10, left: 10 };
var divWidth = document.getElementById("pic").offsetWidth;
var legendHolder = container.append('g')
.attr('transform', "translate(10,30)")
var legend = legendHolder.selectAll(".legend")
.data(color.domain())
.enter().append("g")
.attr("class", "legend")
.attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; });
legend.append("circle")
.attr("cx", 0)
.attr("cy", 0)
.attr("r", 9)
.style("fill", color);
legend.append("text")
.attr("x", 12)
.attr("y", 0)
.attr("dy", ".35em")
.attr("stroke", "black")
.style("text-anchor", "start")
.text(function(d) {
if (d == "nn") {
return "noun, singular"
} else if (d == "nns") {
return "noun, plural"
} else if (d == "vbg") {
return "verb, gerund"
} else if (d == "vbz") {
return "verb, present tense, third person singular"
} else if (d == "vbn") {
return "verb past participle"
} else if (d == "vbp") {
return "verb, present tense, not third person singular"
} else if (d == "jjr") {
return "adjective, comparative"
} else if (d == "md") {
return "modal"
} else if (d == "prp") {
return "personal pronoun"
} else if (d == "rbr") {
return "adverb, comparative"
} else if (d == "rb") {
return "adverb"
} else if (d == "pdt") {
return "predeterminer"
} else if (d == "jj") {
return "adjective"
} else if (d == "vbd") {
return "verb, past tense"
} else if (d == "fw") {
return "foreign word"
} else if (d == "vb") {
return "verb"
} else if (d == "jjs") {
return "adjectiv, superlative"
} else if (d == "cc") {
return "coordinating conjunction"
} else if (d == "dt") {
return "determiner"
} else if (d == "rp") {
return "particle"
} else if (d == "in") {
return "preposition/subordinating conjunction"
} else if (d == "cd") {
return "cardinal digit"
} else return d
});
function | () {
var g = d3.selectAll(".container");
g.attr("transform", d3.event.transform);
}
function ticked() {
link.attr("d", function(d) {
var dx = d.target.x - d.source.x,
dy = d.target.y - d.source.y,
dr = Math.sqrt(dx * dx + dy * dy);
return "M" +
d.source.x + "," +
d.source.y + "A" +
dr + "," + dr + " 0 0,1 " +
d.target.x + "," +
d.target.y;
})
.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node
.attr("transform", function(d) { return "translate(" + d.x + ", " + d.y + ")"; });
edgepaths.attr('d', function(d) {
return 'M ' + d.source.x + ' ' + d.source.y + ' L ' + d.target.x + ' ' + d.target.y;
});
}
function dragstarted(d) {
if (!d3.event.active) simulation.alphaTarget(0.3).restart()
d.fx = d.x;
d.fy = d.y;
}
function dragged(d) {
d.fx = d3.event.x;
d.fy = d3.event.y;
}
function dragended(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = null;
d.fy = null;
}
var mouseover_node = function(z) {
var neighbors = {};
neighbors[z.index] = true;
link.filter(function(d) {
if (d.source == z) {
neighbors[d.target.index] = true
return true
} else if (d.target == z) {
neighbors[d.source.index] = true
return true
} else {
return false
}
})
.style("stroke-opacity", 1);
node.filter(function(d) { return neighbors[d.index] })
.style("stroke-width", 3);
label.filter(function(d) { return !neighbors[d.index] })
.style("fill-opacity", 0.2);
label.filter(function(d) { return neighbors[d.index] })
.attr("font-size", 16)
};
var mouseout_node = function(z) {
link
.style("stroke-opacity", 0.2);
node
.style("stroke-width", 1)
label
.attr("font-size", 10)
.style("fill-opacity", 1)
};
window.scrollTo(($(document).width() - $(window).width()) / 2, 0);
}
var chartShow = function(jsonString) {
//load the data
jsonData = JSON.parse(jsonString);
var data = jsonData.nodes;
var length = Object.keys(data).length;
var margin = { top: 50, right: 100, bottom: 100, left: 200 },
width = document.getElementById("pic").clientWidth - margin.left - margin.right,
height = document.getElementById("pic").clientHeight * (length / 18) - margin.top - margin.bottom;
$("#pic").html("");
//design x-Axis
var x = d3.scaleLinear()
.range([0, width]);
//design y-Axis
var y = d3.scaleBand()
.rangeRound([0, height])
.padding(.1)
.paddingOuter(.1)
//set distance in percent between y axis and first bar --maybe do it not in percent but in px or something in the future?
.align(0.1);
var xAxis = d3
.axisTop(x)
var yAxis = d3
.axisLeft(y)
//select div in which svg should be created
d3.select("#pic").attr("style", "overflow-y: scroll; margin-top:15px;");
//design svg
var svg = d3.select("#pic").append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
//map data
Object.keys(data).forEach(function(d) {
d.word = d.word;
d.quantity = +d.quantity;
});
x.domain([0, d3.max(data, function(d) { return d.quantity; })]);
y.domain(data.map(function(d) { return d.word; }));
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0,0)")
.call(xAxis)
.append("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", "-.55em")
.attr("transform", "rotate(-180)");
svg.append("g")
.attr("class", "y axis")
.call(yAxis)
.append("text")
.attr("transform", "rotate(-90)")
.style("text-anchor", "end")
.text("quantity");
svg.append('g')
.attr('class', 'grid')
.attr('transform', 'translate(0, ${height})')
.call(d3.axisBottom()
.scale(x)
.tickSize(height, 0, 0)
.tickFormat(''))
const barGroups = svg.selectAll()
.data(data)
.enter()
.append('g')
barGroups
.append('rect')
.attr('class', 'bar')
.attr('y', function(d) { return y(d.word); })
.attr('x', 0)
.attr('height', y.bandwidth())
.attr('width', function(d) { return x(d.quantity); })
.on('mouseenter', function(actual, i) {
d3.selectAll('.quantity')
.attr('opacity', 0)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 0.6)
.attr('y', (d) => y(d.word) - 2)
.attr('height', y.bandwidth() + 4)
})
.on('mouseleave', function() {
d3.selectAll('.quantity')
.attr('opacity', 1)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 1)
.attr('y', (d) => y(d.word))
.attr('height', y.bandwidth())
svg.selectAll('#limit').remove()
})
barGroups
.append('text')
.attr('class', 'value')
.attr('y', (d) => y(d.word) + y.bandwidth() / 2)
.attr('x', (d) => x(d.quantity + 0.2))
.attr('text-anchor', 'start')
.text((d) => d.quantity);
//labels
svg.append('text')
.attr('class', 'title')
.attr('x', -margin.left + 20)
.attr('y', -margin.top + 20)
.attr('text-anchor', 'start')
.text('Number of occurences per word')
svg.append('text')
.attr('x', -height / 2)
.attr('y', -margin.left + 30)
.attr('transform', 'rotate(-90)')
.attr('text-anchor', 'middle')
.text('Words')
svg.append('text')
.attr('x', width / 2)
.attr('y', -margin.top + 20)
.attr('text-anchor', 'middle')
.text('Nummber of occurences')
d3.select("input").on("change", change);
var sortTimeout = setTimeout(function() {
d3.select("input").property("checked", true).each(change);
}, 2000);
//sorting chart after creating it
function change() {
clearTimeout(sortTimeout);
// Copy-on-write since tweens are evaluated after a delay.
var y0 = y.domain(data.sort(this.checked ?
function(a, b) { return b.quantity - a.quantity; } :
function(a, b) { return d3.ascending(a.word, b.word); })
.map(function(d) { return d.word; }))
.copy();
svg.selectAll(".bar")
.sort(function(a, b) { return y0(a.word) - y0(b.word); });
svg.selectAll(".value")
.sort(function(a, b) { return y0(a.quantity) - y0(b.quantity); });
var transition = svg.transition().duration(750),
delay = function(d, i) { return i * 50; };
transition.selectAll(".bar")
.delay(delay)
.attr("y", function(d) { return y0(d.word); });
transition.selectAll(".value")
.delay(delay)
.attr("y", function(d) { return y0(d.word) + 18; });
transition.select(".y.axis")
.call(yAxis)
.selectAll("g")
.delay(delay);
}
} | zoomed | identifier_name |
forminput.js | var definitions = function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/test",
success: function(data) {
document.getElementById("definitions").innerHTML = data;
}
});
}
//when loading the website validate rangeout of slider and appearance of checkbox
$(document).ready(function() {
document.getElementById("rangeout").value = "[" + document.getElementById("numberrange").value + "]";
var box = document.getElementById("check-chart");
if (document.getElementById("customControlValidation3").checked == true) {
box.style.display = 'block';
$('label[for="check-chart"]').show();
} else {
box.style.display = 'none';
$('label[for="check-chart"]').hide();
}
});
//change appearance of checkbox
function resetradio(checkbox) {
var box = document.getElementById("check-chart");
var radio = document.getElementById("customControlValidation3");
if (radio.checked == true) {
box.style.display = 'block';
$('label[for="check-chart"]').show();
} else {
box.style.display = 'none';
$('label[for="check-chart"]').hide();
}
}
//after clicking show select parameter data and send it to server
var server = function() {
//get input from user in form
var show = document.getElementById("customControlValidation2").checked;
var show2 = document.getElementById("customControlValidation3").checked;
var documents = [0, 0, 0, 0];
documents[0] = document.getElementById("check-34").checked;
documents[1] = document.getElementById("check-71").checked;
documents[2] = document.getElementById("check-125").checked;
documents[3] = document.getElementById("check-213").checked;
//check if obligatory input is filled
if ((!show & !show2) || (!documents[0] & !documents[1] & !documents[3] & !documents[4])) {
alert("Check your input!");
}
else {
var checkChart = false;
if (!show) {
checkChart = document.getElementById("check-chart").checked;
}
var count = document.getElementById("numberrange").value;
var words = [0, 0, 0];
words[0] = document.getElementById("check-nouns").checked;
words[1] = document.getElementById("check-verbs").checked;
words[2] = document.getElementById("check-adjectives").checked;
var data = [0, 0, 0, 0, 0];
data[0] = show;
data[1] = checkChart;
data[2] = documents;
data[3] = count;
data[4] = words;
//send parameter to server
$.ajax({
url: "http://localhost:3000/",
type: "POST",
data: JSON.stringify(data),
dataType: "json",
contentType: "application/json",
success: function(data) {
$("#pic").html("");
$("#pic").append(
"<div class='loader'>" +
"</div>"
);
console.log('finished ajax request');
}
})
//check if preprocessing script is already finished
var check = setInterval(function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/",
success: function(data) {
//select what should be shown
if (data != false) {
if (show == true) { graphShow(data) } else { chartShow(data) }
//stop checking if preprocessing script is finished
clearInterval(check);
return;
}
}
});
}, 2000);
}
}
//show graph with d3
var graphShow = function(jsonString) {
$("#pic").html("");
$("#pic").append(
"<svg style='width:100%; height:100%;'>" +
"</svg>"
);
var zoom = d3.zoom();
//defining display window
var svg = d3.select("svg"),
width = document.getElementById("pic").clientWidth,
height = document.getElementById("pic").clientHeight
//make svg zoomable
transform = d3.zoomIdentity;
//select svg to container for better zooming functionality
var container = svg.append("g")
.attr("class", "container");
//function for generating different colors depending on the word cluster
var color = d3.scaleOrdinal(d3.schemeCategory20c);
//defining the standard radius of the nodes
var radius = d3.scaleSqrt()
.range([0, 6]);
//simulation of the nodes and links: What kind of forces exists between them; force of attraction or the colliding
var simulation = d3.forceSimulation()
.force("link",
d3.forceLink().id(function(d) { return d.word; })
.distance(function(d) { return radius(d.source.quantity / 2) + radius(d.target.quantity / 2); })
.strength(function(d) { return 0.2; })
)
.force("charge", d3.forceManyBody().strength(-500))
.force("center", d3.forceCenter(width / 3 * 2, height / 3 * 2))
.force("collide", d3.forceCollide(function(d) { return d.quantity * 2 }));
//reading the JSON file that inludes the nodes and links
graph = JSON.parse(jsonString);
//defining a link
var link = container.append("g")
.attr("class", "links")
.selectAll("path")
.data(graph.links)
.enter().append("svg:path")
//defining the style of a link
link.style('fill', 'none')
.style('stroke', 'gray')
.style("stroke-width", function(d) { return d.strength; })
//defining a node
var node = container.append("g")
.attr("class", "nodes")
.selectAll("g")
.data(graph.nodes)
.enter().append("g")
.style('transform-origin', '20% 20%')
.on("mouseover", function(d) { mouseover_node(d); })
.on("mouseout", function(d) { mouseout_node(d) })
//defining which function run if a node is dragged
.call(d3.drag()
.on("start", dragstarted)
.on("drag", dragged)
.on("end", dragended));
//assign the attribute quantity(JSON) to the radius of the node
var circles = node.append("circle")
.attr("r", function(d) { return radius(d.quantity / 2); })
.attr("fill", function(d) { return color(d.cluster); })
.attr("transperancy", "50%");
var labels = node.append("text")
.attr("dy", ".35em")
.attr("text-anchor", "middle")
//define the text that is displayed (word out of the JSON file)
.text(function(d) { return d.word; })
//define the color of the text (cluster out of the JSON file)
.attr("fill", "black");
simulation
.nodes(graph.nodes)
.on("tick", ticked);
simulation.force("link")
.links(graph.links);
//select what is standard zoom and what to do on zoom
svg.call(d3.zoom()
.scaleExtent([1 / 8, 8])
.on("zoom", zoomed));
//Legende
var margin = { top: 10, right: 10, bottom: 10, left: 10 };
var divWidth = document.getElementById("pic").offsetWidth;
var legendHolder = container.append('g')
.attr('transform', "translate(10,30)")
var legend = legendHolder.selectAll(".legend")
.data(color.domain())
.enter().append("g")
.attr("class", "legend")
.attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; });
legend.append("circle")
.attr("cx", 0)
.attr("cy", 0)
.attr("r", 9)
.style("fill", color);
legend.append("text")
.attr("x", 12)
.attr("y", 0)
.attr("dy", ".35em")
.attr("stroke", "black")
.style("text-anchor", "start")
.text(function(d) {
if (d == "nn") {
return "noun, singular"
} else if (d == "nns") {
return "noun, plural"
} else if (d == "vbg") {
return "verb, gerund"
} else if (d == "vbz") {
return "verb, present tense, third person singular"
} else if (d == "vbn") {
return "verb past participle"
} else if (d == "vbp") {
return "verb, present tense, not third person singular"
} else if (d == "jjr") {
return "adjective, comparative"
} else if (d == "md") {
return "modal"
} else if (d == "prp") {
return "personal pronoun"
} else if (d == "rbr") {
return "adverb, comparative"
} else if (d == "rb") {
return "adverb"
} else if (d == "pdt") {
return "predeterminer"
} else if (d == "jj") {
return "adjective"
} else if (d == "vbd") {
return "verb, past tense"
} else if (d == "fw") {
return "foreign word"
} else if (d == "vb") {
return "verb"
} else if (d == "jjs") {
return "adjectiv, superlative"
} else if (d == "cc") {
return "coordinating conjunction"
} else if (d == "dt") {
return "determiner"
} else if (d == "rp") {
return "particle"
} else if (d == "in") | else if (d == "cd") {
return "cardinal digit"
} else return d
});
function zoomed() {
var g = d3.selectAll(".container");
g.attr("transform", d3.event.transform);
}
function ticked() {
link.attr("d", function(d) {
var dx = d.target.x - d.source.x,
dy = d.target.y - d.source.y,
dr = Math.sqrt(dx * dx + dy * dy);
return "M" +
d.source.x + "," +
d.source.y + "A" +
dr + "," + dr + " 0 0,1 " +
d.target.x + "," +
d.target.y;
})
.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node
.attr("transform", function(d) { return "translate(" + d.x + ", " + d.y + ")"; });
edgepaths.attr('d', function(d) {
return 'M ' + d.source.x + ' ' + d.source.y + ' L ' + d.target.x + ' ' + d.target.y;
});
}
function dragstarted(d) {
if (!d3.event.active) simulation.alphaTarget(0.3).restart()
d.fx = d.x;
d.fy = d.y;
}
function dragged(d) {
d.fx = d3.event.x;
d.fy = d3.event.y;
}
function dragended(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = null;
d.fy = null;
}
var mouseover_node = function(z) {
var neighbors = {};
neighbors[z.index] = true;
link.filter(function(d) {
if (d.source == z) {
neighbors[d.target.index] = true
return true
} else if (d.target == z) {
neighbors[d.source.index] = true
return true
} else {
return false
}
})
.style("stroke-opacity", 1);
node.filter(function(d) { return neighbors[d.index] })
.style("stroke-width", 3);
label.filter(function(d) { return !neighbors[d.index] })
.style("fill-opacity", 0.2);
label.filter(function(d) { return neighbors[d.index] })
.attr("font-size", 16)
};
var mouseout_node = function(z) {
link
.style("stroke-opacity", 0.2);
node
.style("stroke-width", 1)
label
.attr("font-size", 10)
.style("fill-opacity", 1)
};
window.scrollTo(($(document).width() - $(window).width()) / 2, 0);
}
var chartShow = function(jsonString) {
//load the data
jsonData = JSON.parse(jsonString);
var data = jsonData.nodes;
var length = Object.keys(data).length;
var margin = { top: 50, right: 100, bottom: 100, left: 200 },
width = document.getElementById("pic").clientWidth - margin.left - margin.right,
height = document.getElementById("pic").clientHeight * (length / 18) - margin.top - margin.bottom;
$("#pic").html("");
//design x-Axis
var x = d3.scaleLinear()
.range([0, width]);
//design y-Axis
var y = d3.scaleBand()
.rangeRound([0, height])
.padding(.1)
.paddingOuter(.1)
//set distance in percent between y axis and first bar --maybe do it not in percent but in px or something in the future?
.align(0.1);
var xAxis = d3
.axisTop(x)
var yAxis = d3
.axisLeft(y)
//select div in which svg should be created
d3.select("#pic").attr("style", "overflow-y: scroll; margin-top:15px;");
//design svg
var svg = d3.select("#pic").append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
//map data
Object.keys(data).forEach(function(d) {
d.word = d.word;
d.quantity = +d.quantity;
});
x.domain([0, d3.max(data, function(d) { return d.quantity; })]);
y.domain(data.map(function(d) { return d.word; }));
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0,0)")
.call(xAxis)
.append("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", "-.55em")
.attr("transform", "rotate(-180)");
svg.append("g")
.attr("class", "y axis")
.call(yAxis)
.append("text")
.attr("transform", "rotate(-90)")
.style("text-anchor", "end")
.text("quantity");
svg.append('g')
.attr('class', 'grid')
.attr('transform', 'translate(0, ${height})')
.call(d3.axisBottom()
.scale(x)
.tickSize(height, 0, 0)
.tickFormat(''))
const barGroups = svg.selectAll()
.data(data)
.enter()
.append('g')
barGroups
.append('rect')
.attr('class', 'bar')
.attr('y', function(d) { return y(d.word); })
.attr('x', 0)
.attr('height', y.bandwidth())
.attr('width', function(d) { return x(d.quantity); })
.on('mouseenter', function(actual, i) {
d3.selectAll('.quantity')
.attr('opacity', 0)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 0.6)
.attr('y', (d) => y(d.word) - 2)
.attr('height', y.bandwidth() + 4)
})
.on('mouseleave', function() {
d3.selectAll('.quantity')
.attr('opacity', 1)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 1)
.attr('y', (d) => y(d.word))
.attr('height', y.bandwidth())
svg.selectAll('#limit').remove()
})
barGroups
.append('text')
.attr('class', 'value')
.attr('y', (d) => y(d.word) + y.bandwidth() / 2)
.attr('x', (d) => x(d.quantity + 0.2))
.attr('text-anchor', 'start')
.text((d) => d.quantity);
//labels
svg.append('text')
.attr('class', 'title')
.attr('x', -margin.left + 20)
.attr('y', -margin.top + 20)
.attr('text-anchor', 'start')
.text('Number of occurences per word')
svg.append('text')
.attr('x', -height / 2)
.attr('y', -margin.left + 30)
.attr('transform', 'rotate(-90)')
.attr('text-anchor', 'middle')
.text('Words')
svg.append('text')
.attr('x', width / 2)
.attr('y', -margin.top + 20)
.attr('text-anchor', 'middle')
.text('Nummber of occurences')
d3.select("input").on("change", change);
var sortTimeout = setTimeout(function() {
d3.select("input").property("checked", true).each(change);
}, 2000);
//sorting chart after creating it
function change() {
clearTimeout(sortTimeout);
// Copy-on-write since tweens are evaluated after a delay.
var y0 = y.domain(data.sort(this.checked ?
function(a, b) { return b.quantity - a.quantity; } :
function(a, b) { return d3.ascending(a.word, b.word); })
.map(function(d) { return d.word; }))
.copy();
svg.selectAll(".bar")
.sort(function(a, b) { return y0(a.word) - y0(b.word); });
svg.selectAll(".value")
.sort(function(a, b) { return y0(a.quantity) - y0(b.quantity); });
var transition = svg.transition().duration(750),
delay = function(d, i) { return i * 50; };
transition.selectAll(".bar")
.delay(delay)
.attr("y", function(d) { return y0(d.word); });
transition.selectAll(".value")
.delay(delay)
.attr("y", function(d) { return y0(d.word) + 18; });
transition.select(".y.axis")
.call(yAxis)
.selectAll("g")
.delay(delay);
}
} | {
return "preposition/subordinating conjunction"
} | conditional_block |
forminput.js | var definitions = function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/test",
success: function(data) {
document.getElementById("definitions").innerHTML = data;
}
});
}
//when loading the website validate rangeout of slider and appearance of checkbox
$(document).ready(function() {
document.getElementById("rangeout").value = "[" + document.getElementById("numberrange").value + "]";
var box = document.getElementById("check-chart");
if (document.getElementById("customControlValidation3").checked == true) {
box.style.display = 'block';
$('label[for="check-chart"]').show();
} else {
box.style.display = 'none';
$('label[for="check-chart"]').hide();
}
});
//change appearance of checkbox
function resetradio(checkbox) {
var box = document.getElementById("check-chart");
var radio = document.getElementById("customControlValidation3");
if (radio.checked == true) {
box.style.display = 'block';
$('label[for="check-chart"]').show();
} else {
box.style.display = 'none';
$('label[for="check-chart"]').hide();
}
}
//after clicking show select parameter data and send it to server
var server = function() {
//get input from user in form
var show = document.getElementById("customControlValidation2").checked;
var show2 = document.getElementById("customControlValidation3").checked;
var documents = [0, 0, 0, 0];
documents[0] = document.getElementById("check-34").checked;
documents[1] = document.getElementById("check-71").checked;
documents[2] = document.getElementById("check-125").checked;
documents[3] = document.getElementById("check-213").checked;
//check if obligatory input is filled
if ((!show & !show2) || (!documents[0] & !documents[1] & !documents[3] & !documents[4])) {
alert("Check your input!");
}
else {
var checkChart = false;
if (!show) {
checkChart = document.getElementById("check-chart").checked;
}
var count = document.getElementById("numberrange").value;
var words = [0, 0, 0];
words[0] = document.getElementById("check-nouns").checked;
words[1] = document.getElementById("check-verbs").checked;
words[2] = document.getElementById("check-adjectives").checked;
var data = [0, 0, 0, 0, 0];
data[0] = show;
data[1] = checkChart;
data[2] = documents;
data[3] = count;
data[4] = words;
//send parameter to server
$.ajax({
url: "http://localhost:3000/",
type: "POST",
data: JSON.stringify(data),
dataType: "json",
contentType: "application/json",
success: function(data) {
$("#pic").html("");
$("#pic").append(
"<div class='loader'>" +
"</div>"
);
console.log('finished ajax request');
}
})
//check if preprocessing script is already finished
var check = setInterval(function() {
$.ajax({
type: "GET",
url: "http://localhost:3000/",
success: function(data) {
//select what should be shown
if (data != false) {
if (show == true) { graphShow(data) } else { chartShow(data) }
//stop checking if preprocessing script is finished
clearInterval(check);
return;
}
}
});
}, 2000);
}
}
//show graph with d3
var graphShow = function(jsonString) {
$("#pic").html("");
$("#pic").append(
"<svg style='width:100%; height:100%;'>" +
"</svg>"
);
var zoom = d3.zoom();
//defining display window
var svg = d3.select("svg"),
width = document.getElementById("pic").clientWidth,
height = document.getElementById("pic").clientHeight
//make svg zoomable
transform = d3.zoomIdentity;
//select svg to container for better zooming functionality
var container = svg.append("g")
.attr("class", "container");
//function for generating different colors depending on the word cluster
var color = d3.scaleOrdinal(d3.schemeCategory20c);
//defining the standard radius of the nodes
var radius = d3.scaleSqrt()
.range([0, 6]);
//simulation of the nodes and links: What kind of forces exists between them; force of attraction or the colliding
var simulation = d3.forceSimulation()
.force("link",
d3.forceLink().id(function(d) { return d.word; })
.distance(function(d) { return radius(d.source.quantity / 2) + radius(d.target.quantity / 2); })
.strength(function(d) { return 0.2; })
)
.force("charge", d3.forceManyBody().strength(-500))
.force("center", d3.forceCenter(width / 3 * 2, height / 3 * 2))
.force("collide", d3.forceCollide(function(d) { return d.quantity * 2 }));
//reading the JSON file that inludes the nodes and links
graph = JSON.parse(jsonString);
//defining a link
var link = container.append("g")
.attr("class", "links")
.selectAll("path")
.data(graph.links)
.enter().append("svg:path")
//defining the style of a link
link.style('fill', 'none')
.style('stroke', 'gray')
.style("stroke-width", function(d) { return d.strength; })
//defining a node
var node = container.append("g")
.attr("class", "nodes")
.selectAll("g")
.data(graph.nodes)
.enter().append("g")
.style('transform-origin', '20% 20%')
.on("mouseover", function(d) { mouseover_node(d); })
.on("mouseout", function(d) { mouseout_node(d) })
//defining which function run if a node is dragged
.call(d3.drag()
.on("start", dragstarted)
.on("drag", dragged)
.on("end", dragended));
//assign the attribute quantity(JSON) to the radius of the node
var circles = node.append("circle")
.attr("r", function(d) { return radius(d.quantity / 2); })
.attr("fill", function(d) { return color(d.cluster); })
.attr("transperancy", "50%");
var labels = node.append("text")
.attr("dy", ".35em")
.attr("text-anchor", "middle")
//define the text that is displayed (word out of the JSON file)
.text(function(d) { return d.word; })
//define the color of the text (cluster out of the JSON file)
.attr("fill", "black");
simulation
.nodes(graph.nodes)
.on("tick", ticked);
simulation.force("link")
.links(graph.links);
//select what is standard zoom and what to do on zoom
svg.call(d3.zoom()
.scaleExtent([1 / 8, 8])
.on("zoom", zoomed));
//Legende
var margin = { top: 10, right: 10, bottom: 10, left: 10 };
var divWidth = document.getElementById("pic").offsetWidth;
var legendHolder = container.append('g')
.attr('transform', "translate(10,30)")
var legend = legendHolder.selectAll(".legend")
.data(color.domain())
.enter().append("g")
.attr("class", "legend")
.attr("transform", function(d, i) { return "translate(0," + i * 20 + ")"; });
legend.append("circle")
.attr("cx", 0)
.attr("cy", 0)
.attr("r", 9)
.style("fill", color);
legend.append("text")
.attr("x", 12)
.attr("y", 0)
.attr("dy", ".35em")
.attr("stroke", "black")
.style("text-anchor", "start")
.text(function(d) {
if (d == "nn") {
return "noun, singular"
} else if (d == "nns") {
return "noun, plural"
} else if (d == "vbg") {
return "verb, gerund"
} else if (d == "vbz") {
return "verb, present tense, third person singular"
} else if (d == "vbn") {
return "verb past participle"
} else if (d == "vbp") {
return "verb, present tense, not third person singular"
} else if (d == "jjr") {
return "adjective, comparative"
} else if (d == "md") {
return "modal"
} else if (d == "prp") {
return "personal pronoun"
} else if (d == "rbr") {
return "adverb, comparative"
} else if (d == "rb") {
return "adverb"
} else if (d == "pdt") {
return "predeterminer"
} else if (d == "jj") {
return "adjective"
} else if (d == "vbd") {
return "verb, past tense"
} else if (d == "fw") {
return "foreign word"
} else if (d == "vb") {
return "verb"
} else if (d == "jjs") {
return "adjectiv, superlative"
} else if (d == "cc") {
return "coordinating conjunction"
} else if (d == "dt") {
return "determiner"
} else if (d == "rp") {
return "particle"
} else if (d == "in") {
return "preposition/subordinating conjunction"
} else if (d == "cd") {
return "cardinal digit"
} else return d
});
function zoomed() {
var g = d3.selectAll(".container");
g.attr("transform", d3.event.transform);
}
function ticked() {
link.attr("d", function(d) {
var dx = d.target.x - d.source.x,
dy = d.target.y - d.source.y,
dr = Math.sqrt(dx * dx + dy * dy);
return "M" +
d.source.x + "," +
d.source.y + "A" +
dr + "," + dr + " 0 0,1 " +
d.target.x + "," +
d.target.y;
})
.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node
.attr("transform", function(d) { return "translate(" + d.x + ", " + d.y + ")"; });
edgepaths.attr('d', function(d) {
return 'M ' + d.source.x + ' ' + d.source.y + ' L ' + d.target.x + ' ' + d.target.y;
});
}
function dragstarted(d) {
if (!d3.event.active) simulation.alphaTarget(0.3).restart()
d.fx = d.x;
d.fy = d.y;
}
function dragged(d) {
d.fx = d3.event.x;
d.fy = d3.event.y;
}
function dragended(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = null;
d.fy = null;
}
var mouseover_node = function(z) {
var neighbors = {};
neighbors[z.index] = true;
link.filter(function(d) {
if (d.source == z) {
neighbors[d.target.index] = true
return true
} else if (d.target == z) {
neighbors[d.source.index] = true
return true
} else {
return false
}
})
.style("stroke-opacity", 1);
node.filter(function(d) { return neighbors[d.index] })
.style("stroke-width", 3);
label.filter(function(d) { return !neighbors[d.index] })
.style("fill-opacity", 0.2);
label.filter(function(d) { return neighbors[d.index] })
.attr("font-size", 16)
};
var mouseout_node = function(z) {
link
.style("stroke-opacity", 0.2);
node
.style("stroke-width", 1)
label
.attr("font-size", 10)
.style("fill-opacity", 1)
};
window.scrollTo(($(document).width() - $(window).width()) / 2, 0);
}
var chartShow = function(jsonString) {
//load the data
jsonData = JSON.parse(jsonString);
var data = jsonData.nodes;
var length = Object.keys(data).length;
var margin = { top: 50, right: 100, bottom: 100, left: 200 },
width = document.getElementById("pic").clientWidth - margin.left - margin.right,
height = document.getElementById("pic").clientHeight * (length / 18) - margin.top - margin.bottom;
$("#pic").html("");
//design x-Axis
var x = d3.scaleLinear()
.range([0, width]);
//design y-Axis
var y = d3.scaleBand()
.rangeRound([0, height])
.padding(.1)
.paddingOuter(.1)
//set distance in percent between y axis and first bar --maybe do it not in percent but in px or something in the future?
.align(0.1);
var xAxis = d3
.axisTop(x)
var yAxis = d3
.axisLeft(y)
//select div in which svg should be created
d3.select("#pic").attr("style", "overflow-y: scroll; margin-top:15px;");
//design svg
var svg = d3.select("#pic").append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
//map data
Object.keys(data).forEach(function(d) {
d.word = d.word;
d.quantity = +d.quantity;
});
x.domain([0, d3.max(data, function(d) { return d.quantity; })]);
y.domain(data.map(function(d) { return d.word; }));
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0,0)")
.call(xAxis)
.append("text")
.style("text-anchor", "end")
.attr("dx", "-.8em")
.attr("dy", "-.55em")
.attr("transform", "rotate(-180)");
svg.append("g")
.attr("class", "y axis")
.call(yAxis)
.append("text")
.attr("transform", "rotate(-90)")
.style("text-anchor", "end")
.text("quantity");
svg.append('g')
.attr('class', 'grid')
.attr('transform', 'translate(0, ${height})')
.call(d3.axisBottom()
.scale(x)
.tickSize(height, 0, 0)
.tickFormat(''))
const barGroups = svg.selectAll()
.data(data)
.enter()
.append('g')
barGroups
.append('rect')
.attr('class', 'bar')
.attr('y', function(d) { return y(d.word); })
.attr('x', 0)
.attr('height', y.bandwidth())
.attr('width', function(d) { return x(d.quantity); })
.on('mouseenter', function(actual, i) {
d3.selectAll('.quantity')
.attr('opacity', 0)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 0.6)
.attr('y', (d) => y(d.word) - 2)
.attr('height', y.bandwidth() + 4)
})
.on('mouseleave', function() {
d3.selectAll('.quantity')
.attr('opacity', 1)
d3.select(this)
.transition()
.duration(300)
.attr('opacity', 1)
.attr('y', (d) => y(d.word))
.attr('height', y.bandwidth())
svg.selectAll('#limit').remove()
})
barGroups
.append('text')
.attr('class', 'value')
.attr('y', (d) => y(d.word) + y.bandwidth() / 2) | .attr('x', (d) => x(d.quantity + 0.2))
.attr('text-anchor', 'start')
.text((d) => d.quantity);
//labels
svg.append('text')
.attr('class', 'title')
.attr('x', -margin.left + 20)
.attr('y', -margin.top + 20)
.attr('text-anchor', 'start')
.text('Number of occurences per word')
svg.append('text')
.attr('x', -height / 2)
.attr('y', -margin.left + 30)
.attr('transform', 'rotate(-90)')
.attr('text-anchor', 'middle')
.text('Words')
svg.append('text')
.attr('x', width / 2)
.attr('y', -margin.top + 20)
.attr('text-anchor', 'middle')
.text('Nummber of occurences')
d3.select("input").on("change", change);
var sortTimeout = setTimeout(function() {
d3.select("input").property("checked", true).each(change);
}, 2000);
//sorting chart after creating it
function change() {
clearTimeout(sortTimeout);
// Copy-on-write since tweens are evaluated after a delay.
var y0 = y.domain(data.sort(this.checked ?
function(a, b) { return b.quantity - a.quantity; } :
function(a, b) { return d3.ascending(a.word, b.word); })
.map(function(d) { return d.word; }))
.copy();
svg.selectAll(".bar")
.sort(function(a, b) { return y0(a.word) - y0(b.word); });
svg.selectAll(".value")
.sort(function(a, b) { return y0(a.quantity) - y0(b.quantity); });
var transition = svg.transition().duration(750),
delay = function(d, i) { return i * 50; };
transition.selectAll(".bar")
.delay(delay)
.attr("y", function(d) { return y0(d.word); });
transition.selectAll(".value")
.delay(delay)
.attr("y", function(d) { return y0(d.word) + 18; });
transition.select(".y.axis")
.call(yAxis)
.selectAll("g")
.delay(delay);
}
} | random_line_split | |
supervisor_processor.go | // Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package supervisor
import (
"context"
"errors"
"fmt"
"runtime/debug"
"time"
)
// The processor maintains runnable goroutines - ie., when requested will start one, and then once it exists it will
// record the result and act accordingly. It is also responsible for detecting and acting upon supervision subtrees that
// need to be restarted after death (via a 'GC' process)
// processorRequest is a request for the processor. Only one of the fields can be set.
type processorRequest struct {
schedule *processorRequestSchedule
died *processorRequestDied
waitSettled *processorRequestWaitSettled
}
// processorRequestSchedule requests that a given node's runnable be started.
type processorRequestSchedule struct {
dn string
}
// processorRequestDied is a signal from a runnable goroutine that the runnable has died.
type processorRequestDied struct {
dn string
err error
}
type processorRequestWaitSettled struct {
waiter chan struct{}
}
// processor is the main processing loop.
func (s *supervisor) processor(ctx context.Context) {
s.ilogger.Info("supervisor processor started")
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
// How long has the GC been clean. This is used to notify 'settled' waiters.
cleanCycles := 0
markDirty := func() {
clean = false
cleanCycles = 0
}
for {
select {
case <-ctx.Done():
s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
case <-gc.C:
if !clean {
s.processGC()
}
clean = true
cleanCycles += 1
// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
}
waiters = nil
}
case r := <-s.pReq:
switch {
case r.schedule != nil:
s.processSchedule(r.schedule)
markDirty()
case r.died != nil:
s.processDied(r.died)
markDirty()
case r.waitSettled != nil:
waiters = append(waiters, r.waitSettled.waiter)
default:
panic(fmt.Errorf("unhandled request %+v", r))
}
}
}
}
// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
// they do not get automatically restarted.
func (s *supervisor) processKill() {
s.mu.Lock()
defer s.mu.Unlock()
// Gather all context cancel functions.
var cancels []func()
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
cancels = append(cancels, cur.ctxC)
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Call all context cancels.
for _, c := range cancels {
c()
}
}
// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
n := s.nodeByDN(r.dn)
go func() {
if !s.propagatePanic {
defer func() {
if rec := recover(); rec != nil {
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
},
}
}
}()
}
res := n.runnable(n.ctx)
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: res,
},
}
}()
}
// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
// Okay, so a Runnable has quit. What now?
n := s.nodeByDN(r.dn)
ctx := n.ctx
// Simple case: it was marked as Done and quit with no error.
if n.state == nodeStateDone && r.err == nil {
// Do nothing. This was supposed to happen. Keep the process as DONE.
return
}
// Find innermost error to check if it's a context canceled error.
perr := r.err
for |
// Simple case: the context was canceled and the returned error is the context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
// Otherwise, the Runnable should not have died or quit. Handle accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
err = fmt.Errorf("returned when %s", n.state)
} else {
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
// Cancel that node's context, just in case something still depends on it.
n.ctxC()
// Cancel all siblings.
if n.parent != nil {
for name, _ := range n.parent.groupSiblings(n.name) {
if name == n.name {
continue
}
sibling := n.parent.children[name]
// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
sibling.ctxC()
}
}
}
// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
// The 'GC' serves is the main business logic of the supervision tree. It traverses a locked tree and tries to
// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
// that can be restarted at once, it will do so.
// Phase one: Find all leaves.
// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
for _, c := range cur.children {
queue = append([]*node{c}, queue...)
}
if len(cur.children) == 0 {
leaves[cur.dn()] = true
}
}
// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
// Such a 'ready' subtree can be restarted by the supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
queue = []*node{}
for l, _ := range leaves {
queue = append(queue, s.nodeByDN(l))
}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
curDn := cur.dn()
queue = queue[1:]
// Do we have a decision about our children?
allVisited := true
for _, c := range cur.children {
if !visited[c.dn()] {
allVisited = false
break
}
}
// If no decision about children is available, it means we ended up in this subtree through some shorter path
// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
// to be enqueued. Easy solution: just push back the current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
// All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
// children must be restartable in order for this node to be restartable.
childrenReady := true
for _, c := range cur.children {
if !ready[c.dn()] {
childrenReady = false
break
}
}
// In addition to children, the node itself must be restartable (ie. DONE, DEAD or CANCELED).
curReady := false
switch cur.state {
case nodeStateDone:
curReady = true
case nodeStateCanceled:
curReady = true
case nodeStateDead:
curReady = true
}
// Note down that we have an opinion on this node, and note that opinion down.
visited[curDn] = true
ready[curDn] = childrenReady && curReady
// Now we can also enqueue the parent of this node for processing.
if cur.parent != nil && !visited[cur.parent.dn()] {
queue = append(queue, cur.parent)
}
}
// Phase 3: traverse tree from root to find largest subtrees that need to be restarted and are ready to be
// restarted.
// All DNs that need to be restarted by the GC process.
want := make(map[string]bool)
// All DNs that need to be restarted and can be restarted by the GC process - a subset of 'want' DNs.
can := make(map[string]bool)
// The set difference between 'want' and 'can' are all nodes that should be restarted but can't yet (ie. because
// a child is still in the process of being canceled).
// DFS from root.
queue = []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
// If this node is DEAD or CANCELED it should be restarted.
if cur.state == nodeStateDead || cur.state == nodeStateCanceled {
want[cur.dn()] = true
}
// If it should be restarted and is ready to be restarted...
if want[cur.dn()] && ready[cur.dn()] {
// And its parent context is valid (ie hasn't been canceled), mark it as restartable.
if cur.parent == nil || cur.parent.ctx.Err() == nil {
can[cur.dn()] = true
continue
}
}
// Otherwise, traverse further down the tree to see if something else needs to be done.
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Reinitialize and reschedule all subtrees
for dn, _ := range can {
n := s.nodeByDN(dn)
// Only back off when the node unexpectedly died - not when it got canceled.
bo := time.Duration(0)
if n.state == nodeStateDead {
bo = n.bo.NextBackOff()
}
// Prepare node for rescheduling - remove its children, reset its state to new.
n.reset()
s.ilogger.Infof("rescheduling supervised node %s with backoff %s", dn, bo.String())
// Reschedule node runnable to run after backoff.
go func(n *node, bo time.Duration) {
time.Sleep(bo)
s.pReq <- &processorRequest{
schedule: &processorRequestSchedule{dn: n.dn()},
}
}(n, bo)
}
}
| {
if inner := errors.Unwrap(perr); inner != nil {
perr = inner
continue
}
break
} | conditional_block |
supervisor_processor.go | // Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package supervisor
import (
"context"
"errors"
"fmt"
"runtime/debug"
"time"
)
// The processor maintains runnable goroutines - ie., when requested will start one, and then once it exists it will
// record the result and act accordingly. It is also responsible for detecting and acting upon supervision subtrees that
// need to be restarted after death (via a 'GC' process)
// processorRequest is a request for the processor. Only one of the fields can be set.
type processorRequest struct {
schedule *processorRequestSchedule
died *processorRequestDied
waitSettled *processorRequestWaitSettled
}
// processorRequestSchedule requests that a given node's runnable be started.
type processorRequestSchedule struct {
dn string
}
// processorRequestDied is a signal from a runnable goroutine that the runnable has died.
type processorRequestDied struct {
dn string
err error
}
type processorRequestWaitSettled struct {
waiter chan struct{}
}
// processor is the main processing loop.
func (s *supervisor) processor(ctx context.Context) {
s.ilogger.Info("supervisor processor started")
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
// How long has the GC been clean. This is used to notify 'settled' waiters.
cleanCycles := 0
markDirty := func() {
clean = false
cleanCycles = 0
}
for {
select {
case <-ctx.Done():
s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
case <-gc.C:
if !clean {
s.processGC()
}
clean = true
cleanCycles += 1
// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
}
waiters = nil
}
case r := <-s.pReq:
switch {
case r.schedule != nil:
s.processSchedule(r.schedule)
markDirty()
case r.died != nil:
s.processDied(r.died)
markDirty()
case r.waitSettled != nil:
waiters = append(waiters, r.waitSettled.waiter)
default:
panic(fmt.Errorf("unhandled request %+v", r))
}
}
}
}
// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
// they do not get automatically restarted.
func (s *supervisor) | () {
s.mu.Lock()
defer s.mu.Unlock()
// Gather all context cancel functions.
var cancels []func()
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
cancels = append(cancels, cur.ctxC)
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Call all context cancels.
for _, c := range cancels {
c()
}
}
// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
n := s.nodeByDN(r.dn)
go func() {
if !s.propagatePanic {
defer func() {
if rec := recover(); rec != nil {
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
},
}
}
}()
}
res := n.runnable(n.ctx)
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: res,
},
}
}()
}
// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
// Okay, so a Runnable has quit. What now?
n := s.nodeByDN(r.dn)
ctx := n.ctx
// Simple case: it was marked as Done and quit with no error.
if n.state == nodeStateDone && r.err == nil {
// Do nothing. This was supposed to happen. Keep the process as DONE.
return
}
// Find innermost error to check if it's a context canceled error.
perr := r.err
for {
if inner := errors.Unwrap(perr); inner != nil {
perr = inner
continue
}
break
}
// Simple case: the context was canceled and the returned error is the context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
// Otherwise, the Runnable should not have died or quit. Handle accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
err = fmt.Errorf("returned when %s", n.state)
} else {
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
// Cancel that node's context, just in case something still depends on it.
n.ctxC()
// Cancel all siblings.
if n.parent != nil {
for name, _ := range n.parent.groupSiblings(n.name) {
if name == n.name {
continue
}
sibling := n.parent.children[name]
// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
sibling.ctxC()
}
}
}
// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
// The 'GC' serves is the main business logic of the supervision tree. It traverses a locked tree and tries to
// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
// that can be restarted at once, it will do so.
// Phase one: Find all leaves.
// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
for _, c := range cur.children {
queue = append([]*node{c}, queue...)
}
if len(cur.children) == 0 {
leaves[cur.dn()] = true
}
}
// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
// Such a 'ready' subtree can be restarted by the supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
queue = []*node{}
for l, _ := range leaves {
queue = append(queue, s.nodeByDN(l))
}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
curDn := cur.dn()
queue = queue[1:]
// Do we have a decision about our children?
allVisited := true
for _, c := range cur.children {
if !visited[c.dn()] {
allVisited = false
break
}
}
// If no decision about children is available, it means we ended up in this subtree through some shorter path
// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
// to be enqueued. Easy solution: just push back the current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
// All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
// children must be restartable in order for this node to be restartable.
childrenReady := true
for _, c := range cur.children {
if !ready[c.dn()] {
childrenReady = false
break
}
}
// In addition to children, the node itself must be restartable (ie. DONE, DEAD or CANCELED).
curReady := false
switch cur.state {
case nodeStateDone:
curReady = true
case nodeStateCanceled:
curReady = true
case nodeStateDead:
curReady = true
}
// Note down that we have an opinion on this node, and note that opinion down.
visited[curDn] = true
ready[curDn] = childrenReady && curReady
// Now we can also enqueue the parent of this node for processing.
if cur.parent != nil && !visited[cur.parent.dn()] {
queue = append(queue, cur.parent)
}
}
// Phase 3: traverse tree from root to find largest subtrees that need to be restarted and are ready to be
// restarted.
// All DNs that need to be restarted by the GC process.
want := make(map[string]bool)
// All DNs that need to be restarted and can be restarted by the GC process - a subset of 'want' DNs.
can := make(map[string]bool)
// The set difference between 'want' and 'can' are all nodes that should be restarted but can't yet (ie. because
// a child is still in the process of being canceled).
// DFS from root.
queue = []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
// If this node is DEAD or CANCELED it should be restarted.
if cur.state == nodeStateDead || cur.state == nodeStateCanceled {
want[cur.dn()] = true
}
// If it should be restarted and is ready to be restarted...
if want[cur.dn()] && ready[cur.dn()] {
// And its parent context is valid (ie hasn't been canceled), mark it as restartable.
if cur.parent == nil || cur.parent.ctx.Err() == nil {
can[cur.dn()] = true
continue
}
}
// Otherwise, traverse further down the tree to see if something else needs to be done.
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Reinitialize and reschedule all subtrees
for dn, _ := range can {
n := s.nodeByDN(dn)
// Only back off when the node unexpectedly died - not when it got canceled.
bo := time.Duration(0)
if n.state == nodeStateDead {
bo = n.bo.NextBackOff()
}
// Prepare node for rescheduling - remove its children, reset its state to new.
n.reset()
s.ilogger.Infof("rescheduling supervised node %s with backoff %s", dn, bo.String())
// Reschedule node runnable to run after backoff.
go func(n *node, bo time.Duration) {
time.Sleep(bo)
s.pReq <- &processorRequest{
schedule: &processorRequestSchedule{dn: n.dn()},
}
}(n, bo)
}
}
| processKill | identifier_name |
supervisor_processor.go | // Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package supervisor
import (
"context"
"errors"
"fmt"
"runtime/debug"
"time"
)
// The processor maintains runnable goroutines - ie., when requested will start one, and then once it exists it will
// record the result and act accordingly. It is also responsible for detecting and acting upon supervision subtrees that
// need to be restarted after death (via a 'GC' process)
// processorRequest is a request for the processor. Only one of the fields can be set.
type processorRequest struct {
schedule *processorRequestSchedule
died *processorRequestDied
waitSettled *processorRequestWaitSettled
}
// processorRequestSchedule requests that a given node's runnable be started.
type processorRequestSchedule struct {
dn string
}
// processorRequestDied is a signal from a runnable goroutine that the runnable has died.
type processorRequestDied struct {
dn string
err error
}
type processorRequestWaitSettled struct {
waiter chan struct{}
}
// processor is the main processing loop.
func (s *supervisor) processor(ctx context.Context) {
s.ilogger.Info("supervisor processor started")
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
// How long has the GC been clean. This is used to notify 'settled' waiters.
cleanCycles := 0
markDirty := func() {
clean = false
cleanCycles = 0
}
for {
select {
case <-ctx.Done():
s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
case <-gc.C:
if !clean {
s.processGC()
}
clean = true
cleanCycles += 1
// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
}
waiters = nil
}
case r := <-s.pReq:
switch {
case r.schedule != nil:
s.processSchedule(r.schedule)
markDirty()
case r.died != nil:
s.processDied(r.died)
markDirty()
case r.waitSettled != nil:
waiters = append(waiters, r.waitSettled.waiter)
default:
panic(fmt.Errorf("unhandled request %+v", r))
}
}
}
}
// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
// they do not get automatically restarted.
func (s *supervisor) processKill() {
s.mu.Lock()
defer s.mu.Unlock()
// Gather all context cancel functions.
var cancels []func()
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
cancels = append(cancels, cur.ctxC)
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Call all context cancels.
for _, c := range cancels {
c()
}
}
// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
n := s.nodeByDN(r.dn)
go func() {
if !s.propagatePanic {
defer func() {
if rec := recover(); rec != nil {
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
},
}
}
}()
}
res := n.runnable(n.ctx)
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: res,
},
}
}()
}
// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
| n := s.nodeByDN(r.dn)
ctx := n.ctx
// Simple case: it was marked as Done and quit with no error.
if n.state == nodeStateDone && r.err == nil {
// Do nothing. This was supposed to happen. Keep the process as DONE.
return
}
// Find innermost error to check if it's a context canceled error.
perr := r.err
for {
if inner := errors.Unwrap(perr); inner != nil {
perr = inner
continue
}
break
}
// Simple case: the context was canceled and the returned error is the context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
// Otherwise, the Runnable should not have died or quit. Handle accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
err = fmt.Errorf("returned when %s", n.state)
} else {
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
// Cancel that node's context, just in case something still depends on it.
n.ctxC()
// Cancel all siblings.
if n.parent != nil {
for name, _ := range n.parent.groupSiblings(n.name) {
if name == n.name {
continue
}
sibling := n.parent.children[name]
// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
sibling.ctxC()
}
}
}
// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
// The 'GC' serves is the main business logic of the supervision tree. It traverses a locked tree and tries to
// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
// that can be restarted at once, it will do so.
// Phase one: Find all leaves.
// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
for _, c := range cur.children {
queue = append([]*node{c}, queue...)
}
if len(cur.children) == 0 {
leaves[cur.dn()] = true
}
}
// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
// Such a 'ready' subtree can be restarted by the supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
queue = []*node{}
for l, _ := range leaves {
queue = append(queue, s.nodeByDN(l))
}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
curDn := cur.dn()
queue = queue[1:]
// Do we have a decision about our children?
allVisited := true
for _, c := range cur.children {
if !visited[c.dn()] {
allVisited = false
break
}
}
// If no decision about children is available, it means we ended up in this subtree through some shorter path
// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
// to be enqueued. Easy solution: just push back the current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
// All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
// children must be restartable in order for this node to be restartable.
childrenReady := true
for _, c := range cur.children {
if !ready[c.dn()] {
childrenReady = false
break
}
}
// In addition to children, the node itself must be restartable (ie. DONE, DEAD or CANCELED).
curReady := false
switch cur.state {
case nodeStateDone:
curReady = true
case nodeStateCanceled:
curReady = true
case nodeStateDead:
curReady = true
}
// Note down that we have an opinion on this node, and note that opinion down.
visited[curDn] = true
ready[curDn] = childrenReady && curReady
// Now we can also enqueue the parent of this node for processing.
if cur.parent != nil && !visited[cur.parent.dn()] {
queue = append(queue, cur.parent)
}
}
// Phase 3: traverse tree from root to find largest subtrees that need to be restarted and are ready to be
// restarted.
// All DNs that need to be restarted by the GC process.
want := make(map[string]bool)
// All DNs that need to be restarted and can be restarted by the GC process - a subset of 'want' DNs.
can := make(map[string]bool)
// The set difference between 'want' and 'can' are all nodes that should be restarted but can't yet (ie. because
// a child is still in the process of being canceled).
// DFS from root.
queue = []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
// If this node is DEAD or CANCELED it should be restarted.
if cur.state == nodeStateDead || cur.state == nodeStateCanceled {
want[cur.dn()] = true
}
// If it should be restarted and is ready to be restarted...
if want[cur.dn()] && ready[cur.dn()] {
// And its parent context is valid (ie hasn't been canceled), mark it as restartable.
if cur.parent == nil || cur.parent.ctx.Err() == nil {
can[cur.dn()] = true
continue
}
}
// Otherwise, traverse further down the tree to see if something else needs to be done.
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Reinitialize and reschedule all subtrees
for dn, _ := range can {
n := s.nodeByDN(dn)
// Only back off when the node unexpectedly died - not when it got canceled.
bo := time.Duration(0)
if n.state == nodeStateDead {
bo = n.bo.NextBackOff()
}
// Prepare node for rescheduling - remove its children, reset its state to new.
n.reset()
s.ilogger.Infof("rescheduling supervised node %s with backoff %s", dn, bo.String())
// Reschedule node runnable to run after backoff.
go func(n *node, bo time.Duration) {
time.Sleep(bo)
s.pReq <- &processorRequest{
schedule: &processorRequestSchedule{dn: n.dn()},
}
}(n, bo)
}
} | // Okay, so a Runnable has quit. What now? | random_line_split |
supervisor_processor.go | // Copyright 2020 The Monogon Project Authors.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package supervisor
import (
"context"
"errors"
"fmt"
"runtime/debug"
"time"
)
// The processor maintains runnable goroutines - ie., when requested will start one, and then once it exists it will
// record the result and act accordingly. It is also responsible for detecting and acting upon supervision subtrees that
// need to be restarted after death (via a 'GC' process)
// processorRequest is a request for the processor. Only one of the fields can be set.
type processorRequest struct {
schedule *processorRequestSchedule
died *processorRequestDied
waitSettled *processorRequestWaitSettled
}
// processorRequestSchedule requests that a given node's runnable be started.
type processorRequestSchedule struct {
dn string
}
// processorRequestDied is a signal from a runnable goroutine that the runnable has died.
type processorRequestDied struct {
dn string
err error
}
type processorRequestWaitSettled struct {
waiter chan struct{}
}
// processor is the main processing loop.
func (s *supervisor) processor(ctx context.Context) |
// processKill cancels all nodes in the supervision tree. This is only called right before exiting the processor, so
// they do not get automatically restarted.
func (s *supervisor) processKill() {
s.mu.Lock()
defer s.mu.Unlock()
// Gather all context cancel functions.
var cancels []func()
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
cancels = append(cancels, cur.ctxC)
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Call all context cancels.
for _, c := range cancels {
c()
}
}
// processSchedule starts a node's runnable in a goroutine and records its output once it's done.
func (s *supervisor) processSchedule(r *processorRequestSchedule) {
s.mu.Lock()
defer s.mu.Unlock()
n := s.nodeByDN(r.dn)
go func() {
if !s.propagatePanic {
defer func() {
if rec := recover(); rec != nil {
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: fmt.Errorf("panic: %v, stacktrace: %s", rec, string(debug.Stack())),
},
}
}
}()
}
res := n.runnable(n.ctx)
s.pReq <- &processorRequest{
died: &processorRequestDied{
dn: r.dn,
err: res,
},
}
}()
}
// processDied records the result from a runnable goroutine, and updates its node state accordingly. If the result
// is a death and not an expected exit, related nodes (ie. children and group siblings) are canceled accordingly.
func (s *supervisor) processDied(r *processorRequestDied) {
s.mu.Lock()
defer s.mu.Unlock()
// Okay, so a Runnable has quit. What now?
n := s.nodeByDN(r.dn)
ctx := n.ctx
// Simple case: it was marked as Done and quit with no error.
if n.state == nodeStateDone && r.err == nil {
// Do nothing. This was supposed to happen. Keep the process as DONE.
return
}
// Find innermost error to check if it's a context canceled error.
perr := r.err
for {
if inner := errors.Unwrap(perr); inner != nil {
perr = inner
continue
}
break
}
// Simple case: the context was canceled and the returned error is the context error.
if err := ctx.Err(); err != nil && perr == err {
// Mark the node as canceled successfully.
n.state = nodeStateCanceled
return
}
// Otherwise, the Runnable should not have died or quit. Handle accordingly.
err := r.err
// A lack of returned error is also an error.
if err == nil {
err = fmt.Errorf("returned when %s", n.state)
} else {
err = fmt.Errorf("returned error when %s: %w", n.state, err)
}
s.ilogger.Errorf("Runnable %s died: %v", n.dn(), err)
// Mark as dead.
n.state = nodeStateDead
// Cancel that node's context, just in case something still depends on it.
n.ctxC()
// Cancel all siblings.
if n.parent != nil {
for name, _ := range n.parent.groupSiblings(n.name) {
if name == n.name {
continue
}
sibling := n.parent.children[name]
// TODO(q3k): does this need to run in a goroutine, ie. can a context cancel block?
sibling.ctxC()
}
}
}
// processGC runs the GC process. It's not really Garbage Collection, as in, it doesn't remove unnecessary tree nodes -
// but it does find nodes that need to be restarted, find the subset that can and then schedules them for running.
// As such, it's less of a Garbage Collector and more of a Necromancer. However, GC is a friendlier name.
func (s *supervisor) processGC() {
s.mu.Lock()
defer s.mu.Unlock()
// The 'GC' serves is the main business logic of the supervision tree. It traverses a locked tree and tries to
// find subtrees that must be restarted (because of a DEAD/CANCELED runnable). It then finds which of these
// subtrees that should be restarted can be restarted, ie. which ones are fully recursively DEAD/CANCELED. It
// also finds the smallest set of largest subtrees that can be restarted, ie. if there's multiple DEAD runnables
// that can be restarted at once, it will do so.
// Phase one: Find all leaves.
// This is a simple DFS that finds all the leaves of the tree, ie all nodes that do not have children nodes.
leaves := make(map[string]bool)
queue := []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
for _, c := range cur.children {
queue = append([]*node{c}, queue...)
}
if len(cur.children) == 0 {
leaves[cur.dn()] = true
}
}
// Phase two: traverse tree from node to root and make note of all subtrees that can be restarted.
// A subtree is restartable/ready iff every node in that subtree is either CANCELED, DEAD or DONE.
// Such a 'ready' subtree can be restarted by the supervisor if needed.
// DNs that we already visited.
visited := make(map[string]bool)
// DNs whose subtrees are ready to be restarted.
// These are all subtrees recursively - ie., root.a.a and root.a will both be marked here.
ready := make(map[string]bool)
// We build a queue of nodes to visit, starting from the leaves.
queue = []*node{}
for l, _ := range leaves {
queue = append(queue, s.nodeByDN(l))
}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
curDn := cur.dn()
queue = queue[1:]
// Do we have a decision about our children?
allVisited := true
for _, c := range cur.children {
if !visited[c.dn()] {
allVisited = false
break
}
}
// If no decision about children is available, it means we ended up in this subtree through some shorter path
// of a shorter/lower-order leaf. There is a path to a leaf that's longer than the one that caused this node
// to be enqueued. Easy solution: just push back the current element and retry later.
if !allVisited {
// Push back to queue and wait for a decision later.
queue = append(queue, cur)
continue
}
// All children have been visited and we have an idea about whether they're ready/restartable. All of the node's
// children must be restartable in order for this node to be restartable.
childrenReady := true
for _, c := range cur.children {
if !ready[c.dn()] {
childrenReady = false
break
}
}
// In addition to children, the node itself must be restartable (ie. DONE, DEAD or CANCELED).
curReady := false
switch cur.state {
case nodeStateDone:
curReady = true
case nodeStateCanceled:
curReady = true
case nodeStateDead:
curReady = true
}
// Note down that we have an opinion on this node, and note that opinion down.
visited[curDn] = true
ready[curDn] = childrenReady && curReady
// Now we can also enqueue the parent of this node for processing.
if cur.parent != nil && !visited[cur.parent.dn()] {
queue = append(queue, cur.parent)
}
}
// Phase 3: traverse tree from root to find largest subtrees that need to be restarted and are ready to be
// restarted.
// All DNs that need to be restarted by the GC process.
want := make(map[string]bool)
// All DNs that need to be restarted and can be restarted by the GC process - a subset of 'want' DNs.
can := make(map[string]bool)
// The set difference between 'want' and 'can' are all nodes that should be restarted but can't yet (ie. because
// a child is still in the process of being canceled).
// DFS from root.
queue = []*node{s.root}
for {
if len(queue) == 0 {
break
}
cur := queue[0]
queue = queue[1:]
// If this node is DEAD or CANCELED it should be restarted.
if cur.state == nodeStateDead || cur.state == nodeStateCanceled {
want[cur.dn()] = true
}
// If it should be restarted and is ready to be restarted...
if want[cur.dn()] && ready[cur.dn()] {
// And its parent context is valid (ie hasn't been canceled), mark it as restartable.
if cur.parent == nil || cur.parent.ctx.Err() == nil {
can[cur.dn()] = true
continue
}
}
// Otherwise, traverse further down the tree to see if something else needs to be done.
for _, c := range cur.children {
queue = append(queue, c)
}
}
// Reinitialize and reschedule all subtrees
for dn, _ := range can {
n := s.nodeByDN(dn)
// Only back off when the node unexpectedly died - not when it got canceled.
bo := time.Duration(0)
if n.state == nodeStateDead {
bo = n.bo.NextBackOff()
}
// Prepare node for rescheduling - remove its children, reset its state to new.
n.reset()
s.ilogger.Infof("rescheduling supervised node %s with backoff %s", dn, bo.String())
// Reschedule node runnable to run after backoff.
go func(n *node, bo time.Duration) {
time.Sleep(bo)
s.pReq <- &processorRequest{
schedule: &processorRequestSchedule{dn: n.dn()},
}
}(n, bo)
}
}
| {
s.ilogger.Info("supervisor processor started")
// Waiters waiting for the GC to be settled.
var waiters []chan struct{}
// The GC will run every millisecond if needed. Any time the processor requests a change in the supervision tree
// (ie a death or a new runnable) it will mark the state as dirty and run the GC on the next millisecond cycle.
gc := time.NewTicker(1 * time.Millisecond)
defer gc.Stop()
clean := true
// How long has the GC been clean. This is used to notify 'settled' waiters.
cleanCycles := 0
markDirty := func() {
clean = false
cleanCycles = 0
}
for {
select {
case <-ctx.Done():
s.ilogger.Infof("supervisor processor exiting: %v", ctx.Err())
s.processKill()
s.ilogger.Info("supervisor exited")
return
case <-gc.C:
if !clean {
s.processGC()
}
clean = true
cleanCycles += 1
// This threshold is somewhat arbitrary. It's a balance between test speed and test reliability.
if cleanCycles > 50 {
for _, w := range waiters {
close(w)
}
waiters = nil
}
case r := <-s.pReq:
switch {
case r.schedule != nil:
s.processSchedule(r.schedule)
markDirty()
case r.died != nil:
s.processDied(r.died)
markDirty()
case r.waitSettled != nil:
waiters = append(waiters, r.waitSettled.waiter)
default:
panic(fmt.Errorf("unhandled request %+v", r))
}
}
}
} | identifier_body |
ipc.rs | //! IPC Transport for *nix
#[cfg(unix)]
extern crate tokio_uds;
use std::collections::BTreeMap;
use std::io::{self, Read, Write};
use std::path::Path;
use std::sync::{atomic, Arc};
#[cfg(unix)]
use self::tokio_uds::UnixStream;
use crate::api::SubscriptionId;
use crate::helpers;
use crate::rpc;
use crate::transports::shared::{EventLoopHandle, Response};
use crate::transports::tokio_core::reactor;
use crate::transports::tokio_io::io::{ReadHalf, WriteHalf};
use crate::transports::tokio_io::AsyncRead;
use crate::transports::Result;
use crate::{BatchTransport, DuplexTransport, Error, RequestId, Transport};
use futures::sync::{mpsc, oneshot};
use futures::{self, Future, Stream};
use parking_lot::Mutex;
macro_rules! try_nb {
($e:expr) => {
match $e {
Ok(t) => t,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(futures::Async::NotReady),
Err(e) => {
log::warn!("Unexpected IO error: {:?}", e);
return Err(());
}
}
};
}
type Pending = oneshot::Sender<Result<Vec<Result<rpc::Value>>>>;
type Subscription = mpsc::UnboundedSender<rpc::Value>;
/// A future representing pending IPC request, resolves to a response.
pub type IpcTask<F> = Response<F, Vec<Result<rpc::Value>>>;
/// Unix Domain Sockets (IPC) transport
#[derive(Debug, Clone)]
pub struct Ipc {
id: Arc<atomic::AtomicUsize>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
write_sender: mpsc::UnboundedSender<Vec<u8>>,
}
impl Ipc {
/// Create new IPC transport with separate event loop.
/// NOTE: Dropping event loop handle will stop the transport layer!
///
/// IPC is only available on Unix. On other systems, this always returns an error.
pub fn new<P>(path: P) -> Result<(EventLoopHandle, Self)>
where
P: AsRef<Path>,
{
let path = path.as_ref().to_owned();
EventLoopHandle::spawn(move |handle| Self::with_event_loop(&path, &handle).map_err(Into::into))
}
/// Create new IPC transport within existing Event Loop.
///
/// IPC is only available on Unix. On other systems, this always returns an error.
#[cfg(unix)]
pub fn with_event_loop<P>(path: P, handle: &reactor::Handle) -> Result<Self>
where
P: AsRef<Path>,
{
log::trace!("Connecting to: {:?}", path.as_ref());
let stream = UnixStream::connect(path, handle)?;
Self::with_stream(stream, handle)
}
/// Creates new IPC transport from existing `UnixStream` and `Handle`
#[cfg(unix)]
fn with_stream(stream: UnixStream, handle: &reactor::Handle) -> Result<Self> {
let (read, write) = stream.split();
let (write_sender, write_receiver) = mpsc::unbounded();
let pending: Arc<Mutex<BTreeMap<RequestId, Pending>>> = Default::default();
let subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>> = Default::default();
let r = ReadStream {
read,
pending: pending.clone(),
subscriptions: subscriptions.clone(),
buffer: vec![],
current_pos: 0,
};
let w = WriteStream {
write,
incoming: write_receiver,
state: WriteState::WaitingForRequest,
};
handle.spawn(r);
handle.spawn(w);
Ok(Ipc {
id: Arc::new(atomic::AtomicUsize::new(1)),
write_sender,
pending,
subscriptions,
})
}
#[cfg(not(unix))]
pub fn with_event_loop<P>(_path: P, _handle: &reactor::Handle) -> Result<Self> {
return Err(Error::Transport("IPC transport is only supported on Unix".into()).into());
}
fn send_request<F, O>(&self, id: RequestId, request: rpc::Request, extract: F) -> IpcTask<F>
where
F: Fn(Vec<Result<rpc::Value>>) -> O,
{
let request = helpers::to_string(&request);
log::debug!("[{}] Calling: {}", id, request);
let (tx, rx) = futures::oneshot();
self.pending.lock().insert(id, tx);
let result = self
.write_sender
.unbounded_send(request.into_bytes())
.map_err(|_| Error::Io(io::ErrorKind::BrokenPipe.into()));
Response::new(id, result, rx, extract)
}
}
impl Transport for Ipc {
type Out = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<rpc::Value>>;
fn prepare(&self, method: &str, params: Vec<rpc::Value>) -> (RequestId, rpc::Call) {
let id = self.id.fetch_add(1, atomic::Ordering::AcqRel);
let request = helpers::build_request(id, method, params);
(id, request)
}
fn send(&self, id: RequestId, request: rpc::Call) -> Self::Out {
self.send_request(id, rpc::Request::Single(request), single_response)
}
}
fn single_response(response: Vec<Result<rpc::Value>>) -> Result<rpc::Value> {
match response.into_iter().next() {
Some(res) => res,
None => Err(Error::InvalidResponse("Expected single, got batch.".into())),
}
}
impl BatchTransport for Ipc {
type Batch = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<Vec<Result<rpc::Value>>>>;
fn | <T>(&self, requests: T) -> Self::Batch
where
T: IntoIterator<Item = (RequestId, rpc::Call)>,
{
let mut it = requests.into_iter();
let (id, first) = it.next().map(|x| (x.0, Some(x.1))).unwrap_or_else(|| (0, None));
let requests = first.into_iter().chain(it.map(|x| x.1)).collect();
self.send_request(id, rpc::Request::Batch(requests), Ok)
}
}
impl DuplexTransport for Ipc {
type NotificationStream = Box<dyn Stream<Item = rpc::Value, Error = Error> + Send + 'static>;
fn subscribe(&self, id: &SubscriptionId) -> Self::NotificationStream {
let (tx, rx) = mpsc::unbounded();
if self.subscriptions.lock().insert(id.clone(), tx).is_some() {
log::warn!("Replacing already-registered subscription with id {:?}", id)
}
Box::new(rx.map_err(|()| Error::Transport("No data available".into())))
}
fn unsubscribe(&self, id: &SubscriptionId) {
self.subscriptions.lock().remove(id);
}
}
enum WriteState {
WaitingForRequest,
Writing { buffer: Vec<u8>, current_pos: usize },
}
/// Writing part of the IPC transport
/// Awaits new requests using `mpsc::UnboundedReceiver` and writes them to the socket.
#[cfg(unix)]
struct WriteStream {
write: WriteHalf<UnixStream>,
incoming: mpsc::UnboundedReceiver<Vec<u8>>,
state: WriteState,
}
#[cfg(unix)]
impl Future for WriteStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
self.state = match self.state {
WriteState::WaitingForRequest => {
// Ask for more to write
let to_send = try_ready!(self.incoming.poll());
if let Some(to_send) = to_send {
log::trace!("Got new message to write: {:?}", String::from_utf8_lossy(&to_send));
WriteState::Writing {
buffer: to_send,
current_pos: 0,
}
} else {
return Ok(futures::Async::NotReady);
}
}
WriteState::Writing {
ref buffer,
ref mut current_pos,
} => {
// Write everything in the buffer
while *current_pos < buffer.len() {
let n = try_nb!(self.write.write(&buffer[*current_pos..]));
*current_pos += n;
if n == 0 {
log::warn!("IO Error: Zero write.");
return Err(()); // zero write?
}
}
WriteState::WaitingForRequest
}
};
}
}
}
/// Reading part of the IPC transport.
/// Reads data on the socket and tries to dispatch it to awaiting requests.
#[cfg(unix)]
struct ReadStream {
read: ReadHalf<UnixStream>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
buffer: Vec<u8>,
current_pos: usize,
}
#[cfg(unix)]
impl Future for ReadStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
const DEFAULT_BUF_SIZE: usize = 4096;
let mut new_write_size = 128;
loop {
if self.current_pos == self.buffer.len() {
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
self.buffer.resize(self.current_pos + new_write_size, 0);
}
let read = try_nb!(self.read.read(&mut self.buffer[self.current_pos..]));
if read == 0 {
return Ok(futures::Async::NotReady);
}
let mut min = self.current_pos;
self.current_pos += read;
while let Some((response, len)) = Self::extract_response(&self.buffer[0..self.current_pos], min) {
// Respond
self.respond(response);
// copy rest of buffer to the beginning
for i in len..self.current_pos {
self.buffer.swap(i, i - len);
}
// truncate the buffer
let new_len = self.current_pos - len;
self.buffer.truncate(new_len + new_write_size);
// Set new positions
self.current_pos = new_len;
min = 0;
}
}
}
}
enum Message {
Rpc(Vec<rpc::Output>),
Notification(rpc::Notification),
}
#[cfg(unix)]
impl ReadStream {
fn respond(&self, response: Message) {
match response {
Message::Rpc(outputs) => {
let id = match outputs.get(0) {
Some(&rpc::Output::Success(ref success)) => success.id.clone(),
Some(&rpc::Output::Failure(ref failure)) => failure.id.clone(),
None => rpc::Id::Num(0),
};
if let rpc::Id::Num(num) = id {
if let Some(request) = self.pending.lock().remove(&(num as usize)) {
log::trace!("Responding to (id: {:?}) with {:?}", num, outputs);
if let Err(err) = request.send(helpers::to_results_from_outputs(outputs)) {
log::warn!("Sending a response to deallocated channel: {:?}", err);
}
} else {
log::warn!("Got response for unknown request (id: {:?})", num);
}
} else {
log::warn!("Got unsupported response (id: {:?})", id);
}
}
Message::Notification(notification) => {
if let rpc::Params::Map(params) = notification.params {
let id = params.get("subscription");
let result = params.get("result");
if let (Some(&rpc::Value::String(ref id)), Some(result)) = (id, result) {
let id: SubscriptionId = id.clone().into();
if let Some(stream) = self.subscriptions.lock().get(&id) {
if let Err(e) = stream.unbounded_send(result.clone()) {
log::error!("Error sending notification (id: {:?}): {:?}", id, e);
}
} else {
log::warn!("Got notification for unknown subscription (id: {:?})", id);
}
} else {
log::error!("Got unsupported notification (id: {:?})", id);
}
}
}
}
}
fn extract_response(buf: &[u8], min: usize) -> Option<(Message, usize)> {
for pos in (min..buf.len()).rev() {
// Look for end character
if buf[pos] == b']' || buf[pos] == b'}' {
// Try to deserialize
let pos = pos + 1;
match helpers::to_response_from_slice(&buf[0..pos]) {
Ok(rpc::Response::Single(output)) => return Some((Message::Rpc(vec![output]), pos)),
Ok(rpc::Response::Batch(outputs)) => return Some((Message::Rpc(outputs), pos)),
// just continue
_ => {}
}
match helpers::to_notification_from_slice(&buf[0..pos]) {
Ok(notification) => return Some((Message::Notification(notification), pos)),
_ => {}
}
}
}
None
}
}
#[cfg(all(test, unix))]
mod tests {
extern crate tokio_core;
extern crate tokio_uds;
use super::Ipc;
use crate::rpc;
use crate::Transport;
use futures::{self, Future};
use std::io::{self, Read, Write};
#[test]
fn should_send_a_request() {
// given
let mut eloop = tokio_core::reactor::Core::new().unwrap();
let handle = eloop.handle();
let (server, client) = tokio_uds::UnixStream::pair(&handle).unwrap();
let ipc = Ipc::with_stream(client, &handle).unwrap();
eloop.remote().spawn(move |_| {
struct Task {
server: tokio_uds::UnixStream,
}
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<(), ()> {
let mut data = [0; 2048];
// Read request
let read = try_nb!(self.server.read(&mut data));
let request = String::from_utf8(data[0..read].to_vec()).unwrap();
assert_eq!(
&request,
r#"{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":1}"#
);
// Write response
let response = r#"{"jsonrpc":"2.0","id":1,"result":"x"}"#;
self.server.write_all(response.as_bytes()).unwrap();
self.server.flush().unwrap();
Ok(futures::Async::Ready(()))
}
}
Task { server }
});
// when
let res = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
// then
assert_eq!(eloop.run(res), Ok(rpc::Value::String("x".into())));
}
#[test]
fn should_handle_double_response() {
// given
let mut eloop = tokio_core::reactor::Core::new().unwrap();
let handle = eloop.handle();
let (server, client) = tokio_uds::UnixStream::pair(&handle).unwrap();
let ipc = Ipc::with_stream(client, &handle).unwrap();
eloop.remote().spawn(move |_| {
struct Task {
server: tokio_uds::UnixStream,
}
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<(), ()> {
let mut data = [0; 2048];
// Read request
let read = try_nb!(self.server.read(&mut data));
let request = String::from_utf8(data[0..read].to_vec()).unwrap();
assert_eq!(&request, r#"{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":1}{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":2}"#);
// Write response
let response = r#"{"jsonrpc":"2.0","id":1,"result":"x"}{"jsonrpc":"2.0","id":2,"result":"x"}"#;
self.server.write_all(response.as_bytes()).unwrap();
self.server.flush().unwrap();
Ok(futures::Async::Ready(()))
}
}
Task { server }
});
// when
let res1 = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
let res2 = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
// then
assert_eq!(
eloop.run(res1.join(res2)),
Ok((rpc::Value::String("x".into()), rpc::Value::String("x".into())))
);
}
}
| send_batch | identifier_name |
ipc.rs | //! IPC Transport for *nix
#[cfg(unix)]
extern crate tokio_uds;
use std::collections::BTreeMap;
use std::io::{self, Read, Write};
use std::path::Path;
use std::sync::{atomic, Arc};
#[cfg(unix)]
use self::tokio_uds::UnixStream;
use crate::api::SubscriptionId;
use crate::helpers;
use crate::rpc;
use crate::transports::shared::{EventLoopHandle, Response};
use crate::transports::tokio_core::reactor;
use crate::transports::tokio_io::io::{ReadHalf, WriteHalf};
use crate::transports::tokio_io::AsyncRead;
use crate::transports::Result;
use crate::{BatchTransport, DuplexTransport, Error, RequestId, Transport};
use futures::sync::{mpsc, oneshot};
use futures::{self, Future, Stream};
use parking_lot::Mutex;
macro_rules! try_nb {
($e:expr) => {
match $e {
Ok(t) => t,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(futures::Async::NotReady),
Err(e) => {
log::warn!("Unexpected IO error: {:?}", e);
return Err(());
}
}
};
}
type Pending = oneshot::Sender<Result<Vec<Result<rpc::Value>>>>;
type Subscription = mpsc::UnboundedSender<rpc::Value>;
/// A future representing pending IPC request, resolves to a response.
pub type IpcTask<F> = Response<F, Vec<Result<rpc::Value>>>;
/// Unix Domain Sockets (IPC) transport
#[derive(Debug, Clone)]
pub struct Ipc {
id: Arc<atomic::AtomicUsize>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
write_sender: mpsc::UnboundedSender<Vec<u8>>,
}
impl Ipc {
/// Create new IPC transport with separate event loop.
/// NOTE: Dropping event loop handle will stop the transport layer!
///
/// IPC is only available on Unix. On other systems, this always returns an error.
pub fn new<P>(path: P) -> Result<(EventLoopHandle, Self)>
where
P: AsRef<Path>,
{
let path = path.as_ref().to_owned();
EventLoopHandle::spawn(move |handle| Self::with_event_loop(&path, &handle).map_err(Into::into))
}
/// Create new IPC transport within existing Event Loop.
///
/// IPC is only available on Unix. On other systems, this always returns an error.
#[cfg(unix)]
pub fn with_event_loop<P>(path: P, handle: &reactor::Handle) -> Result<Self>
where
P: AsRef<Path>,
{
log::trace!("Connecting to: {:?}", path.as_ref());
let stream = UnixStream::connect(path, handle)?;
Self::with_stream(stream, handle)
}
/// Creates new IPC transport from existing `UnixStream` and `Handle`
#[cfg(unix)]
fn with_stream(stream: UnixStream, handle: &reactor::Handle) -> Result<Self> {
let (read, write) = stream.split();
let (write_sender, write_receiver) = mpsc::unbounded();
let pending: Arc<Mutex<BTreeMap<RequestId, Pending>>> = Default::default();
let subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>> = Default::default();
let r = ReadStream {
read,
pending: pending.clone(),
subscriptions: subscriptions.clone(),
buffer: vec![],
current_pos: 0,
};
let w = WriteStream {
write,
incoming: write_receiver,
state: WriteState::WaitingForRequest,
};
handle.spawn(r);
handle.spawn(w);
Ok(Ipc {
id: Arc::new(atomic::AtomicUsize::new(1)),
write_sender,
pending,
subscriptions,
})
}
#[cfg(not(unix))]
pub fn with_event_loop<P>(_path: P, _handle: &reactor::Handle) -> Result<Self> {
return Err(Error::Transport("IPC transport is only supported on Unix".into()).into());
}
fn send_request<F, O>(&self, id: RequestId, request: rpc::Request, extract: F) -> IpcTask<F>
where
F: Fn(Vec<Result<rpc::Value>>) -> O,
{
let request = helpers::to_string(&request);
log::debug!("[{}] Calling: {}", id, request);
let (tx, rx) = futures::oneshot();
self.pending.lock().insert(id, tx);
let result = self
.write_sender
.unbounded_send(request.into_bytes())
.map_err(|_| Error::Io(io::ErrorKind::BrokenPipe.into()));
Response::new(id, result, rx, extract)
}
}
impl Transport for Ipc {
type Out = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<rpc::Value>>;
fn prepare(&self, method: &str, params: Vec<rpc::Value>) -> (RequestId, rpc::Call) {
let id = self.id.fetch_add(1, atomic::Ordering::AcqRel);
let request = helpers::build_request(id, method, params);
(id, request)
}
fn send(&self, id: RequestId, request: rpc::Call) -> Self::Out {
self.send_request(id, rpc::Request::Single(request), single_response)
}
}
fn single_response(response: Vec<Result<rpc::Value>>) -> Result<rpc::Value> {
match response.into_iter().next() {
Some(res) => res,
None => Err(Error::InvalidResponse("Expected single, got batch.".into())),
}
}
impl BatchTransport for Ipc {
type Batch = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<Vec<Result<rpc::Value>>>>;
fn send_batch<T>(&self, requests: T) -> Self::Batch
where
T: IntoIterator<Item = (RequestId, rpc::Call)>,
{
let mut it = requests.into_iter();
let (id, first) = it.next().map(|x| (x.0, Some(x.1))).unwrap_or_else(|| (0, None));
let requests = first.into_iter().chain(it.map(|x| x.1)).collect();
self.send_request(id, rpc::Request::Batch(requests), Ok)
}
}
impl DuplexTransport for Ipc {
type NotificationStream = Box<dyn Stream<Item = rpc::Value, Error = Error> + Send + 'static>;
fn subscribe(&self, id: &SubscriptionId) -> Self::NotificationStream {
let (tx, rx) = mpsc::unbounded();
if self.subscriptions.lock().insert(id.clone(), tx).is_some() {
log::warn!("Replacing already-registered subscription with id {:?}", id)
}
Box::new(rx.map_err(|()| Error::Transport("No data available".into())))
}
fn unsubscribe(&self, id: &SubscriptionId) {
self.subscriptions.lock().remove(id);
}
}
enum WriteState {
WaitingForRequest,
Writing { buffer: Vec<u8>, current_pos: usize },
}
/// Writing part of the IPC transport
/// Awaits new requests using `mpsc::UnboundedReceiver` and writes them to the socket.
#[cfg(unix)]
struct WriteStream {
write: WriteHalf<UnixStream>,
incoming: mpsc::UnboundedReceiver<Vec<u8>>,
state: WriteState,
}
#[cfg(unix)]
impl Future for WriteStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
self.state = match self.state {
WriteState::WaitingForRequest => {
// Ask for more to write
let to_send = try_ready!(self.incoming.poll());
if let Some(to_send) = to_send {
log::trace!("Got new message to write: {:?}", String::from_utf8_lossy(&to_send));
WriteState::Writing {
buffer: to_send,
current_pos: 0,
}
} else {
return Ok(futures::Async::NotReady);
}
}
WriteState::Writing {
ref buffer,
ref mut current_pos,
} => {
// Write everything in the buffer
while *current_pos < buffer.len() {
let n = try_nb!(self.write.write(&buffer[*current_pos..]));
*current_pos += n;
if n == 0 {
log::warn!("IO Error: Zero write.");
return Err(()); // zero write?
}
}
WriteState::WaitingForRequest
}
};
}
}
}
/// Reading part of the IPC transport.
/// Reads data on the socket and tries to dispatch it to awaiting requests.
#[cfg(unix)]
struct ReadStream {
read: ReadHalf<UnixStream>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
buffer: Vec<u8>,
current_pos: usize,
}
#[cfg(unix)]
impl Future for ReadStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
const DEFAULT_BUF_SIZE: usize = 4096;
let mut new_write_size = 128;
loop {
if self.current_pos == self.buffer.len() {
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
self.buffer.resize(self.current_pos + new_write_size, 0);
}
let read = try_nb!(self.read.read(&mut self.buffer[self.current_pos..]));
if read == 0 {
return Ok(futures::Async::NotReady);
}
let mut min = self.current_pos;
self.current_pos += read;
while let Some((response, len)) = Self::extract_response(&self.buffer[0..self.current_pos], min) {
// Respond
self.respond(response);
// copy rest of buffer to the beginning
for i in len..self.current_pos {
self.buffer.swap(i, i - len);
}
// truncate the buffer
let new_len = self.current_pos - len;
self.buffer.truncate(new_len + new_write_size);
// Set new positions
self.current_pos = new_len;
min = 0;
}
}
}
}
enum Message {
Rpc(Vec<rpc::Output>),
Notification(rpc::Notification),
}
#[cfg(unix)]
impl ReadStream {
fn respond(&self, response: Message) {
match response {
Message::Rpc(outputs) => {
let id = match outputs.get(0) {
Some(&rpc::Output::Success(ref success)) => success.id.clone(),
Some(&rpc::Output::Failure(ref failure)) => failure.id.clone(),
None => rpc::Id::Num(0),
};
if let rpc::Id::Num(num) = id | else {
log::warn!("Got unsupported response (id: {:?})", id);
}
}
Message::Notification(notification) => {
if let rpc::Params::Map(params) = notification.params {
let id = params.get("subscription");
let result = params.get("result");
if let (Some(&rpc::Value::String(ref id)), Some(result)) = (id, result) {
let id: SubscriptionId = id.clone().into();
if let Some(stream) = self.subscriptions.lock().get(&id) {
if let Err(e) = stream.unbounded_send(result.clone()) {
log::error!("Error sending notification (id: {:?}): {:?}", id, e);
}
} else {
log::warn!("Got notification for unknown subscription (id: {:?})", id);
}
} else {
log::error!("Got unsupported notification (id: {:?})", id);
}
}
}
}
}
fn extract_response(buf: &[u8], min: usize) -> Option<(Message, usize)> {
for pos in (min..buf.len()).rev() {
// Look for end character
if buf[pos] == b']' || buf[pos] == b'}' {
// Try to deserialize
let pos = pos + 1;
match helpers::to_response_from_slice(&buf[0..pos]) {
Ok(rpc::Response::Single(output)) => return Some((Message::Rpc(vec![output]), pos)),
Ok(rpc::Response::Batch(outputs)) => return Some((Message::Rpc(outputs), pos)),
// just continue
_ => {}
}
match helpers::to_notification_from_slice(&buf[0..pos]) {
Ok(notification) => return Some((Message::Notification(notification), pos)),
_ => {}
}
}
}
None
}
}
#[cfg(all(test, unix))]
mod tests {
extern crate tokio_core;
extern crate tokio_uds;
use super::Ipc;
use crate::rpc;
use crate::Transport;
use futures::{self, Future};
use std::io::{self, Read, Write};
#[test]
fn should_send_a_request() {
// given
let mut eloop = tokio_core::reactor::Core::new().unwrap();
let handle = eloop.handle();
let (server, client) = tokio_uds::UnixStream::pair(&handle).unwrap();
let ipc = Ipc::with_stream(client, &handle).unwrap();
eloop.remote().spawn(move |_| {
struct Task {
server: tokio_uds::UnixStream,
}
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<(), ()> {
let mut data = [0; 2048];
// Read request
let read = try_nb!(self.server.read(&mut data));
let request = String::from_utf8(data[0..read].to_vec()).unwrap();
assert_eq!(
&request,
r#"{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":1}"#
);
// Write response
let response = r#"{"jsonrpc":"2.0","id":1,"result":"x"}"#;
self.server.write_all(response.as_bytes()).unwrap();
self.server.flush().unwrap();
Ok(futures::Async::Ready(()))
}
}
Task { server }
});
// when
let res = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
// then
assert_eq!(eloop.run(res), Ok(rpc::Value::String("x".into())));
}
#[test]
fn should_handle_double_response() {
// given
let mut eloop = tokio_core::reactor::Core::new().unwrap();
let handle = eloop.handle();
let (server, client) = tokio_uds::UnixStream::pair(&handle).unwrap();
let ipc = Ipc::with_stream(client, &handle).unwrap();
eloop.remote().spawn(move |_| {
struct Task {
server: tokio_uds::UnixStream,
}
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<(), ()> {
let mut data = [0; 2048];
// Read request
let read = try_nb!(self.server.read(&mut data));
let request = String::from_utf8(data[0..read].to_vec()).unwrap();
assert_eq!(&request, r#"{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":1}{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":2}"#);
// Write response
let response = r#"{"jsonrpc":"2.0","id":1,"result":"x"}{"jsonrpc":"2.0","id":2,"result":"x"}"#;
self.server.write_all(response.as_bytes()).unwrap();
self.server.flush().unwrap();
Ok(futures::Async::Ready(()))
}
}
Task { server }
});
// when
let res1 = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
let res2 = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
// then
assert_eq!(
eloop.run(res1.join(res2)),
Ok((rpc::Value::String("x".into()), rpc::Value::String("x".into())))
);
}
}
| {
if let Some(request) = self.pending.lock().remove(&(num as usize)) {
log::trace!("Responding to (id: {:?}) with {:?}", num, outputs);
if let Err(err) = request.send(helpers::to_results_from_outputs(outputs)) {
log::warn!("Sending a response to deallocated channel: {:?}", err);
}
} else {
log::warn!("Got response for unknown request (id: {:?})", num);
}
} | conditional_block |
ipc.rs | //! IPC Transport for *nix
#[cfg(unix)]
extern crate tokio_uds;
use std::collections::BTreeMap;
use std::io::{self, Read, Write};
use std::path::Path;
use std::sync::{atomic, Arc};
#[cfg(unix)]
use self::tokio_uds::UnixStream;
use crate::api::SubscriptionId;
use crate::helpers;
use crate::rpc;
use crate::transports::shared::{EventLoopHandle, Response};
use crate::transports::tokio_core::reactor;
use crate::transports::tokio_io::io::{ReadHalf, WriteHalf};
use crate::transports::tokio_io::AsyncRead;
use crate::transports::Result;
use crate::{BatchTransport, DuplexTransport, Error, RequestId, Transport};
use futures::sync::{mpsc, oneshot};
use futures::{self, Future, Stream};
use parking_lot::Mutex;
macro_rules! try_nb {
($e:expr) => {
match $e {
Ok(t) => t,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(futures::Async::NotReady),
Err(e) => {
log::warn!("Unexpected IO error: {:?}", e);
return Err(());
}
}
};
}
type Pending = oneshot::Sender<Result<Vec<Result<rpc::Value>>>>;
type Subscription = mpsc::UnboundedSender<rpc::Value>;
/// A future representing pending IPC request, resolves to a response.
pub type IpcTask<F> = Response<F, Vec<Result<rpc::Value>>>;
/// Unix Domain Sockets (IPC) transport
#[derive(Debug, Clone)]
pub struct Ipc {
id: Arc<atomic::AtomicUsize>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
write_sender: mpsc::UnboundedSender<Vec<u8>>,
}
impl Ipc {
/// Create new IPC transport with separate event loop.
/// NOTE: Dropping event loop handle will stop the transport layer!
///
/// IPC is only available on Unix. On other systems, this always returns an error.
pub fn new<P>(path: P) -> Result<(EventLoopHandle, Self)>
where
P: AsRef<Path>,
{
let path = path.as_ref().to_owned();
EventLoopHandle::spawn(move |handle| Self::with_event_loop(&path, &handle).map_err(Into::into))
}
/// Create new IPC transport within existing Event Loop.
///
/// IPC is only available on Unix. On other systems, this always returns an error.
#[cfg(unix)]
pub fn with_event_loop<P>(path: P, handle: &reactor::Handle) -> Result<Self>
where
P: AsRef<Path>,
{
log::trace!("Connecting to: {:?}", path.as_ref());
let stream = UnixStream::connect(path, handle)?;
Self::with_stream(stream, handle)
}
/// Creates new IPC transport from existing `UnixStream` and `Handle`
#[cfg(unix)]
fn with_stream(stream: UnixStream, handle: &reactor::Handle) -> Result<Self> {
let (read, write) = stream.split();
let (write_sender, write_receiver) = mpsc::unbounded();
let pending: Arc<Mutex<BTreeMap<RequestId, Pending>>> = Default::default();
let subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>> = Default::default();
let r = ReadStream {
read,
pending: pending.clone(),
subscriptions: subscriptions.clone(),
buffer: vec![],
current_pos: 0,
};
let w = WriteStream {
write,
incoming: write_receiver,
state: WriteState::WaitingForRequest,
};
handle.spawn(r);
handle.spawn(w);
Ok(Ipc {
id: Arc::new(atomic::AtomicUsize::new(1)),
write_sender,
pending,
subscriptions,
})
}
#[cfg(not(unix))]
pub fn with_event_loop<P>(_path: P, _handle: &reactor::Handle) -> Result<Self> {
return Err(Error::Transport("IPC transport is only supported on Unix".into()).into());
}
fn send_request<F, O>(&self, id: RequestId, request: rpc::Request, extract: F) -> IpcTask<F>
where
F: Fn(Vec<Result<rpc::Value>>) -> O,
{
let request = helpers::to_string(&request);
log::debug!("[{}] Calling: {}", id, request);
let (tx, rx) = futures::oneshot();
self.pending.lock().insert(id, tx);
let result = self
.write_sender
.unbounded_send(request.into_bytes())
.map_err(|_| Error::Io(io::ErrorKind::BrokenPipe.into()));
Response::new(id, result, rx, extract)
}
}
impl Transport for Ipc {
type Out = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<rpc::Value>>;
fn prepare(&self, method: &str, params: Vec<rpc::Value>) -> (RequestId, rpc::Call) {
let id = self.id.fetch_add(1, atomic::Ordering::AcqRel);
let request = helpers::build_request(id, method, params);
(id, request)
}
fn send(&self, id: RequestId, request: rpc::Call) -> Self::Out {
self.send_request(id, rpc::Request::Single(request), single_response)
}
}
fn single_response(response: Vec<Result<rpc::Value>>) -> Result<rpc::Value> {
match response.into_iter().next() {
Some(res) => res,
None => Err(Error::InvalidResponse("Expected single, got batch.".into())),
}
}
impl BatchTransport for Ipc {
type Batch = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<Vec<Result<rpc::Value>>>>;
fn send_batch<T>(&self, requests: T) -> Self::Batch
where
T: IntoIterator<Item = (RequestId, rpc::Call)>,
{
let mut it = requests.into_iter();
let (id, first) = it.next().map(|x| (x.0, Some(x.1))).unwrap_or_else(|| (0, None));
let requests = first.into_iter().chain(it.map(|x| x.1)).collect();
self.send_request(id, rpc::Request::Batch(requests), Ok)
}
}
impl DuplexTransport for Ipc {
type NotificationStream = Box<dyn Stream<Item = rpc::Value, Error = Error> + Send + 'static>;
fn subscribe(&self, id: &SubscriptionId) -> Self::NotificationStream |
fn unsubscribe(&self, id: &SubscriptionId) {
self.subscriptions.lock().remove(id);
}
}
enum WriteState {
WaitingForRequest,
Writing { buffer: Vec<u8>, current_pos: usize },
}
/// Writing part of the IPC transport
/// Awaits new requests using `mpsc::UnboundedReceiver` and writes them to the socket.
#[cfg(unix)]
struct WriteStream {
write: WriteHalf<UnixStream>,
incoming: mpsc::UnboundedReceiver<Vec<u8>>,
state: WriteState,
}
#[cfg(unix)]
impl Future for WriteStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
self.state = match self.state {
WriteState::WaitingForRequest => {
// Ask for more to write
let to_send = try_ready!(self.incoming.poll());
if let Some(to_send) = to_send {
log::trace!("Got new message to write: {:?}", String::from_utf8_lossy(&to_send));
WriteState::Writing {
buffer: to_send,
current_pos: 0,
}
} else {
return Ok(futures::Async::NotReady);
}
}
WriteState::Writing {
ref buffer,
ref mut current_pos,
} => {
// Write everything in the buffer
while *current_pos < buffer.len() {
let n = try_nb!(self.write.write(&buffer[*current_pos..]));
*current_pos += n;
if n == 0 {
log::warn!("IO Error: Zero write.");
return Err(()); // zero write?
}
}
WriteState::WaitingForRequest
}
};
}
}
}
/// Reading part of the IPC transport.
/// Reads data on the socket and tries to dispatch it to awaiting requests.
#[cfg(unix)]
struct ReadStream {
read: ReadHalf<UnixStream>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
buffer: Vec<u8>,
current_pos: usize,
}
#[cfg(unix)]
impl Future for ReadStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
const DEFAULT_BUF_SIZE: usize = 4096;
let mut new_write_size = 128;
loop {
if self.current_pos == self.buffer.len() {
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
self.buffer.resize(self.current_pos + new_write_size, 0);
}
let read = try_nb!(self.read.read(&mut self.buffer[self.current_pos..]));
if read == 0 {
return Ok(futures::Async::NotReady);
}
let mut min = self.current_pos;
self.current_pos += read;
while let Some((response, len)) = Self::extract_response(&self.buffer[0..self.current_pos], min) {
// Respond
self.respond(response);
// copy rest of buffer to the beginning
for i in len..self.current_pos {
self.buffer.swap(i, i - len);
}
// truncate the buffer
let new_len = self.current_pos - len;
self.buffer.truncate(new_len + new_write_size);
// Set new positions
self.current_pos = new_len;
min = 0;
}
}
}
}
enum Message {
Rpc(Vec<rpc::Output>),
Notification(rpc::Notification),
}
#[cfg(unix)]
impl ReadStream {
fn respond(&self, response: Message) {
match response {
Message::Rpc(outputs) => {
let id = match outputs.get(0) {
Some(&rpc::Output::Success(ref success)) => success.id.clone(),
Some(&rpc::Output::Failure(ref failure)) => failure.id.clone(),
None => rpc::Id::Num(0),
};
if let rpc::Id::Num(num) = id {
if let Some(request) = self.pending.lock().remove(&(num as usize)) {
log::trace!("Responding to (id: {:?}) with {:?}", num, outputs);
if let Err(err) = request.send(helpers::to_results_from_outputs(outputs)) {
log::warn!("Sending a response to deallocated channel: {:?}", err);
}
} else {
log::warn!("Got response for unknown request (id: {:?})", num);
}
} else {
log::warn!("Got unsupported response (id: {:?})", id);
}
}
Message::Notification(notification) => {
if let rpc::Params::Map(params) = notification.params {
let id = params.get("subscription");
let result = params.get("result");
if let (Some(&rpc::Value::String(ref id)), Some(result)) = (id, result) {
let id: SubscriptionId = id.clone().into();
if let Some(stream) = self.subscriptions.lock().get(&id) {
if let Err(e) = stream.unbounded_send(result.clone()) {
log::error!("Error sending notification (id: {:?}): {:?}", id, e);
}
} else {
log::warn!("Got notification for unknown subscription (id: {:?})", id);
}
} else {
log::error!("Got unsupported notification (id: {:?})", id);
}
}
}
}
}
fn extract_response(buf: &[u8], min: usize) -> Option<(Message, usize)> {
for pos in (min..buf.len()).rev() {
// Look for end character
if buf[pos] == b']' || buf[pos] == b'}' {
// Try to deserialize
let pos = pos + 1;
match helpers::to_response_from_slice(&buf[0..pos]) {
Ok(rpc::Response::Single(output)) => return Some((Message::Rpc(vec![output]), pos)),
Ok(rpc::Response::Batch(outputs)) => return Some((Message::Rpc(outputs), pos)),
// just continue
_ => {}
}
match helpers::to_notification_from_slice(&buf[0..pos]) {
Ok(notification) => return Some((Message::Notification(notification), pos)),
_ => {}
}
}
}
None
}
}
#[cfg(all(test, unix))]
mod tests {
extern crate tokio_core;
extern crate tokio_uds;
use super::Ipc;
use crate::rpc;
use crate::Transport;
use futures::{self, Future};
use std::io::{self, Read, Write};
#[test]
fn should_send_a_request() {
// given
let mut eloop = tokio_core::reactor::Core::new().unwrap();
let handle = eloop.handle();
let (server, client) = tokio_uds::UnixStream::pair(&handle).unwrap();
let ipc = Ipc::with_stream(client, &handle).unwrap();
eloop.remote().spawn(move |_| {
struct Task {
server: tokio_uds::UnixStream,
}
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<(), ()> {
let mut data = [0; 2048];
// Read request
let read = try_nb!(self.server.read(&mut data));
let request = String::from_utf8(data[0..read].to_vec()).unwrap();
assert_eq!(
&request,
r#"{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":1}"#
);
// Write response
let response = r#"{"jsonrpc":"2.0","id":1,"result":"x"}"#;
self.server.write_all(response.as_bytes()).unwrap();
self.server.flush().unwrap();
Ok(futures::Async::Ready(()))
}
}
Task { server }
});
// when
let res = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
// then
assert_eq!(eloop.run(res), Ok(rpc::Value::String("x".into())));
}
#[test]
fn should_handle_double_response() {
// given
let mut eloop = tokio_core::reactor::Core::new().unwrap();
let handle = eloop.handle();
let (server, client) = tokio_uds::UnixStream::pair(&handle).unwrap();
let ipc = Ipc::with_stream(client, &handle).unwrap();
eloop.remote().spawn(move |_| {
struct Task {
server: tokio_uds::UnixStream,
}
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<(), ()> {
let mut data = [0; 2048];
// Read request
let read = try_nb!(self.server.read(&mut data));
let request = String::from_utf8(data[0..read].to_vec()).unwrap();
assert_eq!(&request, r#"{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":1}{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":2}"#);
// Write response
let response = r#"{"jsonrpc":"2.0","id":1,"result":"x"}{"jsonrpc":"2.0","id":2,"result":"x"}"#;
self.server.write_all(response.as_bytes()).unwrap();
self.server.flush().unwrap();
Ok(futures::Async::Ready(()))
}
}
Task { server }
});
// when
let res1 = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
let res2 = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
// then
assert_eq!(
eloop.run(res1.join(res2)),
Ok((rpc::Value::String("x".into()), rpc::Value::String("x".into())))
);
}
}
| {
let (tx, rx) = mpsc::unbounded();
if self.subscriptions.lock().insert(id.clone(), tx).is_some() {
log::warn!("Replacing already-registered subscription with id {:?}", id)
}
Box::new(rx.map_err(|()| Error::Transport("No data available".into())))
} | identifier_body |
ipc.rs | //! IPC Transport for *nix
#[cfg(unix)]
extern crate tokio_uds;
use std::collections::BTreeMap;
use std::io::{self, Read, Write};
use std::path::Path;
use std::sync::{atomic, Arc};
#[cfg(unix)]
use self::tokio_uds::UnixStream;
use crate::api::SubscriptionId;
use crate::helpers;
use crate::rpc;
use crate::transports::shared::{EventLoopHandle, Response};
use crate::transports::tokio_core::reactor;
use crate::transports::tokio_io::io::{ReadHalf, WriteHalf};
use crate::transports::tokio_io::AsyncRead;
use crate::transports::Result;
use crate::{BatchTransport, DuplexTransport, Error, RequestId, Transport};
use futures::sync::{mpsc, oneshot};
use futures::{self, Future, Stream};
use parking_lot::Mutex;
macro_rules! try_nb {
($e:expr) => {
match $e {
Ok(t) => t,
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return Ok(futures::Async::NotReady),
Err(e) => {
log::warn!("Unexpected IO error: {:?}", e);
return Err(());
}
}
};
}
type Pending = oneshot::Sender<Result<Vec<Result<rpc::Value>>>>;
type Subscription = mpsc::UnboundedSender<rpc::Value>;
/// A future representing pending IPC request, resolves to a response.
pub type IpcTask<F> = Response<F, Vec<Result<rpc::Value>>>;
/// Unix Domain Sockets (IPC) transport
#[derive(Debug, Clone)]
pub struct Ipc {
id: Arc<atomic::AtomicUsize>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
write_sender: mpsc::UnboundedSender<Vec<u8>>,
}
impl Ipc {
/// Create new IPC transport with separate event loop.
/// NOTE: Dropping event loop handle will stop the transport layer!
///
/// IPC is only available on Unix. On other systems, this always returns an error.
pub fn new<P>(path: P) -> Result<(EventLoopHandle, Self)>
where
P: AsRef<Path>,
{
let path = path.as_ref().to_owned();
EventLoopHandle::spawn(move |handle| Self::with_event_loop(&path, &handle).map_err(Into::into))
}
/// Create new IPC transport within existing Event Loop.
///
/// IPC is only available on Unix. On other systems, this always returns an error.
#[cfg(unix)]
pub fn with_event_loop<P>(path: P, handle: &reactor::Handle) -> Result<Self>
where
P: AsRef<Path>,
{
log::trace!("Connecting to: {:?}", path.as_ref());
let stream = UnixStream::connect(path, handle)?;
Self::with_stream(stream, handle)
}
/// Creates new IPC transport from existing `UnixStream` and `Handle`
#[cfg(unix)]
fn with_stream(stream: UnixStream, handle: &reactor::Handle) -> Result<Self> {
let (read, write) = stream.split();
let (write_sender, write_receiver) = mpsc::unbounded();
let pending: Arc<Mutex<BTreeMap<RequestId, Pending>>> = Default::default();
let subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>> = Default::default();
let r = ReadStream {
read,
pending: pending.clone(),
subscriptions: subscriptions.clone(),
buffer: vec![],
current_pos: 0,
};
let w = WriteStream {
write,
incoming: write_receiver,
state: WriteState::WaitingForRequest,
};
handle.spawn(r);
handle.spawn(w);
Ok(Ipc {
id: Arc::new(atomic::AtomicUsize::new(1)),
write_sender,
pending,
subscriptions,
})
}
#[cfg(not(unix))]
pub fn with_event_loop<P>(_path: P, _handle: &reactor::Handle) -> Result<Self> {
return Err(Error::Transport("IPC transport is only supported on Unix".into()).into());
}
fn send_request<F, O>(&self, id: RequestId, request: rpc::Request, extract: F) -> IpcTask<F>
where
F: Fn(Vec<Result<rpc::Value>>) -> O,
{
let request = helpers::to_string(&request);
log::debug!("[{}] Calling: {}", id, request);
let (tx, rx) = futures::oneshot();
self.pending.lock().insert(id, tx);
let result = self
.write_sender
.unbounded_send(request.into_bytes())
.map_err(|_| Error::Io(io::ErrorKind::BrokenPipe.into()));
Response::new(id, result, rx, extract)
}
}
impl Transport for Ipc {
type Out = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<rpc::Value>>;
fn prepare(&self, method: &str, params: Vec<rpc::Value>) -> (RequestId, rpc::Call) {
let id = self.id.fetch_add(1, atomic::Ordering::AcqRel);
let request = helpers::build_request(id, method, params);
(id, request)
}
fn send(&self, id: RequestId, request: rpc::Call) -> Self::Out {
self.send_request(id, rpc::Request::Single(request), single_response)
}
}
fn single_response(response: Vec<Result<rpc::Value>>) -> Result<rpc::Value> {
match response.into_iter().next() {
Some(res) => res,
None => Err(Error::InvalidResponse("Expected single, got batch.".into())),
}
}
impl BatchTransport for Ipc {
type Batch = IpcTask<fn(Vec<Result<rpc::Value>>) -> Result<Vec<Result<rpc::Value>>>>;
fn send_batch<T>(&self, requests: T) -> Self::Batch | let requests = first.into_iter().chain(it.map(|x| x.1)).collect();
self.send_request(id, rpc::Request::Batch(requests), Ok)
}
}
impl DuplexTransport for Ipc {
type NotificationStream = Box<dyn Stream<Item = rpc::Value, Error = Error> + Send + 'static>;
fn subscribe(&self, id: &SubscriptionId) -> Self::NotificationStream {
let (tx, rx) = mpsc::unbounded();
if self.subscriptions.lock().insert(id.clone(), tx).is_some() {
log::warn!("Replacing already-registered subscription with id {:?}", id)
}
Box::new(rx.map_err(|()| Error::Transport("No data available".into())))
}
fn unsubscribe(&self, id: &SubscriptionId) {
self.subscriptions.lock().remove(id);
}
}
enum WriteState {
WaitingForRequest,
Writing { buffer: Vec<u8>, current_pos: usize },
}
/// Writing part of the IPC transport
/// Awaits new requests using `mpsc::UnboundedReceiver` and writes them to the socket.
#[cfg(unix)]
struct WriteStream {
write: WriteHalf<UnixStream>,
incoming: mpsc::UnboundedReceiver<Vec<u8>>,
state: WriteState,
}
#[cfg(unix)]
impl Future for WriteStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
loop {
self.state = match self.state {
WriteState::WaitingForRequest => {
// Ask for more to write
let to_send = try_ready!(self.incoming.poll());
if let Some(to_send) = to_send {
log::trace!("Got new message to write: {:?}", String::from_utf8_lossy(&to_send));
WriteState::Writing {
buffer: to_send,
current_pos: 0,
}
} else {
return Ok(futures::Async::NotReady);
}
}
WriteState::Writing {
ref buffer,
ref mut current_pos,
} => {
// Write everything in the buffer
while *current_pos < buffer.len() {
let n = try_nb!(self.write.write(&buffer[*current_pos..]));
*current_pos += n;
if n == 0 {
log::warn!("IO Error: Zero write.");
return Err(()); // zero write?
}
}
WriteState::WaitingForRequest
}
};
}
}
}
/// Reading part of the IPC transport.
/// Reads data on the socket and tries to dispatch it to awaiting requests.
#[cfg(unix)]
struct ReadStream {
read: ReadHalf<UnixStream>,
pending: Arc<Mutex<BTreeMap<RequestId, Pending>>>,
subscriptions: Arc<Mutex<BTreeMap<SubscriptionId, Subscription>>>,
buffer: Vec<u8>,
current_pos: usize,
}
#[cfg(unix)]
impl Future for ReadStream {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
const DEFAULT_BUF_SIZE: usize = 4096;
let mut new_write_size = 128;
loop {
if self.current_pos == self.buffer.len() {
if new_write_size < DEFAULT_BUF_SIZE {
new_write_size *= 2;
}
self.buffer.resize(self.current_pos + new_write_size, 0);
}
let read = try_nb!(self.read.read(&mut self.buffer[self.current_pos..]));
if read == 0 {
return Ok(futures::Async::NotReady);
}
let mut min = self.current_pos;
self.current_pos += read;
while let Some((response, len)) = Self::extract_response(&self.buffer[0..self.current_pos], min) {
// Respond
self.respond(response);
// copy rest of buffer to the beginning
for i in len..self.current_pos {
self.buffer.swap(i, i - len);
}
// truncate the buffer
let new_len = self.current_pos - len;
self.buffer.truncate(new_len + new_write_size);
// Set new positions
self.current_pos = new_len;
min = 0;
}
}
}
}
enum Message {
Rpc(Vec<rpc::Output>),
Notification(rpc::Notification),
}
#[cfg(unix)]
impl ReadStream {
fn respond(&self, response: Message) {
match response {
Message::Rpc(outputs) => {
let id = match outputs.get(0) {
Some(&rpc::Output::Success(ref success)) => success.id.clone(),
Some(&rpc::Output::Failure(ref failure)) => failure.id.clone(),
None => rpc::Id::Num(0),
};
if let rpc::Id::Num(num) = id {
if let Some(request) = self.pending.lock().remove(&(num as usize)) {
log::trace!("Responding to (id: {:?}) with {:?}", num, outputs);
if let Err(err) = request.send(helpers::to_results_from_outputs(outputs)) {
log::warn!("Sending a response to deallocated channel: {:?}", err);
}
} else {
log::warn!("Got response for unknown request (id: {:?})", num);
}
} else {
log::warn!("Got unsupported response (id: {:?})", id);
}
}
Message::Notification(notification) => {
if let rpc::Params::Map(params) = notification.params {
let id = params.get("subscription");
let result = params.get("result");
if let (Some(&rpc::Value::String(ref id)), Some(result)) = (id, result) {
let id: SubscriptionId = id.clone().into();
if let Some(stream) = self.subscriptions.lock().get(&id) {
if let Err(e) = stream.unbounded_send(result.clone()) {
log::error!("Error sending notification (id: {:?}): {:?}", id, e);
}
} else {
log::warn!("Got notification for unknown subscription (id: {:?})", id);
}
} else {
log::error!("Got unsupported notification (id: {:?})", id);
}
}
}
}
}
fn extract_response(buf: &[u8], min: usize) -> Option<(Message, usize)> {
for pos in (min..buf.len()).rev() {
// Look for end character
if buf[pos] == b']' || buf[pos] == b'}' {
// Try to deserialize
let pos = pos + 1;
match helpers::to_response_from_slice(&buf[0..pos]) {
Ok(rpc::Response::Single(output)) => return Some((Message::Rpc(vec![output]), pos)),
Ok(rpc::Response::Batch(outputs)) => return Some((Message::Rpc(outputs), pos)),
// just continue
_ => {}
}
match helpers::to_notification_from_slice(&buf[0..pos]) {
Ok(notification) => return Some((Message::Notification(notification), pos)),
_ => {}
}
}
}
None
}
}
#[cfg(all(test, unix))]
mod tests {
extern crate tokio_core;
extern crate tokio_uds;
use super::Ipc;
use crate::rpc;
use crate::Transport;
use futures::{self, Future};
use std::io::{self, Read, Write};
#[test]
fn should_send_a_request() {
// given
let mut eloop = tokio_core::reactor::Core::new().unwrap();
let handle = eloop.handle();
let (server, client) = tokio_uds::UnixStream::pair(&handle).unwrap();
let ipc = Ipc::with_stream(client, &handle).unwrap();
eloop.remote().spawn(move |_| {
struct Task {
server: tokio_uds::UnixStream,
}
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<(), ()> {
let mut data = [0; 2048];
// Read request
let read = try_nb!(self.server.read(&mut data));
let request = String::from_utf8(data[0..read].to_vec()).unwrap();
assert_eq!(
&request,
r#"{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":1}"#
);
// Write response
let response = r#"{"jsonrpc":"2.0","id":1,"result":"x"}"#;
self.server.write_all(response.as_bytes()).unwrap();
self.server.flush().unwrap();
Ok(futures::Async::Ready(()))
}
}
Task { server }
});
// when
let res = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
// then
assert_eq!(eloop.run(res), Ok(rpc::Value::String("x".into())));
}
#[test]
fn should_handle_double_response() {
// given
let mut eloop = tokio_core::reactor::Core::new().unwrap();
let handle = eloop.handle();
let (server, client) = tokio_uds::UnixStream::pair(&handle).unwrap();
let ipc = Ipc::with_stream(client, &handle).unwrap();
eloop.remote().spawn(move |_| {
struct Task {
server: tokio_uds::UnixStream,
}
impl Future for Task {
type Item = ();
type Error = ();
fn poll(&mut self) -> futures::Poll<(), ()> {
let mut data = [0; 2048];
// Read request
let read = try_nb!(self.server.read(&mut data));
let request = String::from_utf8(data[0..read].to_vec()).unwrap();
assert_eq!(&request, r#"{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":1}{"jsonrpc":"2.0","method":"eth_accounts","params":["1"],"id":2}"#);
// Write response
let response = r#"{"jsonrpc":"2.0","id":1,"result":"x"}{"jsonrpc":"2.0","id":2,"result":"x"}"#;
self.server.write_all(response.as_bytes()).unwrap();
self.server.flush().unwrap();
Ok(futures::Async::Ready(()))
}
}
Task { server }
});
// when
let res1 = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
let res2 = ipc.execute("eth_accounts", vec![rpc::Value::String("1".into())]);
// then
assert_eq!(
eloop.run(res1.join(res2)),
Ok((rpc::Value::String("x".into()), rpc::Value::String("x".into())))
);
}
} | where
T: IntoIterator<Item = (RequestId, rpc::Call)>,
{
let mut it = requests.into_iter();
let (id, first) = it.next().map(|x| (x.0, Some(x.1))).unwrap_or_else(|| (0, None)); | random_line_split |
map.js | BMap.register(function (cK) {
if (cK.config && cK.config.isOverviewMap) {
return
}
if (cK.isLoaded()) {
bj(cK)
} else {
cK.addEventListener("load", function () {
bj(this)
})
}
cK.cityName = "\u4e2d\u56fd";
var T = {};
T.enableRequest = true;
T.request = function () {
if (T.enableRequest) {
T.enableRequest = false;
setTimeout(function () {
T._request()
}, 500)
}
};
T._request = function () {
var cM = cK.getBounds(),
cO = cK.getZoom(),
cL = a9.convertLL2MC(cM.getSouthWest()),
cN = a9.convertLL2MC(cM.getNorthEast());
ba.request(function (cP) {
T.enableRequest = true;
if (cP && cP.current_city && cP.current_city["name"]) {
cK.cityName = cP.current_city["name"];
aY(cK)
}
}, {
qt: "cen",
b: cL.lng + "," + cL.lat + ";" + cN.lng + "," + cN.lat,
l: cO
}, "", "", true)
};
cK.addEventListener("load", function (cL) {
T.request()
});
cK.addEventListener("moveend", function (cL) {
T.request()
});
cK.addEventListener("zoomend", function (cL) {
T.request()
})
});
function bj(cK) {
if (cK.temp.copyadded) {
return
}
cK.temp.copyadded = true;
var cM = new aG(81, 2);
if (az()) {
if (cK.highResolutionEnabled()) {
cM.width = 148;
fontSize = "21px"
} else {
cM.width = 72;
cM.height = 0
}
}
var cL = new al({
offset: cM,
printable: true
});
cK.cpyCtrl = cL;
aY(cK);
cK.addEventListener("maptypechange", function () {
aY(cK)
});
cK.addControl(cL);
var T = new b0();
T._opts = {
printable: true
};
cK.addControl(T);
cK.addEventListener("resize", function () {
if (this.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show();
cL.setOffset(cM)
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
});
if (cK.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show()
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
if (cK.highResolutionEnabled()) {
T.setOffset(new aG(3, 2))
}
}
function aY(T) {
var cQ = "11px",
cP = T.cityName || "\u4e2d\u56fd",
cM = T.getMapType(),
cR = ["\u5e38\u5dde\u5e02", "\u6210\u90fd\u5e02", "\u5927\u8fde\u5e02", "\u91cd\u5e86\u5e02", "\u5357\u4eac\u5e02", "\u5357\u660c\u5e02", "\u6b66\u6c49\u5e02"],
cL = [],
cO, cN = "color:#fff;font-size:" + cQ + ";text-shadow:0 1px 3px black";
switch (cM) {
case BMAP_SATELLITE_MAP:
case BMAP_HYBRID_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/" style="' + cN + '">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/" style="' + cN + '">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/" style="' + cN + '">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/" style="' + cN + '">\u9053\u9053\u901a</a>');
cL.push(" , Image © DigitalGlobe & </span>");
cL.push('<a href="http://www.chinasiwei.com" target="_blank" style="' + cN + '">chinasiwei</a>');
break;
case BMAP_PERSPECTIVE_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © </span>', '<a href="http://o.cn" target="_blank" style="color:#fff;font-size:' + cQ + ';text-shadow:0 1px 3px black">\u90fd\u5e02\u5708</a>'];
break;
default:
cL = ['<span style="font-size:' + cQ + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/">\u9053\u9053\u901a</a>');
cL.push("</span>");
break
}
cL = cL.join("");
T.cpyCtrl.addCopyright({
id: 1,
content: cL
})
}
function b0(T) {
this.defaultAnchor = BMAP_ANCHOR_BOTTOM_LEFT;
this.defaultOffset = new aG(1, 0);
this.IMG_URL = cb.imgPath + (az() ? "copyright_logo_s.png" : "copyright_logo.png")
}
b0.prototype = new co();
b0.prototype.initialize = function (cK) {
this._map = cK;
var cL = Z("div");
cL.style.height = "32px";
var T = Z("a", {
title: "\u5230\u767e\u5ea6\u5730\u56fe\u67e5\u770b\u6b64\u533a\u57df",
target: "_blank",
href: "http://map.baidu.com/?sr=1"
});
T.style.outline = "none";
if (a7.browser.ie == 6) {
T.innerHTML = "<div style='cursor:pointer;width:77px;height:32px;filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src=" + this.IMG_URL + ")'></div>"
} else {
T.innerHTML = "<img style='border:none;width:77px;height:32px' src='" + this.IMG_URL + "' />"
}
if (az()) {
if (this._map.highResolutionEnabled()) {
cL.style.height = "50px";
T.href = "#";
this.IMG_URL = cb.imgPath + "copyright_logo_hd.png";
T.innerHTML = "<img style='border:none;width:136px;height:50px' src='" + this.IMG_URL + "' />"
} else {
cL.style.height = "25px";
T.href = "#";
T.innerHTML = "<img style='border:none;width:68px;height:25px' src='" + this.IMG_URL + "' />"
}
}
cL.appendChild(T);
cK.getContainer().appendChild(cL);
return cL
};
a7.extend(by.prototype, {
_draw: function () {
this._bind()
},
_bind: function () {
var T = this;
T._watchSize = function () {
var cL = T.getSize();
if (T.width != cL.width || T.height != cL.height) {
var cN = new aG(T.width, T.height);
var cP = new bf("onbeforeresize");
cP.size = cN;
T.dispatchEvent(cP);
T._updateCenterPoint((cL.width - T.width) / 2, (cL.height - T.height) / 2);
T.maskLayer.style.width = (T.width = cL.width) + "px";
T.maskLayer.style.height = (T.height = cL.height) + "px";
var cM = new bf("onresize");
cM.size = cL;
T.dispatchEvent(cM);
var cK = parseInt(T.platform.style.left) || 0;
var cO = parseInt(T.platform.style.top) || 0;
if (T.currentOperation != 0 && (T.offsetX != cK || T.offsetY != cO)) {
T._setPlatformPosition(cK, cO)
}
}
};
a7.on(T.maskLayer, "mouseover", function (cK) {
T.dispatchEvent(new bf("onmouseover"))
});
a7.on(T.maskLayer, "mouseout", function (cK) {
T.dispatchEvent(new bf("onmouseout"))
})
},
_setPlatformPosition: function (T, cM, cK, cL) {
if (isNaN(T) || isNaN(cM)) {
return
}
if (this.offsetX == T && this.offsetY == cM) {
return
}
this._updateCenterPoint(this.offsetX - T, this.offsetY - cM, cK);
T = Math.round(T);
cM = Math.round(cM);
this.offsetX = T;
this.offsetY = cM;
this.platform.style.left = T + "px";
this.platform.style.top = cM + "px";
this.maskLayer.style.left = -T + "px";
this.maskLayer.style.top = -cM + "px";
if (cL != false) {
this.dispatchEvent(new bf("onmoving"))
}
},
panTo: function (cK, cM) {
if (!(cK instanceof cc)) {
return
}
var cL = this.pointToPixel(cK);
var T = Math.round(this.width / 2);
var cN = Math.round(this.height / 2);
cM = cM || {};
if (Math.abs(T - cL.x) > this.width || Math.abs(cN - cL.y) > this.height || cM.noAnimation) {
this._panTo(T - cL.x, cN - cL.y, cK)
} else {
this._panBy(T - cL.x, cN - cL.y, {
duration: cM.duration
})
}
},
_panTo: function (cK, T, cM) {
var cL = this.temp;
if (cL.operating == true) {
return
}
if (cL.dragAni) {
cL.dragAni.stop()
}
this.dispatchEvent(new bf("onmovestart"));
this._setPlatformPosition(this.offsetX + cK, this.offsetY + T, cM);
this.dispatchEvent(new bf("onmoveend"))
},
panBy: function (cK, T, cL) {
cK = Math.round(cK) || 0;
T = Math.round(T) || 0;
cL = cL || {};
if (Math.abs(cK) <= this.width && Math.abs(T) <= this.height && (!cL.noAnimation)) {
this._panBy(cK, T)
} else {
this._panTo(cK, T)
}
},
_panBy: function (cK, T, cN) {
if (this.temp.operating == true) |
cN = cN || {};
this.dispatchEvent(new bf("onmovestart"));
var cM = this,
cL = cM.temp;
cL.pl = cM.offsetX;
cL.pt = cM.offsetY;
if (cL.tlPan) {
cL.tlPan.cancel()
}
if (cL.dragAni) {
cL.dragAni.stop()
}
cL.tlPan = new g({
fps: cN.fps || cM.config.fps,
duration: cN.duration || cM.config.actionDuration,
transition: cN.transition || av.easeInOutQuad,
render: function (cO) {
this.terminative = cM.temp.operating;
if (cM.temp.operating) {
return
}
cM._setPlatformPosition(cL.pl + Math.ceil(cK * cO), cL.pt + Math.ceil(T * cO))
},
finish: function (cO) {
cM.dispatchEvent(new bf("onmoveend"));
cM.temp.tlPan = false;
if (cM.temp.stopArrow == true) {
cM.temp.stopArrow = false;
if (cM.temp.arrow != 0) {
cM._arrow()
}
}
}
})
}
}); | {
return
} | conditional_block |
map.js | BMap.register(function (cK) {
if (cK.config && cK.config.isOverviewMap) {
return
}
if (cK.isLoaded()) {
bj(cK)
} else {
cK.addEventListener("load", function () {
bj(this)
})
}
cK.cityName = "\u4e2d\u56fd";
var T = {};
T.enableRequest = true;
T.request = function () {
if (T.enableRequest) {
T.enableRequest = false;
setTimeout(function () {
T._request()
}, 500)
}
};
T._request = function () {
var cM = cK.getBounds(),
cO = cK.getZoom(),
cL = a9.convertLL2MC(cM.getSouthWest()),
cN = a9.convertLL2MC(cM.getNorthEast());
ba.request(function (cP) {
T.enableRequest = true;
if (cP && cP.current_city && cP.current_city["name"]) {
cK.cityName = cP.current_city["name"];
aY(cK)
}
}, {
qt: "cen",
b: cL.lng + "," + cL.lat + ";" + cN.lng + "," + cN.lat,
l: cO
}, "", "", true)
};
cK.addEventListener("load", function (cL) {
T.request()
});
cK.addEventListener("moveend", function (cL) {
T.request()
});
cK.addEventListener("zoomend", function (cL) {
T.request()
})
});
function bj(cK) |
function aY(T) {
var cQ = "11px",
cP = T.cityName || "\u4e2d\u56fd",
cM = T.getMapType(),
cR = ["\u5e38\u5dde\u5e02", "\u6210\u90fd\u5e02", "\u5927\u8fde\u5e02", "\u91cd\u5e86\u5e02", "\u5357\u4eac\u5e02", "\u5357\u660c\u5e02", "\u6b66\u6c49\u5e02"],
cL = [],
cO, cN = "color:#fff;font-size:" + cQ + ";text-shadow:0 1px 3px black";
switch (cM) {
case BMAP_SATELLITE_MAP:
case BMAP_HYBRID_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/" style="' + cN + '">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/" style="' + cN + '">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/" style="' + cN + '">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/" style="' + cN + '">\u9053\u9053\u901a</a>');
cL.push(" , Image © DigitalGlobe & </span>");
cL.push('<a href="http://www.chinasiwei.com" target="_blank" style="' + cN + '">chinasiwei</a>');
break;
case BMAP_PERSPECTIVE_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © </span>', '<a href="http://o.cn" target="_blank" style="color:#fff;font-size:' + cQ + ';text-shadow:0 1px 3px black">\u90fd\u5e02\u5708</a>'];
break;
default:
cL = ['<span style="font-size:' + cQ + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/">\u9053\u9053\u901a</a>');
cL.push("</span>");
break
}
cL = cL.join("");
T.cpyCtrl.addCopyright({
id: 1,
content: cL
})
}
function b0(T) {
this.defaultAnchor = BMAP_ANCHOR_BOTTOM_LEFT;
this.defaultOffset = new aG(1, 0);
this.IMG_URL = cb.imgPath + (az() ? "copyright_logo_s.png" : "copyright_logo.png")
}
b0.prototype = new co();
b0.prototype.initialize = function (cK) {
this._map = cK;
var cL = Z("div");
cL.style.height = "32px";
var T = Z("a", {
title: "\u5230\u767e\u5ea6\u5730\u56fe\u67e5\u770b\u6b64\u533a\u57df",
target: "_blank",
href: "http://map.baidu.com/?sr=1"
});
T.style.outline = "none";
if (a7.browser.ie == 6) {
T.innerHTML = "<div style='cursor:pointer;width:77px;height:32px;filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src=" + this.IMG_URL + ")'></div>"
} else {
T.innerHTML = "<img style='border:none;width:77px;height:32px' src='" + this.IMG_URL + "' />"
}
if (az()) {
if (this._map.highResolutionEnabled()) {
cL.style.height = "50px";
T.href = "#";
this.IMG_URL = cb.imgPath + "copyright_logo_hd.png";
T.innerHTML = "<img style='border:none;width:136px;height:50px' src='" + this.IMG_URL + "' />"
} else {
cL.style.height = "25px";
T.href = "#";
T.innerHTML = "<img style='border:none;width:68px;height:25px' src='" + this.IMG_URL + "' />"
}
}
cL.appendChild(T);
cK.getContainer().appendChild(cL);
return cL
};
a7.extend(by.prototype, {
_draw: function () {
this._bind()
},
_bind: function () {
var T = this;
T._watchSize = function () {
var cL = T.getSize();
if (T.width != cL.width || T.height != cL.height) {
var cN = new aG(T.width, T.height);
var cP = new bf("onbeforeresize");
cP.size = cN;
T.dispatchEvent(cP);
T._updateCenterPoint((cL.width - T.width) / 2, (cL.height - T.height) / 2);
T.maskLayer.style.width = (T.width = cL.width) + "px";
T.maskLayer.style.height = (T.height = cL.height) + "px";
var cM = new bf("onresize");
cM.size = cL;
T.dispatchEvent(cM);
var cK = parseInt(T.platform.style.left) || 0;
var cO = parseInt(T.platform.style.top) || 0;
if (T.currentOperation != 0 && (T.offsetX != cK || T.offsetY != cO)) {
T._setPlatformPosition(cK, cO)
}
}
};
a7.on(T.maskLayer, "mouseover", function (cK) {
T.dispatchEvent(new bf("onmouseover"))
});
a7.on(T.maskLayer, "mouseout", function (cK) {
T.dispatchEvent(new bf("onmouseout"))
})
},
_setPlatformPosition: function (T, cM, cK, cL) {
if (isNaN(T) || isNaN(cM)) {
return
}
if (this.offsetX == T && this.offsetY == cM) {
return
}
this._updateCenterPoint(this.offsetX - T, this.offsetY - cM, cK);
T = Math.round(T);
cM = Math.round(cM);
this.offsetX = T;
this.offsetY = cM;
this.platform.style.left = T + "px";
this.platform.style.top = cM + "px";
this.maskLayer.style.left = -T + "px";
this.maskLayer.style.top = -cM + "px";
if (cL != false) {
this.dispatchEvent(new bf("onmoving"))
}
},
panTo: function (cK, cM) {
if (!(cK instanceof cc)) {
return
}
var cL = this.pointToPixel(cK);
var T = Math.round(this.width / 2);
var cN = Math.round(this.height / 2);
cM = cM || {};
if (Math.abs(T - cL.x) > this.width || Math.abs(cN - cL.y) > this.height || cM.noAnimation) {
this._panTo(T - cL.x, cN - cL.y, cK)
} else {
this._panBy(T - cL.x, cN - cL.y, {
duration: cM.duration
})
}
},
_panTo: function (cK, T, cM) {
var cL = this.temp;
if (cL.operating == true) {
return
}
if (cL.dragAni) {
cL.dragAni.stop()
}
this.dispatchEvent(new bf("onmovestart"));
this._setPlatformPosition(this.offsetX + cK, this.offsetY + T, cM);
this.dispatchEvent(new bf("onmoveend"))
},
panBy: function (cK, T, cL) {
cK = Math.round(cK) || 0;
T = Math.round(T) || 0;
cL = cL || {};
if (Math.abs(cK) <= this.width && Math.abs(T) <= this.height && (!cL.noAnimation)) {
this._panBy(cK, T)
} else {
this._panTo(cK, T)
}
},
_panBy: function (cK, T, cN) {
if (this.temp.operating == true) {
return
}
cN = cN || {};
this.dispatchEvent(new bf("onmovestart"));
var cM = this,
cL = cM.temp;
cL.pl = cM.offsetX;
cL.pt = cM.offsetY;
if (cL.tlPan) {
cL.tlPan.cancel()
}
if (cL.dragAni) {
cL.dragAni.stop()
}
cL.tlPan = new g({
fps: cN.fps || cM.config.fps,
duration: cN.duration || cM.config.actionDuration,
transition: cN.transition || av.easeInOutQuad,
render: function (cO) {
this.terminative = cM.temp.operating;
if (cM.temp.operating) {
return
}
cM._setPlatformPosition(cL.pl + Math.ceil(cK * cO), cL.pt + Math.ceil(T * cO))
},
finish: function (cO) {
cM.dispatchEvent(new bf("onmoveend"));
cM.temp.tlPan = false;
if (cM.temp.stopArrow == true) {
cM.temp.stopArrow = false;
if (cM.temp.arrow != 0) {
cM._arrow()
}
}
}
})
}
}); | {
if (cK.temp.copyadded) {
return
}
cK.temp.copyadded = true;
var cM = new aG(81, 2);
if (az()) {
if (cK.highResolutionEnabled()) {
cM.width = 148;
fontSize = "21px"
} else {
cM.width = 72;
cM.height = 0
}
}
var cL = new al({
offset: cM,
printable: true
});
cK.cpyCtrl = cL;
aY(cK);
cK.addEventListener("maptypechange", function () {
aY(cK)
});
cK.addControl(cL);
var T = new b0();
T._opts = {
printable: true
};
cK.addControl(T);
cK.addEventListener("resize", function () {
if (this.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show();
cL.setOffset(cM)
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
});
if (cK.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show()
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
if (cK.highResolutionEnabled()) {
T.setOffset(new aG(3, 2))
}
} | identifier_body |
map.js | BMap.register(function (cK) {
if (cK.config && cK.config.isOverviewMap) {
return
}
if (cK.isLoaded()) {
bj(cK)
} else {
cK.addEventListener("load", function () {
bj(this)
})
}
cK.cityName = "\u4e2d\u56fd";
var T = {};
T.enableRequest = true;
T.request = function () {
if (T.enableRequest) {
T.enableRequest = false;
setTimeout(function () {
T._request()
}, 500)
}
};
T._request = function () {
var cM = cK.getBounds(),
cO = cK.getZoom(),
cL = a9.convertLL2MC(cM.getSouthWest()),
cN = a9.convertLL2MC(cM.getNorthEast());
ba.request(function (cP) {
T.enableRequest = true;
if (cP && cP.current_city && cP.current_city["name"]) {
cK.cityName = cP.current_city["name"];
aY(cK)
}
}, {
qt: "cen",
b: cL.lng + "," + cL.lat + ";" + cN.lng + "," + cN.lat,
l: cO
}, "", "", true)
};
cK.addEventListener("load", function (cL) {
T.request()
});
cK.addEventListener("moveend", function (cL) {
T.request()
});
cK.addEventListener("zoomend", function (cL) {
T.request()
})
});
function bj(cK) {
if (cK.temp.copyadded) {
return
}
cK.temp.copyadded = true;
var cM = new aG(81, 2);
if (az()) {
if (cK.highResolutionEnabled()) {
cM.width = 148;
fontSize = "21px"
} else {
cM.width = 72;
cM.height = 0
}
}
var cL = new al({
offset: cM,
printable: true
});
cK.cpyCtrl = cL;
aY(cK);
cK.addEventListener("maptypechange", function () {
aY(cK)
});
cK.addControl(cL);
var T = new b0();
T._opts = {
printable: true
};
cK.addControl(T);
cK.addEventListener("resize", function () {
if (this.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show();
cL.setOffset(cM)
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
});
if (cK.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show()
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
if (cK.highResolutionEnabled()) {
T.setOffset(new aG(3, 2))
}
}
function aY(T) {
var cQ = "11px",
cP = T.cityName || "\u4e2d\u56fd",
cM = T.getMapType(),
cR = ["\u5e38\u5dde\u5e02", "\u6210\u90fd\u5e02", "\u5927\u8fde\u5e02", "\u91cd\u5e86\u5e02", "\u5357\u4eac\u5e02", "\u5357\u660c\u5e02", "\u6b66\u6c49\u5e02"],
cL = [],
cO, cN = "color:#fff;font-size:" + cQ + ";text-shadow:0 1px 3px black";
switch (cM) {
case BMAP_SATELLITE_MAP:
case BMAP_HYBRID_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/" style="' + cN + '">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/" style="' + cN + '">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/" style="' + cN + '">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/" style="' + cN + '">\u9053\u9053\u901a</a>');
cL.push(" , Image © DigitalGlobe & </span>");
cL.push('<a href="http://www.chinasiwei.com" target="_blank" style="' + cN + '">chinasiwei</a>');
break;
case BMAP_PERSPECTIVE_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © </span>', '<a href="http://o.cn" target="_blank" style="color:#fff;font-size:' + cQ + ';text-shadow:0 1px 3px black">\u90fd\u5e02\u5708</a>'];
break;
default:
cL = ['<span style="font-size:' + cQ + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/">\u9053\u9053\u901a</a>');
cL.push("</span>");
break
}
cL = cL.join("");
T.cpyCtrl.addCopyright({
id: 1,
content: cL
})
}
function | (T) {
this.defaultAnchor = BMAP_ANCHOR_BOTTOM_LEFT;
this.defaultOffset = new aG(1, 0);
this.IMG_URL = cb.imgPath + (az() ? "copyright_logo_s.png" : "copyright_logo.png")
}
b0.prototype = new co();
b0.prototype.initialize = function (cK) {
this._map = cK;
var cL = Z("div");
cL.style.height = "32px";
var T = Z("a", {
title: "\u5230\u767e\u5ea6\u5730\u56fe\u67e5\u770b\u6b64\u533a\u57df",
target: "_blank",
href: "http://map.baidu.com/?sr=1"
});
T.style.outline = "none";
if (a7.browser.ie == 6) {
T.innerHTML = "<div style='cursor:pointer;width:77px;height:32px;filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src=" + this.IMG_URL + ")'></div>"
} else {
T.innerHTML = "<img style='border:none;width:77px;height:32px' src='" + this.IMG_URL + "' />"
}
if (az()) {
if (this._map.highResolutionEnabled()) {
cL.style.height = "50px";
T.href = "#";
this.IMG_URL = cb.imgPath + "copyright_logo_hd.png";
T.innerHTML = "<img style='border:none;width:136px;height:50px' src='" + this.IMG_URL + "' />"
} else {
cL.style.height = "25px";
T.href = "#";
T.innerHTML = "<img style='border:none;width:68px;height:25px' src='" + this.IMG_URL + "' />"
}
}
cL.appendChild(T);
cK.getContainer().appendChild(cL);
return cL
};
a7.extend(by.prototype, {
_draw: function () {
this._bind()
},
_bind: function () {
var T = this;
T._watchSize = function () {
var cL = T.getSize();
if (T.width != cL.width || T.height != cL.height) {
var cN = new aG(T.width, T.height);
var cP = new bf("onbeforeresize");
cP.size = cN;
T.dispatchEvent(cP);
T._updateCenterPoint((cL.width - T.width) / 2, (cL.height - T.height) / 2);
T.maskLayer.style.width = (T.width = cL.width) + "px";
T.maskLayer.style.height = (T.height = cL.height) + "px";
var cM = new bf("onresize");
cM.size = cL;
T.dispatchEvent(cM);
var cK = parseInt(T.platform.style.left) || 0;
var cO = parseInt(T.platform.style.top) || 0;
if (T.currentOperation != 0 && (T.offsetX != cK || T.offsetY != cO)) {
T._setPlatformPosition(cK, cO)
}
}
};
a7.on(T.maskLayer, "mouseover", function (cK) {
T.dispatchEvent(new bf("onmouseover"))
});
a7.on(T.maskLayer, "mouseout", function (cK) {
T.dispatchEvent(new bf("onmouseout"))
})
},
_setPlatformPosition: function (T, cM, cK, cL) {
if (isNaN(T) || isNaN(cM)) {
return
}
if (this.offsetX == T && this.offsetY == cM) {
return
}
this._updateCenterPoint(this.offsetX - T, this.offsetY - cM, cK);
T = Math.round(T);
cM = Math.round(cM);
this.offsetX = T;
this.offsetY = cM;
this.platform.style.left = T + "px";
this.platform.style.top = cM + "px";
this.maskLayer.style.left = -T + "px";
this.maskLayer.style.top = -cM + "px";
if (cL != false) {
this.dispatchEvent(new bf("onmoving"))
}
},
panTo: function (cK, cM) {
if (!(cK instanceof cc)) {
return
}
var cL = this.pointToPixel(cK);
var T = Math.round(this.width / 2);
var cN = Math.round(this.height / 2);
cM = cM || {};
if (Math.abs(T - cL.x) > this.width || Math.abs(cN - cL.y) > this.height || cM.noAnimation) {
this._panTo(T - cL.x, cN - cL.y, cK)
} else {
this._panBy(T - cL.x, cN - cL.y, {
duration: cM.duration
})
}
},
_panTo: function (cK, T, cM) {
var cL = this.temp;
if (cL.operating == true) {
return
}
if (cL.dragAni) {
cL.dragAni.stop()
}
this.dispatchEvent(new bf("onmovestart"));
this._setPlatformPosition(this.offsetX + cK, this.offsetY + T, cM);
this.dispatchEvent(new bf("onmoveend"))
},
panBy: function (cK, T, cL) {
cK = Math.round(cK) || 0;
T = Math.round(T) || 0;
cL = cL || {};
if (Math.abs(cK) <= this.width && Math.abs(T) <= this.height && (!cL.noAnimation)) {
this._panBy(cK, T)
} else {
this._panTo(cK, T)
}
},
_panBy: function (cK, T, cN) {
if (this.temp.operating == true) {
return
}
cN = cN || {};
this.dispatchEvent(new bf("onmovestart"));
var cM = this,
cL = cM.temp;
cL.pl = cM.offsetX;
cL.pt = cM.offsetY;
if (cL.tlPan) {
cL.tlPan.cancel()
}
if (cL.dragAni) {
cL.dragAni.stop()
}
cL.tlPan = new g({
fps: cN.fps || cM.config.fps,
duration: cN.duration || cM.config.actionDuration,
transition: cN.transition || av.easeInOutQuad,
render: function (cO) {
this.terminative = cM.temp.operating;
if (cM.temp.operating) {
return
}
cM._setPlatformPosition(cL.pl + Math.ceil(cK * cO), cL.pt + Math.ceil(T * cO))
},
finish: function (cO) {
cM.dispatchEvent(new bf("onmoveend"));
cM.temp.tlPan = false;
if (cM.temp.stopArrow == true) {
cM.temp.stopArrow = false;
if (cM.temp.arrow != 0) {
cM._arrow()
}
}
}
})
}
}); | b0 | identifier_name |
map.js | BMap.register(function (cK) {
if (cK.config && cK.config.isOverviewMap) {
return
}
if (cK.isLoaded()) {
bj(cK)
} else {
cK.addEventListener("load", function () {
bj(this)
})
}
cK.cityName = "\u4e2d\u56fd";
var T = {};
T.enableRequest = true;
T.request = function () {
if (T.enableRequest) {
T.enableRequest = false;
setTimeout(function () {
T._request()
}, 500)
}
};
T._request = function () {
var cM = cK.getBounds(),
cO = cK.getZoom(),
cL = a9.convertLL2MC(cM.getSouthWest()),
cN = a9.convertLL2MC(cM.getNorthEast());
ba.request(function (cP) {
T.enableRequest = true;
if (cP && cP.current_city && cP.current_city["name"]) {
cK.cityName = cP.current_city["name"];
aY(cK)
}
}, {
qt: "cen",
b: cL.lng + "," + cL.lat + ";" + cN.lng + "," + cN.lat,
l: cO
}, "", "", true)
};
cK.addEventListener("load", function (cL) {
T.request()
});
cK.addEventListener("moveend", function (cL) {
T.request()
});
cK.addEventListener("zoomend", function (cL) {
T.request()
})
});
function bj(cK) {
if (cK.temp.copyadded) {
return
}
cK.temp.copyadded = true;
var cM = new aG(81, 2);
if (az()) {
if (cK.highResolutionEnabled()) {
cM.width = 148;
fontSize = "21px"
} else {
cM.width = 72;
cM.height = 0
}
}
var cL = new al({
offset: cM,
printable: true
});
cK.cpyCtrl = cL;
aY(cK);
cK.addEventListener("maptypechange", function () {
aY(cK)
});
cK.addControl(cL);
var T = new b0();
T._opts = {
printable: true
};
cK.addControl(T);
cK.addEventListener("resize", function () {
if (this.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show();
cL.setOffset(cM)
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
});
if (cK.getSize().width >= 220 && cK.getSize().height >= 100) {
T.show()
} else {
T.hide();
cL.setOffset(new aG(4, 2))
}
if (cK.highResolutionEnabled()) {
T.setOffset(new aG(3, 2))
}
}
function aY(T) {
var cQ = "11px",
cP = T.cityName || "\u4e2d\u56fd",
cM = T.getMapType(),
cR = ["\u5e38\u5dde\u5e02", "\u6210\u90fd\u5e02", "\u5927\u8fde\u5e02", "\u91cd\u5e86\u5e02", "\u5357\u4eac\u5e02", "\u5357\u660c\u5e02", "\u6b66\u6c49\u5e02"],
cL = [],
cO, cN = "color:#fff;font-size:" + cQ + ";text-shadow:0 1px 3px black";
switch (cM) {
case BMAP_SATELLITE_MAP:
case BMAP_HYBRID_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/" style="' + cN + '">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/" style="' + cN + '">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/" style="' + cN + '">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/" style="' + cN + '">\u9053\u9053\u901a</a>');
cL.push(" , Image © DigitalGlobe & </span>");
cL.push('<a href="http://www.chinasiwei.com" target="_blank" style="' + cN + '">chinasiwei</a>');
break;
case BMAP_PERSPECTIVE_MAP:
cL = ['<span style="' + cN + '">© 2013 Baidu - Data © </span>', '<a href="http://o.cn" target="_blank" style="color:#fff;font-size:' + cQ + ';text-shadow:0 1px 3px black">\u90fd\u5e02\u5708</a>'];
break;
default:
cL = ['<span style="font-size:' + cQ + '">© 2013 Baidu - Data © '];
cL.push('<a target="_blank" href="http://www.navinfo.com/">NavInfo</a> & ');
for (var cK in cR) {
if (cR[cK] == cP) {
cO = true;
break
}
}
if (cO) {
cL.push('<a target="_blank" href="http://www.yootu.com/">yootu</a>')
} else {
cL.push('<a target="_blank" href="http://www.cennavi.com.cn/">CenNavi</a>')
}
cL.push(' & <a target="_blank" href="http://www.365ditu.com/">\u9053\u9053\u901a</a>');
cL.push("</span>");
break
}
cL = cL.join("");
T.cpyCtrl.addCopyright({
id: 1,
content: cL
})
}
function b0(T) {
this.defaultAnchor = BMAP_ANCHOR_BOTTOM_LEFT;
this.defaultOffset = new aG(1, 0);
this.IMG_URL = cb.imgPath + (az() ? "copyright_logo_s.png" : "copyright_logo.png")
}
b0.prototype = new co();
b0.prototype.initialize = function (cK) {
this._map = cK;
var cL = Z("div");
cL.style.height = "32px";
var T = Z("a", {
title: "\u5230\u767e\u5ea6\u5730\u56fe\u67e5\u770b\u6b64\u533a\u57df",
target: "_blank",
href: "http://map.baidu.com/?sr=1"
});
T.style.outline = "none";
if (a7.browser.ie == 6) {
T.innerHTML = "<div style='cursor:pointer;width:77px;height:32px;filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src=" + this.IMG_URL + ")'></div>"
} else {
T.innerHTML = "<img style='border:none;width:77px;height:32px' src='" + this.IMG_URL + "' />"
}
if (az()) {
if (this._map.highResolutionEnabled()) {
cL.style.height = "50px";
T.href = "#";
this.IMG_URL = cb.imgPath + "copyright_logo_hd.png";
T.innerHTML = "<img style='border:none;width:136px;height:50px' src='" + this.IMG_URL + "' />"
} else {
cL.style.height = "25px";
T.href = "#";
T.innerHTML = "<img style='border:none;width:68px;height:25px' src='" + this.IMG_URL + "' />"
}
}
cL.appendChild(T);
cK.getContainer().appendChild(cL);
return cL
};
a7.extend(by.prototype, {
_draw: function () {
this._bind()
},
_bind: function () {
var T = this;
T._watchSize = function () {
var cL = T.getSize();
if (T.width != cL.width || T.height != cL.height) {
var cN = new aG(T.width, T.height);
var cP = new bf("onbeforeresize");
cP.size = cN;
T.dispatchEvent(cP);
T._updateCenterPoint((cL.width - T.width) / 2, (cL.height - T.height) / 2);
T.maskLayer.style.width = (T.width = cL.width) + "px";
T.maskLayer.style.height = (T.height = cL.height) + "px";
var cM = new bf("onresize");
cM.size = cL;
T.dispatchEvent(cM);
var cK = parseInt(T.platform.style.left) || 0;
var cO = parseInt(T.platform.style.top) || 0;
if (T.currentOperation != 0 && (T.offsetX != cK || T.offsetY != cO)) {
T._setPlatformPosition(cK, cO)
}
}
};
a7.on(T.maskLayer, "mouseover", function (cK) {
T.dispatchEvent(new bf("onmouseover"))
});
a7.on(T.maskLayer, "mouseout", function (cK) {
T.dispatchEvent(new bf("onmouseout"))
})
},
_setPlatformPosition: function (T, cM, cK, cL) {
if (isNaN(T) || isNaN(cM)) {
return
}
if (this.offsetX == T && this.offsetY == cM) {
return
}
this._updateCenterPoint(this.offsetX - T, this.offsetY - cM, cK);
T = Math.round(T);
cM = Math.round(cM);
this.offsetX = T;
this.offsetY = cM;
this.platform.style.left = T + "px";
this.platform.style.top = cM + "px";
this.maskLayer.style.left = -T + "px";
this.maskLayer.style.top = -cM + "px";
if (cL != false) {
this.dispatchEvent(new bf("onmoving"))
}
},
panTo: function (cK, cM) {
if (!(cK instanceof cc)) {
return
}
var cL = this.pointToPixel(cK);
| var cN = Math.round(this.height / 2);
cM = cM || {};
if (Math.abs(T - cL.x) > this.width || Math.abs(cN - cL.y) > this.height || cM.noAnimation) {
this._panTo(T - cL.x, cN - cL.y, cK)
} else {
this._panBy(T - cL.x, cN - cL.y, {
duration: cM.duration
})
}
},
_panTo: function (cK, T, cM) {
var cL = this.temp;
if (cL.operating == true) {
return
}
if (cL.dragAni) {
cL.dragAni.stop()
}
this.dispatchEvent(new bf("onmovestart"));
this._setPlatformPosition(this.offsetX + cK, this.offsetY + T, cM);
this.dispatchEvent(new bf("onmoveend"))
},
panBy: function (cK, T, cL) {
cK = Math.round(cK) || 0;
T = Math.round(T) || 0;
cL = cL || {};
if (Math.abs(cK) <= this.width && Math.abs(T) <= this.height && (!cL.noAnimation)) {
this._panBy(cK, T)
} else {
this._panTo(cK, T)
}
},
_panBy: function (cK, T, cN) {
if (this.temp.operating == true) {
return
}
cN = cN || {};
this.dispatchEvent(new bf("onmovestart"));
var cM = this,
cL = cM.temp;
cL.pl = cM.offsetX;
cL.pt = cM.offsetY;
if (cL.tlPan) {
cL.tlPan.cancel()
}
if (cL.dragAni) {
cL.dragAni.stop()
}
cL.tlPan = new g({
fps: cN.fps || cM.config.fps,
duration: cN.duration || cM.config.actionDuration,
transition: cN.transition || av.easeInOutQuad,
render: function (cO) {
this.terminative = cM.temp.operating;
if (cM.temp.operating) {
return
}
cM._setPlatformPosition(cL.pl + Math.ceil(cK * cO), cL.pt + Math.ceil(T * cO))
},
finish: function (cO) {
cM.dispatchEvent(new bf("onmoveend"));
cM.temp.tlPan = false;
if (cM.temp.stopArrow == true) {
cM.temp.stopArrow = false;
if (cM.temp.arrow != 0) {
cM._arrow()
}
}
}
})
}
}); | var T = Math.round(this.width / 2);
| random_line_split |
GroupINN.py | from . import tf, arguments, argparse
from functools import reduce
class gcn_classification_net:
class loss_weights:
cross_entropy = 1.0
neg_penalty_reduce = 0.1
neg_penalty_gnn = 0.2
ortho_penalty_p = 0.2
ortho_penalty_n = 0.2
variance_penalty_p = 0.3
variance_penalty_n = 0.5
l2_penalty = 2e-3
@classmethod
def update_parser_argument(cls, parser: argparse.ArgumentParser):
args, _ = parser.parse_known_args()
parser.set_defaults(selected_model="gcn_classification_net")
print("===> Selected model: GroupINN")
group = parser.add_argument_group(title="GroupINN arguments")
group.add_argument("--dropout_rate", default=0, type=float, help="(default: %(default)s)")
group.add_argument("--c", default=0.85, type=float, help="(default: %(default)s)")
group.add_argument("--feature_reduction", default=5, type=int, help="(default: %(default)s)")
group.add_argument("--learning_rate", default=0.001, help="(default: %(default)s)")
arguments.add_loss_weights_argument(parser, cls.loss_weights, cls.__name__)
return parser
def __init__(self):
self.feature_notify = 0
def runtime_init(self, features, labels, mode):
self.losses = []
self.is_training = (mode==tf.estimator.ModeKeys.TRAIN)
def model_fn(self, features, labels,
mode:tf.estimator.ModeKeys, params):
"""
features: batch_features from input_fn
labels: batch_labels from input_fn
mode: An instance of tf.estimator.ModeKeys
params: Additional configuration
"""
self.runtime_init(features, labels, mode)
# Load parameters
self.num_features = params["args"].feature_reduction
self.c = params["args"].c
self.dropout_rate = params["args"].dropout_rate
self.selected_timeseries = params["args"].selected_timeseries
self.learning_rate = params["args"].learning_rate
self.tf_summary = (not params["args"].no_tensorboard)
# Construct network
s_feature = features[self.selected_timeseries]
s_feature_p = s_feature[0]
s_feature_n = s_feature[1]
num_columns = int(s_feature_p.shape[-1])
self.initializer = tf.initializers.random_uniform(0, 0.5/self.num_features)
p_reduce = self.dim_reduction(s_feature_p, self.num_features, "reduction_p",
self.loss_weights.ortho_penalty_p, self.loss_weights.variance_penalty_p, self.loss_weights.neg_penalty_reduce)
p_conv1 = self.gnn_conv(None, p_reduce, "conv1_p", self.loss_weights.neg_penalty_gnn)
p_conv2 = self.gnn_conv(p_conv1, p_reduce, "conv2_p", self.loss_weights.neg_penalty_gnn)
p_conv3 = self.gnn_conv(p_conv2, p_reduce, "conv3_p", self.loss_weights.neg_penalty_gnn)
n_reduce = self.dim_reduction(s_feature_n, self.num_features, "reduction_n",
self.loss_weights.ortho_penalty_n, self.loss_weights.variance_penalty_n, self.loss_weights.neg_penalty_reduce)
n_conv1 = self.gnn_conv(None, n_reduce, "conv1_n", self.loss_weights.neg_penalty_gnn)
n_conv2 = self.gnn_conv(n_conv1, n_reduce, "conv2_n", self.loss_weights.neg_penalty_gnn)
n_conv3 = self.gnn_conv(n_conv2, n_reduce, "conv3_n", self.loss_weights.neg_penalty_gnn)
conv_concat = tf.reshape(tf.concat([p_conv3,n_conv3], -1), [-1, 2*self.num_features**2])
dense_output = self.dense_layers(conv_concat, self.loss_weights.l2_penalty)
output = self.generate_output(dense_output, labels, mode)
if self.is_training:
if self.feature_notify % 10 == 0:
print("Selected feature: {}".format(self.selected_timeseries))
self.loss_weights._print_current_weights() #pylint: disable=E1101
self.count_params()
self.feature_notify += 1
return output
def dim_reduction(self, adj_matrix, num_reduce, name_scope,
ortho_penalty, variance_penalty, neg_penalty):
column_dim = int(adj_matrix.shape[-1])
with tf.variable_scope(name_scope):
kernel = tf.get_variable("dim_reduction_kernel", shape=[column_dim, num_reduce],
trainable=True, initializer=self.initializer,
regularizer=tf.contrib.layers.l1_regularizer(scale=0.05)
)
kernel_p = tf.nn.relu(kernel)
AF = tf.tensordot(adj_matrix, kernel_p, axes=[[-1],[0]])
reduced_adj_matrix = tf.transpose(
tf.tensordot(kernel_p, AF, axes=[[0],[1]]), #num_reduce*batch*num_reduce
perm=[1,0,2], name="reduced_adj")
if self.tf_summary:
tf.summary.image("dim_reduction_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
tf.summary.image("dim_reduction_kernel_p", tf.expand_dims(
tf.expand_dims(kernel_p, axis=0),
axis=-1))
gram_matrix = tf.matmul(kernel_p, kernel_p, transpose_a=True)
diag_elements = tf.diag_part(gram_matrix)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(diag_elements, zero)
if ortho_penalty!=0:
ortho_loss_matrix = tf.square(gram_matrix - tf.diag(diag_elements))
ortho_loss = tf.multiply(ortho_penalty, tf.reduce_sum(ortho_loss_matrix), name="ortho_penalty")
self.losses.append(ortho_loss)
if variance_penalty!=0:
_ , variance = tf.nn.moments(tf.boolean_mask(diag_elements,mask), axes=[0])
variance_loss = tf.multiply(variance_penalty, variance, name="variance_penalty")
self.losses.append(variance_loss)
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return reduced_adj_matrix
def gnn_conv(self, prev_output, adj_matrix, name_scope, neg_penalty): #I+c*A*X*W,X0=I
feature_dim = int(adj_matrix.shape[-1])
eye = tf.eye(feature_dim)
with tf.variable_scope(name_scope):
kernel = tf.get_variable("gnn_kernel",
shape=[feature_dim,feature_dim], trainable=True, initializer=self.initializer)
if prev_output is None:
AXW = tf.tensordot(adj_matrix, kernel, [[-1],[0]])
else:
XW = tf.tensordot(prev_output, kernel, [[-1],[0]]) #batch*feature_dim*feature_dim
AXW = tf.matmul(adj_matrix, XW)
I_cAXW = eye+self.c*AXW
y_relu = tf.nn.relu(I_cAXW)
col_mean = tf.tile(tf.reduce_mean(y_relu, axis=-2, keepdims=True)+1e-6,[1,feature_dim,1])
y_norm = tf.divide(y_relu, col_mean)
output = tf.nn.softplus(y_norm, name="gnn_output")
if self.tf_summary:
tf.summary.image("gnn_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return output
def dense_layers(self, input_flat, l2_penalty, name_scope="dense_layers"):
with tf.variable_scope(name_scope):
output_layer = tf.layers.Dense(2, name="output_layer")
logits = output_layer(input_flat)
kernel_var = output_layer.trainable_variables[0]
if l2_penalty != 0:
dense_kernel = output_layer.trainable_variables[0].read_value()
l2_loss = tf.multiply(l2_penalty, tf.nn.l2_loss(dense_kernel), name="l2_penalty")
self.losses.append(l2_loss)
return logits
def generate_output(self, logits, labels, mode:tf.estimator.ModeKeys):
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Define loss function
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
self.losses.append(
tf.multiply(self.loss_weights.cross_entropy,
tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits),
name="cross_entropy_loss")
)
# Define loss function
loss = tf.reduce_sum(self.losses, name="total_loss")
for loss_scalar in self.losses:
|
# Define accuracy metric
eval_metric_ops = {
"metrics/accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TP": tf.metrics.true_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TN": tf.metrics.true_negatives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FP": tf.metrics.false_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FN": tf.metrics.false_negatives(
labels=labels, predictions=predictions["classes"]),
"metrics/precision": tf.metrics.precision(
labels=labels, predictions=predictions["classes"]),
"metrics/recall": tf.metrics.recall(
labels=labels, predictions=predictions["classes"])
}
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def count_params(self):
"print number of trainable variables"
size = lambda v: reduce(lambda x, y: x*y, v.get_shape().as_list())
n = sum(size(v) for v in tf.trainable_variables())
print("Model size: {}K".format(n / 1000))
| tf.summary.scalar(loss_scalar.name, loss_scalar, family="loss") | conditional_block |
GroupINN.py | from . import tf, arguments, argparse
from functools import reduce
class gcn_classification_net:
class | :
cross_entropy = 1.0
neg_penalty_reduce = 0.1
neg_penalty_gnn = 0.2
ortho_penalty_p = 0.2
ortho_penalty_n = 0.2
variance_penalty_p = 0.3
variance_penalty_n = 0.5
l2_penalty = 2e-3
@classmethod
def update_parser_argument(cls, parser: argparse.ArgumentParser):
args, _ = parser.parse_known_args()
parser.set_defaults(selected_model="gcn_classification_net")
print("===> Selected model: GroupINN")
group = parser.add_argument_group(title="GroupINN arguments")
group.add_argument("--dropout_rate", default=0, type=float, help="(default: %(default)s)")
group.add_argument("--c", default=0.85, type=float, help="(default: %(default)s)")
group.add_argument("--feature_reduction", default=5, type=int, help="(default: %(default)s)")
group.add_argument("--learning_rate", default=0.001, help="(default: %(default)s)")
arguments.add_loss_weights_argument(parser, cls.loss_weights, cls.__name__)
return parser
def __init__(self):
self.feature_notify = 0
def runtime_init(self, features, labels, mode):
self.losses = []
self.is_training = (mode==tf.estimator.ModeKeys.TRAIN)
def model_fn(self, features, labels,
mode:tf.estimator.ModeKeys, params):
"""
features: batch_features from input_fn
labels: batch_labels from input_fn
mode: An instance of tf.estimator.ModeKeys
params: Additional configuration
"""
self.runtime_init(features, labels, mode)
# Load parameters
self.num_features = params["args"].feature_reduction
self.c = params["args"].c
self.dropout_rate = params["args"].dropout_rate
self.selected_timeseries = params["args"].selected_timeseries
self.learning_rate = params["args"].learning_rate
self.tf_summary = (not params["args"].no_tensorboard)
# Construct network
s_feature = features[self.selected_timeseries]
s_feature_p = s_feature[0]
s_feature_n = s_feature[1]
num_columns = int(s_feature_p.shape[-1])
self.initializer = tf.initializers.random_uniform(0, 0.5/self.num_features)
p_reduce = self.dim_reduction(s_feature_p, self.num_features, "reduction_p",
self.loss_weights.ortho_penalty_p, self.loss_weights.variance_penalty_p, self.loss_weights.neg_penalty_reduce)
p_conv1 = self.gnn_conv(None, p_reduce, "conv1_p", self.loss_weights.neg_penalty_gnn)
p_conv2 = self.gnn_conv(p_conv1, p_reduce, "conv2_p", self.loss_weights.neg_penalty_gnn)
p_conv3 = self.gnn_conv(p_conv2, p_reduce, "conv3_p", self.loss_weights.neg_penalty_gnn)
n_reduce = self.dim_reduction(s_feature_n, self.num_features, "reduction_n",
self.loss_weights.ortho_penalty_n, self.loss_weights.variance_penalty_n, self.loss_weights.neg_penalty_reduce)
n_conv1 = self.gnn_conv(None, n_reduce, "conv1_n", self.loss_weights.neg_penalty_gnn)
n_conv2 = self.gnn_conv(n_conv1, n_reduce, "conv2_n", self.loss_weights.neg_penalty_gnn)
n_conv3 = self.gnn_conv(n_conv2, n_reduce, "conv3_n", self.loss_weights.neg_penalty_gnn)
conv_concat = tf.reshape(tf.concat([p_conv3,n_conv3], -1), [-1, 2*self.num_features**2])
dense_output = self.dense_layers(conv_concat, self.loss_weights.l2_penalty)
output = self.generate_output(dense_output, labels, mode)
if self.is_training:
if self.feature_notify % 10 == 0:
print("Selected feature: {}".format(self.selected_timeseries))
self.loss_weights._print_current_weights() #pylint: disable=E1101
self.count_params()
self.feature_notify += 1
return output
def dim_reduction(self, adj_matrix, num_reduce, name_scope,
ortho_penalty, variance_penalty, neg_penalty):
column_dim = int(adj_matrix.shape[-1])
with tf.variable_scope(name_scope):
kernel = tf.get_variable("dim_reduction_kernel", shape=[column_dim, num_reduce],
trainable=True, initializer=self.initializer,
regularizer=tf.contrib.layers.l1_regularizer(scale=0.05)
)
kernel_p = tf.nn.relu(kernel)
AF = tf.tensordot(adj_matrix, kernel_p, axes=[[-1],[0]])
reduced_adj_matrix = tf.transpose(
tf.tensordot(kernel_p, AF, axes=[[0],[1]]), #num_reduce*batch*num_reduce
perm=[1,0,2], name="reduced_adj")
if self.tf_summary:
tf.summary.image("dim_reduction_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
tf.summary.image("dim_reduction_kernel_p", tf.expand_dims(
tf.expand_dims(kernel_p, axis=0),
axis=-1))
gram_matrix = tf.matmul(kernel_p, kernel_p, transpose_a=True)
diag_elements = tf.diag_part(gram_matrix)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(diag_elements, zero)
if ortho_penalty!=0:
ortho_loss_matrix = tf.square(gram_matrix - tf.diag(diag_elements))
ortho_loss = tf.multiply(ortho_penalty, tf.reduce_sum(ortho_loss_matrix), name="ortho_penalty")
self.losses.append(ortho_loss)
if variance_penalty!=0:
_ , variance = tf.nn.moments(tf.boolean_mask(diag_elements,mask), axes=[0])
variance_loss = tf.multiply(variance_penalty, variance, name="variance_penalty")
self.losses.append(variance_loss)
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return reduced_adj_matrix
def gnn_conv(self, prev_output, adj_matrix, name_scope, neg_penalty): #I+c*A*X*W,X0=I
feature_dim = int(adj_matrix.shape[-1])
eye = tf.eye(feature_dim)
with tf.variable_scope(name_scope):
kernel = tf.get_variable("gnn_kernel",
shape=[feature_dim,feature_dim], trainable=True, initializer=self.initializer)
if prev_output is None:
AXW = tf.tensordot(adj_matrix, kernel, [[-1],[0]])
else:
XW = tf.tensordot(prev_output, kernel, [[-1],[0]]) #batch*feature_dim*feature_dim
AXW = tf.matmul(adj_matrix, XW)
I_cAXW = eye+self.c*AXW
y_relu = tf.nn.relu(I_cAXW)
col_mean = tf.tile(tf.reduce_mean(y_relu, axis=-2, keepdims=True)+1e-6,[1,feature_dim,1])
y_norm = tf.divide(y_relu, col_mean)
output = tf.nn.softplus(y_norm, name="gnn_output")
if self.tf_summary:
tf.summary.image("gnn_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return output
def dense_layers(self, input_flat, l2_penalty, name_scope="dense_layers"):
with tf.variable_scope(name_scope):
output_layer = tf.layers.Dense(2, name="output_layer")
logits = output_layer(input_flat)
kernel_var = output_layer.trainable_variables[0]
if l2_penalty != 0:
dense_kernel = output_layer.trainable_variables[0].read_value()
l2_loss = tf.multiply(l2_penalty, tf.nn.l2_loss(dense_kernel), name="l2_penalty")
self.losses.append(l2_loss)
return logits
def generate_output(self, logits, labels, mode:tf.estimator.ModeKeys):
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Define loss function
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
self.losses.append(
tf.multiply(self.loss_weights.cross_entropy,
tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits),
name="cross_entropy_loss")
)
# Define loss function
loss = tf.reduce_sum(self.losses, name="total_loss")
for loss_scalar in self.losses:
tf.summary.scalar(loss_scalar.name, loss_scalar, family="loss")
# Define accuracy metric
eval_metric_ops = {
"metrics/accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TP": tf.metrics.true_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TN": tf.metrics.true_negatives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FP": tf.metrics.false_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FN": tf.metrics.false_negatives(
labels=labels, predictions=predictions["classes"]),
"metrics/precision": tf.metrics.precision(
labels=labels, predictions=predictions["classes"]),
"metrics/recall": tf.metrics.recall(
labels=labels, predictions=predictions["classes"])
}
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def count_params(self):
"print number of trainable variables"
size = lambda v: reduce(lambda x, y: x*y, v.get_shape().as_list())
n = sum(size(v) for v in tf.trainable_variables())
print("Model size: {}K".format(n / 1000))
| loss_weights | identifier_name |
GroupINN.py | from . import tf, arguments, argparse
from functools import reduce
class gcn_classification_net:
class loss_weights:
cross_entropy = 1.0
neg_penalty_reduce = 0.1
neg_penalty_gnn = 0.2
ortho_penalty_p = 0.2
ortho_penalty_n = 0.2
variance_penalty_p = 0.3
variance_penalty_n = 0.5
l2_penalty = 2e-3
@classmethod
def update_parser_argument(cls, parser: argparse.ArgumentParser):
args, _ = parser.parse_known_args()
parser.set_defaults(selected_model="gcn_classification_net")
print("===> Selected model: GroupINN")
group = parser.add_argument_group(title="GroupINN arguments")
group.add_argument("--dropout_rate", default=0, type=float, help="(default: %(default)s)")
group.add_argument("--c", default=0.85, type=float, help="(default: %(default)s)")
group.add_argument("--feature_reduction", default=5, type=int, help="(default: %(default)s)")
group.add_argument("--learning_rate", default=0.001, help="(default: %(default)s)")
arguments.add_loss_weights_argument(parser, cls.loss_weights, cls.__name__)
return parser
def __init__(self):
self.feature_notify = 0
def runtime_init(self, features, labels, mode):
|
def model_fn(self, features, labels,
mode:tf.estimator.ModeKeys, params):
"""
features: batch_features from input_fn
labels: batch_labels from input_fn
mode: An instance of tf.estimator.ModeKeys
params: Additional configuration
"""
self.runtime_init(features, labels, mode)
# Load parameters
self.num_features = params["args"].feature_reduction
self.c = params["args"].c
self.dropout_rate = params["args"].dropout_rate
self.selected_timeseries = params["args"].selected_timeseries
self.learning_rate = params["args"].learning_rate
self.tf_summary = (not params["args"].no_tensorboard)
# Construct network
s_feature = features[self.selected_timeseries]
s_feature_p = s_feature[0]
s_feature_n = s_feature[1]
num_columns = int(s_feature_p.shape[-1])
self.initializer = tf.initializers.random_uniform(0, 0.5/self.num_features)
p_reduce = self.dim_reduction(s_feature_p, self.num_features, "reduction_p",
self.loss_weights.ortho_penalty_p, self.loss_weights.variance_penalty_p, self.loss_weights.neg_penalty_reduce)
p_conv1 = self.gnn_conv(None, p_reduce, "conv1_p", self.loss_weights.neg_penalty_gnn)
p_conv2 = self.gnn_conv(p_conv1, p_reduce, "conv2_p", self.loss_weights.neg_penalty_gnn)
p_conv3 = self.gnn_conv(p_conv2, p_reduce, "conv3_p", self.loss_weights.neg_penalty_gnn)
n_reduce = self.dim_reduction(s_feature_n, self.num_features, "reduction_n",
self.loss_weights.ortho_penalty_n, self.loss_weights.variance_penalty_n, self.loss_weights.neg_penalty_reduce)
n_conv1 = self.gnn_conv(None, n_reduce, "conv1_n", self.loss_weights.neg_penalty_gnn)
n_conv2 = self.gnn_conv(n_conv1, n_reduce, "conv2_n", self.loss_weights.neg_penalty_gnn)
n_conv3 = self.gnn_conv(n_conv2, n_reduce, "conv3_n", self.loss_weights.neg_penalty_gnn)
conv_concat = tf.reshape(tf.concat([p_conv3,n_conv3], -1), [-1, 2*self.num_features**2])
dense_output = self.dense_layers(conv_concat, self.loss_weights.l2_penalty)
output = self.generate_output(dense_output, labels, mode)
if self.is_training:
if self.feature_notify % 10 == 0:
print("Selected feature: {}".format(self.selected_timeseries))
self.loss_weights._print_current_weights() #pylint: disable=E1101
self.count_params()
self.feature_notify += 1
return output
def dim_reduction(self, adj_matrix, num_reduce, name_scope,
ortho_penalty, variance_penalty, neg_penalty):
column_dim = int(adj_matrix.shape[-1])
with tf.variable_scope(name_scope):
kernel = tf.get_variable("dim_reduction_kernel", shape=[column_dim, num_reduce],
trainable=True, initializer=self.initializer,
regularizer=tf.contrib.layers.l1_regularizer(scale=0.05)
)
kernel_p = tf.nn.relu(kernel)
AF = tf.tensordot(adj_matrix, kernel_p, axes=[[-1],[0]])
reduced_adj_matrix = tf.transpose(
tf.tensordot(kernel_p, AF, axes=[[0],[1]]), #num_reduce*batch*num_reduce
perm=[1,0,2], name="reduced_adj")
if self.tf_summary:
tf.summary.image("dim_reduction_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
tf.summary.image("dim_reduction_kernel_p", tf.expand_dims(
tf.expand_dims(kernel_p, axis=0),
axis=-1))
gram_matrix = tf.matmul(kernel_p, kernel_p, transpose_a=True)
diag_elements = tf.diag_part(gram_matrix)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(diag_elements, zero)
if ortho_penalty!=0:
ortho_loss_matrix = tf.square(gram_matrix - tf.diag(diag_elements))
ortho_loss = tf.multiply(ortho_penalty, tf.reduce_sum(ortho_loss_matrix), name="ortho_penalty")
self.losses.append(ortho_loss)
if variance_penalty!=0:
_ , variance = tf.nn.moments(tf.boolean_mask(diag_elements,mask), axes=[0])
variance_loss = tf.multiply(variance_penalty, variance, name="variance_penalty")
self.losses.append(variance_loss)
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return reduced_adj_matrix
def gnn_conv(self, prev_output, adj_matrix, name_scope, neg_penalty): #I+c*A*X*W,X0=I
feature_dim = int(adj_matrix.shape[-1])
eye = tf.eye(feature_dim)
with tf.variable_scope(name_scope):
kernel = tf.get_variable("gnn_kernel",
shape=[feature_dim,feature_dim], trainable=True, initializer=self.initializer)
if prev_output is None:
AXW = tf.tensordot(adj_matrix, kernel, [[-1],[0]])
else:
XW = tf.tensordot(prev_output, kernel, [[-1],[0]]) #batch*feature_dim*feature_dim
AXW = tf.matmul(adj_matrix, XW)
I_cAXW = eye+self.c*AXW
y_relu = tf.nn.relu(I_cAXW)
col_mean = tf.tile(tf.reduce_mean(y_relu, axis=-2, keepdims=True)+1e-6,[1,feature_dim,1])
y_norm = tf.divide(y_relu, col_mean)
output = tf.nn.softplus(y_norm, name="gnn_output")
if self.tf_summary:
tf.summary.image("gnn_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return output
def dense_layers(self, input_flat, l2_penalty, name_scope="dense_layers"):
with tf.variable_scope(name_scope):
output_layer = tf.layers.Dense(2, name="output_layer")
logits = output_layer(input_flat)
kernel_var = output_layer.trainable_variables[0]
if l2_penalty != 0:
dense_kernel = output_layer.trainable_variables[0].read_value()
l2_loss = tf.multiply(l2_penalty, tf.nn.l2_loss(dense_kernel), name="l2_penalty")
self.losses.append(l2_loss)
return logits
def generate_output(self, logits, labels, mode:tf.estimator.ModeKeys):
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Define loss function
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
self.losses.append(
tf.multiply(self.loss_weights.cross_entropy,
tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits),
name="cross_entropy_loss")
)
# Define loss function
loss = tf.reduce_sum(self.losses, name="total_loss")
for loss_scalar in self.losses:
tf.summary.scalar(loss_scalar.name, loss_scalar, family="loss")
# Define accuracy metric
eval_metric_ops = {
"metrics/accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TP": tf.metrics.true_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TN": tf.metrics.true_negatives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FP": tf.metrics.false_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FN": tf.metrics.false_negatives(
labels=labels, predictions=predictions["classes"]),
"metrics/precision": tf.metrics.precision(
labels=labels, predictions=predictions["classes"]),
"metrics/recall": tf.metrics.recall(
labels=labels, predictions=predictions["classes"])
}
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def count_params(self):
"print number of trainable variables"
size = lambda v: reduce(lambda x, y: x*y, v.get_shape().as_list())
n = sum(size(v) for v in tf.trainable_variables())
print("Model size: {}K".format(n / 1000))
| self.losses = []
self.is_training = (mode==tf.estimator.ModeKeys.TRAIN) | identifier_body |
GroupINN.py | from . import tf, arguments, argparse
from functools import reduce
class gcn_classification_net:
class loss_weights:
cross_entropy = 1.0
neg_penalty_reduce = 0.1
neg_penalty_gnn = 0.2
ortho_penalty_p = 0.2
ortho_penalty_n = 0.2
variance_penalty_p = 0.3
variance_penalty_n = 0.5
l2_penalty = 2e-3
@classmethod
def update_parser_argument(cls, parser: argparse.ArgumentParser):
args, _ = parser.parse_known_args()
parser.set_defaults(selected_model="gcn_classification_net")
print("===> Selected model: GroupINN")
group = parser.add_argument_group(title="GroupINN arguments")
group.add_argument("--dropout_rate", default=0, type=float, help="(default: %(default)s)")
group.add_argument("--c", default=0.85, type=float, help="(default: %(default)s)")
group.add_argument("--feature_reduction", default=5, type=int, help="(default: %(default)s)")
group.add_argument("--learning_rate", default=0.001, help="(default: %(default)s)")
arguments.add_loss_weights_argument(parser, cls.loss_weights, cls.__name__)
return parser
def __init__(self):
self.feature_notify = 0
def runtime_init(self, features, labels, mode):
self.losses = []
self.is_training = (mode==tf.estimator.ModeKeys.TRAIN)
def model_fn(self, features, labels,
mode:tf.estimator.ModeKeys, params):
"""
features: batch_features from input_fn
labels: batch_labels from input_fn
mode: An instance of tf.estimator.ModeKeys
params: Additional configuration
"""
self.runtime_init(features, labels, mode)
# Load parameters
self.num_features = params["args"].feature_reduction
self.c = params["args"].c
self.dropout_rate = params["args"].dropout_rate
self.selected_timeseries = params["args"].selected_timeseries
self.learning_rate = params["args"].learning_rate
self.tf_summary = (not params["args"].no_tensorboard)
# Construct network
s_feature = features[self.selected_timeseries]
s_feature_p = s_feature[0]
s_feature_n = s_feature[1]
num_columns = int(s_feature_p.shape[-1])
self.initializer = tf.initializers.random_uniform(0, 0.5/self.num_features)
p_reduce = self.dim_reduction(s_feature_p, self.num_features, "reduction_p",
self.loss_weights.ortho_penalty_p, self.loss_weights.variance_penalty_p, self.loss_weights.neg_penalty_reduce)
p_conv1 = self.gnn_conv(None, p_reduce, "conv1_p", self.loss_weights.neg_penalty_gnn)
p_conv2 = self.gnn_conv(p_conv1, p_reduce, "conv2_p", self.loss_weights.neg_penalty_gnn)
p_conv3 = self.gnn_conv(p_conv2, p_reduce, "conv3_p", self.loss_weights.neg_penalty_gnn)
n_reduce = self.dim_reduction(s_feature_n, self.num_features, "reduction_n",
self.loss_weights.ortho_penalty_n, self.loss_weights.variance_penalty_n, self.loss_weights.neg_penalty_reduce)
n_conv1 = self.gnn_conv(None, n_reduce, "conv1_n", self.loss_weights.neg_penalty_gnn)
n_conv2 = self.gnn_conv(n_conv1, n_reduce, "conv2_n", self.loss_weights.neg_penalty_gnn)
n_conv3 = self.gnn_conv(n_conv2, n_reduce, "conv3_n", self.loss_weights.neg_penalty_gnn)
conv_concat = tf.reshape(tf.concat([p_conv3,n_conv3], -1), [-1, 2*self.num_features**2])
dense_output = self.dense_layers(conv_concat, self.loss_weights.l2_penalty)
output = self.generate_output(dense_output, labels, mode)
if self.is_training:
if self.feature_notify % 10 == 0:
print("Selected feature: {}".format(self.selected_timeseries))
self.loss_weights._print_current_weights() #pylint: disable=E1101
self.count_params()
self.feature_notify += 1
return output
def dim_reduction(self, adj_matrix, num_reduce, name_scope,
ortho_penalty, variance_penalty, neg_penalty):
column_dim = int(adj_matrix.shape[-1])
with tf.variable_scope(name_scope):
kernel = tf.get_variable("dim_reduction_kernel", shape=[column_dim, num_reduce],
trainable=True, initializer=self.initializer,
regularizer=tf.contrib.layers.l1_regularizer(scale=0.05)
)
kernel_p = tf.nn.relu(kernel)
AF = tf.tensordot(adj_matrix, kernel_p, axes=[[-1],[0]])
reduced_adj_matrix = tf.transpose(
tf.tensordot(kernel_p, AF, axes=[[0],[1]]), #num_reduce*batch*num_reduce
perm=[1,0,2], name="reduced_adj")
if self.tf_summary:
tf.summary.image("dim_reduction_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
tf.summary.image("dim_reduction_kernel_p", tf.expand_dims(
tf.expand_dims(kernel_p, axis=0),
axis=-1))
gram_matrix = tf.matmul(kernel_p, kernel_p, transpose_a=True)
diag_elements = tf.diag_part(gram_matrix)
zero = tf.constant(0, dtype=tf.float32)
mask = tf.not_equal(diag_elements, zero)
if ortho_penalty!=0:
ortho_loss_matrix = tf.square(gram_matrix - tf.diag(diag_elements))
ortho_loss = tf.multiply(ortho_penalty, tf.reduce_sum(ortho_loss_matrix), name="ortho_penalty")
self.losses.append(ortho_loss)
if variance_penalty!=0:
_ , variance = tf.nn.moments(tf.boolean_mask(diag_elements,mask), axes=[0])
variance_loss = tf.multiply(variance_penalty, variance, name="variance_penalty")
self.losses.append(variance_loss)
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return reduced_adj_matrix
def gnn_conv(self, prev_output, adj_matrix, name_scope, neg_penalty): #I+c*A*X*W,X0=I
feature_dim = int(adj_matrix.shape[-1])
eye = tf.eye(feature_dim)
with tf.variable_scope(name_scope):
kernel = tf.get_variable("gnn_kernel",
shape=[feature_dim,feature_dim], trainable=True, initializer=self.initializer)
if prev_output is None:
AXW = tf.tensordot(adj_matrix, kernel, [[-1],[0]])
else:
XW = tf.tensordot(prev_output, kernel, [[-1],[0]]) #batch*feature_dim*feature_dim
AXW = tf.matmul(adj_matrix, XW)
I_cAXW = eye+self.c*AXW
y_relu = tf.nn.relu(I_cAXW)
col_mean = tf.tile(tf.reduce_mean(y_relu, axis=-2, keepdims=True)+1e-6,[1,feature_dim,1])
y_norm = tf.divide(y_relu, col_mean)
output = tf.nn.softplus(y_norm, name="gnn_output")
if self.tf_summary:
tf.summary.image("gnn_kernel", tf.expand_dims(
tf.expand_dims(kernel, axis=0),
axis=-1))
if neg_penalty!=0:
neg_loss = tf.multiply(neg_penalty, tf.reduce_sum(tf.nn.relu(tf.constant(1e-6)-kernel)), name="negative_penalty")
self.losses.append(neg_loss)
return output
def dense_layers(self, input_flat, l2_penalty, name_scope="dense_layers"):
with tf.variable_scope(name_scope):
output_layer = tf.layers.Dense(2, name="output_layer")
logits = output_layer(input_flat)
kernel_var = output_layer.trainable_variables[0]
if l2_penalty != 0:
dense_kernel = output_layer.trainable_variables[0].read_value()
l2_loss = tf.multiply(l2_penalty, tf.nn.l2_loss(dense_kernel), name="l2_penalty")
self.losses.append(l2_loss)
return logits
def generate_output(self, logits, labels, mode:tf.estimator.ModeKeys):
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Define loss function
onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
self.losses.append(
tf.multiply(self.loss_weights.cross_entropy,
tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits),
name="cross_entropy_loss")
)
# Define loss function
loss = tf.reduce_sum(self.losses, name="total_loss")
for loss_scalar in self.losses:
tf.summary.scalar(loss_scalar.name, loss_scalar, family="loss")
# Define accuracy metric
eval_metric_ops = {
"metrics/accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TP": tf.metrics.true_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/TN": tf.metrics.true_negatives( | "metrics/precision": tf.metrics.precision(
labels=labels, predictions=predictions["classes"]),
"metrics/recall": tf.metrics.recall(
labels=labels, predictions=predictions["classes"])
}
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops)
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def count_params(self):
"print number of trainable variables"
size = lambda v: reduce(lambda x, y: x*y, v.get_shape().as_list())
n = sum(size(v) for v in tf.trainable_variables())
print("Model size: {}K".format(n / 1000)) | labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FP": tf.metrics.false_positives(
labels=labels, predictions=predictions["classes"]),
"confusion_matrix/FN": tf.metrics.false_negatives(
labels=labels, predictions=predictions["classes"]), | random_line_split |
plot_TL_results.py | import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from scipy.stats import pearsonr, wilcoxon, mannwhitneyu
import scipy.stats as ss
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def loewner(A, B):
"""
Return true, if A>=B
where >= is loewner order (matrix comparison)
if A>=B, A spans over B
used to detect poor fits of coregionalization
if [coregionalization matrix] > [measured covariance]is broken,
covariance matrix is overestimated / fitted poorly
"""
ret_list = []
for b in B:
D = (A-b).reshape((2,2))
det = np.linalg.det(D)
ret = 1
if det < 0 or D[0,0] < 0:
ret = 0
ret_list.append(ret)
return ret_list
## compare each number of secondary initpts
## to baseline with wilcoxon 2 sample signed rank test to see
## when TL is faster than the baseline, also collect the lowest,
## highest, median and mean expected improvement and their secondary initpts
def | (b_times, r_times, N = None, alpha = 0.1, method = mannwhitneyu):
"""
do wilxocon test to see if b_times - r_times median is less than 0
H0: it is
"""
if N is None:
N = min([len(b_times), len(r_times)])*5
b = np.random.choice(b_times, size = N, replace = True)
r = np.random.choice(r_times, size = N, replace = True)
#diff = b-r
#diff = diff[diff != 0]
# is the median of the differences b-r less than zero
test = method(b,r, alternative = 'less')
if test[1] < alpha:
# reject
return False
else:
return True
def loss_function_table(c_speed, name):
"""
Sample n convergence speed results from baseline (b_times)
and experiment with k secondary points (r_times)
With wilcoxon 2 sample signed rank test determine,
if TL is faster than the baseline with that many secondary initpts
return true
else false
"""
initpts_list = np.unique(c_speed[:,0]).reshape(-1,1)
initpts_list = initpts_list[initpts_list != 0] # remove baselines
b_times = c_speed[c_speed[:,0] == 0,1]
b_mean = np.mean(b_times)
faster = [] # which number of secondary initpts are faster than the baseline
for initpts in initpts_list:
r_times = c_speed[c_speed[:,0] == initpts, 1]
#median_ixd = np.argsort(r_times)[len(r_times)//2]
# add initpts, mean (loss function), wx test (indicator loss function) if faster than baseline
faster.append([initpts, round(np.mean(r_times)/b_mean, 2), indicator_loss(b_times, r_times)])
faster = np.array(faster).reshape(-1, 3)
ret = pd.DataFrame({'experiment':name,
'secondary_initpts':faster[:,0],
'mean_loss':faster[:,1],
'indicator_loss':faster[:,2]})
# normalize mean acquisition time
# loss function minima ->
# plot loss function minima against number of secondary initpts
return ret
## plot convergence and collect loss function table
def plot_TL_convergence(filename, experiment_folders, baseline_folders):
"""
Plot for list of TL experiments:
convergence speed to 0.1 kcal/mol in
- BO iterations and CPU time
- mean of both (statistical expected value)
- linear trend
"""
cputime_max = 0
N = len(experiment_folders)
fig, axs = plt.subplots(2,N,
figsize = (5*N,10),
sharey = 'row')
SMALL_SIZE = 15
MEDIUM_SIZE = 20
LARGE_SIZE = 25
tot_loss_table = None
for i in range(N):
experiment = experiment_folders[i].copy()
baseline = baseline_folders[i].copy()
explist = baseline
for exp in experiment:
explist.append(exp)
convergence_iterations = []
convergence_times = []
for exp in explist:
if len(exp['initpts'])>1:
secondary_initpts = int(exp['initpts'][1])
else:
secondary_initpts = 0
# convergence by iteration
convergence_iter = exp['iterations_to_gmp_convergence'][5]
convergence_iterations.append([secondary_initpts,convergence_iter])
# convergence by cpu time
convergence_time = exp['totaltime_to_gmp_convergence'][5]
convergence_times.append([secondary_initpts, convergence_time])
# plot
convergence_iterations = np.array(convergence_iterations, dtype = float)
axs[0, i].scatter(convergence_iterations[:,0],
convergence_iterations[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
# linear fit
raw_rows = convergence_iterations
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
reg = LinearRegression().fit(x_train, y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
y = reg.predict(x)
axs[0, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
# plot means
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[0,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[0,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[0,i].legend(fontsize = SMALL_SIZE)
###
convergence_times = np.array(convergence_times, dtype = float)
axs[1, i].scatter(convergence_times[:,0],
convergence_times[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
### linear fit
raw_rows = convergence_times
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
clean_rows = clean_rows.reshape(-1,2)
#outliers = clean_rows[clean_rows[:,1] > cputime_max,:]
# outlier if more than 2 stds off the mean
outlier_idx = []
for row in clean_rows:
initpts = row[0]
val = row[1]
obs = clean_rows[clean_rows[:,0] == initpts,:]
#obs = obs[obs != row]
m = np.mean(obs)
sd = np.std(obs)
if (val - m) / sd > 2.5: # z-score - assuming normal
# distribution only 0.5% of data should be at least this far
outlier_idx.append(True)
else:
outlier_idx.append(False)
outliers = clean_rows[outlier_idx, :]
#clean_rows = clean_rows[clean_rows[:,1] <= cputime_max, :]
clean_rows = clean_rows[np.logical_not(outlier_idx),:]
if max(clean_rows[:,1]) > cputime_max:
cputime_max = max(clean_rows[:,1])
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
degree=1
polyreg=make_pipeline(PolynomialFeatures(degree),LinearRegression())
polyreg.fit(x_train,y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
axs[0,i].set_xticks(x[::2])
y = polyreg.predict(x)
axs[1, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
axs[1,i].set_xticks(x[::2])
outlier_labelled = False
for outlier in outliers:
if outlier_labelled:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black')
else:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black',
label = 'outlier')
outlier_labelled = True
axs[1,i].annotate('{:.0f}'.format(outlier[1]),
[outlier[0],cputime_max*1.1], rotation = 270,
fontsize = SMALL_SIZE)
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[1,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[1,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[1,i].legend(fontsize = SMALL_SIZE)
expname = experiment_folders[i][0]['name'].split('_')[0]
title = f'{i+1}a) {expname}'
axs[0,i].set_title(title, loc = 'left', fontsize = LARGE_SIZE)
title = f'{i+1}b) {expname}'
axs[1,i].set_title(title, loc = 'left', fontsize = LARGE_SIZE)
# collect table of loss function values
c_speed = clean_rows
loss_table = loss_function_table(c_speed, expname)
if tot_loss_table is None:
tot_loss_table = loss_table
else:
tot_loss_table = pd.concat([tot_loss_table,loss_table], axis = 0)
axs[0,0].set_ylabel('BO iterations to GMP convergence', fontsize = SMALL_SIZE)
axs[1,0].set_ylabel('CPU time to GMP convergence', fontsize = SMALL_SIZE)
axs[1,0].set_ylim(-0.05*cputime_max,1.4*cputime_max)
for ax in axs[1,:]:
ax.set_xlabel('secondary initpts', fontsize = SMALL_SIZE)
for ax in axs.flatten():
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params('x',labelrotation = 40)
ax.tick_params(axis = 'both',
width = 3, length = 4,
labelsize = SMALL_SIZE)
plt.savefig(filename)
return tot_loss_table
| indicator_loss | identifier_name |
plot_TL_results.py | import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from scipy.stats import pearsonr, wilcoxon, mannwhitneyu
import scipy.stats as ss
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def loewner(A, B):
"""
Return true, if A>=B
where >= is loewner order (matrix comparison)
if A>=B, A spans over B
used to detect poor fits of coregionalization
if [coregionalization matrix] > [measured covariance]is broken,
covariance matrix is overestimated / fitted poorly
"""
ret_list = []
for b in B:
D = (A-b).reshape((2,2))
det = np.linalg.det(D)
ret = 1
if det < 0 or D[0,0] < 0:
ret = 0
ret_list.append(ret)
return ret_list
## compare each number of secondary initpts
## to baseline with wilcoxon 2 sample signed rank test to see
## when TL is faster than the baseline, also collect the lowest,
## highest, median and mean expected improvement and their secondary initpts
def indicator_loss(b_times, r_times, N = None, alpha = 0.1, method = mannwhitneyu):
"""
do wilxocon test to see if b_times - r_times median is less than 0
H0: it is
"""
if N is None:
N = min([len(b_times), len(r_times)])*5
b = np.random.choice(b_times, size = N, replace = True)
r = np.random.choice(r_times, size = N, replace = True)
#diff = b-r
#diff = diff[diff != 0]
# is the median of the differences b-r less than zero
test = method(b,r, alternative = 'less')
if test[1] < alpha:
# reject
return False
else:
return True
def loss_function_table(c_speed, name):
"""
Sample n convergence speed results from baseline (b_times)
and experiment with k secondary points (r_times)
With wilcoxon 2 sample signed rank test determine,
if TL is faster than the baseline with that many secondary initpts
return true
else false
"""
initpts_list = np.unique(c_speed[:,0]).reshape(-1,1)
initpts_list = initpts_list[initpts_list != 0] # remove baselines
b_times = c_speed[c_speed[:,0] == 0,1]
b_mean = np.mean(b_times)
faster = [] # which number of secondary initpts are faster than the baseline
for initpts in initpts_list:
r_times = c_speed[c_speed[:,0] == initpts, 1]
#median_ixd = np.argsort(r_times)[len(r_times)//2]
# add initpts, mean (loss function), wx test (indicator loss function) if faster than baseline
faster.append([initpts, round(np.mean(r_times)/b_mean, 2), indicator_loss(b_times, r_times)])
faster = np.array(faster).reshape(-1, 3)
ret = pd.DataFrame({'experiment':name,
'secondary_initpts':faster[:,0],
'mean_loss':faster[:,1],
'indicator_loss':faster[:,2]})
# normalize mean acquisition time
# loss function minima ->
# plot loss function minima against number of secondary initpts
return ret
## plot convergence and collect loss function table
def plot_TL_convergence(filename, experiment_folders, baseline_folders):
"""
Plot for list of TL experiments:
convergence speed to 0.1 kcal/mol in
- BO iterations and CPU time
- mean of both (statistical expected value)
- linear trend
"""
cputime_max = 0
N = len(experiment_folders)
fig, axs = plt.subplots(2,N,
figsize = (5*N,10),
sharey = 'row')
SMALL_SIZE = 15
MEDIUM_SIZE = 20
LARGE_SIZE = 25
tot_loss_table = None
for i in range(N):
|
axs[0,0].set_ylabel('BO iterations to GMP convergence', fontsize = SMALL_SIZE)
axs[1,0].set_ylabel('CPU time to GMP convergence', fontsize = SMALL_SIZE)
axs[1,0].set_ylim(-0.05*cputime_max,1.4*cputime_max)
for ax in axs[1,:]:
ax.set_xlabel('secondary initpts', fontsize = SMALL_SIZE)
for ax in axs.flatten():
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params('x',labelrotation = 40)
ax.tick_params(axis = 'both',
width = 3, length = 4,
labelsize = SMALL_SIZE)
plt.savefig(filename)
return tot_loss_table
| experiment = experiment_folders[i].copy()
baseline = baseline_folders[i].copy()
explist = baseline
for exp in experiment:
explist.append(exp)
convergence_iterations = []
convergence_times = []
for exp in explist:
if len(exp['initpts'])>1:
secondary_initpts = int(exp['initpts'][1])
else:
secondary_initpts = 0
# convergence by iteration
convergence_iter = exp['iterations_to_gmp_convergence'][5]
convergence_iterations.append([secondary_initpts,convergence_iter])
# convergence by cpu time
convergence_time = exp['totaltime_to_gmp_convergence'][5]
convergence_times.append([secondary_initpts, convergence_time])
# plot
convergence_iterations = np.array(convergence_iterations, dtype = float)
axs[0, i].scatter(convergence_iterations[:,0],
convergence_iterations[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
# linear fit
raw_rows = convergence_iterations
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
reg = LinearRegression().fit(x_train, y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
y = reg.predict(x)
axs[0, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
# plot means
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[0,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[0,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[0,i].legend(fontsize = SMALL_SIZE)
###
convergence_times = np.array(convergence_times, dtype = float)
axs[1, i].scatter(convergence_times[:,0],
convergence_times[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
### linear fit
raw_rows = convergence_times
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
clean_rows = clean_rows.reshape(-1,2)
#outliers = clean_rows[clean_rows[:,1] > cputime_max,:]
# outlier if more than 2 stds off the mean
outlier_idx = []
for row in clean_rows:
initpts = row[0]
val = row[1]
obs = clean_rows[clean_rows[:,0] == initpts,:]
#obs = obs[obs != row]
m = np.mean(obs)
sd = np.std(obs)
if (val - m) / sd > 2.5: # z-score - assuming normal
# distribution only 0.5% of data should be at least this far
outlier_idx.append(True)
else:
outlier_idx.append(False)
outliers = clean_rows[outlier_idx, :]
#clean_rows = clean_rows[clean_rows[:,1] <= cputime_max, :]
clean_rows = clean_rows[np.logical_not(outlier_idx),:]
if max(clean_rows[:,1]) > cputime_max:
cputime_max = max(clean_rows[:,1])
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
degree=1
polyreg=make_pipeline(PolynomialFeatures(degree),LinearRegression())
polyreg.fit(x_train,y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
axs[0,i].set_xticks(x[::2])
y = polyreg.predict(x)
axs[1, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
axs[1,i].set_xticks(x[::2])
outlier_labelled = False
for outlier in outliers:
if outlier_labelled:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black')
else:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black',
label = 'outlier')
outlier_labelled = True
axs[1,i].annotate('{:.0f}'.format(outlier[1]),
[outlier[0],cputime_max*1.1], rotation = 270,
fontsize = SMALL_SIZE)
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[1,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[1,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[1,i].legend(fontsize = SMALL_SIZE)
expname = experiment_folders[i][0]['name'].split('_')[0]
title = f'{i+1}a) {expname}'
axs[0,i].set_title(title, loc = 'left', fontsize = LARGE_SIZE)
title = f'{i+1}b) {expname}'
axs[1,i].set_title(title, loc = 'left', fontsize = LARGE_SIZE)
# collect table of loss function values
c_speed = clean_rows
loss_table = loss_function_table(c_speed, expname)
if tot_loss_table is None:
tot_loss_table = loss_table
else:
tot_loss_table = pd.concat([tot_loss_table,loss_table], axis = 0) | conditional_block |
plot_TL_results.py | import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from scipy.stats import pearsonr, wilcoxon, mannwhitneyu
import scipy.stats as ss
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def loewner(A, B):
|
## compare each number of secondary initpts
## to baseline with wilcoxon 2 sample signed rank test to see
## when TL is faster than the baseline, also collect the lowest,
## highest, median and mean expected improvement and their secondary initpts
def indicator_loss(b_times, r_times, N = None, alpha = 0.1, method = mannwhitneyu):
"""
do wilxocon test to see if b_times - r_times median is less than 0
H0: it is
"""
if N is None:
N = min([len(b_times), len(r_times)])*5
b = np.random.choice(b_times, size = N, replace = True)
r = np.random.choice(r_times, size = N, replace = True)
#diff = b-r
#diff = diff[diff != 0]
# is the median of the differences b-r less than zero
test = method(b,r, alternative = 'less')
if test[1] < alpha:
# reject
return False
else:
return True
def loss_function_table(c_speed, name):
"""
Sample n convergence speed results from baseline (b_times)
and experiment with k secondary points (r_times)
With wilcoxon 2 sample signed rank test determine,
if TL is faster than the baseline with that many secondary initpts
return true
else false
"""
initpts_list = np.unique(c_speed[:,0]).reshape(-1,1)
initpts_list = initpts_list[initpts_list != 0] # remove baselines
b_times = c_speed[c_speed[:,0] == 0,1]
b_mean = np.mean(b_times)
faster = [] # which number of secondary initpts are faster than the baseline
for initpts in initpts_list:
r_times = c_speed[c_speed[:,0] == initpts, 1]
#median_ixd = np.argsort(r_times)[len(r_times)//2]
# add initpts, mean (loss function), wx test (indicator loss function) if faster than baseline
faster.append([initpts, round(np.mean(r_times)/b_mean, 2), indicator_loss(b_times, r_times)])
faster = np.array(faster).reshape(-1, 3)
ret = pd.DataFrame({'experiment':name,
'secondary_initpts':faster[:,0],
'mean_loss':faster[:,1],
'indicator_loss':faster[:,2]})
# normalize mean acquisition time
# loss function minima ->
# plot loss function minima against number of secondary initpts
return ret
## plot convergence and collect loss function table
def plot_TL_convergence(filename, experiment_folders, baseline_folders):
"""
Plot for list of TL experiments:
convergence speed to 0.1 kcal/mol in
- BO iterations and CPU time
- mean of both (statistical expected value)
- linear trend
"""
cputime_max = 0
N = len(experiment_folders)
fig, axs = plt.subplots(2,N,
figsize = (5*N,10),
sharey = 'row')
SMALL_SIZE = 15
MEDIUM_SIZE = 20
LARGE_SIZE = 25
tot_loss_table = None
for i in range(N):
experiment = experiment_folders[i].copy()
baseline = baseline_folders[i].copy()
explist = baseline
for exp in experiment:
explist.append(exp)
convergence_iterations = []
convergence_times = []
for exp in explist:
if len(exp['initpts'])>1:
secondary_initpts = int(exp['initpts'][1])
else:
secondary_initpts = 0
# convergence by iteration
convergence_iter = exp['iterations_to_gmp_convergence'][5]
convergence_iterations.append([secondary_initpts,convergence_iter])
# convergence by cpu time
convergence_time = exp['totaltime_to_gmp_convergence'][5]
convergence_times.append([secondary_initpts, convergence_time])
# plot
convergence_iterations = np.array(convergence_iterations, dtype = float)
axs[0, i].scatter(convergence_iterations[:,0],
convergence_iterations[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
# linear fit
raw_rows = convergence_iterations
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
reg = LinearRegression().fit(x_train, y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
y = reg.predict(x)
axs[0, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
# plot means
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[0,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[0,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[0,i].legend(fontsize = SMALL_SIZE)
###
convergence_times = np.array(convergence_times, dtype = float)
axs[1, i].scatter(convergence_times[:,0],
convergence_times[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
### linear fit
raw_rows = convergence_times
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
clean_rows = clean_rows.reshape(-1,2)
#outliers = clean_rows[clean_rows[:,1] > cputime_max,:]
# outlier if more than 2 stds off the mean
outlier_idx = []
for row in clean_rows:
initpts = row[0]
val = row[1]
obs = clean_rows[clean_rows[:,0] == initpts,:]
#obs = obs[obs != row]
m = np.mean(obs)
sd = np.std(obs)
if (val - m) / sd > 2.5: # z-score - assuming normal
# distribution only 0.5% of data should be at least this far
outlier_idx.append(True)
else:
outlier_idx.append(False)
outliers = clean_rows[outlier_idx, :]
#clean_rows = clean_rows[clean_rows[:,1] <= cputime_max, :]
clean_rows = clean_rows[np.logical_not(outlier_idx),:]
if max(clean_rows[:,1]) > cputime_max:
cputime_max = max(clean_rows[:,1])
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
degree=1
polyreg=make_pipeline(PolynomialFeatures(degree),LinearRegression())
polyreg.fit(x_train,y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
axs[0,i].set_xticks(x[::2])
y = polyreg.predict(x)
axs[1, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
axs[1,i].set_xticks(x[::2])
outlier_labelled = False
for outlier in outliers:
if outlier_labelled:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black')
else:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black',
label = 'outlier')
outlier_labelled = True
axs[1,i].annotate('{:.0f}'.format(outlier[1]),
[outlier[0],cputime_max*1.1], rotation = 270,
fontsize = SMALL_SIZE)
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[1,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[1,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[1,i].legend(fontsize = SMALL_SIZE)
expname = experiment_folders[i][0]['name'].split('_')[0]
title = f'{i+1}a) {expname}'
axs[0,i].set_title(title, loc = 'left', fontsize = LARGE_SIZE)
title = f'{i+1}b) {expname}'
axs[1,i].set_title(title, loc = 'left', fontsize = LARGE_SIZE)
# collect table of loss function values
c_speed = clean_rows
loss_table = loss_function_table(c_speed, expname)
if tot_loss_table is None:
tot_loss_table = loss_table
else:
tot_loss_table = pd.concat([tot_loss_table,loss_table], axis = 0)
axs[0,0].set_ylabel('BO iterations to GMP convergence', fontsize = SMALL_SIZE)
axs[1,0].set_ylabel('CPU time to GMP convergence', fontsize = SMALL_SIZE)
axs[1,0].set_ylim(-0.05*cputime_max,1.4*cputime_max)
for ax in axs[1,:]:
ax.set_xlabel('secondary initpts', fontsize = SMALL_SIZE)
for ax in axs.flatten():
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params('x',labelrotation = 40)
ax.tick_params(axis = 'both',
width = 3, length = 4,
labelsize = SMALL_SIZE)
plt.savefig(filename)
return tot_loss_table
| """
Return true, if A>=B
where >= is loewner order (matrix comparison)
if A>=B, A spans over B
used to detect poor fits of coregionalization
if [coregionalization matrix] > [measured covariance]is broken,
covariance matrix is overestimated / fitted poorly
"""
ret_list = []
for b in B:
D = (A-b).reshape((2,2))
det = np.linalg.det(D)
ret = 1
if det < 0 or D[0,0] < 0:
ret = 0
ret_list.append(ret)
return ret_list | identifier_body |
plot_TL_results.py | import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from scipy.stats import pearsonr, wilcoxon, mannwhitneyu
import scipy.stats as ss
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def loewner(A, B):
"""
Return true, if A>=B
where >= is loewner order (matrix comparison)
if A>=B, A spans over B
used to detect poor fits of coregionalization
if [coregionalization matrix] > [measured covariance]is broken,
covariance matrix is overestimated / fitted poorly
"""
ret_list = []
for b in B:
D = (A-b).reshape((2,2))
det = np.linalg.det(D)
ret = 1
if det < 0 or D[0,0] < 0:
ret = 0
ret_list.append(ret)
return ret_list
## compare each number of secondary initpts
## to baseline with wilcoxon 2 sample signed rank test to see
## when TL is faster than the baseline, also collect the lowest,
## highest, median and mean expected improvement and their secondary initpts
def indicator_loss(b_times, r_times, N = None, alpha = 0.1, method = mannwhitneyu):
"""
do wilxocon test to see if b_times - r_times median is less than 0
H0: it is
"""
if N is None:
N = min([len(b_times), len(r_times)])*5
b = np.random.choice(b_times, size = N, replace = True)
r = np.random.choice(r_times, size = N, replace = True)
#diff = b-r
#diff = diff[diff != 0]
# is the median of the differences b-r less than zero
test = method(b,r, alternative = 'less')
if test[1] < alpha:
# reject
return False
else:
return True
def loss_function_table(c_speed, name):
"""
Sample n convergence speed results from baseline (b_times)
and experiment with k secondary points (r_times)
With wilcoxon 2 sample signed rank test determine,
if TL is faster than the baseline with that many secondary initpts
return true
else false
"""
initpts_list = np.unique(c_speed[:,0]).reshape(-1,1)
initpts_list = initpts_list[initpts_list != 0] # remove baselines
b_times = c_speed[c_speed[:,0] == 0,1]
b_mean = np.mean(b_times)
faster = [] # which number of secondary initpts are faster than the baseline
for initpts in initpts_list:
r_times = c_speed[c_speed[:,0] == initpts, 1]
#median_ixd = np.argsort(r_times)[len(r_times)//2]
# add initpts, mean (loss function), wx test (indicator loss function) if faster than baseline
faster.append([initpts, round(np.mean(r_times)/b_mean, 2), indicator_loss(b_times, r_times)])
faster = np.array(faster).reshape(-1, 3)
ret = pd.DataFrame({'experiment':name,
'secondary_initpts':faster[:,0],
'mean_loss':faster[:,1],
'indicator_loss':faster[:,2]})
# normalize mean acquisition time
# loss function minima ->
# plot loss function minima against number of secondary initpts
return ret
## plot convergence and collect loss function table
def plot_TL_convergence(filename, experiment_folders, baseline_folders):
"""
Plot for list of TL experiments:
convergence speed to 0.1 kcal/mol in
- BO iterations and CPU time
- mean of both (statistical expected value)
- linear trend
"""
cputime_max = 0
N = len(experiment_folders)
fig, axs = plt.subplots(2,N,
figsize = (5*N,10),
sharey = 'row')
SMALL_SIZE = 15
MEDIUM_SIZE = 20
LARGE_SIZE = 25
tot_loss_table = None
for i in range(N):
experiment = experiment_folders[i].copy()
baseline = baseline_folders[i].copy()
explist = baseline
for exp in experiment:
explist.append(exp)
convergence_iterations = []
convergence_times = []
for exp in explist:
if len(exp['initpts'])>1:
secondary_initpts = int(exp['initpts'][1])
else:
secondary_initpts = 0
# convergence by iteration
convergence_iter = exp['iterations_to_gmp_convergence'][5]
convergence_iterations.append([secondary_initpts,convergence_iter])
# convergence by cpu time
convergence_time = exp['totaltime_to_gmp_convergence'][5]
convergence_times.append([secondary_initpts, convergence_time])
# plot
convergence_iterations = np.array(convergence_iterations, dtype = float)
axs[0, i].scatter(convergence_iterations[:,0],
convergence_iterations[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
# linear fit
raw_rows = convergence_iterations
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
reg = LinearRegression().fit(x_train, y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
y = reg.predict(x)
axs[0, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
# plot means
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[0,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[0,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[0,i].legend(fontsize = SMALL_SIZE)
###
convergence_times = np.array(convergence_times, dtype = float)
axs[1, i].scatter(convergence_times[:,0],
convergence_times[:,1],
color = 'blue', alpha = 0.5, marker = 'x',
label = 'observation')
### linear fit
raw_rows = convergence_times
clean_rows = raw_rows[np.logical_not(np.logical_or(np.isnan(raw_rows[:,0]),
np.isnan(raw_rows[:,1]))),:]
clean_rows = clean_rows.reshape(-1,2)
#outliers = clean_rows[clean_rows[:,1] > cputime_max,:]
# outlier if more than 2 stds off the mean
outlier_idx = []
for row in clean_rows:
initpts = row[0]
val = row[1]
obs = clean_rows[clean_rows[:,0] == initpts,:]
#obs = obs[obs != row]
m = np.mean(obs)
sd = np.std(obs)
if (val - m) / sd > 2.5: # z-score - assuming normal
# distribution only 0.5% of data should be at least this far
outlier_idx.append(True)
else:
outlier_idx.append(False)
outliers = clean_rows[outlier_idx, :]
#clean_rows = clean_rows[clean_rows[:,1] <= cputime_max, :]
clean_rows = clean_rows[np.logical_not(outlier_idx),:]
if max(clean_rows[:,1]) > cputime_max: | polyreg=make_pipeline(PolynomialFeatures(degree),LinearRegression())
polyreg.fit(x_train,y_train)
x = np.unique(convergence_iterations[:,0]).reshape(-1,1)
axs[0,i].set_xticks(x[::2])
y = polyreg.predict(x)
axs[1, i].plot(x,y, color = 'red', label = 'trend', linewidth = 3)
axs[1,i].set_xticks(x[::2])
outlier_labelled = False
for outlier in outliers:
if outlier_labelled:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black')
else:
axs[1,i].scatter([outlier[0]],[cputime_max*1.1],
marker = 6, color = 'black',
label = 'outlier')
outlier_labelled = True
axs[1,i].annotate('{:.0f}'.format(outlier[1]),
[outlier[0],cputime_max*1.1], rotation = 270,
fontsize = SMALL_SIZE)
mean_labelled = False
for initpts in np.unique(x_train):
mean = np.mean(y_train[x_train == initpts])
if mean_labelled:
axs[1,i].scatter([initpts], [mean], color = 'red', marker = 's')
else:
axs[1,i].scatter([initpts], [mean],
color = 'red', marker = 's',
label = 'mean')
mean_labelled = True
axs[1,i].legend(fontsize = SMALL_SIZE)
expname = experiment_folders[i][0]['name'].split('_')[0]
title = f'{i+1}a) {expname}'
axs[0,i].set_title(title, loc = 'left', fontsize = LARGE_SIZE)
title = f'{i+1}b) {expname}'
axs[1,i].set_title(title, loc = 'left', fontsize = LARGE_SIZE)
# collect table of loss function values
c_speed = clean_rows
loss_table = loss_function_table(c_speed, expname)
if tot_loss_table is None:
tot_loss_table = loss_table
else:
tot_loss_table = pd.concat([tot_loss_table,loss_table], axis = 0)
axs[0,0].set_ylabel('BO iterations to GMP convergence', fontsize = SMALL_SIZE)
axs[1,0].set_ylabel('CPU time to GMP convergence', fontsize = SMALL_SIZE)
axs[1,0].set_ylim(-0.05*cputime_max,1.4*cputime_max)
for ax in axs[1,:]:
ax.set_xlabel('secondary initpts', fontsize = SMALL_SIZE)
for ax in axs.flatten():
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.tick_params('x',labelrotation = 40)
ax.tick_params(axis = 'both',
width = 3, length = 4,
labelsize = SMALL_SIZE)
plt.savefig(filename)
return tot_loss_table | cputime_max = max(clean_rows[:,1])
x_train = clean_rows[:,0].reshape(-1,1)
y_train = clean_rows[:,1].reshape(-1,1)
degree=1 | random_line_split |
mod.rs | pub use self::cdtime::{nanos_to_collectd, CdTime};
pub use self::logger::{collectd_log, log_err, CollectdLoggerBuilder, LogLevel};
pub use self::oconfig::{ConfigItem, ConfigValue};
use crate::bindings::{
data_set_t, meta_data_add_boolean, meta_data_add_double, meta_data_add_signed_int,
meta_data_add_string, meta_data_add_unsigned_int, meta_data_create, meta_data_destroy,
meta_data_get_boolean, meta_data_get_double, meta_data_get_signed_int, meta_data_get_string,
meta_data_get_unsigned_int, meta_data_t, meta_data_toc, meta_data_type, plugin_dispatch_values,
uc_get_rate, value_list_t, value_t, ARR_LENGTH, DS_TYPE_ABSOLUTE, DS_TYPE_COUNTER,
DS_TYPE_DERIVE, DS_TYPE_GAUGE, MD_TYPE_BOOLEAN, MD_TYPE_DOUBLE, MD_TYPE_SIGNED_INT,
MD_TYPE_STRING, MD_TYPE_UNSIGNED_INT,
};
use crate::errors::{ArrayError, CacheRateError, ReceiveError, SubmitError};
use chrono::prelude::*;
use chrono::Duration;
use memchr::memchr;
use std::borrow::Cow;
use std::collections::HashMap;
use std::ffi::{CStr, CString};
use std::fmt;
use std::os::raw::{c_char, c_void};
use std::ptr;
use std::slice;
use std::str::Utf8Error;
mod cdtime;
mod logger;
mod oconfig;
/// The value of a metadata entry associated with a [ValueList].
/// Metadata can be added using [ValueListBuilder::metadata] method.
#[derive(Debug, Clone, PartialEq)]
pub enum MetaValue {
String(String),
SignedInt(i64),
UnsignedInt(u64),
Double(f64),
Boolean(bool),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u32)]
#[allow(dead_code)]
enum ValueType {
Counter = DS_TYPE_COUNTER,
Gauge = DS_TYPE_GAUGE,
Derive = DS_TYPE_DERIVE,
Absolute = DS_TYPE_ABSOLUTE,
}
/// The value that a plugin reports can be any one of the following types
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Value {
/// A COUNTER value is for continuous incrementing counters like the ifInOctets counter in a router.
/// The COUNTER data source assumes that the observed value never decreases, except when it
/// overflows. The update function takes the overflow into account. If a counter is reset to
/// zero, for example because an application was restarted, the wrap-around calculation may
/// result in a huge rate. Thus setting a reasonable maximum value is essential when using
/// COUNTER data sources. Because of this, COUNTER data sources are only recommended for
/// counters that wrap-around often, for example 32 bit octet counters of a busy switch port.
Counter(u64),
/// A GAUGE value is simply stored as-is. This is the right choice for values which may
/// increase as well as decrease, such as temperatures or the amount of memory used
Gauge(f64),
/// DERIVE will store the derivative of the observed values source. If the data type has a
/// minimum of zero, negative rates will be discarded. Using DERIVE is a good idea for
/// measuring cgroup's cpuacct.usage as that stores the total number of CPU nanoseconds by all
/// tasks in the cgroup; the change (derivative) in CPU nanoseconds is more interesting than
/// the current value.
Derive(i64),
/// ABSOLUTE is for counters which get reset upon reading. This is used for fast counters which
/// tend to overflow. So instead of reading them normally you reset them after every read to
/// make sure you have a maximum time available before the next overflow.
Absolute(u64),
}
impl Value {
/// Returns if an underlying value is nan
///
/// ```
/// # use collectd_plugin::Value;
/// assert_eq!(true, Value::Gauge(::std::f64::NAN).is_nan());
/// assert_eq!(false, Value::Gauge(0.0).is_nan());
/// assert_eq!(false, Value::Derive(0).is_nan());
/// ```
pub fn is_nan(&self) -> bool {
if let Value::Gauge(x) = *self {
x.is_nan()
} else {
false
}
}
}
impl fmt::Display for Value {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Value::Counter(x) | Value::Absolute(x) => write!(f, "{}", x),
Value::Gauge(x) => write!(f, "{}", x),
Value::Derive(x) => write!(f, "{}", x),
}
}
}
impl From<Value> for value_t {
fn from(x: Value) -> Self {
match x {
Value::Counter(x) => value_t { counter: x },
Value::Gauge(x) => value_t { gauge: x },
Value::Derive(x) => value_t { derive: x },
Value::Absolute(x) => value_t { absolute: x },
}
}
}
/// Name and value of a reported metric
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ValueReport<'a> {
/// Name of the metric. If values has a length of 1, this is often just "value"
pub name: &'a str,
/// The value reported
pub value: Value,
/// Minimum value seen in an interval
pub min: f64,
/// Maximum value seen in an interval
pub max: f64,
}
/// Contains values and metadata that collectd has collected from plugins
#[derive(Debug, PartialEq, Clone)]
pub struct ValueList<'a> {
pub values: Vec<ValueReport<'a>>,
/// The plugin that submitted this value. This would be your `PluginManager` when submitting
/// values
pub plugin: &'a str,
/// Distinguishes entities that yield metrics. Each core would be a different instance of the
/// same plugin, as each core reports "idle", "user", "system" metrics.
pub plugin_instance: Option<&'a str>,
/// This is the string found in types.db, determines how many values are expected and how they
/// should be interpreted
pub type_: &'a str,
/// The type instance is used to separate values of identical type which nonetheless belong to
/// one another. For instance, even though "free", "used", and "total" all have types of
/// "Memory" they are different type instances.
pub type_instance: Option<&'a str>,
/// The hostname where the values were collectd
pub host: &'a str,
/// The timestamp at which the value was collected
pub time: DateTime<Utc>,
/// The interval in which new values are to be expected
pub interval: Duration,
/// Metadata associated to the reported values
pub meta: HashMap<String, MetaValue>,
// Keep the original list and set around for calculating rates on demand
original_list: *const value_list_t,
original_set: *const data_set_t,
}
impl<'a> ValueList<'a> {
/// Collectd does not automatically convert `Derived` values into a rate. This is why many
/// write plugins have a `StoreRates` config option so that these rates are calculated on
/// demand from collectd's internal cache. This function will return a vector that can supercede
/// the `values` field that contains the rate of all non-gauge values. Values that are gauges
/// remain unchanged, so one doesn't need to resort back to `values` field as this function
/// will return everything prepped for submission.
pub fn rates(&self) -> Result<Cow<'_, Vec<ValueReport<'a>>>, CacheRateError> {
// As an optimization step, if we know all values are gauges there is no need to call out
// to uc_get_rate as no values will be changed
let all_gauges = self.values.iter().all(|x| match x.value {
Value::Gauge(_) => true,
_ => false,
});
if all_gauges {
return Ok(Cow::Borrowed(&self.values));
}
let ptr = unsafe { uc_get_rate(self.original_set, self.original_list) };
if !ptr.is_null() {
let nv = unsafe { slice::from_raw_parts(ptr, self.values.len()) }
.iter()
.zip(self.values.iter())
.map(|(rate, report)| match report.value {
Value::Gauge(_) => *report,
_ => ValueReport {
value: Value::Gauge(*rate),
..*report
},
})
.collect();
Ok(Cow::Owned(nv))
} else {
Err(CacheRateError)
}
}
pub fn from<'b>(
set: &'b data_set_t,
list: &'b value_list_t,
) -> Result<ValueList<'b>, ReceiveError> {
let plugin = receive_array(&list.plugin, "", "plugin name")?;
let ds_len = length(set.ds_num);
let list_len = length(list.values_len);
let values: Result<Vec<ValueReport<'_>>, ReceiveError> =
unsafe { slice::from_raw_parts(list.values, list_len) }
.iter()
.zip(unsafe { slice::from_raw_parts(set.ds, ds_len) })
.map(|(val, source)| unsafe {
let v = match ::std::mem::transmute(source.type_) {
ValueType::Gauge => Value::Gauge(val.gauge),
ValueType::Counter => Value::Counter(val.counter),
ValueType::Derive => Value::Derive(val.derive),
ValueType::Absolute => Value::Absolute(val.absolute),
};
let name = receive_array(&source.name, plugin, "data source name")?;
Ok(ValueReport {
name,
value: v,
min: source.min,
max: source.max,
})
})
.collect();
assert!(list.time > 0);
assert!(list.interval > 0);
let plugin_instance =
receive_array(&list.plugin_instance, plugin, "plugin_instance").map(empty_to_none)?;
let type_ = receive_array(&list.type_, plugin, "type")?;
let type_instance =
receive_array(&list.type_instance, plugin, "type_instance").map(empty_to_none)?;
let host = receive_array(&list.host, plugin, "host")?;
let meta = from_meta_data(plugin, list.meta)?;
Ok(ValueList {
values: values?,
plugin_instance,
plugin,
type_,
type_instance,
host,
time: CdTime::from(list.time).into(),
interval: CdTime::from(list.interval).into(),
meta,
original_list: list,
original_set: set,
})
}
}
#[derive(Debug, PartialEq, Clone)]
struct SubmitValueList<'a> {
values: &'a [Value],
plugin_instance: Option<&'a str>,
plugin: &'a str,
type_: &'a str,
type_instance: Option<&'a str>,
host: Option<&'a str>,
time: Option<DateTime<Utc>>,
interval: Option<Duration>,
meta: HashMap<&'a str, MetaValue>,
}
/// Creates a value list to report values to collectd.
#[derive(Debug, PartialEq, Clone)]
pub struct ValueListBuilder<'a> {
list: SubmitValueList<'a>,
}
impl<'a> ValueListBuilder<'a> {
/// Primes a value list for submission. `plugin` will most likely be the name from the
/// `PluginManager` and `type_` is the datatype found in types.db
pub fn new<T: Into<&'a str>, U: Into<&'a str>>(plugin: T, type_: U) -> ValueListBuilder<'a> {
ValueListBuilder {
list: SubmitValueList {
values: &[],
plugin_instance: None,
plugin: plugin.into(),
type_: type_.into(),
type_instance: None,
host: None,
time: None,
interval: None,
meta: HashMap::new(),
},
}
}
/// A set of observed values that belong to the same plugin and type instance
pub fn values(mut self, values: &'a [Value]) -> ValueListBuilder<'a> {
self.list.values = values;
self
}
/// Distinguishes entities that yield metrics. Each core would be a different instance of the
/// same plugin, as each core reports "idle", "user", "system" metrics.
pub fn plugin_instance<T: Into<&'a str>>(mut self, plugin_instance: T) -> ValueListBuilder<'a> {
self.list.plugin_instance = Some(plugin_instance.into());
self
}
/// The type instance is used to separate values of identical type which nonetheless belong to
/// one another. For instance, even though "free", "used", and "total" all have types of
/// "Memory" they are different type instances.
pub fn type_instance<T: Into<&'a str>>(mut self, type_instance: T) -> ValueListBuilder<'a> {
self.list.type_instance = Some(type_instance.into());
self
}
/// Override the machine's hostname that the observed values will be attributed to. Best to
/// override when observing values from another machine
pub fn host<T: Into<&'a str>>(mut self, host: T) -> ValueListBuilder<'a> {
self.list.host = Some(host.into());
self
}
/// The timestamp at which the value was collected. Overrides the default time, which is when
/// collectd receives the values from `submit`. Use only if there is a significant delay is
/// metrics gathering or if submitting values from the past.
pub fn time(mut self, dt: DateTime<Utc>) -> ValueListBuilder<'a> {
self.list.time = Some(dt);
self
}
/// The interval in which new values are to be expected. This is typically handled at a global
/// or plugin level. Use at your own discretion.
pub fn interval(mut self, interval: Duration) -> ValueListBuilder<'a> {
self.list.interval = Some(interval);
self
}
/// Add a metadata entry.
///
/// Multiple entries can be added by calling this method. If the same key is used, only the last
/// entry is kept.
pub fn metadata(mut self, key: &'a str, value: MetaValue) -> ValueListBuilder<'a> {
self.list.meta.insert(key, value);
self
}
/// Submits the observed values to collectd and returns errors if encountered
pub fn submit(self) -> Result<(), SubmitError> {
let mut v: Vec<value_t> = self.list.values.iter().map(|&x| x.into()).collect();
let plugin_instance = self
.list
.plugin_instance
.map(|x| submit_array_res(x, "plugin_instance"))
.unwrap_or_else(|| Ok([0 as c_char; ARR_LENGTH]))?;
let type_instance = self
.list
.type_instance
.map(|x| submit_array_res(x, "type_instance"))
.unwrap_or_else(|| Ok([0 as c_char; ARR_LENGTH]))?;
let host = self
.list
.host
.map(|x| submit_array_res(x, "host"))
.transpose()?;
// If a custom host is not provided by the plugin, we default to the global
// hostname. In versions prior to collectd 5.7, it was required to propagate the
// global hostname (hostname_g) in the submission. In collectd 5.7, one could
// submit an empty array or hostname_g and they would equate to the same thing. In
// collectd 5.8, hostname_g had the type signature changed so it could no longer be
// submitted and would cause garbage to be read (and thus could have very much
// unintended side effects)
let host = host.unwrap_or([0 as c_char; ARR_LENGTH]);
let len = v.len() as u64;
let plugin = submit_array_res(self.list.plugin, "plugin")?;
let type_ = submit_array_res(self.list.type_, "type")?;
let meta = to_meta_data(&self.list.meta)?;
let list = value_list_t {
values: v.as_mut_ptr(),
values_len: len,
plugin_instance,
plugin,
type_,
type_instance,
host,
time: self.list.time.map(CdTime::from).unwrap_or(CdTime(0)).into(),
interval: self
.list
.interval
.map(CdTime::from)
.unwrap_or(CdTime(0))
.into(),
meta,
};
match unsafe { plugin_dispatch_values(&list) } {
0 => Ok(()),
i => Err(SubmitError::Dispatch(i)),
}
}
}
fn to_meta_data<'a, 'b: 'a, T>(meta_hm: T) -> Result<*mut meta_data_t, SubmitError>
where
T: IntoIterator<Item = (&'a &'b str, &'a MetaValue)>,
{
let meta = unsafe { meta_data_create() };
let conversion_result = to_meta_data_with_meta(meta_hm, meta);
match conversion_result {
Ok(()) => Ok(meta),
Err(error) => {
unsafe {
meta_data_destroy(meta);
}
Err(error)
}
}
}
fn to_meta_data_with_meta<'a, 'b: 'a, T>(
meta_hm: T,
meta: *mut meta_data_t,
) -> Result<(), SubmitError>
where
T: IntoIterator<Item = (&'a &'b str, &'a MetaValue)>,
{
for (key, value) in meta_hm.into_iter() {
let c_key = CString::new(*key).map_err(|e| SubmitError::Field {
name: "meta key",
err: ArrayError::NullPresent(e.nul_position(), key.to_string()),
})?;
match value {
MetaValue::String(str) => {
let c_value = CString::new(str.as_str()).map_err(|e| SubmitError::Field {
name: "meta value",
err: ArrayError::NullPresent(e.nul_position(), str.to_string()),
})?;
unsafe {
meta_data_add_string(meta, c_key.as_ptr(), c_value.as_ptr());
}
}
MetaValue::SignedInt(i) => unsafe {
meta_data_add_signed_int(meta, c_key.as_ptr(), *i);
},
MetaValue::UnsignedInt(u) => unsafe {
meta_data_add_unsigned_int(meta, c_key.as_ptr(), *u);
},
MetaValue::Double(d) => unsafe {
meta_data_add_double(meta, c_key.as_ptr(), *d);
},
MetaValue::Boolean(b) => unsafe {
meta_data_add_boolean(meta, c_key.as_ptr(), *b);
},
}
}
Ok(())
}
fn from_meta_data(
plugin: &str,
meta: *mut meta_data_t,
) -> Result<HashMap<String, MetaValue>, ReceiveError> {
if meta.is_null() {
return Ok(HashMap::new());
}
let mut c_toc: *mut *mut c_char = ptr::null_mut();
let count_or_err = unsafe { meta_data_toc(meta, &mut c_toc as *mut *mut *mut c_char) };
if count_or_err < 0 {
return Err(ReceiveError::Metadata {
plugin: plugin.to_string(),
field: "toc".to_string(),
msg: "invalid parameters to meta_data_toc",
});
}
let count = count_or_err as usize;
if count == 0 {
return Ok(HashMap::new());
}
let toc = unsafe { slice::from_raw_parts(c_toc, count) };
let conversion_result = from_meta_data_with_toc(plugin, meta, toc);
for c_key_ptr in toc {
unsafe {
libc::free(*c_key_ptr as *mut c_void);
}
}
unsafe {
libc::free(c_toc as *mut c_void);
}
conversion_result
}
fn from_meta_data_with_toc(
plugin: &str,
meta: *mut meta_data_t,
toc: &[*mut c_char],
) -> Result<HashMap<String, MetaValue>, ReceiveError> {
let mut meta_hm = HashMap::with_capacity(toc.len());
for c_key_ptr in toc {
let (c_key, key, value_type) = unsafe {
let c_key: &CStr = CStr::from_ptr(*c_key_ptr);
let key: String = c_key
.to_str()
.map_err(|e| ReceiveError::Utf8 {
plugin: plugin.to_string(),
field: "metadata key",
err: e,
})?
.to_string();
let value_type: u32 = meta_data_type(meta, c_key.as_ptr()) as u32;
(c_key, key, value_type)
};
match value_type {
MD_TYPE_BOOLEAN => {
let mut c_value = false;
unsafe {
meta_data_get_boolean(meta, c_key.as_ptr(), &mut c_value as *mut bool);
}
meta_hm.insert(key, MetaValue::Boolean(c_value));
}
MD_TYPE_DOUBLE => {
let mut c_value = 0.0;
unsafe {
meta_data_get_double(meta, c_key.as_ptr(), &mut c_value as *mut f64);
}
meta_hm.insert(key, MetaValue::Double(c_value));
}
MD_TYPE_SIGNED_INT => {
let mut c_value = 0i64;
unsafe {
meta_data_get_signed_int(meta, c_key.as_ptr(), &mut c_value as *mut i64);
}
meta_hm.insert(key, MetaValue::SignedInt(c_value));
}
MD_TYPE_STRING => {
let value: String = unsafe {
let mut c_value: *mut c_char = ptr::null_mut();
meta_data_get_string(meta, c_key.as_ptr(), &mut c_value as *mut *mut c_char);
CStr::from_ptr(c_value)
.to_str()
.map_err(|e| ReceiveError::Utf8 {
plugin: plugin.to_string(),
field: "metadata value",
err: e,
})?
.to_string()
};
meta_hm.insert(key, MetaValue::String(value));
}
MD_TYPE_UNSIGNED_INT => {
let mut c_value = 0u64;
unsafe {
meta_data_get_unsigned_int(meta, c_key.as_ptr(), &mut c_value as *mut u64);
}
meta_hm.insert(key, MetaValue::UnsignedInt(c_value));
}
_ => {
return Err(ReceiveError::Metadata {
plugin: plugin.to_string(),
field: key,
msg: "unknown metadata type",
});
}
}
}
Ok(meta_hm)
}
fn submit_array_res(s: &str, name: &'static str) -> Result<[c_char; ARR_LENGTH], SubmitError> {
to_array_res(s).map_err(|e| SubmitError::Field { name, err: e })
}
/// Collectd stores textual data in fixed sized arrays, so this function will convert a string
/// slice into array compatible with collectd's text fields. Be aware that `ARR_LENGTH` is 64
/// before collectd 5.7
fn to_array_res(s: &str) -> Result<[c_char; ARR_LENGTH], ArrayError> {
// By checking if the length is greater than or *equal* to, we guarantee a trailing null
if s.len() >= ARR_LENGTH {
return Err(ArrayError::TooLong(s.len()));
}
let bytes = s.as_bytes();
// Using memchr to find a null and work around it is 10x faster than
// using a CString to get the bytes_with_nul and cut the time to submit
// values to collectd in half.
if let Some(ind) = memchr(0, bytes) {
return Err(ArrayError::NullPresent(ind, s.to_string()));
}
let mut arr = [0; ARR_LENGTH];
arr[0..bytes.len()].copy_from_slice(bytes);
Ok(unsafe { ::std::mem::transmute(arr) })
}
fn receive_array<'a>(
s: &'a [c_char; ARR_LENGTH],
plugin: &str,
field: &'static str,
) -> Result<&'a str, ReceiveError> {
from_array(s).map_err(|e| ReceiveError::Utf8 {
plugin: String::from(plugin),
field,
err: e,
})
}
/// Turns a fixed size character array into string slice, if possible
pub fn from_array(s: &[c_char; ARR_LENGTH]) -> Result<&str, Utf8Error> {
unsafe {
let a = s as *const [c_char; ARR_LENGTH] as *const c_char;
CStr::from_ptr(a).to_str()
}
}
/// Returns if the string is empty or not
pub fn empty_to_none(s: &str) -> Option<&str> {
if s.is_empty() {
None
} else {
Some(s)
}
}
pub fn length(len: u64) -> usize {
len as usize
}
pub fn get_default_interval() -> u64 {
0
}
#[cfg(test)]
mod tests {
use self::cdtime::nanos_to_collectd;
use super::*;
use crate::bindings::data_source_t;
use std::os::raw::c_char;
#[test]
fn test_empty_to_none() {
assert_eq!(None, empty_to_none(""));
let s = "hi";
assert_eq!(Some("hi"), empty_to_none(s));
}
#[test]
fn test_from_array() {
let mut name: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
name[0] = b'h' as c_char;
name[1] = b'i' as c_char;
assert_eq!(Ok("hi"), from_array(&name));
}
#[test]
fn test_to_array() {
let actual = to_array_res("Hi");
assert!(actual.is_ok());
assert_eq!(&actual.unwrap()[..2], &[b'H' as c_char, b'i' as c_char]);
}
#[test]
fn test_to_array_res_nul() {
let actual = to_array_res("hi\0");
assert!(actual.is_err());
}
#[test]
fn test_to_array_res_too_long() {
let actual = to_array_res(
"Hello check this out, I am a long string and there is no signs of stopping; well, maybe one day I will stop when I get too longggggggggggggggggggggggggggggggggggg",
);
assert!(actual.is_err());
}
#[test]
fn test_submit() {
let values = vec![Value::Gauge(15.0), Value::Gauge(10.0), Value::Gauge(12.0)];
let result = ValueListBuilder::new("my-plugin", "load")
.values(&values)
.submit();
assert_eq!(result.unwrap(), ());
}
#[test]
fn test_recv_value_list_conversion() {
let empty: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
let mut metric: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
metric[0] = b'h' as c_char;
metric[1] = b'o' as c_char;
| let mut name: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
name[0] = b'h' as c_char;
name[1] = b'i' as c_char;
let val = data_source_t {
name,
type_: DS_TYPE_GAUGE as i32,
min: 10.0,
max: 11.0,
};
let mut v = vec![val];
let conv = data_set_t {
type_: metric,
ds_num: 1,
ds: v.as_mut_ptr(),
};
let mut vs = vec![value_t { gauge: 3.0 }];
let list_t = value_list_t {
values: vs.as_mut_ptr(),
values_len: 1,
time: nanos_to_collectd(1_000_000_000),
interval: nanos_to_collectd(1_000_000_000),
host: metric,
plugin: name,
plugin_instance: metric,
type_: metric,
type_instance: empty,
meta: ptr::null_mut(),
};
let actual = ValueList::from(&conv, &list_t).unwrap();
assert_eq!(
actual,
ValueList {
values: vec![ValueReport {
name: "hi",
value: Value::Gauge(3.0),
min: 10.0,
max: 11.0,
}],
plugin_instance: Some("ho"),
plugin: "hi",
type_: "ho",
type_instance: None,
host: "ho",
time: Utc.ymd(1970, 1, 1).and_hms(0, 0, 1),
interval: Duration::seconds(1),
original_list: &list_t,
original_set: &conv,
meta: HashMap::new(),
}
);
}
} | random_line_split | |
mod.rs | pub use self::cdtime::{nanos_to_collectd, CdTime};
pub use self::logger::{collectd_log, log_err, CollectdLoggerBuilder, LogLevel};
pub use self::oconfig::{ConfigItem, ConfigValue};
use crate::bindings::{
data_set_t, meta_data_add_boolean, meta_data_add_double, meta_data_add_signed_int,
meta_data_add_string, meta_data_add_unsigned_int, meta_data_create, meta_data_destroy,
meta_data_get_boolean, meta_data_get_double, meta_data_get_signed_int, meta_data_get_string,
meta_data_get_unsigned_int, meta_data_t, meta_data_toc, meta_data_type, plugin_dispatch_values,
uc_get_rate, value_list_t, value_t, ARR_LENGTH, DS_TYPE_ABSOLUTE, DS_TYPE_COUNTER,
DS_TYPE_DERIVE, DS_TYPE_GAUGE, MD_TYPE_BOOLEAN, MD_TYPE_DOUBLE, MD_TYPE_SIGNED_INT,
MD_TYPE_STRING, MD_TYPE_UNSIGNED_INT,
};
use crate::errors::{ArrayError, CacheRateError, ReceiveError, SubmitError};
use chrono::prelude::*;
use chrono::Duration;
use memchr::memchr;
use std::borrow::Cow;
use std::collections::HashMap;
use std::ffi::{CStr, CString};
use std::fmt;
use std::os::raw::{c_char, c_void};
use std::ptr;
use std::slice;
use std::str::Utf8Error;
mod cdtime;
mod logger;
mod oconfig;
/// The value of a metadata entry associated with a [ValueList].
/// Metadata can be added using [ValueListBuilder::metadata] method.
#[derive(Debug, Clone, PartialEq)]
pub enum MetaValue {
String(String),
SignedInt(i64),
UnsignedInt(u64),
Double(f64),
Boolean(bool),
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u32)]
#[allow(dead_code)]
enum ValueType {
Counter = DS_TYPE_COUNTER,
Gauge = DS_TYPE_GAUGE,
Derive = DS_TYPE_DERIVE,
Absolute = DS_TYPE_ABSOLUTE,
}
/// The value that a plugin reports can be any one of the following types
///
/// Converted into collectd's `value_t` union by the `From<Value>` impl below.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Value {
    /// A COUNTER value is for continuous incrementing counters like the ifInOctets counter in a router.
    /// The COUNTER data source assumes that the observed value never decreases, except when it
    /// overflows. The update function takes the overflow into account. If a counter is reset to
    /// zero, for example because an application was restarted, the wrap-around calculation may
    /// result in a huge rate. Thus setting a reasonable maximum value is essential when using
    /// COUNTER data sources. Because of this, COUNTER data sources are only recommended for
    /// counters that wrap-around often, for example 32 bit octet counters of a busy switch port.
    Counter(u64),
    /// A GAUGE value is simply stored as-is. This is the right choice for values which may
    /// increase as well as decrease, such as temperatures or the amount of memory used
    Gauge(f64),
    /// DERIVE will store the derivative of the observed values source. If the data type has a
    /// minimum of zero, negative rates will be discarded. Using DERIVE is a good idea for
    /// measuring cgroup's cpuacct.usage as that stores the total number of CPU nanoseconds by all
    /// tasks in the cgroup; the change (derivative) in CPU nanoseconds is more interesting than
    /// the current value.
    Derive(i64),
    /// ABSOLUTE is for counters which get reset upon reading. This is used for fast counters which
    /// tend to overflow. So instead of reading them normally you reset them after every read to
    /// make sure you have a maximum time available before the next overflow.
    Absolute(u64),
}
impl Value {
    /// Returns if an underlying value is nan
    ///
    /// Only a `Gauge` can hold a NaN; every other variant is integral.
    ///
    /// ```
    /// # use collectd_plugin::Value;
    /// assert_eq!(true, Value::Gauge(::std::f64::NAN).is_nan());
    /// assert_eq!(false, Value::Gauge(0.0).is_nan());
    /// assert_eq!(false, Value::Derive(0).is_nan());
    /// ```
    pub fn is_nan(&self) -> bool {
        match *self {
            Value::Gauge(g) => g.is_nan(),
            _ => false,
        }
    }
}
impl fmt::Display for Value {
    /// Renders only the numeric payload of the variant, with no unit or label.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Value::Counter(v) => write!(f, "{}", v),
            Value::Absolute(v) => write!(f, "{}", v),
            Value::Gauge(v) => write!(f, "{}", v),
            Value::Derive(v) => write!(f, "{}", v),
        }
    }
}
impl From<Value> for value_t {
    /// Converts to collectd's C `value_t` union, initializing the union field
    /// that matches the variant.
    fn from(x: Value) -> Self {
        match x {
            Value::Counter(x) => value_t { counter: x },
            Value::Gauge(x) => value_t { gauge: x },
            Value::Derive(x) => value_t { derive: x },
            Value::Absolute(x) => value_t { absolute: x },
        }
    }
}
/// Name and value of a reported metric
///
/// Borrowed from collectd's `data_set_t` / `value_list_t`, hence the lifetime.
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct ValueReport<'a> {
    /// Name of the metric. If values has a length of 1, this is often just "value"
    pub name: &'a str,
    /// The value reported
    pub value: Value,
    /// Minimum value seen in an interval
    pub min: f64,
    /// Maximum value seen in an interval
    pub max: f64,
}
/// Contains values and metadata that collectd has collected from plugins
#[derive(Debug, PartialEq, Clone)]
pub struct ValueList<'a> {
    pub values: Vec<ValueReport<'a>>,
    /// The plugin that submitted this value. This would be your `PluginManager` when submitting
    /// values
    pub plugin: &'a str,
    /// Distinguishes entities that yield metrics. Each core would be a different instance of the
    /// same plugin, as each core reports "idle", "user", "system" metrics.
    pub plugin_instance: Option<&'a str>,
    /// This is the string found in types.db, determines how many values are expected and how they
    /// should be interpreted
    pub type_: &'a str,
    /// The type instance is used to separate values of identical type which nonetheless belong to
    /// one another. For instance, even though "free", "used", and "total" all have types of
    /// "Memory" they are different type instances.
    pub type_instance: Option<&'a str>,
    /// The hostname where the values were collected
    pub host: &'a str,
    /// The timestamp at which the value was collected
    pub time: DateTime<Utc>,
    /// The interval in which new values are to be expected
    pub interval: Duration,
    /// Metadata associated to the reported values
    pub meta: HashMap<String, MetaValue>,
    // Keep the original list and set around for calculating rates on demand
    // (raw pointers: valid only for the lifetime of the collectd callback
    // that produced them).
    original_list: *const value_list_t,
    original_set: *const data_set_t,
}
impl<'a> ValueList<'a> {
    /// Collectd does not automatically convert `Derived` values into a rate. This is why many
    /// write plugins have a `StoreRates` config option so that these rates are calculated on
    /// demand from collectd's internal cache. This function will return a vector that can supersede
    /// the `values` field that contains the rate of all non-gauge values. Values that are gauges
    /// remain unchanged, so one doesn't need to resort back to `values` field as this function
    /// will return everything prepped for submission.
    pub fn rates(&self) -> Result<Cow<'_, Vec<ValueReport<'a>>>, CacheRateError> {
        // As an optimization step, if we know all values are gauges there is no need to call out
        // to uc_get_rate as no values will be changed
        let all_gauges = self.values.iter().all(|x| match x.value {
            Value::Gauge(_) => true,
            _ => false,
        });
        if all_gauges {
            return Ok(Cow::Borrowed(&self.values));
        }
        // Ask collectd's value cache for per-value rates; null signals failure.
        let ptr = unsafe { uc_get_rate(self.original_set, self.original_list) };
        if !ptr.is_null() {
            // NOTE(review): the rate buffer returned by uc_get_rate is never
            // freed here — confirm who owns it (possible leak).
            let nv = unsafe { slice::from_raw_parts(ptr, self.values.len()) }
                .iter()
                .zip(self.values.iter())
                .map(|(rate, report)| match report.value {
                    // Gauges pass through untouched; everything else is
                    // replaced with a gauge carrying the computed rate.
                    Value::Gauge(_) => *report,
                    _ => ValueReport {
                        value: Value::Gauge(*rate),
                        ..*report
                    },
                })
                .collect();
            Ok(Cow::Owned(nv))
        } else {
            Err(CacheRateError)
        }
    }
    /// Builds a borrowed `ValueList` from the raw structures collectd hands to
    /// a write callback. Fails with `ReceiveError` on invalid UTF-8 text or
    /// bad metadata.
    pub fn from<'b>(
        set: &'b data_set_t,
        list: &'b value_list_t,
    ) -> Result<ValueList<'b>, ReceiveError> {
        let plugin = receive_array(&list.plugin, "", "plugin name")?;
        let ds_len = length(set.ds_num);
        let list_len = length(list.values_len);
        let values: Result<Vec<ValueReport<'_>>, ReceiveError> =
            unsafe { slice::from_raw_parts(list.values, list_len) }
                .iter()
                .zip(unsafe { slice::from_raw_parts(set.ds, ds_len) })
                .map(|(val, source)| unsafe {
                    // SAFETY: transmutes the raw `type_` tag into `ValueType`;
                    // this is UB for any value other than the four DS_TYPE_*
                    // constants. Assumes collectd only supplies valid tags —
                    // TODO confirm.
                    let v = match ::std::mem::transmute(source.type_) {
                        ValueType::Gauge => Value::Gauge(val.gauge),
                        ValueType::Counter => Value::Counter(val.counter),
                        ValueType::Derive => Value::Derive(val.derive),
                        ValueType::Absolute => Value::Absolute(val.absolute),
                    };
                    let name = receive_array(&source.name, plugin, "data source name")?;
                    Ok(ValueReport {
                        name,
                        value: v,
                        min: source.min,
                        max: source.max,
                    })
                })
                .collect();
        // Collectd should always stamp received lists; a zero here would make
        // the CdTime conversions below meaningless.
        assert!(list.time > 0);
        assert!(list.interval > 0);
        let plugin_instance =
            receive_array(&list.plugin_instance, plugin, "plugin_instance").map(empty_to_none)?;
        let type_ = receive_array(&list.type_, plugin, "type")?;
        let type_instance =
            receive_array(&list.type_instance, plugin, "type_instance").map(empty_to_none)?;
        let host = receive_array(&list.host, plugin, "host")?;
        let meta = from_meta_data(plugin, list.meta)?;
        Ok(ValueList {
            values: values?,
            plugin_instance,
            plugin,
            type_,
            type_instance,
            host,
            time: CdTime::from(list.time).into(),
            interval: CdTime::from(list.interval).into(),
            meta,
            original_list: list,
            original_set: set,
        })
    }
}
// Builder-side, fully borrowed counterpart of [ValueList]: accumulated by
// `ValueListBuilder` and turned into a C `value_list_t` inside `submit`.
// Optional fields left as `None` fall back to collectd defaults at submit time.
#[derive(Debug, PartialEq, Clone)]
struct SubmitValueList<'a> {
    values: &'a [Value],
    plugin_instance: Option<&'a str>,
    plugin: &'a str,
    type_: &'a str,
    type_instance: Option<&'a str>,
    host: Option<&'a str>,
    time: Option<DateTime<Utc>>,
    interval: Option<Duration>,
    meta: HashMap<&'a str, MetaValue>,
}
/// Creates a value list to report values to collectd.
///
/// Construct with [ValueListBuilder::new], chain the setter methods, then call
/// [ValueListBuilder::submit].
#[derive(Debug, PartialEq, Clone)]
pub struct ValueListBuilder<'a> {
    list: SubmitValueList<'a>,
}
impl<'a> ValueListBuilder<'a> {
/// Primes a value list for submission. `plugin` will most likely be the name from the
/// `PluginManager` and `type_` is the datatype found in types.db
pub fn new<T: Into<&'a str>, U: Into<&'a str>>(plugin: T, type_: U) -> ValueListBuilder<'a> {
ValueListBuilder {
list: SubmitValueList {
values: &[],
plugin_instance: None,
plugin: plugin.into(),
type_: type_.into(),
type_instance: None,
host: None,
time: None,
interval: None,
meta: HashMap::new(),
},
}
}
/// A set of observed values that belong to the same plugin and type instance
pub fn values(mut self, values: &'a [Value]) -> ValueListBuilder<'a> {
self.list.values = values;
self
}
/// Distinguishes entities that yield metrics. Each core would be a different instance of the
/// same plugin, as each core reports "idle", "user", "system" metrics.
pub fn plugin_instance<T: Into<&'a str>>(mut self, plugin_instance: T) -> ValueListBuilder<'a> {
self.list.plugin_instance = Some(plugin_instance.into());
self
}
/// The type instance is used to separate values of identical type which nonetheless belong to
/// one another. For instance, even though "free", "used", and "total" all have types of
/// "Memory" they are different type instances.
pub fn type_instance<T: Into<&'a str>>(mut self, type_instance: T) -> ValueListBuilder<'a> {
self.list.type_instance = Some(type_instance.into());
self
}
/// Override the machine's hostname that the observed values will be attributed to. Best to
/// override when observing values from another machine
pub fn host<T: Into<&'a str>>(mut self, host: T) -> ValueListBuilder<'a> {
self.list.host = Some(host.into());
self
}
/// The timestamp at which the value was collected. Overrides the default time, which is when
/// collectd receives the values from `submit`. Use only if there is a significant delay is
/// metrics gathering or if submitting values from the past.
pub fn time(mut self, dt: DateTime<Utc>) -> ValueListBuilder<'a> |
/// The interval in which new values are to be expected. This is typically handled at a global
/// or plugin level. Use at your own discretion.
pub fn interval(mut self, interval: Duration) -> ValueListBuilder<'a> {
self.list.interval = Some(interval);
self
}
/// Add a metadata entry.
///
/// Multiple entries can be added by calling this method. If the same key is used, only the last
/// entry is kept.
pub fn metadata(mut self, key: &'a str, value: MetaValue) -> ValueListBuilder<'a> {
self.list.meta.insert(key, value);
self
}
/// Submits the observed values to collectd and returns errors if encountered
pub fn submit(self) -> Result<(), SubmitError> {
let mut v: Vec<value_t> = self.list.values.iter().map(|&x| x.into()).collect();
let plugin_instance = self
.list
.plugin_instance
.map(|x| submit_array_res(x, "plugin_instance"))
.unwrap_or_else(|| Ok([0 as c_char; ARR_LENGTH]))?;
let type_instance = self
.list
.type_instance
.map(|x| submit_array_res(x, "type_instance"))
.unwrap_or_else(|| Ok([0 as c_char; ARR_LENGTH]))?;
let host = self
.list
.host
.map(|x| submit_array_res(x, "host"))
.transpose()?;
// If a custom host is not provided by the plugin, we default to the global
// hostname. In versions prior to collectd 5.7, it was required to propagate the
// global hostname (hostname_g) in the submission. In collectd 5.7, one could
// submit an empty array or hostname_g and they would equate to the same thing. In
// collectd 5.8, hostname_g had the type signature changed so it could no longer be
// submitted and would cause garbage to be read (and thus could have very much
// unintended side effects)
let host = host.unwrap_or([0 as c_char; ARR_LENGTH]);
let len = v.len() as u64;
let plugin = submit_array_res(self.list.plugin, "plugin")?;
let type_ = submit_array_res(self.list.type_, "type")?;
let meta = to_meta_data(&self.list.meta)?;
let list = value_list_t {
values: v.as_mut_ptr(),
values_len: len,
plugin_instance,
plugin,
type_,
type_instance,
host,
time: self.list.time.map(CdTime::from).unwrap_or(CdTime(0)).into(),
interval: self
.list
.interval
.map(CdTime::from)
.unwrap_or(CdTime(0))
.into(),
meta,
};
match unsafe { plugin_dispatch_values(&list) } {
0 => Ok(()),
i => Err(SubmitError::Dispatch(i)),
}
}
}
// Converts a map of metadata entries into a freshly allocated collectd
// `meta_data_t`. On success the caller takes ownership of the pointer; on
// failure the partially built table is destroyed so nothing leaks.
fn to_meta_data<'a, 'b: 'a, T>(meta_hm: T) -> Result<*mut meta_data_t, SubmitError>
where
    T: IntoIterator<Item = (&'a &'b str, &'a MetaValue)>,
{
    let meta = unsafe { meta_data_create() };
    let conversion_result = to_meta_data_with_meta(meta_hm, meta);
    match conversion_result {
        Ok(()) => Ok(meta),
        Err(error) => {
            // Free the partially populated table before surfacing the error.
            unsafe {
                meta_data_destroy(meta);
            }
            Err(error)
        }
    }
}
// Copies every (key, value) pair into an existing `meta_data_t`. Keys and
// string values must be free of interior NUL bytes: `CString::new` enforces
// this and violations surface as `SubmitError::Field`.
fn to_meta_data_with_meta<'a, 'b: 'a, T>(
    meta_hm: T,
    meta: *mut meta_data_t,
) -> Result<(), SubmitError>
where
    T: IntoIterator<Item = (&'a &'b str, &'a MetaValue)>,
{
    for (key, value) in meta_hm.into_iter() {
        let c_key = CString::new(*key).map_err(|e| SubmitError::Field {
            name: "meta key",
            err: ArrayError::NullPresent(e.nul_position(), key.to_string()),
        })?;
        // Dispatch on the variant to the matching meta_data_add_* C function.
        match value {
            MetaValue::String(str) => {
                let c_value = CString::new(str.as_str()).map_err(|e| SubmitError::Field {
                    name: "meta value",
                    err: ArrayError::NullPresent(e.nul_position(), str.to_string()),
                })?;
                unsafe {
                    meta_data_add_string(meta, c_key.as_ptr(), c_value.as_ptr());
                }
            }
            MetaValue::SignedInt(i) => unsafe {
                meta_data_add_signed_int(meta, c_key.as_ptr(), *i);
            },
            MetaValue::UnsignedInt(u) => unsafe {
                meta_data_add_unsigned_int(meta, c_key.as_ptr(), *u);
            },
            MetaValue::Double(d) => unsafe {
                meta_data_add_double(meta, c_key.as_ptr(), *d);
            },
            MetaValue::Boolean(b) => unsafe {
                meta_data_add_boolean(meta, c_key.as_ptr(), *b);
            },
        }
    }
    Ok(())
}
// Reads all metadata attached to a received value list into an owned map.
// A null `meta` pointer or an empty table yields an empty map.
fn from_meta_data(
    plugin: &str,
    meta: *mut meta_data_t,
) -> Result<HashMap<String, MetaValue>, ReceiveError> {
    if meta.is_null() {
        return Ok(HashMap::new());
    }
    let mut c_toc: *mut *mut c_char = ptr::null_mut();
    // meta_data_toc allocates an array of C-string keys (table of contents);
    // a negative return signals invalid arguments.
    let count_or_err = unsafe { meta_data_toc(meta, &mut c_toc as *mut *mut *mut c_char) };
    if count_or_err < 0 {
        return Err(ReceiveError::Metadata {
            plugin: plugin.to_string(),
            field: "toc".to_string(),
            msg: "invalid parameters to meta_data_toc",
        });
    }
    let count = count_or_err as usize;
    if count == 0 {
        return Ok(HashMap::new());
    }
    let toc = unsafe { slice::from_raw_parts(c_toc, count) };
    let conversion_result = from_meta_data_with_toc(plugin, meta, toc);
    // Free each toc entry and the toc array itself whether or not the
    // conversion succeeded, then report the conversion's outcome.
    for c_key_ptr in toc {
        unsafe {
            libc::free(*c_key_ptr as *mut c_void);
        }
    }
    unsafe {
        libc::free(c_toc as *mut c_void);
    }
    conversion_result
}
// Converts each key listed in the toc into a (key, MetaValue) map entry by
// querying the key's type and calling the matching meta_data_get_* accessor.
fn from_meta_data_with_toc(
    plugin: &str,
    meta: *mut meta_data_t,
    toc: &[*mut c_char],
) -> Result<HashMap<String, MetaValue>, ReceiveError> {
    let mut meta_hm = HashMap::with_capacity(toc.len());
    for c_key_ptr in toc {
        let (c_key, key, value_type) = unsafe {
            let c_key: &CStr = CStr::from_ptr(*c_key_ptr);
            let key: String = c_key
                .to_str()
                .map_err(|e| ReceiveError::Utf8 {
                    plugin: plugin.to_string(),
                    field: "metadata key",
                    err: e,
                })?
                .to_string();
            let value_type: u32 = meta_data_type(meta, c_key.as_ptr()) as u32;
            (c_key, key, value_type)
        };
        match value_type {
            MD_TYPE_BOOLEAN => {
                let mut c_value = false;
                unsafe {
                    meta_data_get_boolean(meta, c_key.as_ptr(), &mut c_value as *mut bool);
                }
                meta_hm.insert(key, MetaValue::Boolean(c_value));
            }
            MD_TYPE_DOUBLE => {
                let mut c_value = 0.0;
                unsafe {
                    meta_data_get_double(meta, c_key.as_ptr(), &mut c_value as *mut f64);
                }
                meta_hm.insert(key, MetaValue::Double(c_value));
            }
            MD_TYPE_SIGNED_INT => {
                let mut c_value = 0i64;
                unsafe {
                    meta_data_get_signed_int(meta, c_key.as_ptr(), &mut c_value as *mut i64);
                }
                meta_hm.insert(key, MetaValue::SignedInt(c_value));
            }
            MD_TYPE_STRING => {
                // NOTE(review): meta_data_get_string hands back an allocated
                // C string (c_value) that is not freed here — confirm ownership
                // semantics (possible leak).
                let value: String = unsafe {
                    let mut c_value: *mut c_char = ptr::null_mut();
                    meta_data_get_string(meta, c_key.as_ptr(), &mut c_value as *mut *mut c_char);
                    CStr::from_ptr(c_value)
                        .to_str()
                        .map_err(|e| ReceiveError::Utf8 {
                            plugin: plugin.to_string(),
                            field: "metadata value",
                            err: e,
                        })?
                        .to_string()
                };
                meta_hm.insert(key, MetaValue::String(value));
            }
            MD_TYPE_UNSIGNED_INT => {
                let mut c_value = 0u64;
                unsafe {
                    meta_data_get_unsigned_int(meta, c_key.as_ptr(), &mut c_value as *mut u64);
                }
                meta_hm.insert(key, MetaValue::UnsignedInt(c_value));
            }
            _ => {
                return Err(ReceiveError::Metadata {
                    plugin: plugin.to_string(),
                    field: key,
                    msg: "unknown metadata type",
                });
            }
        }
    }
    Ok(meta_hm)
}
/// Wraps `to_array_res`, tagging any array error with the field name for
/// `SubmitError` reporting.
fn submit_array_res(s: &str, name: &'static str) -> Result<[c_char; ARR_LENGTH], SubmitError> {
    match to_array_res(s) {
        Ok(arr) => Ok(arr),
        Err(err) => Err(SubmitError::Field { name, err }),
    }
}
/// Collectd stores textual data in fixed sized arrays, so this function will convert a string
/// slice into array compatible with collectd's text fields. Be aware that `ARR_LENGTH` is 64
/// before collectd 5.7
fn to_array_res(s: &str) -> Result<[c_char; ARR_LENGTH], ArrayError> {
    // By checking if the length is greater than or *equal* to, we guarantee a trailing null
    if s.len() >= ARR_LENGTH {
        return Err(ArrayError::TooLong(s.len()));
    }
    let bytes = s.as_bytes();
    // Using memchr to find a null and work around it is 10x faster than
    // using a CString to get the bytes_with_nul and cut the time to submit
    // values to collectd in half.
    if let Some(ind) = memchr(0, bytes) {
        return Err(ArrayError::NullPresent(ind, s.to_string()));
    }
    let mut arr = [0; ARR_LENGTH];
    arr[0..bytes.len()].copy_from_slice(bytes);
    // Reinterpret the zero-padded [u8; ARR_LENGTH] buffer as [c_char; ARR_LENGTH]:
    // same size and layout, the transmute only changes element signedness.
    Ok(unsafe { ::std::mem::transmute(arr) })
}
/// Wraps `from_array`, attaching the plugin and field names to any UTF-8
/// failure so the resulting `ReceiveError` identifies its origin.
fn receive_array<'a>(
    s: &'a [c_char; ARR_LENGTH],
    plugin: &str,
    field: &'static str,
) -> Result<&'a str, ReceiveError> {
    match from_array(s) {
        Ok(text) => Ok(text),
        Err(err) => Err(ReceiveError::Utf8 {
            plugin: plugin.to_string(),
            field,
            err,
        }),
    }
}
/// Turns a fixed size character array into string slice, if possible
///
/// NOTE(review): `CStr::from_ptr` requires a NUL terminator within the array.
/// Arrays built by `to_array_res` always carry one; arrays received from the
/// C side are assumed to be NUL-terminated — confirm.
pub fn from_array(s: &[c_char; ARR_LENGTH]) -> Result<&str, Utf8Error> {
    unsafe {
        let a = s as *const [c_char; ARR_LENGTH] as *const c_char;
        CStr::from_ptr(a).to_str()
    }
}
/// Returns if the string is empty or not
pub fn empty_to_none(s: &str) -> Option<&str> {
    // An empty string means "no value"; anything else passes through untouched.
    Some(s).filter(|text| !text.is_empty())
}
/// Narrows collectd's 64-bit length fields into a `usize` for slice APIs.
/// NOTE(review): a plain `as` cast truncates on 32-bit targets — lengths from
/// collectd are tiny in practice, but confirm if 32-bit support matters.
pub fn length(len: u64) -> usize {
    len as usize
}
/// Returns the default interval value (0).
/// NOTE(review): 0 appears to mean "defer to collectd's global interval" —
/// `submit` encodes a missing interval the same way; confirm.
pub fn get_default_interval() -> u64 {
    0
}
#[cfg(test)]
mod tests {
    use self::cdtime::nanos_to_collectd;
    use super::*;
    use crate::bindings::data_source_t;
    use std::os::raw::c_char;
    #[test]
    fn test_empty_to_none() {
        assert_eq!(None, empty_to_none(""));
        let s = "hi";
        assert_eq!(Some("hi"), empty_to_none(s));
    }
    #[test]
    fn test_from_array() {
        let mut name: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
        name[0] = b'h' as c_char;
        name[1] = b'i' as c_char;
        assert_eq!(Ok("hi"), from_array(&name));
    }
    #[test]
    fn test_to_array() {
        let actual = to_array_res("Hi");
        assert!(actual.is_ok());
        assert_eq!(&actual.unwrap()[..2], &[b'H' as c_char, b'i' as c_char]);
    }
    #[test]
    fn test_to_array_res_nul() {
        // Interior NUL bytes must be rejected.
        let actual = to_array_res("hi\0");
        assert!(actual.is_err());
    }
    #[test]
    fn test_to_array_res_too_long() {
        let actual = to_array_res(
            "Hello check this out, I am a long string and there is no signs of stopping; well, maybe one day I will stop when I get too longggggggggggggggggggggggggggggggggggg",
        );
        assert!(actual.is_err());
    }
    #[test]
    fn test_submit() {
        // NOTE(review): calls through to plugin_dispatch_values — presumably
        // stubbed when built outside a collectd daemon; confirm test setup.
        let values = vec![Value::Gauge(15.0), Value::Gauge(10.0), Value::Gauge(12.0)];
        let result = ValueListBuilder::new("my-plugin", "load")
            .values(&values)
            .submit();
        assert_eq!(result.unwrap(), ());
    }
    #[test]
    fn test_recv_value_list_conversion() {
        // Builds raw data_set_t / value_list_t fixtures by hand and checks the
        // round-trip through ValueList::from.
        let empty: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
        let mut metric: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
        metric[0] = b'h' as c_char;
        metric[1] = b'o' as c_char;
        let mut name: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
        name[0] = b'h' as c_char;
        name[1] = b'i' as c_char;
        let val = data_source_t {
            name,
            type_: DS_TYPE_GAUGE as i32,
            min: 10.0,
            max: 11.0,
        };
        let mut v = vec![val];
        let conv = data_set_t {
            type_: metric,
            ds_num: 1,
            ds: v.as_mut_ptr(),
        };
        let mut vs = vec![value_t { gauge: 3.0 }];
        let list_t = value_list_t {
            values: vs.as_mut_ptr(),
            values_len: 1,
            time: nanos_to_collectd(1_000_000_000),
            interval: nanos_to_collectd(1_000_000_000),
            host: metric,
            plugin: name,
            plugin_instance: metric,
            type_: metric,
            type_instance: empty,
            meta: ptr::null_mut(),
        };
        let actual = ValueList::from(&conv, &list_t).unwrap();
        assert_eq!(
            actual,
            ValueList {
                values: vec![ValueReport {
                    name: "hi",
                    value: Value::Gauge(3.0),
                    min: 10.0,
                    max: 11.0,
                }],
                plugin_instance: Some("ho"),
                plugin: "hi",
                type_: "ho",
                type_instance: None,
                host: "ho",
                time: Utc.ymd(1970, 1, 1).and_hms(0, 0, 1),
                interval: Duration::seconds(1),
                original_list: &list_t,
                original_set: &conv,
                meta: HashMap::new(),
            }
        );
    }
}
| {
self.list.time = Some(dt);
self
} | identifier_body |
mod.rs | pub use self::cdtime::{nanos_to_collectd, CdTime};
pub use self::logger::{collectd_log, log_err, CollectdLoggerBuilder, LogLevel};
pub use self::oconfig::{ConfigItem, ConfigValue};
use crate::bindings::{
data_set_t, meta_data_add_boolean, meta_data_add_double, meta_data_add_signed_int,
meta_data_add_string, meta_data_add_unsigned_int, meta_data_create, meta_data_destroy,
meta_data_get_boolean, meta_data_get_double, meta_data_get_signed_int, meta_data_get_string,
meta_data_get_unsigned_int, meta_data_t, meta_data_toc, meta_data_type, plugin_dispatch_values,
uc_get_rate, value_list_t, value_t, ARR_LENGTH, DS_TYPE_ABSOLUTE, DS_TYPE_COUNTER,
DS_TYPE_DERIVE, DS_TYPE_GAUGE, MD_TYPE_BOOLEAN, MD_TYPE_DOUBLE, MD_TYPE_SIGNED_INT,
MD_TYPE_STRING, MD_TYPE_UNSIGNED_INT,
};
use crate::errors::{ArrayError, CacheRateError, ReceiveError, SubmitError};
use chrono::prelude::*;
use chrono::Duration;
use memchr::memchr;
use std::borrow::Cow;
use std::collections::HashMap;
use std::ffi::{CStr, CString};
use std::fmt;
use std::os::raw::{c_char, c_void};
use std::ptr;
use std::slice;
use std::str::Utf8Error;
mod cdtime;
mod logger;
mod oconfig;
/// The value of a metadata entry associated with a [ValueList].
/// Metadata can be added using [ValueListBuilder::metadata] method.
///
/// Each variant mirrors one of collectd's `MD_TYPE_*` metadata types.
#[derive(Debug, Clone, PartialEq)]
pub enum MetaValue {
    /// Mirrors collectd's `MD_TYPE_STRING`.
    String(String),
    /// Mirrors collectd's `MD_TYPE_SIGNED_INT`.
    SignedInt(i64),
    /// Mirrors collectd's `MD_TYPE_UNSIGNED_INT`.
    UnsignedInt(u64),
    /// Mirrors collectd's `MD_TYPE_DOUBLE`.
    Double(f64),
    /// Mirrors collectd's `MD_TYPE_BOOLEAN`.
    Boolean(bool),
}
// Internal mirror of collectd's data-source type constants (`DS_TYPE_*`).
//
// `#[repr(u32)]` with explicit discriminants is load-bearing: `ValueList::from`
// transmutes the raw `data_source_t.type_` field into this enum, so each
// discriminant must equal the corresponding C constant exactly.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u32)]
#[allow(dead_code)]
enum ValueType {
    Counter = DS_TYPE_COUNTER,
    Gauge = DS_TYPE_GAUGE,
    Derive = DS_TYPE_DERIVE,
    Absolute = DS_TYPE_ABSOLUTE,
}
/// The value that a plugin reports can be any one of the following types
///
/// Converted into collectd's `value_t` union by the `From<Value>` impl below.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Value {
    /// A COUNTER value is for continuous incrementing counters like the ifInOctets counter in a router.
    /// The COUNTER data source assumes that the observed value never decreases, except when it
    /// overflows. The update function takes the overflow into account. If a counter is reset to
    /// zero, for example because an application was restarted, the wrap-around calculation may
    /// result in a huge rate. Thus setting a reasonable maximum value is essential when using
    /// COUNTER data sources. Because of this, COUNTER data sources are only recommended for
    /// counters that wrap-around often, for example 32 bit octet counters of a busy switch port.
    Counter(u64),
    /// A GAUGE value is simply stored as-is. This is the right choice for values which may
    /// increase as well as decrease, such as temperatures or the amount of memory used
    Gauge(f64),
    /// DERIVE will store the derivative of the observed values source. If the data type has a
    /// minimum of zero, negative rates will be discarded. Using DERIVE is a good idea for
    /// measuring cgroup's cpuacct.usage as that stores the total number of CPU nanoseconds by all
    /// tasks in the cgroup; the change (derivative) in CPU nanoseconds is more interesting than
    /// the current value.
    Derive(i64),
    /// ABSOLUTE is for counters which get reset upon reading. This is used for fast counters which
    /// tend to overflow. So instead of reading them normally you reset them after every read to
    /// make sure you have a maximum time available before the next overflow.
    Absolute(u64),
}
impl Value {
    /// Returns if an underlying value is nan
    ///
    /// Only a `Gauge` can hold a NaN; every other variant is integral.
    ///
    /// ```
    /// # use collectd_plugin::Value;
    /// assert_eq!(true, Value::Gauge(::std::f64::NAN).is_nan());
    /// assert_eq!(false, Value::Gauge(0.0).is_nan());
    /// assert_eq!(false, Value::Derive(0).is_nan());
    /// ```
    pub fn is_nan(&self) -> bool {
        match *self {
            Value::Gauge(g) => g.is_nan(),
            _ => false,
        }
    }
}
impl fmt::Display for Value {
    /// Renders only the numeric payload of the variant, with no unit or label.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Value::Counter(v) => write!(f, "{}", v),
            Value::Absolute(v) => write!(f, "{}", v),
            Value::Gauge(v) => write!(f, "{}", v),
            Value::Derive(v) => write!(f, "{}", v),
        }
    }
}
impl From<Value> for value_t {
    /// Converts to collectd's C `value_t` union, initializing the union field
    /// that matches the variant.
    fn from(x: Value) -> Self {
        match x {
            Value::Counter(x) => value_t { counter: x },
            Value::Gauge(x) => value_t { gauge: x },
            Value::Derive(x) => value_t { derive: x },
            Value::Absolute(x) => value_t { absolute: x },
        }
    }
}
/// Name and value of a reported metric
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct | <'a> {
/// Name of the metric. If values has a length of 1, this is often just "value"
pub name: &'a str,
/// The value reported
pub value: Value,
/// Minimum value seen in an interval
pub min: f64,
/// Maximum value seen in an interval
pub max: f64,
}
/// Contains values and metadata that collectd has collected from plugins
#[derive(Debug, PartialEq, Clone)]
pub struct ValueList<'a> {
    pub values: Vec<ValueReport<'a>>,
    /// The plugin that submitted this value. This would be your `PluginManager` when submitting
    /// values
    pub plugin: &'a str,
    /// Distinguishes entities that yield metrics. Each core would be a different instance of the
    /// same plugin, as each core reports "idle", "user", "system" metrics.
    pub plugin_instance: Option<&'a str>,
    /// This is the string found in types.db, determines how many values are expected and how they
    /// should be interpreted
    pub type_: &'a str,
    /// The type instance is used to separate values of identical type which nonetheless belong to
    /// one another. For instance, even though "free", "used", and "total" all have types of
    /// "Memory" they are different type instances.
    pub type_instance: Option<&'a str>,
    /// The hostname where the values were collected
    pub host: &'a str,
    /// The timestamp at which the value was collected
    pub time: DateTime<Utc>,
    /// The interval in which new values are to be expected
    pub interval: Duration,
    /// Metadata associated to the reported values
    pub meta: HashMap<String, MetaValue>,
    // Keep the original list and set around for calculating rates on demand
    // (raw pointers: valid only for the lifetime of the collectd callback
    // that produced them).
    original_list: *const value_list_t,
    original_set: *const data_set_t,
}
impl<'a> ValueList<'a> {
    /// Collectd does not automatically convert `Derived` values into a rate. This is why many
    /// write plugins have a `StoreRates` config option so that these rates are calculated on
    /// demand from collectd's internal cache. This function will return a vector that can supersede
    /// the `values` field that contains the rate of all non-gauge values. Values that are gauges
    /// remain unchanged, so one doesn't need to resort back to `values` field as this function
    /// will return everything prepped for submission.
    pub fn rates(&self) -> Result<Cow<'_, Vec<ValueReport<'a>>>, CacheRateError> {
        // As an optimization step, if we know all values are gauges there is no need to call out
        // to uc_get_rate as no values will be changed
        let all_gauges = self.values.iter().all(|x| match x.value {
            Value::Gauge(_) => true,
            _ => false,
        });
        if all_gauges {
            return Ok(Cow::Borrowed(&self.values));
        }
        // Ask collectd's value cache for per-value rates; null signals failure.
        let ptr = unsafe { uc_get_rate(self.original_set, self.original_list) };
        if !ptr.is_null() {
            // NOTE(review): the rate buffer returned by uc_get_rate is never
            // freed here — confirm who owns it (possible leak).
            let nv = unsafe { slice::from_raw_parts(ptr, self.values.len()) }
                .iter()
                .zip(self.values.iter())
                .map(|(rate, report)| match report.value {
                    // Gauges pass through untouched; everything else is
                    // replaced with a gauge carrying the computed rate.
                    Value::Gauge(_) => *report,
                    _ => ValueReport {
                        value: Value::Gauge(*rate),
                        ..*report
                    },
                })
                .collect();
            Ok(Cow::Owned(nv))
        } else {
            Err(CacheRateError)
        }
    }
    /// Builds a borrowed `ValueList` from the raw structures collectd hands to
    /// a write callback. Fails with `ReceiveError` on invalid UTF-8 text or
    /// bad metadata.
    pub fn from<'b>(
        set: &'b data_set_t,
        list: &'b value_list_t,
    ) -> Result<ValueList<'b>, ReceiveError> {
        let plugin = receive_array(&list.plugin, "", "plugin name")?;
        let ds_len = length(set.ds_num);
        let list_len = length(list.values_len);
        let values: Result<Vec<ValueReport<'_>>, ReceiveError> =
            unsafe { slice::from_raw_parts(list.values, list_len) }
                .iter()
                .zip(unsafe { slice::from_raw_parts(set.ds, ds_len) })
                .map(|(val, source)| unsafe {
                    // SAFETY: transmutes the raw `type_` tag into `ValueType`;
                    // this is UB for any value other than the four DS_TYPE_*
                    // constants. Assumes collectd only supplies valid tags —
                    // TODO confirm.
                    let v = match ::std::mem::transmute(source.type_) {
                        ValueType::Gauge => Value::Gauge(val.gauge),
                        ValueType::Counter => Value::Counter(val.counter),
                        ValueType::Derive => Value::Derive(val.derive),
                        ValueType::Absolute => Value::Absolute(val.absolute),
                    };
                    let name = receive_array(&source.name, plugin, "data source name")?;
                    Ok(ValueReport {
                        name,
                        value: v,
                        min: source.min,
                        max: source.max,
                    })
                })
                .collect();
        // Collectd should always stamp received lists; a zero here would make
        // the CdTime conversions below meaningless.
        assert!(list.time > 0);
        assert!(list.interval > 0);
        let plugin_instance =
            receive_array(&list.plugin_instance, plugin, "plugin_instance").map(empty_to_none)?;
        let type_ = receive_array(&list.type_, plugin, "type")?;
        let type_instance =
            receive_array(&list.type_instance, plugin, "type_instance").map(empty_to_none)?;
        let host = receive_array(&list.host, plugin, "host")?;
        let meta = from_meta_data(plugin, list.meta)?;
        Ok(ValueList {
            values: values?,
            plugin_instance,
            plugin,
            type_,
            type_instance,
            host,
            time: CdTime::from(list.time).into(),
            interval: CdTime::from(list.interval).into(),
            meta,
            original_list: list,
            original_set: set,
        })
    }
}
// Builder-side, fully borrowed counterpart of [ValueList]: accumulated by
// `ValueListBuilder` and turned into a C `value_list_t` inside `submit`.
// Optional fields left as `None` fall back to collectd defaults at submit time.
#[derive(Debug, PartialEq, Clone)]
struct SubmitValueList<'a> {
    values: &'a [Value],
    plugin_instance: Option<&'a str>,
    plugin: &'a str,
    type_: &'a str,
    type_instance: Option<&'a str>,
    host: Option<&'a str>,
    time: Option<DateTime<Utc>>,
    interval: Option<Duration>,
    meta: HashMap<&'a str, MetaValue>,
}
/// Creates a value list to report values to collectd.
///
/// Construct with [ValueListBuilder::new], chain the setter methods, then call
/// [ValueListBuilder::submit].
#[derive(Debug, PartialEq, Clone)]
pub struct ValueListBuilder<'a> {
    list: SubmitValueList<'a>,
}
impl<'a> ValueListBuilder<'a> {
    /// Primes a value list for submission. `plugin` will most likely be the name from the
    /// `PluginManager` and `type_` is the datatype found in types.db
    pub fn new<T: Into<&'a str>, U: Into<&'a str>>(plugin: T, type_: U) -> ValueListBuilder<'a> {
        ValueListBuilder {
            list: SubmitValueList {
                values: &[],
                plugin_instance: None,
                plugin: plugin.into(),
                type_: type_.into(),
                type_instance: None,
                host: None,
                time: None,
                interval: None,
                meta: HashMap::new(),
            },
        }
    }
    /// A set of observed values that belong to the same plugin and type instance
    pub fn values(mut self, values: &'a [Value]) -> ValueListBuilder<'a> {
        self.list.values = values;
        self
    }
    /// Distinguishes entities that yield metrics. Each core would be a different instance of the
    /// same plugin, as each core reports "idle", "user", "system" metrics.
    pub fn plugin_instance<T: Into<&'a str>>(mut self, plugin_instance: T) -> ValueListBuilder<'a> {
        self.list.plugin_instance = Some(plugin_instance.into());
        self
    }
    /// The type instance is used to separate values of identical type which nonetheless belong to
    /// one another. For instance, even though "free", "used", and "total" all have types of
    /// "Memory" they are different type instances.
    pub fn type_instance<T: Into<&'a str>>(mut self, type_instance: T) -> ValueListBuilder<'a> {
        self.list.type_instance = Some(type_instance.into());
        self
    }
    /// Override the machine's hostname that the observed values will be attributed to. Best to
    /// override when observing values from another machine
    pub fn host<T: Into<&'a str>>(mut self, host: T) -> ValueListBuilder<'a> {
        self.list.host = Some(host.into());
        self
    }
    /// The timestamp at which the value was collected. Overrides the default time, which is when
    /// collectd receives the values from `submit`. Use only if there is a significant delay in
    /// metrics gathering or if submitting values from the past.
    pub fn time(mut self, dt: DateTime<Utc>) -> ValueListBuilder<'a> {
        self.list.time = Some(dt);
        self
    }
    /// The interval in which new values are to be expected. This is typically handled at a global
    /// or plugin level. Use at your own discretion.
    pub fn interval(mut self, interval: Duration) -> ValueListBuilder<'a> {
        self.list.interval = Some(interval);
        self
    }
    /// Add a metadata entry.
    ///
    /// Multiple entries can be added by calling this method. If the same key is used, only the last
    /// entry is kept.
    pub fn metadata(mut self, key: &'a str, value: MetaValue) -> ValueListBuilder<'a> {
        self.list.meta.insert(key, value);
        self
    }
    /// Submits the observed values to collectd and returns errors if encountered
    pub fn submit(self) -> Result<(), SubmitError> {
        let mut v: Vec<value_t> = self.list.values.iter().map(|&x| x.into()).collect();
        let plugin_instance = self
            .list
            .plugin_instance
            .map(|x| submit_array_res(x, "plugin_instance"))
            .unwrap_or_else(|| Ok([0 as c_char; ARR_LENGTH]))?;
        let type_instance = self
            .list
            .type_instance
            .map(|x| submit_array_res(x, "type_instance"))
            .unwrap_or_else(|| Ok([0 as c_char; ARR_LENGTH]))?;
        let host = self
            .list
            .host
            .map(|x| submit_array_res(x, "host"))
            .transpose()?;
        // If a custom host is not provided by the plugin, we default to the global
        // hostname. In versions prior to collectd 5.7, it was required to propagate the
        // global hostname (hostname_g) in the submission. In collectd 5.7, one could
        // submit an empty array or hostname_g and they would equate to the same thing. In
        // collectd 5.8, hostname_g had the type signature changed so it could no longer be
        // submitted and would cause garbage to be read (and thus could have very much
        // unintended side effects)
        let host = host.unwrap_or([0 as c_char; ARR_LENGTH]);
        let len = v.len() as u64;
        let plugin = submit_array_res(self.list.plugin, "plugin")?;
        let type_ = submit_array_res(self.list.type_, "type")?;
        // NOTE(review): `meta` is an owned allocation and is not destroyed after
        // dispatch — confirm whether plugin_dispatch_values copies the metadata.
        let meta = to_meta_data(&self.list.meta)?;
        let list = value_list_t {
            values: v.as_mut_ptr(),
            values_len: len,
            plugin_instance,
            plugin,
            type_,
            type_instance,
            host,
            // Missing time/interval are encoded as CdTime(0); presumably collectd
            // then substitutes its own defaults — TODO confirm.
            time: self.list.time.map(CdTime::from).unwrap_or(CdTime(0)).into(),
            interval: self
                .list
                .interval
                .map(CdTime::from)
                .unwrap_or(CdTime(0))
                .into(),
            meta,
        };
        match unsafe { plugin_dispatch_values(&list) } {
            0 => Ok(()),
            i => Err(SubmitError::Dispatch(i)),
        }
    }
}
fn to_meta_data<'a, 'b: 'a, T>(meta_hm: T) -> Result<*mut meta_data_t, SubmitError>
where
T: IntoIterator<Item = (&'a &'b str, &'a MetaValue)>,
{
let meta = unsafe { meta_data_create() };
let conversion_result = to_meta_data_with_meta(meta_hm, meta);
match conversion_result {
Ok(()) => Ok(meta),
Err(error) => {
unsafe {
meta_data_destroy(meta);
}
Err(error)
}
}
}
fn to_meta_data_with_meta<'a, 'b: 'a, T>(
meta_hm: T,
meta: *mut meta_data_t,
) -> Result<(), SubmitError>
where
T: IntoIterator<Item = (&'a &'b str, &'a MetaValue)>,
{
for (key, value) in meta_hm.into_iter() {
let c_key = CString::new(*key).map_err(|e| SubmitError::Field {
name: "meta key",
err: ArrayError::NullPresent(e.nul_position(), key.to_string()),
})?;
match value {
MetaValue::String(str) => {
let c_value = CString::new(str.as_str()).map_err(|e| SubmitError::Field {
name: "meta value",
err: ArrayError::NullPresent(e.nul_position(), str.to_string()),
})?;
unsafe {
meta_data_add_string(meta, c_key.as_ptr(), c_value.as_ptr());
}
}
MetaValue::SignedInt(i) => unsafe {
meta_data_add_signed_int(meta, c_key.as_ptr(), *i);
},
MetaValue::UnsignedInt(u) => unsafe {
meta_data_add_unsigned_int(meta, c_key.as_ptr(), *u);
},
MetaValue::Double(d) => unsafe {
meta_data_add_double(meta, c_key.as_ptr(), *d);
},
MetaValue::Boolean(b) => unsafe {
meta_data_add_boolean(meta, c_key.as_ptr(), *b);
},
}
}
Ok(())
}
fn from_meta_data(
plugin: &str,
meta: *mut meta_data_t,
) -> Result<HashMap<String, MetaValue>, ReceiveError> {
if meta.is_null() {
return Ok(HashMap::new());
}
let mut c_toc: *mut *mut c_char = ptr::null_mut();
let count_or_err = unsafe { meta_data_toc(meta, &mut c_toc as *mut *mut *mut c_char) };
if count_or_err < 0 {
return Err(ReceiveError::Metadata {
plugin: plugin.to_string(),
field: "toc".to_string(),
msg: "invalid parameters to meta_data_toc",
});
}
let count = count_or_err as usize;
if count == 0 {
return Ok(HashMap::new());
}
let toc = unsafe { slice::from_raw_parts(c_toc, count) };
let conversion_result = from_meta_data_with_toc(plugin, meta, toc);
for c_key_ptr in toc {
unsafe {
libc::free(*c_key_ptr as *mut c_void);
}
}
unsafe {
libc::free(c_toc as *mut c_void);
}
conversion_result
}
fn from_meta_data_with_toc(
plugin: &str,
meta: *mut meta_data_t,
toc: &[*mut c_char],
) -> Result<HashMap<String, MetaValue>, ReceiveError> {
let mut meta_hm = HashMap::with_capacity(toc.len());
for c_key_ptr in toc {
let (c_key, key, value_type) = unsafe {
let c_key: &CStr = CStr::from_ptr(*c_key_ptr);
let key: String = c_key
.to_str()
.map_err(|e| ReceiveError::Utf8 {
plugin: plugin.to_string(),
field: "metadata key",
err: e,
})?
.to_string();
let value_type: u32 = meta_data_type(meta, c_key.as_ptr()) as u32;
(c_key, key, value_type)
};
match value_type {
MD_TYPE_BOOLEAN => {
let mut c_value = false;
unsafe {
meta_data_get_boolean(meta, c_key.as_ptr(), &mut c_value as *mut bool);
}
meta_hm.insert(key, MetaValue::Boolean(c_value));
}
MD_TYPE_DOUBLE => {
let mut c_value = 0.0;
unsafe {
meta_data_get_double(meta, c_key.as_ptr(), &mut c_value as *mut f64);
}
meta_hm.insert(key, MetaValue::Double(c_value));
}
MD_TYPE_SIGNED_INT => {
let mut c_value = 0i64;
unsafe {
meta_data_get_signed_int(meta, c_key.as_ptr(), &mut c_value as *mut i64);
}
meta_hm.insert(key, MetaValue::SignedInt(c_value));
}
MD_TYPE_STRING => {
let value: String = unsafe {
let mut c_value: *mut c_char = ptr::null_mut();
meta_data_get_string(meta, c_key.as_ptr(), &mut c_value as *mut *mut c_char);
CStr::from_ptr(c_value)
.to_str()
.map_err(|e| ReceiveError::Utf8 {
plugin: plugin.to_string(),
field: "metadata value",
err: e,
})?
.to_string()
};
meta_hm.insert(key, MetaValue::String(value));
}
MD_TYPE_UNSIGNED_INT => {
let mut c_value = 0u64;
unsafe {
meta_data_get_unsigned_int(meta, c_key.as_ptr(), &mut c_value as *mut u64);
}
meta_hm.insert(key, MetaValue::UnsignedInt(c_value));
}
_ => {
return Err(ReceiveError::Metadata {
plugin: plugin.to_string(),
field: key,
msg: "unknown metadata type",
});
}
}
}
Ok(meta_hm)
}
fn submit_array_res(s: &str, name: &'static str) -> Result<[c_char; ARR_LENGTH], SubmitError> {
to_array_res(s).map_err(|e| SubmitError::Field { name, err: e })
}
/// Collectd stores textual data in fixed sized arrays, so this function will convert a string
/// slice into array compatible with collectd's text fields. Be aware that `ARR_LENGTH` is 64
/// before collectd 5.7
fn to_array_res(s: &str) -> Result<[c_char; ARR_LENGTH], ArrayError> {
// By checking if the length is greater than or *equal* to, we guarantee a trailing null
if s.len() >= ARR_LENGTH {
return Err(ArrayError::TooLong(s.len()));
}
let bytes = s.as_bytes();
// Using memchr to find a null and work around it is 10x faster than
// using a CString to get the bytes_with_nul and cut the time to submit
// values to collectd in half.
if let Some(ind) = memchr(0, bytes) {
return Err(ArrayError::NullPresent(ind, s.to_string()));
}
let mut arr = [0; ARR_LENGTH];
arr[0..bytes.len()].copy_from_slice(bytes);
Ok(unsafe { ::std::mem::transmute(arr) })
}
fn receive_array<'a>(
s: &'a [c_char; ARR_LENGTH],
plugin: &str,
field: &'static str,
) -> Result<&'a str, ReceiveError> {
from_array(s).map_err(|e| ReceiveError::Utf8 {
plugin: String::from(plugin),
field,
err: e,
})
}
/// Turns a fixed size character array into string slice, if possible
pub fn from_array(s: &[c_char; ARR_LENGTH]) -> Result<&str, Utf8Error> {
unsafe {
let a = s as *const [c_char; ARR_LENGTH] as *const c_char;
CStr::from_ptr(a).to_str()
}
}
/// Returns if the string is empty or not
pub fn empty_to_none(s: &str) -> Option<&str> {
if s.is_empty() {
None
} else {
Some(s)
}
}
pub fn length(len: u64) -> usize {
len as usize
}
pub fn get_default_interval() -> u64 {
0
}
#[cfg(test)]
mod tests {
use self::cdtime::nanos_to_collectd;
use super::*;
use crate::bindings::data_source_t;
use std::os::raw::c_char;
#[test]
fn test_empty_to_none() {
assert_eq!(None, empty_to_none(""));
let s = "hi";
assert_eq!(Some("hi"), empty_to_none(s));
}
#[test]
fn test_from_array() {
let mut name: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
name[0] = b'h' as c_char;
name[1] = b'i' as c_char;
assert_eq!(Ok("hi"), from_array(&name));
}
#[test]
fn test_to_array() {
let actual = to_array_res("Hi");
assert!(actual.is_ok());
assert_eq!(&actual.unwrap()[..2], &[b'H' as c_char, b'i' as c_char]);
}
#[test]
fn test_to_array_res_nul() {
let actual = to_array_res("hi\0");
assert!(actual.is_err());
}
#[test]
fn test_to_array_res_too_long() {
let actual = to_array_res(
"Hello check this out, I am a long string and there is no signs of stopping; well, maybe one day I will stop when I get too longggggggggggggggggggggggggggggggggggg",
);
assert!(actual.is_err());
}
#[test]
fn test_submit() {
let values = vec![Value::Gauge(15.0), Value::Gauge(10.0), Value::Gauge(12.0)];
let result = ValueListBuilder::new("my-plugin", "load")
.values(&values)
.submit();
assert_eq!(result.unwrap(), ());
}
#[test]
fn test_recv_value_list_conversion() {
let empty: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
let mut metric: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
metric[0] = b'h' as c_char;
metric[1] = b'o' as c_char;
let mut name: [c_char; ARR_LENGTH] = [0; ARR_LENGTH];
name[0] = b'h' as c_char;
name[1] = b'i' as c_char;
let val = data_source_t {
name,
type_: DS_TYPE_GAUGE as i32,
min: 10.0,
max: 11.0,
};
let mut v = vec![val];
let conv = data_set_t {
type_: metric,
ds_num: 1,
ds: v.as_mut_ptr(),
};
let mut vs = vec![value_t { gauge: 3.0 }];
let list_t = value_list_t {
values: vs.as_mut_ptr(),
values_len: 1,
time: nanos_to_collectd(1_000_000_000),
interval: nanos_to_collectd(1_000_000_000),
host: metric,
plugin: name,
plugin_instance: metric,
type_: metric,
type_instance: empty,
meta: ptr::null_mut(),
};
let actual = ValueList::from(&conv, &list_t).unwrap();
assert_eq!(
actual,
ValueList {
values: vec![ValueReport {
name: "hi",
value: Value::Gauge(3.0),
min: 10.0,
max: 11.0,
}],
plugin_instance: Some("ho"),
plugin: "hi",
type_: "ho",
type_instance: None,
host: "ho",
time: Utc.ymd(1970, 1, 1).and_hms(0, 0, 1),
interval: Duration::seconds(1),
original_list: &list_t,
original_set: &conv,
meta: HashMap::new(),
}
);
}
}
| ValueReport | identifier_name |
input.ts | import { get } from "svelte/store";
import { type DialogState } from "@graphite/state-providers/dialog";
import { type DocumentState } from "@graphite/state-providers/document";
import { type FullscreenState } from "@graphite/state-providers/fullscreen";
import { type PortfolioState } from "@graphite/state-providers/portfolio";
import { makeKeyboardModifiersBitfield, textInputCleanup, getLocalizedScanCode } from "@graphite/utility-functions/keyboard-entry";
import { platformIsMac } from "@graphite/utility-functions/platform";
import { extractPixelData } from "@graphite/utility-functions/rasterization";
import { stripIndents } from "@graphite/utility-functions/strip-indents";
import { type Editor } from "@graphite/wasm-communication/editor";
import { TriggerPaste } from "@graphite/wasm-communication/messages";
type EventName = keyof HTMLElementEventMap | keyof WindowEventHandlersEventMap | "modifyinputfield";
type EventListenerTarget = {
addEventListener: typeof window.addEventListener;
removeEventListener: typeof window.removeEventListener;
};
export function createInputManager(editor: Editor, dialog: DialogState, portfolio: PortfolioState, document: DocumentState, fullscreen: FullscreenState): () => void {
const app = window.document.querySelector("[data-app-container]") as HTMLElement | undefined;
app?.focus();
let viewportPointerInteractionOngoing = false;
let textToolInteractiveInputElement = undefined as undefined | HTMLDivElement;
let canvasFocused = true;
// Event listeners
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const listeners: { target: EventListenerTarget; eventName: EventName; action: (event: any) => void; options?: AddEventListenerOptions }[] = [
{ target: window, eventName: "resize", action: () => onWindowResize(window.document.body) },
{ target: window, eventName: "beforeunload", action: (e: BeforeUnloadEvent) => onBeforeUnload(e) },
{ target: window, eventName: "keyup", action: (e: KeyboardEvent) => onKeyUp(e) },
{ target: window, eventName: "keydown", action: (e: KeyboardEvent) => onKeyDown(e) },
{ target: window, eventName: "pointermove", action: (e: PointerEvent) => onPointerMove(e) },
{ target: window, eventName: "pointerdown", action: (e: PointerEvent) => onPointerDown(e) },
{ target: window, eventName: "pointerup", action: (e: PointerEvent) => onPointerUp(e) },
{ target: window, eventName: "mousedown", action: (e: MouseEvent) => onMouseDown(e) },
{ target: window, eventName: "mouseup", action: (e: MouseEvent) => onPotentialDoubleClick(e) },
{ target: window, eventName: "wheel", action: (e: WheelEvent) => onWheelScroll(e), options: { passive: false } },
{ target: window, eventName: "modifyinputfield", action: (e: CustomEvent) => onModifyInputField(e) },
{ target: window, eventName: "focusout", action: () => (canvasFocused = false) },
{ target: window.document, eventName: "contextmenu", action: (e: MouseEvent) => onContextMenu(e) },
{ target: window.document, eventName: "fullscreenchange", action: () => fullscreen.fullscreenModeChanged() },
{ target: window.document.body, eventName: "paste", action: (e: ClipboardEvent) => onPaste(e) },
];
// Event bindings
function bindListeners(): void {
// Add event bindings for the lifetime of the application
listeners.forEach(({ target, eventName, action, options }) => target.addEventListener(eventName, action, options));
}
function unbindListeners(): void {
// Remove event bindings after the lifetime of the application (or on hot-module replacement during development)
listeners.forEach(({ target, eventName, action, options }) => target.removeEventListener(eventName, action, options));
}
// Keyboard events
async function shouldRedirectKeyboardEventToBackend(e: KeyboardEvent): Promise<boolean> {
// Don't redirect when a modal is covering the workspace
if (get(dialog).visible) return false;
const key = await getLocalizedScanCode(e);
// TODO: Switch to a system where everything is sent to the backend, then the input preprocessor makes decisions and kicks some inputs back to the frontend
const accelKey = platformIsMac() ? e.metaKey : e.ctrlKey;
// Don't redirect user input from text entry into HTML elements
if (targetIsTextField(e.target || undefined) && key !== "Escape" && !(accelKey && ["Enter", "NumpadEnter"].includes(key))) return false;
// Don't redirect paste
if (key === "KeyV" && accelKey) return false;
// Don't redirect a fullscreen request
if (key === "F11" && e.type === "keydown" && !e.repeat) {
e.preventDefault();
fullscreen.toggleFullscreen();
return false;
}
// Don't redirect a reload request
if (key === "F5") return false;
if (key === "KeyR" && accelKey) return false;
// Don't redirect debugging tools
if (["F12", "F8"].includes(key)) return false;
if (["KeyC", "KeyI", "KeyJ"].includes(key) && accelKey && e.shiftKey) return false;
// Don't redirect tab or enter if not in canvas (to allow navigating elements)
if (!canvasFocused && !targetIsTextField(e.target || undefined) && ["Tab", "Enter", "NumpadEnter", "Space", "ArrowDown", "ArrowLeft", "ArrowRight", "ArrowUp"].includes(key)) return false;
// Redirect to the backend
return true;
}
async function onKeyDown(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
const NO_KEY_REPEAT_MODIFIER_KEYS = ["ControlLeft", "ControlRight", "ShiftLeft", "ShiftRight", "MetaLeft", "MetaRight", "AltLeft", "AltRight", "AltGraph", "CapsLock", "Fn", "FnLock"];
if (e.repeat && NO_KEY_REPEAT_MODIFIER_KEYS.includes(key)) return;
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyDown(key, modifiers, e.repeat);
return;
}
if (get(dialog).visible && key === "Escape") {
dialog.dismissDialog();
}
}
async function onKeyUp(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyUp(key, modifiers, e.repeat);
}
}
// Pointer events
// While any pointer button is already down, additional button down events are not reported, but they are sent as `pointermove` events and these are handled in the backend
function onPointerMove(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
// Don't redirect pointer movement to the backend if there's no ongoing interaction and it's over a floating menu, or the graph overlay, on top of the canvas
// TODO: A better approach is to pass along a boolean to the backend's input preprocessor so it can know if it's being occluded by the GUI.
// TODO: This would allow it to properly decide to act on removing hover focus from something that was hovered in the canvas before moving over the GUI.
// TODO: Further explanation: https://github.com/GraphiteEditor/Graphite/pull/623#discussion_r866436197
const inFloatingMenu = e.target instanceof Element && e.target.closest("[data-floating-menu-content]");
const inGraphOverlay = get(document).graphViewOverlayOpen;
if (!viewportPointerInteractionOngoing && (inFloatingMenu || inGraphOverlay)) return;
const { target } = e;
const newInCanvasArea = (target instanceof Element && target.closest("[data-viewport], [data-graph]")) instanceof Element && !targetIsTextField(window.document.activeElement || undefined);
if (newInCanvasArea && !canvasFocused) {
canvasFocused = true;
app?.focus();
}
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseMove(e.clientX, e.clientY, e.buttons, modifiers);
}
function onMouseDown(e: MouseEvent): void {
// Block middle mouse button auto-scroll mode (the circlar gizmo that appears and allows quick scrolling by moving the cursor above or below it)
if (e.button === 1) e.preventDefault();
}
function onPointerDown(e: PointerEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
const inDialog = target instanceof Element && target.closest("[data-dialog-modal] [data-floating-menu-content]");
const inTextInput = target === textToolInteractiveInputElement;
if (get(dialog).visible && !inDialog) {
dialog.dismissDialog();
e.preventDefault();
e.stopPropagation();
}
if (!inTextInput) {
if (textToolInteractiveInputElement) editor.instance.onChangeText(textInputCleanup(textToolInteractiveInputElement.innerText));
else viewportPointerInteractionOngoing = isTargetingCanvas instanceof Element;
}
if (viewportPointerInteractionOngoing) {
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseDown(e.clientX, e.clientY, e.buttons, modifiers);
}
}
function onPointerUp(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
if (textToolInteractiveInputElement) return;
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseUp(e.clientX, e.clientY, e.buttons, modifiers);
}
function onPotentialDoubleClick(e: MouseEvent): void {
if (textToolInteractiveInputElement) return;
// Allow only double-clicks
if (e.detail !== 2) return;
// `e.buttons` is always 0 in the `mouseup` event, so we have to convert from `e.button` instead
let buttons = 1;
if (e.button === 0) buttons = 1; // LMB
if (e.button === 1) buttons = 4; // MMB
if (e.button === 2) buttons = 2; // RMB
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onDoubleClick(e.clientX, e.clientY, buttons, modifiers);
}
// Mouse events
function onWheelScroll(e: WheelEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
// Redirect vertical scroll wheel movement into a horizontal scroll on a horizontally scrollable element
// There seems to be no possible way to properly employ the browser's smooth scrolling interpolation
const horizontalScrollableElement = target instanceof Element && target.closest("[data-scrollable-x]");
if (horizontalScrollableElement && e.deltaY !== 0) {
horizontalScrollableElement.scrollTo(horizontalScrollableElement.scrollLeft + e.deltaY, 0);
return;
}
if (isTargetingCanvas) |
}
function onContextMenu(e: MouseEvent): void {
if (!targetIsTextField(e.target || undefined) && e.target !== textToolInteractiveInputElement) {
e.preventDefault();
}
}
// Receives a custom event dispatched when the user begins interactively editing with the text tool.
// We keep a copy of the text input element to check against when it's active for text entry.
function onModifyInputField(e: CustomEvent): void {
textToolInteractiveInputElement = e.detail;
}
// Window events
function onWindowResize(container: HTMLElement): void {
const viewports = Array.from(container.querySelectorAll("[data-viewport]"));
const boundsOfViewports = viewports.map((canvas) => {
const bounds = canvas.getBoundingClientRect();
return [bounds.left, bounds.top, bounds.right, bounds.bottom];
});
const flattened = boundsOfViewports.flat();
const data = Float64Array.from(flattened);
if (boundsOfViewports.length > 0) editor.instance.boundsOfViewports(data);
}
async function onBeforeUnload(e: BeforeUnloadEvent): Promise<void> {
const activeDocument = get(portfolio).documents[get(portfolio).activeDocumentIndex];
if (activeDocument && !activeDocument.isAutoSaved) editor.instance.triggerAutoSave(activeDocument.id);
// Skip the message if the editor crashed, since work is already lost
if (await editor.instance.hasCrashed()) return;
// Skip the message during development, since it's annoying when testing
if (await editor.instance.inDevelopmentMode()) return;
const allDocumentsSaved = get(portfolio).documents.reduce((acc, doc) => acc && doc.isSaved, true);
if (!allDocumentsSaved) {
e.returnValue = "Unsaved work will be lost if the web browser tab is closed. Close anyway?";
e.preventDefault();
}
}
function onPaste(e: ClipboardEvent): void {
const dataTransfer = e.clipboardData;
if (!dataTransfer || targetIsTextField(e.target || undefined)) return;
e.preventDefault();
Array.from(dataTransfer.items).forEach((item) => {
if (item.type === "text/plain") {
item.getAsString((text) => {
if (text.startsWith("graphite/layer: ")) {
editor.instance.pasteSerializedData(text.substring(16, text.length));
} else if (text.startsWith("graphite/nodes: ")) {
editor.instance.pasteSerializedNodes(text.substring(16, text.length));
}
});
}
const file = item.getAsFile();
if (file?.type.startsWith("image")) {
extractPixelData(file).then((imageData): void => {
editor.instance.pasteImage(new Uint8Array(imageData.data), imageData.width, imageData.height);
});
}
});
}
// Frontend message subscriptions
editor.subscriptions.subscribeJsMessage(TriggerPaste, async () => {
// In the try block, attempt to read from the Clipboard API, which may not have permission and may not be supported in all browsers
// In the catch block, explain to the user why the paste failed and how to fix or work around the problem
try {
// Attempt to check if the clipboard permission is denied, and throw an error if that is the case
// In Firefox, the `clipboard-read` permission isn't supported, so attempting to query it throws an error
// In Safari, the entire Permissions API isn't supported, so the query never occurs and this block is skipped without an error and we assume we might have permission
const clipboardRead = "clipboard-read" as PermissionName;
const permission = await navigator.permissions?.query({ name: clipboardRead });
if (permission?.state === "denied") throw new Error("Permission denied");
// Read the clipboard contents if the Clipboard API is available
const clipboardItems = await navigator.clipboard.read();
if (!clipboardItems) throw new Error("Clipboard API unsupported");
// Read any layer data or images from the clipboard
Array.from(clipboardItems).forEach(async (item) => {
// Read plain text and, if it is a layer, pass it to the editor
if (item.types.includes("text/plain")) {
const blob = await item.getType("text/plain");
const reader = new FileReader();
reader.onload = (): void => {
const text = reader.result as string;
if (text.startsWith("graphite/layer: ")) {
editor.instance.pasteSerializedData(text.substring(16, text.length));
}
};
reader.readAsText(blob);
}
// Read an image from the clipboard and pass it to the editor to be loaded
const imageType = item.types.find((type) => type.startsWith("image/"));
if (imageType) {
const blob = await item.getType(imageType);
const reader = new FileReader();
reader.onload = async (): Promise<void> => {
if (reader.result instanceof ArrayBuffer) {
const imageData = await extractPixelData(new Blob([reader.result], { type: imageType }));
editor.instance.pasteImage(new Uint8Array(imageData.data), imageData.width, imageData.height);
}
};
reader.readAsArrayBuffer(blob);
}
});
} catch (err) {
const unsupported = stripIndents`
This browser does not support reading from the clipboard.
Use the keyboard shortcut to paste instead.
`;
const denied = stripIndents`
The browser's clipboard permission has been denied.
Open the browser's website settings (usually accessible
just left of the URL) to allow this permission.
`;
const matchMessage = {
"clipboard-read": unsupported,
"Clipboard API unsupported": unsupported,
"Permission denied": denied,
};
const message = Object.entries(matchMessage).find(([key]) => String(err).includes(key))?.[1] || String(err);
editor.instance.errorDialog("Cannot access clipboard", message);
}
});
// Initialization
// Bind the event listeners
bindListeners();
// Resize on creation
onWindowResize(window.document.body);
// Return the destructor
return unbindListeners;
}
function targetIsTextField(target: EventTarget | HTMLElement | undefined): boolean {
return target instanceof HTMLElement && (target.nodeName === "INPUT" || target.nodeName === "TEXTAREA" || target.isContentEditable);
}
| {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onWheelScroll(e.clientX, e.clientY, e.buttons, e.deltaX, e.deltaY, e.deltaZ, modifiers);
} | conditional_block |
input.ts | import { get } from "svelte/store";
import { type DialogState } from "@graphite/state-providers/dialog";
import { type DocumentState } from "@graphite/state-providers/document";
import { type FullscreenState } from "@graphite/state-providers/fullscreen";
import { type PortfolioState } from "@graphite/state-providers/portfolio";
import { makeKeyboardModifiersBitfield, textInputCleanup, getLocalizedScanCode } from "@graphite/utility-functions/keyboard-entry";
import { platformIsMac } from "@graphite/utility-functions/platform";
import { extractPixelData } from "@graphite/utility-functions/rasterization";
import { stripIndents } from "@graphite/utility-functions/strip-indents";
import { type Editor } from "@graphite/wasm-communication/editor";
import { TriggerPaste } from "@graphite/wasm-communication/messages";
type EventName = keyof HTMLElementEventMap | keyof WindowEventHandlersEventMap | "modifyinputfield";
type EventListenerTarget = {
addEventListener: typeof window.addEventListener;
removeEventListener: typeof window.removeEventListener;
};
export function createInputManager(editor: Editor, dialog: DialogState, portfolio: PortfolioState, document: DocumentState, fullscreen: FullscreenState): () => void {
const app = window.document.querySelector("[data-app-container]") as HTMLElement | undefined;
app?.focus();
let viewportPointerInteractionOngoing = false;
let textToolInteractiveInputElement = undefined as undefined | HTMLDivElement;
let canvasFocused = true;
// Event listeners
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const listeners: { target: EventListenerTarget; eventName: EventName; action: (event: any) => void; options?: AddEventListenerOptions }[] = [
{ target: window, eventName: "resize", action: () => onWindowResize(window.document.body) },
{ target: window, eventName: "beforeunload", action: (e: BeforeUnloadEvent) => onBeforeUnload(e) },
{ target: window, eventName: "keyup", action: (e: KeyboardEvent) => onKeyUp(e) },
{ target: window, eventName: "keydown", action: (e: KeyboardEvent) => onKeyDown(e) },
{ target: window, eventName: "pointermove", action: (e: PointerEvent) => onPointerMove(e) },
{ target: window, eventName: "pointerdown", action: (e: PointerEvent) => onPointerDown(e) },
{ target: window, eventName: "pointerup", action: (e: PointerEvent) => onPointerUp(e) },
{ target: window, eventName: "mousedown", action: (e: MouseEvent) => onMouseDown(e) },
{ target: window, eventName: "mouseup", action: (e: MouseEvent) => onPotentialDoubleClick(e) },
{ target: window, eventName: "wheel", action: (e: WheelEvent) => onWheelScroll(e), options: { passive: false } },
{ target: window, eventName: "modifyinputfield", action: (e: CustomEvent) => onModifyInputField(e) },
{ target: window, eventName: "focusout", action: () => (canvasFocused = false) },
{ target: window.document, eventName: "contextmenu", action: (e: MouseEvent) => onContextMenu(e) },
{ target: window.document, eventName: "fullscreenchange", action: () => fullscreen.fullscreenModeChanged() },
{ target: window.document.body, eventName: "paste", action: (e: ClipboardEvent) => onPaste(e) },
];
// Event bindings
function bindListeners(): void {
// Add event bindings for the lifetime of the application
listeners.forEach(({ target, eventName, action, options }) => target.addEventListener(eventName, action, options));
}
function unbindListeners(): void {
// Remove event bindings after the lifetime of the application (or on hot-module replacement during development)
listeners.forEach(({ target, eventName, action, options }) => target.removeEventListener(eventName, action, options));
}
// Keyboard events
async function shouldRedirectKeyboardEventToBackend(e: KeyboardEvent): Promise<boolean> {
// Don't redirect when a modal is covering the workspace
if (get(dialog).visible) return false;
const key = await getLocalizedScanCode(e);
// TODO: Switch to a system where everything is sent to the backend, then the input preprocessor makes decisions and kicks some inputs back to the frontend
const accelKey = platformIsMac() ? e.metaKey : e.ctrlKey;
// Don't redirect user input from text entry into HTML elements
if (targetIsTextField(e.target || undefined) && key !== "Escape" && !(accelKey && ["Enter", "NumpadEnter"].includes(key))) return false;
// Don't redirect paste
if (key === "KeyV" && accelKey) return false;
// Don't redirect a fullscreen request
if (key === "F11" && e.type === "keydown" && !e.repeat) {
e.preventDefault();
fullscreen.toggleFullscreen();
return false;
}
// Don't redirect a reload request
if (key === "F5") return false;
if (key === "KeyR" && accelKey) return false;
// Don't redirect debugging tools
if (["F12", "F8"].includes(key)) return false;
if (["KeyC", "KeyI", "KeyJ"].includes(key) && accelKey && e.shiftKey) return false;
// Don't redirect tab or enter if not in canvas (to allow navigating elements)
if (!canvasFocused && !targetIsTextField(e.target || undefined) && ["Tab", "Enter", "NumpadEnter", "Space", "ArrowDown", "ArrowLeft", "ArrowRight", "ArrowUp"].includes(key)) return false;
// Redirect to the backend
return true;
}
async function onKeyDown(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
const NO_KEY_REPEAT_MODIFIER_KEYS = ["ControlLeft", "ControlRight", "ShiftLeft", "ShiftRight", "MetaLeft", "MetaRight", "AltLeft", "AltRight", "AltGraph", "CapsLock", "Fn", "FnLock"];
if (e.repeat && NO_KEY_REPEAT_MODIFIER_KEYS.includes(key)) return;
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyDown(key, modifiers, e.repeat);
return;
}
if (get(dialog).visible && key === "Escape") {
dialog.dismissDialog();
}
}
async function onKeyUp(e: KeyboardEvent): Promise<void> |
// Pointer events
// While any pointer button is already down, additional button down events are not reported, but they are sent as `pointermove` events and these are handled in the backend
function onPointerMove(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
// Don't redirect pointer movement to the backend if there's no ongoing interaction and it's over a floating menu, or the graph overlay, on top of the canvas
// TODO: A better approach is to pass along a boolean to the backend's input preprocessor so it can know if it's being occluded by the GUI.
// TODO: This would allow it to properly decide to act on removing hover focus from something that was hovered in the canvas before moving over the GUI.
// TODO: Further explanation: https://github.com/GraphiteEditor/Graphite/pull/623#discussion_r866436197
const inFloatingMenu = e.target instanceof Element && e.target.closest("[data-floating-menu-content]");
const inGraphOverlay = get(document).graphViewOverlayOpen;
if (!viewportPointerInteractionOngoing && (inFloatingMenu || inGraphOverlay)) return;
const { target } = e;
const newInCanvasArea = (target instanceof Element && target.closest("[data-viewport], [data-graph]")) instanceof Element && !targetIsTextField(window.document.activeElement || undefined);
if (newInCanvasArea && !canvasFocused) {
canvasFocused = true;
app?.focus();
}
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseMove(e.clientX, e.clientY, e.buttons, modifiers);
}
function onMouseDown(e: MouseEvent): void {
// Block middle mouse button auto-scroll mode (the circlar gizmo that appears and allows quick scrolling by moving the cursor above or below it)
if (e.button === 1) e.preventDefault();
}
function onPointerDown(e: PointerEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
const inDialog = target instanceof Element && target.closest("[data-dialog-modal] [data-floating-menu-content]");
const inTextInput = target === textToolInteractiveInputElement;
if (get(dialog).visible && !inDialog) {
dialog.dismissDialog();
e.preventDefault();
e.stopPropagation();
}
if (!inTextInput) {
if (textToolInteractiveInputElement) editor.instance.onChangeText(textInputCleanup(textToolInteractiveInputElement.innerText));
else viewportPointerInteractionOngoing = isTargetingCanvas instanceof Element;
}
if (viewportPointerInteractionOngoing) {
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseDown(e.clientX, e.clientY, e.buttons, modifiers);
}
}
function onPointerUp(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
if (textToolInteractiveInputElement) return;
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseUp(e.clientX, e.clientY, e.buttons, modifiers);
}
function onPotentialDoubleClick(e: MouseEvent): void {
if (textToolInteractiveInputElement) return;
// Allow only double-clicks
if (e.detail !== 2) return;
// `e.buttons` is always 0 in the `mouseup` event, so we have to convert from `e.button` instead
let buttons = 1;
if (e.button === 0) buttons = 1; // LMB
if (e.button === 1) buttons = 4; // MMB
if (e.button === 2) buttons = 2; // RMB
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onDoubleClick(e.clientX, e.clientY, buttons, modifiers);
}
// Mouse events
function onWheelScroll(e: WheelEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
// Redirect vertical scroll wheel movement into a horizontal scroll on a horizontally scrollable element
// There seems to be no possible way to properly employ the browser's smooth scrolling interpolation
const horizontalScrollableElement = target instanceof Element && target.closest("[data-scrollable-x]");
if (horizontalScrollableElement && e.deltaY !== 0) {
horizontalScrollableElement.scrollTo(horizontalScrollableElement.scrollLeft + e.deltaY, 0);
return;
}
if (isTargetingCanvas) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onWheelScroll(e.clientX, e.clientY, e.buttons, e.deltaX, e.deltaY, e.deltaZ, modifiers);
}
}
function onContextMenu(e: MouseEvent): void {
if (!targetIsTextField(e.target || undefined) && e.target !== textToolInteractiveInputElement) {
e.preventDefault();
}
}
// Receives a custom event dispatched when the user begins interactively editing with the text tool.
// We keep a copy of the text input element to check against when it's active for text entry.
function onModifyInputField(e: CustomEvent): void {
textToolInteractiveInputElement = e.detail;
}
// Window events
function onWindowResize(container: HTMLElement): void {
const viewports = Array.from(container.querySelectorAll("[data-viewport]"));
const boundsOfViewports = viewports.map((canvas) => {
const bounds = canvas.getBoundingClientRect();
return [bounds.left, bounds.top, bounds.right, bounds.bottom];
});
const flattened = boundsOfViewports.flat();
const data = Float64Array.from(flattened);
if (boundsOfViewports.length > 0) editor.instance.boundsOfViewports(data);
}
async function onBeforeUnload(e: BeforeUnloadEvent): Promise<void> {
const activeDocument = get(portfolio).documents[get(portfolio).activeDocumentIndex];
if (activeDocument && !activeDocument.isAutoSaved) editor.instance.triggerAutoSave(activeDocument.id);
// Skip the message if the editor crashed, since work is already lost
if (await editor.instance.hasCrashed()) return;
// Skip the message during development, since it's annoying when testing
if (await editor.instance.inDevelopmentMode()) return;
const allDocumentsSaved = get(portfolio).documents.reduce((acc, doc) => acc && doc.isSaved, true);
if (!allDocumentsSaved) {
e.returnValue = "Unsaved work will be lost if the web browser tab is closed. Close anyway?";
e.preventDefault();
}
}
function onPaste(e: ClipboardEvent): void {
const dataTransfer = e.clipboardData;
if (!dataTransfer || targetIsTextField(e.target || undefined)) return;
e.preventDefault();
Array.from(dataTransfer.items).forEach((item) => {
if (item.type === "text/plain") {
item.getAsString((text) => {
if (text.startsWith("graphite/layer: ")) {
editor.instance.pasteSerializedData(text.substring(16, text.length));
} else if (text.startsWith("graphite/nodes: ")) {
editor.instance.pasteSerializedNodes(text.substring(16, text.length));
}
});
}
const file = item.getAsFile();
if (file?.type.startsWith("image")) {
extractPixelData(file).then((imageData): void => {
editor.instance.pasteImage(new Uint8Array(imageData.data), imageData.width, imageData.height);
});
}
});
}
// Frontend message subscriptions
editor.subscriptions.subscribeJsMessage(TriggerPaste, async () => {
// In the try block, attempt to read from the Clipboard API, which may not have permission and may not be supported in all browsers
// In the catch block, explain to the user why the paste failed and how to fix or work around the problem
try {
// Attempt to check if the clipboard permission is denied, and throw an error if that is the case
// In Firefox, the `clipboard-read` permission isn't supported, so attempting to query it throws an error
// In Safari, the entire Permissions API isn't supported, so the query never occurs and this block is skipped without an error and we assume we might have permission
const clipboardRead = "clipboard-read" as PermissionName;
const permission = await navigator.permissions?.query({ name: clipboardRead });
if (permission?.state === "denied") throw new Error("Permission denied");
// Read the clipboard contents if the Clipboard API is available
const clipboardItems = await navigator.clipboard.read();
if (!clipboardItems) throw new Error("Clipboard API unsupported");
// Read any layer data or images from the clipboard
Array.from(clipboardItems).forEach(async (item) => {
// Read plain text and, if it is a layer, pass it to the editor
if (item.types.includes("text/plain")) {
const blob = await item.getType("text/plain");
const reader = new FileReader();
reader.onload = (): void => {
const text = reader.result as string;
if (text.startsWith("graphite/layer: ")) {
editor.instance.pasteSerializedData(text.substring(16, text.length));
}
};
reader.readAsText(blob);
}
// Read an image from the clipboard and pass it to the editor to be loaded
const imageType = item.types.find((type) => type.startsWith("image/"));
if (imageType) {
const blob = await item.getType(imageType);
const reader = new FileReader();
reader.onload = async (): Promise<void> => {
if (reader.result instanceof ArrayBuffer) {
const imageData = await extractPixelData(new Blob([reader.result], { type: imageType }));
editor.instance.pasteImage(new Uint8Array(imageData.data), imageData.width, imageData.height);
}
};
reader.readAsArrayBuffer(blob);
}
});
} catch (err) {
const unsupported = stripIndents`
This browser does not support reading from the clipboard.
Use the keyboard shortcut to paste instead.
`;
const denied = stripIndents`
The browser's clipboard permission has been denied.
Open the browser's website settings (usually accessible
just left of the URL) to allow this permission.
`;
const matchMessage = {
"clipboard-read": unsupported,
"Clipboard API unsupported": unsupported,
"Permission denied": denied,
};
const message = Object.entries(matchMessage).find(([key]) => String(err).includes(key))?.[1] || String(err);
editor.instance.errorDialog("Cannot access clipboard", message);
}
});
// Initialization
// Bind the event listeners
bindListeners();
// Resize on creation
onWindowResize(window.document.body);
// Return the destructor
return unbindListeners;
}
function targetIsTextField(target: EventTarget | HTMLElement | undefined): boolean {
return target instanceof HTMLElement && (target.nodeName === "INPUT" || target.nodeName === "TEXTAREA" || target.isContentEditable);
}
| {
const key = await getLocalizedScanCode(e);
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyUp(key, modifiers, e.repeat);
}
} | identifier_body |
input.ts | import { get } from "svelte/store";
import { type DialogState } from "@graphite/state-providers/dialog";
import { type DocumentState } from "@graphite/state-providers/document";
import { type FullscreenState } from "@graphite/state-providers/fullscreen";
import { type PortfolioState } from "@graphite/state-providers/portfolio";
import { makeKeyboardModifiersBitfield, textInputCleanup, getLocalizedScanCode } from "@graphite/utility-functions/keyboard-entry";
import { platformIsMac } from "@graphite/utility-functions/platform";
import { extractPixelData } from "@graphite/utility-functions/rasterization";
import { stripIndents } from "@graphite/utility-functions/strip-indents";
import { type Editor } from "@graphite/wasm-communication/editor";
import { TriggerPaste } from "@graphite/wasm-communication/messages";
type EventName = keyof HTMLElementEventMap | keyof WindowEventHandlersEventMap | "modifyinputfield";
type EventListenerTarget = {
addEventListener: typeof window.addEventListener;
removeEventListener: typeof window.removeEventListener;
};
export function createInputManager(editor: Editor, dialog: DialogState, portfolio: PortfolioState, document: DocumentState, fullscreen: FullscreenState): () => void {
const app = window.document.querySelector("[data-app-container]") as HTMLElement | undefined;
app?.focus();
let viewportPointerInteractionOngoing = false;
let textToolInteractiveInputElement = undefined as undefined | HTMLDivElement;
let canvasFocused = true;
// Event listeners
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const listeners: { target: EventListenerTarget; eventName: EventName; action: (event: any) => void; options?: AddEventListenerOptions }[] = [
{ target: window, eventName: "resize", action: () => onWindowResize(window.document.body) },
{ target: window, eventName: "beforeunload", action: (e: BeforeUnloadEvent) => onBeforeUnload(e) },
{ target: window, eventName: "keyup", action: (e: KeyboardEvent) => onKeyUp(e) },
{ target: window, eventName: "keydown", action: (e: KeyboardEvent) => onKeyDown(e) },
{ target: window, eventName: "pointermove", action: (e: PointerEvent) => onPointerMove(e) },
{ target: window, eventName: "pointerdown", action: (e: PointerEvent) => onPointerDown(e) },
{ target: window, eventName: "pointerup", action: (e: PointerEvent) => onPointerUp(e) },
{ target: window, eventName: "mousedown", action: (e: MouseEvent) => onMouseDown(e) },
{ target: window, eventName: "mouseup", action: (e: MouseEvent) => onPotentialDoubleClick(e) },
{ target: window, eventName: "wheel", action: (e: WheelEvent) => onWheelScroll(e), options: { passive: false } },
{ target: window, eventName: "modifyinputfield", action: (e: CustomEvent) => onModifyInputField(e) },
{ target: window, eventName: "focusout", action: () => (canvasFocused = false) },
{ target: window.document, eventName: "contextmenu", action: (e: MouseEvent) => onContextMenu(e) },
{ target: window.document, eventName: "fullscreenchange", action: () => fullscreen.fullscreenModeChanged() },
{ target: window.document.body, eventName: "paste", action: (e: ClipboardEvent) => onPaste(e) },
];
// Event bindings
function bindListeners(): void {
// Add event bindings for the lifetime of the application
listeners.forEach(({ target, eventName, action, options }) => target.addEventListener(eventName, action, options));
}
function unbindListeners(): void {
// Remove event bindings after the lifetime of the application (or on hot-module replacement during development)
listeners.forEach(({ target, eventName, action, options }) => target.removeEventListener(eventName, action, options));
}
// Keyboard events
async function shouldRedirectKeyboardEventToBackend(e: KeyboardEvent): Promise<boolean> {
// Don't redirect when a modal is covering the workspace
if (get(dialog).visible) return false;
const key = await getLocalizedScanCode(e);
// TODO: Switch to a system where everything is sent to the backend, then the input preprocessor makes decisions and kicks some inputs back to the frontend
const accelKey = platformIsMac() ? e.metaKey : e.ctrlKey;
// Don't redirect user input from text entry into HTML elements
if (targetIsTextField(e.target || undefined) && key !== "Escape" && !(accelKey && ["Enter", "NumpadEnter"].includes(key))) return false;
// Don't redirect paste
if (key === "KeyV" && accelKey) return false;
// Don't redirect a fullscreen request
if (key === "F11" && e.type === "keydown" && !e.repeat) {
e.preventDefault();
fullscreen.toggleFullscreen();
return false;
}
// Don't redirect a reload request
if (key === "F5") return false;
if (key === "KeyR" && accelKey) return false;
// Don't redirect debugging tools
if (["F12", "F8"].includes(key)) return false;
if (["KeyC", "KeyI", "KeyJ"].includes(key) && accelKey && e.shiftKey) return false;
// Don't redirect tab or enter if not in canvas (to allow navigating elements)
if (!canvasFocused && !targetIsTextField(e.target || undefined) && ["Tab", "Enter", "NumpadEnter", "Space", "ArrowDown", "ArrowLeft", "ArrowRight", "ArrowUp"].includes(key)) return false;
// Redirect to the backend
return true;
}
async function onKeyDown(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
const NO_KEY_REPEAT_MODIFIER_KEYS = ["ControlLeft", "ControlRight", "ShiftLeft", "ShiftRight", "MetaLeft", "MetaRight", "AltLeft", "AltRight", "AltGraph", "CapsLock", "Fn", "FnLock"];
if (e.repeat && NO_KEY_REPEAT_MODIFIER_KEYS.includes(key)) return;
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyDown(key, modifiers, e.repeat);
return;
}
if (get(dialog).visible && key === "Escape") {
dialog.dismissDialog();
}
}
async function onKeyUp(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyUp(key, modifiers, e.repeat);
}
}
// Pointer events
// While any pointer button is already down, additional button down events are not reported, but they are sent as `pointermove` events and these are handled in the backend
function onPointerMove(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
// Don't redirect pointer movement to the backend if there's no ongoing interaction and it's over a floating menu, or the graph overlay, on top of the canvas
// TODO: A better approach is to pass along a boolean to the backend's input preprocessor so it can know if it's being occluded by the GUI.
// TODO: This would allow it to properly decide to act on removing hover focus from something that was hovered in the canvas before moving over the GUI.
// TODO: Further explanation: https://github.com/GraphiteEditor/Graphite/pull/623#discussion_r866436197
const inFloatingMenu = e.target instanceof Element && e.target.closest("[data-floating-menu-content]");
const inGraphOverlay = get(document).graphViewOverlayOpen;
if (!viewportPointerInteractionOngoing && (inFloatingMenu || inGraphOverlay)) return;
const { target } = e;
const newInCanvasArea = (target instanceof Element && target.closest("[data-viewport], [data-graph]")) instanceof Element && !targetIsTextField(window.document.activeElement || undefined);
if (newInCanvasArea && !canvasFocused) {
canvasFocused = true;
app?.focus();
}
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseMove(e.clientX, e.clientY, e.buttons, modifiers);
}
function onMouseDown(e: MouseEvent): void {
// Block middle mouse button auto-scroll mode (the circlar gizmo that appears and allows quick scrolling by moving the cursor above or below it)
if (e.button === 1) e.preventDefault();
}
function onPointerDown(e: PointerEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
const inDialog = target instanceof Element && target.closest("[data-dialog-modal] [data-floating-menu-content]");
const inTextInput = target === textToolInteractiveInputElement;
if (get(dialog).visible && !inDialog) {
dialog.dismissDialog();
e.preventDefault();
e.stopPropagation();
}
if (!inTextInput) {
if (textToolInteractiveInputElement) editor.instance.onChangeText(textInputCleanup(textToolInteractiveInputElement.innerText));
else viewportPointerInteractionOngoing = isTargetingCanvas instanceof Element;
}
if (viewportPointerInteractionOngoing) {
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseDown(e.clientX, e.clientY, e.buttons, modifiers);
}
}
function onPointerUp(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
if (textToolInteractiveInputElement) return;
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseUp(e.clientX, e.clientY, e.buttons, modifiers);
}
function onPotentialDoubleClick(e: MouseEvent): void {
if (textToolInteractiveInputElement) return;
// Allow only double-clicks
if (e.detail !== 2) return;
// `e.buttons` is always 0 in the `mouseup` event, so we have to convert from `e.button` instead
let buttons = 1;
if (e.button === 0) buttons = 1; // LMB
if (e.button === 1) buttons = 4; // MMB
if (e.button === 2) buttons = 2; // RMB
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onDoubleClick(e.clientX, e.clientY, buttons, modifiers);
}
// Mouse events
function onWheelScroll(e: WheelEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
// Redirect vertical scroll wheel movement into a horizontal scroll on a horizontally scrollable element
// There seems to be no possible way to properly employ the browser's smooth scrolling interpolation
const horizontalScrollableElement = target instanceof Element && target.closest("[data-scrollable-x]");
if (horizontalScrollableElement && e.deltaY !== 0) {
horizontalScrollableElement.scrollTo(horizontalScrollableElement.scrollLeft + e.deltaY, 0);
return;
}
if (isTargetingCanvas) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onWheelScroll(e.clientX, e.clientY, e.buttons, e.deltaX, e.deltaY, e.deltaZ, modifiers);
}
}
function onContextMenu(e: MouseEvent): void {
if (!targetIsTextField(e.target || undefined) && e.target !== textToolInteractiveInputElement) {
e.preventDefault();
}
}
// Receives a custom event dispatched when the user begins interactively editing with the text tool.
// We keep a copy of the text input element to check against when it's active for text entry.
function onModifyInputField(e: CustomEvent): void {
textToolInteractiveInputElement = e.detail;
}
// Window events
function onWindowResize(container: HTMLElement): void {
const viewports = Array.from(container.querySelectorAll("[data-viewport]"));
const boundsOfViewports = viewports.map((canvas) => {
const bounds = canvas.getBoundingClientRect();
return [bounds.left, bounds.top, bounds.right, bounds.bottom];
});
const flattened = boundsOfViewports.flat();
const data = Float64Array.from(flattened);
if (boundsOfViewports.length > 0) editor.instance.boundsOfViewports(data);
}
async function onBeforeUnload(e: BeforeUnloadEvent): Promise<void> {
const activeDocument = get(portfolio).documents[get(portfolio).activeDocumentIndex];
if (activeDocument && !activeDocument.isAutoSaved) editor.instance.triggerAutoSave(activeDocument.id);
// Skip the message if the editor crashed, since work is already lost
if (await editor.instance.hasCrashed()) return;
// Skip the message during development, since it's annoying when testing
if (await editor.instance.inDevelopmentMode()) return;
const allDocumentsSaved = get(portfolio).documents.reduce((acc, doc) => acc && doc.isSaved, true);
if (!allDocumentsSaved) {
e.returnValue = "Unsaved work will be lost if the web browser tab is closed. Close anyway?";
e.preventDefault();
}
}
function onPaste(e: ClipboardEvent): void {
const dataTransfer = e.clipboardData;
if (!dataTransfer || targetIsTextField(e.target || undefined)) return;
e.preventDefault();
Array.from(dataTransfer.items).forEach((item) => {
if (item.type === "text/plain") { | editor.instance.pasteSerializedData(text.substring(16, text.length));
} else if (text.startsWith("graphite/nodes: ")) {
editor.instance.pasteSerializedNodes(text.substring(16, text.length));
}
});
}
const file = item.getAsFile();
if (file?.type.startsWith("image")) {
extractPixelData(file).then((imageData): void => {
editor.instance.pasteImage(new Uint8Array(imageData.data), imageData.width, imageData.height);
});
}
});
}
// Frontend message subscriptions
editor.subscriptions.subscribeJsMessage(TriggerPaste, async () => {
// In the try block, attempt to read from the Clipboard API, which may not have permission and may not be supported in all browsers
// In the catch block, explain to the user why the paste failed and how to fix or work around the problem
try {
// Attempt to check if the clipboard permission is denied, and throw an error if that is the case
// In Firefox, the `clipboard-read` permission isn't supported, so attempting to query it throws an error
// In Safari, the entire Permissions API isn't supported, so the query never occurs and this block is skipped without an error and we assume we might have permission
const clipboardRead = "clipboard-read" as PermissionName;
const permission = await navigator.permissions?.query({ name: clipboardRead });
if (permission?.state === "denied") throw new Error("Permission denied");
// Read the clipboard contents if the Clipboard API is available
const clipboardItems = await navigator.clipboard.read();
if (!clipboardItems) throw new Error("Clipboard API unsupported");
// Read any layer data or images from the clipboard
Array.from(clipboardItems).forEach(async (item) => {
// Read plain text and, if it is a layer, pass it to the editor
if (item.types.includes("text/plain")) {
const blob = await item.getType("text/plain");
const reader = new FileReader();
reader.onload = (): void => {
const text = reader.result as string;
if (text.startsWith("graphite/layer: ")) {
editor.instance.pasteSerializedData(text.substring(16, text.length));
}
};
reader.readAsText(blob);
}
// Read an image from the clipboard and pass it to the editor to be loaded
const imageType = item.types.find((type) => type.startsWith("image/"));
if (imageType) {
const blob = await item.getType(imageType);
const reader = new FileReader();
reader.onload = async (): Promise<void> => {
if (reader.result instanceof ArrayBuffer) {
const imageData = await extractPixelData(new Blob([reader.result], { type: imageType }));
editor.instance.pasteImage(new Uint8Array(imageData.data), imageData.width, imageData.height);
}
};
reader.readAsArrayBuffer(blob);
}
});
} catch (err) {
const unsupported = stripIndents`
This browser does not support reading from the clipboard.
Use the keyboard shortcut to paste instead.
`;
const denied = stripIndents`
The browser's clipboard permission has been denied.
Open the browser's website settings (usually accessible
just left of the URL) to allow this permission.
`;
const matchMessage = {
"clipboard-read": unsupported,
"Clipboard API unsupported": unsupported,
"Permission denied": denied,
};
const message = Object.entries(matchMessage).find(([key]) => String(err).includes(key))?.[1] || String(err);
editor.instance.errorDialog("Cannot access clipboard", message);
}
});
// Initialization
// Bind the event listeners
bindListeners();
// Resize on creation
onWindowResize(window.document.body);
// Return the destructor
return unbindListeners;
}
function targetIsTextField(target: EventTarget | HTMLElement | undefined): boolean {
return target instanceof HTMLElement && (target.nodeName === "INPUT" || target.nodeName === "TEXTAREA" || target.isContentEditable);
} | item.getAsString((text) => {
if (text.startsWith("graphite/layer: ")) { | random_line_split |
input.ts | import { get } from "svelte/store";
import { type DialogState } from "@graphite/state-providers/dialog";
import { type DocumentState } from "@graphite/state-providers/document";
import { type FullscreenState } from "@graphite/state-providers/fullscreen";
import { type PortfolioState } from "@graphite/state-providers/portfolio";
import { makeKeyboardModifiersBitfield, textInputCleanup, getLocalizedScanCode } from "@graphite/utility-functions/keyboard-entry";
import { platformIsMac } from "@graphite/utility-functions/platform";
import { extractPixelData } from "@graphite/utility-functions/rasterization";
import { stripIndents } from "@graphite/utility-functions/strip-indents";
import { type Editor } from "@graphite/wasm-communication/editor";
import { TriggerPaste } from "@graphite/wasm-communication/messages";
type EventName = keyof HTMLElementEventMap | keyof WindowEventHandlersEventMap | "modifyinputfield";
type EventListenerTarget = {
addEventListener: typeof window.addEventListener;
removeEventListener: typeof window.removeEventListener;
};
export function createInputManager(editor: Editor, dialog: DialogState, portfolio: PortfolioState, document: DocumentState, fullscreen: FullscreenState): () => void {
const app = window.document.querySelector("[data-app-container]") as HTMLElement | undefined;
app?.focus();
let viewportPointerInteractionOngoing = false;
let textToolInteractiveInputElement = undefined as undefined | HTMLDivElement;
let canvasFocused = true;
// Event listeners
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const listeners: { target: EventListenerTarget; eventName: EventName; action: (event: any) => void; options?: AddEventListenerOptions }[] = [
{ target: window, eventName: "resize", action: () => onWindowResize(window.document.body) },
{ target: window, eventName: "beforeunload", action: (e: BeforeUnloadEvent) => onBeforeUnload(e) },
{ target: window, eventName: "keyup", action: (e: KeyboardEvent) => onKeyUp(e) },
{ target: window, eventName: "keydown", action: (e: KeyboardEvent) => onKeyDown(e) },
{ target: window, eventName: "pointermove", action: (e: PointerEvent) => onPointerMove(e) },
{ target: window, eventName: "pointerdown", action: (e: PointerEvent) => onPointerDown(e) },
{ target: window, eventName: "pointerup", action: (e: PointerEvent) => onPointerUp(e) },
{ target: window, eventName: "mousedown", action: (e: MouseEvent) => onMouseDown(e) },
{ target: window, eventName: "mouseup", action: (e: MouseEvent) => onPotentialDoubleClick(e) },
{ target: window, eventName: "wheel", action: (e: WheelEvent) => onWheelScroll(e), options: { passive: false } },
{ target: window, eventName: "modifyinputfield", action: (e: CustomEvent) => onModifyInputField(e) },
{ target: window, eventName: "focusout", action: () => (canvasFocused = false) },
{ target: window.document, eventName: "contextmenu", action: (e: MouseEvent) => onContextMenu(e) },
{ target: window.document, eventName: "fullscreenchange", action: () => fullscreen.fullscreenModeChanged() },
{ target: window.document.body, eventName: "paste", action: (e: ClipboardEvent) => onPaste(e) },
];
// Event bindings
function bindListeners(): void {
// Add event bindings for the lifetime of the application
listeners.forEach(({ target, eventName, action, options }) => target.addEventListener(eventName, action, options));
}
function unbindListeners(): void {
// Remove event bindings after the lifetime of the application (or on hot-module replacement during development)
listeners.forEach(({ target, eventName, action, options }) => target.removeEventListener(eventName, action, options));
}
// Keyboard events
async function shouldRedirectKeyboardEventToBackend(e: KeyboardEvent): Promise<boolean> {
// Don't redirect when a modal is covering the workspace
if (get(dialog).visible) return false;
const key = await getLocalizedScanCode(e);
// TODO: Switch to a system where everything is sent to the backend, then the input preprocessor makes decisions and kicks some inputs back to the frontend
const accelKey = platformIsMac() ? e.metaKey : e.ctrlKey;
// Don't redirect user input from text entry into HTML elements
if (targetIsTextField(e.target || undefined) && key !== "Escape" && !(accelKey && ["Enter", "NumpadEnter"].includes(key))) return false;
// Don't redirect paste
if (key === "KeyV" && accelKey) return false;
// Don't redirect a fullscreen request
if (key === "F11" && e.type === "keydown" && !e.repeat) {
e.preventDefault();
fullscreen.toggleFullscreen();
return false;
}
// Don't redirect a reload request
if (key === "F5") return false;
if (key === "KeyR" && accelKey) return false;
// Don't redirect debugging tools
if (["F12", "F8"].includes(key)) return false;
if (["KeyC", "KeyI", "KeyJ"].includes(key) && accelKey && e.shiftKey) return false;
// Don't redirect tab or enter if not in canvas (to allow navigating elements)
if (!canvasFocused && !targetIsTextField(e.target || undefined) && ["Tab", "Enter", "NumpadEnter", "Space", "ArrowDown", "ArrowLeft", "ArrowRight", "ArrowUp"].includes(key)) return false;
// Redirect to the backend
return true;
}
async function onKeyDown(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
const NO_KEY_REPEAT_MODIFIER_KEYS = ["ControlLeft", "ControlRight", "ShiftLeft", "ShiftRight", "MetaLeft", "MetaRight", "AltLeft", "AltRight", "AltGraph", "CapsLock", "Fn", "FnLock"];
if (e.repeat && NO_KEY_REPEAT_MODIFIER_KEYS.includes(key)) return;
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyDown(key, modifiers, e.repeat);
return;
}
if (get(dialog).visible && key === "Escape") {
dialog.dismissDialog();
}
}
async function onKeyUp(e: KeyboardEvent): Promise<void> {
const key = await getLocalizedScanCode(e);
if (await shouldRedirectKeyboardEventToBackend(e)) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onKeyUp(key, modifiers, e.repeat);
}
}
// Pointer events
// While any pointer button is already down, additional button down events are not reported, but they are sent as `pointermove` events and these are handled in the backend
function | (e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
// Don't redirect pointer movement to the backend if there's no ongoing interaction and it's over a floating menu, or the graph overlay, on top of the canvas
// TODO: A better approach is to pass along a boolean to the backend's input preprocessor so it can know if it's being occluded by the GUI.
// TODO: This would allow it to properly decide to act on removing hover focus from something that was hovered in the canvas before moving over the GUI.
// TODO: Further explanation: https://github.com/GraphiteEditor/Graphite/pull/623#discussion_r866436197
const inFloatingMenu = e.target instanceof Element && e.target.closest("[data-floating-menu-content]");
const inGraphOverlay = get(document).graphViewOverlayOpen;
if (!viewportPointerInteractionOngoing && (inFloatingMenu || inGraphOverlay)) return;
const { target } = e;
const newInCanvasArea = (target instanceof Element && target.closest("[data-viewport], [data-graph]")) instanceof Element && !targetIsTextField(window.document.activeElement || undefined);
if (newInCanvasArea && !canvasFocused) {
canvasFocused = true;
app?.focus();
}
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseMove(e.clientX, e.clientY, e.buttons, modifiers);
}
function onMouseDown(e: MouseEvent): void {
// Block middle mouse button auto-scroll mode (the circlar gizmo that appears and allows quick scrolling by moving the cursor above or below it)
if (e.button === 1) e.preventDefault();
}
function onPointerDown(e: PointerEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
const inDialog = target instanceof Element && target.closest("[data-dialog-modal] [data-floating-menu-content]");
const inTextInput = target === textToolInteractiveInputElement;
if (get(dialog).visible && !inDialog) {
dialog.dismissDialog();
e.preventDefault();
e.stopPropagation();
}
if (!inTextInput) {
if (textToolInteractiveInputElement) editor.instance.onChangeText(textInputCleanup(textToolInteractiveInputElement.innerText));
else viewportPointerInteractionOngoing = isTargetingCanvas instanceof Element;
}
if (viewportPointerInteractionOngoing) {
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseDown(e.clientX, e.clientY, e.buttons, modifiers);
}
}
function onPointerUp(e: PointerEvent): void {
if (!e.buttons) viewportPointerInteractionOngoing = false;
if (textToolInteractiveInputElement) return;
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onMouseUp(e.clientX, e.clientY, e.buttons, modifiers);
}
function onPotentialDoubleClick(e: MouseEvent): void {
if (textToolInteractiveInputElement) return;
// Allow only double-clicks
if (e.detail !== 2) return;
// `e.buttons` is always 0 in the `mouseup` event, so we have to convert from `e.button` instead
let buttons = 1;
if (e.button === 0) buttons = 1; // LMB
if (e.button === 1) buttons = 4; // MMB
if (e.button === 2) buttons = 2; // RMB
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onDoubleClick(e.clientX, e.clientY, buttons, modifiers);
}
// Mouse events
function onWheelScroll(e: WheelEvent): void {
const { target } = e;
const isTargetingCanvas = target instanceof Element && target.closest("[data-viewport]");
// Redirect vertical scroll wheel movement into a horizontal scroll on a horizontally scrollable element
// There seems to be no possible way to properly employ the browser's smooth scrolling interpolation
const horizontalScrollableElement = target instanceof Element && target.closest("[data-scrollable-x]");
if (horizontalScrollableElement && e.deltaY !== 0) {
horizontalScrollableElement.scrollTo(horizontalScrollableElement.scrollLeft + e.deltaY, 0);
return;
}
if (isTargetingCanvas) {
e.preventDefault();
const modifiers = makeKeyboardModifiersBitfield(e);
editor.instance.onWheelScroll(e.clientX, e.clientY, e.buttons, e.deltaX, e.deltaY, e.deltaZ, modifiers);
}
}
function onContextMenu(e: MouseEvent): void {
if (!targetIsTextField(e.target || undefined) && e.target !== textToolInteractiveInputElement) {
e.preventDefault();
}
}
// Receives a custom event dispatched when the user begins interactively editing with the text tool.
// We keep a copy of the text input element to check against when it's active for text entry.
function onModifyInputField(e: CustomEvent): void {
textToolInteractiveInputElement = e.detail;
}
// Window events
function onWindowResize(container: HTMLElement): void {
const viewports = Array.from(container.querySelectorAll("[data-viewport]"));
const boundsOfViewports = viewports.map((canvas) => {
const bounds = canvas.getBoundingClientRect();
return [bounds.left, bounds.top, bounds.right, bounds.bottom];
});
const flattened = boundsOfViewports.flat();
const data = Float64Array.from(flattened);
if (boundsOfViewports.length > 0) editor.instance.boundsOfViewports(data);
}
async function onBeforeUnload(e: BeforeUnloadEvent): Promise<void> {
const activeDocument = get(portfolio).documents[get(portfolio).activeDocumentIndex];
if (activeDocument && !activeDocument.isAutoSaved) editor.instance.triggerAutoSave(activeDocument.id);
// Skip the message if the editor crashed, since work is already lost
if (await editor.instance.hasCrashed()) return;
// Skip the message during development, since it's annoying when testing
if (await editor.instance.inDevelopmentMode()) return;
const allDocumentsSaved = get(portfolio).documents.reduce((acc, doc) => acc && doc.isSaved, true);
if (!allDocumentsSaved) {
e.returnValue = "Unsaved work will be lost if the web browser tab is closed. Close anyway?";
e.preventDefault();
}
}
function onPaste(e: ClipboardEvent): void {
const dataTransfer = e.clipboardData;
if (!dataTransfer || targetIsTextField(e.target || undefined)) return;
e.preventDefault();
Array.from(dataTransfer.items).forEach((item) => {
if (item.type === "text/plain") {
item.getAsString((text) => {
if (text.startsWith("graphite/layer: ")) {
editor.instance.pasteSerializedData(text.substring(16, text.length));
} else if (text.startsWith("graphite/nodes: ")) {
editor.instance.pasteSerializedNodes(text.substring(16, text.length));
}
});
}
const file = item.getAsFile();
if (file?.type.startsWith("image")) {
extractPixelData(file).then((imageData): void => {
editor.instance.pasteImage(new Uint8Array(imageData.data), imageData.width, imageData.height);
});
}
});
}
// Frontend message subscriptions
editor.subscriptions.subscribeJsMessage(TriggerPaste, async () => {
// In the try block, attempt to read from the Clipboard API, which may not have permission and may not be supported in all browsers
// In the catch block, explain to the user why the paste failed and how to fix or work around the problem
try {
// Attempt to check if the clipboard permission is denied, and throw an error if that is the case
// In Firefox, the `clipboard-read` permission isn't supported, so attempting to query it throws an error
// In Safari, the entire Permissions API isn't supported, so the query never occurs and this block is skipped without an error and we assume we might have permission
const clipboardRead = "clipboard-read" as PermissionName;
const permission = await navigator.permissions?.query({ name: clipboardRead });
if (permission?.state === "denied") throw new Error("Permission denied");
// Read the clipboard contents if the Clipboard API is available
const clipboardItems = await navigator.clipboard.read();
if (!clipboardItems) throw new Error("Clipboard API unsupported");
// Read any layer data or images from the clipboard
Array.from(clipboardItems).forEach(async (item) => {
// Read plain text and, if it is a layer, pass it to the editor
if (item.types.includes("text/plain")) {
const blob = await item.getType("text/plain");
const reader = new FileReader();
reader.onload = (): void => {
const text = reader.result as string;
if (text.startsWith("graphite/layer: ")) {
editor.instance.pasteSerializedData(text.substring(16, text.length));
}
};
reader.readAsText(blob);
}
// Read an image from the clipboard and pass it to the editor to be loaded
const imageType = item.types.find((type) => type.startsWith("image/"));
if (imageType) {
const blob = await item.getType(imageType);
const reader = new FileReader();
reader.onload = async (): Promise<void> => {
if (reader.result instanceof ArrayBuffer) {
const imageData = await extractPixelData(new Blob([reader.result], { type: imageType }));
editor.instance.pasteImage(new Uint8Array(imageData.data), imageData.width, imageData.height);
}
};
reader.readAsArrayBuffer(blob);
}
});
} catch (err) {
const unsupported = stripIndents`
This browser does not support reading from the clipboard.
Use the keyboard shortcut to paste instead.
`;
const denied = stripIndents`
The browser's clipboard permission has been denied.
Open the browser's website settings (usually accessible
just left of the URL) to allow this permission.
`;
const matchMessage = {
"clipboard-read": unsupported,
"Clipboard API unsupported": unsupported,
"Permission denied": denied,
};
const message = Object.entries(matchMessage).find(([key]) => String(err).includes(key))?.[1] || String(err);
editor.instance.errorDialog("Cannot access clipboard", message);
}
});
// Initialization
// Bind the event listeners
bindListeners();
// Resize on creation
onWindowResize(window.document.body);
// Return the destructor
return unbindListeners;
}
function targetIsTextField(target: EventTarget | HTMLElement | undefined): boolean {
return target instanceof HTMLElement && (target.nodeName === "INPUT" || target.nodeName === "TEXTAREA" || target.isContentEditable);
}
| onPointerMove | identifier_name |
main.rs | // bin2src - convert a binary file to source code in various languages
//
// Copyright (C) 2020 Alexandre Gomiero de Oliveira
//
// MIT License
//
// Copyright (c) 2020-2021 Alexandre Gomiero de Oliveira
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![warn(anonymous_parameters)]
#![warn(bare_trait_objects)]
#![warn(elided_lifetimes_in_paths)]
#![warn(single_use_lifetimes)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
mod lang;
use std::env;
const VERSION: &'static str = "0.0.57";
const AUTHOR: &'static str = "Alexandre Gomiero de Oliveira";
#[derive(Debug)]
pub enum Lang {
C,
Cshell,
Pascal,
Python,
Rust,
Undef,
}
fn main() {
let args: Vec<String> = match get_args_as_strings() {
Ok(e) => e,
Err(e) => {
println!("\n{}", e);
print_help();
return;
}
};
if args.len() < 4 {
print_help();
return;
};
let mut parse_result: generator::GeneratorInput = match parse(args) {
Ok(s) => s,
Err(e) => {
println!("\nArgument parser error: {}", e);
print_help();
return;
}
};
match parse_result.generate() {
Err(e) => panic!("Generator error: {}", e),
_ => "",
};
}
fn get_args_as_strings() -> Result<Vec<String>, &'static str> {
let mut ret: Vec<String> = Vec::new();
let args = env::args_os();
for cmd in args {
ret.push(match cmd.into_string() {
Ok(c) => c,
_ => return Err("Invalid unicode character found"),
});
}
Ok(ret)
}
fn parse(args: Vec<String>) -> Result<generator::GeneratorInput, String> {
let mut parse_args = args.iter().skip(1); // Skip program name
let mut inp_file: String = String::new();
let mut out_lang: Lang = Lang::Undef;
let mut out_dir: String = String::new();
let mut out_file: String = String::new();
let mut out_hex: bool = false;
while let Some(cmd) = parse_args.next() {
let cmd_name: &str;
if cmd.starts_with("--") {
cmd_name = &cmd[2..];
} else if cmd.starts_with("-") {
cmd_name = &cmd[1..];
} else {
inp_file = String::from(&cmd[..]);
break;
}
match cmd_name {
"l" | "out-language" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Missing language")),
};
out_lang = match value.as_str() {
"c" => Lang::C,
"cshell" => Lang::Cshell,
"pascal" => Lang::Pascal,
"python" => Lang::Python,
"rust" => Lang::Rust,
l @ _ => return Err(format!("Language not implemented: {}", l)),
};
}
"d" | "out-dir" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid directory")),
};
out_dir = String::from(value);
}
"f" | "out-file" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid output file")),
};
out_file = String::from(value);
}
"h" | "hex" => {
out_hex = true;
}
c @ _ => return Err(format!("Unknow command: {}", c)),
}
}
if inp_file.is_empty() {
return Err(String::from("Invalid input file"));
};
if out_dir.is_empty() {
out_dir = String::from("./");
};
Ok(generator::GeneratorInput {
input_file: inp_file,
output_file: out_file,
output_dir: out_dir,
lang: out_lang,
hex: out_hex,
})
}
fn print_help() {
print!(
"
bin2src - version {}
Copyright (C) 2020 {}
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; for details access LICENSE file at:
https://github.com/gomiero/bin2src/
bin2src - Converts a binary file to an array of bytes, defined at a source of another language, so you can embed it into your program.
Usage: bin2src < -l LANG | --out-lang LANG > [ OPTIONS ] < FILE >
LANG and FILE are required and FILE must be the last argument.
Options:
-l, --out-language LANG specify the language, where LANG={{c|cshell|pascal|python|rust}}
-d, --out-dir PATH specify where to output source(s) file(s);
if not specified, generate in current directory
-f, --out-file OUTFILE specify the output file(s) name (* without extension *);
if not specified, output file(s) will have the same name
of input file (without extra dots).
-h, --hex output bytes in hexadecimal (for C shellcode this flag has
diferent behaviors. See the Github site for more information)
Currently supported languages:
- C
- C for shellcode
- Pascal
- Python
- Rust
", VERSION, AUTHOR);
}
mod generator {
use super::lang::c;
use super::lang::cshell;
use super::lang::pascal;
use super::lang::python;
use super::lang::rust;
use super::Lang;
use std::error::Error;
use std::fs;
use std::io::{BufReader, BufWriter, ErrorKind, Read, Write};
use std::path::PathBuf;
#[inline]
pub fn camel(s: &String) -> String {
let mut ss = s.clone().to_lowercase();
let mut first = ss.remove(0).to_uppercase().to_string();
first.push_str(ss.as_str());
first
}
#[derive(Debug)]
pub struct GeneratorOutput {
pub ifile_name: String,
pub ifile_path: PathBuf,
pub ifile_size: u64,
pub odir_path: PathBuf,
pub ofile_name: String,
pub hex: bool,
}
impl GeneratorOutput {
pub fn open_inp_file(&mut self) -> Result<BufReader<fs::File>, &'static str> {
let inp_file: BufReader<fs::File> =
match fs::OpenOptions::new().read(true).open(&self.ifile_path) {
Ok(f) => BufReader::with_capacity(32768, f),
Err(e) => {
return match e.kind() {
ErrorKind::PermissionDenied => Err("Permission"),
ErrorKind::NotFound => Err("Not found"),
_ => Err("Can't open file"),
}
}
};
Ok(inp_file)
}
pub fn write_data(
&mut self,
f: &mut BufWriter<fs::File>,
numbytes: u64,
write_if: fn(bool, bool, &mut BufWriter<fs::File>, u8) -> Result<(), Box<dyn Error>>,
sep: String,
) -> Result<(), &'static str> {
let mut ifile = self.open_inp_file()?;
let mut doblock = || -> Result<(), Box<dyn Error>> {
let mut buf = [0u8; 4096];
let mut count = 0;
'outter: loop {
let sz = ifile.read(&mut buf[..])?;
if sz == 0 {
f.flush()?;
break;
} else if sz <= 4096 {
for b in 0..sz {
if count == self.ifile_size - 1 {
write_if(self.hex, false, f, buf[b])?;
break 'outter;
};
write_if(self.hex, true, f, buf[b])?;
count += 1;
if count % numbytes == 0 {
write!(f, "{}", sep)?;
};
}
};
}
Ok(())
};
if let Err(_err) = doblock() {
Err("Error when writing data block")
} else {
Ok(())
}
}
pub fn set_output_fname(&mut self) {
if self.ofile_name.is_empty() {
self.ofile_name = self
.ifile_path
.file_stem()
.unwrap()
.to_str()
.unwrap()
.to_string();
if let Some(pos) = self.ofile_name.find(".") {
self.ofile_name.truncate(pos);
}
};
}
}
#[derive(Debug)]
pub struct GeneratorInput {
pub input_file: String,
pub output_file: String,
pub output_dir: String,
pub lang: Lang,
pub hex: bool,
}
impl GeneratorInput {
fn input_file_test(&mut self) -> Result<(String, PathBuf, u64), &'static str> {
let ifpath: PathBuf = PathBuf::from(&self.input_file);
if !(ifpath.exists() || ifpath.is_file()) {
Err("Input file does not exists or is not a file")
} else {
let ifname: String = String::from(ifpath.file_name().unwrap().to_str().unwrap());
let ifsize = ifpath.metadata().unwrap().len();
Ok((ifname, ifpath, ifsize))
}
}
fn output_dir_test(&mut self) -> Result<PathBuf, &'static str> {
let ofpath: PathBuf = PathBuf::from(&self.output_dir);
// Test for output dir
if !(ofpath.exists() || ofpath.is_dir()) {
Err("Output folder does not exists or is inacessible")
} else |
}
pub fn generate(&mut self) -> Result<(), &'static str> {
// Test for input file
let (ifname, ifpath, ifsize) = self.input_file_test()?;
// Test for output dir
let ofpath: PathBuf = self.output_dir_test()?;
let go = GeneratorOutput {
ifile_name: ifname,
ifile_path: ifpath,
ifile_size: ifsize,
odir_path: ofpath,
ofile_name: String::from(&self.output_file),
hex: self.hex,
};
match match &self.lang {
Lang::C => c::C::new(go).generate_files(),
Lang::Cshell => cshell::Cshell::new(go).generate_files(),
Lang::Pascal => pascal::Pascal::new(go).generate_files(),
Lang::Python => python::Python::new(go).generate_files(),
Lang::Rust => rust::Rust::new(go).generate_files(),
_ => Err("Language not implemented yet"),
} {
Ok(_) => {
println!("Source(s) created.");
Ok(())
}
Err(e) => Err(e),
}
}
}
}
| {
Ok(ofpath)
} | conditional_block |
main.rs | // bin2src - convert a binary file to source code in various languages
//
// Copyright (C) 2020 Alexandre Gomiero de Oliveira
//
// MIT License
//
// Copyright (c) 2020-2021 Alexandre Gomiero de Oliveira
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![warn(anonymous_parameters)]
#![warn(bare_trait_objects)]
#![warn(elided_lifetimes_in_paths)]
#![warn(single_use_lifetimes)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
mod lang;
use std::env;
const VERSION: &'static str = "0.0.57";
const AUTHOR: &'static str = "Alexandre Gomiero de Oliveira";
#[derive(Debug)]
pub enum Lang {
C,
Cshell,
Pascal,
Python,
Rust,
Undef,
}
fn main() {
let args: Vec<String> = match get_args_as_strings() {
Ok(e) => e,
Err(e) => {
println!("\n{}", e);
print_help();
return;
}
};
if args.len() < 4 {
print_help();
return;
};
let mut parse_result: generator::GeneratorInput = match parse(args) {
Ok(s) => s,
Err(e) => {
println!("\nArgument parser error: {}", e);
print_help();
return;
}
};
match parse_result.generate() {
Err(e) => panic!("Generator error: {}", e),
_ => "",
};
}
fn get_args_as_strings() -> Result<Vec<String>, &'static str> {
let mut ret: Vec<String> = Vec::new();
let args = env::args_os();
for cmd in args {
ret.push(match cmd.into_string() {
Ok(c) => c,
_ => return Err("Invalid unicode character found"),
});
}
Ok(ret)
}
fn parse(args: Vec<String>) -> Result<generator::GeneratorInput, String> {
let mut parse_args = args.iter().skip(1); // Skip program name
let mut inp_file: String = String::new();
let mut out_lang: Lang = Lang::Undef;
let mut out_dir: String = String::new();
let mut out_file: String = String::new();
let mut out_hex: bool = false;
while let Some(cmd) = parse_args.next() {
let cmd_name: &str;
if cmd.starts_with("--") {
cmd_name = &cmd[2..];
} else if cmd.starts_with("-") {
cmd_name = &cmd[1..];
} else {
inp_file = String::from(&cmd[..]);
break;
}
match cmd_name {
"l" | "out-language" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Missing language")),
};
out_lang = match value.as_str() {
"c" => Lang::C,
"cshell" => Lang::Cshell,
"pascal" => Lang::Pascal,
"python" => Lang::Python,
"rust" => Lang::Rust,
l @ _ => return Err(format!("Language not implemented: {}", l)),
};
}
"d" | "out-dir" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid directory")),
};
out_dir = String::from(value);
}
"f" | "out-file" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid output file")),
};
out_file = String::from(value);
}
"h" | "hex" => {
out_hex = true;
}
c @ _ => return Err(format!("Unknow command: {}", c)),
}
}
if inp_file.is_empty() {
return Err(String::from("Invalid input file"));
};
if out_dir.is_empty() {
out_dir = String::from("./");
};
Ok(generator::GeneratorInput {
input_file: inp_file,
output_file: out_file,
output_dir: out_dir,
lang: out_lang,
hex: out_hex,
})
}
fn print_help() {
print!(
"
bin2src - version {}
Copyright (C) 2020 {}
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; for details access LICENSE file at:
https://github.com/gomiero/bin2src/
bin2src - Converts a binary file to an array of bytes, defined at a source of another language, so you can embed it into your program.
Usage: bin2src < -l LANG | --out-lang LANG > [ OPTIONS ] < FILE >
LANG and FILE are required and FILE must be the last argument.
Options:
-l, --out-language LANG specify the language, where LANG={{c|cshell|pascal|python|rust}}
-d, --out-dir PATH specify where to output source(s) file(s);
if not specified, generate in current directory
-f, --out-file OUTFILE specify the output file(s) name (* without extension *);
if not specified, output file(s) will have the same name
of input file (without extra dots).
-h, --hex output bytes in hexadecimal (for C shellcode this flag has
diferent behaviors. See the Github site for more information)
Currently supported languages:
- C
- C for shellcode
- Pascal
- Python
- Rust
", VERSION, AUTHOR);
}
mod generator {
use super::lang::c;
use super::lang::cshell;
use super::lang::pascal;
use super::lang::python;
use super::lang::rust;
use super::Lang;
use std::error::Error;
use std::fs;
use std::io::{BufReader, BufWriter, ErrorKind, Read, Write};
use std::path::PathBuf;
#[inline]
pub fn camel(s: &String) -> String {
let mut ss = s.clone().to_lowercase();
let mut first = ss.remove(0).to_uppercase().to_string();
first.push_str(ss.as_str());
first
}
#[derive(Debug)]
pub struct GeneratorOutput {
pub ifile_name: String,
pub ifile_path: PathBuf,
pub ifile_size: u64,
pub odir_path: PathBuf,
pub ofile_name: String,
pub hex: bool,
}
impl GeneratorOutput {
pub fn open_inp_file(&mut self) -> Result<BufReader<fs::File>, &'static str> {
let inp_file: BufReader<fs::File> =
match fs::OpenOptions::new().read(true).open(&self.ifile_path) {
Ok(f) => BufReader::with_capacity(32768, f),
Err(e) => {
return match e.kind() {
ErrorKind::PermissionDenied => Err("Permission"),
ErrorKind::NotFound => Err("Not found"),
_ => Err("Can't open file"),
}
}
};
Ok(inp_file)
}
pub fn write_data(
&mut self,
f: &mut BufWriter<fs::File>,
numbytes: u64,
write_if: fn(bool, bool, &mut BufWriter<fs::File>, u8) -> Result<(), Box<dyn Error>>,
sep: String,
) -> Result<(), &'static str> {
let mut ifile = self.open_inp_file()?;
let mut doblock = || -> Result<(), Box<dyn Error>> {
let mut buf = [0u8; 4096];
let mut count = 0;
'outter: loop {
let sz = ifile.read(&mut buf[..])?;
if sz == 0 {
f.flush()?;
break;
} else if sz <= 4096 {
for b in 0..sz {
if count == self.ifile_size - 1 {
write_if(self.hex, false, f, buf[b])?;
break 'outter;
};
write_if(self.hex, true, f, buf[b])?;
count += 1;
if count % numbytes == 0 {
write!(f, "{}", sep)?;
};
}
};
}
Ok(())
};
if let Err(_err) = doblock() {
Err("Error when writing data block")
} else {
Ok(())
}
}
pub fn set_output_fname(&mut self) {
if self.ofile_name.is_empty() {
self.ofile_name = self
.ifile_path
.file_stem()
.unwrap()
.to_str()
.unwrap()
.to_string();
if let Some(pos) = self.ofile_name.find(".") {
self.ofile_name.truncate(pos);
}
};
}
}
#[derive(Debug)]
pub struct | {
pub input_file: String,
pub output_file: String,
pub output_dir: String,
pub lang: Lang,
pub hex: bool,
}
impl GeneratorInput {
fn input_file_test(&mut self) -> Result<(String, PathBuf, u64), &'static str> {
let ifpath: PathBuf = PathBuf::from(&self.input_file);
if !(ifpath.exists() || ifpath.is_file()) {
Err("Input file does not exists or is not a file")
} else {
let ifname: String = String::from(ifpath.file_name().unwrap().to_str().unwrap());
let ifsize = ifpath.metadata().unwrap().len();
Ok((ifname, ifpath, ifsize))
}
}
fn output_dir_test(&mut self) -> Result<PathBuf, &'static str> {
let ofpath: PathBuf = PathBuf::from(&self.output_dir);
// Test for output dir
if !(ofpath.exists() || ofpath.is_dir()) {
Err("Output folder does not exists or is inacessible")
} else {
Ok(ofpath)
}
}
pub fn generate(&mut self) -> Result<(), &'static str> {
// Test for input file
let (ifname, ifpath, ifsize) = self.input_file_test()?;
// Test for output dir
let ofpath: PathBuf = self.output_dir_test()?;
let go = GeneratorOutput {
ifile_name: ifname,
ifile_path: ifpath,
ifile_size: ifsize,
odir_path: ofpath,
ofile_name: String::from(&self.output_file),
hex: self.hex,
};
match match &self.lang {
Lang::C => c::C::new(go).generate_files(),
Lang::Cshell => cshell::Cshell::new(go).generate_files(),
Lang::Pascal => pascal::Pascal::new(go).generate_files(),
Lang::Python => python::Python::new(go).generate_files(),
Lang::Rust => rust::Rust::new(go).generate_files(),
_ => Err("Language not implemented yet"),
} {
Ok(_) => {
println!("Source(s) created.");
Ok(())
}
Err(e) => Err(e),
}
}
}
}
| GeneratorInput | identifier_name |
main.rs | // bin2src - convert a binary file to source code in various languages
//
// Copyright (C) 2020 Alexandre Gomiero de Oliveira
//
// MIT License
//
// Copyright (c) 2020-2021 Alexandre Gomiero de Oliveira
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![warn(anonymous_parameters)]
#![warn(bare_trait_objects)]
#![warn(elided_lifetimes_in_paths)]
#![warn(single_use_lifetimes)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unused_import_braces)]
#![warn(unused_qualifications)]
mod lang;
use std::env;
const VERSION: &'static str = "0.0.57";
const AUTHOR: &'static str = "Alexandre Gomiero de Oliveira";
#[derive(Debug)]
pub enum Lang {
C,
Cshell,
Pascal,
Python,
Rust,
Undef,
}
fn main() {
let args: Vec<String> = match get_args_as_strings() {
Ok(e) => e,
Err(e) => {
println!("\n{}", e);
print_help();
return;
}
};
if args.len() < 4 {
print_help();
return;
};
let mut parse_result: generator::GeneratorInput = match parse(args) {
Ok(s) => s,
Err(e) => {
println!("\nArgument parser error: {}", e);
print_help();
return;
}
}; | _ => "",
};
}
fn get_args_as_strings() -> Result<Vec<String>, &'static str> {
let mut ret: Vec<String> = Vec::new();
let args = env::args_os();
for cmd in args {
ret.push(match cmd.into_string() {
Ok(c) => c,
_ => return Err("Invalid unicode character found"),
});
}
Ok(ret)
}
fn parse(args: Vec<String>) -> Result<generator::GeneratorInput, String> {
let mut parse_args = args.iter().skip(1); // Skip program name
let mut inp_file: String = String::new();
let mut out_lang: Lang = Lang::Undef;
let mut out_dir: String = String::new();
let mut out_file: String = String::new();
let mut out_hex: bool = false;
while let Some(cmd) = parse_args.next() {
let cmd_name: &str;
if cmd.starts_with("--") {
cmd_name = &cmd[2..];
} else if cmd.starts_with("-") {
cmd_name = &cmd[1..];
} else {
inp_file = String::from(&cmd[..]);
break;
}
match cmd_name {
"l" | "out-language" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Missing language")),
};
out_lang = match value.as_str() {
"c" => Lang::C,
"cshell" => Lang::Cshell,
"pascal" => Lang::Pascal,
"python" => Lang::Python,
"rust" => Lang::Rust,
l @ _ => return Err(format!("Language not implemented: {}", l)),
};
}
"d" | "out-dir" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid directory")),
};
out_dir = String::from(value);
}
"f" | "out-file" => {
let value = match parse_args.next() {
Some(c) => c,
None => return Err(format!("Invalid output file")),
};
out_file = String::from(value);
}
"h" | "hex" => {
out_hex = true;
}
c @ _ => return Err(format!("Unknow command: {}", c)),
}
}
if inp_file.is_empty() {
return Err(String::from("Invalid input file"));
};
if out_dir.is_empty() {
out_dir = String::from("./");
};
Ok(generator::GeneratorInput {
input_file: inp_file,
output_file: out_file,
output_dir: out_dir,
lang: out_lang,
hex: out_hex,
})
}
fn print_help() {
print!(
"
bin2src - version {}
Copyright (C) 2020 {}
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; for details access LICENSE file at:
https://github.com/gomiero/bin2src/
bin2src - Converts a binary file to an array of bytes, defined at a source of another language, so you can embed it into your program.
Usage: bin2src < -l LANG | --out-lang LANG > [ OPTIONS ] < FILE >
LANG and FILE are required and FILE must be the last argument.
Options:
-l, --out-language LANG specify the language, where LANG={{c|cshell|pascal|python|rust}}
-d, --out-dir PATH specify where to output source(s) file(s);
if not specified, generate in current directory
-f, --out-file OUTFILE specify the output file(s) name (* without extension *);
if not specified, output file(s) will have the same name
of input file (without extra dots).
-h, --hex output bytes in hexadecimal (for C shellcode this flag has
diferent behaviors. See the Github site for more information)
Currently supported languages:
- C
- C for shellcode
- Pascal
- Python
- Rust
", VERSION, AUTHOR);
}
mod generator {
use super::lang::c;
use super::lang::cshell;
use super::lang::pascal;
use super::lang::python;
use super::lang::rust;
use super::Lang;
use std::error::Error;
use std::fs;
use std::io::{BufReader, BufWriter, ErrorKind, Read, Write};
use std::path::PathBuf;
#[inline]
pub fn camel(s: &String) -> String {
let mut ss = s.clone().to_lowercase();
let mut first = ss.remove(0).to_uppercase().to_string();
first.push_str(ss.as_str());
first
}
#[derive(Debug)]
pub struct GeneratorOutput {
pub ifile_name: String,
pub ifile_path: PathBuf,
pub ifile_size: u64,
pub odir_path: PathBuf,
pub ofile_name: String,
pub hex: bool,
}
impl GeneratorOutput {
pub fn open_inp_file(&mut self) -> Result<BufReader<fs::File>, &'static str> {
let inp_file: BufReader<fs::File> =
match fs::OpenOptions::new().read(true).open(&self.ifile_path) {
Ok(f) => BufReader::with_capacity(32768, f),
Err(e) => {
return match e.kind() {
ErrorKind::PermissionDenied => Err("Permission"),
ErrorKind::NotFound => Err("Not found"),
_ => Err("Can't open file"),
}
}
};
Ok(inp_file)
}
pub fn write_data(
&mut self,
f: &mut BufWriter<fs::File>,
numbytes: u64,
write_if: fn(bool, bool, &mut BufWriter<fs::File>, u8) -> Result<(), Box<dyn Error>>,
sep: String,
) -> Result<(), &'static str> {
let mut ifile = self.open_inp_file()?;
let mut doblock = || -> Result<(), Box<dyn Error>> {
let mut buf = [0u8; 4096];
let mut count = 0;
'outter: loop {
let sz = ifile.read(&mut buf[..])?;
if sz == 0 {
f.flush()?;
break;
} else if sz <= 4096 {
for b in 0..sz {
if count == self.ifile_size - 1 {
write_if(self.hex, false, f, buf[b])?;
break 'outter;
};
write_if(self.hex, true, f, buf[b])?;
count += 1;
if count % numbytes == 0 {
write!(f, "{}", sep)?;
};
}
};
}
Ok(())
};
if let Err(_err) = doblock() {
Err("Error when writing data block")
} else {
Ok(())
}
}
pub fn set_output_fname(&mut self) {
if self.ofile_name.is_empty() {
self.ofile_name = self
.ifile_path
.file_stem()
.unwrap()
.to_str()
.unwrap()
.to_string();
if let Some(pos) = self.ofile_name.find(".") {
self.ofile_name.truncate(pos);
}
};
}
}
#[derive(Debug)]
pub struct GeneratorInput {
pub input_file: String,
pub output_file: String,
pub output_dir: String,
pub lang: Lang,
pub hex: bool,
}
impl GeneratorInput {
fn input_file_test(&mut self) -> Result<(String, PathBuf, u64), &'static str> {
let ifpath: PathBuf = PathBuf::from(&self.input_file);
if !(ifpath.exists() || ifpath.is_file()) {
Err("Input file does not exists or is not a file")
} else {
let ifname: String = String::from(ifpath.file_name().unwrap().to_str().unwrap());
let ifsize = ifpath.metadata().unwrap().len();
Ok((ifname, ifpath, ifsize))
}
}
fn output_dir_test(&mut self) -> Result<PathBuf, &'static str> {
let ofpath: PathBuf = PathBuf::from(&self.output_dir);
// Test for output dir
if !(ofpath.exists() || ofpath.is_dir()) {
Err("Output folder does not exists or is inacessible")
} else {
Ok(ofpath)
}
}
pub fn generate(&mut self) -> Result<(), &'static str> {
// Test for input file
let (ifname, ifpath, ifsize) = self.input_file_test()?;
// Test for output dir
let ofpath: PathBuf = self.output_dir_test()?;
let go = GeneratorOutput {
ifile_name: ifname,
ifile_path: ifpath,
ifile_size: ifsize,
odir_path: ofpath,
ofile_name: String::from(&self.output_file),
hex: self.hex,
};
match match &self.lang {
Lang::C => c::C::new(go).generate_files(),
Lang::Cshell => cshell::Cshell::new(go).generate_files(),
Lang::Pascal => pascal::Pascal::new(go).generate_files(),
Lang::Python => python::Python::new(go).generate_files(),
Lang::Rust => rust::Rust::new(go).generate_files(),
_ => Err("Language not implemented yet"),
} {
Ok(_) => {
println!("Source(s) created.");
Ok(())
}
Err(e) => Err(e),
}
}
}
} |
match parse_result.generate() {
Err(e) => panic!("Generator error: {}", e), | random_line_split |
production_example.py | # -*- coding: utf-8 -*-
"""
Recommended installs: pip install pytrends fredapi yfinance
Uses a number of live public data sources to construct an example production case.
While stock price forecasting is shown here, time series forecasting alone is not a recommended basis for managing investments!
This is a highly opinionated approach.
evolve = True allows the timeseries to automatically adapt to changes.
There is a slight risk of it getting caught in suboptimal position however.
It should probably be coupled with some basic data sanity checks.
cd ./AutoTS
conda activate py38
nohup python production_example.py > /dev/null &
"""
try: # needs to go first
from sklearnex import patch_sklearn
| import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt # required only for graphs
from autots import AutoTS, load_live_daily, create_regressor
fred_key = None # https://fred.stlouisfed.org/docs/api/api_key.html
gsa_key = None
forecast_name = "example"
graph = True # whether to plot graphs
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
frequency = (
"D" # "infer" for automatic alignment, but specific offsets are most reliable, 'D' is daily
)
forecast_length = 60 # number of periods to forecast ahead
drop_most_recent = 1 # whether to discard the n most recent records (as incomplete)
num_validations = (
2 # number of cross validation runs. More is better but slower, usually
)
validation_method = "backwards" # "similarity", "backwards", "seasonal 364"
n_jobs = "auto" # or set to number of CPU cores
prediction_interval = (
0.9 # sets the upper and lower forecast range by probability range. Bigger = wider
)
initial_training = "auto" # set this to True on first run, or on reset, 'auto' looks for existing template, if found, sets to False.
evolve = True # allow time series to progressively evolve on each run, if False, uses fixed template
archive_templates = True # save a copy of the model template used with a timestamp
save_location = None # "C:/Users/Colin/Downloads" # directory to save templates to. Defaults to working dir
template_filename = f"autots_forecast_template_{forecast_name}.csv"
forecast_csv_name = None # f"autots_forecast_{forecast_name}.csv" # or None, point forecast only is written
model_list = "fast_parallel_no_arima"
transformer_list = "fast" # 'superfast'
transformer_max_depth = 5
models_mode = "default" # "deep", "regressor"
initial_template = 'random' # 'random' 'general+random'
preclean = None
{ # preclean option
"fillna": 'ffill',
"transformations": {"0": "EWMAFilter"},
"transformation_params": {
"0": {"span": 14},
},
}
back_forecast = False
start_time = datetime.datetime.now()
if save_location is not None:
template_filename = os.path.join(save_location, template_filename)
if forecast_csv_name is not None:
forecast_csv_name = os.path.join(save_location, forecast_csv_name)
if initial_training == "auto":
initial_training = not os.path.exists(template_filename)
if initial_training:
print("No existing template found.")
else:
print("Existing template found.")
# set max generations based on settings, increase for slower but greater chance of highest accuracy
# if include_ensemble is specified in import_templates, ensembles can progressively nest over generations
if initial_training:
gens = 100
generation_timeout = 10000 # minutes
models_to_validate = 0.15
ensemble = ["horizontal-max", "dist", "simple"] # , "mosaic", "mosaic-window", 'mlensemble'
elif evolve:
gens = 50
generation_timeout = 480 # minutes
models_to_validate = 0.15
ensemble = ["horizontal-max", "dist", "simple"] # "mosaic", "mosaic-window", "subsample"
else:
gens = 0
generation_timeout = 60 # minutes
models_to_validate = 0.99
ensemble = ["horizontal-max", "dist", "simple"] # "mosaic", "mosaic-window",
# only save the very best model if not evolve
if evolve:
n_export = 50
else:
n_export = 1 # wouldn't be a bad idea to do > 1, allowing some future adaptability
"""
Begin dataset retrieval
"""
fred_series = [
"DGS10",
"T5YIE",
"SP500",
"DCOILWTICO",
"DEXUSEU",
"BAMLH0A0HYM2",
"DAAA",
"DEXUSUK",
"T10Y2Y",
]
tickers = ["MSFT", "PG"]
trend_list = ["forecasting", "msft", "p&g"]
weather_event_types = ["%28Z%29+Winter+Weather", "%28Z%29+Winter+Storm"]
wikipedia_pages = ['all', 'Microsoft', "Procter_%26_Gamble", "YouTube", "United_States"]
df = load_live_daily(
long=False,
fred_key=fred_key,
fred_series=fred_series,
tickers=tickers,
trends_list=trend_list,
earthquake_min_magnitude=5,
weather_years=3,
london_air_days=700,
wikipedia_pages=wikipedia_pages,
gsa_key=gsa_key,
gov_domain_list=None, # ['usajobs.gov', 'usps.com', 'weather.gov'],
gov_domain_limit=700,
weather_event_types=weather_event_types,
sleep_seconds=15,
)
# be careful of very noisy, large value series mixed into more well-behaved data as they can skew some metrics such that they get most of the attention
# remove "volume" data as it skews MAE (other solutions are to adjust metric_weighting towards SMAPE, use series `weights`, or pre-scale data)
df = df[[x for x in df.columns if "_volume" not in x]]
# remove dividends and stock splits as it skews metrics
df = df[[x for x in df.columns if "_dividends" not in x]]
df = df[[x for x in df.columns if "stock_splits" not in x]]
# scale 'wiki_all' to millions to prevent too much skew of MAE
if 'wiki_all' in df.columns:
df['wiki_all_millions'] = df['wiki_all'] / 1000000
df = df.drop(columns=['wiki_all'])
# manual NaN cleaning where real values are easily approximated, this is the way
# although if you have 'no good idea' why it is random, auto is best
# note manual pre-cleaning affects VALIDATION significantly (for better or worse)
# as NaN times in history are skipped by metrics, but filled values, as added here, are evaluated
if trend_list is not None:
for tx in trend_list:
if tx in df.columns:
df[tx] = df[tx].interpolate('akima').fillna(method='ffill', limit=30).fillna(method='bfill', limit=30)
# fill weekends
if tickers is not None:
for fx in tickers:
for suffix in ["_high", "_low", "_open", "_close"]:
fxs = (fx + suffix).lower()
if fxs in df.columns:
df[fxs] = df[fxs].interpolate('akima')
if fred_series is not None:
for fx in fred_series:
if fx in df.columns:
df[fx] = df[fx].interpolate('akima')
if weather_event_types is not None:
wevnt = [x for x in df.columns if "_Events" in x]
df[wevnt] = df[wevnt].mask(df[wevnt].notnull().cummax(), df[wevnt].fillna(0))
# most of the NaN here are just weekends, when financial series aren't collected, ffill of a few steps is fine
# partial forward fill, no back fill
df = df.fillna(method='ffill', limit=3)
df = df[df.index.year > 1999]
# remove any data from the future
df = df[df.index <= start_time]
# remove series with no recent data
df = df.dropna(axis="columns", how="all")
min_cutoff_date = start_time - datetime.timedelta(days=180)
most_recent_date = df.notna()[::-1].idxmax()
drop_cols = most_recent_date[most_recent_date < min_cutoff_date].index.tolist()
df = df.drop(columns=drop_cols)
print(
f"Series with most NaN: {df.head(365).isnull().sum().sort_values(ascending=False).head(5)}"
)
df.to_csv(f"training_data_{forecast_name}.csv")
# df = pd.read_csv(f"training_data_{forecast_name}.csv", index_col=0, parse_dates=[0])
# example future_regressor with some things we can glean from data and datetime index
# note this only accepts `wide` style input dataframes
# and this is optional, not required for the modeling
# also create macro_micro before inclusion
regr_train, regr_fcst = create_regressor(
df,
forecast_length=forecast_length,
frequency=frequency,
drop_most_recent=drop_most_recent,
scale=True,
summarize="auto",
backfill="bfill",
fill_na="spline",
holiday_countries={"US": None}, # requires holidays package
encode_holiday_type=True,
# datepart_method="simple_2",
)
# remove the first forecast_length rows (because those are lost in regressor)
df = df.iloc[forecast_length:]
regr_train = regr_train.iloc[forecast_length:]
print("data setup completed, beginning modeling")
"""
Begin modeling
"""
metric_weighting = {
'smape_weighting': 3,
'mae_weighting': 2,
'rmse_weighting': 1,
'made_weighting': 1,
'mage_weighting': 0,
'mle_weighting': 0,
'imle_weighting': 0,
'spl_weighting': 3,
'dwae_weighting': 1,
'runtime_weighting': 0.05,
}
model = AutoTS(
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
ensemble=ensemble,
model_list=model_list,
transformer_list=transformer_list,
transformer_max_depth=transformer_max_depth,
max_generations=gens,
metric_weighting=metric_weighting,
initial_template=initial_template,
aggfunc="first",
models_to_validate=models_to_validate,
model_interrupt=True,
num_validations=num_validations,
validation_method=validation_method,
constraint=None,
drop_most_recent=drop_most_recent, # if newest data is incomplete, also remember to increase forecast_length
preclean=preclean,
models_mode=models_mode,
# no_negatives=True,
# subset=100,
# prefill_na=0,
# remove_leading_zeroes=True,
current_model_file=f"current_model_{forecast_name}",
generation_timeout=generation_timeout,
n_jobs=n_jobs,
verbose=1,
)
if not initial_training:
if evolve:
model.import_template(template_filename, method="addon")
else:
model.import_template(template_filename, method="only")
model = model.fit(
df,
future_regressor=regr_train,
)
# save a template of best models
if initial_training or evolve:
model.export_template(
template_filename,
models="best",
n=n_export,
max_per_model_class=6,
include_results=True,
)
if archive_templates:
arc_file = f"{template_filename.split('.csv')[0]}_{start_time.strftime('%Y%m%d%H%M')}.csv"
model.export_template(arc_file, models="best", n=1)
prediction = model.predict(
future_regressor=regr_fcst, verbose=2, fail_on_forecast_nan=True
)
# Print the details of the best model
print(model)
"""
Process results
"""
# point forecasts dataframe
forecasts_df = prediction.forecast # .fillna(0).round(0)
if forecast_csv_name is not None:
forecasts_df.to_csv(forecast_csv_name)
forecasts_upper_df = prediction.upper_forecast
forecasts_lower_df = prediction.lower_forecast
# accuracy of all tried model results
model_results = model.results()
validation_results = model.results("validation")
print(f"Model failure rate is {model.failure_rate() * 100:.1f}%")
print(f'The following model types failed completely {model.list_failed_model_types()}')
print("Slowest models:")
print(
model_results[model_results["Ensemble"] < 1]
.groupby("Model")
.agg({"TotalRuntimeSeconds": ["mean", "max"]})
.idxmax()
)
model_parameters = json.loads(model.best_model["ModelParameters"].iloc[0])
# model.export_template("all_results.csv", models='all')
if graph:
with plt.style.context("bmh"):
start_date = 'auto' # '2021-01-01'
prediction.plot_grid(model.df_wide_numeric, start_date=start_date)
plt.show()
scores = model.best_model_per_series_mape().index.tolist()
scores = [x for x in scores if x in df.columns]
worst = scores[0:6]
prediction.plot_grid(model.df_wide_numeric, start_date=start_date, title="Worst Performing Forecasts", cols=worst)
plt.show()
best = scores[-6:]
prediction.plot_grid(model.df_wide_numeric, start_date=start_date, title="Best Performing Forecasts", cols=best)
plt.show()
if model.best_model_name == "Cassandra":
prediction.model.plot_components(
prediction, series=None, to_origin_space=True, start_date=start_date
)
plt.show()
prediction.model.plot_trend(
series=None, start_date=start_date
)
plt.show()
ax = model.plot_per_series_mape()
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
if back_forecast:
model.plot_backforecast()
plt.show()
ax = model.plot_validations()
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = model.plot_validations(subset='best')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = model.plot_validations(subset='worst')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
if model.best_model_ensemble == 2:
plt.subplots_adjust(bottom=0.5)
model.plot_horizontal_transformers()
plt.show()
model.plot_horizontal_model_count()
plt.show()
model.plot_horizontal()
plt.show()
# plt.savefig("horizontal.png", dpi=300, bbox_inches="tight")
if str(model_parameters["model_name"]).lower() in ["mosaic", "mosaic-window"]:
mosaic_df = model.mosaic_to_df()
print(mosaic_df[mosaic_df.columns[0:5]].head(5))
print(f"Completed at system time: {datetime.datetime.now()}") | patch_sklearn()
except Exception as e:
print(repr(e))
import json
import datetime | random_line_split |
production_example.py | # -*- coding: utf-8 -*-
"""
Recommended installs: pip install pytrends fredapi yfinance
Uses a number of live public data sources to construct an example production case.
While stock price forecasting is shown here, time series forecasting alone is not a recommended basis for managing investments!
This is a highly opinionated approach.
evolve = True allows the timeseries to automatically adapt to changes.
There is a slight risk of it getting caught in suboptimal position however.
It should probably be coupled with some basic data sanity checks.
cd ./AutoTS
conda activate py38
nohup python production_example.py > /dev/null &
"""
try: # needs to go first
from sklearnex import patch_sklearn
patch_sklearn()
except Exception as e:
print(repr(e))
import json
import datetime
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt # required only for graphs
from autots import AutoTS, load_live_daily, create_regressor
fred_key = None # https://fred.stlouisfed.org/docs/api/api_key.html
gsa_key = None
forecast_name = "example"
graph = True # whether to plot graphs
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
frequency = (
"D" # "infer" for automatic alignment, but specific offsets are most reliable, 'D' is daily
)
forecast_length = 60 # number of periods to forecast ahead
drop_most_recent = 1 # whether to discard the n most recent records (as incomplete)
num_validations = (
2 # number of cross validation runs. More is better but slower, usually
)
validation_method = "backwards" # "similarity", "backwards", "seasonal 364"
n_jobs = "auto" # or set to number of CPU cores
prediction_interval = (
0.9 # sets the upper and lower forecast range by probability range. Bigger = wider
)
initial_training = "auto" # set this to True on first run, or on reset, 'auto' looks for existing template, if found, sets to False.
evolve = True # allow time series to progressively evolve on each run, if False, uses fixed template
archive_templates = True # save a copy of the model template used with a timestamp
save_location = None # "C:/Users/Colin/Downloads" # directory to save templates to. Defaults to working dir
template_filename = f"autots_forecast_template_{forecast_name}.csv"
forecast_csv_name = None # f"autots_forecast_{forecast_name}.csv" # or None, point forecast only is written
model_list = "fast_parallel_no_arima"
transformer_list = "fast" # 'superfast'
transformer_max_depth = 5
models_mode = "default" # "deep", "regressor"
initial_template = 'random' # 'random' 'general+random'
preclean = None
{ # preclean option
"fillna": 'ffill',
"transformations": {"0": "EWMAFilter"},
"transformation_params": {
"0": {"span": 14},
},
}
back_forecast = False
start_time = datetime.datetime.now()
if save_location is not None:
template_filename = os.path.join(save_location, template_filename)
if forecast_csv_name is not None:
forecast_csv_name = os.path.join(save_location, forecast_csv_name)
if initial_training == "auto":
initial_training = not os.path.exists(template_filename)
if initial_training:
print("No existing template found.")
else:
print("Existing template found.")
# set max generations based on settings, increase for slower but greater chance of highest accuracy
# if include_ensemble is specified in import_templates, ensembles can progressively nest over generations
if initial_training:
gens = 100
generation_timeout = 10000 # minutes
models_to_validate = 0.15
ensemble = ["horizontal-max", "dist", "simple"] # , "mosaic", "mosaic-window", 'mlensemble'
elif evolve:
gens = 50
generation_timeout = 480 # minutes
models_to_validate = 0.15
ensemble = ["horizontal-max", "dist", "simple"] # "mosaic", "mosaic-window", "subsample"
else:
gens = 0
generation_timeout = 60 # minutes
models_to_validate = 0.99
ensemble = ["horizontal-max", "dist", "simple"] # "mosaic", "mosaic-window",
# only save the very best model if not evolve
if evolve:
n_export = 50
else:
n_export = 1 # wouldn't be a bad idea to do > 1, allowing some future adaptability
"""
Begin dataset retrieval
"""
fred_series = [
"DGS10",
"T5YIE",
"SP500",
"DCOILWTICO",
"DEXUSEU",
"BAMLH0A0HYM2",
"DAAA",
"DEXUSUK",
"T10Y2Y",
]
tickers = ["MSFT", "PG"]
trend_list = ["forecasting", "msft", "p&g"]
weather_event_types = ["%28Z%29+Winter+Weather", "%28Z%29+Winter+Storm"]
wikipedia_pages = ['all', 'Microsoft', "Procter_%26_Gamble", "YouTube", "United_States"]
df = load_live_daily(
long=False,
fred_key=fred_key,
fred_series=fred_series,
tickers=tickers,
trends_list=trend_list,
earthquake_min_magnitude=5,
weather_years=3,
london_air_days=700,
wikipedia_pages=wikipedia_pages,
gsa_key=gsa_key,
gov_domain_list=None, # ['usajobs.gov', 'usps.com', 'weather.gov'],
gov_domain_limit=700,
weather_event_types=weather_event_types,
sleep_seconds=15,
)
# be careful of very noisy, large value series mixed into more well-behaved data as they can skew some metrics such that they get most of the attention
# remove "volume" data as it skews MAE (other solutions are to adjust metric_weighting towards SMAPE, use series `weights`, or pre-scale data)
df = df[[x for x in df.columns if "_volume" not in x]]
# remove dividends and stock splits as it skews metrics
df = df[[x for x in df.columns if "_dividends" not in x]]
df = df[[x for x in df.columns if "stock_splits" not in x]]
# scale 'wiki_all' to millions to prevent too much skew of MAE
if 'wiki_all' in df.columns:
df['wiki_all_millions'] = df['wiki_all'] / 1000000
df = df.drop(columns=['wiki_all'])
# manual NaN cleaning where real values are easily approximated, this is the way
# although if you have 'no good idea' why it is random, auto is best
# note manual pre-cleaning affects VALIDATION significantly (for better or worse)
# as NaN times in history are skipped by metrics, but filled values, as added here, are evaluated
if trend_list is not None:
for tx in trend_list:
if tx in df.columns:
df[tx] = df[tx].interpolate('akima').fillna(method='ffill', limit=30).fillna(method='bfill', limit=30)
# fill weekends
if tickers is not None:
for fx in tickers:
for suffix in ["_high", "_low", "_open", "_close"]:
fxs = (fx + suffix).lower()
if fxs in df.columns:
df[fxs] = df[fxs].interpolate('akima')
if fred_series is not None:
for fx in fred_series:
if fx in df.columns:
df[fx] = df[fx].interpolate('akima')
if weather_event_types is not None:
wevnt = [x for x in df.columns if "_Events" in x]
df[wevnt] = df[wevnt].mask(df[wevnt].notnull().cummax(), df[wevnt].fillna(0))
# most of the NaN here are just weekends, when financial series aren't collected, ffill of a few steps is fine
# partial forward fill, no back fill
df = df.fillna(method='ffill', limit=3)
df = df[df.index.year > 1999]
# remove any data from the future
df = df[df.index <= start_time]
# remove series with no recent data
df = df.dropna(axis="columns", how="all")
min_cutoff_date = start_time - datetime.timedelta(days=180)
most_recent_date = df.notna()[::-1].idxmax()
drop_cols = most_recent_date[most_recent_date < min_cutoff_date].index.tolist()
df = df.drop(columns=drop_cols)
print(
f"Series with most NaN: {df.head(365).isnull().sum().sort_values(ascending=False).head(5)}"
)
df.to_csv(f"training_data_{forecast_name}.csv")
# df = pd.read_csv(f"training_data_{forecast_name}.csv", index_col=0, parse_dates=[0])
# example future_regressor with some things we can glean from data and datetime index
# note this only accepts `wide` style input dataframes
# and this is optional, not required for the modeling
# also create macro_micro before inclusion
regr_train, regr_fcst = create_regressor(
df,
forecast_length=forecast_length,
frequency=frequency,
drop_most_recent=drop_most_recent,
scale=True,
summarize="auto",
backfill="bfill",
fill_na="spline",
holiday_countries={"US": None}, # requires holidays package
encode_holiday_type=True,
# datepart_method="simple_2",
)
# remove the first forecast_length rows (because those are lost in regressor)
df = df.iloc[forecast_length:]
regr_train = regr_train.iloc[forecast_length:]
print("data setup completed, beginning modeling")
"""
Begin modeling
"""
metric_weighting = {
'smape_weighting': 3,
'mae_weighting': 2,
'rmse_weighting': 1,
'made_weighting': 1,
'mage_weighting': 0,
'mle_weighting': 0,
'imle_weighting': 0,
'spl_weighting': 3,
'dwae_weighting': 1,
'runtime_weighting': 0.05,
}
model = AutoTS(
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
ensemble=ensemble,
model_list=model_list,
transformer_list=transformer_list,
transformer_max_depth=transformer_max_depth,
max_generations=gens,
metric_weighting=metric_weighting,
initial_template=initial_template,
aggfunc="first",
models_to_validate=models_to_validate,
model_interrupt=True,
num_validations=num_validations,
validation_method=validation_method,
constraint=None,
drop_most_recent=drop_most_recent, # if newest data is incomplete, also remember to increase forecast_length
preclean=preclean,
models_mode=models_mode,
# no_negatives=True,
# subset=100,
# prefill_na=0,
# remove_leading_zeroes=True,
current_model_file=f"current_model_{forecast_name}",
generation_timeout=generation_timeout,
n_jobs=n_jobs,
verbose=1,
)
if not initial_training:
if evolve:
model.import_template(template_filename, method="addon")
else:
model.import_template(template_filename, method="only")
model = model.fit(
df,
future_regressor=regr_train,
)
# save a template of best models
if initial_training or evolve:
model.export_template(
template_filename,
models="best",
n=n_export,
max_per_model_class=6,
include_results=True,
)
if archive_templates:
arc_file = f"{template_filename.split('.csv')[0]}_{start_time.strftime('%Y%m%d%H%M')}.csv"
model.export_template(arc_file, models="best", n=1)
prediction = model.predict(
future_regressor=regr_fcst, verbose=2, fail_on_forecast_nan=True
)
# Print the details of the best model
print(model)
"""
Process results
"""
# point forecasts dataframe
forecasts_df = prediction.forecast # .fillna(0).round(0)
if forecast_csv_name is not None:
forecasts_df.to_csv(forecast_csv_name)
forecasts_upper_df = prediction.upper_forecast
forecasts_lower_df = prediction.lower_forecast
# accuracy of all tried model results
model_results = model.results()
validation_results = model.results("validation")
print(f"Model failure rate is {model.failure_rate() * 100:.1f}%")
print(f'The following model types failed completely {model.list_failed_model_types()}')
print("Slowest models:")
print(
model_results[model_results["Ensemble"] < 1]
.groupby("Model")
.agg({"TotalRuntimeSeconds": ["mean", "max"]})
.idxmax()
)
model_parameters = json.loads(model.best_model["ModelParameters"].iloc[0])
# model.export_template("all_results.csv", models='all')
if graph:
with plt.style.context("bmh"):
start_date = 'auto' # '2021-01-01'
prediction.plot_grid(model.df_wide_numeric, start_date=start_date)
plt.show()
scores = model.best_model_per_series_mape().index.tolist()
scores = [x for x in scores if x in df.columns]
worst = scores[0:6]
prediction.plot_grid(model.df_wide_numeric, start_date=start_date, title="Worst Performing Forecasts", cols=worst)
plt.show()
best = scores[-6:]
prediction.plot_grid(model.df_wide_numeric, start_date=start_date, title="Best Performing Forecasts", cols=best)
plt.show()
if model.best_model_name == "Cassandra":
prediction.model.plot_components(
prediction, series=None, to_origin_space=True, start_date=start_date
)
plt.show()
prediction.model.plot_trend(
series=None, start_date=start_date
)
plt.show()
ax = model.plot_per_series_mape()
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
if back_forecast:
model.plot_backforecast()
plt.show()
ax = model.plot_validations()
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = model.plot_validations(subset='best')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
ax = model.plot_validations(subset='worst')
plt.legend(bbox_to_anchor=(1.02, 1), loc='upper left', borderaxespad=0)
plt.show()
if model.best_model_ensemble == 2:
|
print(f"Completed at system time: {datetime.datetime.now()}")
| plt.subplots_adjust(bottom=0.5)
model.plot_horizontal_transformers()
plt.show()
model.plot_horizontal_model_count()
plt.show()
model.plot_horizontal()
plt.show()
# plt.savefig("horizontal.png", dpi=300, bbox_inches="tight")
if str(model_parameters["model_name"]).lower() in ["mosaic", "mosaic-window"]:
mosaic_df = model.mosaic_to_df()
print(mosaic_df[mosaic_df.columns[0:5]].head(5)) | conditional_block |
ip6.go | // Copyright 2012 Google, Inc. All rights reserved.
// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package layers
import (
"encoding/binary"
"fmt"
"github.com/tsg/gopacket"
"net"
)
// IPv6 is the layer for the IPv6 header.
type IPv6 struct {
// http://www.networksorcery.com/enp/protocol/ipv6.htm
BaseLayer
Version uint8
TrafficClass uint8
FlowLabel uint32
Length uint16
NextHeader IPProtocol
HopLimit uint8
SrcIP net.IP
DstIP net.IP
HopByHop *IPv6HopByHop
// hbh will be pointed to by HopByHop if that layer exists.
hbh IPv6HopByHop
}
// LayerType returns LayerTypeIPv6
func (i *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }
func (i *IPv6) NetworkFlow() gopacket.Flow {
return gopacket.NewFlow(EndpointIPv6, i.SrcIP, i.DstIP)
}
const (
IPv6HopByHopOptionJumbogram = 0xC2 // RFC 2675
)
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (ip6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
payload := b.Bytes()
if ip6.HopByHop != nil {
return fmt.Errorf("unable to serialize hopbyhop for now")
}
bytes, err := b.PrependBytes(40) | }
bytes[0] = (ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0 {
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
}
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
}
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
if data[0] == 0 {
h.ActualLength = 1
return
}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
SegmentsLeft uint8
// This segment is supposed to be zero according to RFC2460, the second set of
// 4 bytes in the extension.
Reserved []byte
// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
// set only if RoutingType == 0.
SourceRoutingIPs []net.IP
}
// LayerType returns LayerTypeIPv6Routing.
func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Routing{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
RoutingType: data[2],
SegmentsLeft: data[3],
Reserved: data[4:8],
}
switch i.RoutingType {
case 0: // Source routing
if (len(data)-8)%16 != 0 {
return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", len(data))
}
for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
}
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// IPv6Fragment is the IPv6 fragment header, used for packet
// fragmentation/defragmentation.
type IPv6Fragment struct {
BaseLayer
NextHeader IPProtocol
// Reserved1 is bits [8-16), from least to most significant, 0-indexed
Reserved1 uint8
FragmentOffset uint16
// Reserved2 is bits [29-31), from least to most significant, 0-indexed
Reserved2 uint8
MoreFragments bool
Identification uint32
}
// LayerType returns LayerTypeIPv6Fragment.
func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Fragment{
BaseLayer: BaseLayer{data[:8], data[8:]},
NextHeader: IPProtocol(data[0]),
Reserved1: data[1],
FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
Reserved2: data[3] & 0x6 >> 1,
MoreFragments: data[3]&0x1 != 0,
Identification: binary.BigEndian.Uint32(data[4:8]),
}
p.AddLayer(i)
return p.NextDecoder(gopacket.DecodeFragment)
}
// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
type IPv6DestinationOption ipv6HeaderTLVOption
func (o *IPv6DestinationOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
return (*ipv6HeaderTLVOption)(o).serializeTo(b, fixLengths)
}
// IPv6Destination is the IPv6 destination options header.
type IPv6Destination struct {
ipv6ExtensionBase
Options []IPv6DestinationOption
}
// LayerType returns LayerTypeIPv6Destination.
func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Destination{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
// We guess we'll 1-2 options, one regular option at least, then maybe one
// padding option.
Options: make([]IPv6DestinationOption, 0, 2),
}
var opt *IPv6DestinationOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6DestinationOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
optionLength := 0
for _, opt := range i.Options {
l, err := opt.serializeTo(b, opts.FixLengths)
if err != nil {
return err
}
optionLength += l
}
bytes, err := b.PrependBytes(2)
if err != nil {
return err
}
bytes[0] = uint8(i.NextHeader)
if opts.FixLengths {
i.HeaderLength = uint8((optionLength + 2) / 8)
}
bytes[1] = i.HeaderLength
return nil
} | if err != nil {
return err | random_line_split |
ip6.go | // Copyright 2012 Google, Inc. All rights reserved.
// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package layers
import (
"encoding/binary"
"fmt"
"github.com/tsg/gopacket"
"net"
)
// IPv6 is the layer for the IPv6 header.
type IPv6 struct {
// http://www.networksorcery.com/enp/protocol/ipv6.htm
BaseLayer
Version uint8
TrafficClass uint8
FlowLabel uint32
Length uint16
NextHeader IPProtocol
HopLimit uint8
SrcIP net.IP
DstIP net.IP
HopByHop *IPv6HopByHop
// hbh will be pointed to by HopByHop if that layer exists.
hbh IPv6HopByHop
}
// LayerType returns LayerTypeIPv6
func (i *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }
func (i *IPv6) NetworkFlow() gopacket.Flow {
return gopacket.NewFlow(EndpointIPv6, i.SrcIP, i.DstIP)
}
const (
IPv6HopByHopOptionJumbogram = 0xC2 // RFC 2675
)
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (ip6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
payload := b.Bytes()
if ip6.HopByHop != nil {
return fmt.Errorf("unable to serialize hopbyhop for now")
}
bytes, err := b.PrependBytes(40)
if err != nil {
return err
}
bytes[0] = (ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0 |
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
}
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
if data[0] == 0 {
h.ActualLength = 1
return
}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
SegmentsLeft uint8
// This segment is supposed to be zero according to RFC2460, the second set of
// 4 bytes in the extension.
Reserved []byte
// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
// set only if RoutingType == 0.
SourceRoutingIPs []net.IP
}
// LayerType returns LayerTypeIPv6Routing.
func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Routing{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
RoutingType: data[2],
SegmentsLeft: data[3],
Reserved: data[4:8],
}
switch i.RoutingType {
case 0: // Source routing
if (len(data)-8)%16 != 0 {
return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", len(data))
}
for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
}
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// IPv6Fragment is the IPv6 fragment header, used for packet
// fragmentation/defragmentation.
type IPv6Fragment struct {
BaseLayer
NextHeader IPProtocol
// Reserved1 is bits [8-16), from least to most significant, 0-indexed
Reserved1 uint8
FragmentOffset uint16
// Reserved2 is bits [29-31), from least to most significant, 0-indexed
Reserved2 uint8
MoreFragments bool
Identification uint32
}
// LayerType returns LayerTypeIPv6Fragment.
func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Fragment{
BaseLayer: BaseLayer{data[:8], data[8:]},
NextHeader: IPProtocol(data[0]),
Reserved1: data[1],
FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
Reserved2: data[3] & 0x6 >> 1,
MoreFragments: data[3]&0x1 != 0,
Identification: binary.BigEndian.Uint32(data[4:8]),
}
p.AddLayer(i)
return p.NextDecoder(gopacket.DecodeFragment)
}
// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
type IPv6DestinationOption ipv6HeaderTLVOption
func (o *IPv6DestinationOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
return (*ipv6HeaderTLVOption)(o).serializeTo(b, fixLengths)
}
// IPv6Destination is the IPv6 destination options header.
type IPv6Destination struct {
ipv6ExtensionBase
Options []IPv6DestinationOption
}
// LayerType returns LayerTypeIPv6Destination.
func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Destination{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
// We guess we'll 1-2 options, one regular option at least, then maybe one
// padding option.
Options: make([]IPv6DestinationOption, 0, 2),
}
var opt *IPv6DestinationOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6DestinationOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
optionLength := 0
for _, opt := range i.Options {
l, err := opt.serializeTo(b, opts.FixLengths)
if err != nil {
return err
}
optionLength += l
}
bytes, err := b.PrependBytes(2)
if err != nil {
return err
}
bytes[0] = uint8(i.NextHeader)
if opts.FixLengths {
i.HeaderLength = uint8((optionLength + 2) / 8)
}
bytes[1] = i.HeaderLength
return nil
}
| {
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
} | conditional_block |
ip6.go | // Copyright 2012 Google, Inc. All rights reserved.
// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package layers
import (
"encoding/binary"
"fmt"
"github.com/tsg/gopacket"
"net"
)
// IPv6 is the layer for the IPv6 header.
type IPv6 struct {
// http://www.networksorcery.com/enp/protocol/ipv6.htm
BaseLayer
Version uint8
TrafficClass uint8
FlowLabel uint32
Length uint16
NextHeader IPProtocol
HopLimit uint8
SrcIP net.IP
DstIP net.IP
HopByHop *IPv6HopByHop
// hbh will be pointed to by HopByHop if that layer exists.
hbh IPv6HopByHop
}
// LayerType returns LayerTypeIPv6
func (i *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }
func (i *IPv6) NetworkFlow() gopacket.Flow {
return gopacket.NewFlow(EndpointIPv6, i.SrcIP, i.DstIP)
}
const (
IPv6HopByHopOptionJumbogram = 0xC2 // RFC 2675
)
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (ip6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
payload := b.Bytes()
if ip6.HopByHop != nil {
return fmt.Errorf("unable to serialize hopbyhop for now")
}
bytes, err := b.PrependBytes(40)
if err != nil {
return err
}
bytes[0] = (ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0 {
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
}
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error |
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
if data[0] == 0 {
h.ActualLength = 1
return
}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
SegmentsLeft uint8
// This segment is supposed to be zero according to RFC2460, the second set of
// 4 bytes in the extension.
Reserved []byte
// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
// set only if RoutingType == 0.
SourceRoutingIPs []net.IP
}
// LayerType returns LayerTypeIPv6Routing.
func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Routing{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
RoutingType: data[2],
SegmentsLeft: data[3],
Reserved: data[4:8],
}
switch i.RoutingType {
case 0: // Source routing
if (len(data)-8)%16 != 0 {
return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", len(data))
}
for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
}
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// IPv6Fragment is the IPv6 fragment header, used for packet
// fragmentation/defragmentation.
type IPv6Fragment struct {
BaseLayer
NextHeader IPProtocol
// Reserved1 is bits [8-16), from least to most significant, 0-indexed
Reserved1 uint8
FragmentOffset uint16
// Reserved2 is bits [29-31), from least to most significant, 0-indexed
Reserved2 uint8
MoreFragments bool
Identification uint32
}
// LayerType returns LayerTypeIPv6Fragment.
func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Fragment{
BaseLayer: BaseLayer{data[:8], data[8:]},
NextHeader: IPProtocol(data[0]),
Reserved1: data[1],
FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
Reserved2: data[3] & 0x6 >> 1,
MoreFragments: data[3]&0x1 != 0,
Identification: binary.BigEndian.Uint32(data[4:8]),
}
p.AddLayer(i)
return p.NextDecoder(gopacket.DecodeFragment)
}
// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
type IPv6DestinationOption ipv6HeaderTLVOption
func (o *IPv6DestinationOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
return (*ipv6HeaderTLVOption)(o).serializeTo(b, fixLengths)
}
// IPv6Destination is the IPv6 destination options header.
type IPv6Destination struct {
ipv6ExtensionBase
Options []IPv6DestinationOption
}
// LayerType returns LayerTypeIPv6Destination.
func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Destination{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
// We guess we'll 1-2 options, one regular option at least, then maybe one
// padding option.
Options: make([]IPv6DestinationOption, 0, 2),
}
var opt *IPv6DestinationOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6DestinationOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
optionLength := 0
for _, opt := range i.Options {
l, err := opt.serializeTo(b, opts.FixLengths)
if err != nil {
return err
}
optionLength += l
}
bytes, err := b.PrependBytes(2)
if err != nil {
return err
}
bytes[0] = uint8(i.NextHeader)
if opts.FixLengths {
i.HeaderLength = uint8((optionLength + 2) / 8)
}
bytes[1] = i.HeaderLength
return nil
}
| {
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
} | identifier_body |
ip6.go | // Copyright 2012 Google, Inc. All rights reserved.
// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package layers
import (
"encoding/binary"
"fmt"
"github.com/tsg/gopacket"
"net"
)
// IPv6 is the layer for the IPv6 header.
type IPv6 struct {
// http://www.networksorcery.com/enp/protocol/ipv6.htm
BaseLayer
Version uint8
TrafficClass uint8
FlowLabel uint32
Length uint16
NextHeader IPProtocol
HopLimit uint8
SrcIP net.IP
DstIP net.IP
HopByHop *IPv6HopByHop
// hbh will be pointed to by HopByHop if that layer exists.
hbh IPv6HopByHop
}
// LayerType returns LayerTypeIPv6
func (i *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }
func (i *IPv6) NetworkFlow() gopacket.Flow {
return gopacket.NewFlow(EndpointIPv6, i.SrcIP, i.DstIP)
}
const (
IPv6HopByHopOptionJumbogram = 0xC2 // RFC 2675
)
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (ip6 *IPv6) | (b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
payload := b.Bytes()
if ip6.HopByHop != nil {
return fmt.Errorf("unable to serialize hopbyhop for now")
}
bytes, err := b.PrependBytes(40)
if err != nil {
return err
}
bytes[0] = (ip6.Version << 4) | (ip6.TrafficClass >> 4)
bytes[1] = (ip6.TrafficClass << 4) | uint8(ip6.FlowLabel>>16)
binary.BigEndian.PutUint16(bytes[2:], uint16(ip6.FlowLabel))
if opts.FixLengths {
ip6.Length = uint16(len(payload))
}
binary.BigEndian.PutUint16(bytes[4:], ip6.Length)
bytes[6] = byte(ip6.NextHeader)
bytes[7] = byte(ip6.HopLimit)
if len(ip6.SrcIP) != 16 {
return fmt.Errorf("invalid src ip %v", ip6.SrcIP)
}
if len(ip6.DstIP) != 16 {
return fmt.Errorf("invalid dst ip %v", ip6.DstIP)
}
copy(bytes[8:], ip6.SrcIP)
copy(bytes[24:], ip6.DstIP)
return nil
}
func (ip6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
ip6.Version = uint8(data[0]) >> 4
ip6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
ip6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
ip6.Length = binary.BigEndian.Uint16(data[4:6])
ip6.NextHeader = IPProtocol(data[6])
ip6.HopLimit = data[7]
ip6.SrcIP = data[8:24]
ip6.DstIP = data[24:40]
ip6.HopByHop = nil
// We initially set the payload to all bytes after 40. ip6.Length or the
// HopByHop jumbogram option can both change this eventually, though.
ip6.BaseLayer = BaseLayer{data[:40], data[40:]}
// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
// options are crucial for understanding what's actually happening per packet.
if ip6.NextHeader == IPProtocolIPv6HopByHop {
ip6.hbh.DecodeFromBytes(ip6.Payload, df)
hbhLen := len(ip6.hbh.Contents)
// Reset IPv6 contents to include the HopByHop header.
ip6.BaseLayer = BaseLayer{data[:40+hbhLen], data[40+hbhLen:]}
ip6.HopByHop = &ip6.hbh
if ip6.Length == 0 {
for _, o := range ip6.hbh.Options {
if o.OptionType == IPv6HopByHopOptionJumbogram {
if len(o.OptionData) != 4 {
return fmt.Errorf("Invalid jumbo packet option length")
}
payloadLength := binary.BigEndian.Uint32(o.OptionData)
pEnd := int(payloadLength)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
} else {
ip6.Payload = ip6.Payload[:pEnd]
ip6.hbh.Payload = ip6.Payload
}
return nil
}
}
return fmt.Errorf("IPv6 length 0, but HopByHop header does not have jumbogram option")
}
}
if ip6.Length == 0 {
return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ip6.NextHeader)
} else {
pEnd := int(ip6.Length)
if pEnd > len(ip6.Payload) {
df.SetTruncated()
pEnd = len(ip6.Payload)
}
ip6.Payload = ip6.Payload[:pEnd]
}
return nil
}
func (i *IPv6) CanDecode() gopacket.LayerClass {
return LayerTypeIPv6
}
func (i *IPv6) NextLayerType() gopacket.LayerType {
if i.HopByHop != nil {
return i.HopByHop.NextHeader.LayerType()
}
return i.NextHeader.LayerType()
}
func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
ip6 := &IPv6{}
err := ip6.DecodeFromBytes(data, p)
p.AddLayer(ip6)
p.SetNetworkLayer(ip6)
if ip6.HopByHop != nil {
// TODO(gconnell): Since HopByHop is now an integral part of the IPv6
// layer, should it actually be added as its own layer? I'm leaning towards
// no.
p.AddLayer(ip6.HopByHop)
}
if err != nil {
return err
}
if ip6.HopByHop != nil {
return p.NextDecoder(ip6.HopByHop.NextHeader)
}
return p.NextDecoder(ip6.NextHeader)
}
func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
i.ipv6ExtensionBase = decodeIPv6ExtensionBase(data)
i.Options = i.opts[:0]
var opt *IPv6HopByHopOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6HopByHopOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
return nil
}
func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6HopByHop{}
err := i.DecodeFromBytes(data, p)
p.AddLayer(i)
if err != nil {
return err
}
return p.NextDecoder(i.NextHeader)
}
type ipv6HeaderTLVOption struct {
OptionType, OptionLength uint8
ActualLength int
OptionData []byte
}
func decodeIPv6HeaderTLVOption(data []byte) (h ipv6HeaderTLVOption) {
if data[0] == 0 {
h.ActualLength = 1
return
}
h.OptionType = data[0]
h.OptionLength = data[1]
h.ActualLength = int(h.OptionLength) + 2
h.OptionData = data[2:h.ActualLength]
return
}
func (h *ipv6HeaderTLVOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
if fixLengths {
h.OptionLength = uint8(len(h.OptionData))
}
length := int(h.OptionLength) + 2
data, err := b.PrependBytes(length)
if err != nil {
return 0, err
}
data[0] = h.OptionType
data[1] = h.OptionLength
copy(data[2:], h.OptionData)
return length, nil
}
// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
type IPv6HopByHopOption ipv6HeaderTLVOption
type ipv6ExtensionBase struct {
BaseLayer
NextHeader IPProtocol
HeaderLength uint8
ActualLength int
}
func decodeIPv6ExtensionBase(data []byte) (i ipv6ExtensionBase) {
i.NextHeader = IPProtocol(data[0])
i.HeaderLength = data[1]
i.ActualLength = int(i.HeaderLength)*8 + 8
i.Contents = data[:i.ActualLength]
i.Payload = data[i.ActualLength:]
return
}
// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
// which may or may not have extensions.
type IPv6ExtensionSkipper struct {
NextHeader IPProtocol
BaseLayer
}
func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
extension := decodeIPv6ExtensionBase(data)
i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
i.NextHeader = extension.NextHeader
return nil
}
func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
return LayerClassIPv6Extension
}
func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
return i.NextHeader.LayerType()
}
// IPv6HopByHop is the IPv6 hop-by-hop extension.
type IPv6HopByHop struct {
ipv6ExtensionBase
Options []IPv6HopByHopOption
opts [2]IPv6HopByHopOption
}
// LayerType returns LayerTypeIPv6HopByHop.
func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
// IPv6Routing is the IPv6 routing extension.
type IPv6Routing struct {
ipv6ExtensionBase
RoutingType uint8
SegmentsLeft uint8
// This segment is supposed to be zero according to RFC2460, the second set of
// 4 bytes in the extension.
Reserved []byte
// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
// set only if RoutingType == 0.
SourceRoutingIPs []net.IP
}
// LayerType returns LayerTypeIPv6Routing.
func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Routing{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
RoutingType: data[2],
SegmentsLeft: data[3],
Reserved: data[4:8],
}
switch i.RoutingType {
case 0: // Source routing
if (len(data)-8)%16 != 0 {
return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", len(data))
}
for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
}
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// IPv6Fragment is the IPv6 fragment header, used for packet
// fragmentation/defragmentation.
type IPv6Fragment struct {
BaseLayer
NextHeader IPProtocol
// Reserved1 is bits [8-16), from least to most significant, 0-indexed
Reserved1 uint8
FragmentOffset uint16
// Reserved2 is bits [29-31), from least to most significant, 0-indexed
Reserved2 uint8
MoreFragments bool
Identification uint32
}
// LayerType returns LayerTypeIPv6Fragment.
func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Fragment{
BaseLayer: BaseLayer{data[:8], data[8:]},
NextHeader: IPProtocol(data[0]),
Reserved1: data[1],
FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
Reserved2: data[3] & 0x6 >> 1,
MoreFragments: data[3]&0x1 != 0,
Identification: binary.BigEndian.Uint32(data[4:8]),
}
p.AddLayer(i)
return p.NextDecoder(gopacket.DecodeFragment)
}
// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
type IPv6DestinationOption ipv6HeaderTLVOption
func (o *IPv6DestinationOption) serializeTo(b gopacket.SerializeBuffer, fixLengths bool) (int, error) {
return (*ipv6HeaderTLVOption)(o).serializeTo(b, fixLengths)
}
// IPv6Destination is the IPv6 destination options header.
type IPv6Destination struct {
ipv6ExtensionBase
Options []IPv6DestinationOption
}
// LayerType returns LayerTypeIPv6Destination.
func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
i := &IPv6Destination{
ipv6ExtensionBase: decodeIPv6ExtensionBase(data),
// We guess we'll 1-2 options, one regular option at least, then maybe one
// padding option.
Options: make([]IPv6DestinationOption, 0, 2),
}
var opt *IPv6DestinationOption
for d := i.Contents[2:]; len(d) > 0; d = d[opt.ActualLength:] {
i.Options = append(i.Options, IPv6DestinationOption(decodeIPv6HeaderTLVOption(d)))
opt = &i.Options[len(i.Options)-1]
}
p.AddLayer(i)
return p.NextDecoder(i.NextHeader)
}
// SerializeTo writes the serialized form of this layer into the
// SerializationBuffer, implementing gopacket.SerializableLayer.
// See the docs for gopacket.SerializableLayer for more info.
func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
optionLength := 0
for _, opt := range i.Options {
l, err := opt.serializeTo(b, opts.FixLengths)
if err != nil {
return err
}
optionLength += l
}
bytes, err := b.PrependBytes(2)
if err != nil {
return err
}
bytes[0] = uint8(i.NextHeader)
if opts.FixLengths {
i.HeaderLength = uint8((optionLength + 2) / 8)
}
bytes[1] = i.HeaderLength
return nil
}
| SerializeTo | identifier_name |
pod.go | // Copyright 2019 The Kanister Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"context"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"time"
json "github.com/json-iterator/go"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sp "k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1"
"github.com/kanisterio/kanister/pkg/consts"
"github.com/kanisterio/kanister/pkg/field"
"github.com/kanisterio/kanister/pkg/log"
"github.com/kanisterio/kanister/pkg/poll"
)
const (
// DefaultPodReadyWaitTimeout is the time to wait for pod to be ready
DefaultPodReadyWaitTimeout = 15 * time.Minute
// PodReadyWaitTimeoutEnv is the env var to get pod ready wait timeout
PodReadyWaitTimeoutEnv = "KANISTER_POD_READY_WAIT_TIMEOUT"
errAccessingNode = "Failed to get node"
defaultContainerName = "container"
)
// PodOptions specifies options for `CreatePod`
type PodOptions struct {
Annotations map[string]string
Command []string
ContainerName string
Name string
GenerateName string
Image string
Labels map[string]string
Namespace string
ServiceAccountName string
Volumes map[string]string
BlockVolumes map[string]string
// PodSecurityContext and ContainerSecurityContext can be used to set the security context
// at the pod level and container level respectively.
// You can still use podOverride to set the pod security context, but these fields will take precedence.
// We chose these fields to specify security context instead of just using podOverride because
// the merge behaviour of the pods spec is confusing in case of podOverride, and this is more readable.
PodSecurityContext *v1.PodSecurityContext
ContainerSecurityContext *v1.SecurityContext
PodOverride crv1alpha1.JSONMap
Resources v1.ResourceRequirements
RestartPolicy v1.RestartPolicy
OwnerReferences []metav1.OwnerReference
EnvironmentVariables []v1.EnvVar
Lifecycle *v1.Lifecycle
}
func GetPodObjectFromPodOptions(cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
// If Namespace is not specified, use the controller Namespace.
cns, err := GetControllerNamespace()
if err != nil {
return nil, errors.Wrapf(err, "Failed to get controller namespace")
}
ns := opts.Namespace
if ns == "" {
ns = cns
}
// If a ServiceAccount is not specified and we are in the controller's
// namespace, use the same service account as the controller.
sa := opts.ServiceAccountName
if sa == "" && ns == cns {
sa, err = GetControllerServiceAccount(cli)
if err != nil {
return nil, errors.Wrap(err, "Failed to get Controller Service Account")
}
}
if opts.RestartPolicy == "" {
opts.RestartPolicy = v1.RestartPolicyNever
}
volumeMounts, podVolumes, err := createFilesystemModeVolumeSpecs(opts.Volumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create volume spec")
}
volumeDevices, blockVolumes, err := createBlockModeVolumeSpecs(opts.BlockVolumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create raw block volume spec")
}
podVolumes = append(podVolumes, blockVolumes...)
defaultSpecs := v1.PodSpec{
Containers: []v1.Container{
{
Name: defaultContainerName,
Image: opts.Image,
Command: opts.Command,
ImagePullPolicy: v1.PullPolicy(v1.PullIfNotPresent),
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Resources: opts.Resources,
},
},
// RestartPolicy dictates when the containers of the pod should be
// restarted. The possible values include Always, OnFailure and Never
// with Never being the default. OnFailure policy will result in
// failed containers being restarted with an exponential back-off delay.
RestartPolicy: opts.RestartPolicy,
Volumes: podVolumes,
ServiceAccountName: sa,
}
if opts.EnvironmentVariables != nil && len(opts.EnvironmentVariables) > 0 {
defaultSpecs.Containers[0].Env = opts.EnvironmentVariables
}
// Patch default Pod Specs if needed
patchedSpecs, err := patchDefaultPodSpecs(defaultSpecs, opts.PodOverride)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Failed to override pod specs. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
// Always put the main container the first
sort.Slice(patchedSpecs.Containers, func(i, j int) bool {
return patchedSpecs.Containers[i].Name == defaultContainerName
})
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: opts.GenerateName,
Labels: map[string]string{
consts.LabelKeyCreatedBy: consts.LabelValueKanister,
},
},
Spec: patchedSpecs,
}
// Override `GenerateName` if `Name` option is provided
if opts.Name != "" {
pod.Name = opts.Name
}
// Override default container name if applicable
if opts.ContainerName != "" {
pod.Spec.Containers[0].Name = opts.ContainerName
}
// Add Annotations and Labels, if specified
if opts.Annotations != nil {
pod.ObjectMeta.Annotations = opts.Annotations
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = map[string]string{}
}
if opts.OwnerReferences != nil {
pod.SetOwnerReferences(opts.OwnerReferences)
}
if opts.PodSecurityContext != nil {
pod.Spec.SecurityContext = opts.PodSecurityContext
}
if opts.ContainerSecurityContext != nil {
pod.Spec.Containers[0].SecurityContext = opts.ContainerSecurityContext
}
if opts.Lifecycle != nil {
pod.Spec.Containers[0].Lifecycle = opts.Lifecycle
}
for key, value := range opts.Labels {
pod.ObjectMeta.Labels[key] = value
}
pod.Namespace = ns
return pod, nil
}
// CreatePod creates a pod with a single container based on the specified image
func CreatePod(ctx context.Context, cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
pod, err := GetPodObjectFromPodOptions(cli, opts)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get pod from podOptions. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
pod, err = cli.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
return pod, nil
}
// DeletePod deletes the specified pod
func DeletePod(ctx context.Context, cli kubernetes.Interface, pod *v1.Pod) error {
if err := cli.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(err).Print("DeletePod failed")
}
return nil
}
func StreamPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (io.ReadCloser, error) {
plo := &v1.PodLogOptions{
Follow: true,
Container: containerName,
}
return cli.CoreV1().Pods(namespace).GetLogs(podName, plo).Stream(ctx)
}
// GetPodLogs fetches the logs from the given pod
func GetPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (string, error) {
reader, err := cli.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{Container: containerName}).Stream(ctx)
if err != nil {
return "", err
}
defer reader.Close()
bytes, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(bytes), nil
}
// getErrorFromLogs fetches logs from pod and constructs error containing last ten lines of log and specified error message
func getErrorFromLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string, err error, errorMessage string) error {
r, logErr := StreamPodLogs(ctx, cli, namespace, podName, containerName)
if logErr != nil {
return errors.Wrapf(logErr, "Failed to fetch logs from the pod")
}
defer r.Close()
// Grab last log lines and put them to an error
lt := NewLogTail(logTailDefaultLength)
// We are not interested in log extraction error
io.Copy(lt, r) // nolint: errcheck
return errors.Wrap(errors.Wrap(err, lt.ToString()), errorMessage)
}
// WaitForPodReady waits for a pod to exit the pending state
func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
timeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())
defer waitCancel()
attachLog := true
containerForLogs := ""
err := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return false, err
}
containerForLogs = p.Spec.Containers[0].Name
// check if nodes are up and available
err = checkNodesStatus(p, cli)
if err != nil && !strings.Contains(err.Error(), errAccessingNode) {
attachLog = false
return false, err
}
// check for memory or resource issues
if p.Status.Phase == v1.PodPending {
if p.Status.Reason == "OutOfmemory" || p.Status.Reason == "OutOfcpu" {
attachLog = false
return false, errors.Errorf("Pod stuck in pending state, reason: %s", p.Status.Reason)
}
}
// check if pvc and pv are up and ready to mount
if err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {
attachLog = false
return false, err
}
return p.Status.Phase != v1.PodPending && p.Status.Phase != "", nil
})
if err == nil {
return nil
}
errorMessage := fmt.Sprintf("Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s", GetPodReadyWaitTimeout(), namespace, name)
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
func checkNodesStatus(p *v1.Pod, cli kubernetes.Interface) error {
n := strings.Split(p.Spec.NodeName, "/")
if n[0] != "" {
node, err := cli.CoreV1().Nodes().Get(context.TODO(), n[0], metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "%s %s", errAccessingNode, n[0])
}
if !IsNodeReady(node) || !IsNodeSchedulable(node) {
return errors.Errorf("Node %s is currently not ready/schedulable", n[0])
}
}
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func getVolStatus(ctx context.Context, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
for _, vol := range p.Spec.Volumes {
if err := checkPVCAndPVStatus(ctx, vol, p, cli, namespace); err != nil {
return err
}
}
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func checkPVCAndPVStatus(ctx context.Context, vol v1.Volume, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
if vol.VolumeSource.PersistentVolumeClaim == nil {
// wait for timeout
return nil
}
pvcName := vol.VolumeSource.PersistentVolumeClaim.ClaimName
pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// Do not return err, wait for timeout, since sometimes in case of statefulsets, they trigger creation of a volume
return nil
} else {
return errors.Wrapf(err, "Failed to get PVC %s", pvcName)
}
}
switch pvc.Status.Phase {
case v1.ClaimLost:
return errors.Errorf("PVC %s assoicated with pod %s has status: %s", pvcName, p.Name, v1.ClaimLost)
case v1.ClaimPending:
pvName := pvc.Spec.VolumeName
if pvName == "" {
// wait for timeout
return nil
}
pv, err := cli.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// wait for timeout
return nil
} else {
return errors.Wrapf(err, "Failed to get PV %s", pvName)
}
}
if pv.Status.Phase == v1.VolumeFailed {
return errors.Errorf("PV %s associated with PVC %s has status: %s message: %s reason: %s namespace: %s", pvName, pvcName, v1.VolumeFailed, pv.Status.Message, pv.Status.Reason, namespace)
}
}
return nil
}
// WaitForPodCompletion waits for a pod to reach a terminal state, or timeout
func | (ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
attachLog := true
containerForLogs := ""
err := poll.Wait(ctx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return true, err
}
containerForLogs = p.Spec.Containers[0].Name
switch p.Status.Phase {
case v1.PodFailed:
return false, errors.Errorf("Pod %s failed. Pod status: %s", name, p.Status.String())
}
return p.Status.Phase == v1.PodSucceeded, nil
})
errorMessage := "Pod failed or did not transition into complete state"
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
// use Strategic Merge to patch default pod specs with the passed specs
func patchDefaultPodSpecs(defaultPodSpecs v1.PodSpec, override crv1alpha1.JSONMap) (v1.PodSpec, error) {
// Merge default specs and override specs with StrategicMergePatch
mergedPatch, err := strategicMergeJsonPatch(defaultPodSpecs, override)
if err != nil {
return v1.PodSpec{}, err
}
// Convert merged json to v1.PodSPec object
podSpec := v1.PodSpec{}
err = json.Unmarshal(mergedPatch, &podSpec)
if err != nil {
return podSpec, err
}
return podSpec, err
}
// CreateAndMergeJsonPatch uses Strategic Merge to merge two Pod spec configuration
func CreateAndMergeJsonPatch(original, override crv1alpha1.JSONMap) (crv1alpha1.JSONMap, error) {
// Merge json specs with StrategicMerge
mergedPatch, err := strategicMergeJsonPatch(original, override)
if err != nil {
return nil, err
}
// Convert merged json to map[string]interface{}
var merged map[string]interface{}
err = json.Unmarshal(mergedPatch, &merged)
if err != nil {
return nil, err
}
return merged, err
}
func strategicMergeJsonPatch(original, override interface{}) ([]byte, error) {
// Convert override specs to json
overrideJson, err := json.Marshal(override)
if err != nil {
return nil, err
}
// Convert original specs to json
originalJson, err := json.Marshal(original)
if err != nil {
return nil, err
}
// Merge json specs with StrategicMerge
mergedPatch, err := sp.StrategicMergePatch(originalJson, overrideJson, v1.PodSpec{})
if err != nil {
return nil, err
}
return mergedPatch, nil
}
// GetPodReadyWaitTimeout returns the pod ready wait timeout from ENV if configured
// returns the default of 15 minutes otherwise
func GetPodReadyWaitTimeout() time.Duration {
if v, ok := os.LookupEnv(PodReadyWaitTimeoutEnv); ok {
iv, err := strconv.Atoi(v)
if err == nil {
return time.Duration(iv) * time.Minute
}
log.Debug().Print("Using default timeout value because of invalid environment variable", field.M{"envVar": v})
}
return DefaultPodReadyWaitTimeout
}
| WaitForPodCompletion | identifier_name |
pod.go | // Copyright 2019 The Kanister Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"context"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"time"
json "github.com/json-iterator/go"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sp "k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1"
"github.com/kanisterio/kanister/pkg/consts"
"github.com/kanisterio/kanister/pkg/field"
"github.com/kanisterio/kanister/pkg/log"
"github.com/kanisterio/kanister/pkg/poll"
)
const (
// DefaultPodReadyWaitTimeout is the time to wait for pod to be ready
DefaultPodReadyWaitTimeout = 15 * time.Minute
// PodReadyWaitTimeoutEnv is the env var to get pod ready wait timeout
PodReadyWaitTimeoutEnv = "KANISTER_POD_READY_WAIT_TIMEOUT"
errAccessingNode = "Failed to get node"
defaultContainerName = "container"
)
// PodOptions specifies options for `CreatePod`
type PodOptions struct {
Annotations map[string]string
Command []string
ContainerName string
Name string
GenerateName string
Image string
Labels map[string]string
Namespace string
ServiceAccountName string
Volumes map[string]string
BlockVolumes map[string]string
// PodSecurityContext and ContainerSecurityContext can be used to set the security context
// at the pod level and container level respectively.
// You can still use podOverride to set the pod security context, but these fields will take precedence.
// We chose these fields to specify security context instead of just using podOverride because
// the merge behaviour of the pods spec is confusing in case of podOverride, and this is more readable.
PodSecurityContext *v1.PodSecurityContext
ContainerSecurityContext *v1.SecurityContext
PodOverride crv1alpha1.JSONMap
Resources v1.ResourceRequirements
RestartPolicy v1.RestartPolicy
OwnerReferences []metav1.OwnerReference
EnvironmentVariables []v1.EnvVar
Lifecycle *v1.Lifecycle
}
func GetPodObjectFromPodOptions(cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
// If Namespace is not specified, use the controller Namespace.
cns, err := GetControllerNamespace()
if err != nil {
return nil, errors.Wrapf(err, "Failed to get controller namespace")
}
ns := opts.Namespace
if ns == "" {
ns = cns
}
// If a ServiceAccount is not specified and we are in the controller's
// namespace, use the same service account as the controller.
sa := opts.ServiceAccountName
if sa == "" && ns == cns {
sa, err = GetControllerServiceAccount(cli)
if err != nil {
return nil, errors.Wrap(err, "Failed to get Controller Service Account")
}
}
if opts.RestartPolicy == "" {
opts.RestartPolicy = v1.RestartPolicyNever
}
volumeMounts, podVolumes, err := createFilesystemModeVolumeSpecs(opts.Volumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create volume spec")
}
volumeDevices, blockVolumes, err := createBlockModeVolumeSpecs(opts.BlockVolumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create raw block volume spec")
}
podVolumes = append(podVolumes, blockVolumes...)
defaultSpecs := v1.PodSpec{
Containers: []v1.Container{
{
Name: defaultContainerName,
Image: opts.Image,
Command: opts.Command,
ImagePullPolicy: v1.PullPolicy(v1.PullIfNotPresent),
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Resources: opts.Resources,
},
},
// RestartPolicy dictates when the containers of the pod should be
// restarted. The possible values include Always, OnFailure and Never
// with Never being the default. OnFailure policy will result in
// failed containers being restarted with an exponential back-off delay.
RestartPolicy: opts.RestartPolicy,
Volumes: podVolumes,
ServiceAccountName: sa,
}
if opts.EnvironmentVariables != nil && len(opts.EnvironmentVariables) > 0 {
defaultSpecs.Containers[0].Env = opts.EnvironmentVariables
}
// Patch default Pod Specs if needed
patchedSpecs, err := patchDefaultPodSpecs(defaultSpecs, opts.PodOverride)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Failed to override pod specs. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
// Always put the main container the first
sort.Slice(patchedSpecs.Containers, func(i, j int) bool {
return patchedSpecs.Containers[i].Name == defaultContainerName
})
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: opts.GenerateName,
Labels: map[string]string{
consts.LabelKeyCreatedBy: consts.LabelValueKanister,
},
},
Spec: patchedSpecs,
}
// Override `GenerateName` if `Name` option is provided
if opts.Name != "" {
pod.Name = opts.Name
}
// Override default container name if applicable
if opts.ContainerName != "" {
pod.Spec.Containers[0].Name = opts.ContainerName
}
// Add Annotations and Labels, if specified
if opts.Annotations != nil {
pod.ObjectMeta.Annotations = opts.Annotations
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = map[string]string{}
}
if opts.OwnerReferences != nil {
pod.SetOwnerReferences(opts.OwnerReferences)
}
if opts.PodSecurityContext != nil {
pod.Spec.SecurityContext = opts.PodSecurityContext
}
if opts.ContainerSecurityContext != nil {
pod.Spec.Containers[0].SecurityContext = opts.ContainerSecurityContext
}
if opts.Lifecycle != nil {
pod.Spec.Containers[0].Lifecycle = opts.Lifecycle
}
for key, value := range opts.Labels {
pod.ObjectMeta.Labels[key] = value
}
pod.Namespace = ns
return pod, nil
}
// CreatePod creates a pod with a single container based on the specified image
func CreatePod(ctx context.Context, cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) |
// DeletePod deletes the specified pod
func DeletePod(ctx context.Context, cli kubernetes.Interface, pod *v1.Pod) error {
if err := cli.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(err).Print("DeletePod failed")
}
return nil
}
func StreamPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (io.ReadCloser, error) {
plo := &v1.PodLogOptions{
Follow: true,
Container: containerName,
}
return cli.CoreV1().Pods(namespace).GetLogs(podName, plo).Stream(ctx)
}
// GetPodLogs fetches the logs from the given pod
func GetPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (string, error) {
reader, err := cli.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{Container: containerName}).Stream(ctx)
if err != nil {
return "", err
}
defer reader.Close()
bytes, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(bytes), nil
}
// getErrorFromLogs fetches logs from pod and constructs error containing last ten lines of log and specified error message
func getErrorFromLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string, err error, errorMessage string) error {
r, logErr := StreamPodLogs(ctx, cli, namespace, podName, containerName)
if logErr != nil {
return errors.Wrapf(logErr, "Failed to fetch logs from the pod")
}
defer r.Close()
// Grab last log lines and put them to an error
lt := NewLogTail(logTailDefaultLength)
// We are not interested in log extraction error
io.Copy(lt, r) // nolint: errcheck
return errors.Wrap(errors.Wrap(err, lt.ToString()), errorMessage)
}
// WaitForPodReady waits for a pod to exit the pending state
func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
timeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())
defer waitCancel()
attachLog := true
containerForLogs := ""
err := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return false, err
}
containerForLogs = p.Spec.Containers[0].Name
// check if nodes are up and available
err = checkNodesStatus(p, cli)
if err != nil && !strings.Contains(err.Error(), errAccessingNode) {
attachLog = false
return false, err
}
// check for memory or resource issues
if p.Status.Phase == v1.PodPending {
if p.Status.Reason == "OutOfmemory" || p.Status.Reason == "OutOfcpu" {
attachLog = false
return false, errors.Errorf("Pod stuck in pending state, reason: %s", p.Status.Reason)
}
}
// check if pvc and pv are up and ready to mount
if err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {
attachLog = false
return false, err
}
return p.Status.Phase != v1.PodPending && p.Status.Phase != "", nil
})
if err == nil {
return nil
}
errorMessage := fmt.Sprintf("Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s", GetPodReadyWaitTimeout(), namespace, name)
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
func checkNodesStatus(p *v1.Pod, cli kubernetes.Interface) error {
n := strings.Split(p.Spec.NodeName, "/")
if n[0] != "" {
node, err := cli.CoreV1().Nodes().Get(context.TODO(), n[0], metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "%s %s", errAccessingNode, n[0])
}
if !IsNodeReady(node) || !IsNodeSchedulable(node) {
return errors.Errorf("Node %s is currently not ready/schedulable", n[0])
}
}
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func getVolStatus(ctx context.Context, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
for _, vol := range p.Spec.Volumes {
if err := checkPVCAndPVStatus(ctx, vol, p, cli, namespace); err != nil {
return err
}
}
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func checkPVCAndPVStatus(ctx context.Context, vol v1.Volume, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
if vol.VolumeSource.PersistentVolumeClaim == nil {
// wait for timeout
return nil
}
pvcName := vol.VolumeSource.PersistentVolumeClaim.ClaimName
pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// Do not return err, wait for timeout, since sometimes in case of statefulsets, they trigger creation of a volume
return nil
} else {
return errors.Wrapf(err, "Failed to get PVC %s", pvcName)
}
}
switch pvc.Status.Phase {
case v1.ClaimLost:
return errors.Errorf("PVC %s assoicated with pod %s has status: %s", pvcName, p.Name, v1.ClaimLost)
case v1.ClaimPending:
pvName := pvc.Spec.VolumeName
if pvName == "" {
// wait for timeout
return nil
}
pv, err := cli.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// wait for timeout
return nil
} else {
return errors.Wrapf(err, "Failed to get PV %s", pvName)
}
}
if pv.Status.Phase == v1.VolumeFailed {
return errors.Errorf("PV %s associated with PVC %s has status: %s message: %s reason: %s namespace: %s", pvName, pvcName, v1.VolumeFailed, pv.Status.Message, pv.Status.Reason, namespace)
}
}
return nil
}
// WaitForPodCompletion waits for a pod to reach a terminal state, or timeout
func WaitForPodCompletion(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
attachLog := true
containerForLogs := ""
err := poll.Wait(ctx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return true, err
}
containerForLogs = p.Spec.Containers[0].Name
switch p.Status.Phase {
case v1.PodFailed:
return false, errors.Errorf("Pod %s failed. Pod status: %s", name, p.Status.String())
}
return p.Status.Phase == v1.PodSucceeded, nil
})
errorMessage := "Pod failed or did not transition into complete state"
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
// use Strategic Merge to patch default pod specs with the passed specs
func patchDefaultPodSpecs(defaultPodSpecs v1.PodSpec, override crv1alpha1.JSONMap) (v1.PodSpec, error) {
// Merge default specs and override specs with StrategicMergePatch
mergedPatch, err := strategicMergeJsonPatch(defaultPodSpecs, override)
if err != nil {
return v1.PodSpec{}, err
}
// Convert merged json to v1.PodSPec object
podSpec := v1.PodSpec{}
err = json.Unmarshal(mergedPatch, &podSpec)
if err != nil {
return podSpec, err
}
return podSpec, err
}
// CreateAndMergeJsonPatch uses Strategic Merge to merge two Pod spec configuration
func CreateAndMergeJsonPatch(original, override crv1alpha1.JSONMap) (crv1alpha1.JSONMap, error) {
// Merge json specs with StrategicMerge
mergedPatch, err := strategicMergeJsonPatch(original, override)
if err != nil {
return nil, err
}
// Convert merged json to map[string]interface{}
var merged map[string]interface{}
err = json.Unmarshal(mergedPatch, &merged)
if err != nil {
return nil, err
}
return merged, err
}
func strategicMergeJsonPatch(original, override interface{}) ([]byte, error) {
// Convert override specs to json
overrideJson, err := json.Marshal(override)
if err != nil {
return nil, err
}
// Convert original specs to json
originalJson, err := json.Marshal(original)
if err != nil {
return nil, err
}
// Merge json specs with StrategicMerge
mergedPatch, err := sp.StrategicMergePatch(originalJson, overrideJson, v1.PodSpec{})
if err != nil {
return nil, err
}
return mergedPatch, nil
}
// GetPodReadyWaitTimeout returns the pod ready wait timeout from ENV if configured
// returns the default of 15 minutes otherwise
func GetPodReadyWaitTimeout() time.Duration {
if v, ok := os.LookupEnv(PodReadyWaitTimeoutEnv); ok {
iv, err := strconv.Atoi(v)
if err == nil {
return time.Duration(iv) * time.Minute
}
log.Debug().Print("Using default timeout value because of invalid environment variable", field.M{"envVar": v})
}
return DefaultPodReadyWaitTimeout
}
| {
pod, err := GetPodObjectFromPodOptions(cli, opts)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get pod from podOptions. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
pod, err = cli.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
return pod, nil
} | identifier_body |
pod.go | // Copyright 2019 The Kanister Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"context"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"time"
json "github.com/json-iterator/go"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sp "k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1"
"github.com/kanisterio/kanister/pkg/consts"
"github.com/kanisterio/kanister/pkg/field"
"github.com/kanisterio/kanister/pkg/log"
"github.com/kanisterio/kanister/pkg/poll"
)
const (
// DefaultPodReadyWaitTimeout is the time to wait for pod to be ready
DefaultPodReadyWaitTimeout = 15 * time.Minute
// PodReadyWaitTimeoutEnv is the env var to get pod ready wait timeout
PodReadyWaitTimeoutEnv = "KANISTER_POD_READY_WAIT_TIMEOUT"
errAccessingNode = "Failed to get node"
defaultContainerName = "container"
)
// PodOptions specifies options for `CreatePod`
type PodOptions struct {
Annotations map[string]string
Command []string
ContainerName string
Name string
GenerateName string
Image string
Labels map[string]string
Namespace string
ServiceAccountName string
Volumes map[string]string
BlockVolumes map[string]string
// PodSecurityContext and ContainerSecurityContext can be used to set the security context
// at the pod level and container level respectively.
// You can still use podOverride to set the pod security context, but these fields will take precedence.
// We chose these fields to specify security context instead of just using podOverride because
// the merge behaviour of the pods spec is confusing in case of podOverride, and this is more readable.
PodSecurityContext *v1.PodSecurityContext
ContainerSecurityContext *v1.SecurityContext
PodOverride crv1alpha1.JSONMap
Resources v1.ResourceRequirements
RestartPolicy v1.RestartPolicy
OwnerReferences []metav1.OwnerReference
EnvironmentVariables []v1.EnvVar
Lifecycle *v1.Lifecycle
}
func GetPodObjectFromPodOptions(cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
// If Namespace is not specified, use the controller Namespace.
cns, err := GetControllerNamespace()
if err != nil {
return nil, errors.Wrapf(err, "Failed to get controller namespace")
}
ns := opts.Namespace
if ns == "" {
ns = cns
}
// If a ServiceAccount is not specified and we are in the controller's
// namespace, use the same service account as the controller.
sa := opts.ServiceAccountName
if sa == "" && ns == cns {
sa, err = GetControllerServiceAccount(cli)
if err != nil {
return nil, errors.Wrap(err, "Failed to get Controller Service Account")
}
}
if opts.RestartPolicy == "" {
opts.RestartPolicy = v1.RestartPolicyNever
}
volumeMounts, podVolumes, err := createFilesystemModeVolumeSpecs(opts.Volumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create volume spec")
}
volumeDevices, blockVolumes, err := createBlockModeVolumeSpecs(opts.BlockVolumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create raw block volume spec")
}
podVolumes = append(podVolumes, blockVolumes...)
defaultSpecs := v1.PodSpec{
Containers: []v1.Container{
{
Name: defaultContainerName,
Image: opts.Image,
Command: opts.Command,
ImagePullPolicy: v1.PullPolicy(v1.PullIfNotPresent),
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Resources: opts.Resources,
},
},
// RestartPolicy dictates when the containers of the pod should be
// restarted. The possible values include Always, OnFailure and Never
// with Never being the default. OnFailure policy will result in
// failed containers being restarted with an exponential back-off delay.
RestartPolicy: opts.RestartPolicy,
Volumes: podVolumes,
ServiceAccountName: sa,
}
if opts.EnvironmentVariables != nil && len(opts.EnvironmentVariables) > 0 {
defaultSpecs.Containers[0].Env = opts.EnvironmentVariables
}
// Patch default Pod Specs if needed
patchedSpecs, err := patchDefaultPodSpecs(defaultSpecs, opts.PodOverride)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Failed to override pod specs. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
// Always put the main container the first
sort.Slice(patchedSpecs.Containers, func(i, j int) bool {
return patchedSpecs.Containers[i].Name == defaultContainerName
})
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: opts.GenerateName,
Labels: map[string]string{
consts.LabelKeyCreatedBy: consts.LabelValueKanister,
},
},
Spec: patchedSpecs,
}
// Override `GenerateName` if `Name` option is provided
if opts.Name != "" {
pod.Name = opts.Name
}
// Override default container name if applicable
if opts.ContainerName != "" {
pod.Spec.Containers[0].Name = opts.ContainerName
}
// Add Annotations and Labels, if specified
if opts.Annotations != nil {
pod.ObjectMeta.Annotations = opts.Annotations
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = map[string]string{}
}
if opts.OwnerReferences != nil {
pod.SetOwnerReferences(opts.OwnerReferences)
}
if opts.PodSecurityContext != nil {
pod.Spec.SecurityContext = opts.PodSecurityContext
}
if opts.ContainerSecurityContext != nil {
pod.Spec.Containers[0].SecurityContext = opts.ContainerSecurityContext
}
if opts.Lifecycle != nil {
pod.Spec.Containers[0].Lifecycle = opts.Lifecycle
}
for key, value := range opts.Labels {
pod.ObjectMeta.Labels[key] = value
}
pod.Namespace = ns
return pod, nil
}
// CreatePod creates a pod with a single container based on the specified image
func CreatePod(ctx context.Context, cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
pod, err := GetPodObjectFromPodOptions(cli, opts)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get pod from podOptions. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
pod, err = cli.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
return pod, nil
}
// DeletePod deletes the specified pod
func DeletePod(ctx context.Context, cli kubernetes.Interface, pod *v1.Pod) error {
if err := cli.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(err).Print("DeletePod failed")
}
return nil
}
func StreamPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (io.ReadCloser, error) {
plo := &v1.PodLogOptions{
Follow: true,
Container: containerName,
}
return cli.CoreV1().Pods(namespace).GetLogs(podName, plo).Stream(ctx)
}
// GetPodLogs fetches the logs from the given pod
func GetPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (string, error) {
reader, err := cli.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{Container: containerName}).Stream(ctx)
if err != nil {
return "", err
}
defer reader.Close()
bytes, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(bytes), nil
}
// getErrorFromLogs fetches logs from pod and constructs error containing last ten lines of log and specified error message
func getErrorFromLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string, err error, errorMessage string) error {
r, logErr := StreamPodLogs(ctx, cli, namespace, podName, containerName)
if logErr != nil {
return errors.Wrapf(logErr, "Failed to fetch logs from the pod")
}
defer r.Close()
// Grab last log lines and put them to an error
lt := NewLogTail(logTailDefaultLength)
// We are not interested in log extraction error
io.Copy(lt, r) // nolint: errcheck
return errors.Wrap(errors.Wrap(err, lt.ToString()), errorMessage)
}
// WaitForPodReady waits for a pod to exit the pending state
func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
timeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())
defer waitCancel()
attachLog := true
containerForLogs := ""
err := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return false, err
}
containerForLogs = p.Spec.Containers[0].Name
// check if nodes are up and available
err = checkNodesStatus(p, cli)
if err != nil && !strings.Contains(err.Error(), errAccessingNode) {
attachLog = false
return false, err
}
// check for memory or resource issues
if p.Status.Phase == v1.PodPending {
if p.Status.Reason == "OutOfmemory" || p.Status.Reason == "OutOfcpu" {
attachLog = false
return false, errors.Errorf("Pod stuck in pending state, reason: %s", p.Status.Reason)
}
}
// check if pvc and pv are up and ready to mount
if err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {
attachLog = false
return false, err
}
return p.Status.Phase != v1.PodPending && p.Status.Phase != "", nil
})
if err == nil {
return nil
}
errorMessage := fmt.Sprintf("Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s", GetPodReadyWaitTimeout(), namespace, name)
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
func checkNodesStatus(p *v1.Pod, cli kubernetes.Interface) error {
n := strings.Split(p.Spec.NodeName, "/")
if n[0] != "" |
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func getVolStatus(ctx context.Context, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
for _, vol := range p.Spec.Volumes {
if err := checkPVCAndPVStatus(ctx, vol, p, cli, namespace); err != nil {
return err
}
}
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func checkPVCAndPVStatus(ctx context.Context, vol v1.Volume, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
if vol.VolumeSource.PersistentVolumeClaim == nil {
// wait for timeout
return nil
}
pvcName := vol.VolumeSource.PersistentVolumeClaim.ClaimName
pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// Do not return err, wait for timeout, since sometimes in case of statefulsets, they trigger creation of a volume
return nil
} else {
return errors.Wrapf(err, "Failed to get PVC %s", pvcName)
}
}
switch pvc.Status.Phase {
case v1.ClaimLost:
return errors.Errorf("PVC %s assoicated with pod %s has status: %s", pvcName, p.Name, v1.ClaimLost)
case v1.ClaimPending:
pvName := pvc.Spec.VolumeName
if pvName == "" {
// wait for timeout
return nil
}
pv, err := cli.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// wait for timeout
return nil
} else {
return errors.Wrapf(err, "Failed to get PV %s", pvName)
}
}
if pv.Status.Phase == v1.VolumeFailed {
return errors.Errorf("PV %s associated with PVC %s has status: %s message: %s reason: %s namespace: %s", pvName, pvcName, v1.VolumeFailed, pv.Status.Message, pv.Status.Reason, namespace)
}
}
return nil
}
// WaitForPodCompletion waits for a pod to reach a terminal state, or timeout
func WaitForPodCompletion(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
attachLog := true
containerForLogs := ""
err := poll.Wait(ctx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return true, err
}
containerForLogs = p.Spec.Containers[0].Name
switch p.Status.Phase {
case v1.PodFailed:
return false, errors.Errorf("Pod %s failed. Pod status: %s", name, p.Status.String())
}
return p.Status.Phase == v1.PodSucceeded, nil
})
errorMessage := "Pod failed or did not transition into complete state"
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
// use Strategic Merge to patch default pod specs with the passed specs
func patchDefaultPodSpecs(defaultPodSpecs v1.PodSpec, override crv1alpha1.JSONMap) (v1.PodSpec, error) {
// Merge default specs and override specs with StrategicMergePatch
mergedPatch, err := strategicMergeJsonPatch(defaultPodSpecs, override)
if err != nil {
return v1.PodSpec{}, err
}
// Convert merged json to v1.PodSPec object
podSpec := v1.PodSpec{}
err = json.Unmarshal(mergedPatch, &podSpec)
if err != nil {
return podSpec, err
}
return podSpec, err
}
// CreateAndMergeJsonPatch uses Strategic Merge to merge two Pod spec configuration
func CreateAndMergeJsonPatch(original, override crv1alpha1.JSONMap) (crv1alpha1.JSONMap, error) {
// Merge json specs with StrategicMerge
mergedPatch, err := strategicMergeJsonPatch(original, override)
if err != nil {
return nil, err
}
// Convert merged json to map[string]interface{}
var merged map[string]interface{}
err = json.Unmarshal(mergedPatch, &merged)
if err != nil {
return nil, err
}
return merged, err
}
func strategicMergeJsonPatch(original, override interface{}) ([]byte, error) {
// Convert override specs to json
overrideJson, err := json.Marshal(override)
if err != nil {
return nil, err
}
// Convert original specs to json
originalJson, err := json.Marshal(original)
if err != nil {
return nil, err
}
// Merge json specs with StrategicMerge
mergedPatch, err := sp.StrategicMergePatch(originalJson, overrideJson, v1.PodSpec{})
if err != nil {
return nil, err
}
return mergedPatch, nil
}
// GetPodReadyWaitTimeout returns the pod ready wait timeout from ENV if configured
// returns the default of 15 minutes otherwise
func GetPodReadyWaitTimeout() time.Duration {
if v, ok := os.LookupEnv(PodReadyWaitTimeoutEnv); ok {
iv, err := strconv.Atoi(v)
if err == nil {
return time.Duration(iv) * time.Minute
}
log.Debug().Print("Using default timeout value because of invalid environment variable", field.M{"envVar": v})
}
return DefaultPodReadyWaitTimeout
}
| {
node, err := cli.CoreV1().Nodes().Get(context.TODO(), n[0], metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "%s %s", errAccessingNode, n[0])
}
if !IsNodeReady(node) || !IsNodeSchedulable(node) {
return errors.Errorf("Node %s is currently not ready/schedulable", n[0])
}
} | conditional_block |
pod.go | // Copyright 2019 The Kanister Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"context"
"fmt"
"io"
"os"
"sort"
"strconv"
"strings"
"time"
json "github.com/json-iterator/go"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sp "k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
crv1alpha1 "github.com/kanisterio/kanister/pkg/apis/cr/v1alpha1"
"github.com/kanisterio/kanister/pkg/consts"
"github.com/kanisterio/kanister/pkg/field"
"github.com/kanisterio/kanister/pkg/log"
"github.com/kanisterio/kanister/pkg/poll"
)
const (
// DefaultPodReadyWaitTimeout is the time to wait for pod to be ready
DefaultPodReadyWaitTimeout = 15 * time.Minute
// PodReadyWaitTimeoutEnv is the env var to get pod ready wait timeout
PodReadyWaitTimeoutEnv = "KANISTER_POD_READY_WAIT_TIMEOUT"
errAccessingNode = "Failed to get node"
defaultContainerName = "container"
)
// PodOptions specifies options for `CreatePod`
type PodOptions struct {
Annotations map[string]string
Command []string
ContainerName string
Name string
GenerateName string
Image string
Labels map[string]string
Namespace string
ServiceAccountName string
Volumes map[string]string
BlockVolumes map[string]string
// PodSecurityContext and ContainerSecurityContext can be used to set the security context
// at the pod level and container level respectively.
// You can still use podOverride to set the pod security context, but these fields will take precedence.
// We chose these fields to specify security context instead of just using podOverride because
// the merge behaviour of the pods spec is confusing in case of podOverride, and this is more readable.
PodSecurityContext *v1.PodSecurityContext
ContainerSecurityContext *v1.SecurityContext
PodOverride crv1alpha1.JSONMap
Resources v1.ResourceRequirements
RestartPolicy v1.RestartPolicy
OwnerReferences []metav1.OwnerReference
EnvironmentVariables []v1.EnvVar
Lifecycle *v1.Lifecycle
}
func GetPodObjectFromPodOptions(cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
// If Namespace is not specified, use the controller Namespace.
cns, err := GetControllerNamespace()
if err != nil {
return nil, errors.Wrapf(err, "Failed to get controller namespace")
}
ns := opts.Namespace
if ns == "" {
ns = cns
}
// If a ServiceAccount is not specified and we are in the controller's
// namespace, use the same service account as the controller.
sa := opts.ServiceAccountName
if sa == "" && ns == cns {
sa, err = GetControllerServiceAccount(cli)
if err != nil {
return nil, errors.Wrap(err, "Failed to get Controller Service Account")
}
}
if opts.RestartPolicy == "" {
opts.RestartPolicy = v1.RestartPolicyNever
}
volumeMounts, podVolumes, err := createFilesystemModeVolumeSpecs(opts.Volumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create volume spec")
}
volumeDevices, blockVolumes, err := createBlockModeVolumeSpecs(opts.BlockVolumes)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create raw block volume spec")
}
podVolumes = append(podVolumes, blockVolumes...)
defaultSpecs := v1.PodSpec{
Containers: []v1.Container{
{
Name: defaultContainerName,
Image: opts.Image,
Command: opts.Command,
ImagePullPolicy: v1.PullPolicy(v1.PullIfNotPresent),
VolumeMounts: volumeMounts,
VolumeDevices: volumeDevices,
Resources: opts.Resources,
},
},
// RestartPolicy dictates when the containers of the pod should be
// restarted. The possible values include Always, OnFailure and Never
// with Never being the default. OnFailure policy will result in
// failed containers being restarted with an exponential back-off delay.
RestartPolicy: opts.RestartPolicy,
Volumes: podVolumes,
ServiceAccountName: sa,
}
if opts.EnvironmentVariables != nil && len(opts.EnvironmentVariables) > 0 {
defaultSpecs.Containers[0].Env = opts.EnvironmentVariables
}
// Patch default Pod Specs if needed
patchedSpecs, err := patchDefaultPodSpecs(defaultSpecs, opts.PodOverride)
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Failed to override pod specs. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
// Always put the main container the first
sort.Slice(patchedSpecs.Containers, func(i, j int) bool {
return patchedSpecs.Containers[i].Name == defaultContainerName
})
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: opts.GenerateName,
Labels: map[string]string{
consts.LabelKeyCreatedBy: consts.LabelValueKanister,
},
},
Spec: patchedSpecs,
}
// Override `GenerateName` if `Name` option is provided
if opts.Name != "" {
pod.Name = opts.Name
}
// Override default container name if applicable
if opts.ContainerName != "" {
pod.Spec.Containers[0].Name = opts.ContainerName
}
// Add Annotations and Labels, if specified
if opts.Annotations != nil {
pod.ObjectMeta.Annotations = opts.Annotations
}
if pod.ObjectMeta.Labels == nil {
pod.ObjectMeta.Labels = map[string]string{}
}
if opts.OwnerReferences != nil {
pod.SetOwnerReferences(opts.OwnerReferences)
}
if opts.PodSecurityContext != nil {
pod.Spec.SecurityContext = opts.PodSecurityContext
}
if opts.ContainerSecurityContext != nil {
pod.Spec.Containers[0].SecurityContext = opts.ContainerSecurityContext
}
if opts.Lifecycle != nil {
pod.Spec.Containers[0].Lifecycle = opts.Lifecycle
}
for key, value := range opts.Labels {
pod.ObjectMeta.Labels[key] = value
}
pod.Namespace = ns
return pod, nil
}
// CreatePod creates a pod with a single container based on the specified image
func CreatePod(ctx context.Context, cli kubernetes.Interface, opts *PodOptions) (*v1.Pod, error) {
pod, err := GetPodObjectFromPodOptions(cli, opts)
if err != nil {
return nil, errors.Wrapf(err, "Failed to get pod from podOptions. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
pod, err = cli.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, errors.Wrapf(err, "Failed to create pod. Namespace: %s, NameFmt: %s", opts.Namespace, opts.GenerateName)
}
return pod, nil
}
// DeletePod deletes the specified pod
func DeletePod(ctx context.Context, cli kubernetes.Interface, pod *v1.Pod) error {
if err := cli.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
log.WithError(err).Print("DeletePod failed")
}
return nil
}
func StreamPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (io.ReadCloser, error) {
plo := &v1.PodLogOptions{
Follow: true,
Container: containerName,
}
return cli.CoreV1().Pods(namespace).GetLogs(podName, plo).Stream(ctx)
}
// GetPodLogs fetches the logs from the given pod
func GetPodLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string) (string, error) {
reader, err := cli.CoreV1().Pods(namespace).GetLogs(podName, &v1.PodLogOptions{Container: containerName}).Stream(ctx)
if err != nil {
return "", err
}
defer reader.Close()
bytes, err := io.ReadAll(reader)
if err != nil {
return "", err
}
return string(bytes), nil
}
// getErrorFromLogs fetches logs from pod and constructs error containing last ten lines of log and specified error message
func getErrorFromLogs(ctx context.Context, cli kubernetes.Interface, namespace, podName, containerName string, err error, errorMessage string) error {
r, logErr := StreamPodLogs(ctx, cli, namespace, podName, containerName)
if logErr != nil {
return errors.Wrapf(logErr, "Failed to fetch logs from the pod")
}
defer r.Close()
// Grab last log lines and put them to an error
lt := NewLogTail(logTailDefaultLength)
// We are not interested in log extraction error
io.Copy(lt, r) // nolint: errcheck
return errors.Wrap(errors.Wrap(err, lt.ToString()), errorMessage)
}
// WaitForPodReady waits for a pod to exit the pending state
func WaitForPodReady(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
timeoutCtx, waitCancel := context.WithTimeout(ctx, GetPodReadyWaitTimeout())
defer waitCancel()
attachLog := true
containerForLogs := ""
err := poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return false, err
}
containerForLogs = p.Spec.Containers[0].Name
// check if nodes are up and available
err = checkNodesStatus(p, cli)
if err != nil && !strings.Contains(err.Error(), errAccessingNode) {
attachLog = false
return false, err
}
// check for memory or resource issues | attachLog = false
return false, errors.Errorf("Pod stuck in pending state, reason: %s", p.Status.Reason)
}
}
// check if pvc and pv are up and ready to mount
if err := getVolStatus(timeoutCtx, p, cli, namespace); err != nil {
attachLog = false
return false, err
}
return p.Status.Phase != v1.PodPending && p.Status.Phase != "", nil
})
if err == nil {
return nil
}
errorMessage := fmt.Sprintf("Pod did not transition into running state. Timeout:%v Namespace:%s, Name:%s", GetPodReadyWaitTimeout(), namespace, name)
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
func checkNodesStatus(p *v1.Pod, cli kubernetes.Interface) error {
n := strings.Split(p.Spec.NodeName, "/")
if n[0] != "" {
node, err := cli.CoreV1().Nodes().Get(context.TODO(), n[0], metav1.GetOptions{})
if err != nil {
return errors.Wrapf(err, "%s %s", errAccessingNode, n[0])
}
if !IsNodeReady(node) || !IsNodeSchedulable(node) {
return errors.Errorf("Node %s is currently not ready/schedulable", n[0])
}
}
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func getVolStatus(ctx context.Context, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
for _, vol := range p.Spec.Volumes {
if err := checkPVCAndPVStatus(ctx, vol, p, cli, namespace); err != nil {
return err
}
}
return nil
}
// checkPVCAndPVStatus does the following:
// - if PVC is present then check the status of PVC
// - if PVC is pending then check if the PV status is VolumeFailed return error if so. if not then wait for timeout.
// - if PVC not present then wait for timeout
func checkPVCAndPVStatus(ctx context.Context, vol v1.Volume, p *v1.Pod, cli kubernetes.Interface, namespace string) error {
if vol.VolumeSource.PersistentVolumeClaim == nil {
// wait for timeout
return nil
}
pvcName := vol.VolumeSource.PersistentVolumeClaim.ClaimName
pvc, err := cli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// Do not return err, wait for timeout, since sometimes in case of statefulsets, they trigger creation of a volume
return nil
} else {
return errors.Wrapf(err, "Failed to get PVC %s", pvcName)
}
}
switch pvc.Status.Phase {
case v1.ClaimLost:
return errors.Errorf("PVC %s assoicated with pod %s has status: %s", pvcName, p.Name, v1.ClaimLost)
case v1.ClaimPending:
pvName := pvc.Spec.VolumeName
if pvName == "" {
// wait for timeout
return nil
}
pv, err := cli.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(errors.Cause(err)) {
// wait for timeout
return nil
} else {
return errors.Wrapf(err, "Failed to get PV %s", pvName)
}
}
if pv.Status.Phase == v1.VolumeFailed {
return errors.Errorf("PV %s associated with PVC %s has status: %s message: %s reason: %s namespace: %s", pvName, pvcName, v1.VolumeFailed, pv.Status.Message, pv.Status.Reason, namespace)
}
}
return nil
}
// WaitForPodCompletion waits for a pod to reach a terminal state, or timeout
func WaitForPodCompletion(ctx context.Context, cli kubernetes.Interface, namespace, name string) error {
attachLog := true
containerForLogs := ""
err := poll.Wait(ctx, func(ctx context.Context) (bool, error) {
p, err := cli.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
attachLog = false
return true, err
}
containerForLogs = p.Spec.Containers[0].Name
switch p.Status.Phase {
case v1.PodFailed:
return false, errors.Errorf("Pod %s failed. Pod status: %s", name, p.Status.String())
}
return p.Status.Phase == v1.PodSucceeded, nil
})
errorMessage := "Pod failed or did not transition into complete state"
if attachLog {
return getErrorFromLogs(ctx, cli, namespace, name, containerForLogs, err, errorMessage)
}
return errors.Wrap(err, errorMessage)
}
// use Strategic Merge to patch default pod specs with the passed specs
func patchDefaultPodSpecs(defaultPodSpecs v1.PodSpec, override crv1alpha1.JSONMap) (v1.PodSpec, error) {
// Merge default specs and override specs with StrategicMergePatch
mergedPatch, err := strategicMergeJsonPatch(defaultPodSpecs, override)
if err != nil {
return v1.PodSpec{}, err
}
// Convert merged json to v1.PodSPec object
podSpec := v1.PodSpec{}
err = json.Unmarshal(mergedPatch, &podSpec)
if err != nil {
return podSpec, err
}
return podSpec, err
}
// CreateAndMergeJsonPatch uses Strategic Merge to merge two Pod spec configuration
func CreateAndMergeJsonPatch(original, override crv1alpha1.JSONMap) (crv1alpha1.JSONMap, error) {
// Merge json specs with StrategicMerge
mergedPatch, err := strategicMergeJsonPatch(original, override)
if err != nil {
return nil, err
}
// Convert merged json to map[string]interface{}
var merged map[string]interface{}
err = json.Unmarshal(mergedPatch, &merged)
if err != nil {
return nil, err
}
return merged, err
}
func strategicMergeJsonPatch(original, override interface{}) ([]byte, error) {
// Convert override specs to json
overrideJson, err := json.Marshal(override)
if err != nil {
return nil, err
}
// Convert original specs to json
originalJson, err := json.Marshal(original)
if err != nil {
return nil, err
}
// Merge json specs with StrategicMerge
mergedPatch, err := sp.StrategicMergePatch(originalJson, overrideJson, v1.PodSpec{})
if err != nil {
return nil, err
}
return mergedPatch, nil
}
// GetPodReadyWaitTimeout returns the pod ready wait timeout from ENV if configured
// returns the default of 15 minutes otherwise
func GetPodReadyWaitTimeout() time.Duration {
if v, ok := os.LookupEnv(PodReadyWaitTimeoutEnv); ok {
iv, err := strconv.Atoi(v)
if err == nil {
return time.Duration(iv) * time.Minute
}
log.Debug().Print("Using default timeout value because of invalid environment variable", field.M{"envVar": v})
}
return DefaultPodReadyWaitTimeout
} | if p.Status.Phase == v1.PodPending {
if p.Status.Reason == "OutOfmemory" || p.Status.Reason == "OutOfcpu" { | random_line_split |
js_PassengerEdit.js | //对话框包含处理
function showdialog(t,f) {
jQuery("select").hide();
jQuery("#dialog").html(t);
jQuery("#dialog").dialog({
title: '提示',
bgiframe: true,
height: 180,
modal: true,
overlay: {
backgroundColor: '#000',
opacity: 0.5
},
close: function () {
jQuery("select").show();
},
buttons: {
'确定': function (evt) {
jQuery(this).dialog('close');
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
//代码
}
}
});
}
//扩展新方法
String.prototype.NewReplace=function (sourceData,replaceData) {
sourceData=sourceData.replace("(","\\(").replace(")","\\)");
var reg=new RegExp(sourceData,"ig");
var data=this.replace(reg,replaceData);
return data;
}
//padLeft(10,'0')
String.prototype.padLeft=function (length,char) {
var d=this;
var len=d.length;
while(len<length) {
d=char+d;
len++;
}
return d;
}
//padRight(10,'0')
String.prototype.padRight=function (length,char) {
var d=this;
var len=d.length;
while(len<length) {
d=d+char;
len++;
}
return d;
}
//日期已字符串形式显示
function GetStrDate(date,fg) {
var d1="";
if(fg==0) {//yyyy-MM-dd
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0'));
} else if(fg==1)//yyyy-MM-dd HH
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0'));
}
else if(fg==2)//yyyy-MM-dd HH:mm
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0'));
} else if(fg==3)//yyyy-MM-dd HH:mm:ss
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0')+":"+date.getSeconds().toString().padLeft(2,'0'));
}
return d1;
}
function initArr(arr,num) {
for(var i=1;i<=num;i++) {
eval("var obj={_"+i+":0}");
arr.push(obj);
}
return arr;
}
//重置序号状态
function resetFg(arr,val) {
for(var i=0;i<arr.length;i++) {
for(var m in arr[i]) {
arr[i][m]=val;
}
}
}
//设置序号状态
function setFg(arr,key,val) {
for(var i=0;i<arr.length;i++) {
for(var j in arr[i]) {
if(j=="_"+key) {
arr[i][j]=val;
}
}
}
}
//获取最小没有使用的序号
function getMinFg(arr) {
var index="0";
var istrue=false;
for(var i=0;i<arr.length;i++) {
if(istrue) {
break;
}
for(var key in arr[i]) {
if(arr[i][key]=="0") {
index=key.replace("_","");
istrue=true;
break;
}
}
}
return index;
}
function ddlSetText(ddlObj,flag,num) {
var ddlVal=jQuery.trim(jQuery(ddlObj).val()).split('-')[0].toUpperCase();
jQuery("#"+flag+"_"+num).val(ddlVal);
}
function txtSetSel(txtObj,flag,num) {
var txtVal=jQuery(txtObj).val().toUpperCase();
if(txtVal!="") {
jQuery("#"+flag+"_"+num+" option[value*='"+txtVal+"']").attr("selected",true);
} else {
jQuery("#"+flag+"_"+num+" option").eq(0).attr("selected",true);
}
}
//最多可以添加航空公司和卡号数
var maxCarryNum=20;
var carryArr=[];
//添加一行
function addGroup(evt,name) {
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
}
var num=0;
//模板
var trHtml=jQuery("<div></div>").append(jQuery("#tab_"+name+" tr[id='tr"+name+"_0'").clone(true)).html();
var trCnt=jQuery("#tab_"+name+" tr[id*='tr"+name+"_']").length;
if(name=="carry") {
if(trCnt>=maxCarryNum) {
showdialog("已超过最大范围,不能继续添加了!");
return false;
}
//获取可用序号从1开始
num=getMinFg(carryArr);
//标记为已使用
setFg(carryArr,num,"1");
}
//操作内容
var opDiv='<div id="'+name+'_opdiv_'+num+'"> <span class="btn btn-ok-s"><input type="button" value="添加" id="btnAdd_'+num+'" onclick="return addGroup(event,\''+name+'\')" /></span>'+
' <span class="btn btn-ok-s"><input type="button" value="删除" id="btnDel_'+num+'" onclick="return removeGroup(event,\''+name+'\','+num+')" /></span></div>';
//设置操作内容HTML
trHtml="<tr id='tr"+name+"_"+num+"'>"+jQuery("<tr>"+trHtml+"</tr>").find("td:last").html(opDiv).parent().html()+"</tr>";
//替换id
trHtml=trHtml.NewReplace("_0","_"+num).NewReplace("txtSetSel(this,'ddlCarryCode',0)","txtSetSel(this,'ddlCarryCode',"+num+")").NewReplace("ddlSetText(this,'txtCarryCode',0)","ddlSetText(this,'txtCarryCode',"+num+")");
//添加节点
jQuery("#tab_"+name).append(trHtml);
//设置初始值
jQuery("#tab_"+name+" ddlCarryCode_"+num).eq(0).attr("selected",true);
jQuery("#tab_"+name+" txtCarryCode_"+num).val("");
jQuery("#tab_"+name+" txtAirNo_"+num).val("");
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",false);
}
return num;
}
//移除一行
function removeGroup(evt,name,num) {
if(num!=null) {
jQuery("#tab_"+name+" tr[id='tr"+name+"_"+num+"'").remove();
} else {
var trCount=jQuery("#tab_"+name+" tr").length;
if(trCount>1) {
//从后往前删除
var lastTr=jQuery("#tab_"+name+" tr:last");
num=lastTr.attr("id").NewReplace("tr"+name+"_","");
lastTr.remove();
} else {
showdialog("该行数据不能删除!");
return false;
}
}
//标记为没有使用
setFg(carryArr,num,"0");
return false;
}
//保存数据
function SaveData() {
var url="PassengerEdit.aspx";
var val_IsEdit=jQuery("#Hid_IsEdit").val();
var val_Name=jQuery.trim(jQuery("#txtUser").val());
var val_Phone=jQuery.trim(jQuery("#txtPhone").val());
var val_CardType=jQuery.trim(jQuery("#ddlCardType").val());
//var text_CardType=jQuery.trim(jQuery("#ddlCardType option:selected").text());
var val_CardNum=jQuery.trim(jQuery("#txtCardNum").val());
//var val_Date=jQuery.trim(jQuery("#txtDate").val());
if(jQuery("#txtDate").is(":visible")) {
val_CardNum=jQuery.trim(jQuery("#txtDate").val());
}
var val_sex=jQuery("input[type='radio'][name='sex']:checked").val();
var val_pastype=jQuery("input[type='radio'][name='pastype']:checked").val();
var val_Birthday=jQuery("#txtBirthday").val();
var val_Remark=jQuery("#txtRemark").val();
if(val_Name=="") {
showdialog("旅客姓名不能为空!");
return false;
}
if(val_Phone=="") {
showdialog("旅客手机号码不能为空!");
return false;
}
if(val_CardNum=="") {
showdialog("旅客证件号码不能为空!");
return false;
}
if(val_Birthday=="") {
showdialog("旅客出生日期不能为空!");
retur | var carrNo=[];
jQuery("#tab_carry tr").each(function (index,tr) {
var carrCode=jQuery(tr).find("select[id*='ddlCarryCode_']").val();
var AirNo=jQuery.trim(jQuery(tr).find("input[id*='txtAirNo_']").val());
if(carrCode!=""&&AirNo=="") {
msg="航空公司卡号不能为空!";
return false;
}
carrNo.push(carrCode+","+AirNo);
});
if(msg!="") {
showdialog(msg);
return false;
}
if(carrNo!=null&&carrNo.length>0) {
val_CpyandNo=carrNo.join('|');
}
var param={
IsEdit: escape(val_IsEdit),
Name: escape(val_Name),
Phone: escape(val_Phone),
CardType: escape(val_CardType),
CardNum: escape(val_CardNum),
Sex: escape(val_sex),
Pastype: escape(val_pastype),
Birthday: escape(val_Birthday),
Remark: escape(val_Remark),
CpyandNo: escape(val_CpyandNo),
save: "save",
num: Math.random(),
currentuserid: jQuery("#currentuserid").val()
};
if(val_IsEdit=="1") {
//编辑
var Id=jQuery("#Hid_id").val();
param.Id=jQuery("#Hid_id").val();
}
jQuery.post(url,param,function (data) {
if(jQuery.trim(data)!="") {
var strArr=data.split('@@');
if(strArr.length==2) {
if(strArr[0]=="1") {
showdialog(strArr[1]);
} else {
showdialog(strArr[1]);
}
}
} else {
showdialog("操作失败!");
}
},"text");
return false;
}
//选择旅客类型
function PasTypeChange() {
var text=jQuery(this).attr('txt');
var val=jQuery(this).val();
var opData=jQuery.trim(jQuery("#Hid_CardData").val()).split('|');
var ophtml=[];
var opArr=[];
for(var i=0;i<opData.length;i++) {
opArr=opData[i].split('@@');
if(text.indexOf('成人')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
} else if(text.indexOf('儿童')!= -1) {
if(opData[i].indexOf('身份证')!= -1||opData[i].indexOf('出生日期')!= -1||opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
} else if(text.indexOf('婴儿')!= -1) {
if(opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
}
}
jQuery("#ddlCardType").html(ophtml.join(''));
jQuery("#ddlCardType option:visible").eq(0).attr("selected",true);
CardTypeChange();
}
//选择证件类型
function CardTypeChange() {
var val=jQuery(this).val();
var text=jQuery("#ddlCardType option:selected").text();
var pasType=jQuery("input[type='radio'][name='pastype']:checked").attr("txt");
if(pasType.indexOf('成人')!= -1) {
jQuery("#txtCardNum").show();
jQuery("#txtDate").hide();
} else if(pasType.indexOf('儿童')!= -1) {
if(text.indexOf("出生日期")!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
} else {
jQuery("#txtCardNum").show();
jQuery("#txtDate").hide();
}
} else if(pasType.indexOf('婴儿')!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
}
}
//加载。。。
jQuery(function () {
//初始化航空公司和卡号数
initArr(carryArr,maxCarryNum);
var IsEdit=jQuery("#Hid_IsEdit").val();
//单击旅客类型事件
jQuery("input[type='radio'][name='pastype']").click(PasTypeChange);
jQuery("#ddlCardType").change(CardTypeChange);
if(IsEdit=="1") {
//编辑
var CpyandNo=jQuery.trim(jQuery("#Hid_CpyandNo").val());
var Arr=CpyandNo.split('|');
var num=0;
var carryCode='',Card='';
var name='carry';
for(var i=0;i<Arr.length;i++) {
var carrArr=Arr[i].split(',');
if(carrArr.length==2) {
carryCode=carrArr[0].toUpperCase();
Card=carrArr[1];
if(i>0) {
//添加
num=addGroup(null,name);
}
//赋值
jQuery("#tab_"+name+" select[id='ddlCarryCode_"+num+"'] option[value='"+carryCode+"']").attr("selected",true);
jQuery("#tab_"+name+" #txtCarryCode_"+num).val(carryCode);
jQuery("#tab_"+name+" #txtAirNo_"+num).val(Card);
}
}
var Flyer=jQuery.trim(jQuery("#Hid_Flyer").val());
if(Flyer!=null) {
var model=eval("("+Flyer+")");
jQuery("#txtUser").val(model._name);
jQuery("#txtPhone").val(model._tel);
//证件类型
jQuery("select[id='ddlCardType']").val(model._certificatetype);
//乘客类型
jQuery("input[type='radio'][name='pastype'][value="+model._flyertype+"]").attr("checked",true);
CardTypeChange();
if(jQuery("#txtCardNum").is(":visible")) {
jQuery("#txtCardNum").val(model._certificatenum);
} else {
jQuery("#txtDate").val(model._certificatenum);
}
jQuery("input[type='radio'][name='sex'][value="+model._sex+"]").attr("checked",true);
var Birthday=GetStrDate(eval("new "+model._brontime.NewReplace("/","")+""),0);
jQuery("#txtBirthday").val(Birthday);
jQuery("#txtRemark").val(model._remark);
}
}
}) | n false;
}
//验证航空公司卡号 暂时不验证
var val_CpyandNo="";
var msg="";
| conditional_block |
js_PassengerEdit.js | //对话框包含处理
function showdialog(t,f) {
jQuery("select").hide();
jQuery("#dialog").html(t);
jQuery("#dialog").dialog({
title: '提示',
bgiframe: true,
height: 180,
modal: true,
overlay: {
backgroundColor: '#000',
opacity: 0.5
},
close: function () {
jQuery("select").show();
},
buttons: {
'确定': function (evt) {
jQuery(this).dialog('close');
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
//代码
}
}
});
}
//扩展新方法
String.prototype.NewReplace=function (sourceData,replaceData) {
sourceData=sourceData.replace("(","\\(").replace(")","\\)");
var reg=new RegExp(sourceData,"ig");
var data=this.replace(reg,replaceData);
return data;
}
//padLeft(10,'0')
String.prototype.padLeft=function (length,char) {
var d=this;
var len=d.length;
while(len<length) {
d=char+d;
len++;
}
return d;
}
//padRight(10,'0')
String.prototype.padRight=function (length,char) {
var d=this;
var len=d.length;
while(len<length) {
d=d+char;
len++;
}
return d;
}
//日期已字符串形式显示
function GetStrDate(date,fg) {
var d1="";
if(fg==0) {//yyyy | d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0'));
} else if(fg==1)//yyyy-MM-dd HH
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0'));
}
else if(fg==2)//yyyy-MM-dd HH:mm
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0'));
} else if(fg==3)//yyyy-MM-dd HH:mm:ss
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0')+":"+date.getSeconds().toString().padLeft(2,'0'));
}
return d1;
}
function initArr(arr,num) {
for(var i=1;i<=num;i++) {
eval("var obj={_"+i+":0}");
arr.push(obj);
}
return arr;
}
//重置序号状态
function resetFg(arr,val) {
for(var i=0;i<arr.length;i++) {
for(var m in arr[i]) {
arr[i][m]=val;
}
}
}
//设置序号状态
function setFg(arr,key,val) {
for(var i=0;i<arr.length;i++) {
for(var j in arr[i]) {
if(j=="_"+key) {
arr[i][j]=val;
}
}
}
}
//获取最小没有使用的序号
function getMinFg(arr) {
var index="0";
var istrue=false;
for(var i=0;i<arr.length;i++) {
if(istrue) {
break;
}
for(var key in arr[i]) {
if(arr[i][key]=="0") {
index=key.replace("_","");
istrue=true;
break;
}
}
}
return index;
}
function ddlSetText(ddlObj,flag,num) {
var ddlVal=jQuery.trim(jQuery(ddlObj).val()).split('-')[0].toUpperCase();
jQuery("#"+flag+"_"+num).val(ddlVal);
}
function txtSetSel(txtObj,flag,num) {
var txtVal=jQuery(txtObj).val().toUpperCase();
if(txtVal!="") {
jQuery("#"+flag+"_"+num+" option[value*='"+txtVal+"']").attr("selected",true);
} else {
jQuery("#"+flag+"_"+num+" option").eq(0).attr("selected",true);
}
}
//最多可以添加航空公司和卡号数
var maxCarryNum=20;
var carryArr=[];
//添加一行
function addGroup(evt,name) {
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
}
var num=0;
//模板
var trHtml=jQuery("<div></div>").append(jQuery("#tab_"+name+" tr[id='tr"+name+"_0'").clone(true)).html();
var trCnt=jQuery("#tab_"+name+" tr[id*='tr"+name+"_']").length;
if(name=="carry") {
if(trCnt>=maxCarryNum) {
showdialog("已超过最大范围,不能继续添加了!");
return false;
}
//获取可用序号从1开始
num=getMinFg(carryArr);
//标记为已使用
setFg(carryArr,num,"1");
}
//操作内容
var opDiv='<div id="'+name+'_opdiv_'+num+'"> <span class="btn btn-ok-s"><input type="button" value="添加" id="btnAdd_'+num+'" onclick="return addGroup(event,\''+name+'\')" /></span>'+
' <span class="btn btn-ok-s"><input type="button" value="删除" id="btnDel_'+num+'" onclick="return removeGroup(event,\''+name+'\','+num+')" /></span></div>';
//设置操作内容HTML
trHtml="<tr id='tr"+name+"_"+num+"'>"+jQuery("<tr>"+trHtml+"</tr>").find("td:last").html(opDiv).parent().html()+"</tr>";
//替换id
trHtml=trHtml.NewReplace("_0","_"+num).NewReplace("txtSetSel(this,'ddlCarryCode',0)","txtSetSel(this,'ddlCarryCode',"+num+")").NewReplace("ddlSetText(this,'txtCarryCode',0)","ddlSetText(this,'txtCarryCode',"+num+")");
//添加节点
jQuery("#tab_"+name).append(trHtml);
//设置初始值
jQuery("#tab_"+name+" ddlCarryCode_"+num).eq(0).attr("selected",true);
jQuery("#tab_"+name+" txtCarryCode_"+num).val("");
jQuery("#tab_"+name+" txtAirNo_"+num).val("");
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",false);
}
return num;
}
//移除一行
function removeGroup(evt,name,num) {
if(num!=null) {
jQuery("#tab_"+name+" tr[id='tr"+name+"_"+num+"'").remove();
} else {
var trCount=jQuery("#tab_"+name+" tr").length;
if(trCount>1) {
//从后往前删除
var lastTr=jQuery("#tab_"+name+" tr:last");
num=lastTr.attr("id").NewReplace("tr"+name+"_","");
lastTr.remove();
} else {
showdialog("该行数据不能删除!");
return false;
}
}
//标记为没有使用
setFg(carryArr,num,"0");
return false;
}
//保存数据
function SaveData() {
var url="PassengerEdit.aspx";
var val_IsEdit=jQuery("#Hid_IsEdit").val();
var val_Name=jQuery.trim(jQuery("#txtUser").val());
var val_Phone=jQuery.trim(jQuery("#txtPhone").val());
var val_CardType=jQuery.trim(jQuery("#ddlCardType").val());
//var text_CardType=jQuery.trim(jQuery("#ddlCardType option:selected").text());
var val_CardNum=jQuery.trim(jQuery("#txtCardNum").val());
//var val_Date=jQuery.trim(jQuery("#txtDate").val());
if(jQuery("#txtDate").is(":visible")) {
val_CardNum=jQuery.trim(jQuery("#txtDate").val());
}
var val_sex=jQuery("input[type='radio'][name='sex']:checked").val();
var val_pastype=jQuery("input[type='radio'][name='pastype']:checked").val();
var val_Birthday=jQuery("#txtBirthday").val();
var val_Remark=jQuery("#txtRemark").val();
if(val_Name=="") {
showdialog("旅客姓名不能为空!");
return false;
}
if(val_Phone=="") {
showdialog("旅客手机号码不能为空!");
return false;
}
if(val_CardNum=="") {
showdialog("旅客证件号码不能为空!");
return false;
}
if(val_Birthday=="") {
showdialog("旅客出生日期不能为空!");
return false;
}
//验证航空公司卡号 暂时不验证
var val_CpyandNo="";
var msg="";
var carrNo=[];
jQuery("#tab_carry tr").each(function (index,tr) {
var carrCode=jQuery(tr).find("select[id*='ddlCarryCode_']").val();
var AirNo=jQuery.trim(jQuery(tr).find("input[id*='txtAirNo_']").val());
if(carrCode!=""&&AirNo=="") {
msg="航空公司卡号不能为空!";
return false;
}
carrNo.push(carrCode+","+AirNo);
});
if(msg!="") {
showdialog(msg);
return false;
}
if(carrNo!=null&&carrNo.length>0) {
val_CpyandNo=carrNo.join('|');
}
var param={
IsEdit: escape(val_IsEdit),
Name: escape(val_Name),
Phone: escape(val_Phone),
CardType: escape(val_CardType),
CardNum: escape(val_CardNum),
Sex: escape(val_sex),
Pastype: escape(val_pastype),
Birthday: escape(val_Birthday),
Remark: escape(val_Remark),
CpyandNo: escape(val_CpyandNo),
save: "save",
num: Math.random(),
currentuserid: jQuery("#currentuserid").val()
};
if(val_IsEdit=="1") {
//编辑
var Id=jQuery("#Hid_id").val();
param.Id=jQuery("#Hid_id").val();
}
jQuery.post(url,param,function (data) {
if(jQuery.trim(data)!="") {
var strArr=data.split('@@');
if(strArr.length==2) {
if(strArr[0]=="1") {
showdialog(strArr[1]);
} else {
showdialog(strArr[1]);
}
}
} else {
showdialog("操作失败!");
}
},"text");
return false;
}
//选择旅客类型
function PasTypeChange() {
var text=jQuery(this).attr('txt');
var val=jQuery(this).val();
var opData=jQuery.trim(jQuery("#Hid_CardData").val()).split('|');
var ophtml=[];
var opArr=[];
for(var i=0;i<opData.length;i++) {
opArr=opData[i].split('@@');
if(text.indexOf('成人')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
} else if(text.indexOf('儿童')!= -1) {
if(opData[i].indexOf('身份证')!= -1||opData[i].indexOf('出生日期')!= -1||opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
} else if(text.indexOf('婴儿')!= -1) {
if(opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
}
}
jQuery("#ddlCardType").html(ophtml.join(''));
jQuery("#ddlCardType option:visible").eq(0).attr("selected",true);
CardTypeChange();
}
//选择证件类型
function CardTypeChange() {
var val=jQuery(this).val();
var text=jQuery("#ddlCardType option:selected").text();
var pasType=jQuery("input[type='radio'][name='pastype']:checked").attr("txt");
if(pasType.indexOf('成人')!= -1) {
jQuery("#txtCardNum").show();
jQuery("#txtDate").hide();
} else if(pasType.indexOf('儿童')!= -1) {
if(text.indexOf("出生日期")!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
} else {
jQuery("#txtCardNum").show();
jQuery("#txtDate").hide();
}
} else if(pasType.indexOf('婴儿')!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
}
}
//加载。。。
jQuery(function () {
//初始化航空公司和卡号数
initArr(carryArr,maxCarryNum);
var IsEdit=jQuery("#Hid_IsEdit").val();
//单击旅客类型事件
jQuery("input[type='radio'][name='pastype']").click(PasTypeChange);
jQuery("#ddlCardType").change(CardTypeChange);
if(IsEdit=="1") {
//编辑
var CpyandNo=jQuery.trim(jQuery("#Hid_CpyandNo").val());
var Arr=CpyandNo.split('|');
var num=0;
var carryCode='',Card='';
var name='carry';
for(var i=0;i<Arr.length;i++) {
var carrArr=Arr[i].split(',');
if(carrArr.length==2) {
carryCode=carrArr[0].toUpperCase();
Card=carrArr[1];
if(i>0) {
//添加
num=addGroup(null,name);
}
//赋值
jQuery("#tab_"+name+" select[id='ddlCarryCode_"+num+"'] option[value='"+carryCode+"']").attr("selected",true);
jQuery("#tab_"+name+" #txtCarryCode_"+num).val(carryCode);
jQuery("#tab_"+name+" #txtAirNo_"+num).val(Card);
}
}
var Flyer=jQuery.trim(jQuery("#Hid_Flyer").val());
if(Flyer!=null) {
var model=eval("("+Flyer+")");
jQuery("#txtUser").val(model._name);
jQuery("#txtPhone").val(model._tel);
//证件类型
jQuery("select[id='ddlCardType']").val(model._certificatetype);
//乘客类型
jQuery("input[type='radio'][name='pastype'][value="+model._flyertype+"]").attr("checked",true);
CardTypeChange();
if(jQuery("#txtCardNum").is(":visible")) {
jQuery("#txtCardNum").val(model._certificatenum);
} else {
jQuery("#txtDate").val(model._certificatenum);
}
jQuery("input[type='radio'][name='sex'][value="+model._sex+"]").attr("checked",true);
var Birthday=GetStrDate(eval("new "+model._brontime.NewReplace("/","")+""),0);
jQuery("#txtBirthday").val(Birthday);
jQuery("#txtRemark").val(model._remark);
}
}
}) | -MM-dd
| identifier_name |
js_PassengerEdit.js | //对话框包含处理
function showdialog(t,f) {
jQuery("select").hide();
jQuery("#dialog").html(t);
jQuery("#dialog").dialog({
title: '提示',
bgiframe: true,
height: 180,
modal: true,
overlay: {
backgroundColor: '#000',
opacity: 0.5
},
close: function () {
jQuery("select").show();
},
buttons: {
'确定': function (evt) {
jQuery(this).dialog('close');
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
//代码
}
}
});
}
//扩展新方法
String.prototype.NewReplace=function (sourceData,replaceData) {
sourceData=sourceData.replace("(","\\(").replace(")","\\)");
var reg=new RegExp(sourceData,"ig");
var data=this.replace(reg,replaceData);
return data;
}
//padLeft(10,'0')
String.prototype.padLeft=function (length,char) {
var d=this;
var len=d.length;
while(len<length) {
d=char+d;
len++;
}
return d;
}
//padRight(10,'0')
String.prototype.padRight=function (length,char) {
var d=this;
var len=d.length;
while(len<length) {
d=d+char;
len++;
}
return d;
}
//日期已字符串形式显示
function GetStrDate(date,fg) {
var d1="";
if(fg==0) {//yyyy-MM-dd
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0'));
} else if(fg==1)//yyyy-MM-dd HH
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0'));
}
else if(fg==2)//yyyy-MM-dd HH:mm
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0'));
} else if(fg==3)//yyyy-MM-dd HH:mm:ss
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0')+":"+date.getSeconds().toString().padLeft(2,'0'));
}
return d1;
}
function initArr(arr,num) {
for(var i=1;i<=num;i++) {
eval("var obj={_"+i+":0}");
arr.push(obj);
}
return arr;
}
//重置序号状态
function resetFg(arr,val) {
for(var i=0;i<arr.length;i++) {
for(var m in arr[i]) {
arr[i][m]=val;
}
}
}
//设置序号状态
function setFg(arr,key,val) {
for(var i=0;i<arr.length;i++) {
for(var j in arr[i]) {
if(j=="_"+key) {
arr[i][j]=val;
}
}
}
}
//获取最小没有使用的序号
function getMinFg(arr) {
var index="0";
var istrue=false;
for(var i=0;i<arr.length;i++) { | }
for(var key in arr[i]) {
if(arr[i][key]=="0") {
index=key.replace("_","");
istrue=true;
break;
}
}
}
return index;
}
function ddlSetText(ddlObj,flag,num) {
var ddlVal=jQuery.trim(jQuery(ddlObj).val()).split('-')[0].toUpperCase();
jQuery("#"+flag+"_"+num).val(ddlVal);
}
function txtSetSel(txtObj,flag,num) {
var txtVal=jQuery(txtObj).val().toUpperCase();
if(txtVal!="") {
jQuery("#"+flag+"_"+num+" option[value*='"+txtVal+"']").attr("selected",true);
} else {
jQuery("#"+flag+"_"+num+" option").eq(0).attr("selected",true);
}
}
//最多可以添加航空公司和卡号数
var maxCarryNum=20;
var carryArr=[];
//添加一行
function addGroup(evt,name) {
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
}
var num=0;
//模板
var trHtml=jQuery("<div></div>").append(jQuery("#tab_"+name+" tr[id='tr"+name+"_0'").clone(true)).html();
var trCnt=jQuery("#tab_"+name+" tr[id*='tr"+name+"_']").length;
if(name=="carry") {
if(trCnt>=maxCarryNum) {
showdialog("已超过最大范围,不能继续添加了!");
return false;
}
//获取可用序号从1开始
num=getMinFg(carryArr);
//标记为已使用
setFg(carryArr,num,"1");
}
//操作内容
var opDiv='<div id="'+name+'_opdiv_'+num+'"> <span class="btn btn-ok-s"><input type="button" value="添加" id="btnAdd_'+num+'" onclick="return addGroup(event,\''+name+'\')" /></span>'+
' <span class="btn btn-ok-s"><input type="button" value="删除" id="btnDel_'+num+'" onclick="return removeGroup(event,\''+name+'\','+num+')" /></span></div>';
//设置操作内容HTML
trHtml="<tr id='tr"+name+"_"+num+"'>"+jQuery("<tr>"+trHtml+"</tr>").find("td:last").html(opDiv).parent().html()+"</tr>";
//替换id
trHtml=trHtml.NewReplace("_0","_"+num).NewReplace("txtSetSel(this,'ddlCarryCode',0)","txtSetSel(this,'ddlCarryCode',"+num+")").NewReplace("ddlSetText(this,'txtCarryCode',0)","ddlSetText(this,'txtCarryCode',"+num+")");
//添加节点
jQuery("#tab_"+name).append(trHtml);
//设置初始值
jQuery("#tab_"+name+" ddlCarryCode_"+num).eq(0).attr("selected",true);
jQuery("#tab_"+name+" txtCarryCode_"+num).val("");
jQuery("#tab_"+name+" txtAirNo_"+num).val("");
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",false);
}
return num;
}
//移除一行
function removeGroup(evt,name,num) {
if(num!=null) {
jQuery("#tab_"+name+" tr[id='tr"+name+"_"+num+"'").remove();
} else {
var trCount=jQuery("#tab_"+name+" tr").length;
if(trCount>1) {
//从后往前删除
var lastTr=jQuery("#tab_"+name+" tr:last");
num=lastTr.attr("id").NewReplace("tr"+name+"_","");
lastTr.remove();
} else {
showdialog("该行数据不能删除!");
return false;
}
}
//标记为没有使用
setFg(carryArr,num,"0");
return false;
}
//保存数据
function SaveData() {
var url="PassengerEdit.aspx";
var val_IsEdit=jQuery("#Hid_IsEdit").val();
var val_Name=jQuery.trim(jQuery("#txtUser").val());
var val_Phone=jQuery.trim(jQuery("#txtPhone").val());
var val_CardType=jQuery.trim(jQuery("#ddlCardType").val());
//var text_CardType=jQuery.trim(jQuery("#ddlCardType option:selected").text());
var val_CardNum=jQuery.trim(jQuery("#txtCardNum").val());
//var val_Date=jQuery.trim(jQuery("#txtDate").val());
if(jQuery("#txtDate").is(":visible")) {
val_CardNum=jQuery.trim(jQuery("#txtDate").val());
}
var val_sex=jQuery("input[type='radio'][name='sex']:checked").val();
var val_pastype=jQuery("input[type='radio'][name='pastype']:checked").val();
var val_Birthday=jQuery("#txtBirthday").val();
var val_Remark=jQuery("#txtRemark").val();
if(val_Name=="") {
showdialog("旅客姓名不能为空!");
return false;
}
if(val_Phone=="") {
showdialog("旅客手机号码不能为空!");
return false;
}
if(val_CardNum=="") {
showdialog("旅客证件号码不能为空!");
return false;
}
if(val_Birthday=="") {
showdialog("旅客出生日期不能为空!");
return false;
}
//验证航空公司卡号 暂时不验证
var val_CpyandNo="";
var msg="";
var carrNo=[];
jQuery("#tab_carry tr").each(function (index,tr) {
var carrCode=jQuery(tr).find("select[id*='ddlCarryCode_']").val();
var AirNo=jQuery.trim(jQuery(tr).find("input[id*='txtAirNo_']").val());
if(carrCode!=""&&AirNo=="") {
msg="航空公司卡号不能为空!";
return false;
}
carrNo.push(carrCode+","+AirNo);
});
if(msg!="") {
showdialog(msg);
return false;
}
if(carrNo!=null&&carrNo.length>0) {
val_CpyandNo=carrNo.join('|');
}
var param={
IsEdit: escape(val_IsEdit),
Name: escape(val_Name),
Phone: escape(val_Phone),
CardType: escape(val_CardType),
CardNum: escape(val_CardNum),
Sex: escape(val_sex),
Pastype: escape(val_pastype),
Birthday: escape(val_Birthday),
Remark: escape(val_Remark),
CpyandNo: escape(val_CpyandNo),
save: "save",
num: Math.random(),
currentuserid: jQuery("#currentuserid").val()
};
if(val_IsEdit=="1") {
//编辑
var Id=jQuery("#Hid_id").val();
param.Id=jQuery("#Hid_id").val();
}
jQuery.post(url,param,function (data) {
if(jQuery.trim(data)!="") {
var strArr=data.split('@@');
if(strArr.length==2) {
if(strArr[0]=="1") {
showdialog(strArr[1]);
} else {
showdialog(strArr[1]);
}
}
} else {
showdialog("操作失败!");
}
},"text");
return false;
}
//选择旅客类型
function PasTypeChange() {
var text=jQuery(this).attr('txt');
var val=jQuery(this).val();
var opData=jQuery.trim(jQuery("#Hid_CardData").val()).split('|');
var ophtml=[];
var opArr=[];
for(var i=0;i<opData.length;i++) {
opArr=opData[i].split('@@');
if(text.indexOf('成人')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
} else if(text.indexOf('儿童')!= -1) {
if(opData[i].indexOf('身份证')!= -1||opData[i].indexOf('出生日期')!= -1||opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
} else if(text.indexOf('婴儿')!= -1) {
if(opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
}
}
jQuery("#ddlCardType").html(ophtml.join(''));
jQuery("#ddlCardType option:visible").eq(0).attr("selected",true);
CardTypeChange();
}
//选择证件类型
function CardTypeChange() {
var val=jQuery(this).val();
var text=jQuery("#ddlCardType option:selected").text();
var pasType=jQuery("input[type='radio'][name='pastype']:checked").attr("txt");
if(pasType.indexOf('成人')!= -1) {
jQuery("#txtCardNum").show();
jQuery("#txtDate").hide();
} else if(pasType.indexOf('儿童')!= -1) {
if(text.indexOf("出生日期")!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
} else {
jQuery("#txtCardNum").show();
jQuery("#txtDate").hide();
}
} else if(pasType.indexOf('婴儿')!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
}
}
//加载。。。
jQuery(function () {
//初始化航空公司和卡号数
initArr(carryArr,maxCarryNum);
var IsEdit=jQuery("#Hid_IsEdit").val();
//单击旅客类型事件
jQuery("input[type='radio'][name='pastype']").click(PasTypeChange);
jQuery("#ddlCardType").change(CardTypeChange);
if(IsEdit=="1") {
//编辑
var CpyandNo=jQuery.trim(jQuery("#Hid_CpyandNo").val());
var Arr=CpyandNo.split('|');
var num=0;
var carryCode='',Card='';
var name='carry';
for(var i=0;i<Arr.length;i++) {
var carrArr=Arr[i].split(',');
if(carrArr.length==2) {
carryCode=carrArr[0].toUpperCase();
Card=carrArr[1];
if(i>0) {
//添加
num=addGroup(null,name);
}
//赋值
jQuery("#tab_"+name+" select[id='ddlCarryCode_"+num+"'] option[value='"+carryCode+"']").attr("selected",true);
jQuery("#tab_"+name+" #txtCarryCode_"+num).val(carryCode);
jQuery("#tab_"+name+" #txtAirNo_"+num).val(Card);
}
}
var Flyer=jQuery.trim(jQuery("#Hid_Flyer").val());
if(Flyer!=null) {
var model=eval("("+Flyer+")");
jQuery("#txtUser").val(model._name);
jQuery("#txtPhone").val(model._tel);
//证件类型
jQuery("select[id='ddlCardType']").val(model._certificatetype);
//乘客类型
jQuery("input[type='radio'][name='pastype'][value="+model._flyertype+"]").attr("checked",true);
CardTypeChange();
if(jQuery("#txtCardNum").is(":visible")) {
jQuery("#txtCardNum").val(model._certificatenum);
} else {
jQuery("#txtDate").val(model._certificatenum);
}
jQuery("input[type='radio'][name='sex'][value="+model._sex+"]").attr("checked",true);
var Birthday=GetStrDate(eval("new "+model._brontime.NewReplace("/","")+""),0);
jQuery("#txtBirthday").val(Birthday);
jQuery("#txtRemark").val(model._remark);
}
}
}) | if(istrue) {
break; | random_line_split |
js_PassengerEdit.js | //对话框包含处理
function showdialog(t,f) {
jQuery("select").hide();
jQuery("#dialog").html(t);
jQuery("#dialog").dialog({
title: '提示',
bgiframe: true,
height: 180,
modal: true,
overlay: {
backgroundColor: '#000',
opacity: 0.5
},
close: function () {
jQuery("select").show();
},
buttons: {
'确定': function (evt) {
jQuery(this).dialog('close');
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
//代码
}
}
});
}
//扩展新方法
String.prototype.NewReplace=function (sourceData,replaceData) {
sourceData=sourceData.replace("(","\\(").replace(")","\\)");
var reg=new RegExp(sourceData,"ig");
var data=this.replace(reg,replaceData);
return data;
}
//padLeft(10,'0')
String.prototype.padLeft=function (length,char) {
var d=this;
var len=d.length;
while(len<length) {
d=char+d;
len++;
}
return d;
}
//padRight(10,'0')
String.prototype.padRight=function (length,char) {
var d=this;
var len=d.length;
while(len<length) {
d=d+char;
len++;
}
return d;
}
//日期已字符串形式显示
function GetStrDate(date,fg) {
var d1="";
if(fg==0) {//yyyy-MM-dd
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0'));
} else if(fg==1)//yyyy-MM-dd HH
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0'));
}
else if(fg==2)//yyyy-MM-dd HH:mm
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0'));
} else if(fg==3)//yyyy-MM-dd HH:mm:ss
{
d1=(date.getFullYear()+"-"+(date.getMonth()+1).toString().padLeft(2,'0')+"-"+date.getDate().toString().padLeft(2,'0')+" "+date.getHours().toString().padLeft(2,'0')+":"+date.getMinutes().toString().padLeft(2,'0')+":"+date.getSeconds().toString().padLeft(2,'0'));
}
return d1;
}
function initArr(arr,num) {
for(var i=1;i<=num;i++) {
eval("var obj={_"+i+":0}");
arr.push(obj);
}
return arr;
}
//重置序号状态
function resetFg(arr,val) {
for(var i=0;i<arr.length;i++) {
for(var m in arr[i]) {
arr[i][m]=val;
}
}
}
//设置序号状态
function setFg(arr,key,val) {
for(var i=0;i<arr.length;i++) {
for(var j in arr[i]) {
if(j=="_"+key) {
arr[i][j]=val;
}
}
}
}
//获取最小没有使用的序号
function getMinFg(arr) {
var index="0";
var istrue=false;
for(var i=0;i<arr.length;i++) {
if(istrue) {
break;
}
for(var key in arr[i]) {
if(arr[i][key]=="0") {
index=key.replace("_","");
istrue=true;
break;
}
}
}
return index;
}
function ddlSetText(ddlObj,flag,num) {
var ddlVal=jQuery.trim(jQuery(ddlObj).val()).split('-')[0].toUpperCase();
jQuery("#"+flag+"_"+num).val(ddlVal);
}
function txtSetSel(txtObj,flag,num) {
var txtVal=jQuery(txtObj).val().toUpperCase();
if(txtVal!="") {
jQuery("#"+flag+"_"+num+" option[value*='"+txtVal+"']").attr("selected",true);
} else {
jQuery("#"+flag+"_"+num+" option").eq(0).attr("selected",true);
}
}
//最多可以添加航空公司和卡号数
var maxCarryNum=20;
var carryArr=[];
//添加一行
function addGroup(evt,name) {
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",true);
}
var num=0;
//模板
var trHtml=jQuery("<div></div>").append(jQuery("#tab_"+name+" tr[id='tr"+name+"_0'").clone(true)).html();
var trCnt=jQuery("#tab_"+name+" tr[id*='tr"+name+"_']").length;
if(name=="carry") {
if(trCnt>=maxCarryNum) {
showdialog("已超过最大范围,不能继续添加了!");
return false;
}
//获取可用序号从1开始
num=getMinFg(carryArr);
//标记为已使用
setFg(carryArr,num,"1");
}
//操作内容
var opDiv='<div id="'+name+'_opdiv_'+num+'"> <span class="btn btn-ok-s"><input type="button" value="添加" id="btnAdd_'+num+'" onclick="return addGroup(event,\''+name+'\')" /></span>'+
' <span class="btn btn-ok-s"><input type="button" value="删除" id="btnDel_'+num+'" onclick="return removeGroup(event,\''+name+'\','+num+')" /></span></div>';
//设置操作内容HTML
trHtml="<tr id='tr"+name+"_"+num+"'>"+jQuery("<tr>"+trHtml+"</tr>").find("td:last").html(opDiv).parent().html()+"</tr>";
//替换id
trHtml=trHtml.NewReplace("_0","_"+num).NewReplace("txtSetSel(this,'ddlCarryCode',0)","txtSetSel(this,'ddlCarryCode',"+num+")").NewReplace("ddlSetText(this,'txtCarryCode',0)","ddlSetText(this,'txtCarryCode',"+num+")");
//添加节点
jQuery("#tab_"+name).append(trHtml);
//设置初始值
jQuery("#tab_"+name+" ddlCarryCode_"+num).eq(0).attr("selected",true);
jQuery("#tab_"+name+" txtCarryCode_"+num).val("");
jQuery("#tab_"+name+" txtAirNo_"+num).val("");
if(evt!=null) {
var target=evt.srcElement?evt.srcElement:evt.target;
jQuery(target).attr("disabled",false);
}
return num;
}
//移除一行
function removeGroup(evt,name,num) {
if(num!=null) {
jQuery("#tab_"+name+" tr[id='tr"+name+"_"+num+"'").remove();
} else {
var trCount=jQuery("#tab_"+name+" tr").length;
if(trCount>1) {
//从后往前删除
var lastTr=jQuery("#tab_"+name+" tr:last");
num=lastTr.attr("id").NewReplace("tr"+name+"_","");
lastTr.remove();
} else {
showdialog("该行数据不能删除!");
return false;
}
}
//标记为没有使用
setFg(carryArr,num,"0");
return false;
}
//保存数据
function SaveData() {
var url="PassengerEdit.aspx";
var val_IsEdit=jQuery("#Hid_IsEdit").val();
var val_Name=jQuery.trim(jQuery("#txtUser").val());
var val_Phone=jQuery.trim(jQuery("#txtPhone").val());
var val_CardType=jQuery.trim(jQuery("#ddlCardType").val());
//var text_CardType=jQuery.trim(jQuery("#ddlCardType option:selected").text());
var val_CardNum=jQuery.trim(jQuery("#txtCardNum").val());
//var val_Date=jQuery.trim(jQuery("#txtDate").val());
if(jQuery("#txtDate").is(":visible")) {
val_CardNum=jQuery.trim(jQuery("#txtDate").val());
}
var val_sex=jQuery("input[type='radio'][name='sex']:checked").val();
var val_pastype=jQuery("input[type='radio'][name='pastype']:checked").val();
var val_Birthday=jQuery("#txtBirthday").val();
var val_Remark=jQuery("#txtRemark").val();
if(val_Name=="") {
showdialog("旅客姓名不能为空!");
return false;
}
if(val_Phone=="") {
showdialog("旅客手机号码不能为空!");
return false;
}
if(val_CardNum=="") {
showdialog("旅客证件号码不能为空!");
return false;
}
if(val_Birthday=="") {
showdialog("旅客出生日期不能为空!");
return false;
}
//验证航空公司卡号 暂时不验证
var val_CpyandNo="";
var msg="";
var carrNo=[];
jQuery("#tab_carry tr").each(function (index,tr) {
var carrCode=jQuery(tr).find("select[id*='ddlCarryCode_']").val();
var AirNo=jQuery.trim(jQuery(tr).find("input[id*='txtAirNo_']").val());
if(carrCode!=""&&AirNo=="") {
msg="航空公司卡号不能为空!";
return false;
}
carrNo.push(carrCode+","+AirNo);
});
if(msg!="") {
showdialog(msg);
return false;
}
if(carrNo!=null&&carrNo.length>0) {
val_CpyandNo=carrNo.join('|');
}
var param={
IsEdit: escape(val_IsEdit),
Name: escape(val_Name),
Phone: escape(val_Phone),
CardType: escape(val_CardType),
CardNum: escape(val_CardNum),
Sex: escape(val_sex),
Pastype: escape(val_pastype),
Birthday: escape(val_Birthday),
Remark: escape(val_Remark),
CpyandNo: escape(val_CpyandNo),
save: "save",
num: Math.random(),
currentuserid: jQuery("#currentuserid").val()
};
if(val_IsEdit=="1") {
//编辑
var Id=jQuery("#Hid_id").val();
param.Id=jQuery("#Hid_id").val();
}
jQuery.post(url,param,function (data) {
if(jQuery.trim(data)!="") {
var strArr=data.split('@@');
if(strArr.length==2) {
if(strArr[0]=="1") {
showdialog(strArr[1]);
} else {
showdialog(strArr[1]);
}
}
} else {
showdialog("操作失败!");
}
},"text");
return false;
}
//选择旅客类型
function PasTypeChange() {
var text=jQuery(this).attr('txt');
var val=jQuery(this).val();
var opData=jQuery.trim(jQuery("#Hid_CardData").val()).split('|');
var ophtml=[];
var opArr=[];
for(var i=0;i<opData.length;i++) {
opArr=opData[i].split('@@');
if(text.indexOf('成人')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
} else if(text.indexOf('儿童')!= -1) {
if(opData[i].indexOf('身份证')!= -1||opData[i].indexOf('出生日期')!= -1||opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
} else if(text.indexOf('婴儿')!= -1) {
if(opData[i].indexOf('其他有效证件')!= -1) {
ophtml.push('<option value="'+opArr[1]+'">'+opArr[0]+'</option>');
}
}
}
jQuery("#ddlCardType").html(ophtml.join(''));
jQuery("#ddlCardType option:visible").eq(0).attr("selected",true);
CardTypeChange();
}
//选择证件类型
function CardTypeChange() {
var val=jQuery(this).val();
var text=jQuery("#ddlCardType option:selected").text();
var pasType=jQuery("input[type='radio'][name='pastype']:checked").attr("txt");
if(pasType.indexOf('成人')!= -1) {
jQuery("#txtCardNum").show();
jQuery("#txtDate").hide();
} else if(pasType.indexOf('儿童')!= -1) {
if(text.indexOf("出生日期")!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
} else {
jQuery("#txtCardNum").show();
jQuery("#tx | ].split(',');
if(carrArr.length==2) {
carryCode=carrArr[0].toUpperCase();
Card=carrArr[1];
if(i>0) {
//添加
num=addGroup(null,name);
}
//赋值
jQuery("#tab_"+name+" select[id='ddlCarryCode_"+num+"'] option[value='"+carryCode+"']").attr("selected",true);
jQuery("#tab_"+name+" #txtCarryCode_"+num).val(carryCode);
jQuery("#tab_"+name+" #txtAirNo_"+num).val(Card);
}
}
var Flyer=jQuery.trim(jQuery("#Hid_Flyer").val());
if(Flyer!=null) {
var model=eval("("+Flyer+")");
jQuery("#txtUser").val(model._name);
jQuery("#txtPhone").val(model._tel);
//证件类型
jQuery("select[id='ddlCardType']").val(model._certificatetype);
//乘客类型
jQuery("input[type='radio'][name='pastype'][value="+model._flyertype+"]").attr("checked",true);
CardTypeChange();
if(jQuery("#txtCardNum").is(":visible")) {
jQuery("#txtCardNum").val(model._certificatenum);
} else {
jQuery("#txtDate").val(model._certificatenum);
}
jQuery("input[type='radio'][name='sex'][value="+model._sex+"]").attr("checked",true);
var Birthday=GetStrDate(eval("new "+model._brontime.NewReplace("/","")+""),0);
jQuery("#txtBirthday").val(Birthday);
jQuery("#txtRemark").val(model._remark);
}
}
}) | tDate").hide();
}
} else if(pasType.indexOf('婴儿')!= -1) {
jQuery("#txtCardNum").hide();
jQuery("#txtDate").show();
}
}
//加载。。。
jQuery(function () {
//初始化航空公司和卡号数
initArr(carryArr,maxCarryNum);
var IsEdit=jQuery("#Hid_IsEdit").val();
//单击旅客类型事件
jQuery("input[type='radio'][name='pastype']").click(PasTypeChange);
jQuery("#ddlCardType").change(CardTypeChange);
if(IsEdit=="1") {
//编辑
var CpyandNo=jQuery.trim(jQuery("#Hid_CpyandNo").val());
var Arr=CpyandNo.split('|');
var num=0;
var carryCode='',Card='';
var name='carry';
for(var i=0;i<Arr.length;i++) {
var carrArr=Arr[i | identifier_body |
translator.ts | declare const Zotero: any
declare const ZOTERO_TRANSLATOR_INFO: any
import { defaults } from '../../content/prefs-meta'
import { client } from '../../content/client'
import { ZoteroTranslator } from '../../gen/typings/serialized-item'
import type { Preferences } from '../../gen/preferences'
type TranslatorMode = 'export' | 'import'
const cacheDisabler = new class {
get(target, property) {
// collections: jabref 4 stores collection info inside the reference, and collection info depends on which part of your library you're exporting
if (['collections'].includes(property)) target.cachable = false
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return target[property]
}
}
type TranslatorHeader = {
translatorID: string
translatorType: number
label: string
description: string
creator: string
target: string
minVersion: string
maxVersion: string
priority: number
inRepository: boolean
lastUpdated: string
browserSupport: string
displayOptions: {
exportNotes: boolean
exportFileData: boolean
useJournalAbbreviation: boolean
keepUpdated: boolean
quickCopyMode: string
Title: boolean
Authors: boolean
Year: boolean
Normalize: boolean
}
configOptions: {
getCollections: boolean
async: boolean
}
}
export const Translator = new class implements ITranslator { // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public preferences: Preferences
public skipFields: string[]
public skipField: Record<string, boolean>
public verbatimFields?: string[]
public csquotes: { open: string, close: string }
public export: { dir: string, path: string } = {
dir: undefined,
path: undefined,
}
public options: {
quickCopyMode?: string
dropAttachments?: boolean
exportNotes?: boolean
exportFileData?: boolean
useJournalAbbreviation?: boolean
keepUpdated?: boolean
Title?: boolean
Authors?: boolean
Year?: boolean
Normalize?: boolean
}
public BetterBibLaTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterTeX: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLYAML?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSL?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXCitationKeyQuickCopy?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Citationgraph?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Collectednotes?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
// public TeX: boolean
// public CSL: boolean
private cachable: boolean
public cache: {
hits: number
misses: number
}
public header: TranslatorHeader
public collections: Record<string, ZoteroTranslator.Collection>
private sortedItems: ZoteroTranslator.Item[]
private currentItem: ZoteroTranslator.Item
public isJurisM: boolean
public isZotero: boolean
public unicode: boolean
public platform: string
public paths: {
caseSensitive: boolean
sep: string
}
public stringCompare: (a: string, b: string) => number
public initialized = false
constructor() {
this.header = (ZOTERO_TRANSLATOR_INFO as TranslatorHeader)
this[this.header.label.replace(/[^a-z]/ig, '')] = true
this.BetterTeX = this.BetterBibTeX || this.BetterBibLaTeX
this.BetterCSL = this.BetterCSLJSON || this.BetterCSLYAML
this.preferences = defaults
this.options = this.header.displayOptions || {}
const collator = new Intl.Collator('en')
this.stringCompare = (collator.compare.bind(collator) as (left: string, right: string) => number)
}
public get exportDir(): string {
this.currentItem.cachable = false
return this.export.dir
}
public get exportPath(): string {
this.currentItem.cachable = false
return this.export.path
}
private | (field: string): string {
field = field.trim()
if (field.startsWith('bibtex.')) return this.BetterBibTeX ? field.replace(/^bibtex\./, '') : ''
if (field.startsWith('biblatex.')) return this.BetterBibLaTeX ? field.replace(/^biblatex\./, '') : ''
return field
}
public init(mode: TranslatorMode) {
this.platform = (Zotero.getHiddenPref('better-bibtex.platform') as string)
this.isJurisM = client === 'jurism'
this.isZotero = !this.isJurisM
this.paths = {
caseSensitive: this.platform !== 'mac' && this.platform !== 'win',
sep: this.platform === 'win' ? '\\' : '/',
}
for (const key in this.options) {
if (typeof this.options[key] === 'boolean') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = !!Zotero.getOption(key)
}
else {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = Zotero.getOption(key)
}
}
// special handling
if (mode === 'export') {
this.cache = {
hits: 0,
misses: 0,
}
this.export = {
dir: (Zotero.getOption('exportDir') as string),
path: (Zotero.getOption('exportPath') as string),
}
if (this.export.dir && this.export.dir.endsWith(this.paths.sep)) this.export.dir = this.export.dir.slice(0, -1)
}
for (const pref of Object.keys(this.preferences)) {
let value
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
value = Zotero.getOption(`preference_${pref}`)
}
catch (err) {
value = undefined
}
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
if (typeof value === 'undefined') value = Zotero.getHiddenPref(`better-bibtex.${pref}`)
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.preferences[pref] = value
}
// special handling
this.skipFields = this.preferences.skipFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
this.skipField = this.skipFields.reduce((acc, field) => { acc[field] = true; return acc }, {})
this.verbatimFields = this.preferences.verbatimFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
if (!this.verbatimFields.length) this.verbatimFields = null
this.csquotes = this.preferences.csquotes ? { open: this.preferences.csquotes[0], close: this.preferences.csquotes[1] } : null
this.preferences.testing = (Zotero.getHiddenPref('better-bibtex.testing') as boolean)
if (mode === 'export') {
this.unicode = (this.BetterBibTeX && !Translator.preferences.asciiBibTeX) || (this.BetterBibLaTeX && !Translator.preferences.asciiBibLaTeX)
// when exporting file data you get relative paths, when not, you get absolute paths, only one version can go into the cache
// relative file paths are going to be different based on the file being exported to
this.cachable = !(this.options.exportFileData || this.preferences.relativeFilePaths)
}
this.collections = {}
if (mode === 'export' && this.header.configOptions?.getCollections && Zotero.nextCollection) {
let collection: any
while (collection = Zotero.nextCollection()) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const children = collection.children || collection.descendents || []
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const key = (collection.primary ? collection.primary : collection).key
this.collections[key] = {
// id: collection.id,
key,
parent: collection.fields.parentKey,
name: collection.name,
items: collection.childItems,
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
collections: children.filter(coll => coll.type === 'collection').map(coll => coll.key),
// items: (item.itemID for item in children when item.type != 'collection')
// descendents: undefined
// children: undefined
// childCollections: undefined
// primary: undefined
// fields: undefined
// type: undefined
// level: undefined
}
}
for (collection of Object.values(this.collections)) {
if (collection.parent && !this.collections[collection.parent]) {
// collection.parent = false
delete collection.parent
Zotero.debug(`BBT translator: collection with key ${collection.key} has non-existent parent ${collection.parent}, assuming root collection`)
}
}
}
this.initialized = true
}
public items(): ZoteroTranslator.Item[] {
if (!this.sortedItems) {
this.sortedItems = []
let item: ZoteroTranslator.Item
while (item = (Zotero.nextItem() as ZoteroTranslator.Item)) {
item.cachable = this.cachable
item.journalAbbreviation = item.journalAbbreviation || item.autoJournalAbbreviation
this.sortedItems.push(new Proxy(item, cacheDisabler))
}
// fallback to itemType.itemID for notes and attachments. And some items may have duplicate keys
this.sortedItems.sort((a, b) => {
const ka = [ a.citationKey || a.itemType, a.dateModified || a.dateAdded, a.itemID ].join('\t')
const kb = [ b.citationKey || b.itemType, b.dateModified || b.dateAdded, b.itemID ].join('\t')
return ka.localeCompare(kb, undefined, { sensitivity: 'base' })
})
}
return this.sortedItems
}
public nextItem() {
return (this.currentItem = this.items().shift())
}
}
| typefield | identifier_name |
translator.ts | declare const Zotero: any
declare const ZOTERO_TRANSLATOR_INFO: any
import { defaults } from '../../content/prefs-meta'
import { client } from '../../content/client'
import { ZoteroTranslator } from '../../gen/typings/serialized-item'
import type { Preferences } from '../../gen/preferences'
type TranslatorMode = 'export' | 'import'
const cacheDisabler = new class {
get(target, property) {
// collections: jabref 4 stores collection info inside the reference, and collection info depends on which part of your library you're exporting
if (['collections'].includes(property)) target.cachable = false
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return target[property]
}
}
type TranslatorHeader = {
translatorID: string
translatorType: number
label: string
description: string
creator: string
target: string
minVersion: string
maxVersion: string
priority: number
inRepository: boolean
lastUpdated: string
browserSupport: string
displayOptions: {
exportNotes: boolean
exportFileData: boolean
useJournalAbbreviation: boolean
keepUpdated: boolean
quickCopyMode: string
Title: boolean
Authors: boolean
Year: boolean
Normalize: boolean
}
configOptions: {
getCollections: boolean
async: boolean
}
}
export const Translator = new class implements ITranslator { // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public preferences: Preferences
public skipFields: string[]
public skipField: Record<string, boolean>
public verbatimFields?: string[]
public csquotes: { open: string, close: string }
public export: { dir: string, path: string } = {
dir: undefined,
path: undefined,
}
public options: {
quickCopyMode?: string
dropAttachments?: boolean
exportNotes?: boolean
exportFileData?: boolean
useJournalAbbreviation?: boolean
keepUpdated?: boolean
Title?: boolean
Authors?: boolean
Year?: boolean
Normalize?: boolean
}
public BetterBibLaTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterTeX: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLYAML?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSL?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXCitationKeyQuickCopy?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Citationgraph?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Collectednotes?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
// public TeX: boolean
// public CSL: boolean
private cachable: boolean
public cache: {
hits: number
misses: number
}
public header: TranslatorHeader
public collections: Record<string, ZoteroTranslator.Collection>
private sortedItems: ZoteroTranslator.Item[]
private currentItem: ZoteroTranslator.Item
public isJurisM: boolean
public isZotero: boolean
public unicode: boolean
public platform: string
public paths: {
caseSensitive: boolean
sep: string
}
public stringCompare: (a: string, b: string) => number
public initialized = false
constructor() {
this.header = (ZOTERO_TRANSLATOR_INFO as TranslatorHeader)
this[this.header.label.replace(/[^a-z]/ig, '')] = true
this.BetterTeX = this.BetterBibTeX || this.BetterBibLaTeX
this.BetterCSL = this.BetterCSLJSON || this.BetterCSLYAML
this.preferences = defaults
this.options = this.header.displayOptions || {}
const collator = new Intl.Collator('en')
this.stringCompare = (collator.compare.bind(collator) as (left: string, right: string) => number)
}
public get exportDir(): string {
this.currentItem.cachable = false
return this.export.dir
}
public get exportPath(): string {
this.currentItem.cachable = false
return this.export.path
}
private typefield(field: string): string {
field = field.trim()
if (field.startsWith('bibtex.')) return this.BetterBibTeX ? field.replace(/^bibtex\./, '') : ''
if (field.startsWith('biblatex.')) return this.BetterBibLaTeX ? field.replace(/^biblatex\./, '') : ''
return field
}
public init(mode: TranslatorMode) {
this.platform = (Zotero.getHiddenPref('better-bibtex.platform') as string)
this.isJurisM = client === 'jurism'
this.isZotero = !this.isJurisM
this.paths = {
caseSensitive: this.platform !== 'mac' && this.platform !== 'win',
sep: this.platform === 'win' ? '\\' : '/',
}
for (const key in this.options) {
if (typeof this.options[key] === 'boolean') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = !!Zotero.getOption(key)
}
else {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = Zotero.getOption(key)
}
}
// special handling
if (mode === 'export') {
this.cache = {
hits: 0,
misses: 0,
}
this.export = {
dir: (Zotero.getOption('exportDir') as string),
path: (Zotero.getOption('exportPath') as string),
}
if (this.export.dir && this.export.dir.endsWith(this.paths.sep)) this.export.dir = this.export.dir.slice(0, -1)
}
for (const pref of Object.keys(this.preferences)) {
let value
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
value = Zotero.getOption(`preference_${pref}`)
}
catch (err) {
value = undefined
}
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
if (typeof value === 'undefined') value = Zotero.getHiddenPref(`better-bibtex.${pref}`)
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.preferences[pref] = value
}
// special handling
this.skipFields = this.preferences.skipFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
this.skipField = this.skipFields.reduce((acc, field) => { acc[field] = true; return acc }, {})
this.verbatimFields = this.preferences.verbatimFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
if (!this.verbatimFields.length) this.verbatimFields = null
this.csquotes = this.preferences.csquotes ? { open: this.preferences.csquotes[0], close: this.preferences.csquotes[1] } : null
this.preferences.testing = (Zotero.getHiddenPref('better-bibtex.testing') as boolean)
if (mode === 'export') {
this.unicode = (this.BetterBibTeX && !Translator.preferences.asciiBibTeX) || (this.BetterBibLaTeX && !Translator.preferences.asciiBibLaTeX)
// when exporting file data you get relative paths, when not, you get absolute paths, only one version can go into the cache
// relative file paths are going to be different based on the file being exported to
this.cachable = !(this.options.exportFileData || this.preferences.relativeFilePaths)
}
this.collections = {}
if (mode === 'export' && this.header.configOptions?.getCollections && Zotero.nextCollection) |
this.initialized = true
}
public items(): ZoteroTranslator.Item[] {
if (!this.sortedItems) {
this.sortedItems = []
let item: ZoteroTranslator.Item
while (item = (Zotero.nextItem() as ZoteroTranslator.Item)) {
item.cachable = this.cachable
item.journalAbbreviation = item.journalAbbreviation || item.autoJournalAbbreviation
this.sortedItems.push(new Proxy(item, cacheDisabler))
}
// fallback to itemType.itemID for notes and attachments. And some items may have duplicate keys
this.sortedItems.sort((a, b) => {
const ka = [ a.citationKey || a.itemType, a.dateModified || a.dateAdded, a.itemID ].join('\t')
const kb = [ b.citationKey || b.itemType, b.dateModified || b.dateAdded, b.itemID ].join('\t')
return ka.localeCompare(kb, undefined, { sensitivity: 'base' })
})
}
return this.sortedItems
}
public nextItem() {
return (this.currentItem = this.items().shift())
}
}
| {
let collection: any
while (collection = Zotero.nextCollection()) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const children = collection.children || collection.descendents || []
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const key = (collection.primary ? collection.primary : collection).key
this.collections[key] = {
// id: collection.id,
key,
parent: collection.fields.parentKey,
name: collection.name,
items: collection.childItems,
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
collections: children.filter(coll => coll.type === 'collection').map(coll => coll.key),
// items: (item.itemID for item in children when item.type != 'collection')
// descendents: undefined
// children: undefined
// childCollections: undefined
// primary: undefined
// fields: undefined
// type: undefined
// level: undefined
}
}
for (collection of Object.values(this.collections)) {
if (collection.parent && !this.collections[collection.parent]) {
// collection.parent = false
delete collection.parent
Zotero.debug(`BBT translator: collection with key ${collection.key} has non-existent parent ${collection.parent}, assuming root collection`)
}
}
} | conditional_block |
translator.ts | declare const Zotero: any
declare const ZOTERO_TRANSLATOR_INFO: any
import { defaults } from '../../content/prefs-meta'
import { client } from '../../content/client'
import { ZoteroTranslator } from '../../gen/typings/serialized-item'
import type { Preferences } from '../../gen/preferences'
type TranslatorMode = 'export' | 'import'
const cacheDisabler = new class {
get(target, property) {
// collections: jabref 4 stores collection info inside the reference, and collection info depends on which part of your library you're exporting
if (['collections'].includes(property)) target.cachable = false
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return target[property]
}
}
type TranslatorHeader = {
translatorID: string
translatorType: number
label: string
description: string
creator: string
target: string
minVersion: string
maxVersion: string
priority: number
inRepository: boolean
lastUpdated: string
browserSupport: string
displayOptions: {
exportNotes: boolean
exportFileData: boolean
useJournalAbbreviation: boolean
keepUpdated: boolean
quickCopyMode: string
Title: boolean
Authors: boolean
Year: boolean
Normalize: boolean
}
configOptions: {
getCollections: boolean
async: boolean
}
}
export const Translator = new class implements ITranslator { // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public preferences: Preferences
public skipFields: string[]
public skipField: Record<string, boolean>
public verbatimFields?: string[]
public csquotes: { open: string, close: string }
public export: { dir: string, path: string } = {
dir: undefined,
path: undefined,
}
public options: {
quickCopyMode?: string
dropAttachments?: boolean
exportNotes?: boolean
exportFileData?: boolean
useJournalAbbreviation?: boolean
keepUpdated?: boolean
Title?: boolean
Authors?: boolean
Year?: boolean
Normalize?: boolean
}
public BetterBibLaTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterTeX: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLYAML?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSL?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXCitationKeyQuickCopy?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Citationgraph?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Collectednotes?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
// public TeX: boolean
// public CSL: boolean
private cachable: boolean
public cache: {
hits: number
misses: number
}
public header: TranslatorHeader
public collections: Record<string, ZoteroTranslator.Collection>
private sortedItems: ZoteroTranslator.Item[]
private currentItem: ZoteroTranslator.Item
public isJurisM: boolean
public isZotero: boolean
public unicode: boolean
public platform: string
public paths: {
caseSensitive: boolean
sep: string
}
public stringCompare: (a: string, b: string) => number
public initialized = false
constructor() {
this.header = (ZOTERO_TRANSLATOR_INFO as TranslatorHeader)
this[this.header.label.replace(/[^a-z]/ig, '')] = true
this.BetterTeX = this.BetterBibTeX || this.BetterBibLaTeX
this.BetterCSL = this.BetterCSLJSON || this.BetterCSLYAML
this.preferences = defaults
this.options = this.header.displayOptions || {}
const collator = new Intl.Collator('en')
this.stringCompare = (collator.compare.bind(collator) as (left: string, right: string) => number)
}
public get exportDir(): string {
this.currentItem.cachable = false
return this.export.dir
}
public get exportPath(): string {
this.currentItem.cachable = false
return this.export.path
}
private typefield(field: string): string {
field = field.trim()
if (field.startsWith('bibtex.')) return this.BetterBibTeX ? field.replace(/^bibtex\./, '') : ''
if (field.startsWith('biblatex.')) return this.BetterBibLaTeX ? field.replace(/^biblatex\./, '') : ''
return field
}
public init(mode: TranslatorMode) {
this.platform = (Zotero.getHiddenPref('better-bibtex.platform') as string)
this.isJurisM = client === 'jurism'
this.isZotero = !this.isJurisM
this.paths = { |
for (const key in this.options) {
if (typeof this.options[key] === 'boolean') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = !!Zotero.getOption(key)
}
else {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = Zotero.getOption(key)
}
}
// special handling
if (mode === 'export') {
this.cache = {
hits: 0,
misses: 0,
}
this.export = {
dir: (Zotero.getOption('exportDir') as string),
path: (Zotero.getOption('exportPath') as string),
}
if (this.export.dir && this.export.dir.endsWith(this.paths.sep)) this.export.dir = this.export.dir.slice(0, -1)
}
for (const pref of Object.keys(this.preferences)) {
let value
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
value = Zotero.getOption(`preference_${pref}`)
}
catch (err) {
value = undefined
}
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
if (typeof value === 'undefined') value = Zotero.getHiddenPref(`better-bibtex.${pref}`)
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.preferences[pref] = value
}
// special handling
this.skipFields = this.preferences.skipFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
this.skipField = this.skipFields.reduce((acc, field) => { acc[field] = true; return acc }, {})
this.verbatimFields = this.preferences.verbatimFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
if (!this.verbatimFields.length) this.verbatimFields = null
this.csquotes = this.preferences.csquotes ? { open: this.preferences.csquotes[0], close: this.preferences.csquotes[1] } : null
this.preferences.testing = (Zotero.getHiddenPref('better-bibtex.testing') as boolean)
if (mode === 'export') {
this.unicode = (this.BetterBibTeX && !Translator.preferences.asciiBibTeX) || (this.BetterBibLaTeX && !Translator.preferences.asciiBibLaTeX)
// when exporting file data you get relative paths, when not, you get absolute paths, only one version can go into the cache
// relative file paths are going to be different based on the file being exported to
this.cachable = !(this.options.exportFileData || this.preferences.relativeFilePaths)
}
this.collections = {}
if (mode === 'export' && this.header.configOptions?.getCollections && Zotero.nextCollection) {
let collection: any
while (collection = Zotero.nextCollection()) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const children = collection.children || collection.descendents || []
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const key = (collection.primary ? collection.primary : collection).key
this.collections[key] = {
// id: collection.id,
key,
parent: collection.fields.parentKey,
name: collection.name,
items: collection.childItems,
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
collections: children.filter(coll => coll.type === 'collection').map(coll => coll.key),
// items: (item.itemID for item in children when item.type != 'collection')
// descendents: undefined
// children: undefined
// childCollections: undefined
// primary: undefined
// fields: undefined
// type: undefined
// level: undefined
}
}
for (collection of Object.values(this.collections)) {
if (collection.parent && !this.collections[collection.parent]) {
// collection.parent = false
delete collection.parent
Zotero.debug(`BBT translator: collection with key ${collection.key} has non-existent parent ${collection.parent}, assuming root collection`)
}
}
}
this.initialized = true
}
public items(): ZoteroTranslator.Item[] {
if (!this.sortedItems) {
this.sortedItems = []
let item: ZoteroTranslator.Item
while (item = (Zotero.nextItem() as ZoteroTranslator.Item)) {
item.cachable = this.cachable
item.journalAbbreviation = item.journalAbbreviation || item.autoJournalAbbreviation
this.sortedItems.push(new Proxy(item, cacheDisabler))
}
// fallback to itemType.itemID for notes and attachments. And some items may have duplicate keys
this.sortedItems.sort((a, b) => {
const ka = [ a.citationKey || a.itemType, a.dateModified || a.dateAdded, a.itemID ].join('\t')
const kb = [ b.citationKey || b.itemType, b.dateModified || b.dateAdded, b.itemID ].join('\t')
return ka.localeCompare(kb, undefined, { sensitivity: 'base' })
})
}
return this.sortedItems
}
public nextItem() {
return (this.currentItem = this.items().shift())
}
} | caseSensitive: this.platform !== 'mac' && this.platform !== 'win',
sep: this.platform === 'win' ? '\\' : '/',
} | random_line_split |
translator.ts | declare const Zotero: any
declare const ZOTERO_TRANSLATOR_INFO: any
import { defaults } from '../../content/prefs-meta'
import { client } from '../../content/client'
import { ZoteroTranslator } from '../../gen/typings/serialized-item'
import type { Preferences } from '../../gen/preferences'
type TranslatorMode = 'export' | 'import'
const cacheDisabler = new class {
get(target, property) |
}
type TranslatorHeader = {
translatorID: string
translatorType: number
label: string
description: string
creator: string
target: string
minVersion: string
maxVersion: string
priority: number
inRepository: boolean
lastUpdated: string
browserSupport: string
displayOptions: {
exportNotes: boolean
exportFileData: boolean
useJournalAbbreviation: boolean
keepUpdated: boolean
quickCopyMode: string
Title: boolean
Authors: boolean
Year: boolean
Normalize: boolean
}
configOptions: {
getCollections: boolean
async: boolean
}
}
export const Translator = new class implements ITranslator { // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public preferences: Preferences
public skipFields: string[]
public skipField: Record<string, boolean>
public verbatimFields?: string[]
public csquotes: { open: string, close: string }
public export: { dir: string, path: string } = {
dir: undefined,
path: undefined,
}
public options: {
quickCopyMode?: string
dropAttachments?: boolean
exportNotes?: boolean
exportFileData?: boolean
useJournalAbbreviation?: boolean
keepUpdated?: boolean
Title?: boolean
Authors?: boolean
Year?: boolean
Normalize?: boolean
}
public BetterBibLaTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeX?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterTeX: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSLYAML?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterCSL?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXCitationKeyQuickCopy?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public BetterBibTeXJSON?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Citationgraph?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
public Collectednotes?: boolean // eslint-disable-line @typescript-eslint/naming-convention,no-underscore-dangle,id-blacklist,id-match
// public TeX: boolean
// public CSL: boolean
private cachable: boolean
public cache: {
hits: number
misses: number
}
public header: TranslatorHeader
public collections: Record<string, ZoteroTranslator.Collection>
private sortedItems: ZoteroTranslator.Item[]
private currentItem: ZoteroTranslator.Item
public isJurisM: boolean
public isZotero: boolean
public unicode: boolean
public platform: string
public paths: {
caseSensitive: boolean
sep: string
}
public stringCompare: (a: string, b: string) => number
public initialized = false
constructor() {
this.header = (ZOTERO_TRANSLATOR_INFO as TranslatorHeader)
this[this.header.label.replace(/[^a-z]/ig, '')] = true
this.BetterTeX = this.BetterBibTeX || this.BetterBibLaTeX
this.BetterCSL = this.BetterCSLJSON || this.BetterCSLYAML
this.preferences = defaults
this.options = this.header.displayOptions || {}
const collator = new Intl.Collator('en')
this.stringCompare = (collator.compare.bind(collator) as (left: string, right: string) => number)
}
public get exportDir(): string {
this.currentItem.cachable = false
return this.export.dir
}
public get exportPath(): string {
this.currentItem.cachable = false
return this.export.path
}
private typefield(field: string): string {
field = field.trim()
if (field.startsWith('bibtex.')) return this.BetterBibTeX ? field.replace(/^bibtex\./, '') : ''
if (field.startsWith('biblatex.')) return this.BetterBibLaTeX ? field.replace(/^biblatex\./, '') : ''
return field
}
public init(mode: TranslatorMode) {
this.platform = (Zotero.getHiddenPref('better-bibtex.platform') as string)
this.isJurisM = client === 'jurism'
this.isZotero = !this.isJurisM
this.paths = {
caseSensitive: this.platform !== 'mac' && this.platform !== 'win',
sep: this.platform === 'win' ? '\\' : '/',
}
for (const key in this.options) {
if (typeof this.options[key] === 'boolean') {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = !!Zotero.getOption(key)
}
else {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.options[key] = Zotero.getOption(key)
}
}
// special handling
if (mode === 'export') {
this.cache = {
hits: 0,
misses: 0,
}
this.export = {
dir: (Zotero.getOption('exportDir') as string),
path: (Zotero.getOption('exportPath') as string),
}
if (this.export.dir && this.export.dir.endsWith(this.paths.sep)) this.export.dir = this.export.dir.slice(0, -1)
}
for (const pref of Object.keys(this.preferences)) {
let value
try {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
value = Zotero.getOption(`preference_${pref}`)
}
catch (err) {
value = undefined
}
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
if (typeof value === 'undefined') value = Zotero.getHiddenPref(`better-bibtex.${pref}`)
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
this.preferences[pref] = value
}
// special handling
this.skipFields = this.preferences.skipFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
this.skipField = this.skipFields.reduce((acc, field) => { acc[field] = true; return acc }, {})
this.verbatimFields = this.preferences.verbatimFields.toLowerCase().split(',').map(field => this.typefield(field)).filter((s: string) => s)
if (!this.verbatimFields.length) this.verbatimFields = null
this.csquotes = this.preferences.csquotes ? { open: this.preferences.csquotes[0], close: this.preferences.csquotes[1] } : null
this.preferences.testing = (Zotero.getHiddenPref('better-bibtex.testing') as boolean)
if (mode === 'export') {
this.unicode = (this.BetterBibTeX && !Translator.preferences.asciiBibTeX) || (this.BetterBibLaTeX && !Translator.preferences.asciiBibLaTeX)
// when exporting file data you get relative paths, when not, you get absolute paths, only one version can go into the cache
// relative file paths are going to be different based on the file being exported to
this.cachable = !(this.options.exportFileData || this.preferences.relativeFilePaths)
}
this.collections = {}
if (mode === 'export' && this.header.configOptions?.getCollections && Zotero.nextCollection) {
let collection: any
while (collection = Zotero.nextCollection()) {
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const children = collection.children || collection.descendents || []
// eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
const key = (collection.primary ? collection.primary : collection).key
this.collections[key] = {
// id: collection.id,
key,
parent: collection.fields.parentKey,
name: collection.name,
items: collection.childItems,
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
collections: children.filter(coll => coll.type === 'collection').map(coll => coll.key),
// items: (item.itemID for item in children when item.type != 'collection')
// descendents: undefined
// children: undefined
// childCollections: undefined
// primary: undefined
// fields: undefined
// type: undefined
// level: undefined
}
}
for (collection of Object.values(this.collections)) {
if (collection.parent && !this.collections[collection.parent]) {
// collection.parent = false
delete collection.parent
Zotero.debug(`BBT translator: collection with key ${collection.key} has non-existent parent ${collection.parent}, assuming root collection`)
}
}
}
this.initialized = true
}
public items(): ZoteroTranslator.Item[] {
if (!this.sortedItems) {
this.sortedItems = []
let item: ZoteroTranslator.Item
while (item = (Zotero.nextItem() as ZoteroTranslator.Item)) {
item.cachable = this.cachable
item.journalAbbreviation = item.journalAbbreviation || item.autoJournalAbbreviation
this.sortedItems.push(new Proxy(item, cacheDisabler))
}
// fallback to itemType.itemID for notes and attachments. And some items may have duplicate keys
this.sortedItems.sort((a, b) => {
const ka = [ a.citationKey || a.itemType, a.dateModified || a.dateAdded, a.itemID ].join('\t')
const kb = [ b.citationKey || b.itemType, b.dateModified || b.dateAdded, b.itemID ].join('\t')
return ka.localeCompare(kb, undefined, { sensitivity: 'base' })
})
}
return this.sortedItems
}
public nextItem() {
return (this.currentItem = this.items().shift())
}
}
| {
// collections: jabref 4 stores collection info inside the reference, and collection info depends on which part of your library you're exporting
if (['collections'].includes(property)) target.cachable = false
// eslint-disable-next-line @typescript-eslint/no-unsafe-return
return target[property]
} | identifier_body |
dposhandler.go | // Copyright 2018 The go-infinet Authors
// This file is part of the go-infinet library.
//
// The go-infinet library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-infinet library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-infinet library. If not, see <http://www.gnu.org/licenses/>.
package protocol
import (
"time"
"strconv"
"sync"
"github.com/juchain/go-juchain/common"
"github.com/juchain/go-juchain/common/rlp"
"github.com/juchain/go-juchain/common/crypto/sha3"
"github.com/juchain/go-juchain/common/log"
"github.com/juchain/go-juchain/core"
"github.com/juchain/go-juchain/vm/solc/abi"
"github.com/juchain/go-juchain/p2p/protocol/downloader"
"github.com/juchain/go-juchain/p2p"
"github.com/juchain/go-juchain/p2p/discover"
"github.com/juchain/go-juchain/p2p/node"
"github.com/juchain/go-juchain/consensus"
"github.com/juchain/go-juchain/consensus/dpos"
"github.com/juchain/go-juchain/config"
"github.com/juchain/go-juchain/vm/solc"
"github.com/juchain/go-juchain/core/types"
"github.com/juchain/go-juchain/rpc"
"fmt"
"reflect"
"math/big"
"context"
"math"
"strings"
"errors"
)
// DPoS consensus handler of delegator packaging process.
// only 31 delegators voted, then this process will be started.
/**
Sample code:
for round i
dlist_i = get N delegates sort by votes
dlist_i = mixorder(dlist_i)
loop
slot = global_time_offset / block_interval
pos = slot % N
if dlist_i[pos] exists in this node
generateBlock(keypair of dlist_i[pos])
else
skip
*/
var (
currNodeId string; // current short node id.
currNodeIdHash []byte; // short node id hash.
TotalDelegatorNumber uint8 = 31; // we make 31 candidates as the best group for packaging.
SmallPeriodInterval uint32 = 5; // small period for packaging node in every 5 seconds.
GigPeriodInterval uint32 = uint32(TotalDelegatorNumber) * 5; // create a big period for all delegated nodes in every 155 seconds.
BigPeriodHistorySize uint8 = 10; // keep 10 records for the confirmation of delayed block
GigPeriodHistory = make([]GigPeriodTable, 0); // <GigPeriodTable>
GigPeriodInstance *GigPeriodTable; // we use two versions of election info for switching delegated nodes smoothly.
NextGigPeriodInstance *GigPeriodTable;
VotingAccessor DelegatorAccessor; // responsible for access voting data.
DelegatorsTable []string; // only for all delegated node ids. the table will receive from a voting contract.
DelegatorNodeInfo []*discover.Node; // all delegated peers. = make([]*discover.Node, 0, len(urls))
)
// Delegator table refers to the voting contract.
type DelegatorAccessor interface {
Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error)
}
// only for test purpose.
type DelegatorAccessorTestImpl struct {
currNodeId string; // current short node id.
currNodeIdHash []byte; // short node id hash.
}
func (d *DelegatorAccessorTestImpl) Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error) {
return []string{d.currNodeId}, []*discover.Node{}, nil
}
// access production contract.
type DelegatorAccessorImpl struct {
blockchain *core.BlockChain;
b p2p.Backend;
dappabi abi.ABI;
}
type DelegatedNodeInfoMapping struct {
ip string
port uint
ticket uint64
}
// https://solidity.readthedocs.io/en/develop/abi-spec.html#use-of-dynamic-types
// The first four bytes of the call data for a function call specifies the function to be called.
// It is the first (left, high-order in big-endian) four bytes of the Keccak (SHA-3) hash of the signature of the function.
// The signature is defined as the canonical expression of the basic prototype, i.e.
// the function name with the parenthesised list of parameter types. Parameter types are split by a single comma
// no spaces are used. for example: bytes4(sha3("set(uint256[])"))
// "0xb4701401": "birusu()",
// "0x1ab88d26": "delegatorInfo(string)",
// "0x61b29d69": "delegatorList()",
// https://solidity.readthedocs.io/en/develop/abi-spec.html#examples
// please also refer to abi_test.go
// hw.Sum(data[:0])
func (d *DelegatorAccessorImpl) Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error) {
// call delegatorList()
data, err0 := d.dappabi.Pack("delegatorList")
if err0 != nil {
log.Error("Error to encode delegatorList function call.")
return nil,nil, errors.New("Error to encode delegatorList function call.")
}
//var data = common.Hex2Bytes("0x61b29d69")
var result string;
output, err0 := d.doCall(data);
if err0 != nil {
log.Error("Error to call delegatorList function.")
return nil,nil, errors.New("Error to call delegatorList function.")
}
if len(output) == 0 {
// no result
return nil,nil, errors.New("Delegator list must not be empty! the state of this node is incorrect.")
}
err0 = d.dappabi.Unpack(&result, "result", output)
if err0 != nil {
log.Error("Error to parse the result of delegatorList function.")
return nil,nil, errors.New("Error to parse the result of delegatorList function.")
}
if len(result) == 0 {
log.Error("Delegator list must not be empty! the state of this node is incorrect.")
return nil,nil, errors.New("Delegator list must not be empty! the state of this node is incorrect.")
}
delegatorIds := strings.Split(result, ";")
ids := make([]string, len(delegatorIds))
peerinfo := make([]*discover.Node, len(delegatorIds))
for i,delegatorId := range delegatorIds {
// call delegatorInfo(string) 0x6162630000000000000000000000000000000000000000000000000000000000
data1, err0 := d.dappabi.Pack("delegatorInfo", delegatorId)
if err0 != nil {
log.Error("Error to parse delegatorInfo function.")
return nil,nil, errors.New("Error to parse delegatorInfo function.")
}
output1, err0 := d.doCall(data1)
if err0 != nil {
log.Error("Error to call delegatorInfo function.")
return nil,nil, errors.New("Error to call delegatorInfo function.")
}
var result DelegatedNodeInfoMapping
//string ip, uint port, uint256 ticket
err0 = d.dappabi.Unpack(&result, "result", output1)
if err0 != nil {
log.Error("Error to parse the result of delegatorInfo function.")
return nil,nil, errors.New("Error to parse the result of delegatorInfo function.")
}
ids[i] = delegatorId
peerinfo[i] = &discover.Node{}
}
return ids, peerinfo, nil;
}
func (d *DelegatorAccessorImpl) doCall(data []byte) ([]byte, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
ctx := context.Background()
state, header, err := d.b.StateAndHeaderByNumber(ctx, rpc.LatestBlockNumber)
if state == nil || err != nil {
return nil, err
}
// Set sender address or use a default if none specified
addr := common.Address{};
if !TestMode {
if wallets := d.b.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
}
// Set default gas & gas price if none were set
defaultGasPrice := uint64(50 * config.Shannon)
gas, gasPrice := uint64(math.MaxUint64 / 2), new(big.Int).SetUint64(defaultGasPrice)
// Create new call message
msg := types.NewMessage(addr, &core.DPOSBallotContractAddress, 0, new(big.Int), gas, gasPrice, data, false)
// Setup context so it may be cancelled the call has completed.
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
// Make sure the context is cancelled when the call has completed
// this makes sure resources are cleaned up.
defer cancel()
// Get a new instance of the EVM.
evm, vmError, err := d.b.GetEVM(ctx, msg, state, header, vm.Config{})
if err != nil {
return nil, err
}
// Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly)
go func() {
<-ctx.Done()
evm.Cancel()
}()
// Setup the gas pool (also for unmetered requests)
// and apply the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
res, gas, _, err := core.ApplyMessage(evm, msg, gp)
if err := vmError(); err != nil {
return nil, err
}
return res, err
}
type DPoSProtocolManager struct {
networkId uint64;
eth *JuchainService;
ethManager *ProtocolManager;
blockchain *core.BlockChain;
lock *sync.Mutex; // protects running
packager *dpos.Packager;
t1 *time.Timer; // global synchronized timer.
}
// NewProtocolManager returns a new obod sub protocol manager. The JuchainService sub protocol manages peers capable
// with the obod network.
func NewDPoSProtocolManager(eth *JuchainService, ethManager *ProtocolManager, config *config.ChainConfig, config2 *node.Config,
mode downloader.SyncMode, networkId uint64, blockchain *core.BlockChain, engine consensus.Engine) (*DPoSProtocolManager, error) {
// Set sender address or use a default if none specified
// Create the protocol manager with the base fields
manager := &DPoSProtocolManager{
networkId: networkId,
eth: eth,
ethManager: ethManager,
blockchain: blockchain,
lock: &sync.Mutex{},
packager: dpos.NewPackager(config, engine, DefaultConfig.Etherbase, eth, eth.EventMux()),
}
currNodeId = discover.PubkeyID(&config2.NodeKey().PublicKey).TerminalString();
currNodeIdHash = common.Hex2Bytes(currNodeId);
if TestMode {
VotingAccessor = &DelegatorAccessorTestImpl{currNodeId:currNodeId, currNodeIdHash:currNodeIdHash};
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh();
} else {
/**
var addr common.Address;
if wallets := eth.ApiBackend.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
if addr == (common.Address{}) {
log.Error("We must have a default address to activate dpos delegator consensus")
return nil, errors.New("we must have a default address to activate dpos delegator consensus")
}*/
dappabi, err := abi.JSON(strings.NewReader(core.DPOSBallotABI))
if err != nil {
log.Error("Unable to load DPoS Ballot ABI object!")
return nil, errors.New("Unable to load DPoS Ballot ABI object!")
}
VotingAccessor = &DelegatorAccessorImpl{dappabi: dappabi, blockchain: eth.blockchain, b: eth.ApiBackend};
DelegatorsTable, DelegatorNodeInfo, err = VotingAccessor.Refresh();
}
return manager, nil;
}
func (pm *DPoSProtocolManager) Start() {
if pm.isDelegatedNode() {
log.Info("I am a delegator.")
pm.packager.Start();
go pm.schedule();
if !TestMode {
time.AfterFunc(time.Second*time.Duration(SmallPeriodInterval), pm.syncDelegatedNodeSafely) //initial attempt.
}
}
}
func (pm *DPoSProtocolManager) schedule() {
t2 := time.NewTimer(time.Second * time.Duration(1))
for {
select {
case <-t2.C:
go pm.roundRobinSafely();
t2 = time.NewTimer(time.Second * time.Duration(1))
}
}
}
// this is a loop function for electing node.
func (pm *DPoSProtocolManager) syncDelegatedNodeSafely() {
if !pm.isDelegatedNode() {
// only candidate node is able to participant to this process.
return;
}
pm.lock.Lock()
defer pm.lock.Unlock()
log.Info("Preparing for next big period...");
// pull the newest delegators from voting contract.
a, b, err0 := VotingAccessor.Refresh()
if err0 != nil {
log.Error(err0.Error())
return;
}
DelegatorsTable = a
DelegatorNodeInfo = b
if uint8(len(GigPeriodHistory)) >= BigPeriodHistorySize {
GigPeriodHistory = GigPeriodHistory[1:] //remove the first old one.
}
if len(DelegatorsTable) == 0 || pm.ethManager.peers.Len() == 0 {
log.Info("Sorry, could not detect any delegator!");
return;
}
round := uint64(1)
activeTime := uint64(time.Now().Unix() + int64(GigPeriodInterval))
if NextGigPeriodInstance != nil {
if !TestMode {
gap := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if gap > 2 || gap < -2 {
log.Warn(fmt.Sprintf("Scheduling of the new electing round is improper! current gap: %v seconds", gap))
//restart the scheduler
NextElectionInfo = nil;
go pm.syncDelegatedNodeSafely();
return;
}
}
round = NextGigPeriodInstance.round + 1
activeTime = GigPeriodInstance.activeTime + uint64(GigPeriodInterval)
// keep the big period history for block validation.
GigPeriodHistory[len(GigPeriodHistory)-1] = *NextGigPeriodInstance;
GigPeriodInstance = &GigPeriodTable{
NextGigPeriodInstance.round,
NextGigPeriodInstance.state,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
NextGigPeriodInstance.confirmedTickets,
NextGigPeriodInstance.confirmedBestNode,
NextGigPeriodInstance.activeTime,
};
log.Info(fmt.Sprintf("Switched the new big period round. %d ", GigPeriodInstance.round));
}
// make sure all delegators are synced at this round.
NextGigPeriodInstance = &GigPeriodTable{
round,
STATE_LOOKING,
DelegatorsTable,
SignCandidates(DelegatorsTable),
make(map[string]uint32),
make(map[string]*GigPeriodTable),
activeTime,
};
pm.trySyncAllDelegators()
}
func (pm *DPoSProtocolManager) trySyncAllDelegators() {
if TestMode {
return;
}
//send this round to all delegated peers.
//all delegated must giving the response in SYNC_BIGPERIOD_RESPONSE state.
for _, delegator := range NextGigPeriodInstance.delegatedNodes {
// make sure all delegator are alive.
if pm.ethManager.peers.Peer(delegator) == nil {
// try to add DelegatorNodeInfo[i] into peers table.
// but can't talk to it directly.
for i,e := range DelegatorsTable {
if e == delegator {
pm.eth.server.AddPeer(DelegatorNodeInfo[i]);
break;
}
}
} else {
err := pm.ethManager.peers.Peer(delegator).SendSyncBigPeriodRequest(
&SyncBigPeriodRequest{NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
currNodeIdHash});
if err != nil {
log.Debug("Error occurred while sending SyncBigPeriodRequest: " + err.Error())
}
}
}
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *DPoSProtocolManager) handleMsg(msg *p2p.Msg, p *peer) error {
pm.lock.Lock()
defer pm.lock.Unlock()
// Handle the message depending on its contents
switch {
case msg.Code == SYNC_BIGPERIOD_REQUEST:
var request SyncBigPeriodRequest;
if err := msg.Decode(&request); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if SignCandidates(request.DelegatedTable) != request.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
if DelegatorsTable == nil || len(DelegatorsTable) == 0 {
// i am not ready.
log.Info("I am not ready!!!")
return nil;
}
if request.Round == NextGigPeriodInstance.round {
if NextGigPeriodInstance.state == STATE_CONFIRMED {
log.Debug(fmt.Sprintf("I am in the agreed round %v", NextGigPeriodInstance.round));
// if i have already confirmed this round. send this round to peer.
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash});
} else {
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
if len(DelegatorsTable) < len(request.DelegatedTable) {
// refresh table if mismatch.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
log.Debug("Delegators are mismatched in two tables.");
if TestMode {
return nil;
}
// both delegators are not matched, both lose the election power of this round.
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_DNUMBER,
currNodeIdHash});
}
}
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = request.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = request.DelegatedTableSign;
NextGigPeriodInstance.activeTime = request.ActiveTime;
pm.setNextRoundTimer();//sync the timer.
log.Debug(fmt.Sprintf("Agreed this table %v as %v round", NextGigPeriodInstance.delegatedNodes, NextGigPeriodInstance.round));
if TestMode {
return nil;
}
// broadcast it to all peers again.
for _, peer := range pm.ethManager.peers.peers {
err := peer.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash})
if (err != nil) {
log.Warn("Error occurred while sending VoteElectionRequest: " + err.Error())
}
}
}
} else if request.Round < NextGigPeriodInstance.round {
log.Debug(fmt.Sprintf("Mismatched request.round %v, CurrRound %v: ", request.Round, NextGigPeriodInstance.round))
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_ROUND,
currNodeIdHash});
} else if request.Round > NextGigPeriodInstance.round {
if (request.Round - NextElectionInfo.round) == 1 {
// the most reason could be the round timeframe switching later than this request.
// but we are continue switching as regular.
} else {
// attack happens.
}
}
case msg.Code == SYNC_BIGPERIOD_RESPONSE:
var response SyncBigPeriodResponse;
if err := msg.Decode(&response); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if response.Round != NextGigPeriodInstance.round {
return nil;
}
if SignCandidates(response.DelegatedTable) != response.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
nodeId := common.Bytes2Hex(response.NodeId)
log.Debug("Received SYNC Big Period response: " + nodeId);
NextGigPeriodInstance.confirmedTickets[nodeId] ++; | STATE_CONFIRMED,
response.DelegatedTable,
response.DelegatedTableSign,
nil,
nil,
response.ActiveTime,
};
maxTickets, bestNodeId := uint32(0), "";
for key, value := range NextGigPeriodInstance.confirmedTickets {
if maxTickets < value {
maxTickets = value;
bestNodeId = key;
}
}
if NextGigPeriodInstance.state == STATE_CONFIRMED {
// set the best node as the final state.
bestNode := NextGigPeriodInstance.confirmedBestNode[bestNodeId];
NextGigPeriodInstance.delegatedNodes = bestNode.delegatedNodes;
NextGigPeriodInstance.delegatedNodesSign = bestNode.delegatedNodesSign;
NextGigPeriodInstance.activeTime = bestNode.activeTime;
log.Debug(fmt.Sprintf("Updated the best table: %v", bestNode.delegatedNodes));
pm.setNextRoundTimer();
} else if NextGigPeriodInstance.state == STATE_LOOKING && uint32(NextGigPeriodInstance.confirmedTickets[bestNodeId]) > uint32(len(NextGigPeriodInstance.delegatedNodes)) {
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = response.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = response.DelegatedTableSign;
NextGigPeriodInstance.activeTime = response.ActiveTime;
pm.setNextRoundTimer();
} else if response.State == STATE_MISMATCHED_ROUND {
// force to create new round
NextGigPeriodInstance = &GigPeriodTable{
response.Round,
STATE_LOOKING,
response.DelegatedTable,
response.DelegatedTableSign,
make(map[string]uint32),
make(map[string]*GigPeriodTable),
response.ActiveTime,
};
pm.trySyncAllDelegators()
} else if response.State == STATE_MISMATCHED_DNUMBER {
// refresh table only, and this node loses the election power of this round.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
return nil;
default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
return nil
}
func (pm *DPoSProtocolManager) setNextRoundTimer() {
leftTime := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if leftTime < 1 {
log.Warn("Discard this round due to the expiration of the active time.")
go pm.syncDelegatedNodeSafely()
return;
}
if pm.t1 != nil {
// potentially could be an issue if the timer is unable to be cancelled.
pm.t1.Stop()
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
} else {
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
}
log.Debug(fmt.Sprintf("scheduled for next round in %v seconds", leftTime))
}
// the node would not be a candidate if it is not qualified.
func (pm *DPoSProtocolManager) isDelegatedNode() bool {
if DelegatorsTable == nil {
return false;
}
for i :=0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == currNodeId {
return true;
}
}
return false;
}
func (pm *DPoSProtocolManager) isDelegatedNode2(nodeId string) bool {
if DelegatorsTable == nil {
return false;
}
for i :=0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == nodeId {
return true;
}
}
return false;
}
func (pm *DPoSProtocolManager) Stop() {
if pm.isDelegatedNode() {
pm.packager.Stop();
}
// Quit the sync loop.
log.Info("DPoS Consensus stopped")
}
func (pm *DPoSProtocolManager) newPeer(pv uint, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, p, newMeteredMsgWriter(rw))
}
// --------------------Packaging Process-------------------//
// start round robin for packaging blocks in small period.
func (self *DPoSProtocolManager) roundRobinSafely() {
if !self.isDelegatedNode() || GigPeriodInstance == nil {
return;
}
log.Info(GigPeriodInstance.whosTurn())
// generate block by election node.
if GigPeriodInstance.isMyTurn() {
log.Debug("it's my turn now " + time.Now().String());
round := self.blockchain.CurrentFastBlock().Header().Round;
block := self.packager.GenerateNewBlock(round+1, currNodeId);
block.ToString();
//response := &PackageResponse{block.Round(), currNodeId, block.Hash(),DPOSMSG_SUCCESS};
}
}
// this GigPeriodTable only serves for delegators.
type GigPeriodTable struct {
round uint64; // synchronization round
state uint8; // STATE_LOOKING
delegatedNodes []string; // all 31 nodes id
delegatedNodesSign common.Hash; // a security sign for all delegated nodes which can be verified from node array.
confirmedTickets map[string]uint32; // 31 node must be confirmed this ticket or must equal to delegatedNodes length.
confirmedBestNode map[string]*GigPeriodTable; // confirmed the next active time from all peers. <nodeid><GigPeriodTable>
activeTime uint64; // Unix timestamp for all nodes.
}
func (t *GigPeriodTable) wasHisTurn(round uint64, nodeId string, minedTime int64) bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == nodeId {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
if beatStartTime <= minedTime && (beatStartTime+ int64(SmallPeriodInterval)) >= minedTime {
return true;
}
}
}
// check the history.
if len(GigPeriodHistory) > 0 {
for _, v := range GigPeriodHistory {
if int64(v.activeTime) <= minedTime && (int64(v.activeTime) + int64(SmallPeriodInterval)) >= minedTime {
for i :=0; i < len(v.delegatedNodes); i++ {
if v.delegatedNodes[i] == nodeId {
//todo check round as well.
return true;
}
}
}
}
}
return false;
}
func (t *GigPeriodTable) isMyTurn() bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == currNodeId {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
currTime := time.Now().Unix()
// we only give 4s to avoid the mismatched timestamp issue of last packaging.
if beatStartTime <= currTime && (beatStartTime+ int64(SmallPeriodInterval)) > currTime {
return true;
}
}
}
return false;
}
func (t *GigPeriodTable) whosTurn() string {
currTime := time.Now().Unix()
for i :=0; i < len(t.delegatedNodes); i++ {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
if beatStartTime <= currTime && (beatStartTime+ int64(SmallPeriodInterval)) >= currTime {
return "Who's turn: {position: " + strconv.Itoa(i) + ", delegator: " + t.delegatedNodes[i] + " }";
}
}
return "";
}
func (t *GigPeriodTable) isDelegatedNode(nodeId string) bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == nodeId {
return true;
}
}
return false;
}
func RemoveCanditate(s []string, i int) []string {
s[len(s)-1], s[i] = s[i], s[len(s)-1]
return s[:len(s)-1]
}
func SignCandidates(candidates []string) common.Hash {
var signCandidates = []byte{}
hw := sha3.NewKeccak256()
rlp.Encode(hw, candidates)
return common.BytesToHash(hw.Sum(signCandidates))
} | NextGigPeriodInstance.confirmedBestNode[nodeId] = &GigPeriodTable{
response.Round, | random_line_split |
dposhandler.go | // Copyright 2018 The go-infinet Authors
// This file is part of the go-infinet library.
//
// The go-infinet library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-infinet library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-infinet library. If not, see <http://www.gnu.org/licenses/>.
package protocol
import (
"time"
"strconv"
"sync"
"github.com/juchain/go-juchain/common"
"github.com/juchain/go-juchain/common/rlp"
"github.com/juchain/go-juchain/common/crypto/sha3"
"github.com/juchain/go-juchain/common/log"
"github.com/juchain/go-juchain/core"
"github.com/juchain/go-juchain/vm/solc/abi"
"github.com/juchain/go-juchain/p2p/protocol/downloader"
"github.com/juchain/go-juchain/p2p"
"github.com/juchain/go-juchain/p2p/discover"
"github.com/juchain/go-juchain/p2p/node"
"github.com/juchain/go-juchain/consensus"
"github.com/juchain/go-juchain/consensus/dpos"
"github.com/juchain/go-juchain/config"
"github.com/juchain/go-juchain/vm/solc"
"github.com/juchain/go-juchain/core/types"
"github.com/juchain/go-juchain/rpc"
"fmt"
"reflect"
"math/big"
"context"
"math"
"strings"
"errors"
)
// DPoS consensus handler of delegator packaging process.
// only 31 delegators voted, then this process will be started.
/**
Sample code:
for round i
dlist_i = get N delegates sort by votes
dlist_i = mixorder(dlist_i)
loop
slot = global_time_offset / block_interval
pos = slot % N
if dlist_i[pos] exists in this node
generateBlock(keypair of dlist_i[pos])
else
skip
*/
var (
currNodeId string; // current short node id.
currNodeIdHash []byte; // short node id hash.
TotalDelegatorNumber uint8 = 31; // we make 31 candidates as the best group for packaging.
SmallPeriodInterval uint32 = 5; // small period for packaging node in every 5 seconds.
GigPeriodInterval uint32 = uint32(TotalDelegatorNumber) * 5; // create a big period for all delegated nodes in every 155 seconds.
BigPeriodHistorySize uint8 = 10; // keep 10 records for the confirmation of delayed block
GigPeriodHistory = make([]GigPeriodTable, 0); // <GigPeriodTable>
GigPeriodInstance *GigPeriodTable; // we use two versions of election info for switching delegated nodes smoothly.
NextGigPeriodInstance *GigPeriodTable;
VotingAccessor DelegatorAccessor; // responsible for access voting data.
DelegatorsTable []string; // only for all delegated node ids. the table will receive from a voting contract.
DelegatorNodeInfo []*discover.Node; // all delegated peers. = make([]*discover.Node, 0, len(urls))
)
// Delegator table refers to the voting contract.
type DelegatorAccessor interface {
Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error)
}
// only for test purpose.
type DelegatorAccessorTestImpl struct {
currNodeId string; // current short node id.
currNodeIdHash []byte; // short node id hash.
}
func (d *DelegatorAccessorTestImpl) Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error) {
return []string{d.currNodeId}, []*discover.Node{}, nil
}
// access production contract.
type DelegatorAccessorImpl struct {
blockchain *core.BlockChain;
b p2p.Backend;
dappabi abi.ABI;
}
type DelegatedNodeInfoMapping struct {
ip string
port uint
ticket uint64
}
// https://solidity.readthedocs.io/en/develop/abi-spec.html#use-of-dynamic-types
// The first four bytes of the call data for a function call specifies the function to be called.
// It is the first (left, high-order in big-endian) four bytes of the Keccak (SHA-3) hash of the signature of the function.
// The signature is defined as the canonical expression of the basic prototype, i.e.
// the function name with the parenthesised list of parameter types. Parameter types are split by a single comma
// no spaces are used. for example: bytes4(sha3("set(uint256[])"))
// "0xb4701401": "birusu()",
// "0x1ab88d26": "delegatorInfo(string)",
// "0x61b29d69": "delegatorList()",
// https://solidity.readthedocs.io/en/develop/abi-spec.html#examples
// please also refer to abi_test.go
// hw.Sum(data[:0])
func (d *DelegatorAccessorImpl) Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error) {
// call delegatorList()
data, err0 := d.dappabi.Pack("delegatorList")
if err0 != nil {
log.Error("Error to encode delegatorList function call.")
return nil,nil, errors.New("Error to encode delegatorList function call.")
}
//var data = common.Hex2Bytes("0x61b29d69")
var result string;
output, err0 := d.doCall(data);
if err0 != nil {
log.Error("Error to call delegatorList function.")
return nil,nil, errors.New("Error to call delegatorList function.")
}
if len(output) == 0 {
// no result
return nil,nil, errors.New("Delegator list must not be empty! the state of this node is incorrect.")
}
err0 = d.dappabi.Unpack(&result, "result", output)
if err0 != nil {
log.Error("Error to parse the result of delegatorList function.")
return nil,nil, errors.New("Error to parse the result of delegatorList function.")
}
if len(result) == 0 {
log.Error("Delegator list must not be empty! the state of this node is incorrect.")
return nil,nil, errors.New("Delegator list must not be empty! the state of this node is incorrect.")
}
delegatorIds := strings.Split(result, ";")
ids := make([]string, len(delegatorIds))
peerinfo := make([]*discover.Node, len(delegatorIds))
for i,delegatorId := range delegatorIds {
// call delegatorInfo(string) 0x6162630000000000000000000000000000000000000000000000000000000000
data1, err0 := d.dappabi.Pack("delegatorInfo", delegatorId)
if err0 != nil {
log.Error("Error to parse delegatorInfo function.")
return nil,nil, errors.New("Error to parse delegatorInfo function.")
}
output1, err0 := d.doCall(data1)
if err0 != nil {
log.Error("Error to call delegatorInfo function.")
return nil,nil, errors.New("Error to call delegatorInfo function.")
}
var result DelegatedNodeInfoMapping
//string ip, uint port, uint256 ticket
err0 = d.dappabi.Unpack(&result, "result", output1)
if err0 != nil {
log.Error("Error to parse the result of delegatorInfo function.")
return nil,nil, errors.New("Error to parse the result of delegatorInfo function.")
}
ids[i] = delegatorId
peerinfo[i] = &discover.Node{}
}
return ids, peerinfo, nil;
}
func (d *DelegatorAccessorImpl) doCall(data []byte) ([]byte, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
ctx := context.Background()
state, header, err := d.b.StateAndHeaderByNumber(ctx, rpc.LatestBlockNumber)
if state == nil || err != nil {
return nil, err
}
// Set sender address or use a default if none specified
addr := common.Address{};
if !TestMode {
if wallets := d.b.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
}
// Set default gas & gas price if none were set
defaultGasPrice := uint64(50 * config.Shannon)
gas, gasPrice := uint64(math.MaxUint64 / 2), new(big.Int).SetUint64(defaultGasPrice)
// Create new call message
msg := types.NewMessage(addr, &core.DPOSBallotContractAddress, 0, new(big.Int), gas, gasPrice, data, false)
// Setup context so it may be cancelled the call has completed.
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
// Make sure the context is cancelled when the call has completed
// this makes sure resources are cleaned up.
defer cancel()
// Get a new instance of the EVM.
evm, vmError, err := d.b.GetEVM(ctx, msg, state, header, vm.Config{})
if err != nil {
return nil, err
}
// Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly)
go func() {
<-ctx.Done()
evm.Cancel()
}()
// Setup the gas pool (also for unmetered requests)
// and apply the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
res, gas, _, err := core.ApplyMessage(evm, msg, gp)
if err := vmError(); err != nil {
return nil, err
}
return res, err
}
type DPoSProtocolManager struct {
networkId uint64;
eth *JuchainService;
ethManager *ProtocolManager;
blockchain *core.BlockChain;
lock *sync.Mutex; // protects running
packager *dpos.Packager;
t1 *time.Timer; // global synchronized timer.
}
// NewProtocolManager returns a new obod sub protocol manager. The JuchainService sub protocol manages peers capable
// with the obod network.
func NewDPoSProtocolManager(eth *JuchainService, ethManager *ProtocolManager, config *config.ChainConfig, config2 *node.Config,
mode downloader.SyncMode, networkId uint64, blockchain *core.BlockChain, engine consensus.Engine) (*DPoSProtocolManager, error) {
// Set sender address or use a default if none specified
// Create the protocol manager with the base fields
manager := &DPoSProtocolManager{
networkId: networkId,
eth: eth,
ethManager: ethManager,
blockchain: blockchain,
lock: &sync.Mutex{},
packager: dpos.NewPackager(config, engine, DefaultConfig.Etherbase, eth, eth.EventMux()),
}
currNodeId = discover.PubkeyID(&config2.NodeKey().PublicKey).TerminalString();
currNodeIdHash = common.Hex2Bytes(currNodeId);
if TestMode {
VotingAccessor = &DelegatorAccessorTestImpl{currNodeId:currNodeId, currNodeIdHash:currNodeIdHash};
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh();
} else {
/**
var addr common.Address;
if wallets := eth.ApiBackend.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
if addr == (common.Address{}) {
log.Error("We must have a default address to activate dpos delegator consensus")
return nil, errors.New("we must have a default address to activate dpos delegator consensus")
}*/
dappabi, err := abi.JSON(strings.NewReader(core.DPOSBallotABI))
if err != nil {
log.Error("Unable to load DPoS Ballot ABI object!")
return nil, errors.New("Unable to load DPoS Ballot ABI object!")
}
VotingAccessor = &DelegatorAccessorImpl{dappabi: dappabi, blockchain: eth.blockchain, b: eth.ApiBackend};
DelegatorsTable, DelegatorNodeInfo, err = VotingAccessor.Refresh();
}
return manager, nil;
}
func (pm *DPoSProtocolManager) Start() {
if pm.isDelegatedNode() {
log.Info("I am a delegator.")
pm.packager.Start();
go pm.schedule();
if !TestMode {
time.AfterFunc(time.Second*time.Duration(SmallPeriodInterval), pm.syncDelegatedNodeSafely) //initial attempt.
}
}
}
func (pm *DPoSProtocolManager) schedule() {
t2 := time.NewTimer(time.Second * time.Duration(1))
for {
select {
case <-t2.C:
go pm.roundRobinSafely();
t2 = time.NewTimer(time.Second * time.Duration(1))
}
}
}
// this is a loop function for electing node.
func (pm *DPoSProtocolManager) syncDelegatedNodeSafely() {
if !pm.isDelegatedNode() {
// only candidate node is able to participant to this process.
return;
}
pm.lock.Lock()
defer pm.lock.Unlock()
log.Info("Preparing for next big period...");
// pull the newest delegators from voting contract.
a, b, err0 := VotingAccessor.Refresh()
if err0 != nil {
log.Error(err0.Error())
return;
}
DelegatorsTable = a
DelegatorNodeInfo = b
if uint8(len(GigPeriodHistory)) >= BigPeriodHistorySize {
GigPeriodHistory = GigPeriodHistory[1:] //remove the first old one.
}
if len(DelegatorsTable) == 0 || pm.ethManager.peers.Len() == 0 {
log.Info("Sorry, could not detect any delegator!");
return;
}
round := uint64(1)
activeTime := uint64(time.Now().Unix() + int64(GigPeriodInterval))
if NextGigPeriodInstance != nil |
// make sure all delegators are synced at this round.
NextGigPeriodInstance = &GigPeriodTable{
round,
STATE_LOOKING,
DelegatorsTable,
SignCandidates(DelegatorsTable),
make(map[string]uint32),
make(map[string]*GigPeriodTable),
activeTime,
};
pm.trySyncAllDelegators()
}
func (pm *DPoSProtocolManager) trySyncAllDelegators() {
if TestMode {
return;
}
//send this round to all delegated peers.
//all delegated must giving the response in SYNC_BIGPERIOD_RESPONSE state.
for _, delegator := range NextGigPeriodInstance.delegatedNodes {
// make sure all delegator are alive.
if pm.ethManager.peers.Peer(delegator) == nil {
// try to add DelegatorNodeInfo[i] into peers table.
// but can't talk to it directly.
for i,e := range DelegatorsTable {
if e == delegator {
pm.eth.server.AddPeer(DelegatorNodeInfo[i]);
break;
}
}
} else {
err := pm.ethManager.peers.Peer(delegator).SendSyncBigPeriodRequest(
&SyncBigPeriodRequest{NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
currNodeIdHash});
if err != nil {
log.Debug("Error occurred while sending SyncBigPeriodRequest: " + err.Error())
}
}
}
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *DPoSProtocolManager) handleMsg(msg *p2p.Msg, p *peer) error {
pm.lock.Lock()
defer pm.lock.Unlock()
// Handle the message depending on its contents
switch {
case msg.Code == SYNC_BIGPERIOD_REQUEST:
var request SyncBigPeriodRequest;
if err := msg.Decode(&request); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if SignCandidates(request.DelegatedTable) != request.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
if DelegatorsTable == nil || len(DelegatorsTable) == 0 {
// i am not ready.
log.Info("I am not ready!!!")
return nil;
}
if request.Round == NextGigPeriodInstance.round {
if NextGigPeriodInstance.state == STATE_CONFIRMED {
log.Debug(fmt.Sprintf("I am in the agreed round %v", NextGigPeriodInstance.round));
// if i have already confirmed this round. send this round to peer.
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash});
} else {
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
if len(DelegatorsTable) < len(request.DelegatedTable) {
// refresh table if mismatch.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
log.Debug("Delegators are mismatched in two tables.");
if TestMode {
return nil;
}
// both delegators are not matched, both lose the election power of this round.
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_DNUMBER,
currNodeIdHash});
}
}
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = request.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = request.DelegatedTableSign;
NextGigPeriodInstance.activeTime = request.ActiveTime;
pm.setNextRoundTimer();//sync the timer.
log.Debug(fmt.Sprintf("Agreed this table %v as %v round", NextGigPeriodInstance.delegatedNodes, NextGigPeriodInstance.round));
if TestMode {
return nil;
}
// broadcast it to all peers again.
for _, peer := range pm.ethManager.peers.peers {
err := peer.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash})
if (err != nil) {
log.Warn("Error occurred while sending VoteElectionRequest: " + err.Error())
}
}
}
} else if request.Round < NextGigPeriodInstance.round {
log.Debug(fmt.Sprintf("Mismatched request.round %v, CurrRound %v: ", request.Round, NextGigPeriodInstance.round))
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_ROUND,
currNodeIdHash});
} else if request.Round > NextGigPeriodInstance.round {
if (request.Round - NextElectionInfo.round) == 1 {
// the most reason could be the round timeframe switching later than this request.
// but we are continue switching as regular.
} else {
// attack happens.
}
}
case msg.Code == SYNC_BIGPERIOD_RESPONSE:
var response SyncBigPeriodResponse;
if err := msg.Decode(&response); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if response.Round != NextGigPeriodInstance.round {
return nil;
}
if SignCandidates(response.DelegatedTable) != response.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
nodeId := common.Bytes2Hex(response.NodeId)
log.Debug("Received SYNC Big Period response: " + nodeId);
NextGigPeriodInstance.confirmedTickets[nodeId] ++;
NextGigPeriodInstance.confirmedBestNode[nodeId] = &GigPeriodTable{
response.Round,
STATE_CONFIRMED,
response.DelegatedTable,
response.DelegatedTableSign,
nil,
nil,
response.ActiveTime,
};
maxTickets, bestNodeId := uint32(0), "";
for key, value := range NextGigPeriodInstance.confirmedTickets {
if maxTickets < value {
maxTickets = value;
bestNodeId = key;
}
}
if NextGigPeriodInstance.state == STATE_CONFIRMED {
// set the best node as the final state.
bestNode := NextGigPeriodInstance.confirmedBestNode[bestNodeId];
NextGigPeriodInstance.delegatedNodes = bestNode.delegatedNodes;
NextGigPeriodInstance.delegatedNodesSign = bestNode.delegatedNodesSign;
NextGigPeriodInstance.activeTime = bestNode.activeTime;
log.Debug(fmt.Sprintf("Updated the best table: %v", bestNode.delegatedNodes));
pm.setNextRoundTimer();
} else if NextGigPeriodInstance.state == STATE_LOOKING && uint32(NextGigPeriodInstance.confirmedTickets[bestNodeId]) > uint32(len(NextGigPeriodInstance.delegatedNodes)) {
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = response.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = response.DelegatedTableSign;
NextGigPeriodInstance.activeTime = response.ActiveTime;
pm.setNextRoundTimer();
} else if response.State == STATE_MISMATCHED_ROUND {
// force to create new round
NextGigPeriodInstance = &GigPeriodTable{
response.Round,
STATE_LOOKING,
response.DelegatedTable,
response.DelegatedTableSign,
make(map[string]uint32),
make(map[string]*GigPeriodTable),
response.ActiveTime,
};
pm.trySyncAllDelegators()
} else if response.State == STATE_MISMATCHED_DNUMBER {
// refresh table only, and this node loses the election power of this round.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
return nil;
default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
return nil
}
func (pm *DPoSProtocolManager) setNextRoundTimer() {
leftTime := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if leftTime < 1 {
log.Warn("Discard this round due to the expiration of the active time.")
go pm.syncDelegatedNodeSafely()
return;
}
if pm.t1 != nil {
// potentially could be an issue if the timer is unable to be cancelled.
pm.t1.Stop()
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
} else {
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
}
log.Debug(fmt.Sprintf("scheduled for next round in %v seconds", leftTime))
}
// the node would not be a candidate if it is not qualified.
func (pm *DPoSProtocolManager) isDelegatedNode() bool {
if DelegatorsTable == nil {
return false;
}
for i :=0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == currNodeId {
return true;
}
}
return false;
}
func (pm *DPoSProtocolManager) isDelegatedNode2(nodeId string) bool {
if DelegatorsTable == nil {
return false;
}
for i :=0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == nodeId {
return true;
}
}
return false;
}
func (pm *DPoSProtocolManager) Stop() {
if pm.isDelegatedNode() {
pm.packager.Stop();
}
// Quit the sync loop.
log.Info("DPoS Consensus stopped")
}
func (pm *DPoSProtocolManager) newPeer(pv uint, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, p, newMeteredMsgWriter(rw))
}
// --------------------Packaging Process-------------------//
// start round robin for packaging blocks in small period.
func (self *DPoSProtocolManager) roundRobinSafely() {
if !self.isDelegatedNode() || GigPeriodInstance == nil {
return;
}
log.Info(GigPeriodInstance.whosTurn())
// generate block by election node.
if GigPeriodInstance.isMyTurn() {
log.Debug("it's my turn now " + time.Now().String());
round := self.blockchain.CurrentFastBlock().Header().Round;
block := self.packager.GenerateNewBlock(round+1, currNodeId);
block.ToString();
//response := &PackageResponse{block.Round(), currNodeId, block.Hash(),DPOSMSG_SUCCESS};
}
}
// this GigPeriodTable only serves for delegators.
type GigPeriodTable struct {
round uint64; // synchronization round
state uint8; // STATE_LOOKING
delegatedNodes []string; // all 31 nodes id
delegatedNodesSign common.Hash; // a security sign for all delegated nodes which can be verified from node array.
confirmedTickets map[string]uint32; // 31 node must be confirmed this ticket or must equal to delegatedNodes length.
confirmedBestNode map[string]*GigPeriodTable; // confirmed the next active time from all peers. <nodeid><GigPeriodTable>
activeTime uint64; // Unix timestamp for all nodes.
}
func (t *GigPeriodTable) wasHisTurn(round uint64, nodeId string, minedTime int64) bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == nodeId {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
if beatStartTime <= minedTime && (beatStartTime+ int64(SmallPeriodInterval)) >= minedTime {
return true;
}
}
}
// check the history.
if len(GigPeriodHistory) > 0 {
for _, v := range GigPeriodHistory {
if int64(v.activeTime) <= minedTime && (int64(v.activeTime) + int64(SmallPeriodInterval)) >= minedTime {
for i :=0; i < len(v.delegatedNodes); i++ {
if v.delegatedNodes[i] == nodeId {
//todo check round as well.
return true;
}
}
}
}
}
return false;
}
func (t *GigPeriodTable) isMyTurn() bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == currNodeId {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
currTime := time.Now().Unix()
// we only give 4s to avoid the mismatched timestamp issue of last packaging.
if beatStartTime <= currTime && (beatStartTime+ int64(SmallPeriodInterval)) > currTime {
return true;
}
}
}
return false;
}
func (t *GigPeriodTable) whosTurn() string {
currTime := time.Now().Unix()
for i :=0; i < len(t.delegatedNodes); i++ {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
if beatStartTime <= currTime && (beatStartTime+ int64(SmallPeriodInterval)) >= currTime {
return "Who's turn: {position: " + strconv.Itoa(i) + ", delegator: " + t.delegatedNodes[i] + " }";
}
}
return "";
}
func (t *GigPeriodTable) isDelegatedNode(nodeId string) bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == nodeId {
return true;
}
}
return false;
}
func RemoveCanditate(s []string, i int) []string {
s[len(s)-1], s[i] = s[i], s[len(s)-1]
return s[:len(s)-1]
}
func SignCandidates(candidates []string) common.Hash {
var signCandidates = []byte{}
hw := sha3.NewKeccak256()
rlp.Encode(hw, candidates)
return common.BytesToHash(hw.Sum(signCandidates))
} | {
if !TestMode {
gap := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if gap > 2 || gap < -2 {
log.Warn(fmt.Sprintf("Scheduling of the new electing round is improper! current gap: %v seconds", gap))
//restart the scheduler
NextElectionInfo = nil;
go pm.syncDelegatedNodeSafely();
return;
}
}
round = NextGigPeriodInstance.round + 1
activeTime = GigPeriodInstance.activeTime + uint64(GigPeriodInterval)
// keep the big period history for block validation.
GigPeriodHistory[len(GigPeriodHistory)-1] = *NextGigPeriodInstance;
GigPeriodInstance = &GigPeriodTable{
NextGigPeriodInstance.round,
NextGigPeriodInstance.state,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
NextGigPeriodInstance.confirmedTickets,
NextGigPeriodInstance.confirmedBestNode,
NextGigPeriodInstance.activeTime,
};
log.Info(fmt.Sprintf("Switched the new big period round. %d ", GigPeriodInstance.round));
} | conditional_block |
dposhandler.go | // Copyright 2018 The go-infinet Authors
// This file is part of the go-infinet library.
//
// The go-infinet library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-infinet library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-infinet library. If not, see <http://www.gnu.org/licenses/>.
package protocol
import (
"time"
"strconv"
"sync"
"github.com/juchain/go-juchain/common"
"github.com/juchain/go-juchain/common/rlp"
"github.com/juchain/go-juchain/common/crypto/sha3"
"github.com/juchain/go-juchain/common/log"
"github.com/juchain/go-juchain/core"
"github.com/juchain/go-juchain/vm/solc/abi"
"github.com/juchain/go-juchain/p2p/protocol/downloader"
"github.com/juchain/go-juchain/p2p"
"github.com/juchain/go-juchain/p2p/discover"
"github.com/juchain/go-juchain/p2p/node"
"github.com/juchain/go-juchain/consensus"
"github.com/juchain/go-juchain/consensus/dpos"
"github.com/juchain/go-juchain/config"
"github.com/juchain/go-juchain/vm/solc"
"github.com/juchain/go-juchain/core/types"
"github.com/juchain/go-juchain/rpc"
"fmt"
"reflect"
"math/big"
"context"
"math"
"strings"
"errors"
)
// DPoS consensus handler of delegator packaging process.
// only 31 delegators voted, then this process will be started.
/**
Sample code:
for round i
dlist_i = get N delegates sort by votes
dlist_i = mixorder(dlist_i)
loop
slot = global_time_offset / block_interval
pos = slot % N
if dlist_i[pos] exists in this node
generateBlock(keypair of dlist_i[pos])
else
skip
*/
var (
currNodeId string; // current short node id.
currNodeIdHash []byte; // short node id hash.
TotalDelegatorNumber uint8 = 31; // we make 31 candidates as the best group for packaging.
SmallPeriodInterval uint32 = 5; // small period for packaging node in every 5 seconds.
GigPeriodInterval uint32 = uint32(TotalDelegatorNumber) * 5; // create a big period for all delegated nodes in every 155 seconds.
BigPeriodHistorySize uint8 = 10; // keep 10 records for the confirmation of delayed block
GigPeriodHistory = make([]GigPeriodTable, 0); // <GigPeriodTable>
GigPeriodInstance *GigPeriodTable; // we use two versions of election info for switching delegated nodes smoothly.
NextGigPeriodInstance *GigPeriodTable;
VotingAccessor DelegatorAccessor; // responsible for access voting data.
DelegatorsTable []string; // only for all delegated node ids. the table will receive from a voting contract.
DelegatorNodeInfo []*discover.Node; // all delegated peers. = make([]*discover.Node, 0, len(urls))
)
// Delegator table refers to the voting contract.
type DelegatorAccessor interface {
Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error)
}
// only for test purpose.
type DelegatorAccessorTestImpl struct {
currNodeId string; // current short node id.
currNodeIdHash []byte; // short node id hash.
}
func (d *DelegatorAccessorTestImpl) Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error) {
return []string{d.currNodeId}, []*discover.Node{}, nil
}
// access production contract.
type DelegatorAccessorImpl struct {
blockchain *core.BlockChain;
b p2p.Backend;
dappabi abi.ABI;
}
type DelegatedNodeInfoMapping struct {
ip string
port uint
ticket uint64
}
// https://solidity.readthedocs.io/en/develop/abi-spec.html#use-of-dynamic-types
// The first four bytes of the call data for a function call specifies the function to be called.
// It is the first (left, high-order in big-endian) four bytes of the Keccak (SHA-3) hash of the signature of the function.
// The signature is defined as the canonical expression of the basic prototype, i.e.
// the function name with the parenthesised list of parameter types. Parameter types are split by a single comma
// no spaces are used. for example: bytes4(sha3("set(uint256[])"))
// "0xb4701401": "birusu()",
// "0x1ab88d26": "delegatorInfo(string)",
// "0x61b29d69": "delegatorList()",
// https://solidity.readthedocs.io/en/develop/abi-spec.html#examples
// please also refer to abi_test.go
// hw.Sum(data[:0])
func (d *DelegatorAccessorImpl) Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error) {
// call delegatorList()
data, err0 := d.dappabi.Pack("delegatorList")
if err0 != nil {
log.Error("Error to encode delegatorList function call.")
return nil,nil, errors.New("Error to encode delegatorList function call.")
}
//var data = common.Hex2Bytes("0x61b29d69")
var result string;
output, err0 := d.doCall(data);
if err0 != nil {
log.Error("Error to call delegatorList function.")
return nil,nil, errors.New("Error to call delegatorList function.")
}
if len(output) == 0 {
// no result
return nil,nil, errors.New("Delegator list must not be empty! the state of this node is incorrect.")
}
err0 = d.dappabi.Unpack(&result, "result", output)
if err0 != nil {
log.Error("Error to parse the result of delegatorList function.")
return nil,nil, errors.New("Error to parse the result of delegatorList function.")
}
if len(result) == 0 {
log.Error("Delegator list must not be empty! the state of this node is incorrect.")
return nil,nil, errors.New("Delegator list must not be empty! the state of this node is incorrect.")
}
delegatorIds := strings.Split(result, ";")
ids := make([]string, len(delegatorIds))
peerinfo := make([]*discover.Node, len(delegatorIds))
for i,delegatorId := range delegatorIds {
// call delegatorInfo(string) 0x6162630000000000000000000000000000000000000000000000000000000000
data1, err0 := d.dappabi.Pack("delegatorInfo", delegatorId)
if err0 != nil {
log.Error("Error to parse delegatorInfo function.")
return nil,nil, errors.New("Error to parse delegatorInfo function.")
}
output1, err0 := d.doCall(data1)
if err0 != nil {
log.Error("Error to call delegatorInfo function.")
return nil,nil, errors.New("Error to call delegatorInfo function.")
}
var result DelegatedNodeInfoMapping
//string ip, uint port, uint256 ticket
err0 = d.dappabi.Unpack(&result, "result", output1)
if err0 != nil {
log.Error("Error to parse the result of delegatorInfo function.")
return nil,nil, errors.New("Error to parse the result of delegatorInfo function.")
}
ids[i] = delegatorId
peerinfo[i] = &discover.Node{}
}
return ids, peerinfo, nil;
}
func (d *DelegatorAccessorImpl) doCall(data []byte) ([]byte, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
ctx := context.Background()
state, header, err := d.b.StateAndHeaderByNumber(ctx, rpc.LatestBlockNumber)
if state == nil || err != nil {
return nil, err
}
// Set sender address or use a default if none specified
addr := common.Address{};
if !TestMode {
if wallets := d.b.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
}
// Set default gas & gas price if none were set
defaultGasPrice := uint64(50 * config.Shannon)
gas, gasPrice := uint64(math.MaxUint64 / 2), new(big.Int).SetUint64(defaultGasPrice)
// Create new call message
msg := types.NewMessage(addr, &core.DPOSBallotContractAddress, 0, new(big.Int), gas, gasPrice, data, false)
// Setup context so it may be cancelled the call has completed.
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
// Make sure the context is cancelled when the call has completed
// this makes sure resources are cleaned up.
defer cancel()
// Get a new instance of the EVM.
evm, vmError, err := d.b.GetEVM(ctx, msg, state, header, vm.Config{})
if err != nil {
return nil, err
}
// Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly)
go func() {
<-ctx.Done()
evm.Cancel()
}()
// Setup the gas pool (also for unmetered requests)
// and apply the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
res, gas, _, err := core.ApplyMessage(evm, msg, gp)
if err := vmError(); err != nil {
return nil, err
}
return res, err
}
type DPoSProtocolManager struct {
networkId uint64;
eth *JuchainService;
ethManager *ProtocolManager;
blockchain *core.BlockChain;
lock *sync.Mutex; // protects running
packager *dpos.Packager;
t1 *time.Timer; // global synchronized timer.
}
// NewProtocolManager returns a new obod sub protocol manager. The JuchainService sub protocol manages peers capable
// with the obod network.
func NewDPoSProtocolManager(eth *JuchainService, ethManager *ProtocolManager, config *config.ChainConfig, config2 *node.Config,
mode downloader.SyncMode, networkId uint64, blockchain *core.BlockChain, engine consensus.Engine) (*DPoSProtocolManager, error) {
// Set sender address or use a default if none specified
// Create the protocol manager with the base fields
manager := &DPoSProtocolManager{
networkId: networkId,
eth: eth,
ethManager: ethManager,
blockchain: blockchain,
lock: &sync.Mutex{},
packager: dpos.NewPackager(config, engine, DefaultConfig.Etherbase, eth, eth.EventMux()),
}
currNodeId = discover.PubkeyID(&config2.NodeKey().PublicKey).TerminalString();
currNodeIdHash = common.Hex2Bytes(currNodeId);
if TestMode {
VotingAccessor = &DelegatorAccessorTestImpl{currNodeId:currNodeId, currNodeIdHash:currNodeIdHash};
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh();
} else {
/**
var addr common.Address;
if wallets := eth.ApiBackend.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
if addr == (common.Address{}) {
log.Error("We must have a default address to activate dpos delegator consensus")
return nil, errors.New("we must have a default address to activate dpos delegator consensus")
}*/
dappabi, err := abi.JSON(strings.NewReader(core.DPOSBallotABI))
if err != nil {
log.Error("Unable to load DPoS Ballot ABI object!")
return nil, errors.New("Unable to load DPoS Ballot ABI object!")
}
VotingAccessor = &DelegatorAccessorImpl{dappabi: dappabi, blockchain: eth.blockchain, b: eth.ApiBackend};
DelegatorsTable, DelegatorNodeInfo, err = VotingAccessor.Refresh();
}
return manager, nil;
}
func (pm *DPoSProtocolManager) Start() {
if pm.isDelegatedNode() {
log.Info("I am a delegator.")
pm.packager.Start();
go pm.schedule();
if !TestMode {
time.AfterFunc(time.Second*time.Duration(SmallPeriodInterval), pm.syncDelegatedNodeSafely) //initial attempt.
}
}
}
func (pm *DPoSProtocolManager) schedule() {
t2 := time.NewTimer(time.Second * time.Duration(1))
for {
select {
case <-t2.C:
go pm.roundRobinSafely();
t2 = time.NewTimer(time.Second * time.Duration(1))
}
}
}
// this is a loop function for electing node.
func (pm *DPoSProtocolManager) syncDelegatedNodeSafely() {
if !pm.isDelegatedNode() {
// only candidate node is able to participant to this process.
return;
}
pm.lock.Lock()
defer pm.lock.Unlock()
log.Info("Preparing for next big period...");
// pull the newest delegators from voting contract.
a, b, err0 := VotingAccessor.Refresh()
if err0 != nil {
log.Error(err0.Error())
return;
}
DelegatorsTable = a
DelegatorNodeInfo = b
if uint8(len(GigPeriodHistory)) >= BigPeriodHistorySize {
GigPeriodHistory = GigPeriodHistory[1:] //remove the first old one.
}
if len(DelegatorsTable) == 0 || pm.ethManager.peers.Len() == 0 {
log.Info("Sorry, could not detect any delegator!");
return;
}
round := uint64(1)
activeTime := uint64(time.Now().Unix() + int64(GigPeriodInterval))
if NextGigPeriodInstance != nil {
if !TestMode {
gap := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if gap > 2 || gap < -2 {
log.Warn(fmt.Sprintf("Scheduling of the new electing round is improper! current gap: %v seconds", gap))
//restart the scheduler
NextElectionInfo = nil;
go pm.syncDelegatedNodeSafely();
return;
}
}
round = NextGigPeriodInstance.round + 1
activeTime = GigPeriodInstance.activeTime + uint64(GigPeriodInterval)
// keep the big period history for block validation.
GigPeriodHistory[len(GigPeriodHistory)-1] = *NextGigPeriodInstance;
GigPeriodInstance = &GigPeriodTable{
NextGigPeriodInstance.round,
NextGigPeriodInstance.state,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
NextGigPeriodInstance.confirmedTickets,
NextGigPeriodInstance.confirmedBestNode,
NextGigPeriodInstance.activeTime,
};
log.Info(fmt.Sprintf("Switched the new big period round. %d ", GigPeriodInstance.round));
}
// make sure all delegators are synced at this round.
NextGigPeriodInstance = &GigPeriodTable{
round,
STATE_LOOKING,
DelegatorsTable,
SignCandidates(DelegatorsTable),
make(map[string]uint32),
make(map[string]*GigPeriodTable),
activeTime,
};
pm.trySyncAllDelegators()
}
func (pm *DPoSProtocolManager) trySyncAllDelegators() {
if TestMode {
return;
}
//send this round to all delegated peers.
//all delegated must giving the response in SYNC_BIGPERIOD_RESPONSE state.
for _, delegator := range NextGigPeriodInstance.delegatedNodes {
// make sure all delegator are alive.
if pm.ethManager.peers.Peer(delegator) == nil {
// try to add DelegatorNodeInfo[i] into peers table.
// but can't talk to it directly.
for i,e := range DelegatorsTable {
if e == delegator {
pm.eth.server.AddPeer(DelegatorNodeInfo[i]);
break;
}
}
} else {
err := pm.ethManager.peers.Peer(delegator).SendSyncBigPeriodRequest(
&SyncBigPeriodRequest{NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
currNodeIdHash});
if err != nil {
log.Debug("Error occurred while sending SyncBigPeriodRequest: " + err.Error())
}
}
}
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *DPoSProtocolManager) handleMsg(msg *p2p.Msg, p *peer) error {
pm.lock.Lock()
defer pm.lock.Unlock()
// Handle the message depending on its contents
switch {
case msg.Code == SYNC_BIGPERIOD_REQUEST:
var request SyncBigPeriodRequest;
if err := msg.Decode(&request); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if SignCandidates(request.DelegatedTable) != request.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
if DelegatorsTable == nil || len(DelegatorsTable) == 0 {
// i am not ready.
log.Info("I am not ready!!!")
return nil;
}
if request.Round == NextGigPeriodInstance.round {
if NextGigPeriodInstance.state == STATE_CONFIRMED {
log.Debug(fmt.Sprintf("I am in the agreed round %v", NextGigPeriodInstance.round));
// if i have already confirmed this round. send this round to peer.
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash});
} else {
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
if len(DelegatorsTable) < len(request.DelegatedTable) {
// refresh table if mismatch.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
log.Debug("Delegators are mismatched in two tables.");
if TestMode {
return nil;
}
// both delegators are not matched, both lose the election power of this round.
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_DNUMBER,
currNodeIdHash});
}
}
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = request.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = request.DelegatedTableSign;
NextGigPeriodInstance.activeTime = request.ActiveTime;
pm.setNextRoundTimer();//sync the timer.
log.Debug(fmt.Sprintf("Agreed this table %v as %v round", NextGigPeriodInstance.delegatedNodes, NextGigPeriodInstance.round));
if TestMode {
return nil;
}
// broadcast it to all peers again.
for _, peer := range pm.ethManager.peers.peers {
err := peer.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash})
if (err != nil) {
log.Warn("Error occurred while sending VoteElectionRequest: " + err.Error())
}
}
}
} else if request.Round < NextGigPeriodInstance.round {
log.Debug(fmt.Sprintf("Mismatched request.round %v, CurrRound %v: ", request.Round, NextGigPeriodInstance.round))
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_ROUND,
currNodeIdHash});
} else if request.Round > NextGigPeriodInstance.round {
if (request.Round - NextElectionInfo.round) == 1 {
// the most reason could be the round timeframe switching later than this request.
// but we are continue switching as regular.
} else {
// attack happens.
}
}
case msg.Code == SYNC_BIGPERIOD_RESPONSE:
var response SyncBigPeriodResponse;
if err := msg.Decode(&response); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if response.Round != NextGigPeriodInstance.round {
return nil;
}
if SignCandidates(response.DelegatedTable) != response.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
nodeId := common.Bytes2Hex(response.NodeId)
log.Debug("Received SYNC Big Period response: " + nodeId);
NextGigPeriodInstance.confirmedTickets[nodeId] ++;
NextGigPeriodInstance.confirmedBestNode[nodeId] = &GigPeriodTable{
response.Round,
STATE_CONFIRMED,
response.DelegatedTable,
response.DelegatedTableSign,
nil,
nil,
response.ActiveTime,
};
maxTickets, bestNodeId := uint32(0), "";
for key, value := range NextGigPeriodInstance.confirmedTickets {
if maxTickets < value {
maxTickets = value;
bestNodeId = key;
}
}
if NextGigPeriodInstance.state == STATE_CONFIRMED {
// set the best node as the final state.
bestNode := NextGigPeriodInstance.confirmedBestNode[bestNodeId];
NextGigPeriodInstance.delegatedNodes = bestNode.delegatedNodes;
NextGigPeriodInstance.delegatedNodesSign = bestNode.delegatedNodesSign;
NextGigPeriodInstance.activeTime = bestNode.activeTime;
log.Debug(fmt.Sprintf("Updated the best table: %v", bestNode.delegatedNodes));
pm.setNextRoundTimer();
} else if NextGigPeriodInstance.state == STATE_LOOKING && uint32(NextGigPeriodInstance.confirmedTickets[bestNodeId]) > uint32(len(NextGigPeriodInstance.delegatedNodes)) {
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = response.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = response.DelegatedTableSign;
NextGigPeriodInstance.activeTime = response.ActiveTime;
pm.setNextRoundTimer();
} else if response.State == STATE_MISMATCHED_ROUND {
// force to create new round
NextGigPeriodInstance = &GigPeriodTable{
response.Round,
STATE_LOOKING,
response.DelegatedTable,
response.DelegatedTableSign,
make(map[string]uint32),
make(map[string]*GigPeriodTable),
response.ActiveTime,
};
pm.trySyncAllDelegators()
} else if response.State == STATE_MISMATCHED_DNUMBER {
// refresh table only, and this node loses the election power of this round.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
return nil;
default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
return nil
}
func (pm *DPoSProtocolManager) setNextRoundTimer() {
leftTime := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if leftTime < 1 {
log.Warn("Discard this round due to the expiration of the active time.")
go pm.syncDelegatedNodeSafely()
return;
}
if pm.t1 != nil {
// potentially could be an issue if the timer is unable to be cancelled.
pm.t1.Stop()
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
} else {
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
}
log.Debug(fmt.Sprintf("scheduled for next round in %v seconds", leftTime))
}
// the node would not be a candidate if it is not qualified.
func (pm *DPoSProtocolManager) isDelegatedNode() bool {
if DelegatorsTable == nil {
return false;
}
for i :=0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == currNodeId {
return true;
}
}
return false;
}
func (pm *DPoSProtocolManager) isDelegatedNode2(nodeId string) bool |
func (pm *DPoSProtocolManager) Stop() {
if pm.isDelegatedNode() {
pm.packager.Stop();
}
// Quit the sync loop.
log.Info("DPoS Consensus stopped")
}
func (pm *DPoSProtocolManager) newPeer(pv uint, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, p, newMeteredMsgWriter(rw))
}
// --------------------Packaging Process-------------------//
// start round robin for packaging blocks in small period.
func (self *DPoSProtocolManager) roundRobinSafely() {
if !self.isDelegatedNode() || GigPeriodInstance == nil {
return;
}
log.Info(GigPeriodInstance.whosTurn())
// generate block by election node.
if GigPeriodInstance.isMyTurn() {
log.Debug("it's my turn now " + time.Now().String());
round := self.blockchain.CurrentFastBlock().Header().Round;
block := self.packager.GenerateNewBlock(round+1, currNodeId);
block.ToString();
//response := &PackageResponse{block.Round(), currNodeId, block.Hash(),DPOSMSG_SUCCESS};
}
}
// this GigPeriodTable only serves for delegators.
type GigPeriodTable struct {
round uint64; // synchronization round
state uint8; // STATE_LOOKING
delegatedNodes []string; // all 31 nodes id
delegatedNodesSign common.Hash; // a security sign for all delegated nodes which can be verified from node array.
confirmedTickets map[string]uint32; // 31 node must be confirmed this ticket or must equal to delegatedNodes length.
confirmedBestNode map[string]*GigPeriodTable; // confirmed the next active time from all peers. <nodeid><GigPeriodTable>
activeTime uint64; // Unix timestamp for all nodes.
}
func (t *GigPeriodTable) wasHisTurn(round uint64, nodeId string, minedTime int64) bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == nodeId {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
if beatStartTime <= minedTime && (beatStartTime+ int64(SmallPeriodInterval)) >= minedTime {
return true;
}
}
}
// check the history.
if len(GigPeriodHistory) > 0 {
for _, v := range GigPeriodHistory {
if int64(v.activeTime) <= minedTime && (int64(v.activeTime) + int64(SmallPeriodInterval)) >= minedTime {
for i :=0; i < len(v.delegatedNodes); i++ {
if v.delegatedNodes[i] == nodeId {
//todo check round as well.
return true;
}
}
}
}
}
return false;
}
func (t *GigPeriodTable) isMyTurn() bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == currNodeId {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
currTime := time.Now().Unix()
// we only give 4s to avoid the mismatched timestamp issue of last packaging.
if beatStartTime <= currTime && (beatStartTime+ int64(SmallPeriodInterval)) > currTime {
return true;
}
}
}
return false;
}
func (t *GigPeriodTable) whosTurn() string {
currTime := time.Now().Unix()
for i :=0; i < len(t.delegatedNodes); i++ {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
if beatStartTime <= currTime && (beatStartTime+ int64(SmallPeriodInterval)) >= currTime {
return "Who's turn: {position: " + strconv.Itoa(i) + ", delegator: " + t.delegatedNodes[i] + " }";
}
}
return "";
}
func (t *GigPeriodTable) isDelegatedNode(nodeId string) bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == nodeId {
return true;
}
}
return false;
}
func RemoveCanditate(s []string, i int) []string {
s[len(s)-1], s[i] = s[i], s[len(s)-1]
return s[:len(s)-1]
}
func SignCandidates(candidates []string) common.Hash {
var signCandidates = []byte{}
hw := sha3.NewKeccak256()
rlp.Encode(hw, candidates)
return common.BytesToHash(hw.Sum(signCandidates))
} | {
if DelegatorsTable == nil {
return false;
}
for i :=0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == nodeId {
return true;
}
}
return false;
} | identifier_body |
dposhandler.go | // Copyright 2018 The go-infinet Authors
// This file is part of the go-infinet library.
//
// The go-infinet library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-infinet library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-infinet library. If not, see <http://www.gnu.org/licenses/>.
package protocol
import (
"time"
"strconv"
"sync"
"github.com/juchain/go-juchain/common"
"github.com/juchain/go-juchain/common/rlp"
"github.com/juchain/go-juchain/common/crypto/sha3"
"github.com/juchain/go-juchain/common/log"
"github.com/juchain/go-juchain/core"
"github.com/juchain/go-juchain/vm/solc/abi"
"github.com/juchain/go-juchain/p2p/protocol/downloader"
"github.com/juchain/go-juchain/p2p"
"github.com/juchain/go-juchain/p2p/discover"
"github.com/juchain/go-juchain/p2p/node"
"github.com/juchain/go-juchain/consensus"
"github.com/juchain/go-juchain/consensus/dpos"
"github.com/juchain/go-juchain/config"
"github.com/juchain/go-juchain/vm/solc"
"github.com/juchain/go-juchain/core/types"
"github.com/juchain/go-juchain/rpc"
"fmt"
"reflect"
"math/big"
"context"
"math"
"strings"
"errors"
)
// DPoS consensus handler of delegator packaging process.
// only 31 delegators voted, then this process will be started.
/**
Sample code:
for round i
dlist_i = get N delegates sort by votes
dlist_i = mixorder(dlist_i)
loop
slot = global_time_offset / block_interval
pos = slot % N
if dlist_i[pos] exists in this node
generateBlock(keypair of dlist_i[pos])
else
skip
*/
var (
currNodeId string; // current short node id.
currNodeIdHash []byte; // short node id hash.
TotalDelegatorNumber uint8 = 31; // we make 31 candidates as the best group for packaging.
SmallPeriodInterval uint32 = 5; // small period for packaging node in every 5 seconds.
GigPeriodInterval uint32 = uint32(TotalDelegatorNumber) * 5; // create a big period for all delegated nodes in every 155 seconds.
BigPeriodHistorySize uint8 = 10; // keep 10 records for the confirmation of delayed block
GigPeriodHistory = make([]GigPeriodTable, 0); // <GigPeriodTable>
GigPeriodInstance *GigPeriodTable; // we use two versions of election info for switching delegated nodes smoothly.
NextGigPeriodInstance *GigPeriodTable;
VotingAccessor DelegatorAccessor; // responsible for access voting data.
DelegatorsTable []string; // only for all delegated node ids. the table will receive from a voting contract.
DelegatorNodeInfo []*discover.Node; // all delegated peers. = make([]*discover.Node, 0, len(urls))
)
// Delegator table refers to the voting contract.
type DelegatorAccessor interface {
Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error)
}
// only for test purpose.
type DelegatorAccessorTestImpl struct {
currNodeId string; // current short node id.
currNodeIdHash []byte; // short node id hash.
}
func (d *DelegatorAccessorTestImpl) Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error) {
return []string{d.currNodeId}, []*discover.Node{}, nil
}
// access production contract.
type DelegatorAccessorImpl struct {
blockchain *core.BlockChain;
b p2p.Backend;
dappabi abi.ABI;
}
type DelegatedNodeInfoMapping struct {
ip string
port uint
ticket uint64
}
// https://solidity.readthedocs.io/en/develop/abi-spec.html#use-of-dynamic-types
// The first four bytes of the call data for a function call specifies the function to be called.
// It is the first (left, high-order in big-endian) four bytes of the Keccak (SHA-3) hash of the signature of the function.
// The signature is defined as the canonical expression of the basic prototype, i.e.
// the function name with the parenthesised list of parameter types. Parameter types are split by a single comma
// no spaces are used. for example: bytes4(sha3("set(uint256[])"))
// "0xb4701401": "birusu()",
// "0x1ab88d26": "delegatorInfo(string)",
// "0x61b29d69": "delegatorList()",
// https://solidity.readthedocs.io/en/develop/abi-spec.html#examples
// please also refer to abi_test.go
// hw.Sum(data[:0])
func (d *DelegatorAccessorImpl) Refresh() (delegatorsTable []string, delegatorNodes []*discover.Node, e error) {
// call delegatorList()
data, err0 := d.dappabi.Pack("delegatorList")
if err0 != nil {
log.Error("Error to encode delegatorList function call.")
return nil,nil, errors.New("Error to encode delegatorList function call.")
}
//var data = common.Hex2Bytes("0x61b29d69")
var result string;
output, err0 := d.doCall(data);
if err0 != nil {
log.Error("Error to call delegatorList function.")
return nil,nil, errors.New("Error to call delegatorList function.")
}
if len(output) == 0 {
// no result
return nil,nil, errors.New("Delegator list must not be empty! the state of this node is incorrect.")
}
err0 = d.dappabi.Unpack(&result, "result", output)
if err0 != nil {
log.Error("Error to parse the result of delegatorList function.")
return nil,nil, errors.New("Error to parse the result of delegatorList function.")
}
if len(result) == 0 {
log.Error("Delegator list must not be empty! the state of this node is incorrect.")
return nil,nil, errors.New("Delegator list must not be empty! the state of this node is incorrect.")
}
delegatorIds := strings.Split(result, ";")
ids := make([]string, len(delegatorIds))
peerinfo := make([]*discover.Node, len(delegatorIds))
for i,delegatorId := range delegatorIds {
// call delegatorInfo(string) 0x6162630000000000000000000000000000000000000000000000000000000000
data1, err0 := d.dappabi.Pack("delegatorInfo", delegatorId)
if err0 != nil {
log.Error("Error to parse delegatorInfo function.")
return nil,nil, errors.New("Error to parse delegatorInfo function.")
}
output1, err0 := d.doCall(data1)
if err0 != nil {
log.Error("Error to call delegatorInfo function.")
return nil,nil, errors.New("Error to call delegatorInfo function.")
}
var result DelegatedNodeInfoMapping
//string ip, uint port, uint256 ticket
err0 = d.dappabi.Unpack(&result, "result", output1)
if err0 != nil {
log.Error("Error to parse the result of delegatorInfo function.")
return nil,nil, errors.New("Error to parse the result of delegatorInfo function.")
}
ids[i] = delegatorId
peerinfo[i] = &discover.Node{}
}
return ids, peerinfo, nil;
}
func (d *DelegatorAccessorImpl) doCall(data []byte) ([]byte, error) {
defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
ctx := context.Background()
state, header, err := d.b.StateAndHeaderByNumber(ctx, rpc.LatestBlockNumber)
if state == nil || err != nil {
return nil, err
}
// Set sender address or use a default if none specified
addr := common.Address{};
if !TestMode {
if wallets := d.b.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
}
// Set default gas & gas price if none were set
defaultGasPrice := uint64(50 * config.Shannon)
gas, gasPrice := uint64(math.MaxUint64 / 2), new(big.Int).SetUint64(defaultGasPrice)
// Create new call message
msg := types.NewMessage(addr, &core.DPOSBallotContractAddress, 0, new(big.Int), gas, gasPrice, data, false)
// Setup context so it may be cancelled the call has completed.
var cancel context.CancelFunc
ctx, cancel = context.WithCancel(ctx)
// Make sure the context is cancelled when the call has completed
// this makes sure resources are cleaned up.
defer cancel()
// Get a new instance of the EVM.
evm, vmError, err := d.b.GetEVM(ctx, msg, state, header, vm.Config{})
if err != nil {
return nil, err
}
// Wait for the context to be done and cancel the evm. Even if the
// EVM has finished, cancelling may be done (repeatedly)
go func() {
<-ctx.Done()
evm.Cancel()
}()
// Setup the gas pool (also for unmetered requests)
// and apply the message.
gp := new(core.GasPool).AddGas(math.MaxUint64)
res, gas, _, err := core.ApplyMessage(evm, msg, gp)
if err := vmError(); err != nil {
return nil, err
}
return res, err
}
type DPoSProtocolManager struct {
networkId uint64;
eth *JuchainService;
ethManager *ProtocolManager;
blockchain *core.BlockChain;
lock *sync.Mutex; // protects running
packager *dpos.Packager;
t1 *time.Timer; // global synchronized timer.
}
// NewProtocolManager returns a new obod sub protocol manager. The JuchainService sub protocol manages peers capable
// with the obod network.
func NewDPoSProtocolManager(eth *JuchainService, ethManager *ProtocolManager, config *config.ChainConfig, config2 *node.Config,
mode downloader.SyncMode, networkId uint64, blockchain *core.BlockChain, engine consensus.Engine) (*DPoSProtocolManager, error) {
// Set sender address or use a default if none specified
// Create the protocol manager with the base fields
manager := &DPoSProtocolManager{
networkId: networkId,
eth: eth,
ethManager: ethManager,
blockchain: blockchain,
lock: &sync.Mutex{},
packager: dpos.NewPackager(config, engine, DefaultConfig.Etherbase, eth, eth.EventMux()),
}
currNodeId = discover.PubkeyID(&config2.NodeKey().PublicKey).TerminalString();
currNodeIdHash = common.Hex2Bytes(currNodeId);
if TestMode {
VotingAccessor = &DelegatorAccessorTestImpl{currNodeId:currNodeId, currNodeIdHash:currNodeIdHash};
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh();
} else {
/**
var addr common.Address;
if wallets := eth.ApiBackend.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
addr = accounts[0].Address
}
}
if addr == (common.Address{}) {
log.Error("We must have a default address to activate dpos delegator consensus")
return nil, errors.New("we must have a default address to activate dpos delegator consensus")
}*/
dappabi, err := abi.JSON(strings.NewReader(core.DPOSBallotABI))
if err != nil {
log.Error("Unable to load DPoS Ballot ABI object!")
return nil, errors.New("Unable to load DPoS Ballot ABI object!")
}
VotingAccessor = &DelegatorAccessorImpl{dappabi: dappabi, blockchain: eth.blockchain, b: eth.ApiBackend};
DelegatorsTable, DelegatorNodeInfo, err = VotingAccessor.Refresh();
}
return manager, nil;
}
func (pm *DPoSProtocolManager) Start() {
if pm.isDelegatedNode() {
log.Info("I am a delegator.")
pm.packager.Start();
go pm.schedule();
if !TestMode {
time.AfterFunc(time.Second*time.Duration(SmallPeriodInterval), pm.syncDelegatedNodeSafely) //initial attempt.
}
}
}
func (pm *DPoSProtocolManager) schedule() {
t2 := time.NewTimer(time.Second * time.Duration(1))
for {
select {
case <-t2.C:
go pm.roundRobinSafely();
t2 = time.NewTimer(time.Second * time.Duration(1))
}
}
}
// this is a loop function for electing node.
func (pm *DPoSProtocolManager) syncDelegatedNodeSafely() {
if !pm.isDelegatedNode() {
// only candidate node is able to participant to this process.
return;
}
pm.lock.Lock()
defer pm.lock.Unlock()
log.Info("Preparing for next big period...");
// pull the newest delegators from voting contract.
a, b, err0 := VotingAccessor.Refresh()
if err0 != nil {
log.Error(err0.Error())
return;
}
DelegatorsTable = a
DelegatorNodeInfo = b
if uint8(len(GigPeriodHistory)) >= BigPeriodHistorySize {
GigPeriodHistory = GigPeriodHistory[1:] //remove the first old one.
}
if len(DelegatorsTable) == 0 || pm.ethManager.peers.Len() == 0 {
log.Info("Sorry, could not detect any delegator!");
return;
}
round := uint64(1)
activeTime := uint64(time.Now().Unix() + int64(GigPeriodInterval))
if NextGigPeriodInstance != nil {
if !TestMode {
gap := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if gap > 2 || gap < -2 {
log.Warn(fmt.Sprintf("Scheduling of the new electing round is improper! current gap: %v seconds", gap))
//restart the scheduler
NextElectionInfo = nil;
go pm.syncDelegatedNodeSafely();
return;
}
}
round = NextGigPeriodInstance.round + 1
activeTime = GigPeriodInstance.activeTime + uint64(GigPeriodInterval)
// keep the big period history for block validation.
GigPeriodHistory[len(GigPeriodHistory)-1] = *NextGigPeriodInstance;
GigPeriodInstance = &GigPeriodTable{
NextGigPeriodInstance.round,
NextGigPeriodInstance.state,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
NextGigPeriodInstance.confirmedTickets,
NextGigPeriodInstance.confirmedBestNode,
NextGigPeriodInstance.activeTime,
};
log.Info(fmt.Sprintf("Switched the new big period round. %d ", GigPeriodInstance.round));
}
// make sure all delegators are synced at this round.
NextGigPeriodInstance = &GigPeriodTable{
round,
STATE_LOOKING,
DelegatorsTable,
SignCandidates(DelegatorsTable),
make(map[string]uint32),
make(map[string]*GigPeriodTable),
activeTime,
};
pm.trySyncAllDelegators()
}
func (pm *DPoSProtocolManager) trySyncAllDelegators() {
if TestMode {
return;
}
//send this round to all delegated peers.
//all delegated must giving the response in SYNC_BIGPERIOD_RESPONSE state.
for _, delegator := range NextGigPeriodInstance.delegatedNodes {
// make sure all delegator are alive.
if pm.ethManager.peers.Peer(delegator) == nil {
// try to add DelegatorNodeInfo[i] into peers table.
// but can't talk to it directly.
for i,e := range DelegatorsTable {
if e == delegator {
pm.eth.server.AddPeer(DelegatorNodeInfo[i]);
break;
}
}
} else {
err := pm.ethManager.peers.Peer(delegator).SendSyncBigPeriodRequest(
&SyncBigPeriodRequest{NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
currNodeIdHash});
if err != nil {
log.Debug("Error occurred while sending SyncBigPeriodRequest: " + err.Error())
}
}
}
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *DPoSProtocolManager) handleMsg(msg *p2p.Msg, p *peer) error {
pm.lock.Lock()
defer pm.lock.Unlock()
// Handle the message depending on its contents
switch {
case msg.Code == SYNC_BIGPERIOD_REQUEST:
var request SyncBigPeriodRequest;
if err := msg.Decode(&request); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if SignCandidates(request.DelegatedTable) != request.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
if DelegatorsTable == nil || len(DelegatorsTable) == 0 {
// i am not ready.
log.Info("I am not ready!!!")
return nil;
}
if request.Round == NextGigPeriodInstance.round {
if NextGigPeriodInstance.state == STATE_CONFIRMED {
log.Debug(fmt.Sprintf("I am in the agreed round %v", NextGigPeriodInstance.round));
// if i have already confirmed this round. send this round to peer.
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash});
} else {
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
if len(DelegatorsTable) < len(request.DelegatedTable) {
// refresh table if mismatch.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
if !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {
log.Debug("Delegators are mismatched in two tables.");
if TestMode {
return nil;
}
// both delegators are not matched, both lose the election power of this round.
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_DNUMBER,
currNodeIdHash});
}
}
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = request.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = request.DelegatedTableSign;
NextGigPeriodInstance.activeTime = request.ActiveTime;
pm.setNextRoundTimer();//sync the timer.
log.Debug(fmt.Sprintf("Agreed this table %v as %v round", NextGigPeriodInstance.delegatedNodes, NextGigPeriodInstance.round));
if TestMode {
return nil;
}
// broadcast it to all peers again.
for _, peer := range pm.ethManager.peers.peers {
err := peer.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_CONFIRMED,
currNodeIdHash})
if (err != nil) {
log.Warn("Error occurred while sending VoteElectionRequest: " + err.Error())
}
}
}
} else if request.Round < NextGigPeriodInstance.round {
log.Debug(fmt.Sprintf("Mismatched request.round %v, CurrRound %v: ", request.Round, NextGigPeriodInstance.round))
if TestMode {
return nil;
}
return p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{
NextGigPeriodInstance.round,
NextGigPeriodInstance.activeTime,
NextGigPeriodInstance.delegatedNodes,
NextGigPeriodInstance.delegatedNodesSign,
STATE_MISMATCHED_ROUND,
currNodeIdHash});
} else if request.Round > NextGigPeriodInstance.round {
if (request.Round - NextElectionInfo.round) == 1 {
// the most reason could be the round timeframe switching later than this request.
// but we are continue switching as regular.
} else {
// attack happens.
}
}
case msg.Code == SYNC_BIGPERIOD_RESPONSE:
var response SyncBigPeriodResponse;
if err := msg.Decode(&response); err != nil {
return errResp(DPOSErrDecode, "%v: %v", msg, err);
}
if response.Round != NextGigPeriodInstance.round {
return nil;
}
if SignCandidates(response.DelegatedTable) != response.DelegatedTableSign {
return errResp(DPOSErroDelegatorSign, "");
}
nodeId := common.Bytes2Hex(response.NodeId)
log.Debug("Received SYNC Big Period response: " + nodeId);
NextGigPeriodInstance.confirmedTickets[nodeId] ++;
NextGigPeriodInstance.confirmedBestNode[nodeId] = &GigPeriodTable{
response.Round,
STATE_CONFIRMED,
response.DelegatedTable,
response.DelegatedTableSign,
nil,
nil,
response.ActiveTime,
};
maxTickets, bestNodeId := uint32(0), "";
for key, value := range NextGigPeriodInstance.confirmedTickets {
if maxTickets < value {
maxTickets = value;
bestNodeId = key;
}
}
if NextGigPeriodInstance.state == STATE_CONFIRMED {
// set the best node as the final state.
bestNode := NextGigPeriodInstance.confirmedBestNode[bestNodeId];
NextGigPeriodInstance.delegatedNodes = bestNode.delegatedNodes;
NextGigPeriodInstance.delegatedNodesSign = bestNode.delegatedNodesSign;
NextGigPeriodInstance.activeTime = bestNode.activeTime;
log.Debug(fmt.Sprintf("Updated the best table: %v", bestNode.delegatedNodes));
pm.setNextRoundTimer();
} else if NextGigPeriodInstance.state == STATE_LOOKING && uint32(NextGigPeriodInstance.confirmedTickets[bestNodeId]) > uint32(len(NextGigPeriodInstance.delegatedNodes)) {
NextGigPeriodInstance.state = STATE_CONFIRMED;
NextGigPeriodInstance.delegatedNodes = response.DelegatedTable;
NextGigPeriodInstance.delegatedNodesSign = response.DelegatedTableSign;
NextGigPeriodInstance.activeTime = response.ActiveTime;
pm.setNextRoundTimer();
} else if response.State == STATE_MISMATCHED_ROUND {
// force to create new round
NextGigPeriodInstance = &GigPeriodTable{
response.Round,
STATE_LOOKING,
response.DelegatedTable,
response.DelegatedTableSign,
make(map[string]uint32),
make(map[string]*GigPeriodTable),
response.ActiveTime,
};
pm.trySyncAllDelegators()
} else if response.State == STATE_MISMATCHED_DNUMBER {
// refresh table only, and this node loses the election power of this round.
DelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()
}
return nil;
default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
return nil
}
func (pm *DPoSProtocolManager) setNextRoundTimer() {
leftTime := int64(NextGigPeriodInstance.activeTime) - time.Now().Unix()
if leftTime < 1 {
log.Warn("Discard this round due to the expiration of the active time.")
go pm.syncDelegatedNodeSafely()
return;
}
if pm.t1 != nil {
// potentially could be an issue if the timer is unable to be cancelled.
pm.t1.Stop()
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
} else {
pm.t1 = time.AfterFunc(time.Second*time.Duration(leftTime), pm.syncDelegatedNodeSafely)
}
log.Debug(fmt.Sprintf("scheduled for next round in %v seconds", leftTime))
}
// the node would not be a candidate if it is not qualified.
func (pm *DPoSProtocolManager) isDelegatedNode() bool {
if DelegatorsTable == nil {
return false;
}
for i :=0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == currNodeId {
return true;
}
}
return false;
}
func (pm *DPoSProtocolManager) | (nodeId string) bool {
if DelegatorsTable == nil {
return false;
}
for i :=0; i < len(DelegatorsTable); i++ {
if DelegatorsTable[i] == nodeId {
return true;
}
}
return false;
}
func (pm *DPoSProtocolManager) Stop() {
if pm.isDelegatedNode() {
pm.packager.Stop();
}
// Quit the sync loop.
log.Info("DPoS Consensus stopped")
}
func (pm *DPoSProtocolManager) newPeer(pv uint, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
return newPeer(pv, p, newMeteredMsgWriter(rw))
}
// --------------------Packaging Process-------------------//
// start round robin for packaging blocks in small period.
func (self *DPoSProtocolManager) roundRobinSafely() {
if !self.isDelegatedNode() || GigPeriodInstance == nil {
return;
}
log.Info(GigPeriodInstance.whosTurn())
// generate block by election node.
if GigPeriodInstance.isMyTurn() {
log.Debug("it's my turn now " + time.Now().String());
round := self.blockchain.CurrentFastBlock().Header().Round;
block := self.packager.GenerateNewBlock(round+1, currNodeId);
block.ToString();
//response := &PackageResponse{block.Round(), currNodeId, block.Hash(),DPOSMSG_SUCCESS};
}
}
// this GigPeriodTable only serves for delegators.
type GigPeriodTable struct {
round uint64; // synchronization round
state uint8; // STATE_LOOKING
delegatedNodes []string; // all 31 nodes id
delegatedNodesSign common.Hash; // a security sign for all delegated nodes which can be verified from node array.
confirmedTickets map[string]uint32; // 31 node must be confirmed this ticket or must equal to delegatedNodes length.
confirmedBestNode map[string]*GigPeriodTable; // confirmed the next active time from all peers. <nodeid><GigPeriodTable>
activeTime uint64; // Unix timestamp for all nodes.
}
func (t *GigPeriodTable) wasHisTurn(round uint64, nodeId string, minedTime int64) bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == nodeId {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
if beatStartTime <= minedTime && (beatStartTime+ int64(SmallPeriodInterval)) >= minedTime {
return true;
}
}
}
// check the history.
if len(GigPeriodHistory) > 0 {
for _, v := range GigPeriodHistory {
if int64(v.activeTime) <= minedTime && (int64(v.activeTime) + int64(SmallPeriodInterval)) >= minedTime {
for i :=0; i < len(v.delegatedNodes); i++ {
if v.delegatedNodes[i] == nodeId {
//todo check round as well.
return true;
}
}
}
}
}
return false;
}
func (t *GigPeriodTable) isMyTurn() bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == currNodeId {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
currTime := time.Now().Unix()
// we only give 4s to avoid the mismatched timestamp issue of last packaging.
if beatStartTime <= currTime && (beatStartTime+ int64(SmallPeriodInterval)) > currTime {
return true;
}
}
}
return false;
}
func (t *GigPeriodTable) whosTurn() string {
currTime := time.Now().Unix()
for i :=0; i < len(t.delegatedNodes); i++ {
beatStartTime := int64(t.activeTime) + (int64(i) * int64(SmallPeriodInterval))
if beatStartTime <= currTime && (beatStartTime+ int64(SmallPeriodInterval)) >= currTime {
return "Who's turn: {position: " + strconv.Itoa(i) + ", delegator: " + t.delegatedNodes[i] + " }";
}
}
return "";
}
func (t *GigPeriodTable) isDelegatedNode(nodeId string) bool {
for i :=0; i < len(t.delegatedNodes); i++ {
if t.delegatedNodes[i] == nodeId {
return true;
}
}
return false;
}
func RemoveCanditate(s []string, i int) []string {
s[len(s)-1], s[i] = s[i], s[len(s)-1]
return s[:len(s)-1]
}
func SignCandidates(candidates []string) common.Hash {
var signCandidates = []byte{}
hw := sha3.NewKeccak256()
rlp.Encode(hw, candidates)
return common.BytesToHash(hw.Sum(signCandidates))
} | isDelegatedNode2 | identifier_name |
staging.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to provide a hook for staging.
Some App Engine runtimes require an additional staging step before deployment
(e.g. when deploying compiled artifacts, or vendoring code that normally lives
outside of the app directory). This module contains (1) a registry mapping
runtime/environment combinations to staging commands, and (2) code to run said
commands.
The interface is defined as follows:
- A staging command is an executable (binary or script) that takes two
positional parameters: the path of the `<service>.yaml` in the directory
containing the unstaged application code, and the path of an empty directory
in which to stage the application code.
- On success, the STDOUT and STDERR of the staging command are logged at the
INFO level. On failure, a StagingCommandFailedError is raised containing the
STDOUT and STDERR of the staging command (which are surfaced to the user as an
ERROR message).
"""
import cStringIO
import os
import tempfile
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
_JAVA_APPCFG_ENTRY_POINT = 'com.google.appengine.tools.admin.AppCfg'
_JAVA_APPCFG_STAGE_FLAGS = [
'--enable_jar_splitting',
'--enable_jar_classes']
_STAGING_COMMAND_OUTPUT_TEMPLATE = """\
------------------------------------ STDOUT ------------------------------------
{out}\
------------------------------------ STDERR ------------------------------------
{err}\
--------------------------------------------------------------------------------
"""
class NoSdkRootError(exceptions.Error):
def __init__(self):
super(NoSdkRootError, self).__init__(
'No SDK root could be found. Please check your installation.')
class StagingCommandFailedError(exceptions.Error):
def __init__(self, args, return_code, output_message):
super(StagingCommandFailedError, self).__init__(
'Staging command [{0}] failed with return code [{1}].\n\n{2}'.format(
' '.join(args), return_code, output_message))
def _StagingProtocolMapper(command_path, descriptor, app_dir, staging_dir):
return [command_path, descriptor, app_dir, staging_dir]
def _JavaStagingMapper(command_path, descriptor, app_dir, staging_dir):
"""Map a java staging request to the right args.
Args:
command_path: str, path to the jar tool file.
descriptor: str, path to the `appengine-web.xml`
app_dir: str, path to the unstaged app directory
staging_dir: str, path to the empty staging dir
Raises:
java.JavaError, if Java is not installed.
Returns:
[str], args for executable invocation.
"""
del descriptor # Unused, app_dir is sufficient
java.CheckIfJavaIsInstalled('local staging for java')
java_bin = files.FindExecutableOnPath('java')
args = ([java_bin, '-classpath', command_path, _JAVA_APPCFG_ENTRY_POINT] +
_JAVA_APPCFG_STAGE_FLAGS + ['stage', app_dir, staging_dir])
return args
class _Command(object):
"""Represents a cross-platform command.
Paths are relative to the Cloud SDK Root directory.
Attributes:
nix_path: str, the path to the executable on Linux and OS X
windows_path: str, the path to the executable on Windows
component: str or None, the name of the Cloud SDK component which contains
the executable
mapper: fn or None, function that maps a staging invocation to a command.
"""
def __init__(self, nix_path, windows_path, component=None, mapper=None):
self.nix_path = nix_path
self.windows_path = windows_path
self.component = component
self.mapper = mapper or _StagingProtocolMapper
@property
def name(self):
if platforms.OperatingSystem.Current() is platforms.OperatingSystem.WINDOWS:
return self.windows_path
else:
return self.nix_path
def GetPath(self):
"""Returns the path to the command.
Returns:
str, the path to the command
Raises:
NoSdkRootError: if no Cloud SDK root could be found (and therefore the
command is not installed).
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise NoSdkRootError()
return os.path.join(sdk_root, self.name)
def EnsureInstalled(self):
if self.component is None:
|
msg = ('The component [{component}] is required for staging this '
'application.').format(component=self.component)
update_manager.UpdateManager.EnsureInstalledAndRestart([self.component],
msg=msg)
def Run(self, staging_area, descriptor, app_dir):
"""Invokes a staging command with a given <service>.yaml and temp dir.
Args:
staging_area: str, path to the staging area.
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
Returns:
str, the path to the staged directory.
Raises:
StagingCommandFailedError: if the staging command process exited non-zero.
"""
staging_dir = tempfile.mkdtemp(dir=staging_area)
args = self.mapper(self.GetPath(), descriptor, app_dir, staging_dir)
log.info('Executing staging command: [{0}]\n\n'.format(' '.join(args)))
out = cStringIO.StringIO()
err = cStringIO.StringIO()
return_code = execution_utils.Exec(args, no_exit=True, out_func=out.write,
err_func=err.write)
message = _STAGING_COMMAND_OUTPUT_TEMPLATE.format(out=out.getvalue(),
err=err.getvalue())
log.info(message)
if return_code:
raise StagingCommandFailedError(args, return_code, message)
return staging_dir
# Path to the go-app-stager binary
_GO_APP_STAGER_DIR = os.path.join('platform', 'google_appengine')
# Path to the jar which contains the staging command
_APPENGINE_TOOLS_JAR = os.path.join(
'platform', 'google_appengine', 'google', 'appengine', 'tools', 'java',
'lib', 'appengine-tools-api.jar')
# STAGING_REGISTRY is a map of (runtime, app-engine-environment) to executable
# path relative to Cloud SDK Root; it should look something like the following:
#
# from googlecloudsdk.api_lib.app import util
# STAGING_REGISTRY = {
# ('intercal', util.Environment.FLEX):
# _Command(
# os.path.join('command_dir', 'stage-intercal-flex.sh'),
# os.path.join('command_dir', 'stage-intercal-flex.exe'),
# component='app-engine-intercal'),
# ('x86-asm', util.Environment.STANDARD):
# _Command(
# os.path.join('command_dir', 'stage-x86-asm-standard'),
# os.path.join('command_dir', 'stage-x86-asm-standard.exe'),
# component='app-engine-intercal'),
# }
_STAGING_REGISTRY = {
('go', util.Environment.STANDARD):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.MANAGED_VMS):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.FLEX):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
}
# _STAGING_REGISTRY_BETA extends _STAGING_REGISTRY, overriding entries if the
# same key is used.
_STAGING_REGISTRY_BETA = {
('java-xml', util.Environment.STANDARD):
_Command(
_APPENGINE_TOOLS_JAR,
_APPENGINE_TOOLS_JAR,
component='app-engine-java',
mapper=_JavaStagingMapper)
}
class Stager(object):
def __init__(self, registry, staging_area):
self.registry = registry
self.staging_area = staging_area
def Stage(self, descriptor, app_dir, runtime, environment):
"""Stage the given deployable or do nothing if N/A.
Args:
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
runtime: str, the name of the runtime for the application to stage
environment: api_lib.app.util.Environment, the environment for the
application to stage
Returns:
str, the path to the staged directory or None if no corresponding staging
command was found.
Raises:
NoSdkRootError: if no Cloud SDK installation root could be found.
StagingCommandFailedError: if the staging command process exited non-zero.
"""
command = self.registry.get((runtime, environment))
if not command:
# Many runtimes do not require a staging step; this isn't a problem.
log.debug(('No staging command found for runtime [%s] and environment '
'[%s].'), runtime, environment.name)
return
command.EnsureInstalled()
return command.Run(self.staging_area, descriptor, app_dir)
def GetStager(staging_area):
"""Get the default stager."""
return Stager(_STAGING_REGISTRY, staging_area)
def GetBetaStager(staging_area):
"""Get the beta stager, used for `gcloud beta *` commands."""
registry = _STAGING_REGISTRY.copy()
registry.update(_STAGING_REGISTRY_BETA)
return Stager(registry, staging_area)
def GetNoopStager(staging_area):
"""Get a stager with an empty registry."""
return Stager({}, staging_area)
| return | conditional_block |
staging.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to provide a hook for staging.
Some App Engine runtimes require an additional staging step before deployment
(e.g. when deploying compiled artifacts, or vendoring code that normally lives
outside of the app directory). This module contains (1) a registry mapping
runtime/environment combinations to staging commands, and (2) code to run said
commands.
The interface is defined as follows:
- A staging command is an executable (binary or script) that takes two
positional parameters: the path of the `<service>.yaml` in the directory
containing the unstaged application code, and the path of an empty directory
in which to stage the application code.
- On success, the STDOUT and STDERR of the staging command are logged at the
INFO level. On failure, a StagingCommandFailedError is raised containing the
STDOUT and STDERR of the staging command (which are surfaced to the user as an
ERROR message).
"""
import cStringIO
import os
import tempfile
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
_JAVA_APPCFG_ENTRY_POINT = 'com.google.appengine.tools.admin.AppCfg'
_JAVA_APPCFG_STAGE_FLAGS = [
'--enable_jar_splitting',
'--enable_jar_classes']
_STAGING_COMMAND_OUTPUT_TEMPLATE = """\
------------------------------------ STDOUT ------------------------------------
{out}\
------------------------------------ STDERR ------------------------------------
{err}\
--------------------------------------------------------------------------------
"""
class NoSdkRootError(exceptions.Error):
def | (self):
super(NoSdkRootError, self).__init__(
'No SDK root could be found. Please check your installation.')
class StagingCommandFailedError(exceptions.Error):
def __init__(self, args, return_code, output_message):
super(StagingCommandFailedError, self).__init__(
'Staging command [{0}] failed with return code [{1}].\n\n{2}'.format(
' '.join(args), return_code, output_message))
def _StagingProtocolMapper(command_path, descriptor, app_dir, staging_dir):
return [command_path, descriptor, app_dir, staging_dir]
def _JavaStagingMapper(command_path, descriptor, app_dir, staging_dir):
"""Map a java staging request to the right args.
Args:
command_path: str, path to the jar tool file.
descriptor: str, path to the `appengine-web.xml`
app_dir: str, path to the unstaged app directory
staging_dir: str, path to the empty staging dir
Raises:
java.JavaError, if Java is not installed.
Returns:
[str], args for executable invocation.
"""
del descriptor # Unused, app_dir is sufficient
java.CheckIfJavaIsInstalled('local staging for java')
java_bin = files.FindExecutableOnPath('java')
args = ([java_bin, '-classpath', command_path, _JAVA_APPCFG_ENTRY_POINT] +
_JAVA_APPCFG_STAGE_FLAGS + ['stage', app_dir, staging_dir])
return args
class _Command(object):
"""Represents a cross-platform command.
Paths are relative to the Cloud SDK Root directory.
Attributes:
nix_path: str, the path to the executable on Linux and OS X
windows_path: str, the path to the executable on Windows
component: str or None, the name of the Cloud SDK component which contains
the executable
mapper: fn or None, function that maps a staging invocation to a command.
"""
def __init__(self, nix_path, windows_path, component=None, mapper=None):
self.nix_path = nix_path
self.windows_path = windows_path
self.component = component
self.mapper = mapper or _StagingProtocolMapper
@property
def name(self):
if platforms.OperatingSystem.Current() is platforms.OperatingSystem.WINDOWS:
return self.windows_path
else:
return self.nix_path
def GetPath(self):
"""Returns the path to the command.
Returns:
str, the path to the command
Raises:
NoSdkRootError: if no Cloud SDK root could be found (and therefore the
command is not installed).
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise NoSdkRootError()
return os.path.join(sdk_root, self.name)
def EnsureInstalled(self):
if self.component is None:
return
msg = ('The component [{component}] is required for staging this '
'application.').format(component=self.component)
update_manager.UpdateManager.EnsureInstalledAndRestart([self.component],
msg=msg)
def Run(self, staging_area, descriptor, app_dir):
"""Invokes a staging command with a given <service>.yaml and temp dir.
Args:
staging_area: str, path to the staging area.
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
Returns:
str, the path to the staged directory.
Raises:
StagingCommandFailedError: if the staging command process exited non-zero.
"""
staging_dir = tempfile.mkdtemp(dir=staging_area)
args = self.mapper(self.GetPath(), descriptor, app_dir, staging_dir)
log.info('Executing staging command: [{0}]\n\n'.format(' '.join(args)))
out = cStringIO.StringIO()
err = cStringIO.StringIO()
return_code = execution_utils.Exec(args, no_exit=True, out_func=out.write,
err_func=err.write)
message = _STAGING_COMMAND_OUTPUT_TEMPLATE.format(out=out.getvalue(),
err=err.getvalue())
log.info(message)
if return_code:
raise StagingCommandFailedError(args, return_code, message)
return staging_dir
# Path to the go-app-stager binary
_GO_APP_STAGER_DIR = os.path.join('platform', 'google_appengine')
# Path to the jar which contains the staging command
_APPENGINE_TOOLS_JAR = os.path.join(
'platform', 'google_appengine', 'google', 'appengine', 'tools', 'java',
'lib', 'appengine-tools-api.jar')
# STAGING_REGISTRY is a map of (runtime, app-engine-environment) to executable
# path relative to Cloud SDK Root; it should look something like the following:
#
# from googlecloudsdk.api_lib.app import util
# STAGING_REGISTRY = {
# ('intercal', util.Environment.FLEX):
# _Command(
# os.path.join('command_dir', 'stage-intercal-flex.sh'),
# os.path.join('command_dir', 'stage-intercal-flex.exe'),
# component='app-engine-intercal'),
# ('x86-asm', util.Environment.STANDARD):
# _Command(
# os.path.join('command_dir', 'stage-x86-asm-standard'),
# os.path.join('command_dir', 'stage-x86-asm-standard.exe'),
# component='app-engine-intercal'),
# }
_STAGING_REGISTRY = {
('go', util.Environment.STANDARD):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.MANAGED_VMS):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.FLEX):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
}
# _STAGING_REGISTRY_BETA extends _STAGING_REGISTRY, overriding entries if the
# same key is used.
_STAGING_REGISTRY_BETA = {
('java-xml', util.Environment.STANDARD):
_Command(
_APPENGINE_TOOLS_JAR,
_APPENGINE_TOOLS_JAR,
component='app-engine-java',
mapper=_JavaStagingMapper)
}
class Stager(object):
def __init__(self, registry, staging_area):
self.registry = registry
self.staging_area = staging_area
def Stage(self, descriptor, app_dir, runtime, environment):
"""Stage the given deployable or do nothing if N/A.
Args:
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
runtime: str, the name of the runtime for the application to stage
environment: api_lib.app.util.Environment, the environment for the
application to stage
Returns:
str, the path to the staged directory or None if no corresponding staging
command was found.
Raises:
NoSdkRootError: if no Cloud SDK installation root could be found.
StagingCommandFailedError: if the staging command process exited non-zero.
"""
command = self.registry.get((runtime, environment))
if not command:
# Many runtimes do not require a staging step; this isn't a problem.
log.debug(('No staging command found for runtime [%s] and environment '
'[%s].'), runtime, environment.name)
return
command.EnsureInstalled()
return command.Run(self.staging_area, descriptor, app_dir)
def GetStager(staging_area):
"""Get the default stager."""
return Stager(_STAGING_REGISTRY, staging_area)
def GetBetaStager(staging_area):
"""Get the beta stager, used for `gcloud beta *` commands."""
registry = _STAGING_REGISTRY.copy()
registry.update(_STAGING_REGISTRY_BETA)
return Stager(registry, staging_area)
def GetNoopStager(staging_area):
"""Get a stager with an empty registry."""
return Stager({}, staging_area)
| __init__ | identifier_name |
staging.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to provide a hook for staging.
Some App Engine runtimes require an additional staging step before deployment
(e.g. when deploying compiled artifacts, or vendoring code that normally lives
outside of the app directory). This module contains (1) a registry mapping
runtime/environment combinations to staging commands, and (2) code to run said
commands.
The interface is defined as follows:
- A staging command is an executable (binary or script) that takes two
positional parameters: the path of the `<service>.yaml` in the directory
containing the unstaged application code, and the path of an empty directory
in which to stage the application code.
- On success, the STDOUT and STDERR of the staging command are logged at the
INFO level. On failure, a StagingCommandFailedError is raised containing the
STDOUT and STDERR of the staging command (which are surfaced to the user as an
ERROR message).
"""
import cStringIO
import os
import tempfile
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
_JAVA_APPCFG_ENTRY_POINT = 'com.google.appengine.tools.admin.AppCfg'
_JAVA_APPCFG_STAGE_FLAGS = [
'--enable_jar_splitting',
'--enable_jar_classes']
_STAGING_COMMAND_OUTPUT_TEMPLATE = """\
------------------------------------ STDOUT ------------------------------------
{out}\
------------------------------------ STDERR ------------------------------------
{err}\
--------------------------------------------------------------------------------
"""
class NoSdkRootError(exceptions.Error):
def __init__(self):
super(NoSdkRootError, self).__init__(
'No SDK root could be found. Please check your installation.')
class StagingCommandFailedError(exceptions.Error):
def __init__(self, args, return_code, output_message):
super(StagingCommandFailedError, self).__init__(
'Staging command [{0}] failed with return code [{1}].\n\n{2}'.format(
' '.join(args), return_code, output_message))
def _StagingProtocolMapper(command_path, descriptor, app_dir, staging_dir):
return [command_path, descriptor, app_dir, staging_dir]
def _JavaStagingMapper(command_path, descriptor, app_dir, staging_dir):
"""Map a java staging request to the right args.
Args:
command_path: str, path to the jar tool file.
descriptor: str, path to the `appengine-web.xml`
app_dir: str, path to the unstaged app directory
staging_dir: str, path to the empty staging dir
Raises:
java.JavaError, if Java is not installed.
Returns:
[str], args for executable invocation.
"""
del descriptor # Unused, app_dir is sufficient
java.CheckIfJavaIsInstalled('local staging for java')
java_bin = files.FindExecutableOnPath('java')
args = ([java_bin, '-classpath', command_path, _JAVA_APPCFG_ENTRY_POINT] +
_JAVA_APPCFG_STAGE_FLAGS + ['stage', app_dir, staging_dir])
return args
class _Command(object):
"""Represents a cross-platform command.
Paths are relative to the Cloud SDK Root directory.
Attributes:
nix_path: str, the path to the executable on Linux and OS X
windows_path: str, the path to the executable on Windows
component: str or None, the name of the Cloud SDK component which contains
the executable
mapper: fn or None, function that maps a staging invocation to a command.
"""
def __init__(self, nix_path, windows_path, component=None, mapper=None):
self.nix_path = nix_path
self.windows_path = windows_path
self.component = component
self.mapper = mapper or _StagingProtocolMapper
@property
def name(self):
if platforms.OperatingSystem.Current() is platforms.OperatingSystem.WINDOWS:
return self.windows_path
else:
return self.nix_path
def GetPath(self):
"""Returns the path to the command.
Returns:
str, the path to the command
Raises:
NoSdkRootError: if no Cloud SDK root could be found (and therefore the
command is not installed).
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise NoSdkRootError()
return os.path.join(sdk_root, self.name)
def EnsureInstalled(self):
if self.component is None:
return
msg = ('The component [{component}] is required for staging this '
'application.').format(component=self.component)
update_manager.UpdateManager.EnsureInstalledAndRestart([self.component],
msg=msg)
def Run(self, staging_area, descriptor, app_dir):
"""Invokes a staging command with a given <service>.yaml and temp dir.
Args:
staging_area: str, path to the staging area.
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
Returns:
str, the path to the staged directory.
Raises:
StagingCommandFailedError: if the staging command process exited non-zero.
"""
staging_dir = tempfile.mkdtemp(dir=staging_area)
args = self.mapper(self.GetPath(), descriptor, app_dir, staging_dir)
log.info('Executing staging command: [{0}]\n\n'.format(' '.join(args)))
out = cStringIO.StringIO()
err = cStringIO.StringIO()
return_code = execution_utils.Exec(args, no_exit=True, out_func=out.write,
err_func=err.write)
message = _STAGING_COMMAND_OUTPUT_TEMPLATE.format(out=out.getvalue(),
err=err.getvalue())
log.info(message)
if return_code:
raise StagingCommandFailedError(args, return_code, message)
return staging_dir
# Path to the go-app-stager binary
_GO_APP_STAGER_DIR = os.path.join('platform', 'google_appengine')
# Path to the jar which contains the staging command
_APPENGINE_TOOLS_JAR = os.path.join(
'platform', 'google_appengine', 'google', 'appengine', 'tools', 'java',
'lib', 'appengine-tools-api.jar')
# STAGING_REGISTRY is a map of (runtime, app-engine-environment) to executable
# path relative to Cloud SDK Root; it should look something like the following:
#
# from googlecloudsdk.api_lib.app import util
# STAGING_REGISTRY = {
# ('intercal', util.Environment.FLEX):
# _Command(
# os.path.join('command_dir', 'stage-intercal-flex.sh'),
# os.path.join('command_dir', 'stage-intercal-flex.exe'),
# component='app-engine-intercal'),
# ('x86-asm', util.Environment.STANDARD):
# _Command(
# os.path.join('command_dir', 'stage-x86-asm-standard'),
# os.path.join('command_dir', 'stage-x86-asm-standard.exe'),
# component='app-engine-intercal'),
# }
_STAGING_REGISTRY = {
('go', util.Environment.STANDARD):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.MANAGED_VMS):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.FLEX):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
}
# _STAGING_REGISTRY_BETA extends _STAGING_REGISTRY, overriding entries if the
# same key is used.
_STAGING_REGISTRY_BETA = {
('java-xml', util.Environment.STANDARD):
_Command(
_APPENGINE_TOOLS_JAR,
_APPENGINE_TOOLS_JAR,
component='app-engine-java',
mapper=_JavaStagingMapper)
}
class Stager(object):
def __init__(self, registry, staging_area):
|
def Stage(self, descriptor, app_dir, runtime, environment):
"""Stage the given deployable or do nothing if N/A.
Args:
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
runtime: str, the name of the runtime for the application to stage
environment: api_lib.app.util.Environment, the environment for the
application to stage
Returns:
str, the path to the staged directory or None if no corresponding staging
command was found.
Raises:
NoSdkRootError: if no Cloud SDK installation root could be found.
StagingCommandFailedError: if the staging command process exited non-zero.
"""
command = self.registry.get((runtime, environment))
if not command:
# Many runtimes do not require a staging step; this isn't a problem.
log.debug(('No staging command found for runtime [%s] and environment '
'[%s].'), runtime, environment.name)
return
command.EnsureInstalled()
return command.Run(self.staging_area, descriptor, app_dir)
def GetStager(staging_area):
"""Get the default stager."""
return Stager(_STAGING_REGISTRY, staging_area)
def GetBetaStager(staging_area):
"""Get the beta stager, used for `gcloud beta *` commands."""
registry = _STAGING_REGISTRY.copy()
registry.update(_STAGING_REGISTRY_BETA)
return Stager(registry, staging_area)
def GetNoopStager(staging_area):
"""Get a stager with an empty registry."""
return Stager({}, staging_area)
| self.registry = registry
self.staging_area = staging_area | identifier_body |
staging.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to provide a hook for staging.
Some App Engine runtimes require an additional staging step before deployment
(e.g. when deploying compiled artifacts, or vendoring code that normally lives
outside of the app directory). This module contains (1) a registry mapping
runtime/environment combinations to staging commands, and (2) code to run said
commands.
The interface is defined as follows:
| - On success, the STDOUT and STDERR of the staging command are logged at the
INFO level. On failure, a StagingCommandFailedError is raised containing the
STDOUT and STDERR of the staging command (which are surfaced to the user as an
ERROR message).
"""
import cStringIO
import os
import tempfile
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
_JAVA_APPCFG_ENTRY_POINT = 'com.google.appengine.tools.admin.AppCfg'
_JAVA_APPCFG_STAGE_FLAGS = [
'--enable_jar_splitting',
'--enable_jar_classes']
_STAGING_COMMAND_OUTPUT_TEMPLATE = """\
------------------------------------ STDOUT ------------------------------------
{out}\
------------------------------------ STDERR ------------------------------------
{err}\
--------------------------------------------------------------------------------
"""
class NoSdkRootError(exceptions.Error):
def __init__(self):
super(NoSdkRootError, self).__init__(
'No SDK root could be found. Please check your installation.')
class StagingCommandFailedError(exceptions.Error):
def __init__(self, args, return_code, output_message):
super(StagingCommandFailedError, self).__init__(
'Staging command [{0}] failed with return code [{1}].\n\n{2}'.format(
' '.join(args), return_code, output_message))
def _StagingProtocolMapper(command_path, descriptor, app_dir, staging_dir):
return [command_path, descriptor, app_dir, staging_dir]
def _JavaStagingMapper(command_path, descriptor, app_dir, staging_dir):
"""Map a java staging request to the right args.
Args:
command_path: str, path to the jar tool file.
descriptor: str, path to the `appengine-web.xml`
app_dir: str, path to the unstaged app directory
staging_dir: str, path to the empty staging dir
Raises:
java.JavaError, if Java is not installed.
Returns:
[str], args for executable invocation.
"""
del descriptor # Unused, app_dir is sufficient
java.CheckIfJavaIsInstalled('local staging for java')
java_bin = files.FindExecutableOnPath('java')
args = ([java_bin, '-classpath', command_path, _JAVA_APPCFG_ENTRY_POINT] +
_JAVA_APPCFG_STAGE_FLAGS + ['stage', app_dir, staging_dir])
return args
class _Command(object):
"""Represents a cross-platform command.
Paths are relative to the Cloud SDK Root directory.
Attributes:
nix_path: str, the path to the executable on Linux and OS X
windows_path: str, the path to the executable on Windows
component: str or None, the name of the Cloud SDK component which contains
the executable
mapper: fn or None, function that maps a staging invocation to a command.
"""
def __init__(self, nix_path, windows_path, component=None, mapper=None):
self.nix_path = nix_path
self.windows_path = windows_path
self.component = component
self.mapper = mapper or _StagingProtocolMapper
@property
def name(self):
if platforms.OperatingSystem.Current() is platforms.OperatingSystem.WINDOWS:
return self.windows_path
else:
return self.nix_path
def GetPath(self):
"""Returns the path to the command.
Returns:
str, the path to the command
Raises:
NoSdkRootError: if no Cloud SDK root could be found (and therefore the
command is not installed).
"""
sdk_root = config.Paths().sdk_root
if not sdk_root:
raise NoSdkRootError()
return os.path.join(sdk_root, self.name)
def EnsureInstalled(self):
if self.component is None:
return
msg = ('The component [{component}] is required for staging this '
'application.').format(component=self.component)
update_manager.UpdateManager.EnsureInstalledAndRestart([self.component],
msg=msg)
def Run(self, staging_area, descriptor, app_dir):
"""Invokes a staging command with a given <service>.yaml and temp dir.
Args:
staging_area: str, path to the staging area.
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
Returns:
str, the path to the staged directory.
Raises:
StagingCommandFailedError: if the staging command process exited non-zero.
"""
staging_dir = tempfile.mkdtemp(dir=staging_area)
args = self.mapper(self.GetPath(), descriptor, app_dir, staging_dir)
log.info('Executing staging command: [{0}]\n\n'.format(' '.join(args)))
out = cStringIO.StringIO()
err = cStringIO.StringIO()
return_code = execution_utils.Exec(args, no_exit=True, out_func=out.write,
err_func=err.write)
message = _STAGING_COMMAND_OUTPUT_TEMPLATE.format(out=out.getvalue(),
err=err.getvalue())
log.info(message)
if return_code:
raise StagingCommandFailedError(args, return_code, message)
return staging_dir
# Path to the go-app-stager binary
_GO_APP_STAGER_DIR = os.path.join('platform', 'google_appengine')
# Path to the jar which contains the staging command
_APPENGINE_TOOLS_JAR = os.path.join(
'platform', 'google_appengine', 'google', 'appengine', 'tools', 'java',
'lib', 'appengine-tools-api.jar')
# STAGING_REGISTRY is a map of (runtime, app-engine-environment) to executable
# path relative to Cloud SDK Root; it should look something like the following:
#
# from googlecloudsdk.api_lib.app import util
# STAGING_REGISTRY = {
# ('intercal', util.Environment.FLEX):
# _Command(
# os.path.join('command_dir', 'stage-intercal-flex.sh'),
# os.path.join('command_dir', 'stage-intercal-flex.exe'),
# component='app-engine-intercal'),
# ('x86-asm', util.Environment.STANDARD):
# _Command(
# os.path.join('command_dir', 'stage-x86-asm-standard'),
# os.path.join('command_dir', 'stage-x86-asm-standard.exe'),
# component='app-engine-intercal'),
# }
_STAGING_REGISTRY = {
('go', util.Environment.STANDARD):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.MANAGED_VMS):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
('go', util.Environment.FLEX):
_Command(
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager'),
os.path.join(_GO_APP_STAGER_DIR, 'go-app-stager.exe'),
component='app-engine-go'),
}
# _STAGING_REGISTRY_BETA extends _STAGING_REGISTRY, overriding entries if the
# same key is used.
_STAGING_REGISTRY_BETA = {
('java-xml', util.Environment.STANDARD):
_Command(
_APPENGINE_TOOLS_JAR,
_APPENGINE_TOOLS_JAR,
component='app-engine-java',
mapper=_JavaStagingMapper)
}
class Stager(object):
def __init__(self, registry, staging_area):
self.registry = registry
self.staging_area = staging_area
def Stage(self, descriptor, app_dir, runtime, environment):
"""Stage the given deployable or do nothing if N/A.
Args:
descriptor: str, path to the unstaged <service>.yaml or appengine-web.xml
app_dir: str, path to the unstaged app directory
runtime: str, the name of the runtime for the application to stage
environment: api_lib.app.util.Environment, the environment for the
application to stage
Returns:
str, the path to the staged directory or None if no corresponding staging
command was found.
Raises:
NoSdkRootError: if no Cloud SDK installation root could be found.
StagingCommandFailedError: if the staging command process exited non-zero.
"""
command = self.registry.get((runtime, environment))
if not command:
# Many runtimes do not require a staging step; this isn't a problem.
log.debug(('No staging command found for runtime [%s] and environment '
'[%s].'), runtime, environment.name)
return
command.EnsureInstalled()
return command.Run(self.staging_area, descriptor, app_dir)
def GetStager(staging_area):
"""Get the default stager."""
return Stager(_STAGING_REGISTRY, staging_area)
def GetBetaStager(staging_area):
"""Get the beta stager, used for `gcloud beta *` commands."""
registry = _STAGING_REGISTRY.copy()
registry.update(_STAGING_REGISTRY_BETA)
return Stager(registry, staging_area)
def GetNoopStager(staging_area):
"""Get a stager with an empty registry."""
return Stager({}, staging_area) | - A staging command is an executable (binary or script) that takes two
positional parameters: the path of the `<service>.yaml` in the directory
containing the unstaged application code, and the path of an empty directory
in which to stage the application code. | random_line_split |
rozetka_webscrapper.py | import requests
import datetime
import model
import settings
from model.category import Category
from model.group import Group
from model.item import Item
from model.comment import Comment
from bs4 import BeautifulSoup
from driver import Driver
import files.file_reader as fr
import files.file_writer as fw
import json
def decode_str(unicodestr):
#encoded = unicodestr.encode()
#decoded = encoded.decode('unicode-escape')
return unicodestr
def parse_comment(comment):
parsed_comment = Comment()
comment_author = comment.find(class_="comment__author")
if comment_author:
comment_date = comment.find(class_="comment__date")
if comment_date:
parsed_comment.date = decode_str(comment_date.get_text())
comment_date.decompose()
parsed_comment.author = decode_str(comment_author.get_text())
comment_link = comment.find(class_="comment__link")
if comment_link:
parsed_comment.url = comment_link.get("href")
comment_vars_list = comment.find(class_="comment__vars-list") #sellers
#parse vars_list
parsed_comment_vars_list = []
if comment_vars_list:
comment_vars_lists = comment_vars_list.find_all(class_="comment__vars-item")
if comment_vars_lists:
for item in comment_vars_lists:
res = {}
label = item.find(class_="comment__vars-label")
value = item.find(class_="comment__vars-value")
if label:
res["label"] = decode_str(label.get_text())
if value:
res["value"] = decode_str(value.get_text())
if res:
parsed_comment_vars_list.append(res)
parsed_comment.vars_list = parsed_comment_vars_list
#parse rating
comment_rating = comment.find("rz-comment-rating") #may be Empty
#has 5 items
#each star has fill(#0) or fiil(#1)
# svg path (tag) fill
if comment_rating:
stars = comment_rating.find_all("svg")
stars_count = 0;
for star in stars:
path = star.find("path")
if path:
fill = path.get("fill")
if fill == "url(#1)":
stars_count += 1
parsed_comment.rating = stars_count
#parse essentials
comment_text = comment.find(class_="comment__text")
if comment_text:
parsed_comment.text = decode_str(comment_text.get_text())
comment_essentials_list = comment.find_all(class_="comment__essentials-item") #has label and optional <dd> with text
parsed_essentials_list = []
if comment_essentials_list:
for essential in comment_essentials_list:
res = {}
essential_label = essential.find("dt", class_="comment__essentials-label")
essential_data = essential.find("dd")
if essential_label:
res["label"] = decode_str(essential_label.get_text())
if essential_data:
res["data"] = decode_str(essential_data.get_text())
parsed_essentials_list.append(res)
parsed_comment.essentials_list = parsed_essentials_list;
#parse attached photos
parsed_photos_urls = []
comment_attached_photos_urls = comment.find(class_="product-comments__photos-list")
if comment_attached_photos_urls:
photos_list = comment_attached_photos_urls.find_all(class_="product-comments__photos-item")
if photos_list:
for photo in photos_list:
img = photo.find("img")
if img:
url = img.get("src")
parsed_photos_urls.append(url)
parsed_comment.attached_photos_urls = parsed_photos_urls
return parsed_comment
def parse_item_page_for_comments(page):
soup = BeautifulSoup(page.text, 'html.parser')
# get the comments
comments = soup.find('comment-list')
#print(comments)
parsed_comments = []
# find all instances of that class (should return 25 as shown in the github main page)
if comments:
comments_list = comments.find_all("li", class_="product-comments__list-item")
comments_count = 0
if comments_list:
for comment in comments_list:
parsed_comments.append(parse_comment(comment))
comments_count += 1
if comments_count >= settings.COMMENTS_PER_PAGE_LIMIT :
break
return parsed_comments
def parse_item_page_for_description(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
description = soup.find(class_="product-about__description-content")
return decode_str(description.get_text()) if description else "" #runtime generated
def parse_item_page(url):
parsed_item = Item()
parsed_item.description = parse_item_page_for_description(url)
page = requests.get(url+'comments/')
parsed_item.url = url
soup = BeautifulSoup(page.text, 'html.parser')
title = soup.find(class_="product__title")
if title:
parsed_item.name = decode_str(title.get_text())
if page.reason == 'OK':
print('parse item:', parsed_item.name)
parsed_item.comments = parse_item_page_for_comments(page)
else:
parsed_item.error = page.reason
return parsed_item
def parse_specific_items_group(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_group = Group()
parsed_group.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="catalog-heading")
if title:
parsed_group.name = decode_str(title.get_text())
print('parse group:', parsed_group.name)
parsed_items = []
if html != "":
#parse items in group
item_wrappers = soup.find_all("div", class_="goods-tile")
if item_wrappers:
items_count = 0
for item_wrapper in item_wrappers:
item_link_holder = item_wrapper.find("a", class_="goods-tile__picture")
item_href = item_link_holder.get("href")
if item_href:
parsed_item = parse_item_page(item_href)
parsed_items.append(parsed_item)
items_count += 1
if items_count >= settings.ITEMS_PER_GROUP_LIMIT:
break
else:
print("PARSED ITEMS:", str(items_count) , "/", str(min(len(item_wrappers), settings.ITEMS_PER_GROUP_LIMIT)))
parsed_group.items = parsed_items
else:
parsed_group.error = "error"
return parsed_group
def | (url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_category = Category()
parsed_category.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="portal__heading")
if title:
parsed_category.name = decode_str(title.get_text())
print('parse category:', parsed_category.name)
parsed_groups = []
if html != "":
#parse groups in category
group_wrappers = soup.find_all("div", class_="tile-cats")
if group_wrappers:
groups_count = 0
for group_wrapper in group_wrappers:
group_link_holder = group_wrapper.find("a", class_="tile-cats__picture")
group_href = group_link_holder.get("href")
if group_href:
parsed_group = parse_specific_items_group(group_href)
parsed_groups.append(parsed_group)
groups_count += 1
if groups_count >= settings.GROUPS_PER_CATEGORY_LIMIT:
break
else:
print("PARSED GROUPS:", str(groups_count) , "/", str(min(len(group_wrappers), settings.GROUPS_PER_CATEGORY_LIMIT)))
parsed_category.groups = parsed_groups
else:
parsed_item.error = "error"
return parsed_category
def parse_root():
url = 'https://rozetka.com.ua/'
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_categories = []
categories_count = 0
soup = BeautifulSoup(html, 'html.parser')
link_holders = soup.find_all("a", class_="menu-categories__link")
if link_holders:
for link_holder in link_holders:
link = link_holder.get("href")
category = parse_category(link)
if category:
parsed_categories.append(category)
categories_count += 1
if categories_count >= settings.CATEGORIES_LIMIT:
break
else:
print("PARSED CATEGORIES:", str(categories_count) , "/", str(min(len(link_holders), settings.CATEGORIES_LIMIT)))
return parsed_categories
def scrap_rozetka_web_site():
time_start = datetime.datetime.now()
print("Parsing started at:", time_start)
parsed_site_data = parse_root()
time_end = datetime.datetime.now()
print("Parsing ended at:", time_end)
print("Parsing took:", time_end - time_start)
#### testing of correct json parsing
# for parsed_category in parsed_site_data :
# reparsed = model.category.Category.fromJson(parsed_category.toJson())
# #print(reparsed)
# #print(isinstance(reparsed, model.category.Category))
# for g in reparsed.groups:
# #print(g)
# #print(isinstance(g, model.group.Group))
# for i in g.items:
# #print(i)
# #print(isinstance(i, model.item.Item))
# for c in i.comments:
# #print(c)
# #print(isinstance(c, model.item.Comment))
Driver.close()
print("End of parsing!")
Driver.quit()
#filter empty categories
filtered_parsed_site_data = []
for c in parsed_site_data:
if len(c.groups) > 0:
#groups = []
#for g in c.groups:
# if len(g.items) > 0:
# groups.append(g)
#c.groups = groups
filtered_parsed_site_data.append(c)
print("Saving to file!")
fw.write_plain_iterable(
settings.SITE_SCRAP_RELATIVE_FILE_PATH_STRING.format(str(datetime.datetime.now()).replace(" ", "_").replace(":","").replace(".", "")).replace("/+", "/"),
filtered_parsed_site_data,
lambda o : o.toJson(),
encoding='utf-8'
)
return parsed_site_data
#top category 'https://rozetka.com.ua/computers-notebooks/c80253/'
#parsed_category = parse_category('https://rozetka.com.ua/computers-notebooks/c80253/')
#print(parsed_category)
#specific category 'https://rozetka.com.ua/notebooks/c80004/'
#parsed_group = parse_specific_items_group('https://rozetka.com.ua/notebooks/c80004/')
#print(parsed_group)
#specific item 'https://rozetka.com.ua/asus_90nr0351_m02460/p238731799/'
#parsed_item = parse_item_page_for_comments('https://rozetka.com.ua/asus_90nr0351_m02460/p238731799/')
#print(parsed_item)
#full parser
#get all categories by selector
#.menu-categories .menu-categories_type_main
#for each category scrap sub-categories by selector
#.tile-cats
#for each category scrap available items by
#.goods-tile
#then get
#.goods-tile__heading -> href property
#add /comments to href property
#get all divs by selector
#.comment
#parse comment
def check_application_mode(application_mode_str) :
for str in settings.APPLICATION_MODES_LIST:
if str == application_mode_str :
return True
return False
def run_clean() :
print("Running reading from site sequence")
result = scrap_rozetka_web_site()
print("Scraped", len(result), "categories.")
return result
def run_from_file() :
print("Running reading from previously stored data sequence")
filenames = fr.get_all_filenames('./'+settings.RESULT_FOLDER_NAME, settings.SITE_SCRAP_RESULT_FILE_NAME_PREFIX)
def parse_file_data (file_data) :
parsed_as_json = json.loads(file_data)
parsed = []
for cat in parsed_as_json:
parsed.append(Category(cat))
return parsed
result = []
for filename in filenames:
categories = fr.read_file_as(filename, lambda file_data : parse_file_data(file_data) )
result += categories
print("Loaded", len(result), "categories.")
return result
def run() :
mode = settings.APPLICATION_MODE
if not check_application_mode(mode):
print("Error!", "App mode:", mode, "not found in list", settings.APPLICATION_MODES_LIST)
raise RuntimeError(" ".join(["App mode:", mode, "not found in list", settings.APPLICATION_MODES_LIST]))
mode_runner_map = {
settings.APPLICATION_MODES_LIST[0] : lambda : run_clean(),
settings.APPLICATION_MODES_LIST[1] : lambda : run_from_file()
}
return mode_runner_map[mode]()
| parse_category | identifier_name |
rozetka_webscrapper.py | import requests
import datetime
import model
import settings
from model.category import Category
from model.group import Group
from model.item import Item
from model.comment import Comment
from bs4 import BeautifulSoup
from driver import Driver
import files.file_reader as fr
import files.file_writer as fw
import json
def decode_str(unicodestr):
#encoded = unicodestr.encode()
#decoded = encoded.decode('unicode-escape')
return unicodestr
def parse_comment(comment):
parsed_comment = Comment()
comment_author = comment.find(class_="comment__author")
if comment_author:
comment_date = comment.find(class_="comment__date")
if comment_date:
parsed_comment.date = decode_str(comment_date.get_text())
comment_date.decompose()
parsed_comment.author = decode_str(comment_author.get_text())
comment_link = comment.find(class_="comment__link")
if comment_link:
parsed_comment.url = comment_link.get("href")
comment_vars_list = comment.find(class_="comment__vars-list") #sellers
#parse vars_list
parsed_comment_vars_list = []
if comment_vars_list:
comment_vars_lists = comment_vars_list.find_all(class_="comment__vars-item")
if comment_vars_lists:
for item in comment_vars_lists:
res = {}
label = item.find(class_="comment__vars-label")
value = item.find(class_="comment__vars-value")
if label:
res["label"] = decode_str(label.get_text())
if value:
res["value"] = decode_str(value.get_text())
if res:
parsed_comment_vars_list.append(res)
parsed_comment.vars_list = parsed_comment_vars_list
#parse rating
comment_rating = comment.find("rz-comment-rating") #may be Empty
#has 5 items
#each star has fill(#0) or fiil(#1)
# svg path (tag) fill
if comment_rating:
stars = comment_rating.find_all("svg")
stars_count = 0;
for star in stars:
path = star.find("path")
if path:
fill = path.get("fill") | #parse essentials
comment_text = comment.find(class_="comment__text")
if comment_text:
parsed_comment.text = decode_str(comment_text.get_text())
comment_essentials_list = comment.find_all(class_="comment__essentials-item") #has label and optional <dd> with text
parsed_essentials_list = []
if comment_essentials_list:
for essential in comment_essentials_list:
res = {}
essential_label = essential.find("dt", class_="comment__essentials-label")
essential_data = essential.find("dd")
if essential_label:
res["label"] = decode_str(essential_label.get_text())
if essential_data:
res["data"] = decode_str(essential_data.get_text())
parsed_essentials_list.append(res)
parsed_comment.essentials_list = parsed_essentials_list;
#parse attached photos
parsed_photos_urls = []
comment_attached_photos_urls = comment.find(class_="product-comments__photos-list")
if comment_attached_photos_urls:
photos_list = comment_attached_photos_urls.find_all(class_="product-comments__photos-item")
if photos_list:
for photo in photos_list:
img = photo.find("img")
if img:
url = img.get("src")
parsed_photos_urls.append(url)
parsed_comment.attached_photos_urls = parsed_photos_urls
return parsed_comment
def parse_item_page_for_comments(page):
soup = BeautifulSoup(page.text, 'html.parser')
# get the comments
comments = soup.find('comment-list')
#print(comments)
parsed_comments = []
# find all instances of that class (should return 25 as shown in the github main page)
if comments:
comments_list = comments.find_all("li", class_="product-comments__list-item")
comments_count = 0
if comments_list:
for comment in comments_list:
parsed_comments.append(parse_comment(comment))
comments_count += 1
if comments_count >= settings.COMMENTS_PER_PAGE_LIMIT :
break
return parsed_comments
def parse_item_page_for_description(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
description = soup.find(class_="product-about__description-content")
return decode_str(description.get_text()) if description else "" #runtime generated
def parse_item_page(url):
parsed_item = Item()
parsed_item.description = parse_item_page_for_description(url)
page = requests.get(url+'comments/')
parsed_item.url = url
soup = BeautifulSoup(page.text, 'html.parser')
title = soup.find(class_="product__title")
if title:
parsed_item.name = decode_str(title.get_text())
if page.reason == 'OK':
print('parse item:', parsed_item.name)
parsed_item.comments = parse_item_page_for_comments(page)
else:
parsed_item.error = page.reason
return parsed_item
def parse_specific_items_group(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_group = Group()
parsed_group.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="catalog-heading")
if title:
parsed_group.name = decode_str(title.get_text())
print('parse group:', parsed_group.name)
parsed_items = []
if html != "":
#parse items in group
item_wrappers = soup.find_all("div", class_="goods-tile")
if item_wrappers:
items_count = 0
for item_wrapper in item_wrappers:
item_link_holder = item_wrapper.find("a", class_="goods-tile__picture")
item_href = item_link_holder.get("href")
if item_href:
parsed_item = parse_item_page(item_href)
parsed_items.append(parsed_item)
items_count += 1
if items_count >= settings.ITEMS_PER_GROUP_LIMIT:
break
else:
print("PARSED ITEMS:", str(items_count) , "/", str(min(len(item_wrappers), settings.ITEMS_PER_GROUP_LIMIT)))
parsed_group.items = parsed_items
else:
parsed_group.error = "error"
return parsed_group
def parse_category(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_category = Category()
parsed_category.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="portal__heading")
if title:
parsed_category.name = decode_str(title.get_text())
print('parse category:', parsed_category.name)
parsed_groups = []
if html != "":
#parse groups in category
group_wrappers = soup.find_all("div", class_="tile-cats")
if group_wrappers:
groups_count = 0
for group_wrapper in group_wrappers:
group_link_holder = group_wrapper.find("a", class_="tile-cats__picture")
group_href = group_link_holder.get("href")
if group_href:
parsed_group = parse_specific_items_group(group_href)
parsed_groups.append(parsed_group)
groups_count += 1
if groups_count >= settings.GROUPS_PER_CATEGORY_LIMIT:
break
else:
print("PARSED GROUPS:", str(groups_count) , "/", str(min(len(group_wrappers), settings.GROUPS_PER_CATEGORY_LIMIT)))
parsed_category.groups = parsed_groups
else:
parsed_item.error = "error"
return parsed_category
def parse_root():
url = 'https://rozetka.com.ua/'
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_categories = []
categories_count = 0
soup = BeautifulSoup(html, 'html.parser')
link_holders = soup.find_all("a", class_="menu-categories__link")
if link_holders:
for link_holder in link_holders:
link = link_holder.get("href")
category = parse_category(link)
if category:
parsed_categories.append(category)
categories_count += 1
if categories_count >= settings.CATEGORIES_LIMIT:
break
else:
print("PARSED CATEGORIES:", str(categories_count) , "/", str(min(len(link_holders), settings.CATEGORIES_LIMIT)))
return parsed_categories
def scrap_rozetka_web_site():
time_start = datetime.datetime.now()
print("Parsing started at:", time_start)
parsed_site_data = parse_root()
time_end = datetime.datetime.now()
print("Parsing ended at:", time_end)
print("Parsing took:", time_end - time_start)
#### testing of correct json parsing
# for parsed_category in parsed_site_data :
# reparsed = model.category.Category.fromJson(parsed_category.toJson())
# #print(reparsed)
# #print(isinstance(reparsed, model.category.Category))
# for g in reparsed.groups:
# #print(g)
# #print(isinstance(g, model.group.Group))
# for i in g.items:
# #print(i)
# #print(isinstance(i, model.item.Item))
# for c in i.comments:
# #print(c)
# #print(isinstance(c, model.item.Comment))
Driver.close()
print("End of parsing!")
Driver.quit()
#filter empty categories
filtered_parsed_site_data = []
for c in parsed_site_data:
if len(c.groups) > 0:
#groups = []
#for g in c.groups:
# if len(g.items) > 0:
# groups.append(g)
#c.groups = groups
filtered_parsed_site_data.append(c)
print("Saving to file!")
fw.write_plain_iterable(
settings.SITE_SCRAP_RELATIVE_FILE_PATH_STRING.format(str(datetime.datetime.now()).replace(" ", "_").replace(":","").replace(".", "")).replace("/+", "/"),
filtered_parsed_site_data,
lambda o : o.toJson(),
encoding='utf-8'
)
return parsed_site_data
#top category 'https://rozetka.com.ua/computers-notebooks/c80253/'
#parsed_category = parse_category('https://rozetka.com.ua/computers-notebooks/c80253/')
#print(parsed_category)
#specific category 'https://rozetka.com.ua/notebooks/c80004/'
#parsed_group = parse_specific_items_group('https://rozetka.com.ua/notebooks/c80004/')
#print(parsed_group)
#specific item 'https://rozetka.com.ua/asus_90nr0351_m02460/p238731799/'
#parsed_item = parse_item_page_for_comments('https://rozetka.com.ua/asus_90nr0351_m02460/p238731799/')
#print(parsed_item)
#full parser
#get all categories by selector
#.menu-categories .menu-categories_type_main
#for each category scrap sub-categories by selector
#.tile-cats
#for each category scrap available items by
#.goods-tile
#then get
#.goods-tile__heading -> href property
#add /comments to href property
#get all divs by selector
#.comment
#parse comment
def check_application_mode(application_mode_str) :
for str in settings.APPLICATION_MODES_LIST:
if str == application_mode_str :
return True
return False
def run_clean() :
print("Running reading from site sequence")
result = scrap_rozetka_web_site()
print("Scraped", len(result), "categories.")
return result
def run_from_file() :
print("Running reading from previously stored data sequence")
filenames = fr.get_all_filenames('./'+settings.RESULT_FOLDER_NAME, settings.SITE_SCRAP_RESULT_FILE_NAME_PREFIX)
def parse_file_data (file_data) :
parsed_as_json = json.loads(file_data)
parsed = []
for cat in parsed_as_json:
parsed.append(Category(cat))
return parsed
result = []
for filename in filenames:
categories = fr.read_file_as(filename, lambda file_data : parse_file_data(file_data) )
result += categories
print("Loaded", len(result), "categories.")
return result
def run() :
mode = settings.APPLICATION_MODE
if not check_application_mode(mode):
print("Error!", "App mode:", mode, "not found in list", settings.APPLICATION_MODES_LIST)
raise RuntimeError(" ".join(["App mode:", mode, "not found in list", settings.APPLICATION_MODES_LIST]))
mode_runner_map = {
settings.APPLICATION_MODES_LIST[0] : lambda : run_clean(),
settings.APPLICATION_MODES_LIST[1] : lambda : run_from_file()
}
return mode_runner_map[mode]() | if fill == "url(#1)":
stars_count += 1
parsed_comment.rating = stars_count
| random_line_split |
rozetka_webscrapper.py | import requests
import datetime
import model
import settings
from model.category import Category
from model.group import Group
from model.item import Item
from model.comment import Comment
from bs4 import BeautifulSoup
from driver import Driver
import files.file_reader as fr
import files.file_writer as fw
import json
def decode_str(unicodestr):
#encoded = unicodestr.encode()
#decoded = encoded.decode('unicode-escape')
|
def parse_comment(comment):
parsed_comment = Comment()
comment_author = comment.find(class_="comment__author")
if comment_author:
comment_date = comment.find(class_="comment__date")
if comment_date:
parsed_comment.date = decode_str(comment_date.get_text())
comment_date.decompose()
parsed_comment.author = decode_str(comment_author.get_text())
comment_link = comment.find(class_="comment__link")
if comment_link:
parsed_comment.url = comment_link.get("href")
comment_vars_list = comment.find(class_="comment__vars-list") #sellers
#parse vars_list
parsed_comment_vars_list = []
if comment_vars_list:
comment_vars_lists = comment_vars_list.find_all(class_="comment__vars-item")
if comment_vars_lists:
for item in comment_vars_lists:
res = {}
label = item.find(class_="comment__vars-label")
value = item.find(class_="comment__vars-value")
if label:
res["label"] = decode_str(label.get_text())
if value:
res["value"] = decode_str(value.get_text())
if res:
parsed_comment_vars_list.append(res)
parsed_comment.vars_list = parsed_comment_vars_list
#parse rating
comment_rating = comment.find("rz-comment-rating") #may be Empty
#has 5 items
#each star has fill(#0) or fiil(#1)
# svg path (tag) fill
if comment_rating:
stars = comment_rating.find_all("svg")
stars_count = 0;
for star in stars:
path = star.find("path")
if path:
fill = path.get("fill")
if fill == "url(#1)":
stars_count += 1
parsed_comment.rating = stars_count
#parse essentials
comment_text = comment.find(class_="comment__text")
if comment_text:
parsed_comment.text = decode_str(comment_text.get_text())
comment_essentials_list = comment.find_all(class_="comment__essentials-item") #has label and optional <dd> with text
parsed_essentials_list = []
if comment_essentials_list:
for essential in comment_essentials_list:
res = {}
essential_label = essential.find("dt", class_="comment__essentials-label")
essential_data = essential.find("dd")
if essential_label:
res["label"] = decode_str(essential_label.get_text())
if essential_data:
res["data"] = decode_str(essential_data.get_text())
parsed_essentials_list.append(res)
parsed_comment.essentials_list = parsed_essentials_list;
#parse attached photos
parsed_photos_urls = []
comment_attached_photos_urls = comment.find(class_="product-comments__photos-list")
if comment_attached_photos_urls:
photos_list = comment_attached_photos_urls.find_all(class_="product-comments__photos-item")
if photos_list:
for photo in photos_list:
img = photo.find("img")
if img:
url = img.get("src")
parsed_photos_urls.append(url)
parsed_comment.attached_photos_urls = parsed_photos_urls
return parsed_comment
def parse_item_page_for_comments(page):
soup = BeautifulSoup(page.text, 'html.parser')
# get the comments
comments = soup.find('comment-list')
#print(comments)
parsed_comments = []
# find all instances of that class (should return 25 as shown in the github main page)
if comments:
comments_list = comments.find_all("li", class_="product-comments__list-item")
comments_count = 0
if comments_list:
for comment in comments_list:
parsed_comments.append(parse_comment(comment))
comments_count += 1
if comments_count >= settings.COMMENTS_PER_PAGE_LIMIT :
break
return parsed_comments
def parse_item_page_for_description(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
description = soup.find(class_="product-about__description-content")
return decode_str(description.get_text()) if description else "" #runtime generated
def parse_item_page(url):
parsed_item = Item()
parsed_item.description = parse_item_page_for_description(url)
page = requests.get(url+'comments/')
parsed_item.url = url
soup = BeautifulSoup(page.text, 'html.parser')
title = soup.find(class_="product__title")
if title:
parsed_item.name = decode_str(title.get_text())
if page.reason == 'OK':
print('parse item:', parsed_item.name)
parsed_item.comments = parse_item_page_for_comments(page)
else:
parsed_item.error = page.reason
return parsed_item
def parse_specific_items_group(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_group = Group()
parsed_group.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="catalog-heading")
if title:
parsed_group.name = decode_str(title.get_text())
print('parse group:', parsed_group.name)
parsed_items = []
if html != "":
#parse items in group
item_wrappers = soup.find_all("div", class_="goods-tile")
if item_wrappers:
items_count = 0
for item_wrapper in item_wrappers:
item_link_holder = item_wrapper.find("a", class_="goods-tile__picture")
item_href = item_link_holder.get("href")
if item_href:
parsed_item = parse_item_page(item_href)
parsed_items.append(parsed_item)
items_count += 1
if items_count >= settings.ITEMS_PER_GROUP_LIMIT:
break
else:
print("PARSED ITEMS:", str(items_count) , "/", str(min(len(item_wrappers), settings.ITEMS_PER_GROUP_LIMIT)))
parsed_group.items = parsed_items
else:
parsed_group.error = "error"
return parsed_group
def parse_category(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_category = Category()
parsed_category.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="portal__heading")
if title:
parsed_category.name = decode_str(title.get_text())
print('parse category:', parsed_category.name)
parsed_groups = []
if html != "":
#parse groups in category
group_wrappers = soup.find_all("div", class_="tile-cats")
if group_wrappers:
groups_count = 0
for group_wrapper in group_wrappers:
group_link_holder = group_wrapper.find("a", class_="tile-cats__picture")
group_href = group_link_holder.get("href")
if group_href:
parsed_group = parse_specific_items_group(group_href)
parsed_groups.append(parsed_group)
groups_count += 1
if groups_count >= settings.GROUPS_PER_CATEGORY_LIMIT:
break
else:
print("PARSED GROUPS:", str(groups_count) , "/", str(min(len(group_wrappers), settings.GROUPS_PER_CATEGORY_LIMIT)))
parsed_category.groups = parsed_groups
else:
parsed_item.error = "error"
return parsed_category
def parse_root():
url = 'https://rozetka.com.ua/'
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_categories = []
categories_count = 0
soup = BeautifulSoup(html, 'html.parser')
link_holders = soup.find_all("a", class_="menu-categories__link")
if link_holders:
for link_holder in link_holders:
link = link_holder.get("href")
category = parse_category(link)
if category:
parsed_categories.append(category)
categories_count += 1
if categories_count >= settings.CATEGORIES_LIMIT:
break
else:
print("PARSED CATEGORIES:", str(categories_count) , "/", str(min(len(link_holders), settings.CATEGORIES_LIMIT)))
return parsed_categories
def scrap_rozetka_web_site():
time_start = datetime.datetime.now()
print("Parsing started at:", time_start)
parsed_site_data = parse_root()
time_end = datetime.datetime.now()
print("Parsing ended at:", time_end)
print("Parsing took:", time_end - time_start)
#### testing of correct json parsing
# for parsed_category in parsed_site_data :
# reparsed = model.category.Category.fromJson(parsed_category.toJson())
# #print(reparsed)
# #print(isinstance(reparsed, model.category.Category))
# for g in reparsed.groups:
# #print(g)
# #print(isinstance(g, model.group.Group))
# for i in g.items:
# #print(i)
# #print(isinstance(i, model.item.Item))
# for c in i.comments:
# #print(c)
# #print(isinstance(c, model.item.Comment))
Driver.close()
print("End of parsing!")
Driver.quit()
#filter empty categories
filtered_parsed_site_data = []
for c in parsed_site_data:
if len(c.groups) > 0:
#groups = []
#for g in c.groups:
# if len(g.items) > 0:
# groups.append(g)
#c.groups = groups
filtered_parsed_site_data.append(c)
print("Saving to file!")
fw.write_plain_iterable(
settings.SITE_SCRAP_RELATIVE_FILE_PATH_STRING.format(str(datetime.datetime.now()).replace(" ", "_").replace(":","").replace(".", "")).replace("/+", "/"),
filtered_parsed_site_data,
lambda o : o.toJson(),
encoding='utf-8'
)
return parsed_site_data
#top category 'https://rozetka.com.ua/computers-notebooks/c80253/'
#parsed_category = parse_category('https://rozetka.com.ua/computers-notebooks/c80253/')
#print(parsed_category)
#specific category 'https://rozetka.com.ua/notebooks/c80004/'
#parsed_group = parse_specific_items_group('https://rozetka.com.ua/notebooks/c80004/')
#print(parsed_group)
#specific item 'https://rozetka.com.ua/asus_90nr0351_m02460/p238731799/'
#parsed_item = parse_item_page_for_comments('https://rozetka.com.ua/asus_90nr0351_m02460/p238731799/')
#print(parsed_item)
#full parser
#get all categories by selector
#.menu-categories .menu-categories_type_main
#for each category scrap sub-categories by selector
#.tile-cats
#for each category scrap available items by
#.goods-tile
#then get
#.goods-tile__heading -> href property
#add /comments to href property
#get all divs by selector
#.comment
#parse comment
def check_application_mode(application_mode_str) :
for str in settings.APPLICATION_MODES_LIST:
if str == application_mode_str :
return True
return False
def run_clean() :
print("Running reading from site sequence")
result = scrap_rozetka_web_site()
print("Scraped", len(result), "categories.")
return result
def run_from_file() :
print("Running reading from previously stored data sequence")
filenames = fr.get_all_filenames('./'+settings.RESULT_FOLDER_NAME, settings.SITE_SCRAP_RESULT_FILE_NAME_PREFIX)
def parse_file_data (file_data) :
parsed_as_json = json.loads(file_data)
parsed = []
for cat in parsed_as_json:
parsed.append(Category(cat))
return parsed
result = []
for filename in filenames:
categories = fr.read_file_as(filename, lambda file_data : parse_file_data(file_data) )
result += categories
print("Loaded", len(result), "categories.")
return result
def run() :
mode = settings.APPLICATION_MODE
if not check_application_mode(mode):
print("Error!", "App mode:", mode, "not found in list", settings.APPLICATION_MODES_LIST)
raise RuntimeError(" ".join(["App mode:", mode, "not found in list", settings.APPLICATION_MODES_LIST]))
mode_runner_map = {
settings.APPLICATION_MODES_LIST[0] : lambda : run_clean(),
settings.APPLICATION_MODES_LIST[1] : lambda : run_from_file()
}
return mode_runner_map[mode]()
| return unicodestr | identifier_body |
rozetka_webscrapper.py | import requests
import datetime
import model
import settings
from model.category import Category
from model.group import Group
from model.item import Item
from model.comment import Comment
from bs4 import BeautifulSoup
from driver import Driver
import files.file_reader as fr
import files.file_writer as fw
import json
def decode_str(unicodestr):
#encoded = unicodestr.encode()
#decoded = encoded.decode('unicode-escape')
return unicodestr
def parse_comment(comment):
parsed_comment = Comment()
comment_author = comment.find(class_="comment__author")
if comment_author:
comment_date = comment.find(class_="comment__date")
if comment_date:
parsed_comment.date = decode_str(comment_date.get_text())
comment_date.decompose()
parsed_comment.author = decode_str(comment_author.get_text())
comment_link = comment.find(class_="comment__link")
if comment_link:
parsed_comment.url = comment_link.get("href")
comment_vars_list = comment.find(class_="comment__vars-list") #sellers
#parse vars_list
parsed_comment_vars_list = []
if comment_vars_list:
comment_vars_lists = comment_vars_list.find_all(class_="comment__vars-item")
if comment_vars_lists:
for item in comment_vars_lists:
res = {}
label = item.find(class_="comment__vars-label")
value = item.find(class_="comment__vars-value")
if label:
res["label"] = decode_str(label.get_text())
if value:
res["value"] = decode_str(value.get_text())
if res:
parsed_comment_vars_list.append(res)
parsed_comment.vars_list = parsed_comment_vars_list
#parse rating
comment_rating = comment.find("rz-comment-rating") #may be Empty
#has 5 items
#each star has fill(#0) or fiil(#1)
# svg path (tag) fill
if comment_rating:
stars = comment_rating.find_all("svg")
stars_count = 0;
for star in stars:
path = star.find("path")
if path:
fill = path.get("fill")
if fill == "url(#1)":
stars_count += 1
parsed_comment.rating = stars_count
#parse essentials
comment_text = comment.find(class_="comment__text")
if comment_text:
parsed_comment.text = decode_str(comment_text.get_text())
comment_essentials_list = comment.find_all(class_="comment__essentials-item") #has label and optional <dd> with text
parsed_essentials_list = []
if comment_essentials_list:
for essential in comment_essentials_list:
res = {}
essential_label = essential.find("dt", class_="comment__essentials-label")
essential_data = essential.find("dd")
if essential_label:
res["label"] = decode_str(essential_label.get_text())
if essential_data:
res["data"] = decode_str(essential_data.get_text())
parsed_essentials_list.append(res)
parsed_comment.essentials_list = parsed_essentials_list;
#parse attached photos
parsed_photos_urls = []
comment_attached_photos_urls = comment.find(class_="product-comments__photos-list")
if comment_attached_photos_urls:
photos_list = comment_attached_photos_urls.find_all(class_="product-comments__photos-item")
if photos_list:
for photo in photos_list:
img = photo.find("img")
if img:
url = img.get("src")
parsed_photos_urls.append(url)
parsed_comment.attached_photos_urls = parsed_photos_urls
return parsed_comment
def parse_item_page_for_comments(page):
soup = BeautifulSoup(page.text, 'html.parser')
# get the comments
comments = soup.find('comment-list')
#print(comments)
parsed_comments = []
# find all instances of that class (should return 25 as shown in the github main page)
if comments:
|
return parsed_comments
def parse_item_page_for_description(url):
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
description = soup.find(class_="product-about__description-content")
return decode_str(description.get_text()) if description else "" #runtime generated
def parse_item_page(url):
parsed_item = Item()
parsed_item.description = parse_item_page_for_description(url)
page = requests.get(url+'comments/')
parsed_item.url = url
soup = BeautifulSoup(page.text, 'html.parser')
title = soup.find(class_="product__title")
if title:
parsed_item.name = decode_str(title.get_text())
if page.reason == 'OK':
print('parse item:', parsed_item.name)
parsed_item.comments = parse_item_page_for_comments(page)
else:
parsed_item.error = page.reason
return parsed_item
def parse_specific_items_group(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_group = Group()
parsed_group.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="catalog-heading")
if title:
parsed_group.name = decode_str(title.get_text())
print('parse group:', parsed_group.name)
parsed_items = []
if html != "":
#parse items in group
item_wrappers = soup.find_all("div", class_="goods-tile")
if item_wrappers:
items_count = 0
for item_wrapper in item_wrappers:
item_link_holder = item_wrapper.find("a", class_="goods-tile__picture")
item_href = item_link_holder.get("href")
if item_href:
parsed_item = parse_item_page(item_href)
parsed_items.append(parsed_item)
items_count += 1
if items_count >= settings.ITEMS_PER_GROUP_LIMIT:
break
else:
print("PARSED ITEMS:", str(items_count) , "/", str(min(len(item_wrappers), settings.ITEMS_PER_GROUP_LIMIT)))
parsed_group.items = parsed_items
else:
parsed_group.error = "error"
return parsed_group
def parse_category(url):
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_category = Category()
parsed_category.url = url
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("h1", class_="portal__heading")
if title:
parsed_category.name = decode_str(title.get_text())
print('parse category:', parsed_category.name)
parsed_groups = []
if html != "":
#parse groups in category
group_wrappers = soup.find_all("div", class_="tile-cats")
if group_wrappers:
groups_count = 0
for group_wrapper in group_wrappers:
group_link_holder = group_wrapper.find("a", class_="tile-cats__picture")
group_href = group_link_holder.get("href")
if group_href:
parsed_group = parse_specific_items_group(group_href)
parsed_groups.append(parsed_group)
groups_count += 1
if groups_count >= settings.GROUPS_PER_CATEGORY_LIMIT:
break
else:
print("PARSED GROUPS:", str(groups_count) , "/", str(min(len(group_wrappers), settings.GROUPS_PER_CATEGORY_LIMIT)))
parsed_category.groups = parsed_groups
else:
parsed_item.error = "error"
return parsed_category
def parse_root():
url = 'https://rozetka.com.ua/'
driver = Driver.get()
driver.get(url)
html = driver.page_source
parsed_categories = []
categories_count = 0
soup = BeautifulSoup(html, 'html.parser')
link_holders = soup.find_all("a", class_="menu-categories__link")
if link_holders:
for link_holder in link_holders:
link = link_holder.get("href")
category = parse_category(link)
if category:
parsed_categories.append(category)
categories_count += 1
if categories_count >= settings.CATEGORIES_LIMIT:
break
else:
print("PARSED CATEGORIES:", str(categories_count) , "/", str(min(len(link_holders), settings.CATEGORIES_LIMIT)))
return parsed_categories
def scrap_rozetka_web_site():
time_start = datetime.datetime.now()
print("Parsing started at:", time_start)
parsed_site_data = parse_root()
time_end = datetime.datetime.now()
print("Parsing ended at:", time_end)
print("Parsing took:", time_end - time_start)
#### testing of correct json parsing
# for parsed_category in parsed_site_data :
# reparsed = model.category.Category.fromJson(parsed_category.toJson())
# #print(reparsed)
# #print(isinstance(reparsed, model.category.Category))
# for g in reparsed.groups:
# #print(g)
# #print(isinstance(g, model.group.Group))
# for i in g.items:
# #print(i)
# #print(isinstance(i, model.item.Item))
# for c in i.comments:
# #print(c)
# #print(isinstance(c, model.item.Comment))
Driver.close()
print("End of parsing!")
Driver.quit()
#filter empty categories
filtered_parsed_site_data = []
for c in parsed_site_data:
if len(c.groups) > 0:
#groups = []
#for g in c.groups:
# if len(g.items) > 0:
# groups.append(g)
#c.groups = groups
filtered_parsed_site_data.append(c)
print("Saving to file!")
fw.write_plain_iterable(
settings.SITE_SCRAP_RELATIVE_FILE_PATH_STRING.format(str(datetime.datetime.now()).replace(" ", "_").replace(":","").replace(".", "")).replace("/+", "/"),
filtered_parsed_site_data,
lambda o : o.toJson(),
encoding='utf-8'
)
return parsed_site_data
#top category 'https://rozetka.com.ua/computers-notebooks/c80253/'
#parsed_category = parse_category('https://rozetka.com.ua/computers-notebooks/c80253/')
#print(parsed_category)
#specific category 'https://rozetka.com.ua/notebooks/c80004/'
#parsed_group = parse_specific_items_group('https://rozetka.com.ua/notebooks/c80004/')
#print(parsed_group)
#specific item 'https://rozetka.com.ua/asus_90nr0351_m02460/p238731799/'
#parsed_item = parse_item_page_for_comments('https://rozetka.com.ua/asus_90nr0351_m02460/p238731799/')
#print(parsed_item)
#full parser
#get all categories by selector
#.menu-categories .menu-categories_type_main
#for each category scrap sub-categories by selector
#.tile-cats
#for each category scrap available items by
#.goods-tile
#then get
#.goods-tile__heading -> href property
#add /comments to href property
#get all divs by selector
#.comment
#parse comment
def check_application_mode(application_mode_str) :
for str in settings.APPLICATION_MODES_LIST:
if str == application_mode_str :
return True
return False
def run_clean() :
print("Running reading from site sequence")
result = scrap_rozetka_web_site()
print("Scraped", len(result), "categories.")
return result
def run_from_file() :
print("Running reading from previously stored data sequence")
filenames = fr.get_all_filenames('./'+settings.RESULT_FOLDER_NAME, settings.SITE_SCRAP_RESULT_FILE_NAME_PREFIX)
def parse_file_data (file_data) :
parsed_as_json = json.loads(file_data)
parsed = []
for cat in parsed_as_json:
parsed.append(Category(cat))
return parsed
result = []
for filename in filenames:
categories = fr.read_file_as(filename, lambda file_data : parse_file_data(file_data) )
result += categories
print("Loaded", len(result), "categories.")
return result
def run() :
mode = settings.APPLICATION_MODE
if not check_application_mode(mode):
print("Error!", "App mode:", mode, "not found in list", settings.APPLICATION_MODES_LIST)
raise RuntimeError(" ".join(["App mode:", mode, "not found in list", settings.APPLICATION_MODES_LIST]))
mode_runner_map = {
settings.APPLICATION_MODES_LIST[0] : lambda : run_clean(),
settings.APPLICATION_MODES_LIST[1] : lambda : run_from_file()
}
return mode_runner_map[mode]()
| comments_list = comments.find_all("li", class_="product-comments__list-item")
comments_count = 0
if comments_list:
for comment in comments_list:
parsed_comments.append(parse_comment(comment))
comments_count += 1
if comments_count >= settings.COMMENTS_PER_PAGE_LIMIT :
break | conditional_block |
value.rs | //! Types representing for data which will be retrieved from the driver.
//! Currently this data is expected to look like a JSON object but this may be
//! changed in the future. Driver authors must cast the data they retrieve from | use serde::de::{Deserialize, Deserializer, Error as DeError, Visitor, SeqVisitor, MapVisitor};
use serde::de::impls::VecVisitor;
use serde_json;
use error::Error;
/// The type which represents the key for maps used throughout the Ardite
/// codebase.
///
/// Functions similarly to an object key in JavaScript.
pub type Key = String;
/// Represents a [JSON pointer][1] to a document property. Examples of a
/// pointer in this context include `/hello/world` or `/a/b/c/d`.
///
/// These pointers are represented as a list of keys.
///
/// [1]: https://duckduckgo.com/?q=json+pointer&atb=v1&ia=about
pub type Pointer = Vec<Key>;
/// Ordered representation of a map of key/value pairs, like a JSON object.
/// Backed by a linear map to maintain order and have high performance for
/// small objects.
// TODO: newtype pattern?
pub type Object = LinearMap<Key, Value>;
/// Ordered array of values, like a JSON array.
// TODO: newtype pattern?
pub type Array = Vec<Value>;
/// Various value types. Based on types in the [JSON standard][1] (see section
/// 5).
///
/// [1]: http://ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
#[derive(PartialEq, Clone, Debug)]
pub enum Value {
/// The abscense of any value.
Null,
/// True or false.
Boolean(bool),
/// An integer numeric value.
I64(i64),
/// A floating point numeric value.
F64(f64),
/// A list of characters.
String(String),
/// A map of key/value pairs.
Object(Object),
/// A list of values.
Array(Array)
}
impl Value {
/// Gets a value at a specific point. Helpful for retrieving nested values.
pub fn get(&self, mut pointer: Pointer) -> Option<&Value> {
match *self {
Value::Object(ref map) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = map.get(&pointer.remove(0)) {
value.get(pointer)
} else {
None
}
},
Value::Array(ref vec) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = pointer.remove(0).parse::<usize>().ok().map_or(None, |i| vec.get(i)) {
value.get(pointer)
} else {
None
}
},
_ => if pointer.is_empty() { Some(self) } else { None }
}
}
/// Creates a `Value` from a JSON string.
pub fn from_json(json: &str) -> Result<Value, Error> {
serde_json::from_str(json).map_err(Error::from)
}
/// Converts a `Value` into a JSON string.
pub fn to_json(&self) -> Result<String, Error> {
serde_json::to_string(self).map_err(Error::from)
}
/// Converts a `Value` into a nice and indented JSON string.
pub fn to_json_pretty(&self) -> Result<String, Error> {
serde_json::to_string_pretty(self).map_err(Error::from)
}
}
impl Serialize for Value {
#[inline]
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer {
match *self {
Value::Null => serializer.serialize_unit(),
Value::Boolean(value) => serializer.serialize_bool(value),
Value::I64(value) => serializer.serialize_i64(value),
Value::F64(value) => serializer.serialize_f64(value),
Value::String(ref value) => serializer.serialize_str(&value),
Value::Array(ref value) => value.serialize(serializer),
Value::Object(ref value) => value.serialize(serializer)
}
}
}
impl Deserialize for Value {
#[inline]
fn deserialize<D>(deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer {
struct ValueVisitor;
impl Visitor for ValueVisitor {
type Value = Value;
#[inline] fn visit_bool<E>(&mut self, value: bool) -> Result<Value, E> { Ok(Value::Boolean(value)) }
#[inline] fn visit_u64<E>(&mut self, value: u64) -> Result<Value, E> { Ok(Value::I64(value as i64)) }
#[inline] fn visit_i64<E>(&mut self, value: i64) -> Result<Value, E> { Ok(Value::I64(value)) }
#[inline] fn visit_f64<E>(&mut self, value: f64) -> Result<Value, E> { Ok(Value::F64(value)) }
#[inline] fn visit_str<E>(&mut self, value: &str) -> Result<Value, E> where E: DeError { self.visit_string(value.to_owned()) }
#[inline] fn visit_string<E>(&mut self, value: String) -> Result<Value, E> { Ok(Value::String(value)) }
#[inline] fn visit_none<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_some<D>(&mut self, deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer { Deserialize::deserialize(deserializer) }
#[inline] fn visit_unit<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_seq<V>(&mut self, visitor: V) -> Result<Value, V::Error> where V: SeqVisitor { let values = try!(VecVisitor::new().visit_seq(visitor)); Ok(Value::Array(values)) }
#[inline]
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Value, V::Error> where V: MapVisitor {
let mut object = LinearMap::with_capacity(visitor.size_hint().0);
while let Some((key, value)) = try!(visitor.visit()) {
object.insert(key, value);
}
try!(visitor.end());
Ok(Value::Object(object))
}
}
deserializer.deserialize(ValueVisitor)
}
}
impl<V> From<Option<V>> for Value where V: Into<Value> {
fn from(option: Option<V>) -> Self {
match option {
None => Value::Null,
Some(value) => value.into()
}
}
}
impl From<bool> for Value {
fn from(boolean: bool) -> Self {
Value::Boolean(boolean)
}
}
impl From<i64> for Value {
fn from(number: i64) -> Self {
Value::I64(number)
}
}
impl From<f64> for Value {
fn from(number: f64) -> Self {
Value::F64(number)
}
}
impl From<String> for Value {
fn from(string: String) -> Self {
Value::String(string)
}
}
impl<'a> From<&'a str> for Value {
fn from(string: &'a str) -> Self {
Value::from(string.to_owned())
}
}
/// An iterator of values. Used by drivers to convert their own iterator
/// implementations into a single type.
pub struct ValueIter<'a> {
iter: Box<Iterator<Item=Value> + 'a>
}
impl<'a> ValueIter<'a> {
/// Create a new value iterator.
pub fn new<I>(iter: I) -> Self where I: Iterator<Item=Value> + 'a {
ValueIter {
iter: Box::new(iter)
}
}
}
impl<'a> Iterator for ValueIter<'a> {
type Item = Value;
#[inline]
fn next(&mut self) -> Option<Value> {
self.iter.next()
}
}
#[cfg(test)]
mod tests {
use value::Value;
#[test]
fn test_get_primitive() {
assert_eq!(value!().get(point![]).cloned(), Some(value!()));
assert_eq!(value!().get(point!["hello"]).cloned(), None);
assert_eq!(value!().get(point!["a", "b", "c", "d", "e"]).cloned(), None);
assert_eq!(value!(true).get(point![]).cloned(), Some(value!(true)));
assert_eq!(value!(true).get(point!["hello"]).cloned(), None);
assert_eq!(value!(36).get(point![]).cloned(), Some(value!(36)));
assert_eq!(value!(36).get(point!["hello"]).cloned(), None);
assert_eq!(value!("world").get(point![]).cloned(), Some(value!("world")));
assert_eq!(value!("world").get(point!["hello"]).cloned(), None);
}
#[test]
fn test_get_object() {
let object = value!({
"hello" => true,
"world" => 8,
"yolo" => "swag",
"5" => (),
"moon" => {
"hello" => "yoyo"
}
});
assert_eq!(object.get(point![]).cloned(), Some(object.clone()));
assert_eq!(object.get(point!["hello"]).cloned(), Some(value!(true)));
assert_eq!(object.get(point!["yolo"]).cloned(), Some(value!("swag")));
assert_eq!(object.get(point!["5"]).cloned(), Some(value!()));
assert_eq!(object.get(point!["world", "hello"]).cloned(), None);
assert_eq!(object.get(point!["moon", "hello"]).cloned(), Some(value!("yoyo")));
assert_eq!(object.get(point!["moon", "nope"]).cloned(), None);
}
#[test]
fn test_get_array() {
let array = value!([
false,
64,
{
"hello" => true,
"world" => false,
"moon" => {
"goodbye" => "yoyo"
}
},
[[1, 2, 3], 4, 5 ]
]);
assert_eq!(array.get(point![]).cloned(), Some(array.clone()));
assert_eq!(array.get(point!["0"]).cloned(), Some(value!(false)));
assert_eq!(array.get(point!["1"]).cloned(), Some(value!(64)));
assert_eq!(array.get(point!["2", "hello"]).cloned(), Some(value!(true)));
assert_eq!(array.get(point!["2", "moon", "goodbye"]).cloned(), Some(value!("yoyo")));
assert_eq!(array.get(point!["length"]).cloned(), None);
assert_eq!(array.get(point!["3", "0", "1"]).cloned(), Some(value!(2)));
}
#[test]
fn test_from_json() {
assert_eq!(Value::from_json("null").unwrap(), value!());
assert_eq!(Value::from_json("true").unwrap(), value!(true));
assert_eq!(Value::from_json("false").unwrap(), value!(false));
assert_eq!(Value::from_json("7").unwrap(), value!(7));
assert_eq!(Value::from_json("3.3").unwrap(), value!(3.3));
assert_eq!(Value::from_json(r#""Hello,\n\"world\"!""#).unwrap(), value!("Hello,\n\"world\"!"));
assert_eq!(Value::from_json(r#"{"hello":"world","foo":true,"null":null,"goodbye":{"moon":2}}"#).unwrap(), value!({
"hello" => "world",
"foo" => true,
"null" => (),
"goodbye" => {
"moon" => 2
}
}));
assert_eq!(
Value::from_json(r#"["world",3.3,{"hello":"world"},null,null,[1,2,3],null]"#).unwrap(),
value!(["world", 3.3, { "hello" => "world" }, (), (), [1, 2, 3], ()])
);
}
#[test]
fn test_to_json() {
assert_eq!(&value!().to_json().unwrap(), "null");
assert_eq!(&value!(true).to_json().unwrap(), "true");
assert_eq!(&value!(false).to_json().unwrap(), "false");
assert_eq!(&value!(7).to_json().unwrap(), "7");
assert_eq!(&value!(6.667).to_json().unwrap(), "6.667");
assert_eq!(&value!("Hello,\n\"world\"!").to_json().unwrap(), r#""Hello,\n\"world\"!""#);
assert_eq!(&value!({
"hello" => "world",
"foo" => true,
"null" => (),
"goodbye" => {
"moon" => 2
}
}).to_json().unwrap(), r#"{"hello":"world","foo":true,"null":null,"goodbye":{"moon":2}}"#);
assert_eq!(
&value!(["world", 3.333, { "hello" => "world" }, (), (), [1, 2, 3], ()]).to_json().unwrap(),
r#"["world",3.333,{"hello":"world"},null,null,[1,2,3],null]"#
);
}
#[test]
fn test_to_json_pretty() {
assert_eq!(
&value!(["world", 3.333, { "hello" => "world" }, (), (), [1, 2, 3], ()]).to_json_pretty().unwrap(),
"[\n \"world\",\n 3.333,\n {\n \"hello\": \"world\"\n },\n null,\n null,\n [\n 1,\n 2,\n 3\n ],\n null\n]"
);
}
} | //! the driver to these types.
use linear_map::LinearMap;
use serde::ser::{Serialize, Serializer}; | random_line_split |
value.rs | //! Types representing for data which will be retrieved from the driver.
//! Currently this data is expected to look like a JSON object but this may be
//! changed in the future. Driver authors must cast the data they retrieve from
//! the driver to these types.
use linear_map::LinearMap;
use serde::ser::{Serialize, Serializer};
use serde::de::{Deserialize, Deserializer, Error as DeError, Visitor, SeqVisitor, MapVisitor};
use serde::de::impls::VecVisitor;
use serde_json;
use error::Error;
/// The type which represents the key for maps used throughout the Ardite
/// codebase.
///
/// Functions similarly to an object key in JavaScript.
pub type Key = String;
/// Represents a [JSON pointer][1] to a document property. Examples of a
/// pointer in this context include `/hello/world` or `/a/b/c/d`.
///
/// These pointers are represented as a list of keys.
///
/// [1]: https://duckduckgo.com/?q=json+pointer&atb=v1&ia=about
pub type Pointer = Vec<Key>;
/// Ordered representation of a map of key/value pairs, like a JSON object.
/// Backed by a linear map to maintain order and have high performance for
/// small objects.
// TODO: newtype pattern?
pub type Object = LinearMap<Key, Value>;
/// Ordered array of values, like a JSON array.
// TODO: newtype pattern?
pub type Array = Vec<Value>;
/// Various value types. Based on types in the [JSON standard][1] (see section
/// 5).
///
/// [1]: http://ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
#[derive(PartialEq, Clone, Debug)]
pub enum Value {
/// The abscense of any value.
Null,
/// True or false.
Boolean(bool),
/// An integer numeric value.
I64(i64),
/// A floating point numeric value.
F64(f64),
/// A list of characters.
String(String),
/// A map of key/value pairs.
Object(Object),
/// A list of values.
Array(Array)
}
impl Value {
/// Gets a value at a specific point. Helpful for retrieving nested values.
pub fn get(&self, mut pointer: Pointer) -> Option<&Value> {
match *self {
Value::Object(ref map) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = map.get(&pointer.remove(0)) {
value.get(pointer)
} else {
None
}
},
Value::Array(ref vec) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = pointer.remove(0).parse::<usize>().ok().map_or(None, |i| vec.get(i)) {
value.get(pointer)
} else {
None
}
},
_ => if pointer.is_empty() { Some(self) } else { None }
}
}
/// Creates a `Value` from a JSON string.
pub fn from_json(json: &str) -> Result<Value, Error> {
serde_json::from_str(json).map_err(Error::from)
}
/// Converts a `Value` into a JSON string.
pub fn to_json(&self) -> Result<String, Error> {
serde_json::to_string(self).map_err(Error::from)
}
/// Converts a `Value` into a nice and indented JSON string.
pub fn to_json_pretty(&self) -> Result<String, Error> {
serde_json::to_string_pretty(self).map_err(Error::from)
}
}
impl Serialize for Value {
#[inline]
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer {
match *self {
Value::Null => serializer.serialize_unit(),
Value::Boolean(value) => serializer.serialize_bool(value),
Value::I64(value) => serializer.serialize_i64(value),
Value::F64(value) => serializer.serialize_f64(value),
Value::String(ref value) => serializer.serialize_str(&value),
Value::Array(ref value) => value.serialize(serializer),
Value::Object(ref value) => value.serialize(serializer)
}
}
}
impl Deserialize for Value {
#[inline]
fn deserialize<D>(deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer {
struct ValueVisitor;
impl Visitor for ValueVisitor {
type Value = Value;
#[inline] fn visit_bool<E>(&mut self, value: bool) -> Result<Value, E> { Ok(Value::Boolean(value)) }
#[inline] fn visit_u64<E>(&mut self, value: u64) -> Result<Value, E> { Ok(Value::I64(value as i64)) }
#[inline] fn visit_i64<E>(&mut self, value: i64) -> Result<Value, E> { Ok(Value::I64(value)) }
#[inline] fn visit_f64<E>(&mut self, value: f64) -> Result<Value, E> { Ok(Value::F64(value)) }
#[inline] fn visit_str<E>(&mut self, value: &str) -> Result<Value, E> where E: DeError { self.visit_string(value.to_owned()) }
#[inline] fn visit_string<E>(&mut self, value: String) -> Result<Value, E> { Ok(Value::String(value)) }
#[inline] fn visit_none<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_some<D>(&mut self, deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer { Deserialize::deserialize(deserializer) }
#[inline] fn visit_unit<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_seq<V>(&mut self, visitor: V) -> Result<Value, V::Error> where V: SeqVisitor { let values = try!(VecVisitor::new().visit_seq(visitor)); Ok(Value::Array(values)) }
#[inline]
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Value, V::Error> where V: MapVisitor {
let mut object = LinearMap::with_capacity(visitor.size_hint().0);
while let Some((key, value)) = try!(visitor.visit()) {
object.insert(key, value);
}
try!(visitor.end());
Ok(Value::Object(object))
}
}
deserializer.deserialize(ValueVisitor)
}
}
impl<V> From<Option<V>> for Value where V: Into<Value> {
fn from(option: Option<V>) -> Self {
match option {
None => Value::Null,
Some(value) => value.into()
}
}
}
impl From<bool> for Value {
fn from(boolean: bool) -> Self {
Value::Boolean(boolean)
}
}
impl From<i64> for Value {
fn from(number: i64) -> Self {
Value::I64(number)
}
}
impl From<f64> for Value {
fn from(number: f64) -> Self {
Value::F64(number)
}
}
impl From<String> for Value {
fn from(string: String) -> Self {
Value::String(string)
}
}
impl<'a> From<&'a str> for Value {
fn from(string: &'a str) -> Self {
Value::from(string.to_owned())
}
}
/// An iterator of values. Used by drivers to convert their own iterator
/// implementations into a single type.
pub struct ValueIter<'a> {
iter: Box<Iterator<Item=Value> + 'a>
}
impl<'a> ValueIter<'a> {
/// Create a new value iterator.
pub fn new<I>(iter: I) -> Self where I: Iterator<Item=Value> + 'a {
ValueIter {
iter: Box::new(iter)
}
}
}
impl<'a> Iterator for ValueIter<'a> {
type Item = Value;
#[inline]
fn next(&mut self) -> Option<Value> {
self.iter.next()
}
}
#[cfg(test)]
mod tests {
use value::Value;
#[test]
fn test_get_primitive() {
assert_eq!(value!().get(point![]).cloned(), Some(value!()));
assert_eq!(value!().get(point!["hello"]).cloned(), None);
assert_eq!(value!().get(point!["a", "b", "c", "d", "e"]).cloned(), None);
assert_eq!(value!(true).get(point![]).cloned(), Some(value!(true)));
assert_eq!(value!(true).get(point!["hello"]).cloned(), None);
assert_eq!(value!(36).get(point![]).cloned(), Some(value!(36)));
assert_eq!(value!(36).get(point!["hello"]).cloned(), None);
assert_eq!(value!("world").get(point![]).cloned(), Some(value!("world")));
assert_eq!(value!("world").get(point!["hello"]).cloned(), None);
}
#[test]
fn test_get_object() {
let object = value!({
"hello" => true,
"world" => 8,
"yolo" => "swag",
"5" => (),
"moon" => {
"hello" => "yoyo"
}
});
assert_eq!(object.get(point![]).cloned(), Some(object.clone()));
assert_eq!(object.get(point!["hello"]).cloned(), Some(value!(true)));
assert_eq!(object.get(point!["yolo"]).cloned(), Some(value!("swag")));
assert_eq!(object.get(point!["5"]).cloned(), Some(value!()));
assert_eq!(object.get(point!["world", "hello"]).cloned(), None);
assert_eq!(object.get(point!["moon", "hello"]).cloned(), Some(value!("yoyo")));
assert_eq!(object.get(point!["moon", "nope"]).cloned(), None);
}
#[test]
fn test_get_array() {
let array = value!([
false,
64,
{
"hello" => true,
"world" => false,
"moon" => {
"goodbye" => "yoyo"
}
},
[[1, 2, 3], 4, 5 ]
]);
assert_eq!(array.get(point![]).cloned(), Some(array.clone()));
assert_eq!(array.get(point!["0"]).cloned(), Some(value!(false)));
assert_eq!(array.get(point!["1"]).cloned(), Some(value!(64)));
assert_eq!(array.get(point!["2", "hello"]).cloned(), Some(value!(true)));
assert_eq!(array.get(point!["2", "moon", "goodbye"]).cloned(), Some(value!("yoyo")));
assert_eq!(array.get(point!["length"]).cloned(), None);
assert_eq!(array.get(point!["3", "0", "1"]).cloned(), Some(value!(2)));
}
#[test]
fn test_from_json() {
assert_eq!(Value::from_json("null").unwrap(), value!());
assert_eq!(Value::from_json("true").unwrap(), value!(true));
assert_eq!(Value::from_json("false").unwrap(), value!(false));
assert_eq!(Value::from_json("7").unwrap(), value!(7));
assert_eq!(Value::from_json("3.3").unwrap(), value!(3.3));
assert_eq!(Value::from_json(r#""Hello,\n\"world\"!""#).unwrap(), value!("Hello,\n\"world\"!"));
assert_eq!(Value::from_json(r#"{"hello":"world","foo":true,"null":null,"goodbye":{"moon":2}}"#).unwrap(), value!({
"hello" => "world",
"foo" => true,
"null" => (),
"goodbye" => {
"moon" => 2
}
}));
assert_eq!(
Value::from_json(r#"["world",3.3,{"hello":"world"},null,null,[1,2,3],null]"#).unwrap(),
value!(["world", 3.3, { "hello" => "world" }, (), (), [1, 2, 3], ()])
);
}
#[test]
fn test_to_json() |
#[test]
fn test_to_json_pretty() {
assert_eq!(
&value!(["world", 3.333, { "hello" => "world" }, (), (), [1, 2, 3], ()]).to_json_pretty().unwrap(),
"[\n \"world\",\n 3.333,\n {\n \"hello\": \"world\"\n },\n null,\n null,\n [\n 1,\n 2,\n 3\n ],\n null\n]"
);
}
}
| {
assert_eq!(&value!().to_json().unwrap(), "null");
assert_eq!(&value!(true).to_json().unwrap(), "true");
assert_eq!(&value!(false).to_json().unwrap(), "false");
assert_eq!(&value!(7).to_json().unwrap(), "7");
assert_eq!(&value!(6.667).to_json().unwrap(), "6.667");
assert_eq!(&value!("Hello,\n\"world\"!").to_json().unwrap(), r#""Hello,\n\"world\"!""#);
assert_eq!(&value!({
"hello" => "world",
"foo" => true,
"null" => (),
"goodbye" => {
"moon" => 2
}
}).to_json().unwrap(), r#"{"hello":"world","foo":true,"null":null,"goodbye":{"moon":2}}"#);
assert_eq!(
&value!(["world", 3.333, { "hello" => "world" }, (), (), [1, 2, 3], ()]).to_json().unwrap(),
r#"["world",3.333,{"hello":"world"},null,null,[1,2,3],null]"#
);
} | identifier_body |
value.rs | //! Types representing for data which will be retrieved from the driver.
//! Currently this data is expected to look like a JSON object but this may be
//! changed in the future. Driver authors must cast the data they retrieve from
//! the driver to these types.
use linear_map::LinearMap;
use serde::ser::{Serialize, Serializer};
use serde::de::{Deserialize, Deserializer, Error as DeError, Visitor, SeqVisitor, MapVisitor};
use serde::de::impls::VecVisitor;
use serde_json;
use error::Error;
/// The type which represents the key for maps used throughout the Ardite
/// codebase.
///
/// Functions similarly to an object key in JavaScript.
pub type Key = String;
/// Represents a [JSON pointer][1] to a document property. Examples of a
/// pointer in this context include `/hello/world` or `/a/b/c/d`.
///
/// These pointers are represented as a list of keys.
///
/// [1]: https://duckduckgo.com/?q=json+pointer&atb=v1&ia=about
pub type Pointer = Vec<Key>;
/// Ordered representation of a map of key/value pairs, like a JSON object.
/// Backed by a linear map to maintain order and have high performance for
/// small objects.
// TODO: newtype pattern?
pub type Object = LinearMap<Key, Value>;
/// Ordered array of values, like a JSON array.
// TODO: newtype pattern?
pub type Array = Vec<Value>;
/// Various value types. Based on types in the [JSON standard][1] (see section
/// 5).
///
/// [1]: http://ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
#[derive(PartialEq, Clone, Debug)]
pub enum Value {
/// The abscense of any value.
Null,
/// True or false.
Boolean(bool),
/// An integer numeric value.
I64(i64),
/// A floating point numeric value.
F64(f64),
/// A list of characters.
String(String),
/// A map of key/value pairs.
Object(Object),
/// A list of values.
Array(Array)
}
impl Value {
/// Gets a value at a specific point. Helpful for retrieving nested values.
pub fn get(&self, mut pointer: Pointer) -> Option<&Value> {
match *self {
Value::Object(ref map) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = map.get(&pointer.remove(0)) {
value.get(pointer)
} else {
None
}
},
Value::Array(ref vec) => {
if pointer.is_empty() {
Some(self)
} else if let Some(value) = pointer.remove(0).parse::<usize>().ok().map_or(None, |i| vec.get(i)) {
value.get(pointer)
} else {
None
}
},
_ => if pointer.is_empty() { Some(self) } else { None }
}
}
/// Creates a `Value` from a JSON string.
pub fn from_json(json: &str) -> Result<Value, Error> {
serde_json::from_str(json).map_err(Error::from)
}
/// Converts a `Value` into a JSON string.
pub fn to_json(&self) -> Result<String, Error> {
serde_json::to_string(self).map_err(Error::from)
}
/// Converts a `Value` into a nice and indented JSON string.
pub fn to_json_pretty(&self) -> Result<String, Error> {
serde_json::to_string_pretty(self).map_err(Error::from)
}
}
impl Serialize for Value {
#[inline]
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer {
match *self {
Value::Null => serializer.serialize_unit(),
Value::Boolean(value) => serializer.serialize_bool(value),
Value::I64(value) => serializer.serialize_i64(value),
Value::F64(value) => serializer.serialize_f64(value),
Value::String(ref value) => serializer.serialize_str(&value),
Value::Array(ref value) => value.serialize(serializer),
Value::Object(ref value) => value.serialize(serializer)
}
}
}
impl Deserialize for Value {
#[inline]
fn deserialize<D>(deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer {
struct ValueVisitor;
impl Visitor for ValueVisitor {
type Value = Value;
#[inline] fn visit_bool<E>(&mut self, value: bool) -> Result<Value, E> { Ok(Value::Boolean(value)) }
#[inline] fn visit_u64<E>(&mut self, value: u64) -> Result<Value, E> { Ok(Value::I64(value as i64)) }
#[inline] fn visit_i64<E>(&mut self, value: i64) -> Result<Value, E> { Ok(Value::I64(value)) }
#[inline] fn visit_f64<E>(&mut self, value: f64) -> Result<Value, E> { Ok(Value::F64(value)) }
#[inline] fn visit_str<E>(&mut self, value: &str) -> Result<Value, E> where E: DeError { self.visit_string(value.to_owned()) }
#[inline] fn visit_string<E>(&mut self, value: String) -> Result<Value, E> { Ok(Value::String(value)) }
#[inline] fn visit_none<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_some<D>(&mut self, deserializer: &mut D) -> Result<Value, D::Error> where D: Deserializer { Deserialize::deserialize(deserializer) }
#[inline] fn visit_unit<E>(&mut self) -> Result<Value, E> { Ok(Value::Null) }
#[inline] fn visit_seq<V>(&mut self, visitor: V) -> Result<Value, V::Error> where V: SeqVisitor { let values = try!(VecVisitor::new().visit_seq(visitor)); Ok(Value::Array(values)) }
#[inline]
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Value, V::Error> where V: MapVisitor {
let mut object = LinearMap::with_capacity(visitor.size_hint().0);
while let Some((key, value)) = try!(visitor.visit()) {
object.insert(key, value);
}
try!(visitor.end());
Ok(Value::Object(object))
}
}
deserializer.deserialize(ValueVisitor)
}
}
impl<V> From<Option<V>> for Value where V: Into<Value> {
fn from(option: Option<V>) -> Self {
match option {
None => Value::Null,
Some(value) => value.into()
}
}
}
impl From<bool> for Value {
fn from(boolean: bool) -> Self {
Value::Boolean(boolean)
}
}
impl From<i64> for Value {
fn from(number: i64) -> Self {
Value::I64(number)
}
}
impl From<f64> for Value {
fn from(number: f64) -> Self {
Value::F64(number)
}
}
impl From<String> for Value {
fn from(string: String) -> Self {
Value::String(string)
}
}
impl<'a> From<&'a str> for Value {
fn from(string: &'a str) -> Self {
Value::from(string.to_owned())
}
}
/// An iterator of values. Used by drivers to convert their own iterator
/// implementations into a single type.
pub struct ValueIter<'a> {
iter: Box<Iterator<Item=Value> + 'a>
}
impl<'a> ValueIter<'a> {
/// Create a new value iterator.
pub fn new<I>(iter: I) -> Self where I: Iterator<Item=Value> + 'a {
ValueIter {
iter: Box::new(iter)
}
}
}
impl<'a> Iterator for ValueIter<'a> {
type Item = Value;
#[inline]
fn next(&mut self) -> Option<Value> {
self.iter.next()
}
}
#[cfg(test)]
mod tests {
use value::Value;
#[test]
fn | () {
assert_eq!(value!().get(point![]).cloned(), Some(value!()));
assert_eq!(value!().get(point!["hello"]).cloned(), None);
assert_eq!(value!().get(point!["a", "b", "c", "d", "e"]).cloned(), None);
assert_eq!(value!(true).get(point![]).cloned(), Some(value!(true)));
assert_eq!(value!(true).get(point!["hello"]).cloned(), None);
assert_eq!(value!(36).get(point![]).cloned(), Some(value!(36)));
assert_eq!(value!(36).get(point!["hello"]).cloned(), None);
assert_eq!(value!("world").get(point![]).cloned(), Some(value!("world")));
assert_eq!(value!("world").get(point!["hello"]).cloned(), None);
}
#[test]
fn test_get_object() {
let object = value!({
"hello" => true,
"world" => 8,
"yolo" => "swag",
"5" => (),
"moon" => {
"hello" => "yoyo"
}
});
assert_eq!(object.get(point![]).cloned(), Some(object.clone()));
assert_eq!(object.get(point!["hello"]).cloned(), Some(value!(true)));
assert_eq!(object.get(point!["yolo"]).cloned(), Some(value!("swag")));
assert_eq!(object.get(point!["5"]).cloned(), Some(value!()));
assert_eq!(object.get(point!["world", "hello"]).cloned(), None);
assert_eq!(object.get(point!["moon", "hello"]).cloned(), Some(value!("yoyo")));
assert_eq!(object.get(point!["moon", "nope"]).cloned(), None);
}
#[test]
fn test_get_array() {
let array = value!([
false,
64,
{
"hello" => true,
"world" => false,
"moon" => {
"goodbye" => "yoyo"
}
},
[[1, 2, 3], 4, 5 ]
]);
assert_eq!(array.get(point![]).cloned(), Some(array.clone()));
assert_eq!(array.get(point!["0"]).cloned(), Some(value!(false)));
assert_eq!(array.get(point!["1"]).cloned(), Some(value!(64)));
assert_eq!(array.get(point!["2", "hello"]).cloned(), Some(value!(true)));
assert_eq!(array.get(point!["2", "moon", "goodbye"]).cloned(), Some(value!("yoyo")));
assert_eq!(array.get(point!["length"]).cloned(), None);
assert_eq!(array.get(point!["3", "0", "1"]).cloned(), Some(value!(2)));
}
#[test]
fn test_from_json() {
assert_eq!(Value::from_json("null").unwrap(), value!());
assert_eq!(Value::from_json("true").unwrap(), value!(true));
assert_eq!(Value::from_json("false").unwrap(), value!(false));
assert_eq!(Value::from_json("7").unwrap(), value!(7));
assert_eq!(Value::from_json("3.3").unwrap(), value!(3.3));
assert_eq!(Value::from_json(r#""Hello,\n\"world\"!""#).unwrap(), value!("Hello,\n\"world\"!"));
assert_eq!(Value::from_json(r#"{"hello":"world","foo":true,"null":null,"goodbye":{"moon":2}}"#).unwrap(), value!({
"hello" => "world",
"foo" => true,
"null" => (),
"goodbye" => {
"moon" => 2
}
}));
assert_eq!(
Value::from_json(r#"["world",3.3,{"hello":"world"},null,null,[1,2,3],null]"#).unwrap(),
value!(["world", 3.3, { "hello" => "world" }, (), (), [1, 2, 3], ()])
);
}
#[test]
fn test_to_json() {
assert_eq!(&value!().to_json().unwrap(), "null");
assert_eq!(&value!(true).to_json().unwrap(), "true");
assert_eq!(&value!(false).to_json().unwrap(), "false");
assert_eq!(&value!(7).to_json().unwrap(), "7");
assert_eq!(&value!(6.667).to_json().unwrap(), "6.667");
assert_eq!(&value!("Hello,\n\"world\"!").to_json().unwrap(), r#""Hello,\n\"world\"!""#);
assert_eq!(&value!({
"hello" => "world",
"foo" => true,
"null" => (),
"goodbye" => {
"moon" => 2
}
}).to_json().unwrap(), r#"{"hello":"world","foo":true,"null":null,"goodbye":{"moon":2}}"#);
assert_eq!(
&value!(["world", 3.333, { "hello" => "world" }, (), (), [1, 2, 3], ()]).to_json().unwrap(),
r#"["world",3.333,{"hello":"world"},null,null,[1,2,3],null]"#
);
}
#[test]
fn test_to_json_pretty() {
assert_eq!(
&value!(["world", 3.333, { "hello" => "world" }, (), (), [1, 2, 3], ()]).to_json_pretty().unwrap(),
"[\n \"world\",\n 3.333,\n {\n \"hello\": \"world\"\n },\n null,\n null,\n [\n 1,\n 2,\n 3\n ],\n null\n]"
);
}
}
| test_get_primitive | identifier_name |
balloon.rs | // Copyright (c) 2020 Ant Financial
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
seccomp_filters::Thread, thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper,
EpollHelperError, EpollHelperHandler, GuestMemoryMmap, VirtioCommon, VirtioDevice,
VirtioDeviceType, VirtioInterrupt, VirtioInterruptType, EPOLL_HELPER_EVENT_LAST,
VIRTIO_F_VERSION_1,
};
use anyhow::anyhow;
use seccompiler::SeccompAction;
use std::io::{self, Write};
use std::mem::size_of;
use std::os::unix::io::AsRawFd;
use std::result;
use std::sync::{atomic::AtomicBool, Arc, Barrier};
use thiserror::Error;
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use virtio_queue::{Queue, QueueT};
use vm_allocator::page_size::{align_page_size_down, get_page_size};
use vm_memory::{
Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
GuestMemoryError, GuestMemoryRegion,
};
use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
};
use vmm_sys_util::eventfd::EventFd;
const QUEUE_SIZE: u16 = 128;
const REPORTING_QUEUE_SIZE: u16 = 32;
const MIN_NUM_QUEUES: usize = 2;
// Inflate virtio queue event.
const INFLATE_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
// Deflate virtio queue event.
const DEFLATE_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
// Reporting virtio queue event.
const REPORTING_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
// Size of a PFN in the balloon interface.
const VIRTIO_BALLOON_PFN_SHIFT: u64 = 12;
// Deflate balloon on OOM
const VIRTIO_BALLOON_F_DEFLATE_ON_OOM: u64 = 2;
// Enable an additional virtqueue to let the guest notify the host about free
// pages.
const VIRTIO_BALLOON_F_REPORTING: u64 = 5;
#[derive(Error, Debug)]
pub enum Error {
#[error("Guest gave us bad memory addresses.: {0}")]
GuestMemory(GuestMemoryError),
#[error("Guest gave us a write only descriptor that protocol says to read from")]
UnexpectedWriteOnlyDescriptor,
#[error("Guest sent us invalid request")]
InvalidRequest,
#[error("Fallocate fail.: {0}")]
FallocateFail(std::io::Error),
#[error("Madvise fail.: {0}")]
MadviseFail(std::io::Error),
#[error("Failed to EventFd write.: {0}")]
EventFdWriteFail(std::io::Error),
#[error("Invalid queue index: {0}")]
InvalidQueueIndex(usize),
#[error("Fail tp signal: {0}")]
FailedSignal(io::Error),
#[error("Descriptor chain is too short")]
DescriptorChainTooShort,
#[error("Failed adding used index: {0}")]
QueueAddUsed(virtio_queue::Error),
#[error("Failed creating an iterator over the queue: {0}")]
QueueIterator(virtio_queue::Error),
}
// Got from include/uapi/linux/virtio_balloon.h
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Versionize)]
pub struct VirtioBalloonConfig {
// Number of pages host wants Guest to give up.
num_pages: u32,
// Number of pages we've actually got in balloon.
actual: u32,
}
#[derive(Clone, Debug)]
struct PartiallyBalloonedPage {
addr: u64,
bitmap: Vec<u64>,
page_size: u64,
}
impl PartiallyBalloonedPage {
fn new() -> Self {
let page_size = get_page_size();
let len = ((page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
// Initial each padding bit as 1 in bitmap.
let mut bitmap = vec![0_u64; len as usize];
let pad_num = len * 64 - (page_size >> VIRTIO_BALLOON_PFN_SHIFT);
bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
Self {
addr: 0,
bitmap,
page_size,
}
}
fn pfn_match(&self, addr: u64) -> bool {
self.addr == addr & !(self.page_size - 1)
}
fn bitmap_full(&self) -> bool {
self.bitmap.iter().all(|b| *b == u64::MAX)
}
fn | (&mut self, addr: u64) {
let addr_offset = (addr % self.page_size) >> VIRTIO_BALLOON_PFN_SHIFT;
self.bitmap[(addr_offset / 64) as usize] |= 1 << (addr_offset % 64);
}
fn reset(&mut self) {
let len = ((self.page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
self.addr = 0;
self.bitmap = vec![0; len as usize];
let pad_num = len * 64 - (self.page_size >> VIRTIO_BALLOON_PFN_SHIFT);
self.bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
}
}
const CONFIG_ACTUAL_OFFSET: u64 = 4;
const CONFIG_ACTUAL_SIZE: usize = 4;
// SAFETY: it only has data and has no implicit padding.
unsafe impl ByteValued for VirtioBalloonConfig {}
struct BalloonEpollHandler {
mem: GuestMemoryAtomic<GuestMemoryMmap>,
queues: Vec<Queue>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
inflate_queue_evt: EventFd,
deflate_queue_evt: EventFd,
reporting_queue_evt: Option<EventFd>,
kill_evt: EventFd,
pause_evt: EventFd,
pbp: Option<PartiallyBalloonedPage>,
}
impl BalloonEpollHandler {
fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), Error> {
self.interrupt_cb.trigger(int_type).map_err(|e| {
error!("Failed to signal used queue: {:?}", e);
Error::FailedSignal(e)
})
}
fn advise_memory_range(
memory: &GuestMemoryMmap,
range_base: GuestAddress,
range_len: usize,
advice: libc::c_int,
) -> result::Result<(), Error> {
let hva = memory
.get_host_address(range_base)
.map_err(Error::GuestMemory)?;
let res =
// SAFETY: Need unsafe to do syscall madvise
unsafe { libc::madvise(hva as *mut libc::c_void, range_len as libc::size_t, advice) };
if res != 0 {
return Err(Error::MadviseFail(io::Error::last_os_error()));
}
Ok(())
}
fn release_memory_range(
memory: &GuestMemoryMmap,
range_base: GuestAddress,
range_len: usize,
) -> result::Result<(), Error> {
let region = memory.find_region(range_base).ok_or(Error::GuestMemory(
GuestMemoryError::InvalidGuestAddress(range_base),
))?;
if let Some(f_off) = region.file_offset() {
let offset = range_base.0 - region.start_addr().0;
// SAFETY: FFI call with valid arguments
let res = unsafe {
libc::fallocate64(
f_off.file().as_raw_fd(),
libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE,
(offset + f_off.start()) as libc::off64_t,
range_len as libc::off64_t,
)
};
if res != 0 {
return Err(Error::FallocateFail(io::Error::last_os_error()));
}
}
Self::advise_memory_range(memory, range_base, range_len, libc::MADV_DONTNEED)
}
fn release_memory_range_4k(
pbp: &mut Option<PartiallyBalloonedPage>,
memory: &GuestMemoryMmap,
pfn: u32,
) -> result::Result<(), Error> {
let range_base = GuestAddress((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
let range_len = 1 << VIRTIO_BALLOON_PFN_SHIFT;
let page_size: u64 = get_page_size();
if page_size == 1 << VIRTIO_BALLOON_PFN_SHIFT {
return Self::release_memory_range(memory, range_base, range_len);
}
if pbp.is_none() {
*pbp = Some(PartiallyBalloonedPage::new());
}
if !pbp.as_ref().unwrap().pfn_match(range_base.0) {
// We are trying to free memory region in a different pfn with current pbp. Flush pbp.
pbp.as_mut().unwrap().reset();
pbp.as_mut().unwrap().addr = align_page_size_down(range_base.0);
}
pbp.as_mut().unwrap().set_bit(range_base.0);
if pbp.as_ref().unwrap().bitmap_full() {
Self::release_memory_range(
memory,
vm_memory::GuestAddress(pbp.as_ref().unwrap().addr),
page_size as usize,
)?;
pbp.as_mut().unwrap().reset();
}
Ok(())
}
fn process_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
let mut used_descs = false;
while let Some(mut desc_chain) =
self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
{
let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
let data_chunk_size = size_of::<u32>();
// The head contains the request type which MUST be readable.
if desc.is_write_only() {
error!("The head contains the request type is not right");
return Err(Error::UnexpectedWriteOnlyDescriptor);
}
if desc.len() as usize % data_chunk_size != 0 {
error!("the request size {} is not right", desc.len());
return Err(Error::InvalidRequest);
}
let mut offset = 0u64;
while offset < desc.len() as u64 {
let addr = desc.addr().checked_add(offset).unwrap();
let pfn: u32 = desc_chain
.memory()
.read_obj(addr)
.map_err(Error::GuestMemory)?;
offset += data_chunk_size as u64;
match queue_index {
0 => {
Self::release_memory_range_4k(&mut self.pbp, desc_chain.memory(), pfn)?;
}
1 => {
let page_size = get_page_size() as usize;
let rbase = align_page_size_down((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
Self::advise_memory_range(
desc_chain.memory(),
vm_memory::GuestAddress(rbase),
page_size,
libc::MADV_WILLNEED,
)?;
}
_ => return Err(Error::InvalidQueueIndex(queue_index)),
}
}
self.queues[queue_index]
.add_used(desc_chain.memory(), desc_chain.head_index(), desc.len())
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}
if used_descs {
self.signal(VirtioInterruptType::Queue(queue_index as u16))
} else {
Ok(())
}
}
fn process_reporting_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
let mut used_descs = false;
while let Some(mut desc_chain) =
self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
{
let mut descs_len = 0;
while let Some(desc) = desc_chain.next() {
descs_len += desc.len();
Self::release_memory_range(desc_chain.memory(), desc.addr(), desc.len() as usize)?;
}
self.queues[queue_index]
.add_used(desc_chain.memory(), desc_chain.head_index(), descs_len)
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}
if used_descs {
self.signal(VirtioInterruptType::Queue(queue_index as u16))
} else {
Ok(())
}
}
fn run(
&mut self,
paused: Arc<AtomicBool>,
paused_sync: Arc<Barrier>,
) -> result::Result<(), EpollHelperError> {
let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
helper.add_event(self.inflate_queue_evt.as_raw_fd(), INFLATE_QUEUE_EVENT)?;
helper.add_event(self.deflate_queue_evt.as_raw_fd(), DEFLATE_QUEUE_EVENT)?;
if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
helper.add_event(reporting_queue_evt.as_raw_fd(), REPORTING_QUEUE_EVENT)?;
}
helper.run(paused, paused_sync, self)?;
Ok(())
}
}
impl EpollHelperHandler for BalloonEpollHandler {
fn handle_event(
&mut self,
_helper: &mut EpollHelper,
event: &epoll::Event,
) -> result::Result<(), EpollHelperError> {
let ev_type = event.data as u16;
match ev_type {
INFLATE_QUEUE_EVENT => {
self.inflate_queue_evt.read().map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to get inflate queue event: {:?}",
e
))
})?;
self.process_queue(0).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used inflate queue: {:?}",
e
))
})?;
}
DEFLATE_QUEUE_EVENT => {
self.deflate_queue_evt.read().map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to get deflate queue event: {:?}",
e
))
})?;
self.process_queue(1).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used deflate queue: {:?}",
e
))
})?;
}
REPORTING_QUEUE_EVENT => {
if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
reporting_queue_evt.read().map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to get reporting queue event: {:?}",
e
))
})?;
self.process_reporting_queue(2).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used inflate queue: {:?}",
e
))
})?;
} else {
return Err(EpollHelperError::HandleEvent(anyhow!(
"Invalid reporting queue event as no eventfd registered"
)));
}
}
_ => {
return Err(EpollHelperError::HandleEvent(anyhow!(
"Unknown event for virtio-balloon"
)));
}
}
Ok(())
}
}
#[derive(Versionize)]
pub struct BalloonState {
pub avail_features: u64,
pub acked_features: u64,
pub config: VirtioBalloonConfig,
}
impl VersionMapped for BalloonState {}
// Virtio device for exposing entropy to the guest OS through virtio.
pub struct Balloon {
common: VirtioCommon,
id: String,
config: VirtioBalloonConfig,
seccomp_action: SeccompAction,
exit_evt: EventFd,
interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
}
impl Balloon {
// Create a new virtio-balloon.
pub fn new(
id: String,
size: u64,
deflate_on_oom: bool,
free_page_reporting: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
state: Option<BalloonState>,
) -> io::Result<Self> {
let mut queue_sizes = vec![QUEUE_SIZE; MIN_NUM_QUEUES];
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
info!("Restoring virtio-balloon {}", id);
(
state.avail_features,
state.acked_features,
state.config,
true,
)
} else {
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
if deflate_on_oom {
avail_features |= 1u64 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM;
}
if free_page_reporting {
avail_features |= 1u64 << VIRTIO_BALLOON_F_REPORTING;
}
let config = VirtioBalloonConfig {
num_pages: (size >> VIRTIO_BALLOON_PFN_SHIFT) as u32,
..Default::default()
};
(avail_features, 0, config, false)
};
if free_page_reporting {
queue_sizes.push(REPORTING_QUEUE_SIZE);
}
Ok(Balloon {
common: VirtioCommon {
device_type: VirtioDeviceType::Balloon as u32,
avail_features,
acked_features,
paused_sync: Some(Arc::new(Barrier::new(2))),
queue_sizes,
min_queues: MIN_NUM_QUEUES as u16,
paused: Arc::new(AtomicBool::new(paused)),
..Default::default()
},
id,
config,
seccomp_action,
exit_evt,
interrupt_cb: None,
})
}
pub fn resize(&mut self, size: u64) -> Result<(), Error> {
self.config.num_pages = (size >> VIRTIO_BALLOON_PFN_SHIFT) as u32;
if let Some(interrupt_cb) = &self.interrupt_cb {
interrupt_cb
.trigger(VirtioInterruptType::Config)
.map_err(Error::FailedSignal)
} else {
Ok(())
}
}
// Get the actual size of the virtio-balloon.
pub fn get_actual(&self) -> u64 {
(self.config.actual as u64) << VIRTIO_BALLOON_PFN_SHIFT
}
fn state(&self) -> BalloonState {
BalloonState {
avail_features: self.common.avail_features,
acked_features: self.common.acked_features,
config: self.config,
}
}
#[cfg(fuzzing)]
pub fn wait_for_epoll_threads(&mut self) {
self.common.wait_for_epoll_threads();
}
}
impl Drop for Balloon {
fn drop(&mut self) {
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.
let _ = kill_evt.write(1);
}
self.common.wait_for_epoll_threads();
}
}
impl VirtioDevice for Balloon {
fn device_type(&self) -> u32 {
self.common.device_type
}
fn queue_max_sizes(&self) -> &[u16] {
&self.common.queue_sizes
}
fn features(&self) -> u64 {
self.common.avail_features
}
fn ack_features(&mut self, value: u64) {
self.common.ack_features(value)
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
self.read_config_from_slice(self.config.as_slice(), offset, data);
}
fn write_config(&mut self, offset: u64, data: &[u8]) {
// The "actual" field is the only mutable field
if offset != CONFIG_ACTUAL_OFFSET || data.len() != CONFIG_ACTUAL_SIZE {
error!(
"Attempt to write to read-only field: offset {:x} length {}",
offset,
data.len()
);
return;
}
let config = self.config.as_mut_slice();
let config_len = config.len() as u64;
let data_len = data.len() as u64;
if offset + data_len > config_len {
error!(
"Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
config_len,
offset,
data_len,
self.device_type()
);
return;
}
if let Some(end) = offset.checked_add(config.len() as u64) {
let mut offset_config =
&mut config[offset as usize..std::cmp::min(end, config_len) as usize];
offset_config.write_all(data).unwrap();
}
}
fn activate(
&mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut virtqueues = Vec::new();
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
let inflate_queue_evt = queue_evt;
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
let deflate_queue_evt = queue_evt;
let reporting_queue_evt =
if self.common.feature_acked(VIRTIO_BALLOON_F_REPORTING) && !queues.is_empty() {
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
Some(queue_evt)
} else {
None
};
self.interrupt_cb = Some(interrupt_cb.clone());
let mut handler = BalloonEpollHandler {
mem,
queues: virtqueues,
interrupt_cb,
inflate_queue_evt,
deflate_queue_evt,
reporting_queue_evt,
kill_evt,
pause_evt,
pbp: None,
};
let paused = self.common.paused.clone();
let paused_sync = self.common.paused_sync.clone();
let mut epoll_threads = Vec::new();
spawn_virtio_thread(
&self.id,
&self.seccomp_action,
Thread::VirtioBalloon,
&mut epoll_threads,
&self.exit_evt,
move || handler.run(paused, paused_sync.unwrap()),
)?;
self.common.epoll_threads = Some(epoll_threads);
event!("virtio-device", "activated", "id", &self.id);
Ok(())
}
fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
let result = self.common.reset();
event!("virtio-device", "reset", "id", &self.id);
result
}
}
impl Pausable for Balloon {
fn pause(&mut self) -> result::Result<(), MigratableError> {
self.common.pause()
}
fn resume(&mut self) -> result::Result<(), MigratableError> {
self.common.resume()
}
}
impl Snapshottable for Balloon {
fn id(&self) -> String {
self.id.clone()
}
fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
Snapshot::new_from_versioned_state(&self.state())
}
}
impl Transportable for Balloon {}
impl Migratable for Balloon {}
| set_bit | identifier_name |
balloon.rs | // Copyright (c) 2020 Ant Financial
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
seccomp_filters::Thread, thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper,
EpollHelperError, EpollHelperHandler, GuestMemoryMmap, VirtioCommon, VirtioDevice,
VirtioDeviceType, VirtioInterrupt, VirtioInterruptType, EPOLL_HELPER_EVENT_LAST,
VIRTIO_F_VERSION_1,
};
use anyhow::anyhow;
use seccompiler::SeccompAction;
use std::io::{self, Write};
use std::mem::size_of;
use std::os::unix::io::AsRawFd;
use std::result;
use std::sync::{atomic::AtomicBool, Arc, Barrier};
use thiserror::Error;
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use virtio_queue::{Queue, QueueT};
use vm_allocator::page_size::{align_page_size_down, get_page_size};
use vm_memory::{
Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
GuestMemoryError, GuestMemoryRegion,
};
use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
};
use vmm_sys_util::eventfd::EventFd;
const QUEUE_SIZE: u16 = 128;
const REPORTING_QUEUE_SIZE: u16 = 32;
const MIN_NUM_QUEUES: usize = 2;
// Inflate virtio queue event.
const INFLATE_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
// Deflate virtio queue event.
const DEFLATE_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
// Reporting virtio queue event.
const REPORTING_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
// Size of a PFN in the balloon interface.
const VIRTIO_BALLOON_PFN_SHIFT: u64 = 12;
// Deflate balloon on OOM
const VIRTIO_BALLOON_F_DEFLATE_ON_OOM: u64 = 2;
// Enable an additional virtqueue to let the guest notify the host about free
// pages.
const VIRTIO_BALLOON_F_REPORTING: u64 = 5;
#[derive(Error, Debug)]
pub enum Error {
#[error("Guest gave us bad memory addresses.: {0}")]
GuestMemory(GuestMemoryError),
#[error("Guest gave us a write only descriptor that protocol says to read from")]
UnexpectedWriteOnlyDescriptor,
#[error("Guest sent us invalid request")]
InvalidRequest,
#[error("Fallocate fail.: {0}")]
FallocateFail(std::io::Error),
#[error("Madvise fail.: {0}")]
MadviseFail(std::io::Error),
#[error("Failed to EventFd write.: {0}")]
EventFdWriteFail(std::io::Error),
#[error("Invalid queue index: {0}")]
InvalidQueueIndex(usize),
#[error("Fail tp signal: {0}")]
FailedSignal(io::Error),
#[error("Descriptor chain is too short")]
DescriptorChainTooShort,
#[error("Failed adding used index: {0}")]
QueueAddUsed(virtio_queue::Error),
#[error("Failed creating an iterator over the queue: {0}")]
QueueIterator(virtio_queue::Error),
}
// Got from include/uapi/linux/virtio_balloon.h
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Versionize)]
pub struct VirtioBalloonConfig {
// Number of pages host wants Guest to give up.
num_pages: u32,
// Number of pages we've actually got in balloon.
actual: u32,
}
#[derive(Clone, Debug)]
struct PartiallyBalloonedPage {
addr: u64,
bitmap: Vec<u64>,
page_size: u64,
}
impl PartiallyBalloonedPage {
fn new() -> Self {
let page_size = get_page_size();
let len = ((page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
// Initial each padding bit as 1 in bitmap.
let mut bitmap = vec![0_u64; len as usize];
let pad_num = len * 64 - (page_size >> VIRTIO_BALLOON_PFN_SHIFT);
bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
Self {
addr: 0,
bitmap,
page_size,
}
}
fn pfn_match(&self, addr: u64) -> bool {
self.addr == addr & !(self.page_size - 1)
}
fn bitmap_full(&self) -> bool {
self.bitmap.iter().all(|b| *b == u64::MAX)
}
fn set_bit(&mut self, addr: u64) {
let addr_offset = (addr % self.page_size) >> VIRTIO_BALLOON_PFN_SHIFT;
self.bitmap[(addr_offset / 64) as usize] |= 1 << (addr_offset % 64);
}
fn reset(&mut self) {
let len = ((self.page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
self.addr = 0;
self.bitmap = vec![0; len as usize];
let pad_num = len * 64 - (self.page_size >> VIRTIO_BALLOON_PFN_SHIFT);
self.bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
}
}
const CONFIG_ACTUAL_OFFSET: u64 = 4;
const CONFIG_ACTUAL_SIZE: usize = 4;
// SAFETY: it only has data and has no implicit padding.
unsafe impl ByteValued for VirtioBalloonConfig {}
struct BalloonEpollHandler {
mem: GuestMemoryAtomic<GuestMemoryMmap>,
queues: Vec<Queue>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
inflate_queue_evt: EventFd,
deflate_queue_evt: EventFd,
reporting_queue_evt: Option<EventFd>,
kill_evt: EventFd,
pause_evt: EventFd,
pbp: Option<PartiallyBalloonedPage>,
}
impl BalloonEpollHandler {
fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), Error> {
self.interrupt_cb.trigger(int_type).map_err(|e| {
error!("Failed to signal used queue: {:?}", e);
Error::FailedSignal(e)
})
}
fn advise_memory_range(
memory: &GuestMemoryMmap,
range_base: GuestAddress,
range_len: usize,
advice: libc::c_int,
) -> result::Result<(), Error> {
let hva = memory
.get_host_address(range_base)
.map_err(Error::GuestMemory)?;
let res =
// SAFETY: Need unsafe to do syscall madvise
unsafe { libc::madvise(hva as *mut libc::c_void, range_len as libc::size_t, advice) };
if res != 0 {
return Err(Error::MadviseFail(io::Error::last_os_error()));
}
Ok(())
}
fn release_memory_range(
memory: &GuestMemoryMmap,
range_base: GuestAddress,
range_len: usize,
) -> result::Result<(), Error> {
let region = memory.find_region(range_base).ok_or(Error::GuestMemory(
GuestMemoryError::InvalidGuestAddress(range_base),
))?;
if let Some(f_off) = region.file_offset() {
let offset = range_base.0 - region.start_addr().0;
// SAFETY: FFI call with valid arguments
let res = unsafe {
libc::fallocate64(
f_off.file().as_raw_fd(),
libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE,
(offset + f_off.start()) as libc::off64_t,
range_len as libc::off64_t,
)
};
if res != 0 {
return Err(Error::FallocateFail(io::Error::last_os_error()));
}
}
Self::advise_memory_range(memory, range_base, range_len, libc::MADV_DONTNEED)
}
fn release_memory_range_4k(
pbp: &mut Option<PartiallyBalloonedPage>,
memory: &GuestMemoryMmap,
pfn: u32,
) -> result::Result<(), Error> {
let range_base = GuestAddress((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
let range_len = 1 << VIRTIO_BALLOON_PFN_SHIFT;
let page_size: u64 = get_page_size();
if page_size == 1 << VIRTIO_BALLOON_PFN_SHIFT {
return Self::release_memory_range(memory, range_base, range_len);
}
if pbp.is_none() {
*pbp = Some(PartiallyBalloonedPage::new());
}
if !pbp.as_ref().unwrap().pfn_match(range_base.0) {
// We are trying to free memory region in a different pfn with current pbp. Flush pbp.
pbp.as_mut().unwrap().reset();
pbp.as_mut().unwrap().addr = align_page_size_down(range_base.0);
}
pbp.as_mut().unwrap().set_bit(range_base.0);
if pbp.as_ref().unwrap().bitmap_full() {
Self::release_memory_range(
memory,
vm_memory::GuestAddress(pbp.as_ref().unwrap().addr),
page_size as usize,
)?;
pbp.as_mut().unwrap().reset();
}
Ok(())
}
fn process_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
let mut used_descs = false;
while let Some(mut desc_chain) =
self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
{
let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
let data_chunk_size = size_of::<u32>();
// The head contains the request type which MUST be readable.
if desc.is_write_only() {
error!("The head contains the request type is not right");
return Err(Error::UnexpectedWriteOnlyDescriptor);
}
if desc.len() as usize % data_chunk_size != 0 {
error!("the request size {} is not right", desc.len());
return Err(Error::InvalidRequest);
}
let mut offset = 0u64;
while offset < desc.len() as u64 {
let addr = desc.addr().checked_add(offset).unwrap();
let pfn: u32 = desc_chain
.memory()
.read_obj(addr)
.map_err(Error::GuestMemory)?;
offset += data_chunk_size as u64;
match queue_index {
0 => |
1 => {
let page_size = get_page_size() as usize;
let rbase = align_page_size_down((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
Self::advise_memory_range(
desc_chain.memory(),
vm_memory::GuestAddress(rbase),
page_size,
libc::MADV_WILLNEED,
)?;
}
_ => return Err(Error::InvalidQueueIndex(queue_index)),
}
}
self.queues[queue_index]
.add_used(desc_chain.memory(), desc_chain.head_index(), desc.len())
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}
if used_descs {
self.signal(VirtioInterruptType::Queue(queue_index as u16))
} else {
Ok(())
}
}
fn process_reporting_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
let mut used_descs = false;
while let Some(mut desc_chain) =
self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
{
let mut descs_len = 0;
while let Some(desc) = desc_chain.next() {
descs_len += desc.len();
Self::release_memory_range(desc_chain.memory(), desc.addr(), desc.len() as usize)?;
}
self.queues[queue_index]
.add_used(desc_chain.memory(), desc_chain.head_index(), descs_len)
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}
if used_descs {
self.signal(VirtioInterruptType::Queue(queue_index as u16))
} else {
Ok(())
}
}
fn run(
&mut self,
paused: Arc<AtomicBool>,
paused_sync: Arc<Barrier>,
) -> result::Result<(), EpollHelperError> {
let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
helper.add_event(self.inflate_queue_evt.as_raw_fd(), INFLATE_QUEUE_EVENT)?;
helper.add_event(self.deflate_queue_evt.as_raw_fd(), DEFLATE_QUEUE_EVENT)?;
if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
helper.add_event(reporting_queue_evt.as_raw_fd(), REPORTING_QUEUE_EVENT)?;
}
helper.run(paused, paused_sync, self)?;
Ok(())
}
}
impl EpollHelperHandler for BalloonEpollHandler {
fn handle_event(
&mut self,
_helper: &mut EpollHelper,
event: &epoll::Event,
) -> result::Result<(), EpollHelperError> {
let ev_type = event.data as u16;
match ev_type {
INFLATE_QUEUE_EVENT => {
self.inflate_queue_evt.read().map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to get inflate queue event: {:?}",
e
))
})?;
self.process_queue(0).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used inflate queue: {:?}",
e
))
})?;
}
DEFLATE_QUEUE_EVENT => {
self.deflate_queue_evt.read().map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to get deflate queue event: {:?}",
e
))
})?;
self.process_queue(1).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used deflate queue: {:?}",
e
))
})?;
}
REPORTING_QUEUE_EVENT => {
if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
reporting_queue_evt.read().map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to get reporting queue event: {:?}",
e
))
})?;
self.process_reporting_queue(2).map_err(|e| {
EpollHelperError::HandleEvent(anyhow!(
"Failed to signal used inflate queue: {:?}",
e
))
})?;
} else {
return Err(EpollHelperError::HandleEvent(anyhow!(
"Invalid reporting queue event as no eventfd registered"
)));
}
}
_ => {
return Err(EpollHelperError::HandleEvent(anyhow!(
"Unknown event for virtio-balloon"
)));
}
}
Ok(())
}
}
#[derive(Versionize)]
pub struct BalloonState {
pub avail_features: u64,
pub acked_features: u64,
pub config: VirtioBalloonConfig,
}
impl VersionMapped for BalloonState {}
// Virtio device for exposing entropy to the guest OS through virtio.
pub struct Balloon {
common: VirtioCommon,
id: String,
config: VirtioBalloonConfig,
seccomp_action: SeccompAction,
exit_evt: EventFd,
interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
}
impl Balloon {
// Create a new virtio-balloon.
pub fn new(
id: String,
size: u64,
deflate_on_oom: bool,
free_page_reporting: bool,
seccomp_action: SeccompAction,
exit_evt: EventFd,
state: Option<BalloonState>,
) -> io::Result<Self> {
let mut queue_sizes = vec![QUEUE_SIZE; MIN_NUM_QUEUES];
let (avail_features, acked_features, config, paused) = if let Some(state) = state {
info!("Restoring virtio-balloon {}", id);
(
state.avail_features,
state.acked_features,
state.config,
true,
)
} else {
let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
if deflate_on_oom {
avail_features |= 1u64 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM;
}
if free_page_reporting {
avail_features |= 1u64 << VIRTIO_BALLOON_F_REPORTING;
}
let config = VirtioBalloonConfig {
num_pages: (size >> VIRTIO_BALLOON_PFN_SHIFT) as u32,
..Default::default()
};
(avail_features, 0, config, false)
};
if free_page_reporting {
queue_sizes.push(REPORTING_QUEUE_SIZE);
}
Ok(Balloon {
common: VirtioCommon {
device_type: VirtioDeviceType::Balloon as u32,
avail_features,
acked_features,
paused_sync: Some(Arc::new(Barrier::new(2))),
queue_sizes,
min_queues: MIN_NUM_QUEUES as u16,
paused: Arc::new(AtomicBool::new(paused)),
..Default::default()
},
id,
config,
seccomp_action,
exit_evt,
interrupt_cb: None,
})
}
pub fn resize(&mut self, size: u64) -> Result<(), Error> {
self.config.num_pages = (size >> VIRTIO_BALLOON_PFN_SHIFT) as u32;
if let Some(interrupt_cb) = &self.interrupt_cb {
interrupt_cb
.trigger(VirtioInterruptType::Config)
.map_err(Error::FailedSignal)
} else {
Ok(())
}
}
// Get the actual size of the virtio-balloon.
pub fn get_actual(&self) -> u64 {
(self.config.actual as u64) << VIRTIO_BALLOON_PFN_SHIFT
}
fn state(&self) -> BalloonState {
BalloonState {
avail_features: self.common.avail_features,
acked_features: self.common.acked_features,
config: self.config,
}
}
#[cfg(fuzzing)]
pub fn wait_for_epoll_threads(&mut self) {
self.common.wait_for_epoll_threads();
}
}
impl Drop for Balloon {
fn drop(&mut self) {
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.
let _ = kill_evt.write(1);
}
self.common.wait_for_epoll_threads();
}
}
impl VirtioDevice for Balloon {
fn device_type(&self) -> u32 {
self.common.device_type
}
fn queue_max_sizes(&self) -> &[u16] {
&self.common.queue_sizes
}
fn features(&self) -> u64 {
self.common.avail_features
}
fn ack_features(&mut self, value: u64) {
self.common.ack_features(value)
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
self.read_config_from_slice(self.config.as_slice(), offset, data);
}
fn write_config(&mut self, offset: u64, data: &[u8]) {
// The "actual" field is the only mutable field
if offset != CONFIG_ACTUAL_OFFSET || data.len() != CONFIG_ACTUAL_SIZE {
error!(
"Attempt to write to read-only field: offset {:x} length {}",
offset,
data.len()
);
return;
}
let config = self.config.as_mut_slice();
let config_len = config.len() as u64;
let data_len = data.len() as u64;
if offset + data_len > config_len {
error!(
"Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
config_len,
offset,
data_len,
self.device_type()
);
return;
}
if let Some(end) = offset.checked_add(config.len() as u64) {
let mut offset_config =
&mut config[offset as usize..std::cmp::min(end, config_len) as usize];
offset_config.write_all(data).unwrap();
}
}
fn activate(
&mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
mut queues: Vec<(usize, Queue, EventFd)>,
) -> ActivateResult {
self.common.activate(&queues, &interrupt_cb)?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut virtqueues = Vec::new();
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
let inflate_queue_evt = queue_evt;
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
let deflate_queue_evt = queue_evt;
let reporting_queue_evt =
if self.common.feature_acked(VIRTIO_BALLOON_F_REPORTING) && !queues.is_empty() {
let (_, queue, queue_evt) = queues.remove(0);
virtqueues.push(queue);
Some(queue_evt)
} else {
None
};
self.interrupt_cb = Some(interrupt_cb.clone());
let mut handler = BalloonEpollHandler {
mem,
queues: virtqueues,
interrupt_cb,
inflate_queue_evt,
deflate_queue_evt,
reporting_queue_evt,
kill_evt,
pause_evt,
pbp: None,
};
let paused = self.common.paused.clone();
let paused_sync = self.common.paused_sync.clone();
let mut epoll_threads = Vec::new();
spawn_virtio_thread(
&self.id,
&self.seccomp_action,
Thread::VirtioBalloon,
&mut epoll_threads,
&self.exit_evt,
move || handler.run(paused, paused_sync.unwrap()),
)?;
self.common.epoll_threads = Some(epoll_threads);
event!("virtio-device", "activated", "id", &self.id);
Ok(())
}
fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
let result = self.common.reset();
event!("virtio-device", "reset", "id", &self.id);
result
}
}
impl Pausable for Balloon {
    // Pause/resume delegate to the shared VirtioCommon logic, which
    // parks/unparks the device's epoll worker thread.
    fn pause(&mut self) -> result::Result<(), MigratableError> {
        self.common.pause()
    }
    fn resume(&mut self) -> result::Result<(), MigratableError> {
        self.common.resume()
    }
}
impl Snapshottable for Balloon {
fn id(&self) -> String {
self.id.clone()
}
fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
Snapshot::new_from_versioned_state(&self.state())
}
}
impl Transportable for Balloon {}
impl Migratable for Balloon {}
| {
Self::release_memory_range_4k(&mut self.pbp, desc_chain.memory(), pfn)?;
} | conditional_block |
balloon.rs | // Copyright (c) 2020 Ant Financial
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
seccomp_filters::Thread, thread_helper::spawn_virtio_thread, ActivateResult, EpollHelper,
EpollHelperError, EpollHelperHandler, GuestMemoryMmap, VirtioCommon, VirtioDevice,
VirtioDeviceType, VirtioInterrupt, VirtioInterruptType, EPOLL_HELPER_EVENT_LAST,
VIRTIO_F_VERSION_1,
};
use anyhow::anyhow;
use seccompiler::SeccompAction;
use std::io::{self, Write};
use std::mem::size_of;
use std::os::unix::io::AsRawFd;
use std::result;
use std::sync::{atomic::AtomicBool, Arc, Barrier};
use thiserror::Error;
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use virtio_queue::{Queue, QueueT};
use vm_allocator::page_size::{align_page_size_down, get_page_size};
use vm_memory::{
Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic,
GuestMemoryError, GuestMemoryRegion,
};
use vm_migration::{
Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable, VersionMapped,
};
use vmm_sys_util::eventfd::EventFd;
const QUEUE_SIZE: u16 = 128;
const REPORTING_QUEUE_SIZE: u16 = 32;
const MIN_NUM_QUEUES: usize = 2;
// Inflate virtio queue event.
const INFLATE_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
// Deflate virtio queue event.
const DEFLATE_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
// Reporting virtio queue event.
const REPORTING_QUEUE_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 3;
// Size of a PFN in the balloon interface.
const VIRTIO_BALLOON_PFN_SHIFT: u64 = 12;
// Deflate balloon on OOM
const VIRTIO_BALLOON_F_DEFLATE_ON_OOM: u64 = 2;
// Enable an additional virtqueue to let the guest notify the host about free
// pages.
const VIRTIO_BALLOON_F_REPORTING: u64 = 5;
#[derive(Error, Debug)]
pub enum Error {
#[error("Guest gave us bad memory addresses.: {0}")]
GuestMemory(GuestMemoryError),
#[error("Guest gave us a write only descriptor that protocol says to read from")]
UnexpectedWriteOnlyDescriptor,
#[error("Guest sent us invalid request")]
InvalidRequest,
#[error("Fallocate fail.: {0}")]
FallocateFail(std::io::Error),
#[error("Madvise fail.: {0}")]
MadviseFail(std::io::Error),
#[error("Failed to EventFd write.: {0}")]
EventFdWriteFail(std::io::Error),
#[error("Invalid queue index: {0}")]
InvalidQueueIndex(usize),
#[error("Fail tp signal: {0}")]
FailedSignal(io::Error),
#[error("Descriptor chain is too short")]
DescriptorChainTooShort,
#[error("Failed adding used index: {0}")]
QueueAddUsed(virtio_queue::Error),
#[error("Failed creating an iterator over the queue: {0}")]
QueueIterator(virtio_queue::Error),
}
// Got from include/uapi/linux/virtio_balloon.h
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Versionize)]
pub struct VirtioBalloonConfig {
    // Number of pages host wants Guest to give up.
    num_pages: u32,
    // Number of pages we've actually got in balloon.
    actual: u32,
}
// Tracks which 4k chunks of a single host page have been ballooned so far.
// Needed when the host page size is larger than the 4k balloon PFN
// granularity: the host page can only be released once every chunk is hit.
#[derive(Clone, Debug)]
struct PartiallyBalloonedPage {
    // Page-aligned guest-physical address of the host page being tracked.
    addr: u64,
    // One bit per 4k chunk; padding bits beyond the page are pre-set to 1.
    bitmap: Vec<u64>,
    // Host page size in bytes, cached at construction.
    page_size: u64,
}
impl PartiallyBalloonedPage {
    /// Build the sub-page bitmap for a host page of `page_size` bytes.
    ///
    /// One bit tracks each 4k (PFN-sized) chunk of the host page. Padding
    /// bits beyond the last real chunk are pre-set to 1 so `bitmap_full()`
    /// only depends on the chunks the guest can actually release.
    /// Previously this logic was duplicated in `new()` and `reset()`.
    ///
    /// NOTE(review): if the chunk count is an exact multiple of 64,
    /// `pad_num` is 0 and `1 << 64` overflows in debug builds — the original
    /// had the same latent issue; typical page sizes (4k/16k/64k) never hit
    /// it because the 4k case returns early in the caller.
    fn init_bitmap(page_size: u64) -> Vec<u64> {
        let len = ((page_size >> VIRTIO_BALLOON_PFN_SHIFT) + 63) / 64;
        let mut bitmap = vec![0_u64; len as usize];
        let pad_num = len * 64 - (page_size >> VIRTIO_BALLOON_PFN_SHIFT);
        bitmap[(len - 1) as usize] = !((1 << (64 - pad_num)) - 1);
        bitmap
    }

    /// Create an empty tracker for the host page size reported by the kernel.
    fn new() -> Self {
        let page_size = get_page_size();
        Self {
            addr: 0,
            bitmap: Self::init_bitmap(page_size),
            page_size,
        }
    }

    /// True when `addr` falls inside the host page currently being tracked.
    fn pfn_match(&self, addr: u64) -> bool {
        self.addr == addr & !(self.page_size - 1)
    }

    /// True when every 4k chunk of the tracked page has been ballooned.
    fn bitmap_full(&self) -> bool {
        self.bitmap.iter().all(|b| *b == u64::MAX)
    }

    /// Mark the 4k chunk containing `addr` as ballooned.
    fn set_bit(&mut self, addr: u64) {
        let addr_offset = (addr % self.page_size) >> VIRTIO_BALLOON_PFN_SHIFT;
        self.bitmap[(addr_offset / 64) as usize] |= 1 << (addr_offset % 64);
    }

    /// Forget the current page and start tracking from scratch.
    fn reset(&mut self) {
        self.addr = 0;
        self.bitmap = Self::init_bitmap(self.page_size);
    }
}
const CONFIG_ACTUAL_OFFSET: u64 = 4;
const CONFIG_ACTUAL_SIZE: usize = 4;
// SAFETY: it only has data and has no implicit padding.
unsafe impl ByteValued for VirtioBalloonConfig {}
// Epoll worker state for the balloon device: owns the virtqueues, their
// kick eventfds and the partially-ballooned-page tracker.
struct BalloonEpollHandler {
    mem: GuestMemoryAtomic<GuestMemoryMmap>,
    // Index 0 = inflate, 1 = deflate, 2 (optional) = free-page reporting.
    queues: Vec<Queue>,
    interrupt_cb: Arc<dyn VirtioInterrupt>,
    inflate_queue_evt: EventFd,
    deflate_queue_evt: EventFd,
    // Present only when VIRTIO_BALLOON_F_REPORTING was acked by the guest.
    reporting_queue_evt: Option<EventFd>,
    kill_evt: EventFd,
    pause_evt: EventFd,
    // In-progress host page when page_size > 4k; None otherwise.
    pbp: Option<PartiallyBalloonedPage>,
}
impl BalloonEpollHandler {
    /// Trigger a guest interrupt of the given type; logs and wraps failures.
    fn signal(&self, int_type: VirtioInterruptType) -> result::Result<(), Error> {
        self.interrupt_cb.trigger(int_type).map_err(|e| {
            error!("Failed to signal used queue: {:?}", e);
            Error::FailedSignal(e)
        })
    }

    /// Apply an madvise() hint to a guest-physical range after translating
    /// it to a host virtual address.
    fn advise_memory_range(
        memory: &GuestMemoryMmap,
        range_base: GuestAddress,
        range_len: usize,
        advice: libc::c_int,
    ) -> result::Result<(), Error> {
        let hva = memory
            .get_host_address(range_base)
            .map_err(Error::GuestMemory)?;
        let res =
            // SAFETY: Need unsafe to do syscall madvise
            unsafe { libc::madvise(hva as *mut libc::c_void, range_len as libc::size_t, advice) };
        if res != 0 {
            return Err(Error::MadviseFail(io::Error::last_os_error()));
        }
        Ok(())
    }

    /// Give a guest-physical range back to the host: punch a hole in the
    /// backing file (if the region is file-backed) and MADV_DONTNEED the
    /// mapping so anonymous memory is released too.
    fn release_memory_range(
        memory: &GuestMemoryMmap,
        range_base: GuestAddress,
        range_len: usize,
    ) -> result::Result<(), Error> {
        let region = memory.find_region(range_base).ok_or(Error::GuestMemory(
            GuestMemoryError::InvalidGuestAddress(range_base),
        ))?;
        if let Some(f_off) = region.file_offset() {
            let offset = range_base.0 - region.start_addr().0;
            // SAFETY: FFI call with valid arguments
            let res = unsafe {
                libc::fallocate64(
                    f_off.file().as_raw_fd(),
                    libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE,
                    (offset + f_off.start()) as libc::off64_t,
                    range_len as libc::off64_t,
                )
            };
            if res != 0 {
                return Err(Error::FallocateFail(io::Error::last_os_error()));
            }
        }
        Self::advise_memory_range(memory, range_base, range_len, libc::MADV_DONTNEED)
    }

    /// Release one 4k balloon PFN. When the host page size is exactly 4k the
    /// range is released immediately; otherwise 4k chunks are accumulated in
    /// `pbp` and the whole host page is released only once every chunk of
    /// that page has been seen.
    fn release_memory_range_4k(
        pbp: &mut Option<PartiallyBalloonedPage>,
        memory: &GuestMemoryMmap,
        pfn: u32,
    ) -> result::Result<(), Error> {
        let range_base = GuestAddress((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
        let range_len = 1 << VIRTIO_BALLOON_PFN_SHIFT;
        let page_size: u64 = get_page_size();
        if page_size == 1 << VIRTIO_BALLOON_PFN_SHIFT {
            return Self::release_memory_range(memory, range_base, range_len);
        }
        if pbp.is_none() {
            *pbp = Some(PartiallyBalloonedPage::new());
        }
        if !pbp.as_ref().unwrap().pfn_match(range_base.0) {
            // We are trying to free memory region in a different pfn with current pbp. Flush pbp.
            pbp.as_mut().unwrap().reset();
            pbp.as_mut().unwrap().addr = align_page_size_down(range_base.0);
        }
        pbp.as_mut().unwrap().set_bit(range_base.0);
        if pbp.as_ref().unwrap().bitmap_full() {
            Self::release_memory_range(
                memory,
                vm_memory::GuestAddress(pbp.as_ref().unwrap().addr),
                page_size as usize,
            )?;
            pbp.as_mut().unwrap().reset();
        }
        Ok(())
    }

    /// Drain the inflate (index 0) or deflate (index 1) virtqueue. Each
    /// descriptor carries an array of u32 PFNs: inflate releases those pages
    /// to the host, deflate advises MADV_WILLNEED to fault them back in.
    fn process_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
        let mut used_descs = false;
        while let Some(mut desc_chain) =
            self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
        {
            let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
            let data_chunk_size = size_of::<u32>();
            // The head contains the request type which MUST be readable.
            if desc.is_write_only() {
                error!("The head contains the request type is not right");
                return Err(Error::UnexpectedWriteOnlyDescriptor);
            }
            if desc.len() as usize % data_chunk_size != 0 {
                error!("the request size {} is not right", desc.len());
                return Err(Error::InvalidRequest);
            }
            // Walk the descriptor payload one u32 PFN at a time.
            let mut offset = 0u64;
            while offset < desc.len() as u64 {
                let addr = desc.addr().checked_add(offset).unwrap();
                let pfn: u32 = desc_chain
                    .memory()
                    .read_obj(addr)
                    .map_err(Error::GuestMemory)?;
                offset += data_chunk_size as u64;
                match queue_index {
                    0 => {
                        Self::release_memory_range_4k(&mut self.pbp, desc_chain.memory(), pfn)?;
                    }
                    1 => {
                        let page_size = get_page_size() as usize;
                        let rbase = align_page_size_down((pfn as u64) << VIRTIO_BALLOON_PFN_SHIFT);
                        Self::advise_memory_range(
                            desc_chain.memory(),
                            vm_memory::GuestAddress(rbase),
                            page_size,
                            libc::MADV_WILLNEED,
                        )?;
                    }
                    _ => return Err(Error::InvalidQueueIndex(queue_index)),
                }
            }
            self.queues[queue_index]
                .add_used(desc_chain.memory(), desc_chain.head_index(), desc.len())
                .map_err(Error::QueueAddUsed)?;
            used_descs = true;
        }
        // Only interrupt the guest if at least one descriptor was consumed.
        if used_descs {
            self.signal(VirtioInterruptType::Queue(queue_index as u16))
        } else {
            Ok(())
        }
    }

    /// Drain the free-page reporting queue: each reported descriptor range
    /// is released straight back to the host.
    fn process_reporting_queue(&mut self, queue_index: usize) -> result::Result<(), Error> {
        let mut used_descs = false;
        while let Some(mut desc_chain) =
            self.queues[queue_index].pop_descriptor_chain(self.mem.memory())
        {
            let mut descs_len = 0;
            while let Some(desc) = desc_chain.next() {
                descs_len += desc.len();
                Self::release_memory_range(desc_chain.memory(), desc.addr(), desc.len() as usize)?;
            }
            self.queues[queue_index]
                .add_used(desc_chain.memory(), desc_chain.head_index(), descs_len)
                .map_err(Error::QueueAddUsed)?;
            used_descs = true;
        }
        if used_descs {
            self.signal(VirtioInterruptType::Queue(queue_index as u16))
        } else {
            Ok(())
        }
    }

    /// Epoll loop entry point: registers the queue eventfds and services
    /// events until the kill/pause eventfds fire.
    fn run(
        &mut self,
        paused: Arc<AtomicBool>,
        paused_sync: Arc<Barrier>,
    ) -> result::Result<(), EpollHelperError> {
        let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
        helper.add_event(self.inflate_queue_evt.as_raw_fd(), INFLATE_QUEUE_EVENT)?;
        helper.add_event(self.deflate_queue_evt.as_raw_fd(), DEFLATE_QUEUE_EVENT)?;
        if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
            helper.add_event(reporting_queue_evt.as_raw_fd(), REPORTING_QUEUE_EVENT)?;
        }
        helper.run(paused, paused_sync, self)?;
        Ok(())
    }
}
impl EpollHelperHandler for BalloonEpollHandler {
    /// Dispatch one epoll event to the matching virtqueue handler.
    ///
    /// Queue indices are fixed by `activate()`: 0 = inflate, 1 = deflate,
    /// 2 = free-page reporting (only present when the guest acked
    /// VIRTIO_BALLOON_F_REPORTING).
    fn handle_event(
        &mut self,
        _helper: &mut EpollHelper,
        event: &epoll::Event,
    ) -> result::Result<(), EpollHelperError> {
        let ev_type = event.data as u16;
        match ev_type {
            INFLATE_QUEUE_EVENT => {
                self.inflate_queue_evt.read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!(
                        "Failed to get inflate queue event: {:?}",
                        e
                    ))
                })?;
                self.process_queue(0).map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!(
                        "Failed to signal used inflate queue: {:?}",
                        e
                    ))
                })?;
            }
            DEFLATE_QUEUE_EVENT => {
                self.deflate_queue_evt.read().map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!(
                        "Failed to get deflate queue event: {:?}",
                        e
                    ))
                })?;
                self.process_queue(1).map_err(|e| {
                    EpollHelperError::HandleEvent(anyhow!(
                        "Failed to signal used deflate queue: {:?}",
                        e
                    ))
                })?;
            }
            REPORTING_QUEUE_EVENT => {
                if let Some(reporting_queue_evt) = self.reporting_queue_evt.as_ref() {
                    reporting_queue_evt.read().map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to get reporting queue event: {:?}",
                            e
                        ))
                    })?;
                    // Fixed copy-paste: this error previously claimed the
                    // "inflate" queue had failed.
                    self.process_reporting_queue(2).map_err(|e| {
                        EpollHelperError::HandleEvent(anyhow!(
                            "Failed to signal used reporting queue: {:?}",
                            e
                        ))
                    })?;
                } else {
                    return Err(EpollHelperError::HandleEvent(anyhow!(
                        "Invalid reporting queue event as no eventfd registered"
                    )));
                }
            }
            _ => {
                return Err(EpollHelperError::HandleEvent(anyhow!(
                    "Unknown event for virtio-balloon"
                )));
            }
        }
        Ok(())
    }
}
// Snapshot/restore payload for the balloon device: negotiated feature bits
// plus the current config (balloon target and actual size).
#[derive(Versionize)]
pub struct BalloonState {
    pub avail_features: u64,
    pub acked_features: u64,
    pub config: VirtioBalloonConfig,
}
impl VersionMapped for BalloonState {}
// Virtio balloon device, used to reclaim memory from (and return it to)
// the guest. (The previous comment mentioned "entropy" — a copy-paste
// leftover from the virtio-rng device.)
pub struct Balloon {
    common: VirtioCommon,
    id: String,
    config: VirtioBalloonConfig,
    seccomp_action: SeccompAction,
    exit_evt: EventFd,
    // Set on activate(); used by resize() to raise config-change interrupts.
    interrupt_cb: Option<Arc<dyn VirtioInterrupt>>,
}
impl Balloon {
    // Create a new virtio-balloon.
    //
    // `size` is the initial balloon target in bytes (converted to 4k pages).
    // When `state` is Some the device is being restored from a snapshot:
    // features/config come from the snapshot and the device starts paused
    // until the VM resumes.
    pub fn new(
        id: String,
        size: u64,
        deflate_on_oom: bool,
        free_page_reporting: bool,
        seccomp_action: SeccompAction,
        exit_evt: EventFd,
        state: Option<BalloonState>,
    ) -> io::Result<Self> {
        let mut queue_sizes = vec![QUEUE_SIZE; MIN_NUM_QUEUES];
        let (avail_features, acked_features, config, paused) = if let Some(state) = state {
            info!("Restoring virtio-balloon {}", id);
            (
                state.avail_features,
                state.acked_features,
                state.config,
                true,
            )
        } else {
            let mut avail_features = 1u64 << VIRTIO_F_VERSION_1;
            if deflate_on_oom {
                avail_features |= 1u64 << VIRTIO_BALLOON_F_DEFLATE_ON_OOM;
            }
            if free_page_reporting {
                avail_features |= 1u64 << VIRTIO_BALLOON_F_REPORTING;
            }
            let config = VirtioBalloonConfig {
                num_pages: (size >> VIRTIO_BALLOON_PFN_SHIFT) as u32,
                ..Default::default()
            };
            (avail_features, 0, config, false)
        };
        // The third (reporting) queue exists only when the feature is offered.
        if free_page_reporting {
            queue_sizes.push(REPORTING_QUEUE_SIZE);
        }
        Ok(Balloon {
            common: VirtioCommon {
                device_type: VirtioDeviceType::Balloon as u32,
                avail_features,
                acked_features,
                paused_sync: Some(Arc::new(Barrier::new(2))),
                queue_sizes,
                min_queues: MIN_NUM_QUEUES as u16,
                paused: Arc::new(AtomicBool::new(paused)),
                ..Default::default()
            },
            id,
            config,
            seccomp_action,
            exit_evt,
            interrupt_cb: None,
        })
    }
    // Set a new balloon target of `size` bytes and notify the guest through
    // a config-change interrupt (no-op if the device is not yet activated).
    pub fn resize(&mut self, size: u64) -> Result<(), Error> {
        self.config.num_pages = (size >> VIRTIO_BALLOON_PFN_SHIFT) as u32;
        if let Some(interrupt_cb) = &self.interrupt_cb {
            interrupt_cb
                .trigger(VirtioInterruptType::Config)
                .map_err(Error::FailedSignal)
        } else {
            Ok(())
        }
    }
    // Get the actual size of the virtio-balloon, in bytes.
    pub fn get_actual(&self) -> u64 {
        (self.config.actual as u64) << VIRTIO_BALLOON_PFN_SHIFT
    }
    // Capture the migratable state (feature bits + config).
    fn state(&self) -> BalloonState {
        BalloonState {
            avail_features: self.common.avail_features,
            acked_features: self.common.acked_features,
            config: self.config,
        }
    }
    #[cfg(fuzzing)]
    pub fn wait_for_epoll_threads(&mut self) {
        self.common.wait_for_epoll_threads();
    }
}
impl Drop for Balloon {
    fn drop(&mut self) {
        if let Some(kill_evt) = self.common.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }
        // Join the epoll worker so its eventfds are not torn down while the
        // thread is still using them.
        self.common.wait_for_epoll_threads();
    }
}
impl VirtioDevice for Balloon {
    fn device_type(&self) -> u32 {
        self.common.device_type
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.common.queue_sizes
    }

    fn features(&self) -> u64 {
        self.common.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        self.common.ack_features(value)
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        self.read_config_from_slice(self.config.as_slice(), offset, data);
    }

    /// Handle a guest write to config space. Only the 4-byte "actual" field
    /// is writable; everything else is rejected with an error log.
    fn write_config(&mut self, offset: u64, data: &[u8]) {
        // The "actual" field is the only mutable field
        if offset != CONFIG_ACTUAL_OFFSET || data.len() != CONFIG_ACTUAL_SIZE {
            error!(
                "Attempt to write to read-only field: offset {:x} length {}",
                offset,
                data.len()
            );
            return;
        }
        let config = self.config.as_mut_slice();
        let config_len = config.len() as u64;
        let data_len = data.len() as u64;
        // Defensive re-check; unreachable given the guard above, kept as
        // belt-and-braces against future changes to that guard.
        if offset + data_len > config_len {
            error!(
                "Out-of-bound access to configuration: config_len = {} offset = {:x} length = {} for {}",
                config_len,
                offset,
                data_len,
                self.device_type()
            );
            return;
        }
        // Fixed: the write window ends at offset + data length. The original
        // added config.len() here — harmless under the guards above (the
        // min() clamped it), but wrong in principle.
        if let Some(end) = offset.checked_add(data_len) {
            let mut offset_config =
                &mut config[offset as usize..std::cmp::min(end, config_len) as usize];
            offset_config.write_all(data).unwrap();
        }
    }

    /// Wire up the virtqueues and spawn the epoll worker thread.
    fn activate(
        &mut self,
        mem: GuestMemoryAtomic<GuestMemoryMmap>,
        interrupt_cb: Arc<dyn VirtioInterrupt>,
        mut queues: Vec<(usize, Queue, EventFd)>,
    ) -> ActivateResult {
        self.common.activate(&queues, &interrupt_cb)?;
        let (kill_evt, pause_evt) = self.common.dup_eventfds();
        let mut virtqueues = Vec::new();
        // Queue order is fixed by the spec: inflate first, deflate second,
        // then the optional free-page reporting queue.
        let (_, queue, queue_evt) = queues.remove(0);
        virtqueues.push(queue);
        let inflate_queue_evt = queue_evt;
        let (_, queue, queue_evt) = queues.remove(0);
        virtqueues.push(queue);
        let deflate_queue_evt = queue_evt;
        let reporting_queue_evt =
            if self.common.feature_acked(VIRTIO_BALLOON_F_REPORTING) && !queues.is_empty() {
                let (_, queue, queue_evt) = queues.remove(0);
                virtqueues.push(queue);
                Some(queue_evt)
            } else {
                None
            };
        self.interrupt_cb = Some(interrupt_cb.clone());
        let mut handler = BalloonEpollHandler {
            mem,
            queues: virtqueues,
            interrupt_cb,
            inflate_queue_evt,
            deflate_queue_evt,
            reporting_queue_evt,
            kill_evt,
            pause_evt,
            pbp: None,
        };
        let paused = self.common.paused.clone();
        let paused_sync = self.common.paused_sync.clone();
        let mut epoll_threads = Vec::new();
        spawn_virtio_thread(
            &self.id,
            &self.seccomp_action,
            Thread::VirtioBalloon,
            &mut epoll_threads,
            &self.exit_evt,
            move || handler.run(paused, paused_sync.unwrap()),
        )?;
        self.common.epoll_threads = Some(epoll_threads);
        event!("virtio-device", "activated", "id", &self.id);
        Ok(())
    }

    fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
        let result = self.common.reset();
        event!("virtio-device", "reset", "id", &self.id);
        result
    }
}
impl Pausable for Balloon {
fn pause(&mut self) -> result::Result<(), MigratableError> {
self.common.pause()
}
| self.common.resume()
}
}
impl Snapshottable for Balloon {
    fn id(&self) -> String {
        self.id.clone()
    }
    // Serialize the versioned BalloonState for snapshot / live migration.
    fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
        Snapshot::new_from_versioned_state(&self.state())
    }
}
impl Transportable for Balloon {}
impl Migratable for Balloon {} | fn resume(&mut self) -> result::Result<(), MigratableError> { | random_line_split |
authenticate-helper.ts | const jwt = require('jsonwebtoken');
import { ApolloError, AuthenticationError } from 'apollo-server-express';
import config from '../../config/config'
import { getDb } from '../../helper/db.helper';
import { getNow } from '../../../share/time.helper';
import { CUSTOMER_TYPE } from '../../../share/constant';
import { YOU_MUST_BE_LOGGED_IN, MSG_SYSTEM_ERROR, PASSWORD_IS_WRONG } from '../../config/constant';
const crypto = require('crypto-js');
const bcrypt = require('bcrypt');
/**
 * Verify a JWT and normalize the outcome to `{ err, data }`.
 *
 * The original passed a callback to `jwt.verify()` and relied on the
 * undocumented fact that jsonwebtoken returns the callback's return value
 * when verification happens synchronously. Using the documented synchronous
 * form (no callback; throws on failure) keeps the exact same
 * `{ err, data }` contract without depending on library internals.
 * Callers check `.data` first and fall back to `.err.name`.
 */
const verifyJWT = (token: string) => {
    try {
        return { err: null, data: jwt.verify(token, config.JWT_SECRET) };
    } catch (err: any) {
        return { err, data: undefined };
    }
}
/**
 * Express/Apollo context builder: resolves the calling customer from the
 * request's credentials and (possibly) rotates the refresh-token cookie.
 *
 * Supported credential forms:
 *  - no Authorization header + refreshToken cookie -> mint a new JWT
 *    (a small list of public operations skips auth entirely);
 *  - "Bearer <jwt>" -> verify; the getCustomer / getChildCustomer
 *    operations additionally rotate the refresh token;
 *  - "Basic <base64(user:pass)>" -> password login, issues JWT + cookie.
 *
 * Resolves to {} (anonymous), { customer } or { customer, accessToken };
 * throws AuthenticationError on bad or expired credentials.
 */
export const checkAuthorization = async (req: any, res: any) => {
    let token = req.headers.authorization || null;
    const now = getNow();
    // NOTE(review): req.headers['user-agent'] may be undefined for some
    // clients, in which case getUA would throw — confirm upstream always
    // sets it.
    const deviceName = getUA(req.headers['user-agent']);
    if (!token) {
        if (!!req.cookies.refreshToken) {
            // Public operations that never need a customer in context.
            const ignoredOperationList = ['getProducts', 'getCategories', 'getNewspapers'];
            if (ignoredOperationList.includes(req.body.operationName)) {
                // console.log(`Ignore ` + req.body.operationName );
                return {};
            }
            return getNewJwtToken(req.cookies.refreshToken);
        } else {
            return {};
        }
    } else {
        if (token.startsWith('Bearer')) {
            token = token.replace('Bearer ', '');
            let customerJWT = verifyJWT(token);
            if (customerJWT.data) {
                customerJWT = customerJWT.data;
                if (req.body.operationName == 'getCustomer') {
                    //get main customer
                    let customer = await findAndProcessCustomer({ _id: customerJWT._id });
                    //reset refresh token
                    const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}`);
                    await setNewRefreshToken(customer, req.cookies.refreshToken, newRefreshToken, res);
                    return {
                        customer
                    }
                } else if (req.body.operationName == 'getChildCustomer') {
                    // Impersonation: a GROUP_ADMIN switches to one of its
                    // child customers; the child id is folded into both the
                    // JWT and the rotated refresh token.
                    if (!!req.body.variables.customer_id) {
                        checkCustomerBelong(req.body.variables.customer_id, customerJWT)
                        customerJWT.affectedCustomerId = req.body.variables.customer_id;
                        const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}-${customerJWT.affectedCustomerId}`);
                        await setNewRefreshToken(customerJWT, req.cookies.refreshToken, newRefreshToken, res);
                    } else {
                        throw new AuthenticationError("Customer Id is missing");
                    }
                    let jwtToken = jwt.sign(
                        customerJWT,
                        config.JWT_SECRET
                    );
                    return {
                        customer: {
                            ...customerJWT,
                        },
                        accessToken: jwtToken,
                    }
                } else {
                    // Any other operation: trust the (verified) JWT payload.
                    return {
                        customer: customerJWT
                    }
                }
            } else if (customerJWT.err.name == 'TokenExpiredError' && !!req.cookies.refreshToken) {
                // Expired access token but a refresh token exists: re-mint.
                return getNewJwtToken(req.cookies.refreshToken);
            } else {
                throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
            }
        }
        if (token.startsWith('Basic')) {
            if (!config.REFRESH_TOKEN_EXPIRE_LIMIT || !config.JWT_EXPIRE_LIMIT) {
                throw new AuthenticationError(MSG_SYSTEM_ERROR);
            }
            //login
            token = token.replace('Basic ', '');
            // "user:pass" — user may be an email or a username.
            let query = Buffer.from(token, 'base64').toString('binary').split(':');
            let customer = await findAndProcessCustomer({
                '$or': [
                    {
                        'email': query[0],
                    },
                    {
                        'username': query[0],
                    },
                ]
            }, true);
            if (customer) {
                //match password
                const match = await bcrypt.compare(query[1], customer.password);
                if (match) {
                    let sessionId = now;
                    let customerJWT = {
                        _id: customer._id,
                        username: customer.username,
                        name: customer.name,
                        customer_list: customer.customer_list,
                        store_id: customer.store_id,
                    };
                    let jwtToken = jwt.sign(
                        {
                            ...customerJWT,
                            exp: now + config.JWT_EXPIRE_LIMIT,
                            _id: customer._id,
                            username: customer.username,
                            name: customer.name,
                            customer_list: customer.customer_list,
                            affectedCustomerId: customer._id,
                            sessionId,
                            type: customer.type
                        },
                        config.JWT_SECRET
                    );
                    const refreshToken = encryptAES(`${customer._id}-${deviceName}-${now}`);
                    setRefreshTokenCookie(refreshToken, res);
                    //update customer
                    // Record the session expiry keyed by refresh token;
                    // create the session map on first login.
                    let setObject: any = {};
                    if (!!customer.session) {
                        setObject[`session.${refreshToken}`] = now + config.REFRESH_TOKEN_EXPIRE_LIMIT;
                    } else {
                        setObject = {
                            session: {
                                [refreshToken]: now + config.REFRESH_TOKEN_EXPIRE_LIMIT
                            }
                        }
                    }
                    let db = await getDb();
                    await db.collection('customer').updateOne({
                        _id: customer._id,
                    }, {
                        '$set': setObject
                    });
                    return {
                        customer,
                        accessToken: jwtToken,
                    }
                } else {
                    throw new AuthenticationError(PASSWORD_IS_WRONG);
                }
            } else {
                throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
            }
        }
        // Unrecognized Authorization scheme: treat as anonymous.
        return {};
    }
}
/**
 * Ensure `customerId` is one of the parent customer's children.
 * Throws AuthenticationError otherwise; returns true on success.
 */
export const checkCustomerBelong = (customerId: any, parentCustomer: any) => {
    const childIds = (parentCustomer.customer_list || []).map((child: any) => child._id);
    if (!childIds.includes(customerId)) {
        throw new AuthenticationError('This customer not belong to you');
    }
    return true;
}
// AES-encrypt arbitrary text with the JWT secret. Used to mint opaque
// refresh tokens from "<id>-<device>-<timestamp>[-<childId>]" strings.
const encryptAES = (text: string) => {
    let encrypted = crypto.AES.encrypt(text, config.JWT_SECRET).toString();
    return encrypted;
}
// Inverse of encryptAES: returns the original UTF-8 plaintext (empty
// string if the ciphertext cannot be decrypted with the secret).
const decryptAES = (text: string) => {
    let bytesDecrypted = crypto.AES.decrypt(text, config.JWT_SECRET);
    return bytesDecrypted.toString(crypto.enc.Utf8);
}
/**
 * Best-effort device-name detection from a User-Agent string.
 *
 * Patterns are checked in declaration order and the LAST match wins, which
 * lets specific entries (e.g. "Android") override generic ones that also
 * match (e.g. "Generic Linux" via /Linux/). Returns "Unknown" when nothing
 * matches or when the header is missing.
 */
export const getUA = (userAgent: string) => {
    let device = "Unknown";
    const ua: Record<string, RegExp> = {
        "Generic Linux": /Linux/i,
        "Android": /Android/i,
        "BlackBerry": /BlackBerry/i,
        "Bluebird": /EF500/i,
        "Chrome OS": /CrOS/i,
        "Datalogic": /DL-AXIS/i,
        "Honeywell": /CT50/i,
        "iPad": /iPad/i,
        "iPhone": /iPhone/i,
        "iPod": /iPod/i,
        "macOS": /Macintosh/i,
        "Windows": /IEMobile|Windows/i,
        "Zebra": /TC70|TC55/i,
    }
    // Guard against a missing User-Agent header (req.headers['user-agent']
    // can be undefined); the original would have thrown on .match().
    if (!userAgent) {
        return device;
    }
    // for..of instead of the original Array.map-for-side-effects; keeps the
    // last-match-wins semantics.
    for (const [name, pattern] of Object.entries(ua)) {
        if (pattern.test(userAgent)) {
            device = name;
        }
    }
    return device;
}
/**
 * Exchange an unexpired refresh token for a new JWT.
 *
 * Looks up the customer whose `session.<refreshToken>` expiry is still in
 * the future. For GROUP_ADMIN customers the refresh token also encodes the
 * currently impersonated child customer, which is resolved and returned.
 *
 * On an unknown/expired token the stale session entry is removed and an
 * INVALID/EXPIRED_REFRESH_TOKEN ApolloError is thrown.
 */
const getNewJwtToken = async (refreshToken: string) => {
    const now = getNow();
    let sessionId = now;
    let condition: any = {};
    condition[`session.${refreshToken}`] = {
        '$gte': now
    }
    let customer = await findAndProcessCustomer(condition);
    if (customer) {
        let customerJWT = {
            _id: customer._id,
            username: customer.username,
            name: customer.name,
            customer_list: customer.customer_list,
            type: customer.type,
            store_id: customer.store_id,
        };
        let customerChild = null;
        //Must return child customer info when customer is GROUP_ADMIN
        if (customer.type === CUSTOMER_TYPE.GROUP_ADMIN) {
            let decryptResult = decryptAES(refreshToken);
            // Token layout: `${id}-${deviceName}-${now}[-${affectedCustomerId}]`
            let affectedCustomerId = decryptResult.split('-')[3] ?? null;
            affectedCustomerId = parseInt(affectedCustomerId);
            if (affectedCustomerId) {
                customerChild = await findAndProcessCustomer({ _id: affectedCustomerId });
            }
        }
        let jwtToken = jwt.sign(
            {
                ...customerJWT,
                exp: now + config.JWT_EXPIRE_LIMIT,
                sessionId,
                affectedCustomerId: customerChild ? customerChild._id : customer._id,
                store_id: customerChild ? customerChild.store_id : customer.store_id,
                type: customer.type
            },
            config.JWT_SECRET
        );
        return {
            customer: customerChild ?? customer,
            accessToken: jwtToken
        }
    } else {
        // Remove the stale session entry so the session map does not grow
        // without bound.
        // Fixed: sessions live on the 'customer' collection (see
        // checkAuthorization / setNewRefreshToken); this previously targeted
        // a 'user' collection, so expired tokens were never purged.
        let db = await getDb();
        condition[`session.${refreshToken}`] = {
            '$ne': null
        };
        let unsetObject: any = {};
        unsetObject[`session.${refreshToken}`] = 1;
        await db.collection('customer').updateOne(condition, {
            '$unset': unsetObject
        });
        throw new ApolloError('Invalid/Expired Refresh Token', 'INVALID/EXPIRED_REFRESH_TOKEN');
    }
}
export const findAndProcessCustomer = async (condition: any, projectPassword: boolean = false) => {
let db = await getDb();
let projection: any = {
projection: 0
}
if (!projectPassword) |
let customer = await db.collection('customer').aggregate([
{ '$match': condition },
{ '$project': projection },
{
'$lookup': {
from: 'store',
localField: 'store_id',
foreignField: '_id',
as: 'store'
}
},
{
'$lookup': {
from: 'zip_code',
localField: 'zip_code_id',
foreignField: '_id',
as: 'zip_code'
}
},
{
'$lookup': {
from: 'municipality',
localField: 'zip_code.municipality_id',
foreignField: '_id',
as: 'municipality'
}
},
{
'$set': {
municipality: {
$arrayElemAt: ['$municipality', 0]
},
store: {
'$arrayElemAt': ['$store', 0]
},
zip_code: {
'$arrayElemAt': ['$zip_code', 0]
},
}
},
{
$addFields: {
'municipality.overweight_price': {
$toDouble: '$municipality.overweight_price',
}
}
},
{
'$lookup': {
from: 'zip_code',
localField: 'store.zip_code_id',
foreignField: '_id',
as: 'store_zip_code'
}
},
{
'$set': {
store_zip_code: {
'$first': '$store_zip_code'
},
}
},
]).toArray();
customer = customer[0];
if (customer) {
customer.customer_list = customer?.customer_list ? Object.values(customer.customer_list) : null;
return customer;
}
return null;
}
/**
 * Resolver guard: require a logged-in customer in the context and, when a
 * customerId is supplied, require it to belong to that customer.
 */
export const checkCustomerContext = (context: any, customerId: number | null = null) => {
    if (!context.customer) {
        throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
    }
    if (customerId) {
        checkCustomerBelong(customerId, context.customer);
    }
}
// Store the refresh token in an httpOnly cookie whose lifetime matches
// REFRESH_TOKEN_EXPIRE_LIMIT (config values are in seconds; Date wants ms).
const setRefreshTokenCookie = (refreshToken: any, res: any) => {
    res.cookie('refreshToken', refreshToken, {
        httpOnly: true,
        expires: new Date((getNow() + config.REFRESH_TOKEN_EXPIRE_LIMIT) * 1000)
    });
}
const setNewRefreshToken = async (customer: any, oldRefreshToken: string, newRefreshToken: string, res: any) => {
if (customer.type === CUSTOMER_TYPE.GROUP_ADMIN) {
const now = getNow();
const setObject: any = {};
setObject[`session.${newRefreshToken}`] = now + config.REFRESH_TOKEN_EXPIRE_LIMIT;
const unsetObject: any = {};
unsetObject[`session.${oldRefreshToken}`] = 1;
let db = await getDb();
await db.collection('customer').updateOne({
_id: customer._id,
}, {
'$set': setObject,
'$unset': unsetObject
});
await setRefreshTokenCookie(newRefreshToken, res);
}
} | {
projection = {
'password': 0
}
} | conditional_block |
authenticate-helper.ts | const jwt = require('jsonwebtoken');
import { ApolloError, AuthenticationError } from 'apollo-server-express';
import config from '../../config/config'
import { getDb } from '../../helper/db.helper';
import { getNow } from '../../../share/time.helper';
import { CUSTOMER_TYPE } from '../../../share/constant';
import { YOU_MUST_BE_LOGGED_IN, MSG_SYSTEM_ERROR, PASSWORD_IS_WRONG } from '../../config/constant';
const crypto = require('crypto-js');
const bcrypt = require('bcrypt');
const verifyJWT = (token: string) => {
return jwt.verify(token, config.JWT_SECRET, (err: any, decoded: any) => {
return {
err,
data: decoded
}
});
}
export const checkAuthorization = async (req: any, res: any) => {
let token = req.headers.authorization || null;
const now = getNow();
const deviceName = getUA(req.headers['user-agent']);
if (!token) {
if (!!req.cookies.refreshToken) {
const ignoredOperationList = ['getProducts', 'getCategories', 'getNewspapers'];
if (ignoredOperationList.includes(req.body.operationName)) {
// console.log(`Ignore ` + req.body.operationName );
return {};
}
return getNewJwtToken(req.cookies.refreshToken);
} else {
return {};
}
} else {
if (token.startsWith('Bearer')) {
token = token.replace('Bearer ', '');
let customerJWT = verifyJWT(token);
if (customerJWT.data) {
customerJWT = customerJWT.data;
if (req.body.operationName == 'getCustomer') {
//get main customer
let customer = await findAndProcessCustomer({ _id: customerJWT._id });
//reset refresh token
const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}`);
await setNewRefreshToken(customer, req.cookies.refreshToken, newRefreshToken, res);
return {
customer
}
} else if (req.body.operationName == 'getChildCustomer') {
if (!!req.body.variables.customer_id) {
checkCustomerBelong(req.body.variables.customer_id, customerJWT)
customerJWT.affectedCustomerId = req.body.variables.customer_id;
const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}-${customerJWT.affectedCustomerId}`);
await setNewRefreshToken(customerJWT, req.cookies.refreshToken, newRefreshToken, res);
} else {
throw new AuthenticationError("Customer Id is missing");
}
let jwtToken = jwt.sign(
customerJWT,
config.JWT_SECRET
);
return {
customer: {
...customerJWT,
},
accessToken: jwtToken,
}
} else {
return {
customer: customerJWT
}
}
} else if (customerJWT.err.name == 'TokenExpiredError' && !!req.cookies.refreshToken) {
return getNewJwtToken(req.cookies.refreshToken);
} else {
throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
}
}
| //login
token = token.replace('Basic ', '');
let query = Buffer.from(token, 'base64').toString('binary').split(':');
let customer = await findAndProcessCustomer({
'$or': [
{
'email': query[0],
},
{
'username': query[0],
},
]
}, true);
if (customer) {
//match password
const match = await bcrypt.compare(query[1], customer.password);
if (match) {
let sessionId = now;
let customerJWT = {
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
store_id: customer.store_id,
};
let jwtToken = jwt.sign(
{
...customerJWT,
exp: now + config.JWT_EXPIRE_LIMIT,
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
affectedCustomerId: customer._id,
sessionId,
type: customer.type
},
config.JWT_SECRET
);
const refreshToken = encryptAES(`${customer._id}-${deviceName}-${now}`);
setRefreshTokenCookie(refreshToken, res);
//update customer
let setObject: any = {};
if (!!customer.session) {
setObject[`session.${refreshToken}`] = now + config.REFRESH_TOKEN_EXPIRE_LIMIT;
} else {
setObject = {
session: {
[refreshToken]: now + config.REFRESH_TOKEN_EXPIRE_LIMIT
}
}
}
let db = await getDb();
await db.collection('customer').updateOne({
_id: customer._id,
}, {
'$set': setObject
});
return {
customer,
accessToken: jwtToken,
}
} else {
throw new AuthenticationError(PASSWORD_IS_WRONG);
}
} else {
throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
}
}
return {};
}
}
export const checkCustomerBelong = (customerId: any, parentCustomer: any) => {
if (!parentCustomer.customer_list || !parentCustomer.customer_list.map((x: any) => x._id).includes(customerId)) {
throw new AuthenticationError('This customer not belong to you');
}
return true;
}
const encryptAES = (text: string) => {
let encrypted = crypto.AES.encrypt(text, config.JWT_SECRET).toString();
return encrypted;
}
const decryptAES = (text: string) => {
let bytesDecrypted = crypto.AES.decrypt(text, config.JWT_SECRET);
return bytesDecrypted.toString(crypto.enc.Utf8);
}
export const getUA = (userAgent: string) => {
let device = "Unknown";
const ua: any = {
"Generic Linux": /Linux/i,
"Android": /Android/i,
"BlackBerry": /BlackBerry/i,
"Bluebird": /EF500/i,
"Chrome OS": /CrOS/i,
"Datalogic": /DL-AXIS/i,
"Honeywell": /CT50/i,
"iPad": /iPad/i,
"iPhone": /iPhone/i,
"iPod": /iPod/i,
"macOS": /Macintosh/i,
"Windows": /IEMobile|Windows/i,
"Zebra": /TC70|TC55/i,
}
Object.keys(ua).map(v => userAgent.match(ua[v]) && (device = v));
return device;
}
const getNewJwtToken = async (refreshToken: string) => {
const now = getNow();
let sessionId = now;
let condition: any = {};
condition[`session.${refreshToken}`] = {
'$gte': now
}
let customer = await findAndProcessCustomer(condition);
if (customer) {
let customerJWT = {
_id: customer._id,
username: customer.username,
name: customer.name,
customer_list: customer.customer_list,
type: customer.type,
store_id: customer.store_id,
};
let customerChild = null;
//Must return child customer info when customer is GROUP_ADMIN
if (customer.type === CUSTOMER_TYPE.GROUP_ADMIN) {
let decryptResult = decryptAES(refreshToken);
//const newRefreshToken = encryptAES(`${customerJWT._id}-${deviceName}-${now}-${customerJWT.affectedCustomerId}`);
let affectedCustomerId = decryptResult.split('-')[3] ?? null;
affectedCustomerId = parseInt(affectedCustomerId);
if (affectedCustomerId) {
customerChild = await findAndProcessCustomer({ _id: affectedCustomerId });
}
}
let jwtToken = jwt.sign(
{
...customerJWT,
exp: now + config.JWT_EXPIRE_LIMIT,
sessionId,
affectedCustomerId: customerChild ? customerChild._id : customer._id,
store_id: customerChild ? customerChild.store_id : customer.store_id,
type: customer.type
},
config.JWT_SECRET
);
return {
customer: customerChild ?? customer,
accessToken: jwtToken
}
} else {
//remove refresh token
let db = await getDb();
condition[`session.${refreshToken}`] = {
'$ne': null
};
let unsetObject: any = {};
unsetObject[`session.${refreshToken}`] = 1;
await db.collection('user').updateOne(condition, {
'$unset': unsetObject
});
throw new ApolloError('Invalid/Expired Refresh Token', 'INVALID/EXPIRED_REFRESH_TOKEN');
}
}
export const findAndProcessCustomer = async (condition: any, projectPassword: boolean = false) => {
let db = await getDb();
let projection: any = {
projection: 0
}
if (!projectPassword) {
projection = {
'password': 0
}
}
let customer = await db.collection('customer').aggregate([
{ '$match': condition },
{ '$project': projection },
{
'$lookup': {
from: 'store',
localField: 'store_id',
foreignField: '_id',
as: 'store'
}
},
{
'$lookup': {
from: 'zip_code',
localField: 'zip_code_id',
foreignField: '_id',
as: 'zip_code'
}
},
{
'$lookup': {
from: 'municipality',
localField: 'zip_code.municipality_id',
foreignField: '_id',
as: 'municipality'
}
},
{
'$set': {
municipality: {
$arrayElemAt: ['$municipality', 0]
},
store: {
'$arrayElemAt': ['$store', 0]
},
zip_code: {
'$arrayElemAt': ['$zip_code', 0]
},
}
},
{
$addFields: {
'municipality.overweight_price': {
$toDouble: '$municipality.overweight_price',
}
}
},
{
'$lookup': {
from: 'zip_code',
localField: 'store.zip_code_id',
foreignField: '_id',
as: 'store_zip_code'
}
},
{
'$set': {
store_zip_code: {
'$first': '$store_zip_code'
},
}
},
]).toArray();
customer = customer[0];
if (customer) {
customer.customer_list = customer?.customer_list ? Object.values(customer.customer_list) : null;
return customer;
}
return null;
}
export const checkCustomerContext = (context: any, customerId: number | null = null) => {
if (!context.customer) {
throw new AuthenticationError(YOU_MUST_BE_LOGGED_IN);
} else if (customerId) {
checkCustomerBelong(customerId, context.customer)
}
}
const setRefreshTokenCookie = (refreshToken: any, res: any) => {
res.cookie('refreshToken', refreshToken, {
httpOnly: true,
expires: new Date((getNow() + config.REFRESH_TOKEN_EXPIRE_LIMIT) * 1000)
});
}
const setNewRefreshToken = async (customer: any, oldRefreshToken: string, newRefreshToken: string, res: any) => {
if (customer.type === CUSTOMER_TYPE.GROUP_ADMIN) {
const now = getNow();
const setObject: any = {};
setObject[`session.${newRefreshToken}`] = now + config.REFRESH_TOKEN_EXPIRE_LIMIT;
const unsetObject: any = {};
unsetObject[`session.${oldRefreshToken}`] = 1;
let db = await getDb();
await db.collection('customer').updateOne({
_id: customer._id,
}, {
'$set': setObject,
'$unset': unsetObject
});
await setRefreshTokenCookie(newRefreshToken, res);
}
} | if (token.startsWith('Basic')) {
if (!config.REFRESH_TOKEN_EXPIRE_LIMIT || !config.JWT_EXPIRE_LIMIT) {
throw new AuthenticationError(MSG_SYSTEM_ERROR);
}
| random_line_split |
disk_location.go | package storage
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util"
)
type DiskLocation struct {
Directory string
DirectoryUuid string
IdxDirectory string
DiskType types.DiskType
MaxVolumeCount int32
OriginalMaxVolumeCount int32
MinFreeSpace util.MinFreeSpace
volumes map[needle.VolumeId]*Volume
volumesLock sync.RWMutex
// erasure coding
ecVolumes map[needle.VolumeId]*erasure_coding.EcVolume
ecVolumesLock sync.RWMutex
isDiskSpaceLow bool
closeCh chan struct{}
}
func GenerateDirUuid(dir string) (dirUuidString string, err error) {
glog.V(1).Infof("Getting uuid of volume directory:%s", dir)
dirUuidString = ""
fileName := dir + "/vol_dir.uuid"
if !util.FileExists(fileName) {
dirUuid, _ := uuid.NewRandom()
dirUuidString = dirUuid.String()
writeErr := util.WriteFile(fileName, []byte(dirUuidString), 0644)
if writeErr != nil {
return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, writeErr)
}
} else {
uuidData, readErr := os.ReadFile(fileName)
if readErr != nil {
return "", fmt.Errorf("failed to read uuid from %s : %v", fileName, readErr)
}
dirUuidString = string(uuidData)
}
return dirUuidString, nil
}
func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
dir = util.ResolvePath(dir)
if idxDir == "" {
idxDir = dir
} else {
idxDir = util.ResolvePath(idxDir)
}
dirUuid, err := GenerateDirUuid(dir)
if err != nil {
glog.Fatalf("cannot generate uuid of dir %s: %v", dir, err)
}
location := &DiskLocation{
Directory: dir,
DirectoryUuid: dirUuid,
IdxDirectory: idxDir,
DiskType: diskType,
MaxVolumeCount: maxVolumeCount,
OriginalMaxVolumeCount: maxVolumeCount,
MinFreeSpace: minFreeSpace,
}
location.volumes = make(map[needle.VolumeId]*Volume)
location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
location.closeCh = make(chan struct{})
go func() {
location.CheckDiskSpace()
for {
select {
case <-location.closeCh:
return
case <-time.After(time.Minute):
location.CheckDiskSpace()
}
}
}()
return location
}
func volumeIdFromFileName(filename string) (needle.VolumeId, string, error) {
if isValidVolume(filename) {
base := filename[:len(filename)-4]
collection, volumeId, err := parseCollectionVolumeId(base)
return volumeId, collection, err
}
return 0, "", fmt.Errorf("file is not a volume: %s", filename)
}
func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
i := strings.LastIndex(base, "_")
if i > 0 {
collection, base = base[0:i], base[i+1:]
}
vol, err := needle.NewVolumeId(base)
return collection, vol, err
}
func isValidVolume(basename string) bool {
return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif")
}
func getValidVolumeName(basename string) string {
if isValidVolume(basename) {
return basename[:len(basename)-4]
}
return ""
}
func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64) bool {
basename := dirEntry.Name()
if dirEntry.IsDir() {
return false
}
volumeName := getValidVolumeName(basename)
if volumeName == "" {
return false
}
// skip if ec volumes exists
if skipIfEcVolumesExists {
if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
return false
}
}
// check for incomplete volume
noteFile := l.Directory + "/" + volumeName + ".note"
if util.FileExists(noteFile) {
note, _ := os.ReadFile(noteFile)
glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
removeVolumeFiles(l.Directory + "/" + volumeName)
removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
return false
}
// parse out collection, volume id
vid, collection, err := volumeIdFromFileName(basename)
if err != nil {
glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
return false
}
// avoid loading one volume more than once
l.volumesLock.RLock()
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
glog.V(1).Infof("loaded volume, %v", vid)
return true
}
// load the volume
v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout)
if e != nil {
glog.V(0).Infof("new volume %s error %s", volumeName, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
foundVolumeNames := make(map[string]bool)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
volumeName := getValidVolumeName(entry.Name())
if volumeName == "" {
continue
}
if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true
task_queue <- entry
}
}
}
close(task_queue)
}()
var wg sync.WaitGroup
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for fi := range task_queue {
_ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
}
}()
}
wg.Wait()
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
if ok {
num, err := strconv.Atoi(val)
if err != nil || num < 1 {
num = 10
glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
}
workerNum = num
} else {
if workerNum <= 10 {
workerNum = 10
}
}
l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
l.volumesLock.Lock()
delVolsMap := l.unmountVolumeByCollection(collection)
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
delEcVolsMap := l.unmountEcVolumeByCollection(collection)
l.ecVolumesLock.Unlock()
errChain := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(2)
go func() {
for _, v := range delVolsMap {
if err := v.Destroy(false); err != nil {
errChain <- err
}
}
wg.Done()
}()
go func() {
for _, v := range delEcVolsMap {
v.Destroy()
}
wg.Done()
}()
go func() {
wg.Wait()
close(errChain)
}()
errBuilder := strings.Builder{}
for err := range errChain {
errBuilder.WriteString(err.Error())
errBuilder.WriteString("; ")
}
if errBuilder.Len() > 0 {
e = fmt.Errorf(errBuilder.String())
}
return
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
}
e = v.Destroy(onlyEmpty)
if e != nil {
return
}
found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
}
return false
}
var ErrVolumeNotFound = fmt.Errorf("volume not found")
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId, onlyEmpty bool) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
_, err := l.deleteVolumeById(vid, onlyEmpty)
return err
}
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
v.Close()
delete(l.volumes, vid)
return nil
}
func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume {
deltaVols := make(map[needle.VolumeId]*Volume, 0)
for k, v := range l.volumes {
if v.Collection == collectionName && !v.isCompacting && !v.isCommitCompacting {
deltaVols[k] = v
}
}
for k := range deltaVols {
delete(l.volumes, k)
}
return deltaVols
}
func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
l.volumes[vid] = volume
volume.location = l
}
func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
v, ok := l.volumes[vid]
return v, ok
}
func (l *DiskLocation) VolumesLen() int {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
return len(l.volumes)
}
func (l *DiskLocation) SetStopping() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.SyncToDisk()
}
l.volumesLock.Unlock()
return
}
func (l *DiskLocation) Close() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.Close()
}
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
for _, ecVolume := range l.ecVolumes {
ecVolume.Close()
}
l.ecVolumesLock.Unlock()
close(l.closeCh)
return
}
func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
// println("LocateVolume", vid, "on", l.Directory)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
// println("checking", entry.Name(), "...")
volId, _, err := volumeIdFromFileName(entry.Name())
// println("volId", volId, "err", err)
if vid == volId && err == nil {
return entry, true
}
}
}
return nil, false
}
func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
for _, vol := range l.volumes {
if vol.IsReadOnly() {
continue
}
datSize, idxSize, _ := vol.FileStat()
unUsedSpace += volumeSizeLimit - (datSize + idxSize)
}
return
}
func (l *DiskLocation) CheckDiskSpace() {
if dir, e := filepath.Abs(l.Directory); e == nil {
s := stats.NewDiskStatus(dir)
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))
isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
if isLow != l.isDiskSpaceLow {
l.isDiskSpaceLow = !l.isDiskSpaceLow
} |
glog.V(logLevel).Infof("dir %s %s", dir, desc)
}
} |
logLevel := glog.Level(4)
if l.isDiskSpaceLow {
logLevel = glog.Level(0)
} | random_line_split |
disk_location.go | package storage
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util"
)
type DiskLocation struct {
Directory string
DirectoryUuid string
IdxDirectory string
DiskType types.DiskType
MaxVolumeCount int32
OriginalMaxVolumeCount int32
MinFreeSpace util.MinFreeSpace
volumes map[needle.VolumeId]*Volume
volumesLock sync.RWMutex
// erasure coding
ecVolumes map[needle.VolumeId]*erasure_coding.EcVolume
ecVolumesLock sync.RWMutex
isDiskSpaceLow bool
closeCh chan struct{}
}
func GenerateDirUuid(dir string) (dirUuidString string, err error) |
func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
dir = util.ResolvePath(dir)
if idxDir == "" {
idxDir = dir
} else {
idxDir = util.ResolvePath(idxDir)
}
dirUuid, err := GenerateDirUuid(dir)
if err != nil {
glog.Fatalf("cannot generate uuid of dir %s: %v", dir, err)
}
location := &DiskLocation{
Directory: dir,
DirectoryUuid: dirUuid,
IdxDirectory: idxDir,
DiskType: diskType,
MaxVolumeCount: maxVolumeCount,
OriginalMaxVolumeCount: maxVolumeCount,
MinFreeSpace: minFreeSpace,
}
location.volumes = make(map[needle.VolumeId]*Volume)
location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
location.closeCh = make(chan struct{})
go func() {
location.CheckDiskSpace()
for {
select {
case <-location.closeCh:
return
case <-time.After(time.Minute):
location.CheckDiskSpace()
}
}
}()
return location
}
func volumeIdFromFileName(filename string) (needle.VolumeId, string, error) {
if isValidVolume(filename) {
base := filename[:len(filename)-4]
collection, volumeId, err := parseCollectionVolumeId(base)
return volumeId, collection, err
}
return 0, "", fmt.Errorf("file is not a volume: %s", filename)
}
func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
i := strings.LastIndex(base, "_")
if i > 0 {
collection, base = base[0:i], base[i+1:]
}
vol, err := needle.NewVolumeId(base)
return collection, vol, err
}
func isValidVolume(basename string) bool {
return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif")
}
func getValidVolumeName(basename string) string {
if isValidVolume(basename) {
return basename[:len(basename)-4]
}
return ""
}
func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64) bool {
basename := dirEntry.Name()
if dirEntry.IsDir() {
return false
}
volumeName := getValidVolumeName(basename)
if volumeName == "" {
return false
}
// skip if ec volumes exists
if skipIfEcVolumesExists {
if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
return false
}
}
// check for incomplete volume
noteFile := l.Directory + "/" + volumeName + ".note"
if util.FileExists(noteFile) {
note, _ := os.ReadFile(noteFile)
glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
removeVolumeFiles(l.Directory + "/" + volumeName)
removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
return false
}
// parse out collection, volume id
vid, collection, err := volumeIdFromFileName(basename)
if err != nil {
glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
return false
}
// avoid loading one volume more than once
l.volumesLock.RLock()
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
glog.V(1).Infof("loaded volume, %v", vid)
return true
}
// load the volume
v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout)
if e != nil {
glog.V(0).Infof("new volume %s error %s", volumeName, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
foundVolumeNames := make(map[string]bool)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
volumeName := getValidVolumeName(entry.Name())
if volumeName == "" {
continue
}
if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true
task_queue <- entry
}
}
}
close(task_queue)
}()
var wg sync.WaitGroup
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for fi := range task_queue {
_ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
}
}()
}
wg.Wait()
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
if ok {
num, err := strconv.Atoi(val)
if err != nil || num < 1 {
num = 10
glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
}
workerNum = num
} else {
if workerNum <= 10 {
workerNum = 10
}
}
l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
l.volumesLock.Lock()
delVolsMap := l.unmountVolumeByCollection(collection)
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
delEcVolsMap := l.unmountEcVolumeByCollection(collection)
l.ecVolumesLock.Unlock()
errChain := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(2)
go func() {
for _, v := range delVolsMap {
if err := v.Destroy(false); err != nil {
errChain <- err
}
}
wg.Done()
}()
go func() {
for _, v := range delEcVolsMap {
v.Destroy()
}
wg.Done()
}()
go func() {
wg.Wait()
close(errChain)
}()
errBuilder := strings.Builder{}
for err := range errChain {
errBuilder.WriteString(err.Error())
errBuilder.WriteString("; ")
}
if errBuilder.Len() > 0 {
e = fmt.Errorf(errBuilder.String())
}
return
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
}
e = v.Destroy(onlyEmpty)
if e != nil {
return
}
found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
}
return false
}
var ErrVolumeNotFound = fmt.Errorf("volume not found")
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId, onlyEmpty bool) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
_, err := l.deleteVolumeById(vid, onlyEmpty)
return err
}
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
v.Close()
delete(l.volumes, vid)
return nil
}
func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume {
deltaVols := make(map[needle.VolumeId]*Volume, 0)
for k, v := range l.volumes {
if v.Collection == collectionName && !v.isCompacting && !v.isCommitCompacting {
deltaVols[k] = v
}
}
for k := range deltaVols {
delete(l.volumes, k)
}
return deltaVols
}
func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
l.volumes[vid] = volume
volume.location = l
}
func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
v, ok := l.volumes[vid]
return v, ok
}
func (l *DiskLocation) VolumesLen() int {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
return len(l.volumes)
}
func (l *DiskLocation) SetStopping() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.SyncToDisk()
}
l.volumesLock.Unlock()
return
}
func (l *DiskLocation) Close() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.Close()
}
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
for _, ecVolume := range l.ecVolumes {
ecVolume.Close()
}
l.ecVolumesLock.Unlock()
close(l.closeCh)
return
}
func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
// println("LocateVolume", vid, "on", l.Directory)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
// println("checking", entry.Name(), "...")
volId, _, err := volumeIdFromFileName(entry.Name())
// println("volId", volId, "err", err)
if vid == volId && err == nil {
return entry, true
}
}
}
return nil, false
}
func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
for _, vol := range l.volumes {
if vol.IsReadOnly() {
continue
}
datSize, idxSize, _ := vol.FileStat()
unUsedSpace += volumeSizeLimit - (datSize + idxSize)
}
return
}
func (l *DiskLocation) CheckDiskSpace() {
if dir, e := filepath.Abs(l.Directory); e == nil {
s := stats.NewDiskStatus(dir)
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))
isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
if isLow != l.isDiskSpaceLow {
l.isDiskSpaceLow = !l.isDiskSpaceLow
}
logLevel := glog.Level(4)
if l.isDiskSpaceLow {
logLevel = glog.Level(0)
}
glog.V(logLevel).Infof("dir %s %s", dir, desc)
}
}
| {
glog.V(1).Infof("Getting uuid of volume directory:%s", dir)
dirUuidString = ""
fileName := dir + "/vol_dir.uuid"
if !util.FileExists(fileName) {
dirUuid, _ := uuid.NewRandom()
dirUuidString = dirUuid.String()
writeErr := util.WriteFile(fileName, []byte(dirUuidString), 0644)
if writeErr != nil {
return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, writeErr)
}
} else {
uuidData, readErr := os.ReadFile(fileName)
if readErr != nil {
return "", fmt.Errorf("failed to read uuid from %s : %v", fileName, readErr)
}
dirUuidString = string(uuidData)
}
return dirUuidString, nil
} | identifier_body |
disk_location.go | package storage
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util"
)
type DiskLocation struct {
Directory string
DirectoryUuid string
IdxDirectory string
DiskType types.DiskType
MaxVolumeCount int32
OriginalMaxVolumeCount int32
MinFreeSpace util.MinFreeSpace
volumes map[needle.VolumeId]*Volume
volumesLock sync.RWMutex
// erasure coding
ecVolumes map[needle.VolumeId]*erasure_coding.EcVolume
ecVolumesLock sync.RWMutex
isDiskSpaceLow bool
closeCh chan struct{}
}
func GenerateDirUuid(dir string) (dirUuidString string, err error) {
glog.V(1).Infof("Getting uuid of volume directory:%s", dir)
dirUuidString = ""
fileName := dir + "/vol_dir.uuid"
if !util.FileExists(fileName) {
dirUuid, _ := uuid.NewRandom()
dirUuidString = dirUuid.String()
writeErr := util.WriteFile(fileName, []byte(dirUuidString), 0644)
if writeErr != nil {
return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, writeErr)
}
} else {
uuidData, readErr := os.ReadFile(fileName)
if readErr != nil {
return "", fmt.Errorf("failed to read uuid from %s : %v", fileName, readErr)
}
dirUuidString = string(uuidData)
}
return dirUuidString, nil
}
func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
dir = util.ResolvePath(dir)
if idxDir == "" {
idxDir = dir
} else {
idxDir = util.ResolvePath(idxDir)
}
dirUuid, err := GenerateDirUuid(dir)
if err != nil {
glog.Fatalf("cannot generate uuid of dir %s: %v", dir, err)
}
location := &DiskLocation{
Directory: dir,
DirectoryUuid: dirUuid,
IdxDirectory: idxDir,
DiskType: diskType,
MaxVolumeCount: maxVolumeCount,
OriginalMaxVolumeCount: maxVolumeCount,
MinFreeSpace: minFreeSpace,
}
location.volumes = make(map[needle.VolumeId]*Volume)
location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
location.closeCh = make(chan struct{})
go func() {
location.CheckDiskSpace()
for {
select {
case <-location.closeCh:
return
case <-time.After(time.Minute):
location.CheckDiskSpace()
}
}
}()
return location
}
func volumeIdFromFileName(filename string) (needle.VolumeId, string, error) {
if isValidVolume(filename) {
base := filename[:len(filename)-4]
collection, volumeId, err := parseCollectionVolumeId(base)
return volumeId, collection, err
}
return 0, "", fmt.Errorf("file is not a volume: %s", filename)
}
func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
i := strings.LastIndex(base, "_")
if i > 0 {
collection, base = base[0:i], base[i+1:]
}
vol, err := needle.NewVolumeId(base)
return collection, vol, err
}
func isValidVolume(basename string) bool {
return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif")
}
func getValidVolumeName(basename string) string {
if isValidVolume(basename) {
return basename[:len(basename)-4]
}
return ""
}
func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64) bool {
basename := dirEntry.Name()
if dirEntry.IsDir() {
return false
}
volumeName := getValidVolumeName(basename)
if volumeName == "" {
return false
}
// skip if ec volumes exists
if skipIfEcVolumesExists {
if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
return false
}
}
// check for incomplete volume
noteFile := l.Directory + "/" + volumeName + ".note"
if util.FileExists(noteFile) {
note, _ := os.ReadFile(noteFile)
glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
removeVolumeFiles(l.Directory + "/" + volumeName)
removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
return false
}
// parse out collection, volume id
vid, collection, err := volumeIdFromFileName(basename)
if err != nil {
glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
return false
}
// avoid loading one volume more than once
l.volumesLock.RLock()
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
glog.V(1).Infof("loaded volume, %v", vid)
return true
}
// load the volume
v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout)
if e != nil {
glog.V(0).Infof("new volume %s error %s", volumeName, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
foundVolumeNames := make(map[string]bool)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
volumeName := getValidVolumeName(entry.Name())
if volumeName == "" {
continue
}
if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true
task_queue <- entry
}
}
}
close(task_queue)
}()
var wg sync.WaitGroup
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for fi := range task_queue {
_ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
}
}()
}
wg.Wait()
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
if ok {
num, err := strconv.Atoi(val)
if err != nil || num < 1 {
num = 10
glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
}
workerNum = num
} else {
if workerNum <= 10 |
}
l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
l.volumesLock.Lock()
delVolsMap := l.unmountVolumeByCollection(collection)
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
delEcVolsMap := l.unmountEcVolumeByCollection(collection)
l.ecVolumesLock.Unlock()
errChain := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(2)
go func() {
for _, v := range delVolsMap {
if err := v.Destroy(false); err != nil {
errChain <- err
}
}
wg.Done()
}()
go func() {
for _, v := range delEcVolsMap {
v.Destroy()
}
wg.Done()
}()
go func() {
wg.Wait()
close(errChain)
}()
errBuilder := strings.Builder{}
for err := range errChain {
errBuilder.WriteString(err.Error())
errBuilder.WriteString("; ")
}
if errBuilder.Len() > 0 {
e = fmt.Errorf(errBuilder.String())
}
return
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
}
e = v.Destroy(onlyEmpty)
if e != nil {
return
}
found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
}
return false
}
var ErrVolumeNotFound = fmt.Errorf("volume not found")
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId, onlyEmpty bool) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
_, err := l.deleteVolumeById(vid, onlyEmpty)
return err
}
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
v.Close()
delete(l.volumes, vid)
return nil
}
func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume {
deltaVols := make(map[needle.VolumeId]*Volume, 0)
for k, v := range l.volumes {
if v.Collection == collectionName && !v.isCompacting && !v.isCommitCompacting {
deltaVols[k] = v
}
}
for k := range deltaVols {
delete(l.volumes, k)
}
return deltaVols
}
func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
l.volumes[vid] = volume
volume.location = l
}
func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
v, ok := l.volumes[vid]
return v, ok
}
func (l *DiskLocation) VolumesLen() int {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
return len(l.volumes)
}
func (l *DiskLocation) SetStopping() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.SyncToDisk()
}
l.volumesLock.Unlock()
return
}
func (l *DiskLocation) Close() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.Close()
}
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
for _, ecVolume := range l.ecVolumes {
ecVolume.Close()
}
l.ecVolumesLock.Unlock()
close(l.closeCh)
return
}
func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
// println("LocateVolume", vid, "on", l.Directory)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
// println("checking", entry.Name(), "...")
volId, _, err := volumeIdFromFileName(entry.Name())
// println("volId", volId, "err", err)
if vid == volId && err == nil {
return entry, true
}
}
}
return nil, false
}
func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
for _, vol := range l.volumes {
if vol.IsReadOnly() {
continue
}
datSize, idxSize, _ := vol.FileStat()
unUsedSpace += volumeSizeLimit - (datSize + idxSize)
}
return
}
func (l *DiskLocation) CheckDiskSpace() {
if dir, e := filepath.Abs(l.Directory); e == nil {
s := stats.NewDiskStatus(dir)
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))
isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
if isLow != l.isDiskSpaceLow {
l.isDiskSpaceLow = !l.isDiskSpaceLow
}
logLevel := glog.Level(4)
if l.isDiskSpaceLow {
logLevel = glog.Level(0)
}
glog.V(logLevel).Infof("dir %s %s", dir, desc)
}
}
| {
workerNum = 10
} | conditional_block |
disk_location.go | package storage
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/stats"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
"github.com/seaweedfs/seaweedfs/weed/storage/needle"
"github.com/seaweedfs/seaweedfs/weed/storage/types"
"github.com/seaweedfs/seaweedfs/weed/util"
)
type DiskLocation struct {
Directory string
DirectoryUuid string
IdxDirectory string
DiskType types.DiskType
MaxVolumeCount int32
OriginalMaxVolumeCount int32
MinFreeSpace util.MinFreeSpace
volumes map[needle.VolumeId]*Volume
volumesLock sync.RWMutex
// erasure coding
ecVolumes map[needle.VolumeId]*erasure_coding.EcVolume
ecVolumesLock sync.RWMutex
isDiskSpaceLow bool
closeCh chan struct{}
}
func GenerateDirUuid(dir string) (dirUuidString string, err error) {
glog.V(1).Infof("Getting uuid of volume directory:%s", dir)
dirUuidString = ""
fileName := dir + "/vol_dir.uuid"
if !util.FileExists(fileName) {
dirUuid, _ := uuid.NewRandom()
dirUuidString = dirUuid.String()
writeErr := util.WriteFile(fileName, []byte(dirUuidString), 0644)
if writeErr != nil {
return "", fmt.Errorf("failed to write uuid to %s : %v", fileName, writeErr)
}
} else {
uuidData, readErr := os.ReadFile(fileName)
if readErr != nil {
return "", fmt.Errorf("failed to read uuid from %s : %v", fileName, readErr)
}
dirUuidString = string(uuidData)
}
return dirUuidString, nil
}
func NewDiskLocation(dir string, maxVolumeCount int32, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
dir = util.ResolvePath(dir)
if idxDir == "" {
idxDir = dir
} else {
idxDir = util.ResolvePath(idxDir)
}
dirUuid, err := GenerateDirUuid(dir)
if err != nil {
glog.Fatalf("cannot generate uuid of dir %s: %v", dir, err)
}
location := &DiskLocation{
Directory: dir,
DirectoryUuid: dirUuid,
IdxDirectory: idxDir,
DiskType: diskType,
MaxVolumeCount: maxVolumeCount,
OriginalMaxVolumeCount: maxVolumeCount,
MinFreeSpace: minFreeSpace,
}
location.volumes = make(map[needle.VolumeId]*Volume)
location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
location.closeCh = make(chan struct{})
go func() {
location.CheckDiskSpace()
for {
select {
case <-location.closeCh:
return
case <-time.After(time.Minute):
location.CheckDiskSpace()
}
}
}()
return location
}
func volumeIdFromFileName(filename string) (needle.VolumeId, string, error) {
if isValidVolume(filename) {
base := filename[:len(filename)-4]
collection, volumeId, err := parseCollectionVolumeId(base)
return volumeId, collection, err
}
return 0, "", fmt.Errorf("file is not a volume: %s", filename)
}
func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeId, err error) {
i := strings.LastIndex(base, "_")
if i > 0 {
collection, base = base[0:i], base[i+1:]
}
vol, err := needle.NewVolumeId(base)
return collection, vol, err
}
func isValidVolume(basename string) bool {
return strings.HasSuffix(basename, ".idx") || strings.HasSuffix(basename, ".vif")
}
func getValidVolumeName(basename string) string {
if isValidVolume(basename) {
return basename[:len(basename)-4]
}
return ""
}
func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind NeedleMapKind, skipIfEcVolumesExists bool, ldbTimeout int64) bool {
basename := dirEntry.Name()
if dirEntry.IsDir() {
return false
}
volumeName := getValidVolumeName(basename)
if volumeName == "" {
return false
}
// skip if ec volumes exists
if skipIfEcVolumesExists {
if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
return false
}
}
// check for incomplete volume
noteFile := l.Directory + "/" + volumeName + ".note"
if util.FileExists(noteFile) {
note, _ := os.ReadFile(noteFile)
glog.Warningf("volume %s was not completed: %s", volumeName, string(note))
removeVolumeFiles(l.Directory + "/" + volumeName)
removeVolumeFiles(l.IdxDirectory + "/" + volumeName)
return false
}
// parse out collection, volume id
vid, collection, err := volumeIdFromFileName(basename)
if err != nil {
glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
return false
}
// avoid loading one volume more than once
l.volumesLock.RLock()
_, found := l.volumes[vid]
l.volumesLock.RUnlock()
if found {
glog.V(1).Infof("loaded volume, %v", vid)
return true
}
// load the volume
v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0, ldbTimeout)
if e != nil {
glog.V(0).Infof("new volume %s error %s", volumeName, e)
return false
}
l.SetVolume(vid, v)
size, _, _ := v.FileStat()
glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
return true
}
func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapKind, concurrency int, ldbTimeout int64) {
task_queue := make(chan os.DirEntry, 10*concurrency)
go func() {
foundVolumeNames := make(map[string]bool)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
volumeName := getValidVolumeName(entry.Name())
if volumeName == "" {
continue
}
if _, found := foundVolumeNames[volumeName]; !found {
foundVolumeNames[volumeName] = true
task_queue <- entry
}
}
}
close(task_queue)
}()
var wg sync.WaitGroup
for workerNum := 0; workerNum < concurrency; workerNum++ {
wg.Add(1)
go func() {
defer wg.Done()
for fi := range task_queue {
_ = l.loadExistingVolume(fi, needleMapKind, true, ldbTimeout)
}
}()
}
wg.Wait()
}
func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind, ldbTimeout int64) {
workerNum := runtime.NumCPU()
val, ok := os.LookupEnv("GOMAXPROCS")
if ok {
num, err := strconv.Atoi(val)
if err != nil || num < 1 {
num = 10
glog.Warningf("failed to set worker number from GOMAXPROCS , set to default:10")
}
workerNum = num
} else {
if workerNum <= 10 {
workerNum = 10
}
}
l.concurrentLoadingVolumes(needleMapKind, workerNum, ldbTimeout)
glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
l.loadAllEcShards()
glog.V(0).Infof("Store started on dir: %s with %d ec shards", l.Directory, len(l.ecVolumes))
}
func (l *DiskLocation) DeleteCollectionFromDiskLocation(collection string) (e error) {
l.volumesLock.Lock()
delVolsMap := l.unmountVolumeByCollection(collection)
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
delEcVolsMap := l.unmountEcVolumeByCollection(collection)
l.ecVolumesLock.Unlock()
errChain := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(2)
go func() {
for _, v := range delVolsMap {
if err := v.Destroy(false); err != nil {
errChain <- err
}
}
wg.Done()
}()
go func() {
for _, v := range delEcVolsMap {
v.Destroy()
}
wg.Done()
}()
go func() {
wg.Wait()
close(errChain)
}()
errBuilder := strings.Builder{}
for err := range errChain {
errBuilder.WriteString(err.Error())
errBuilder.WriteString("; ")
}
if errBuilder.Len() > 0 {
e = fmt.Errorf(errBuilder.String())
}
return
}
func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId, onlyEmpty bool) (found bool, e error) {
v, ok := l.volumes[vid]
if !ok {
return
}
e = v.Destroy(onlyEmpty)
if e != nil {
return
}
found = true
delete(l.volumes, vid)
return
}
func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapKind) bool {
if fileInfo, found := l.LocateVolume(vid); found {
return l.loadExistingVolume(fileInfo, needleMapKind, false, 0)
}
return false
}
var ErrVolumeNotFound = fmt.Errorf("volume not found")
func (l *DiskLocation) DeleteVolume(vid needle.VolumeId, onlyEmpty bool) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
_, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
_, err := l.deleteVolumeById(vid, onlyEmpty)
return err
}
func (l *DiskLocation) UnloadVolume(vid needle.VolumeId) error {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
v, ok := l.volumes[vid]
if !ok {
return ErrVolumeNotFound
}
v.Close()
delete(l.volumes, vid)
return nil
}
func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[needle.VolumeId]*Volume {
deltaVols := make(map[needle.VolumeId]*Volume, 0)
for k, v := range l.volumes {
if v.Collection == collectionName && !v.isCompacting && !v.isCommitCompacting {
deltaVols[k] = v
}
}
for k := range deltaVols {
delete(l.volumes, k)
}
return deltaVols
}
func (l *DiskLocation) SetVolume(vid needle.VolumeId, volume *Volume) {
l.volumesLock.Lock()
defer l.volumesLock.Unlock()
l.volumes[vid] = volume
volume.location = l
}
func (l *DiskLocation) FindVolume(vid needle.VolumeId) (*Volume, bool) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
v, ok := l.volumes[vid]
return v, ok
}
func (l *DiskLocation) VolumesLen() int {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
return len(l.volumes)
}
func (l *DiskLocation) SetStopping() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.SyncToDisk()
}
l.volumesLock.Unlock()
return
}
func (l *DiskLocation) Close() {
l.volumesLock.Lock()
for _, v := range l.volumes {
v.Close()
}
l.volumesLock.Unlock()
l.ecVolumesLock.Lock()
for _, ecVolume := range l.ecVolumes {
ecVolume.Close()
}
l.ecVolumesLock.Unlock()
close(l.closeCh)
return
}
func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.DirEntry, bool) {
// println("LocateVolume", vid, "on", l.Directory)
if dirEntries, err := os.ReadDir(l.Directory); err == nil {
for _, entry := range dirEntries {
// println("checking", entry.Name(), "...")
volId, _, err := volumeIdFromFileName(entry.Name())
// println("volId", volId, "err", err)
if vid == volId && err == nil {
return entry, true
}
}
}
return nil, false
}
func (l *DiskLocation) | (volumeSizeLimit uint64) (unUsedSpace uint64) {
l.volumesLock.RLock()
defer l.volumesLock.RUnlock()
for _, vol := range l.volumes {
if vol.IsReadOnly() {
continue
}
datSize, idxSize, _ := vol.FileStat()
unUsedSpace += volumeSizeLimit - (datSize + idxSize)
}
return
}
func (l *DiskLocation) CheckDiskSpace() {
if dir, e := filepath.Abs(l.Directory); e == nil {
s := stats.NewDiskStatus(dir)
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))
isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
if isLow != l.isDiskSpaceLow {
l.isDiskSpaceLow = !l.isDiskSpaceLow
}
logLevel := glog.Level(4)
if l.isDiskSpaceLow {
logLevel = glog.Level(0)
}
glog.V(logLevel).Infof("dir %s %s", dir, desc)
}
}
| UnUsedSpace | identifier_name |
utility.rs | //! General-purpose utility functions for internal usage within this crate.
use crate::derive_data::{ReflectMeta, StructField};
use crate::field_attributes::ReflectIgnoreBehavior;
use crate::fq_std::{FQAny, FQOption, FQSend, FQSync};
use bevy_macro_utils::BevyManifest;
use bit_set::BitSet;
use proc_macro2::{Ident, Span};
use quote::{quote, ToTokens};
use syn::{spanned::Spanned, LitStr, Member, Path, Type, WhereClause};
/// Returns the correct path for `bevy_reflect`.
pub(crate) fn get_bevy_reflect_path() -> Path {
BevyManifest::get_path_direct("bevy_reflect")
}
/// Returns the "reflected" ident for a given string.
///
/// # Example
///
/// ```ignore
/// let reflected: Ident = get_reflect_ident("Hash");
/// assert_eq!("ReflectHash", reflected.to_string());
/// ```
pub(crate) fn get_reflect_ident(name: &str) -> Ident {
let reflected = format!("Reflect{name}");
Ident::new(&reflected, Span::call_site())
}
/// Helper struct used to process an iterator of `Result<Vec<T>, syn::Error>`,
/// combining errors into one along the way.
pub(crate) struct ResultSifter<T> {
items: Vec<T>,
errors: Option<syn::Error>,
}
/// Returns a [`Member`] made of `ident` or `index` if `ident` is None.
///
/// Rust struct syntax allows for `Struct { foo: "string" }` with explicitly
/// named fields. It allows the `Struct { 0: "string" }` syntax when the struct
/// is declared as a tuple struct.
///
/// ```
/// # fn main() {
/// struct Foo { field: &'static str }
/// struct Bar(&'static str);
/// let Foo { field } = Foo { field: "hi" };
/// let Bar { 0: field } = Bar { 0: "hello" };
/// let Bar(field) = Bar("hello"); // more common syntax
/// # }
/// ```
///
/// This function helps field access in context where you are declaring either
/// a tuple struct or a struct with named fields. If you don't have a field name,
/// it means you need to access the struct through an index.
pub(crate) fn ident_or_index(ident: Option<&Ident>, index: usize) -> Member {
ident.map_or_else(
|| Member::Unnamed(index.into()),
|ident| Member::Named(ident.clone()),
)
}
/// Options defining how to extend the `where` clause in reflection with any additional bounds needed.
pub(crate) struct WhereClauseOptions {
/// Type parameters that need extra trait bounds.
parameter_types: Box<[Ident]>,
/// Trait bounds to add to the type parameters.
parameter_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that will be reflected and need an extra trait bound
active_types: Box<[Type]>,
/// Trait bounds to add to the active types
active_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that won't be reflected and need an extra trait bound
ignored_types: Box<[Type]>,
/// Trait bounds to add to the ignored types
ignored_trait_bounds: Box<[proc_macro2::TokenStream]>,
}
impl Default for WhereClauseOptions {
/// By default, don't add any additional bounds to the `where` clause
fn default() -> Self {
Self {
parameter_types: Box::new([]),
active_types: Box::new([]),
ignored_types: Box::new([]),
active_trait_bounds: Box::new([]),
ignored_trait_bounds: Box::new([]),
parameter_trait_bounds: Box::new([]),
}
}
}
impl WhereClauseOptions {
/// Create [`WhereClauseOptions`] for a struct or enum type.
pub fn new<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
) -> Self {
Self::new_with_bounds(meta, active_fields, ignored_fields, |_| None, |_| None)
}
/// Create [`WhereClauseOptions`] for a simple value type.
pub fn new_value(meta: &ReflectMeta) -> Self {
Self::new_with_bounds(
meta,
std::iter::empty(),
std::iter::empty(),
|_| None,
|_| None,
)
}
/// Create [`WhereClauseOptions`] for a struct or enum type.
///
/// Compared to [`WhereClauseOptions::new`], this version allows you to specify
/// custom trait bounds for each field.
pub fn new_with_bounds<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
active_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
ignored_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
) -> Self {
let bevy_reflect_path = meta.bevy_reflect_path();
let is_from_reflect = meta.from_reflect().should_auto_derive();
let (active_types, active_trait_bounds): (Vec<_>, Vec<_>) = active_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = active_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = if is_from_reflect {
quote!(#bevy_reflect_path::FromReflect #custom_bounds)
} else | ;
(ty, bounds)
})
.unzip();
let (ignored_types, ignored_trait_bounds): (Vec<_>, Vec<_>) = ignored_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = ignored_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = quote!(#FQAny + #FQSend + #FQSync #custom_bounds);
(ty, bounds)
})
.unzip();
let (parameter_types, parameter_trait_bounds): (Vec<_>, Vec<_>) =
if meta.traits().type_path_attrs().should_auto_derive() {
meta.type_path()
.generics()
.type_params()
.map(|param| {
let ident = param.ident.clone();
let bounds = quote!(#bevy_reflect_path::TypePath);
(ident, bounds)
})
.unzip()
} else {
// If we don't need to derive `TypePath` for the type parameters,
// we can skip adding its bound to the `where` clause.
(Vec::new(), Vec::new())
};
Self {
active_types: active_types.into_boxed_slice(),
active_trait_bounds: active_trait_bounds.into_boxed_slice(),
ignored_types: ignored_types.into_boxed_slice(),
ignored_trait_bounds: ignored_trait_bounds.into_boxed_slice(),
parameter_types: parameter_types.into_boxed_slice(),
parameter_trait_bounds: parameter_trait_bounds.into_boxed_slice(),
}
}
}
/// Extends the `where` clause in reflection with any additional bounds needed.
///
/// This is mostly used to add additional bounds to reflected objects with generic types.
/// For reflection purposes, we usually have:
/// * `active_trait_bounds: Reflect`
/// * `ignored_trait_bounds: Any + Send + Sync`
///
/// # Arguments
///
/// * `where_clause`: existing `where` clause present on the object to be derived
/// * `where_clause_options`: additional parameters defining which trait bounds to add to the `where` clause
///
/// # Example
///
/// The struct:
/// ```ignore
/// #[derive(Reflect)]
/// struct Foo<T, U> {
/// a: T,
/// #[reflect(ignore)]
/// b: U
/// }
/// ```
/// will have active types: `[T]` and ignored types: `[U]`
///
/// The `extend_where_clause` function will yield the following `where` clause:
/// ```ignore
/// where
/// T: Reflect, // active_trait_bounds
/// U: Any + Send + Sync, // ignored_trait_bounds
/// ```
pub(crate) fn extend_where_clause(
where_clause: Option<&WhereClause>,
where_clause_options: &WhereClauseOptions,
) -> proc_macro2::TokenStream {
let parameter_types = &where_clause_options.parameter_types;
let active_types = &where_clause_options.active_types;
let ignored_types = &where_clause_options.ignored_types;
let parameter_trait_bounds = &where_clause_options.parameter_trait_bounds;
let active_trait_bounds = &where_clause_options.active_trait_bounds;
let ignored_trait_bounds = &where_clause_options.ignored_trait_bounds;
let mut generic_where_clause = if let Some(where_clause) = where_clause {
let predicates = where_clause.predicates.iter();
quote! {where #(#predicates,)*}
} else if !(parameter_types.is_empty() && active_types.is_empty() && ignored_types.is_empty()) {
quote! {where}
} else {
quote!()
};
// The nested parentheses here are required to properly scope HRTBs coming
// from field types to the type itself, as the compiler will scope them to
// the whole bound by default, resulting in a failure to prove trait
// adherence.
generic_where_clause.extend(quote! {
#((#active_types): #active_trait_bounds,)*
#((#ignored_types): #ignored_trait_bounds,)*
// Leave parameter bounds to the end for more sane error messages.
#((#parameter_types): #parameter_trait_bounds,)*
});
generic_where_clause
}
impl<T> Default for ResultSifter<T> {
fn default() -> Self {
Self {
items: Vec::new(),
errors: None,
}
}
}
impl<T> ResultSifter<T> {
/// Sift the given result, combining errors if necessary.
pub fn sift(&mut self, result: Result<T, syn::Error>) {
match result {
Ok(data) => self.items.push(data),
Err(err) => {
if let Some(ref mut errors) = self.errors {
errors.combine(err);
} else {
self.errors = Some(err);
}
}
}
}
/// Associated method that provides a convenient implementation for [`Iterator::fold`].
pub fn fold(mut sifter: Self, result: Result<T, syn::Error>) -> Self {
sifter.sift(result);
sifter
}
/// Complete the sifting process and return the final result.
pub fn finish(self) -> Result<Vec<T>, syn::Error> {
if let Some(errors) = self.errors {
Err(errors)
} else {
Ok(self.items)
}
}
}
/// Converts an iterator over ignore behavior of members to a bitset of ignored members.
///
/// Takes into account the fact that always ignored (non-reflected) members are skipped.
///
/// # Example
/// ```rust,ignore
/// pub struct HelloWorld {
/// reflected_field: u32 // index: 0
///
/// #[reflect(ignore)]
/// non_reflected_field: u32 // index: N/A (not 1!)
///
/// #[reflect(skip_serializing)]
/// non_serialized_field: u32 // index: 1
/// }
/// ```
/// Would convert to the `0b01` bitset (i.e second field is NOT serialized)
///
pub(crate) fn members_to_serialization_denylist<T>(member_iter: T) -> BitSet<u32>
where
T: Iterator<Item = ReflectIgnoreBehavior>,
{
let mut bitset = BitSet::default();
member_iter.fold(0, |next_idx, member| match member {
ReflectIgnoreBehavior::IgnoreAlways => next_idx,
ReflectIgnoreBehavior::IgnoreSerialization => {
bitset.insert(next_idx);
next_idx + 1
}
ReflectIgnoreBehavior::None => next_idx + 1,
});
bitset
}
/// Turns an `Option<TokenStream>` into a `TokenStream` for an `Option`.
pub(crate) fn wrap_in_option(tokens: Option<proc_macro2::TokenStream>) -> proc_macro2::TokenStream {
match tokens {
Some(tokens) => quote! {
#FQOption::Some(#tokens)
},
None => quote! {
#FQOption::None
},
}
}
/// Contains tokens representing different kinds of string.
#[derive(Clone)]
pub(crate) enum StringExpr {
/// A string that is valid at compile time.
///
/// This is either a string literal like `"mystring"`,
/// or a string created by a macro like [`module_path`]
/// or [`concat`].
Const(proc_macro2::TokenStream),
/// A [string slice](str) that is borrowed for a `'static` lifetime.
Borrowed(proc_macro2::TokenStream),
/// An [owned string](String).
Owned(proc_macro2::TokenStream),
}
impl<T: ToString + Spanned> From<T> for StringExpr {
fn from(value: T) -> Self {
Self::from_lit(&LitStr::new(&value.to_string(), value.span()))
}
}
impl StringExpr {
/// Creates a [constant] [`StringExpr`] from a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_lit(lit: &LitStr) -> Self {
Self::Const(lit.to_token_stream())
}
/// Creates a [constant] [`StringExpr`] by interpreting a [string slice][str] as a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_str(string: &str) -> Self {
Self::Const(string.into_token_stream())
}
/// Returns tokens for an [owned string](String).
///
/// The returned expression will allocate unless the [`StringExpr`] is [already owned].
///
/// [already owned]: StringExpr::Owned
pub fn into_owned(self) -> proc_macro2::TokenStream {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => quote! {
::std::string::ToString::to_string(#tokens)
},
Self::Owned(owned) => owned,
}
}
/// Returns tokens for a statically borrowed [string slice](str).
pub fn into_borrowed(self) -> proc_macro2::TokenStream {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => tokens,
Self::Owned(owned) => quote! {
&#owned
},
}
}
/// Appends a [`StringExpr`] to another.
///
/// If both expressions are [`StringExpr::Const`] this will use [`concat`] to merge them.
pub fn appended_by(mut self, other: StringExpr) -> Self {
if let Self::Const(tokens) = self {
if let Self::Const(more) = other {
return Self::Const(quote! {
::core::concat!(#tokens, #more)
});
}
self = Self::Const(tokens);
}
let owned = self.into_owned();
let borrowed = other.into_borrowed();
Self::Owned(quote! {
#owned + #borrowed
})
}
}
impl Default for StringExpr {
fn default() -> Self {
StringExpr::from_str("")
}
}
impl FromIterator<StringExpr> for StringExpr {
fn from_iter<T: IntoIterator<Item = StringExpr>>(iter: T) -> Self {
let mut iter = iter.into_iter();
match iter.next() {
Some(mut expr) => {
for next in iter {
expr = expr.appended_by(next);
}
expr
}
None => Default::default(),
}
}
}
| {
quote!(#bevy_reflect_path::Reflect #custom_bounds)
} | conditional_block |
utility.rs | //! General-purpose utility functions for internal usage within this crate.
use crate::derive_data::{ReflectMeta, StructField};
use crate::field_attributes::ReflectIgnoreBehavior;
use crate::fq_std::{FQAny, FQOption, FQSend, FQSync};
use bevy_macro_utils::BevyManifest;
use bit_set::BitSet;
use proc_macro2::{Ident, Span};
use quote::{quote, ToTokens};
use syn::{spanned::Spanned, LitStr, Member, Path, Type, WhereClause};
/// Returns the correct path for `bevy_reflect`.
pub(crate) fn get_bevy_reflect_path() -> Path {
BevyManifest::get_path_direct("bevy_reflect")
}
/// Returns the "reflected" ident for a given string.
///
/// # Example
///
/// ```ignore
/// let reflected: Ident = get_reflect_ident("Hash");
/// assert_eq!("ReflectHash", reflected.to_string());
/// ```
pub(crate) fn get_reflect_ident(name: &str) -> Ident {
let reflected = format!("Reflect{name}");
Ident::new(&reflected, Span::call_site())
}
/// Helper struct used to process an iterator of `Result<Vec<T>, syn::Error>`,
/// combining errors into one along the way.
pub(crate) struct ResultSifter<T> {
items: Vec<T>,
errors: Option<syn::Error>,
}
/// Returns a [`Member`] made of `ident` or `index` if `ident` is None.
///
/// Rust struct syntax allows for `Struct { foo: "string" }` with explicitly
/// named fields. It allows the `Struct { 0: "string" }` syntax when the struct
/// is declared as a tuple struct.
///
/// ```
/// # fn main() {
/// struct Foo { field: &'static str }
/// struct Bar(&'static str);
/// let Foo { field } = Foo { field: "hi" };
/// let Bar { 0: field } = Bar { 0: "hello" };
/// let Bar(field) = Bar("hello"); // more common syntax
/// # }
/// ```
///
/// This function helps field access in context where you are declaring either
/// a tuple struct or a struct with named fields. If you don't have a field name,
/// it means you need to access the struct through an index.
pub(crate) fn ident_or_index(ident: Option<&Ident>, index: usize) -> Member {
ident.map_or_else(
|| Member::Unnamed(index.into()),
|ident| Member::Named(ident.clone()),
)
}
/// Options defining how to extend the `where` clause in reflection with any additional bounds needed.
pub(crate) struct WhereClauseOptions {
/// Type parameters that need extra trait bounds.
parameter_types: Box<[Ident]>,
/// Trait bounds to add to the type parameters.
parameter_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that will be reflected and need an extra trait bound
active_types: Box<[Type]>,
/// Trait bounds to add to the active types
active_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that won't be reflected and need an extra trait bound
ignored_types: Box<[Type]>,
/// Trait bounds to add to the ignored types
ignored_trait_bounds: Box<[proc_macro2::TokenStream]>,
}
impl Default for WhereClauseOptions {
/// By default, don't add any additional bounds to the `where` clause
fn default() -> Self {
Self {
parameter_types: Box::new([]),
active_types: Box::new([]),
ignored_types: Box::new([]),
active_trait_bounds: Box::new([]),
ignored_trait_bounds: Box::new([]),
parameter_trait_bounds: Box::new([]),
}
}
}
impl WhereClauseOptions {
/// Create [`WhereClauseOptions`] for a struct or enum type.
pub fn new<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
) -> Self {
Self::new_with_bounds(meta, active_fields, ignored_fields, |_| None, |_| None)
}
/// Create [`WhereClauseOptions`] for a simple value type.
pub fn new_value(meta: &ReflectMeta) -> Self {
Self::new_with_bounds(
meta,
std::iter::empty(),
std::iter::empty(),
|_| None,
|_| None,
)
}
/// Create [`WhereClauseOptions`] for a struct or enum type.
///
/// Compared to [`WhereClauseOptions::new`], this version allows you to specify
/// custom trait bounds for each field.
pub fn new_with_bounds<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
active_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
ignored_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
) -> Self {
let bevy_reflect_path = meta.bevy_reflect_path();
let is_from_reflect = meta.from_reflect().should_auto_derive();
let (active_types, active_trait_bounds): (Vec<_>, Vec<_>) = active_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = active_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = if is_from_reflect {
quote!(#bevy_reflect_path::FromReflect #custom_bounds)
} else {
quote!(#bevy_reflect_path::Reflect #custom_bounds)
};
(ty, bounds)
})
.unzip();
let (ignored_types, ignored_trait_bounds): (Vec<_>, Vec<_>) = ignored_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = ignored_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = quote!(#FQAny + #FQSend + #FQSync #custom_bounds);
(ty, bounds)
})
.unzip();
let (parameter_types, parameter_trait_bounds): (Vec<_>, Vec<_>) =
if meta.traits().type_path_attrs().should_auto_derive() {
meta.type_path()
.generics()
.type_params()
.map(|param| {
let ident = param.ident.clone();
let bounds = quote!(#bevy_reflect_path::TypePath);
(ident, bounds)
})
.unzip()
} else {
// If we don't need to derive `TypePath` for the type parameters,
// we can skip adding its bound to the `where` clause.
(Vec::new(), Vec::new())
};
Self {
active_types: active_types.into_boxed_slice(),
active_trait_bounds: active_trait_bounds.into_boxed_slice(),
ignored_types: ignored_types.into_boxed_slice(),
ignored_trait_bounds: ignored_trait_bounds.into_boxed_slice(),
parameter_types: parameter_types.into_boxed_slice(),
parameter_trait_bounds: parameter_trait_bounds.into_boxed_slice(),
}
}
}
/// Extends the `where` clause in reflection with any additional bounds needed.
///
/// This is mostly used to add additional bounds to reflected objects with generic types.
/// For reflection purposes, we usually have:
/// * `active_trait_bounds: Reflect`
/// * `ignored_trait_bounds: Any + Send + Sync`
///
/// # Arguments
///
/// * `where_clause`: existing `where` clause present on the object to be derived
/// * `where_clause_options`: additional parameters defining which trait bounds to add to the `where` clause
///
/// # Example
///
/// The struct:
/// ```ignore
/// #[derive(Reflect)]
/// struct Foo<T, U> {
/// a: T,
/// #[reflect(ignore)]
/// b: U
/// }
/// ```
/// will have active types: `[T]` and ignored types: `[U]`
///
/// The `extend_where_clause` function will yield the following `where` clause:
/// ```ignore
/// where
/// T: Reflect, // active_trait_bounds
/// U: Any + Send + Sync, // ignored_trait_bounds
/// ```
pub(crate) fn extend_where_clause(
where_clause: Option<&WhereClause>,
where_clause_options: &WhereClauseOptions,
) -> proc_macro2::TokenStream {
let parameter_types = &where_clause_options.parameter_types;
let active_types = &where_clause_options.active_types;
let ignored_types = &where_clause_options.ignored_types;
let parameter_trait_bounds = &where_clause_options.parameter_trait_bounds;
let active_trait_bounds = &where_clause_options.active_trait_bounds;
let ignored_trait_bounds = &where_clause_options.ignored_trait_bounds;
let mut generic_where_clause = if let Some(where_clause) = where_clause {
let predicates = where_clause.predicates.iter();
quote! {where #(#predicates,)*}
} else if !(parameter_types.is_empty() && active_types.is_empty() && ignored_types.is_empty()) {
quote! {where}
} else {
quote!()
};
// The nested parentheses here are required to properly scope HRTBs coming
// from field types to the type itself, as the compiler will scope them to
// the whole bound by default, resulting in a failure to prove trait
// adherence.
generic_where_clause.extend(quote! {
#((#active_types): #active_trait_bounds,)*
#((#ignored_types): #ignored_trait_bounds,)*
// Leave parameter bounds to the end for more sane error messages.
#((#parameter_types): #parameter_trait_bounds,)*
});
generic_where_clause
}
impl<T> Default for ResultSifter<T> {
fn default() -> Self {
Self {
items: Vec::new(),
errors: None,
}
}
}
impl<T> ResultSifter<T> {
/// Sift the given result, combining errors if necessary.
pub fn sift(&mut self, result: Result<T, syn::Error>) {
match result {
Ok(data) => self.items.push(data),
Err(err) => {
if let Some(ref mut errors) = self.errors {
errors.combine(err);
} else {
self.errors = Some(err);
}
}
}
}
/// Associated method that provides a convenient implementation for [`Iterator::fold`].
pub fn fold(mut sifter: Self, result: Result<T, syn::Error>) -> Self {
sifter.sift(result);
sifter
}
/// Complete the sifting process and return the final result.
pub fn finish(self) -> Result<Vec<T>, syn::Error> {
if let Some(errors) = self.errors {
Err(errors)
} else {
Ok(self.items)
}
}
}
/// Converts an iterator over ignore behavior of members to a bitset of ignored members.
///
/// Takes into account the fact that always ignored (non-reflected) members are skipped.
///
/// # Example
/// ```rust,ignore
/// pub struct HelloWorld {
/// reflected_field: u32 // index: 0
///
/// #[reflect(ignore)]
/// non_reflected_field: u32 // index: N/A (not 1!)
///
/// #[reflect(skip_serializing)]
/// non_serialized_field: u32 // index: 1
/// }
/// ```
/// Would convert to the `0b01` bitset (i.e second field is NOT serialized)
///
pub(crate) fn members_to_serialization_denylist<T>(member_iter: T) -> BitSet<u32>
where
T: Iterator<Item = ReflectIgnoreBehavior>,
{
let mut bitset = BitSet::default();
member_iter.fold(0, |next_idx, member| match member {
ReflectIgnoreBehavior::IgnoreAlways => next_idx,
ReflectIgnoreBehavior::IgnoreSerialization => {
bitset.insert(next_idx);
next_idx + 1
}
ReflectIgnoreBehavior::None => next_idx + 1,
});
bitset
}
/// Turns an `Option<TokenStream>` into a `TokenStream` for an `Option`.
pub(crate) fn wrap_in_option(tokens: Option<proc_macro2::TokenStream>) -> proc_macro2::TokenStream {
match tokens {
Some(tokens) => quote! {
#FQOption::Some(#tokens)
},
None => quote! {
#FQOption::None
},
}
}
/// Contains tokens representing different kinds of string.
#[derive(Clone)]
pub(crate) enum StringExpr {
/// A string that is valid at compile time.
///
/// This is either a string literal like `"mystring"`,
/// or a string created by a macro like [`module_path`]
/// or [`concat`].
Const(proc_macro2::TokenStream),
/// A [string slice](str) that is borrowed for a `'static` lifetime.
Borrowed(proc_macro2::TokenStream),
/// An [owned string](String).
Owned(proc_macro2::TokenStream),
}
impl<T: ToString + Spanned> From<T> for StringExpr {
fn from(value: T) -> Self {
Self::from_lit(&LitStr::new(&value.to_string(), value.span()))
}
}
impl StringExpr {
/// Creates a [constant] [`StringExpr`] from a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_lit(lit: &LitStr) -> Self {
Self::Const(lit.to_token_stream()) | /// [constant]: StringExpr::Const
pub fn from_str(string: &str) -> Self {
Self::Const(string.into_token_stream())
}
/// Returns tokens for an [owned string](String).
///
/// The returned expression will allocate unless the [`StringExpr`] is [already owned].
///
/// [already owned]: StringExpr::Owned
pub fn into_owned(self) -> proc_macro2::TokenStream {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => quote! {
::std::string::ToString::to_string(#tokens)
},
Self::Owned(owned) => owned,
}
}
/// Returns tokens for a statically borrowed [string slice](str).
pub fn into_borrowed(self) -> proc_macro2::TokenStream {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => tokens,
Self::Owned(owned) => quote! {
&#owned
},
}
}
/// Appends a [`StringExpr`] to another.
///
/// If both expressions are [`StringExpr::Const`] this will use [`concat`] to merge them.
pub fn appended_by(mut self, other: StringExpr) -> Self {
if let Self::Const(tokens) = self {
if let Self::Const(more) = other {
return Self::Const(quote! {
::core::concat!(#tokens, #more)
});
}
self = Self::Const(tokens);
}
let owned = self.into_owned();
let borrowed = other.into_borrowed();
Self::Owned(quote! {
#owned + #borrowed
})
}
}
impl Default for StringExpr {
fn default() -> Self {
StringExpr::from_str("")
}
}
impl FromIterator<StringExpr> for StringExpr {
fn from_iter<T: IntoIterator<Item = StringExpr>>(iter: T) -> Self {
let mut iter = iter.into_iter();
match iter.next() {
Some(mut expr) => {
for next in iter {
expr = expr.appended_by(next);
}
expr
}
None => Default::default(),
}
}
} | }
/// Creates a [constant] [`StringExpr`] by interpreting a [string slice][str] as a [`struct@LitStr`].
/// | random_line_split |
utility.rs | //! General-purpose utility functions for internal usage within this crate.
use crate::derive_data::{ReflectMeta, StructField};
use crate::field_attributes::ReflectIgnoreBehavior;
use crate::fq_std::{FQAny, FQOption, FQSend, FQSync};
use bevy_macro_utils::BevyManifest;
use bit_set::BitSet;
use proc_macro2::{Ident, Span};
use quote::{quote, ToTokens};
use syn::{spanned::Spanned, LitStr, Member, Path, Type, WhereClause};
/// Returns the correct path for `bevy_reflect`.
pub(crate) fn get_bevy_reflect_path() -> Path {
BevyManifest::get_path_direct("bevy_reflect")
}
/// Returns the "reflected" ident for a given string.
///
/// # Example
///
/// ```ignore
/// let reflected: Ident = get_reflect_ident("Hash");
/// assert_eq!("ReflectHash", reflected.to_string());
/// ```
pub(crate) fn get_reflect_ident(name: &str) -> Ident {
let reflected = format!("Reflect{name}");
Ident::new(&reflected, Span::call_site())
}
/// Helper struct used to process an iterator of `Result<Vec<T>, syn::Error>`,
/// combining errors into one along the way.
pub(crate) struct ResultSifter<T> {
items: Vec<T>,
errors: Option<syn::Error>,
}
/// Returns a [`Member`] made of `ident` or `index` if `ident` is None.
///
/// Rust struct syntax allows for `Struct { foo: "string" }` with explicitly
/// named fields. It allows the `Struct { 0: "string" }` syntax when the struct
/// is declared as a tuple struct.
///
/// ```
/// # fn main() {
/// struct Foo { field: &'static str }
/// struct Bar(&'static str);
/// let Foo { field } = Foo { field: "hi" };
/// let Bar { 0: field } = Bar { 0: "hello" };
/// let Bar(field) = Bar("hello"); // more common syntax
/// # }
/// ```
///
/// This function helps field access in context where you are declaring either
/// a tuple struct or a struct with named fields. If you don't have a field name,
/// it means you need to access the struct through an index.
pub(crate) fn ident_or_index(ident: Option<&Ident>, index: usize) -> Member {
ident.map_or_else(
|| Member::Unnamed(index.into()),
|ident| Member::Named(ident.clone()),
)
}
/// Options defining how to extend the `where` clause in reflection with any additional bounds needed.
pub(crate) struct WhereClauseOptions {
/// Type parameters that need extra trait bounds.
parameter_types: Box<[Ident]>,
/// Trait bounds to add to the type parameters.
parameter_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that will be reflected and need an extra trait bound
active_types: Box<[Type]>,
/// Trait bounds to add to the active types
active_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that won't be reflected and need an extra trait bound
ignored_types: Box<[Type]>,
/// Trait bounds to add to the ignored types
ignored_trait_bounds: Box<[proc_macro2::TokenStream]>,
}
impl Default for WhereClauseOptions {
/// By default, don't add any additional bounds to the `where` clause
fn default() -> Self {
Self {
parameter_types: Box::new([]),
active_types: Box::new([]),
ignored_types: Box::new([]),
active_trait_bounds: Box::new([]),
ignored_trait_bounds: Box::new([]),
parameter_trait_bounds: Box::new([]),
}
}
}
impl WhereClauseOptions {
/// Create [`WhereClauseOptions`] for a struct or enum type.
pub fn new<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
) -> Self {
Self::new_with_bounds(meta, active_fields, ignored_fields, |_| None, |_| None)
}
/// Create [`WhereClauseOptions`] for a simple value type.
pub fn new_value(meta: &ReflectMeta) -> Self {
Self::new_with_bounds(
meta,
std::iter::empty(),
std::iter::empty(),
|_| None,
|_| None,
)
}
/// Create [`WhereClauseOptions`] for a struct or enum type.
///
/// Compared to [`WhereClauseOptions::new`], this version allows you to specify
/// custom trait bounds for each field.
pub fn new_with_bounds<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
active_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
ignored_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
) -> Self {
let bevy_reflect_path = meta.bevy_reflect_path();
let is_from_reflect = meta.from_reflect().should_auto_derive();
let (active_types, active_trait_bounds): (Vec<_>, Vec<_>) = active_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = active_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = if is_from_reflect {
quote!(#bevy_reflect_path::FromReflect #custom_bounds)
} else {
quote!(#bevy_reflect_path::Reflect #custom_bounds)
};
(ty, bounds)
})
.unzip();
let (ignored_types, ignored_trait_bounds): (Vec<_>, Vec<_>) = ignored_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = ignored_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = quote!(#FQAny + #FQSend + #FQSync #custom_bounds);
(ty, bounds)
})
.unzip();
let (parameter_types, parameter_trait_bounds): (Vec<_>, Vec<_>) =
if meta.traits().type_path_attrs().should_auto_derive() {
meta.type_path()
.generics()
.type_params()
.map(|param| {
let ident = param.ident.clone();
let bounds = quote!(#bevy_reflect_path::TypePath);
(ident, bounds)
})
.unzip()
} else {
// If we don't need to derive `TypePath` for the type parameters,
// we can skip adding its bound to the `where` clause.
(Vec::new(), Vec::new())
};
Self {
active_types: active_types.into_boxed_slice(),
active_trait_bounds: active_trait_bounds.into_boxed_slice(),
ignored_types: ignored_types.into_boxed_slice(),
ignored_trait_bounds: ignored_trait_bounds.into_boxed_slice(),
parameter_types: parameter_types.into_boxed_slice(),
parameter_trait_bounds: parameter_trait_bounds.into_boxed_slice(),
}
}
}
/// Extends the `where` clause in reflection with any additional bounds needed.
///
/// This is mostly used to add additional bounds to reflected objects with generic types.
/// For reflection purposes, we usually have:
/// * `active_trait_bounds: Reflect`
/// * `ignored_trait_bounds: Any + Send + Sync`
///
/// # Arguments
///
/// * `where_clause`: existing `where` clause present on the object to be derived
/// * `where_clause_options`: additional parameters defining which trait bounds to add to the `where` clause
///
/// # Example
///
/// The struct:
/// ```ignore
/// #[derive(Reflect)]
/// struct Foo<T, U> {
/// a: T,
/// #[reflect(ignore)]
/// b: U
/// }
/// ```
/// will have active types: `[T]` and ignored types: `[U]`
///
/// The `extend_where_clause` function will yield the following `where` clause:
/// ```ignore
/// where
/// T: Reflect, // active_trait_bounds
/// U: Any + Send + Sync, // ignored_trait_bounds
/// ```
pub(crate) fn extend_where_clause(
where_clause: Option<&WhereClause>,
where_clause_options: &WhereClauseOptions,
) -> proc_macro2::TokenStream {
let parameter_types = &where_clause_options.parameter_types;
let active_types = &where_clause_options.active_types;
let ignored_types = &where_clause_options.ignored_types;
let parameter_trait_bounds = &where_clause_options.parameter_trait_bounds;
let active_trait_bounds = &where_clause_options.active_trait_bounds;
let ignored_trait_bounds = &where_clause_options.ignored_trait_bounds;
let mut generic_where_clause = if let Some(where_clause) = where_clause {
let predicates = where_clause.predicates.iter();
quote! {where #(#predicates,)*}
} else if !(parameter_types.is_empty() && active_types.is_empty() && ignored_types.is_empty()) {
quote! {where}
} else {
quote!()
};
// The nested parentheses here are required to properly scope HRTBs coming
// from field types to the type itself, as the compiler will scope them to
// the whole bound by default, resulting in a failure to prove trait
// adherence.
generic_where_clause.extend(quote! {
#((#active_types): #active_trait_bounds,)*
#((#ignored_types): #ignored_trait_bounds,)*
// Leave parameter bounds to the end for more sane error messages.
#((#parameter_types): #parameter_trait_bounds,)*
});
generic_where_clause
}
impl<T> Default for ResultSifter<T> {
fn default() -> Self {
Self {
items: Vec::new(),
errors: None,
}
}
}
impl<T> ResultSifter<T> {
/// Sift the given result, combining errors if necessary.
pub fn sift(&mut self, result: Result<T, syn::Error>) {
match result {
Ok(data) => self.items.push(data),
Err(err) => {
if let Some(ref mut errors) = self.errors {
errors.combine(err);
} else {
self.errors = Some(err);
}
}
}
}
/// Associated method that provides a convenient implementation for [`Iterator::fold`].
pub fn fold(mut sifter: Self, result: Result<T, syn::Error>) -> Self {
sifter.sift(result);
sifter
}
/// Complete the sifting process and return the final result.
pub fn finish(self) -> Result<Vec<T>, syn::Error> {
if let Some(errors) = self.errors {
Err(errors)
} else {
Ok(self.items)
}
}
}
/// Converts an iterator over ignore behavior of members to a bitset of ignored members.
///
/// Takes into account the fact that always ignored (non-reflected) members are skipped.
///
/// # Example
/// ```rust,ignore
/// pub struct HelloWorld {
/// reflected_field: u32 // index: 0
///
/// #[reflect(ignore)]
/// non_reflected_field: u32 // index: N/A (not 1!)
///
/// #[reflect(skip_serializing)]
/// non_serialized_field: u32 // index: 1
/// }
/// ```
/// Would convert to the `0b01` bitset (i.e second field is NOT serialized)
///
pub(crate) fn members_to_serialization_denylist<T>(member_iter: T) -> BitSet<u32>
where
T: Iterator<Item = ReflectIgnoreBehavior>,
{
let mut bitset = BitSet::default();
member_iter.fold(0, |next_idx, member| match member {
ReflectIgnoreBehavior::IgnoreAlways => next_idx,
ReflectIgnoreBehavior::IgnoreSerialization => {
bitset.insert(next_idx);
next_idx + 1
}
ReflectIgnoreBehavior::None => next_idx + 1,
});
bitset
}
/// Turns an `Option<TokenStream>` into a `TokenStream` for an `Option`.
pub(crate) fn wrap_in_option(tokens: Option<proc_macro2::TokenStream>) -> proc_macro2::TokenStream {
match tokens {
Some(tokens) => quote! {
#FQOption::Some(#tokens)
},
None => quote! {
#FQOption::None
},
}
}
/// Contains tokens representing different kinds of string.
#[derive(Clone)]
pub(crate) enum StringExpr {
/// A string that is valid at compile time.
///
/// This is either a string literal like `"mystring"`,
/// or a string created by a macro like [`module_path`]
/// or [`concat`].
Const(proc_macro2::TokenStream),
/// A [string slice](str) that is borrowed for a `'static` lifetime.
Borrowed(proc_macro2::TokenStream),
/// An [owned string](String).
Owned(proc_macro2::TokenStream),
}
impl<T: ToString + Spanned> From<T> for StringExpr {
fn from(value: T) -> Self {
Self::from_lit(&LitStr::new(&value.to_string(), value.span()))
}
}
impl StringExpr {
/// Creates a [constant] [`StringExpr`] from a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_lit(lit: &LitStr) -> Self {
Self::Const(lit.to_token_stream())
}
/// Creates a [constant] [`StringExpr`] by interpreting a [string slice][str] as a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_str(string: &str) -> Self {
Self::Const(string.into_token_stream())
}
/// Returns tokens for an [owned string](String).
///
/// The returned expression will allocate unless the [`StringExpr`] is [already owned].
///
/// [already owned]: StringExpr::Owned
pub fn into_owned(self) -> proc_macro2::TokenStream {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => quote! {
::std::string::ToString::to_string(#tokens)
},
Self::Owned(owned) => owned,
}
}
/// Returns tokens for a statically borrowed [string slice](str).
pub fn into_borrowed(self) -> proc_macro2::TokenStream |
/// Appends a [`StringExpr`] to another.
///
/// If both expressions are [`StringExpr::Const`] this will use [`concat`] to merge them.
pub fn appended_by(mut self, other: StringExpr) -> Self {
if let Self::Const(tokens) = self {
if let Self::Const(more) = other {
return Self::Const(quote! {
::core::concat!(#tokens, #more)
});
}
self = Self::Const(tokens);
}
let owned = self.into_owned();
let borrowed = other.into_borrowed();
Self::Owned(quote! {
#owned + #borrowed
})
}
}
impl Default for StringExpr {
fn default() -> Self {
StringExpr::from_str("")
}
}
impl FromIterator<StringExpr> for StringExpr {
fn from_iter<T: IntoIterator<Item = StringExpr>>(iter: T) -> Self {
let mut iter = iter.into_iter();
match iter.next() {
Some(mut expr) => {
for next in iter {
expr = expr.appended_by(next);
}
expr
}
None => Default::default(),
}
}
}
| {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => tokens,
Self::Owned(owned) => quote! {
&#owned
},
}
} | identifier_body |
utility.rs | //! General-purpose utility functions for internal usage within this crate.
use crate::derive_data::{ReflectMeta, StructField};
use crate::field_attributes::ReflectIgnoreBehavior;
use crate::fq_std::{FQAny, FQOption, FQSend, FQSync};
use bevy_macro_utils::BevyManifest;
use bit_set::BitSet;
use proc_macro2::{Ident, Span};
use quote::{quote, ToTokens};
use syn::{spanned::Spanned, LitStr, Member, Path, Type, WhereClause};
/// Returns the correct path for `bevy_reflect`.
pub(crate) fn get_bevy_reflect_path() -> Path {
BevyManifest::get_path_direct("bevy_reflect")
}
/// Returns the "reflected" ident for a given string.
///
/// # Example
///
/// ```ignore
/// let reflected: Ident = get_reflect_ident("Hash");
/// assert_eq!("ReflectHash", reflected.to_string());
/// ```
pub(crate) fn get_reflect_ident(name: &str) -> Ident {
let reflected = format!("Reflect{name}");
Ident::new(&reflected, Span::call_site())
}
/// Helper struct used to process an iterator of `Result<Vec<T>, syn::Error>`,
/// combining errors into one along the way.
pub(crate) struct ResultSifter<T> {
items: Vec<T>,
errors: Option<syn::Error>,
}
/// Returns a [`Member`] made of `ident` or `index` if `ident` is None.
///
/// Rust struct syntax allows for `Struct { foo: "string" }` with explicitly
/// named fields. It allows the `Struct { 0: "string" }` syntax when the struct
/// is declared as a tuple struct.
///
/// ```
/// # fn main() {
/// struct Foo { field: &'static str }
/// struct Bar(&'static str);
/// let Foo { field } = Foo { field: "hi" };
/// let Bar { 0: field } = Bar { 0: "hello" };
/// let Bar(field) = Bar("hello"); // more common syntax
/// # }
/// ```
///
/// This function helps field access in context where you are declaring either
/// a tuple struct or a struct with named fields. If you don't have a field name,
/// it means you need to access the struct through an index.
pub(crate) fn ident_or_index(ident: Option<&Ident>, index: usize) -> Member {
ident.map_or_else(
|| Member::Unnamed(index.into()),
|ident| Member::Named(ident.clone()),
)
}
/// Options defining how to extend the `where` clause in reflection with any additional bounds needed.
pub(crate) struct WhereClauseOptions {
/// Type parameters that need extra trait bounds.
parameter_types: Box<[Ident]>,
/// Trait bounds to add to the type parameters.
parameter_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that will be reflected and need an extra trait bound
active_types: Box<[Type]>,
/// Trait bounds to add to the active types
active_trait_bounds: Box<[proc_macro2::TokenStream]>,
/// Any types that won't be reflected and need an extra trait bound
ignored_types: Box<[Type]>,
/// Trait bounds to add to the ignored types
ignored_trait_bounds: Box<[proc_macro2::TokenStream]>,
}
impl Default for WhereClauseOptions {
/// By default, don't add any additional bounds to the `where` clause
fn default() -> Self {
Self {
parameter_types: Box::new([]),
active_types: Box::new([]),
ignored_types: Box::new([]),
active_trait_bounds: Box::new([]),
ignored_trait_bounds: Box::new([]),
parameter_trait_bounds: Box::new([]),
}
}
}
impl WhereClauseOptions {
/// Create [`WhereClauseOptions`] for a struct or enum type.
pub fn new<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
) -> Self {
Self::new_with_bounds(meta, active_fields, ignored_fields, |_| None, |_| None)
}
/// Create [`WhereClauseOptions`] for a simple value type.
pub fn new_value(meta: &ReflectMeta) -> Self {
Self::new_with_bounds(
meta,
std::iter::empty(),
std::iter::empty(),
|_| None,
|_| None,
)
}
/// Create [`WhereClauseOptions`] for a struct or enum type.
///
/// Compared to [`WhereClauseOptions::new`], this version allows you to specify
/// custom trait bounds for each field.
pub fn new_with_bounds<'a: 'b, 'b>(
meta: &ReflectMeta,
active_fields: impl Iterator<Item = &'b StructField<'a>>,
ignored_fields: impl Iterator<Item = &'b StructField<'a>>,
active_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
ignored_bounds: impl Fn(&StructField<'a>) -> Option<proc_macro2::TokenStream>,
) -> Self {
let bevy_reflect_path = meta.bevy_reflect_path();
let is_from_reflect = meta.from_reflect().should_auto_derive();
let (active_types, active_trait_bounds): (Vec<_>, Vec<_>) = active_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = active_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = if is_from_reflect {
quote!(#bevy_reflect_path::FromReflect #custom_bounds)
} else {
quote!(#bevy_reflect_path::Reflect #custom_bounds)
};
(ty, bounds)
})
.unzip();
let (ignored_types, ignored_trait_bounds): (Vec<_>, Vec<_>) = ignored_fields
.map(|field| {
let ty = field.data.ty.clone();
let custom_bounds = ignored_bounds(field).map(|bounds| quote!(+ #bounds));
let bounds = quote!(#FQAny + #FQSend + #FQSync #custom_bounds);
(ty, bounds)
})
.unzip();
let (parameter_types, parameter_trait_bounds): (Vec<_>, Vec<_>) =
if meta.traits().type_path_attrs().should_auto_derive() {
meta.type_path()
.generics()
.type_params()
.map(|param| {
let ident = param.ident.clone();
let bounds = quote!(#bevy_reflect_path::TypePath);
(ident, bounds)
})
.unzip()
} else {
// If we don't need to derive `TypePath` for the type parameters,
// we can skip adding its bound to the `where` clause.
(Vec::new(), Vec::new())
};
Self {
active_types: active_types.into_boxed_slice(),
active_trait_bounds: active_trait_bounds.into_boxed_slice(),
ignored_types: ignored_types.into_boxed_slice(),
ignored_trait_bounds: ignored_trait_bounds.into_boxed_slice(),
parameter_types: parameter_types.into_boxed_slice(),
parameter_trait_bounds: parameter_trait_bounds.into_boxed_slice(),
}
}
}
/// Extends the `where` clause in reflection with any additional bounds needed.
///
/// This is mostly used to add additional bounds to reflected objects with generic types.
/// For reflection purposes, we usually have:
/// * `active_trait_bounds: Reflect`
/// * `ignored_trait_bounds: Any + Send + Sync`
///
/// # Arguments
///
/// * `where_clause`: existing `where` clause present on the object to be derived
/// * `where_clause_options`: additional parameters defining which trait bounds to add to the `where` clause
///
/// # Example
///
/// The struct:
/// ```ignore
/// #[derive(Reflect)]
/// struct Foo<T, U> {
/// a: T,
/// #[reflect(ignore)]
/// b: U
/// }
/// ```
/// will have active types: `[T]` and ignored types: `[U]`
///
/// The `extend_where_clause` function will yield the following `where` clause:
/// ```ignore
/// where
/// T: Reflect, // active_trait_bounds
/// U: Any + Send + Sync, // ignored_trait_bounds
/// ```
pub(crate) fn | (
where_clause: Option<&WhereClause>,
where_clause_options: &WhereClauseOptions,
) -> proc_macro2::TokenStream {
let parameter_types = &where_clause_options.parameter_types;
let active_types = &where_clause_options.active_types;
let ignored_types = &where_clause_options.ignored_types;
let parameter_trait_bounds = &where_clause_options.parameter_trait_bounds;
let active_trait_bounds = &where_clause_options.active_trait_bounds;
let ignored_trait_bounds = &where_clause_options.ignored_trait_bounds;
let mut generic_where_clause = if let Some(where_clause) = where_clause {
let predicates = where_clause.predicates.iter();
quote! {where #(#predicates,)*}
} else if !(parameter_types.is_empty() && active_types.is_empty() && ignored_types.is_empty()) {
quote! {where}
} else {
quote!()
};
// The nested parentheses here are required to properly scope HRTBs coming
// from field types to the type itself, as the compiler will scope them to
// the whole bound by default, resulting in a failure to prove trait
// adherence.
generic_where_clause.extend(quote! {
#((#active_types): #active_trait_bounds,)*
#((#ignored_types): #ignored_trait_bounds,)*
// Leave parameter bounds to the end for more sane error messages.
#((#parameter_types): #parameter_trait_bounds,)*
});
generic_where_clause
}
impl<T> Default for ResultSifter<T> {
fn default() -> Self {
Self {
items: Vec::new(),
errors: None,
}
}
}
impl<T> ResultSifter<T> {
/// Sift the given result, combining errors if necessary.
pub fn sift(&mut self, result: Result<T, syn::Error>) {
match result {
Ok(data) => self.items.push(data),
Err(err) => {
if let Some(ref mut errors) = self.errors {
errors.combine(err);
} else {
self.errors = Some(err);
}
}
}
}
/// Associated method that provides a convenient implementation for [`Iterator::fold`].
pub fn fold(mut sifter: Self, result: Result<T, syn::Error>) -> Self {
sifter.sift(result);
sifter
}
/// Complete the sifting process and return the final result.
pub fn finish(self) -> Result<Vec<T>, syn::Error> {
if let Some(errors) = self.errors {
Err(errors)
} else {
Ok(self.items)
}
}
}
/// Converts an iterator over ignore behavior of members to a bitset of ignored members.
///
/// Takes into account the fact that always ignored (non-reflected) members are skipped.
///
/// # Example
/// ```rust,ignore
/// pub struct HelloWorld {
/// reflected_field: u32 // index: 0
///
/// #[reflect(ignore)]
/// non_reflected_field: u32 // index: N/A (not 1!)
///
/// #[reflect(skip_serializing)]
/// non_serialized_field: u32 // index: 1
/// }
/// ```
/// Would convert to the `0b01` bitset (i.e second field is NOT serialized)
///
pub(crate) fn members_to_serialization_denylist<T>(member_iter: T) -> BitSet<u32>
where
T: Iterator<Item = ReflectIgnoreBehavior>,
{
let mut bitset = BitSet::default();
member_iter.fold(0, |next_idx, member| match member {
ReflectIgnoreBehavior::IgnoreAlways => next_idx,
ReflectIgnoreBehavior::IgnoreSerialization => {
bitset.insert(next_idx);
next_idx + 1
}
ReflectIgnoreBehavior::None => next_idx + 1,
});
bitset
}
/// Turns an `Option<TokenStream>` into a `TokenStream` for an `Option`.
pub(crate) fn wrap_in_option(tokens: Option<proc_macro2::TokenStream>) -> proc_macro2::TokenStream {
match tokens {
Some(tokens) => quote! {
#FQOption::Some(#tokens)
},
None => quote! {
#FQOption::None
},
}
}
/// Contains tokens representing different kinds of string.
#[derive(Clone)]
pub(crate) enum StringExpr {
/// A string that is valid at compile time.
///
/// This is either a string literal like `"mystring"`,
/// or a string created by a macro like [`module_path`]
/// or [`concat`].
Const(proc_macro2::TokenStream),
/// A [string slice](str) that is borrowed for a `'static` lifetime.
Borrowed(proc_macro2::TokenStream),
/// An [owned string](String).
Owned(proc_macro2::TokenStream),
}
impl<T: ToString + Spanned> From<T> for StringExpr {
fn from(value: T) -> Self {
Self::from_lit(&LitStr::new(&value.to_string(), value.span()))
}
}
impl StringExpr {
/// Creates a [constant] [`StringExpr`] from a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_lit(lit: &LitStr) -> Self {
Self::Const(lit.to_token_stream())
}
/// Creates a [constant] [`StringExpr`] by interpreting a [string slice][str] as a [`struct@LitStr`].
///
/// [constant]: StringExpr::Const
pub fn from_str(string: &str) -> Self {
Self::Const(string.into_token_stream())
}
/// Returns tokens for an [owned string](String).
///
/// The returned expression will allocate unless the [`StringExpr`] is [already owned].
///
/// [already owned]: StringExpr::Owned
pub fn into_owned(self) -> proc_macro2::TokenStream {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => quote! {
::std::string::ToString::to_string(#tokens)
},
Self::Owned(owned) => owned,
}
}
/// Returns tokens for a statically borrowed [string slice](str).
pub fn into_borrowed(self) -> proc_macro2::TokenStream {
match self {
Self::Const(tokens) | Self::Borrowed(tokens) => tokens,
Self::Owned(owned) => quote! {
&#owned
},
}
}
/// Appends a [`StringExpr`] to another.
///
/// If both expressions are [`StringExpr::Const`] this will use [`concat`] to merge them.
pub fn appended_by(mut self, other: StringExpr) -> Self {
if let Self::Const(tokens) = self {
if let Self::Const(more) = other {
return Self::Const(quote! {
::core::concat!(#tokens, #more)
});
}
self = Self::Const(tokens);
}
let owned = self.into_owned();
let borrowed = other.into_borrowed();
Self::Owned(quote! {
#owned + #borrowed
})
}
}
impl Default for StringExpr {
fn default() -> Self {
StringExpr::from_str("")
}
}
impl FromIterator<StringExpr> for StringExpr {
fn from_iter<T: IntoIterator<Item = StringExpr>>(iter: T) -> Self {
let mut iter = iter.into_iter();
match iter.next() {
Some(mut expr) => {
for next in iter {
expr = expr.appended_by(next);
}
expr
}
None => Default::default(),
}
}
}
| extend_where_clause | identifier_name |
views_ajax.py | # -*- coding: UTF-8 -*-
import re
import json
import datetime
import multiprocessing
import pdb
from django.db.models import Q
from django.db.utils import IntegrityError
from django.db import transaction
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
if settings.ENABLE_LDAP:
from django_auth_ldap.backend import LDAPBackend
from .daoora import DaoOra
from .const import Const
from .aes_decryptor import Prpcrypt
from .models import users, workflow,ora_primary_config, ora_tables,ora_tab_privs,operation_ctl,operation_record
from sql.sendmail import MailSender
from .getnow import getNow
import logging
from sql.tasks import syncDictData,mailDba,wechatDba,dingDba
logger = logging.getLogger('default')
mailSender = MailSender()
daoora = DaoOra()
prpCryptor = Prpcrypt()
login_failure_counter = {} #登录失败锁定计数器,给loginAuthenticate用的
sqlSHA1_cache = {} #存储SQL文本与SHA1值的对应关系,尽量减少与数据库的交互次数,提高效率。格式: {工单ID1:{SQL内容1:sqlSHA1值1, SQL内容2:sqlSHA1值2},}
def log_mail_record(login_failed_message):
mail_title = 'login inception'
logger.warning(login_failed_message)
mailSender.sendEmail(mail_title, login_failed_message, getattr(settings, 'MAIL_REVIEW_DBA_ADDR'))
#ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def loginAuthenticate(username, password):
"""登录认证,包含一个登录失败计数器,5分钟内连续失败5次的账号,会被锁定5分钟"""
lockCntThreshold = settings.LOCK_CNT_THRESHOLD
lockTimeThreshold = settings.LOCK_TIME_THRESHOLD
#服务端二次验证参数
strUsername = username
strPassword = password
if strUsername == "" or strPassword == "" or strUsername is None or strPassword is None:
result = {'status':2, 'msg':'登录用户名或密码为空,请重新输入!', 'data':''}
elif strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (datetime.datetime.now() - login_failure_counter[strUsername]["last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status':3, 'msg':'登录失败超过5次,该账号已被锁定5分钟!', 'data':''}
else:
correct_users = users.objects.filter(username=strUsername)
if len(correct_users) == 1 and correct_users[0].is_active and check_password(strPassword, correct_users[0].password) == True:
#调用了django内置函数check_password函数检测输入的密码是否与django默认的PBKDF2算法相匹配
if strUsername in login_failure_counter:
#如果登录失败计数器中存在该用户名,则清除之
login_failure_counter.pop(strUsername)
result = {'status':0, 'msg':'ok', 'data':''}
else:
if strUsername not in login_failure_counter:
#第一次登录失败,登录失败计数器中不存在该用户,则创建一个该用户的计数器
login_failure_counter[strUsername] = {"cnt":1, "last_failure_time":datetime.datetime.now()}
else:
if (datetime.datetime.now() - login_failure_counter[strUsername]["last_failure_time"]).seconds <= lockTimeThreshold:
login_failure_counter[strUsername]["cnt"] += 1
else:
#上一次登录失败时间早于5分钟前,则重新计数。以达到超过5分钟自动解锁的目的。
login_failure_counter[strUsername]["cnt"] = 1
login_failure_counter[strUsername]["last_failure_time"] = datetime.datetime.now()
if login_failure_counter[strUsername]["cnt"]%10==0:
log_mail_record('user:{},login failed, fail count:{}'.format(strUsername,login_failure_counter[strUsername]["cnt"]))
result = {'status':1, 'msg':'用户名或密码错误,请重新输入!', 'data':''}
return result
#ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def authenticateEntry(request):
"""接收http请求,然后把请求中的用户名密码传给loginAuthenticate去验证"""
if request.is_ajax():
strUsername = request.POST.get('username')
strPassword = request.POST.get('password')
else:
strUsername = request.POST['username']
strPassword = request.POST['password']
lockCntThreshold = settings.LOCK_CNT_THRESHOLD
lockTimeThreshold = settings.LOCK_TIME_THRESHOLD
if settings.ENABLE_LDAP:
ldap = LDAPBackend()
try:
user = ldap.authenticate(username=strUsername, password=strPassword)
except Exception as err:
result = {'msg': 'ldap authorization failed'}
return HttpResponse(json.dumps(result), content_type='application/json')
if strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (
datetime.datetime.now() - login_failure_counter[strUsername][
"last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status': 3, 'msg': '登录失败超过5次,该账号已被锁定5分钟!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
if user and user.is_active:
request.session['login_username'] = strUsername
result = {'status': 0, 'msg': 'ok', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
result = loginAuthenticate(strUsername, strPassword)
if result['status'] == 0:
request.session['login_username'] = strUsername
return HttpResponse(json.dumps(result), content_type='application/json')
#Oracle SQL简单审核
@csrf_exempt
def orasimplecheck(request):
if request.is_ajax():
sqlContent = request.POST.get('sql_content')
clusterName = request.POST.get('cluster_name')
else:
sqlContent = request.POST['sql_content']
clusterName = request.POST['cluster_name']
finalResult = {'status':'ok', 'msg':'检测通过', 'data':[]}
#服务器端参数验证
if sqlContent is None or clusterName is None:
finalResult['status'] = 'error'
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip()
if sqlContent[-1] != ";":
finalResult['status'] = 'error'
finalResult['msg'] = 'Oracle SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip(';')
#使用explain plan进行自动审核
try:
resultList = daoora.sqlAutoreview(sqlContent, clusterName)
except Exception as err:
finalResult['status'] = 'error'
finalResult['msg'] = str(err)
else:
for result in resultList:
if result['stage'] != 'CHECKED':
finalResult['status'] = 'error'
finalResult['msg'] = result['errormessage']+' -- '+result['sql']
#return HttpResponse(json.dumps(finalResult), content_type='application/json')
#要把result转成JSON存进数据库里,方便SQL单子详细信息展示
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步表数据字典
@csrf_exempt
def syncoradict(request):
primaries = ora_primary_config.objects.all().order_by('cluster_name')
listCluster = [primary.cluster_name for primary in primaries]
clusterListSync = request.POST.get('cluster_list_sync')
if clusterListSync:
clusterListSync=json.loads(clusterListSync)
ctl = operation_ctl.objects.get(data_type='数据字典' ,opt_type='同步')
if ctl.status == '进行中':
finalResult = {'status':'error','msg':'有任务进行中'}
else:
ctl.status='进行中'
ctl.save()
syncDictData.delay(clusterListSync)
finalResult = {'status':'ok'}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
finalResult = {'listCluster':listCluster}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步ldap用户到数据库
@csrf_exempt
def syncldapuser(request):
if not settings.ENABLE_LDAP:
result = {'msg': 'LDAP支持未开启'}
return HttpResponse(json.dumps(result), content_type='application/json')
ldapback = LDAPBackend()
ldap = ldapback.ldap
ldapconn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
tls = getattr(settings, 'AUTH_LDAP_START_TLS', None)
if tls:
ldapconn.start_tls_s()
binddn = settings.AUTH_LDAP_BIND_DN
bind_password = settings.AUTH_LDAP_BIND_PASSWORD
basedn = settings.AUTH_LDAP_BASEDN
ldapconn.simple_bind_s(binddn, bind_password)
ldapusers = ldapconn.search_s(basedn, ldap.SCOPE_SUBTREE, 'objectclass=*', attrlist=settings.AUTH_LDAP_USER_ATTRLIST)
#ldap中username存在条目的第一个元素的uid中,定义的username_field不再使用,改为截取user_tag
display_field = settings.AUTH_LDAP_USER_ATTR_MAP['display']
email_field = settings.AUTH_LDAP_USER_ATTR_MAP['email']
count = 0
try:
for user in ldapusers:
user_tag=user[0].split(',')
user_attr = user[1]
if user_tag and user_attr:
username = user_tag[0][user_tag[0].find('=')+1:].encode()
display = user_attr.get(display_field,['none'.encode(),])[0]
email = user_attr.get(email_field,['none'.encode(),])[0]
already_user = users.objects.filter(username=username.decode()).filter(is_ldapuser=True)
if len(already_user) == 0:
u = users(username=username.decode(), display=display.decode(), email=email.decode(), is_ldapuser=True,is_active=0)
u.save()
count += 1
except Exception as err:
result = {'msg': '用户{0}导入错误:{1}'.format(username,str(err))}
return HttpResponse(json.dumps(result))
else:
result = {'msg': '同步{}个用户.'.format(count)}
return HttpResponse(json.dumps(result), content_type='application/json')
#请求图表数据
@csrf_exempt
def getMonthCharts(request):
result = daoora.getWorkChartsByMonth()
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getPersonCharts(request):
result = daoora.getWorkChartsByPerson()
return HttpResponse(json.dumps(result), content_type='application/json')
def getSqlSHA1(workflowId):
"""调用django ORM从数据库里查出review_content,从其中获取sqlSHA1值"""
workflowDetail = get_object_or_404(workflow, pk=workflowId)
dictSHA1 = {}
# 使用json.loads方法,把review_content从str转成list,
listReCheckResult = json.loads(workflowDetail.review_content)
for rownum in range(len(listReCheckResult)):
id = rownum + 1
sqlSHA1 = listReCheckResult[rownum][10]
if sqlSHA1 != '':
dictSHA1[id] = sqlSHA1
if dictSHA1 != {}:
# 如果找到有sqlSHA1值,说明是通过pt-OSC操作的,将其放入缓存。
# 因为使用OSC执行的SQL占较少数,所以不设置缓存过期时间
sqlSHA1_cache[workflowId] = dictSHA1
return dictSHA1
@csrf_exempt
def getOscPercent(request):
"""获取该SQL的pt-OSC执行进度和剩余时间"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
dictSHA1 = {}
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
# cachehit = "已命中"
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
result = inceptionDao.getOscPercent(sqlSHA1) #成功获取到SHA1值,去inception里面查询进度
if result["status"] == 0:
# 获取到进度值
pctResult = result
else:
# result["status"] == 1, 未获取到进度值,需要与workflow.execute_result对比,来判断是已经执行过了,还是还未执行
execute_result = workflow.objects.get(id=workflowId).execute_result
try:
listExecResult = json.loads(execute_result)
except ValueError:
listExecResult = execute_result
if type(listExecResult) == list and len(listExecResult) >= sqlID-1:
if dictSHA1[sqlID] in listExecResult[sqlID-1][10]:
# 已经执行完毕,进度值置为100
pctResult = {"status":0, "msg":"ok", "data":{"percent":100, "timeRemained":""}}
else:
# 可能因为前一条SQL是DML,正在执行中;或者还没执行到这一行。但是status返回的是4,而当前SQL实际上还未开始执行。这里建议前端进行重试
pctResult = {"status":-3, "msg":"进度未知", "data":{"percent":-100, "timeRemained":""}} |
@csrf_exempt
def getWorkflowStatus(request):
"""获取某个工单的当前状态"""
workflowId = request.POST['workflowid']
if workflowId == '' or workflowId is None :
context = {"status":-1 ,'msg': 'workflowId参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
workflowDetail = get_object_or_404(workflow, pk=workflowId)
workflowStatus = workflowDetail.status
result = {"status":workflowStatus, "msg":"", "data":""}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def stopOscProgress(request):
"""中止该SQL的pt-OSC进程"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
loginUser = request.session.get('login_username', False)
workflowDetail = workflow.objects.get(id=workflowId)
try:
listAllReviewMen = json.loads(workflowDetail.review_man)
except ValueError:
listAllReviewMen = (workflowDetail.review_man, )
#服务器端二次验证,当前工单状态必须为等待人工审核,正在执行人工审核动作的当前登录用户必须为审核人. 避免攻击或被接口测试工具强行绕过
if workflowDetail.status != Const.workflowStatus['executing']:
context = {"status":-1, "msg":'当前工单状态不是"执行中",请刷新当前页面!', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
if loginUser is None or loginUser not in listAllReviewMen:
context = {"status":-1 ,'msg': '当前登录用户不是审核人,请重新登录.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
optResult = inceptionDao.stopOscProgress(sqlSHA1)
else:
optResult = {"status":4, "msg":"不是由pt-OSC执行的", "data":""}
return HttpRespense(json.dumps(optResult), content_type='application/json')
@csrf_exempt
def manExec(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
with transaction.atomic():
try:
workflowDetail = workflow.objects.select_for_update().get(id=workflowId,status__in=(Const.workflowStatus['manreviewing'],Const.workflowStatus['autoreviewwrong'],))
except Exception:
result = {'msg': '已经在处理'}
return HttpResponse(json.dumps(result), content_type='application/json')
try:
reviewMen = json.loads(workflowDetail.review_man)
except Exception:
reviewMen = workflowDetail.review_man
if not loginUser in reviewMen:
result = {'msg': '你不在审核人之列'}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.status = Const.workflowStatus['manexec']
workflowDetail.operator = loginUser
try:
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '更改状态为手工执行'
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='application/json')
#获取当前请求url
def _getDetailUrl(request):
scheme = request.scheme
#host = request.META['HTTP_HOST']
host = getattr(settings,'WAN_HOST')
return "%s://%s/detail/" % (scheme, host)
@csrf_exempt
def manFinish(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
executeStatus = request.POST['status']
executeResult = request.POST['content']
workflowDetail = workflow.objects.get(id=workflowId)
if loginUser != workflowDetail.operator:
result = {"status":-1,"msg":"需要处理人操作"}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.execute_result = executeResult
if executeStatus == '0':
workflowDetail.status = Const.workflowStatus['manexcept']
elif executeStatus == '1':
workflowDetail.status = Const.workflowStatus['manfinish']
try:
workflowDetail.operator = loginUser
workflowDetail.finish_time = getNow()
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '保存成功'
#如果执行完毕了,则根据settings.py里的配置决定是否给提交者和DBA一封邮件提醒.DBA需要知晓审核并执行过的单子
url = _getDetailUrl(request) + str(workflowId) + '/'
#给主、副审核人,申请人,DBA各发一封邮件
engineer = workflowDetail.engineer
operator = workflowDetail.operator
workflowStatus = workflowDetail.status
workflowName = workflowDetail.workflow_name
objEngineer = users.objects.get(username=engineer)
strTitle = "SQL上线工单执行完毕 # " + str(workflowId)
strContent = "发起人:" + engineer + "\n操作人:" + operator + "\n工单地址:" + url + "\n工单名称: " + workflowName +"\n执行结果:" + workflowStatus
mailDba.delay(strTitle, strContent, [objEngineer.email])
wechatDba.delay(strTitle, strContent,objEngineer.wechat_account)
dingDba.delay(strContent,objEngineer.mobile)
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def privMod(request,operation):
loginUser = request.session.get('login_username', False)
hasTableId = [tab.table_id for tab in ora_tab_privs.objects.filter(username = request.GET.get('username'))]
if operation == 'add':
tables = ora_tables.objects.all().exclude(id__in=hasTableId)
elif operation == 'delete':
tables = ora_tables.objects.all().filter(id__in=hasTableId)
clusterName = request.GET.get('cluster_name')
schema = request.GET.get('schema')
cluster_list = []
schema_list = []
table_list = []
table_dict = {}
if not clusterName and not schema:
cluster_list = [primary.cluster_name for primary in ora_primary_config.objects.all()]
cluster_list.sort()
elif clusterName and not schema:
instanceId = ora_primary_config.objects.get(cluster_name=clusterName).id
schema_list = list(set([table.schema_name for table in tables.filter(instance_id=instanceId)]))
schema_list.sort()
elif clusterName and schema:
instanceId = ora_primary_config.objects.get(cluster_name=clusterName).id
table_list = [table.table for table in tables.filter(instance_id=instanceId).filter(schema_name=schema)]
table_dict = {}
for table in tables.filter(instance_id=instanceId).filter(schema_name=schema).order_by('table'):
table_dict[table.id] = table.table
result = {'cluster_list':cluster_list,'schema_list':schema_list,'table_list':table_dict}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def privCommit(request,operation):
loginUser = request.session.get('login_username', False)
if loginUser != 'admin':
context = {'errMsg': '无权限访问该页面'}
return render(request, 'error.html', context)
username = request.POST.get('username')
table_list = request.POST.get('table_list')
if table_list:
oriTabList = json.loads(table_list)
else:
status = 'error'
msg = '选择为空'
result = {'status':status,'msg':msg}
return HttpResponse(json.dumps(result), content_type='application/json')
extra_inst_list = request.POST.get('extra_inst_list')
if extra_inst_list:
extraInstList = json.loads(extra_inst_list)
#if len(extraInstList) > 0:
extraTabList = []
for extraInst in extraInstList:
instance_id = ora_primary_config.objects.get(cluster_name=extraInst).id
for i in range(0,len(oriTabList)):
table_id = oriTabList[i]
oraTab = ora_tables.objects.get(id = int(table_id))
try:
extraId = ora_tables.objects.get(instance_id = instance_id,schema_name=oraTab.schema_name,table=oraTab.table).id
except Exception as err:
pass
else:
extraTabList.append(extraId)
tabList = oriTabList + extraTabList
status = 'saved'
msg = '保存成功'
for table_id in tabList:
try:
if operation == 'add':
p = ora_tab_privs.objects.filter(username = username,table = ora_tables(id = int(table_id)))
if p.count() != 0:
status = 'save failed'
msg = '请勿重复保存'
else:
ora_tab_privs(username = username,table = ora_tables(id = int(table_id))).save()
elif operation == 'delete':
p = ora_tab_privs.objects.filter(username = username,table = ora_tables(id = int(table_id)))
if len(p) == 0:
status = 'save failed'
msg = '请勿重复删除'
else:
p.delete()
except IntegrityError:
status = 'save failed'
msg = '请勿重复保存'
except Exception as e:
print(str(e))
status = 'save failed'
msg = '有数据保存/删除失败'
else:
continue
result = {'status':status,'msg':msg}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getResult(request):
logon_user = request.session.get('login_username', False)
clusterName = request.POST.get('cluster_name')
sqlContent = request.POST.get('sql_content')
finalStatus,msg,headerList,queryResult = daoora.query(logon_user,clusterName,sqlContent)
paginator = Paginator(queryResult, 10)
after_range_num = 5
before_range_num = 4
try:
page = int(request.GET.get('page','1'))
if page < 1:
page = 1
except ValueError:
page = 1
try:
queryResultP = paginator.page(page)
except (EmptyPage,InvalidPage,PageNotAnInteger):
queryResultP = paginator.page(1)
if page >= after_range_num:
page_range = paginator.page_range[page-after_range_num:page+before_range_num]
else:
page_range = paginator.page_range[0:int(page)+before_range_num]
result = {'final_status':finalStatus,'msg':msg,'header_list':headerList,'query_result':queryResultP}
return HttpResponse(locals(), content_type='application/json') | elif dictSHA1 != {} and sqlID not in dictSHA1:
pctResult = {"status":4, "msg":"该行SQL不是由pt-OSC执行的", "data":""}
else:
pctResult = {"status":-2, "msg":"整个工单不由pt-OSC执行", "data":""}
return HttpResponse(json.dumps(pctResult), content_type='application/json') | random_line_split |
views_ajax.py | # -*- coding: UTF-8 -*-
import re
import json
import datetime
import multiprocessing
import pdb
from django.db.models import Q
from django.db.utils import IntegrityError
from django.db import transaction
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
if settings.ENABLE_LDAP:
from django_auth_ldap.backend import LDAPBackend
from .daoora import DaoOra
from .const import Const
from .aes_decryptor import Prpcrypt
from .models import users, workflow,ora_primary_config, ora_tables,ora_tab_privs,operation_ctl,operation_record
from sql.sendmail import MailSender
from .getnow import getNow
import logging
from sql.tasks import syncDictData,mailDba,wechatDba,dingDba
logger = logging.getLogger('default')
mailSender = MailSender()
daoora = DaoOra()
prpCryptor = Prpcrypt()
login_failure_counter = {} #登录失败锁定计数器,给loginAuthenticate用的
sqlSHA1_cache = {} #存储SQL文本与SHA1值的对应关系,尽量减少与数据库的交互次数,提高效率。格式: {工单ID1:{SQL内容1:sqlSHA1值1, SQL内容2:sqlSHA1值2},}
def log_mail_record(login_failed_message):
mail_title = 'login inception'
logger.warning(login_failed_message)
mailSender.sendEmail(mail_title, login_failed_message, getattr(settings, 'MAIL_REVIEW_DBA_ADDR'))
#ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def loginAuthenticate(username, password):
"""登录认证,包含一个登录失败计数器,5分钟内连续失败5次的账号,会被锁定5分钟"""
lockCntThreshold = settings.LOCK_CNT_THRESHOLD
lockTimeThreshold = settings.LOCK_TIME_THRESHOLD
#服务端二次验证参数
strUsername = username
strPassword = password
if strUsername == "" or strPassword == "" or strUsername is None or strPassword is None:
result = {'status':2, 'msg':'登录用户名或密码为空,请重新输入!', 'data':''}
elif strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (datetime.datetime.now() - login_failure_counter[strUsername]["last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status':3, 'msg':'登录失败超过5次,该账号已被锁定5分钟!', 'data':''}
else:
correct_users = users.objects.filter(username=strUsername)
if len(correct_users) == 1 and correct_users[0].is_active and check_password(strPassword, correct_users[0].password) == True:
#调用了django内置函数check_password函数检测输入的密码是否与django默认的PBKDF2算法相匹配
if strUsername in login_failure_counter:
#如果登录失败计数器中存在该用户名,则清除之
login_failure_counter.pop(strUsername)
result = {'status':0, 'msg':'ok', 'data':''}
else:
if strUsername not in login_failure_counter:
#第一次登录失败,登录失败计数器中不存在该用户,则创建一个该用户的计数器
login_failure_counter[strUsername] = {"cnt":1, "last_failure_time":datetime.datetime.now()}
else:
if (datetime.datetime.now() - login_failure_counter[strUsername]["last_failure_time"]).seconds <= lockTimeThreshold:
login_failure_counter[strUsername]["cnt"] += 1
else:
#上一次登录失败时间早于5分钟前,则重新计数。以达到超过5分钟自动解锁的目的。
login_failure_counter[strUsername]["cnt"] = 1
login_failure_counter[strUsername]["last_failure_time"] = datetime.datetime.now()
if login_failure_counter[strUsername]["cnt"]%10==0:
log_mail_record('user:{},login failed, fail count:{}'.format(strUsername,login_failure_counter[strUsername]["cnt"]))
result = {'status':1, 'msg':'用户名或密码错误,请重新输入!', 'data':''}
return result
#ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def authenticateEntry(request):
"""接收http请求,然后把请求中的用户名密码传给loginAuthenticate去验证"""
if request.is_ajax():
strUsername = request.POST.get('username')
strPassword = request.POST.get('password')
else:
strUsername = request.POST['username']
strPassword = request.POST['password']
lockCntThreshold = settings.LOCK_CNT_THRESHOLD
lockTimeThreshold = settings.LOCK_TIME_THRESHOLD
if settings.ENABLE_LDAP:
ldap = LDAPBackend()
try:
user = ldap.authenticate(username=strUsername, password=strPassword)
except Exception as err:
result = {'msg': 'l | ] != ";":
finalResult['status'] = 'error'
finalResult['msg'] = 'Oracle SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip(';')
#使用explain plan进行自动审核
try:
resultList = daoora.sqlAutoreview(sqlContent, clusterName)
except Exception as err:
finalResult['status'] = 'error'
finalResult['msg'] = str(err)
else:
for result in resultList:
if result['stage'] != 'CHECKED':
finalResult['status'] = 'error'
finalResult['msg'] = result['errormessage']+' -- '+result['sql']
#return HttpResponse(json.dumps(finalResult), content_type='application/json')
#要把result转成JSON存进数据库里,方便SQL单子详细信息展示
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步表数据字典
@csrf_exempt
def syncoradict(request):
primaries = ora_primary_config.objects.all().order_by('cluster_name')
listCluster = [primary.cluster_name for primary in primaries]
clusterListSync = request.POST.get('cluster_list_sync')
if clusterListSync:
clusterListSync=json.loads(clusterListSync)
ctl = operation_ctl.objects.get(data_type='数据字典' ,opt_type='同步')
if ctl.status == '进行中':
finalResult = {'status':'error','msg':'有任务进行中'}
else:
ctl.status='进行中'
ctl.save()
syncDictData.delay(clusterListSync)
finalResult = {'status':'ok'}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
finalResult = {'listCluster':listCluster}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步ldap用户到数据库
@csrf_exempt
def syncldapuser(request):
if not settings.ENABLE_LDAP:
result = {'msg': 'LDAP支持未开启'}
return HttpResponse(json.dumps(result), content_type='application/json')
ldapback = LDAPBackend()
ldap = ldapback.ldap
ldapconn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
tls = getattr(settings, 'AUTH_LDAP_START_TLS', None)
if tls:
ldapconn.start_tls_s()
binddn = settings.AUTH_LDAP_BIND_DN
bind_password = settings.AUTH_LDAP_BIND_PASSWORD
basedn = settings.AUTH_LDAP_BASEDN
ldapconn.simple_bind_s(binddn, bind_password)
ldapusers = ldapconn.search_s(basedn, ldap.SCOPE_SUBTREE, 'objectclass=*', attrlist=settings.AUTH_LDAP_USER_ATTRLIST)
#ldap中username存在条目的第一个元素的uid中,定义的username_field不再使用,改为截取user_tag
display_field = settings.AUTH_LDAP_USER_ATTR_MAP['display']
email_field = settings.AUTH_LDAP_USER_ATTR_MAP['email']
count = 0
try:
for user in ldapusers:
user_tag=user[0].split(',')
user_attr = user[1]
if user_tag and user_attr:
username = user_tag[0][user_tag[0].find('=')+1:].encode()
display = user_attr.get(display_field,['none'.encode(),])[0]
email = user_attr.get(email_field,['none'.encode(),])[0]
already_user = users.objects.filter(username=username.decode()).filter(is_ldapuser=True)
if len(already_user) == 0:
u = users(username=username.decode(), display=display.decode(), email=email.decode(), is_ldapuser=True,is_active=0)
u.save()
count += 1
except Exception as err:
result = {'msg': '用户{0}导入错误:{1}'.format(username,str(err))}
return HttpResponse(json.dumps(result))
else:
result = {'msg': '同步{}个用户.'.format(count)}
return HttpResponse(json.dumps(result), content_type='application/json')
#请求图表数据
@csrf_exempt
def getMonthCharts(request):
result = daoora.getWorkChartsByMonth()
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getPersonCharts(request):
result = daoora.getWorkChartsByPerson()
return HttpResponse(json.dumps(result), content_type='application/json')
def getSqlSHA1(workflowId):
"""调用django ORM从数据库里查出review_content,从其中获取sqlSHA1值"""
workflowDetail = get_object_or_404(workflow, pk=workflowId)
dictSHA1 = {}
# 使用json.loads方法,把review_content从str转成list,
listReCheckResult = json.loads(workflowDetail.review_content)
for rownum in range(len(listReCheckResult)):
id = rownum + 1
sqlSHA1 = listReCheckResult[rownum][10]
if sqlSHA1 != '':
dictSHA1[id] = sqlSHA1
if dictSHA1 != {}:
# 如果找到有sqlSHA1值,说明是通过pt-OSC操作的,将其放入缓存。
# 因为使用OSC执行的SQL占较少数,所以不设置缓存过期时间
sqlSHA1_cache[workflowId] = dictSHA1
return dictSHA1
@csrf_exempt
def getOscPercent(request):
"""获取该SQL的pt-OSC执行进度和剩余时间"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
dictSHA1 = {}
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
# cachehit = "已命中"
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
result = inceptionDao.getOscPercent(sqlSHA1) #成功获取到SHA1值,去inception里面查询进度
if result["status"] == 0:
# 获取到进度值
pctResult = result
else:
# result["status"] == 1, 未获取到进度值,需要与workflow.execute_result对比,来判断是已经执行过了,还是还未执行
execute_result = workflow.objects.get(id=workflowId).execute_result
try:
listExecResult = json.loads(execute_result)
except ValueError:
listExecResult = execute_result
if type(listExecResult) == list and len(listExecResult) >= sqlID-1:
if dictSHA1[sqlID] in listExecResult[sqlID-1][10]:
# 已经执行完毕,进度值置为100
pctResult = {"status":0, "msg":"ok", "data":{"percent":100, "timeRemained":""}}
else:
# 可能因为前一条SQL是DML,正在执行中;或者还没执行到这一行。但是status返回的是4,而当前SQL实际上还未开始执行。这里建议前端进行重试
pctResult = {"status":-3, "msg":"进度未知", "data":{"percent":-100, "timeRemained":""}}
elif dictSHA1 != {} and sqlID not in dictSHA1:
pctResult = {"status":4, "msg":"该行SQL不是由pt-OSC执行的", "data":""}
else:
pctResult = {"status":-2, "msg":"整个工单不由pt-OSC执行", "data":""}
return HttpResponse(json.dumps(pctResult), content_type='application/json')
@csrf_exempt
def getWorkflowStatus(request):
"""获取某个工单的当前状态"""
workflowId = request.POST['workflowid']
if workflowId == '' or workflowId is None :
context = {"status":-1 ,'msg': 'workflowId参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
workflowDetail = get_object_or_404(workflow, pk=workflowId)
workflowStatus = workflowDetail.status
result = {"status":workflowStatus, "msg":"", "data":""}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def stopOscProgress(request):
"""中止该SQL的pt-OSC进程"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
loginUser = request.session.get('login_username', False)
workflowDetail = workflow.objects.get(id=workflowId)
try:
listAllReviewMen = json.loads(workflowDetail.review_man)
except ValueError:
listAllReviewMen = (workflowDetail.review_man, )
#服务器端二次验证,当前工单状态必须为等待人工审核,正在执行人工审核动作的当前登录用户必须为审核人. 避免攻击或被接口测试工具强行绕过
if workflowDetail.status != Const.workflowStatus['executing']:
context = {"status":-1, "msg":'当前工单状态不是"执行中",请刷新当前页面!', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
if loginUser is None or loginUser not in listAllReviewMen:
context = {"status":-1 ,'msg': '当前登录用户不是审核人,请重新登录.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
optResult = inceptionDao.stopOscProgress(sqlSHA1)
else:
optResult = {"status":4, "msg":"不是由pt-OSC执行的", "data":""}
return HttpRespense(json.dumps(optResult), content_type='application/json')
@csrf_exempt
def manExec(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
with transaction.atomic():
try:
workflowDetail = workflow.objects.select_for_update().get(id=workflowId,status__in=(Const.workflowStatus['manreviewing'],Const.workflowStatus['autoreviewwrong'],))
except Exception:
result = {'msg': '已经在处理'}
return HttpResponse(json.dumps(result), content_type='application/json')
try:
reviewMen = json.loads(workflowDetail.review_man)
except Exception:
reviewMen = workflowDetail.review_man
if not loginUser in reviewMen:
result = {'msg': '你不在审核人之列'}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.status = Const.workflowStatus['manexec']
workflowDetail.operator = loginUser
try:
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '更改状态为手工执行'
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='application/json')
#获取当前请求url
def _getDetailUrl(request):
scheme = request.scheme
#host = request.META['HTTP_HOST']
host = getattr(settings,'WAN_HOST')
return "%s://%s/detail/" % (scheme, host)
@csrf_exempt
def manFinish(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
executeStatus = request.POST['status']
executeResult = request.POST['content']
workflowDetail = workflow.objects.get(id=workflowId)
if loginUser != workflowDetail.operator:
result = {"status":-1,"msg":"需要处理人操作"}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.execute_result = executeResult
if executeStatus == '0':
workflowDetail.status = Const.workflowStatus['manexcept']
elif executeStatus == '1':
workflowDetail.status = Const.workflowStatus['manfinish']
try:
workflowDetail.operator = loginUser
workflowDetail.finish_time = getNow()
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '保存成功'
#如果执行完毕了,则根据settings.py里的配置决定是否给提交者和DBA一封邮件提醒.DBA需要知晓审核并执行过的单子
url = _getDetailUrl(request) + str(workflowId) + '/'
#给主、副审核人,申请人,DBA各发一封邮件
engineer = workflowDetail.engineer
operator = workflowDetail.operator
workflowStatus = workflowDetail.status
workflowName = workflowDetail.workflow_name
objEngineer = users.objects.get(username=engineer)
strTitle = "SQL上线工单执行完毕 # " + str(workflowId)
strContent = "发起人:" + engineer + "\n操作人:" + operator + "\n工单地址:" + url + "\n工单名称: " + workflowName +"\n执行结果:" + workflowStatus
mailDba.delay(strTitle, strContent, [objEngineer.email])
wechatDba.delay(strTitle, strContent,objEngineer.wechat_account)
dingDba.delay(strContent,objEngineer.mobile)
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def privMod(request,operation):
loginUser = request.session.get('login_username', False)
hasTableId = [tab.table_id for tab in ora_tab_privs.objects.filter(username = request.GET.get('username'))]
if operation == 'add':
tables = ora_tables.objects.all().exclude(id__in=hasTableId)
elif operation == 'delete':
tables = ora_tables.objects.all().filter(id__in=hasTableId)
clusterName = request.GET.get('cluster_name')
schema = request.GET.get('schema')
cluster_list = []
schema_list = []
table_list = []
table_dict = {}
if not clusterName and not schema:
cluster_list = [primary.cluster_name for primary in ora_primary_config.objects.all()]
cluster_list.sort()
elif clusterName and not schema:
instanceId = ora_primary_config.objects.get(cluster_name=clusterName).id
schema_list = list(set([table.schema_name for table in tables.filter(instance_id=instanceId)]))
schema_list.sort()
elif clusterName and schema:
instanceId = ora_primary_config.objects.get(cluster_name=clusterName).id
table_list = [table.table for table in tables.filter(instance_id=instanceId).filter(schema_name=schema)]
table_dict = {}
for table in tables.filter(instance_id=instanceId).filter(schema_name=schema).order_by('table'):
table_dict[table.id] = table.table
result = {'cluster_list':cluster_list,'schema_list':schema_list,'table_list':table_dict}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def privCommit(request,operation):
loginUser = request.session.get('login_username', False)
if loginUser != 'admin':
context = {'errMsg': '无权限访问该页面'}
return render(request, 'error.html', context)
username = request.POST.get('username')
table_list = request.POST.get('table_list')
if table_list:
oriTabList = json.loads(table_list)
else:
status = 'error'
msg = '选择为空'
result = {'status':status,'msg':msg}
return HttpResponse(json.dumps(result), content_type='application/json')
extra_inst_list = request.POST.get('extra_inst_list')
if extra_inst_list:
extraInstList = json.loads(extra_inst_list)
#if len(extraInstList) > 0:
extraTabList = []
for extraInst in extraInstList:
instance_id = ora_primary_config.objects.get(cluster_name=extraInst).id
for i in range(0,len(oriTabList)):
table_id = oriTabList[i]
oraTab = ora_tables.objects.get(id = int(table_id))
try:
extraId = ora_tables.objects.get(instance_id = instance_id,schema_name=oraTab.schema_name,table=oraTab.table).id
except Exception as err:
pass
else:
extraTabList.append(extraId)
tabList = oriTabList + extraTabList
status = 'saved'
msg = '保存成功'
for table_id in tabList:
try:
if operation == 'add':
p = ora_tab_privs.objects.filter(username = username,table = ora_tables(id = int(table_id)))
if p.count() != 0:
status = 'save failed'
msg = '请勿重复保存'
else:
ora_tab_privs(username = username,table = ora_tables(id = int(table_id))).save()
elif operation == 'delete':
p = ora_tab_privs.objects.filter(username = username,table = ora_tables(id = int(table_id)))
if len(p) == 0:
status = 'save failed'
msg = '请勿重复删除'
else:
p.delete()
except IntegrityError:
status = 'save failed'
msg = '请勿重复保存'
except Exception as e:
print(str(e))
status = 'save failed'
msg = '有数据保存/删除失败'
else:
continue
result = {'status':status,'msg':msg}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getResult(request):
logon_user = request.session.get('login_username', False)
clusterName = request.POST.get('cluster_name')
sqlContent = request.POST.get('sql_content')
finalStatus,msg,headerList,queryResult = daoora.query(logon_user,clusterName,sqlContent)
paginator = Paginator(queryResult, 10)
after_range_num = 5
before_range_num = 4
try:
page = int(request.GET.get('page','1'))
if page < 1:
page = 1
except ValueError:
page = 1
try:
queryResultP = paginator.page(page)
except (EmptyPage,InvalidPage,PageNotAnInteger):
queryResultP = paginator.page(1)
if page >= after_range_num:
page_range = paginator.page_range[page-after_range_num:page+before_range_num]
else:
page_range = paginator.page_range[0:int(page)+before_range_num]
result = {'final_status':finalStatus,'msg':msg,'header_list':headerList,'query_result':queryResultP}
return HttpResponse(locals(), content_type='application/json')
| dap authorization failed'}
return HttpResponse(json.dumps(result), content_type='application/json')
if strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (
datetime.datetime.now() - login_failure_counter[strUsername][
"last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status': 3, 'msg': '登录失败超过5次,该账号已被锁定5分钟!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
if user and user.is_active:
request.session['login_username'] = strUsername
result = {'status': 0, 'msg': 'ok', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
result = loginAuthenticate(strUsername, strPassword)
if result['status'] == 0:
request.session['login_username'] = strUsername
return HttpResponse(json.dumps(result), content_type='application/json')
#Oracle SQL简单审核
@csrf_exempt
def orasimplecheck(request):
if request.is_ajax():
sqlContent = request.POST.get('sql_content')
clusterName = request.POST.get('cluster_name')
else:
sqlContent = request.POST['sql_content']
clusterName = request.POST['cluster_name']
finalResult = {'status':'ok', 'msg':'检测通过', 'data':[]}
#服务器端参数验证
if sqlContent is None or clusterName is None:
finalResult['status'] = 'error'
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip()
if sqlContent[-1 | identifier_body |
views_ajax.py | # -*- coding: UTF-8 -*-
import re
import json
import datetime
import multiprocessing
import pdb
from django.db.models import Q
from django.db.utils import IntegrityError
from django.db import transaction
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
if settings.ENABLE_LDAP:
from django_auth_ldap.backend import LDAPBackend
from .daoora import DaoOra
from .const import Const
from .aes_decryptor import Prpcrypt
from .models import users, workflow,ora_primary_config, ora_tables,ora_tab_privs,operation_ctl,operation_record
from sql.sendmail import MailSender
from .getnow import getNow
import logging
from sql.tasks import syncDictData,mailDba,wechatDba,dingDba
logger = logging.getLogger('default')
mailSender = MailSender()
daoora = DaoOra()
prpCryptor = Prpcrypt()
login_failure_counter = {} #登录失败锁定计数器,给loginAuthenticate用的
sqlSHA1_cache = {} #存储SQL文本与SHA1值的对应关系,尽量减少与数据库的交互次数,提高效率。格式: {工单ID1:{SQL内容1:sqlSHA1值1, SQL内容2:sqlSHA1值2},}
def log_mail_record(login_failed_message):
mail_title = 'login inception'
logger.warning(login_failed_message)
mailSender.sendEmail(mail_title, login_failed_message, getattr(settings, 'MAIL_REVIEW_DBA_ADDR'))
#ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def loginAuthenticate(username, password):
"""登录认证,包含一个登录失败计数器,5分钟内连续失败5次的账号,会被锁定5分钟"""
lockCntThreshold = settings.LOCK_CNT_THRESHOLD
lockTimeThreshold = settings.LOCK_TIME_THRESHOLD
#服务端二次验证参数
strUsername = username
strPassword = password
if strUsername == "" or strPassword == "" or strUsername is None or strPassword is None:
result = {'status':2, 'msg':'登录用户名或密码为空,请重新输入!', 'data':''}
elif strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (datetime.datetime.now() - login_failure_counter[strUsername]["last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status':3, 'msg':'登录失败超过5次,该账号已被锁定5分钟!', 'data':''}
else:
correct_users = users.objects.filter(username=strUsername)
if len(correct_users) == 1 and correct_users[0].is_active and check_password(strPassword, correct_users[0].password) == True:
#调用了django内置函数check_password函数检测输入的密码是否与django默认的PBKDF2算法相匹配
if strUsername in login_failure_counter:
#如果登录失败计数器中存在该用户名,则清除之
login_failure_counter.pop(strUsername)
result = {'status':0, 'msg':'ok', 'data':''}
else:
if strUsername not in login_failure_counter:
#第一次登录失败,登录失败计数器中不存在该用户,则创建一个该用户的计数器
login_failure_counter[strUsername] = {"cnt":1, "last_failure_time":datetime.datetime.now()}
else:
if (datetime.datetime.now() - login_failure_counter[strUsername]["last_failure_time"]).seconds <= lockTimeThreshold:
login_failure_counter[strUsername]["cnt"] += 1
else:
#上一次登录失败时间早于5分钟前,则重新计数。以达到超过5分钟自动解锁的目的。
login_failure_counter[strUsername]["cnt"] = 1
login_failure_counter[strUsername]["last_failure_time"] = datetime.datetime.now()
if login_failure_counter[strUsername]["cnt"]%10==0:
log_mail_record('user:{},login failed, fail count:{}'.format(strUsername,login_failure_counter[strUsername]["cnt"]))
result = {'status':1, 'msg':'用户名或密码错误,请重新输入!', 'data':''}
return result
#ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def authenticateEntry(request):
"""接收http请求,然后把请求中的用户名密码传给loginAuthenticate去验证"""
if request.is_ajax():
strUsername = request.POST.get('username')
strPassword = request.POST.get('password')
else:
strUsername = request.POST['username']
strPassword = request.POST['password']
lockCntThreshold = settings.LOCK_CNT_THRESHOLD
lockTimeThreshold = settings.LOCK_TIME_THRESHOLD
if settings.ENABLE_LDAP:
ldap = LDAPBackend()
try:
user = ldap.authenticate(username=strUsername, password=strPassword)
except Exception as err:
result = {'msg': 'ldap authorization failed'}
return HttpResponse(json.dumps(result), content_type='application/json')
if strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (
datetime.datetime.now() - login_failure_counter[strUsername][
"last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status': 3, 'msg': '登录失败超过5次,该账号已被锁定5分钟!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
if user and user.is_active:
request.session['login_username'] = strUsername
result = {'status': 0, 'msg': 'ok', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
result = loginAuthenticate(strUsername, strPassword)
if result['status'] == 0:
request.session['login_username'] = strUsername
return HttpRespo | sqlContent = request.POST['sql_content']
clusterName = request.POST['cluster_name']
finalResult = {'status':'ok', 'msg':'检测通过', 'data':[]}
#服务器端参数验证
if sqlContent is None or clusterName is None:
finalResult['status'] = 'error'
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip()
if sqlContent[-1] != ";":
finalResult['status'] = 'error'
finalResult['msg'] = 'Oracle SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip(';')
#使用explain plan进行自动审核
try:
resultList = daoora.sqlAutoreview(sqlContent, clusterName)
except Exception as err:
finalResult['status'] = 'error'
finalResult['msg'] = str(err)
else:
for result in resultList:
if result['stage'] != 'CHECKED':
finalResult['status'] = 'error'
finalResult['msg'] = result['errormessage']+' -- '+result['sql']
#return HttpResponse(json.dumps(finalResult), content_type='application/json')
#要把result转成JSON存进数据库里,方便SQL单子详细信息展示
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步表数据字典
@csrf_exempt
def syncoradict(request):
primaries = ora_primary_config.objects.all().order_by('cluster_name')
listCluster = [primary.cluster_name for primary in primaries]
clusterListSync = request.POST.get('cluster_list_sync')
if clusterListSync:
clusterListSync=json.loads(clusterListSync)
ctl = operation_ctl.objects.get(data_type='数据字典' ,opt_type='同步')
if ctl.status == '进行中':
finalResult = {'status':'error','msg':'有任务进行中'}
else:
ctl.status='进行中'
ctl.save()
syncDictData.delay(clusterListSync)
finalResult = {'status':'ok'}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
finalResult = {'listCluster':listCluster}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步ldap用户到数据库
@csrf_exempt
def syncldapuser(request):
if not settings.ENABLE_LDAP:
result = {'msg': 'LDAP支持未开启'}
return HttpResponse(json.dumps(result), content_type='application/json')
ldapback = LDAPBackend()
ldap = ldapback.ldap
ldapconn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
tls = getattr(settings, 'AUTH_LDAP_START_TLS', None)
if tls:
ldapconn.start_tls_s()
binddn = settings.AUTH_LDAP_BIND_DN
bind_password = settings.AUTH_LDAP_BIND_PASSWORD
basedn = settings.AUTH_LDAP_BASEDN
ldapconn.simple_bind_s(binddn, bind_password)
ldapusers = ldapconn.search_s(basedn, ldap.SCOPE_SUBTREE, 'objectclass=*', attrlist=settings.AUTH_LDAP_USER_ATTRLIST)
#ldap中username存在条目的第一个元素的uid中,定义的username_field不再使用,改为截取user_tag
display_field = settings.AUTH_LDAP_USER_ATTR_MAP['display']
email_field = settings.AUTH_LDAP_USER_ATTR_MAP['email']
count = 0
try:
for user in ldapusers:
user_tag=user[0].split(',')
user_attr = user[1]
if user_tag and user_attr:
username = user_tag[0][user_tag[0].find('=')+1:].encode()
display = user_attr.get(display_field,['none'.encode(),])[0]
email = user_attr.get(email_field,['none'.encode(),])[0]
already_user = users.objects.filter(username=username.decode()).filter(is_ldapuser=True)
if len(already_user) == 0:
u = users(username=username.decode(), display=display.decode(), email=email.decode(), is_ldapuser=True,is_active=0)
u.save()
count += 1
except Exception as err:
result = {'msg': '用户{0}导入错误:{1}'.format(username,str(err))}
return HttpResponse(json.dumps(result))
else:
result = {'msg': '同步{}个用户.'.format(count)}
return HttpResponse(json.dumps(result), content_type='application/json')
#请求图表数据
@csrf_exempt
def getMonthCharts(request):
result = daoora.getWorkChartsByMonth()
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getPersonCharts(request):
result = daoora.getWorkChartsByPerson()
return HttpResponse(json.dumps(result), content_type='application/json')
def getSqlSHA1(workflowId):
"""调用django ORM从数据库里查出review_content,从其中获取sqlSHA1值"""
workflowDetail = get_object_or_404(workflow, pk=workflowId)
dictSHA1 = {}
# 使用json.loads方法,把review_content从str转成list,
listReCheckResult = json.loads(workflowDetail.review_content)
for rownum in range(len(listReCheckResult)):
id = rownum + 1
sqlSHA1 = listReCheckResult[rownum][10]
if sqlSHA1 != '':
dictSHA1[id] = sqlSHA1
if dictSHA1 != {}:
# 如果找到有sqlSHA1值,说明是通过pt-OSC操作的,将其放入缓存。
# 因为使用OSC执行的SQL占较少数,所以不设置缓存过期时间
sqlSHA1_cache[workflowId] = dictSHA1
return dictSHA1
@csrf_exempt
def getOscPercent(request):
"""获取该SQL的pt-OSC执行进度和剩余时间"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
dictSHA1 = {}
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
# cachehit = "已命中"
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
result = inceptionDao.getOscPercent(sqlSHA1) #成功获取到SHA1值,去inception里面查询进度
if result["status"] == 0:
# 获取到进度值
pctResult = result
else:
# result["status"] == 1, 未获取到进度值,需要与workflow.execute_result对比,来判断是已经执行过了,还是还未执行
execute_result = workflow.objects.get(id=workflowId).execute_result
try:
listExecResult = json.loads(execute_result)
except ValueError:
listExecResult = execute_result
if type(listExecResult) == list and len(listExecResult) >= sqlID-1:
if dictSHA1[sqlID] in listExecResult[sqlID-1][10]:
# 已经执行完毕,进度值置为100
pctResult = {"status":0, "msg":"ok", "data":{"percent":100, "timeRemained":""}}
else:
# 可能因为前一条SQL是DML,正在执行中;或者还没执行到这一行。但是status返回的是4,而当前SQL实际上还未开始执行。这里建议前端进行重试
pctResult = {"status":-3, "msg":"进度未知", "data":{"percent":-100, "timeRemained":""}}
elif dictSHA1 != {} and sqlID not in dictSHA1:
pctResult = {"status":4, "msg":"该行SQL不是由pt-OSC执行的", "data":""}
else:
pctResult = {"status":-2, "msg":"整个工单不由pt-OSC执行", "data":""}
return HttpResponse(json.dumps(pctResult), content_type='application/json')
@csrf_exempt
def getWorkflowStatus(request):
"""获取某个工单的当前状态"""
workflowId = request.POST['workflowid']
if workflowId == '' or workflowId is None :
context = {"status":-1 ,'msg': 'workflowId参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
workflowDetail = get_object_or_404(workflow, pk=workflowId)
workflowStatus = workflowDetail.status
result = {"status":workflowStatus, "msg":"", "data":""}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def stopOscProgress(request):
"""中止该SQL的pt-OSC进程"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
loginUser = request.session.get('login_username', False)
workflowDetail = workflow.objects.get(id=workflowId)
try:
listAllReviewMen = json.loads(workflowDetail.review_man)
except ValueError:
listAllReviewMen = (workflowDetail.review_man, )
#服务器端二次验证,当前工单状态必须为等待人工审核,正在执行人工审核动作的当前登录用户必须为审核人. 避免攻击或被接口测试工具强行绕过
if workflowDetail.status != Const.workflowStatus['executing']:
context = {"status":-1, "msg":'当前工单状态不是"执行中",请刷新当前页面!', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
if loginUser is None or loginUser not in listAllReviewMen:
context = {"status":-1 ,'msg': '当前登录用户不是审核人,请重新登录.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
optResult = inceptionDao.stopOscProgress(sqlSHA1)
else:
optResult = {"status":4, "msg":"不是由pt-OSC执行的", "data":""}
return HttpRespense(json.dumps(optResult), content_type='application/json')
@csrf_exempt
def manExec(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
with transaction.atomic():
try:
workflowDetail = workflow.objects.select_for_update().get(id=workflowId,status__in=(Const.workflowStatus['manreviewing'],Const.workflowStatus['autoreviewwrong'],))
except Exception:
result = {'msg': '已经在处理'}
return HttpResponse(json.dumps(result), content_type='application/json')
try:
reviewMen = json.loads(workflowDetail.review_man)
except Exception:
reviewMen = workflowDetail.review_man
if not loginUser in reviewMen:
result = {'msg': '你不在审核人之列'}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.status = Const.workflowStatus['manexec']
workflowDetail.operator = loginUser
try:
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '更改状态为手工执行'
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='application/json')
#获取当前请求url
def _getDetailUrl(request):
scheme = request.scheme
#host = request.META['HTTP_HOST']
host = getattr(settings,'WAN_HOST')
return "%s://%s/detail/" % (scheme, host)
@csrf_exempt
def manFinish(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
executeStatus = request.POST['status']
executeResult = request.POST['content']
workflowDetail = workflow.objects.get(id=workflowId)
if loginUser != workflowDetail.operator:
result = {"status":-1,"msg":"需要处理人操作"}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.execute_result = executeResult
if executeStatus == '0':
workflowDetail.status = Const.workflowStatus['manexcept']
elif executeStatus == '1':
workflowDetail.status = Const.workflowStatus['manfinish']
try:
workflowDetail.operator = loginUser
workflowDetail.finish_time = getNow()
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '保存成功'
#如果执行完毕了,则根据settings.py里的配置决定是否给提交者和DBA一封邮件提醒.DBA需要知晓审核并执行过的单子
url = _getDetailUrl(request) + str(workflowId) + '/'
#给主、副审核人,申请人,DBA各发一封邮件
engineer = workflowDetail.engineer
operator = workflowDetail.operator
workflowStatus = workflowDetail.status
workflowName = workflowDetail.workflow_name
objEngineer = users.objects.get(username=engineer)
strTitle = "SQL上线工单执行完毕 # " + str(workflowId)
strContent = "发起人:" + engineer + "\n操作人:" + operator + "\n工单地址:" + url + "\n工单名称: " + workflowName +"\n执行结果:" + workflowStatus
mailDba.delay(strTitle, strContent, [objEngineer.email])
wechatDba.delay(strTitle, strContent,objEngineer.wechat_account)
dingDba.delay(strContent,objEngineer.mobile)
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def privMod(request,operation):
loginUser = request.session.get('login_username', False)
hasTableId = [tab.table_id for tab in ora_tab_privs.objects.filter(username = request.GET.get('username'))]
if operation == 'add':
tables = ora_tables.objects.all().exclude(id__in=hasTableId)
elif operation == 'delete':
tables = ora_tables.objects.all().filter(id__in=hasTableId)
clusterName = request.GET.get('cluster_name')
schema = request.GET.get('schema')
cluster_list = []
schema_list = []
table_list = []
table_dict = {}
if not clusterName and not schema:
cluster_list = [primary.cluster_name for primary in ora_primary_config.objects.all()]
cluster_list.sort()
elif clusterName and not schema:
instanceId = ora_primary_config.objects.get(cluster_name=clusterName).id
schema_list = list(set([table.schema_name for table in tables.filter(instance_id=instanceId)]))
schema_list.sort()
elif clusterName and schema:
instanceId = ora_primary_config.objects.get(cluster_name=clusterName).id
table_list = [table.table for table in tables.filter(instance_id=instanceId).filter(schema_name=schema)]
table_dict = {}
for table in tables.filter(instance_id=instanceId).filter(schema_name=schema).order_by('table'):
table_dict[table.id] = table.table
result = {'cluster_list':cluster_list,'schema_list':schema_list,'table_list':table_dict}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def privCommit(request,operation):
loginUser = request.session.get('login_username', False)
if loginUser != 'admin':
context = {'errMsg': '无权限访问该页面'}
return render(request, 'error.html', context)
username = request.POST.get('username')
table_list = request.POST.get('table_list')
if table_list:
oriTabList = json.loads(table_list)
else:
status = 'error'
msg = '选择为空'
result = {'status':status,'msg':msg}
return HttpResponse(json.dumps(result), content_type='application/json')
extra_inst_list = request.POST.get('extra_inst_list')
if extra_inst_list:
extraInstList = json.loads(extra_inst_list)
#if len(extraInstList) > 0:
extraTabList = []
for extraInst in extraInstList:
instance_id = ora_primary_config.objects.get(cluster_name=extraInst).id
for i in range(0,len(oriTabList)):
table_id = oriTabList[i]
oraTab = ora_tables.objects.get(id = int(table_id))
try:
extraId = ora_tables.objects.get(instance_id = instance_id,schema_name=oraTab.schema_name,table=oraTab.table).id
except Exception as err:
pass
else:
extraTabList.append(extraId)
tabList = oriTabList + extraTabList
status = 'saved'
msg = '保存成功'
for table_id in tabList:
try:
if operation == 'add':
p = ora_tab_privs.objects.filter(username = username,table = ora_tables(id = int(table_id)))
if p.count() != 0:
status = 'save failed'
msg = '请勿重复保存'
else:
ora_tab_privs(username = username,table = ora_tables(id = int(table_id))).save()
elif operation == 'delete':
p = ora_tab_privs.objects.filter(username = username,table = ora_tables(id = int(table_id)))
if len(p) == 0:
status = 'save failed'
msg = '请勿重复删除'
else:
p.delete()
except IntegrityError:
status = 'save failed'
msg = '请勿重复保存'
except Exception as e:
print(str(e))
status = 'save failed'
msg = '有数据保存/删除失败'
else:
continue
result = {'status':status,'msg':msg}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getResult(request):
logon_user = request.session.get('login_username', False)
clusterName = request.POST.get('cluster_name')
sqlContent = request.POST.get('sql_content')
finalStatus,msg,headerList,queryResult = daoora.query(logon_user,clusterName,sqlContent)
paginator = Paginator(queryResult, 10)
after_range_num = 5
before_range_num = 4
try:
page = int(request.GET.get('page','1'))
if page < 1:
page = 1
except ValueError:
page = 1
try:
queryResultP = paginator.page(page)
except (EmptyPage,InvalidPage,PageNotAnInteger):
queryResultP = paginator.page(1)
if page >= after_range_num:
page_range = paginator.page_range[page-after_range_num:page+before_range_num]
else:
page_range = paginator.page_range[0:int(page)+before_range_num]
result = {'final_status':finalStatus,'msg':msg,'header_list':headerList,'query_result':queryResultP}
return HttpResponse(locals(), content_type='application/json')
| nse(json.dumps(result), content_type='application/json')
#Oracle SQL简单审核
@csrf_exempt
def orasimplecheck(request):
if request.is_ajax():
sqlContent = request.POST.get('sql_content')
clusterName = request.POST.get('cluster_name')
else:
| conditional_block |
views_ajax.py | # -*- coding: UTF-8 -*-
import re
import json
import datetime
import multiprocessing
import pdb
from django.db.models import Q
from django.db.utils import IntegrityError
from django.db import transaction
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger
if settings.ENABLE_LDAP:
from django_auth_ldap.backend import LDAPBackend
from .daoora import DaoOra
from .const import Const
from .aes_decryptor import Prpcrypt
from .models import users, workflow,ora_primary_config, ora_tables,ora_tab_privs,operation_ctl,operation_record
from sql.sendmail import MailSender
from .getnow import getNow
import logging
from sql.tasks import syncDictData,mailDba,wechatDba,dingDba
logger = logging.getLogger('default')
mailSender = MailSender()
daoora = DaoOra()
prpCryptor = Prpcrypt()
login_failure_counter = {} #登录失败锁定计数器,给loginAuthenticate用的
sqlSHA1_cache = {} #存储SQL文本与SHA1值的对应关系,尽量减少与数据库的交互次数,提高效率。格式: {工单ID1:{SQL内容1:sqlSHA1值1, SQL内容2:sqlSHA1值2},}
def log_mail_record(login_failed_message):
mail_title = 'login inception'
logger.warning(login_failed_message)
mailSender.sendEmail(mail_title, login_failed_message, getattr(settings, 'MAIL_REVIEW_DBA_ADDR'))
#ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def loginAuthenticate(username, password):
"""登录认证,包含一个登录失败计数器,5分钟内连续失败5次的账号,会被锁定5分钟"""
lockCntThreshold = settings.LOCK_CNT_THRESHOLD
lockTimeThreshold = settings.LOCK_TIME_THRESHOLD
#服务端二次验证参数
strUsername = username
strPassword = password
if strUsername == "" or strPassword == "" or strUsername is None or strPassword is None:
result = {'status':2, 'msg':'登录用户名或密码为空,请重新输入!', 'data':''}
elif strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (datetime.datetime.now() - login_failure_counter[strUsername]["last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status':3, 'msg':'登录失败超过5次,该账号已被锁定5分钟!', 'data':''}
else:
correct_users = users.objects.filter(username=strUsername)
if len(correct_users) == 1 and correct_users[0].is_active and check_password(strPassword, correct_users[0].password) == True:
#调用了django内置函数check_password函数检测输入的密码是否与django默认的PBKDF2算法相匹配
if strUsername in login_failure_counter:
#如果登录失败计数器中存在该用户名,则清除之
login_failure_counter.pop(strUsername)
result = {'status':0, 'msg':'ok', 'data':''}
else:
if strUsername not in login_failure_counter:
#第一次登录失败,登录失败计数器中不存在该用户,则创建一个该用户的计数器
login_failure_counter[strUsername] = {"cnt":1, "last_failure_time":datetime.datetime.now()}
else:
if (datetime.datetime.now() - login_failure_counter[strUsername]["last_failure_time"]).seconds <= lockTimeThreshold:
login_failure_counter[strUsername]["cnt"] += 1
else:
#上一次登录失败时间早于5分钟前,则重新计数。以达到超过5分钟自动解锁的目的。
login_failure_counter[strUsername]["cnt"] = 1
login_failure_counter[strUsername]["last_failure_time"] = datetime.datetime.now()
if login_failure_counter[strUsername]["cnt"]%10==0:
log_mail_record('user:{},login failed, fail count:{}'.format(strUsername,login_failure_counter[strUsername]["cnt"]))
result = {'status':1, 'msg':'用户名或密码错误,请重新输入!', 'data':''}
return result
#ajax接口,登录页面调用,用来验证用户名密码
@csrf_exempt
def authenticateEntry(request):
"""接收http请求,然后把请求中的用户名密码传给loginAuthenticate去验证"""
if request.is_ajax():
strUsername = request.POST.get('username')
strPassword = request.POST.get('password')
else:
strUsername = request.POST['username']
strPassword = request.POST['password']
lockCntThreshold = settings.LOCK_CNT_THRESHOLD
lockTimeThreshold = settings.LOCK_TIME_THRESHOLD
if settings.ENABLE_LDAP:
ldap = LDAPBackend()
try:
user = ldap.authenticate(username=strUsername, password=strPassword)
except Exception as err:
result = {'msg': 'ldap authorization failed'}
return HttpResponse(json.dumps(result), content_type='application/json')
if strUsername in login_failure_counter and login_failure_counter[strUsername]["cnt"] >= lockCntThreshold and (
datetime.datetime.now() - login_failure_counter[strUsername][
"last_failure_time"]).seconds <= lockTimeThreshold:
log_mail_record('user:{},login failed, account locking...'.format(strUsername))
result = {'status': 3, 'msg': '登录失败超过5次,该账号已被锁定5分钟!', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
if user and user.is_active:
request.session['login_username'] = strUsername
result = {'status': 0, 'msg': 'ok', 'data': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
result = loginAuthenticate(strUsername, strPassword)
if result['status'] == 0:
request.session['login_username'] = strUsername
return HttpResponse(json.dumps(result), content_type='application/json')
#Oracle SQL简单审核
@csrf_exempt
def orasimplecheck(request):
if request.is_ajax():
sqlContent = request.POST.get('sql_content')
clusterName = request.POST.get('cluster_name')
else:
sqlContent = request.POST['sql_content']
clusterName = request.POST['cluster_name']
finalResult = {'status':'ok', 'msg':'检测通过', 'data':[]}
#服务器端参数验证
if sqlContent is None or clusterName is None:
finalResult['status'] = 'error'
finalResult['msg'] = '页面提交参数可能为空'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip()
if sqlContent[-1] != ";":
finalResult['status'] = 'error'
finalResult['msg'] = 'Oracle SQL语句结尾没有以;结尾,请重新修改并提交!'
return HttpResponse(json.dumps(finalResult), content_type='application/json')
sqlContent = sqlContent.rstrip(';')
#使用explain plan进行自动审核
try:
resultList = daoora.sqlAutoreview(sqlContent, clusterName)
except Exception as err:
finalResult['status'] = 'error'
finalResult['msg'] = str(err)
else:
for result in resultList:
if result['stage'] != 'CHECKED':
finalResult['status'] = 'error'
finalResult['msg'] = result['errormessage']+' -- '+result['sql']
#return HttpResponse(json.dumps(finalResult), content_type='application/json')
#要把result转成JSON存进数据库里,方便SQL单子详细信息展示
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步表数据字典
@csrf_exempt
def syncoradict(request):
primaries = ora_primary_config.objects.all().order_by('cluster_name')
listCluster = [primary.cluster_name for primary in primaries]
clusterListSync = request.POST.get('cluster_list_sync')
if clusterListSync:
clusterListSync=json.loads(clusterListSync)
ctl = operation_ctl.objects.get(data_type='数据字典' ,opt_type='同步')
if ctl.status == '进行中':
finalResult = {'status':'error','msg':'有任务进行中'}
else:
ctl.status='进行中'
ctl.save()
syncDictData.delay(clusterListSync)
finalResult = {'status':'ok'}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
finalResult = {'listCluster':listCluster}
return HttpResponse(json.dumps(finalResult), content_type='application/json')
#同步ldap用户到数据库
@csrf_exempt
def syncldapuser(request):
if not settings.ENABLE_LDAP:
result = {'msg': 'LDAP支持未开启'}
return HttpResponse(json.dumps(result), content_type='application/json')
ldapback = LDAPBackend()
ldap = ldapback.ldap
ldapconn = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
tls = getattr(settings, 'AUTH_LDAP_START_TLS', None)
if tls:
ldapconn.start_tls_s()
binddn = settings.AUTH_LDAP_BIND_DN
bind_password = settings.AUTH_LDAP_BIND_PASSWORD
basedn = settings.AUTH_LDAP_BASEDN
ldapconn.simple_bind_s(binddn, bind_password)
ldapusers = ldapconn.search_s(basedn, ldap.SCOPE_SUBTREE, 'objectclass=*', attrlist=settings.AUTH_LDAP_USER_ATTRLIST)
#ldap中username存在条目的第一个元素的uid中,定义的username_field不再使用,改为截取user_tag
display_field = settings.AUTH_LDAP_USER_ATTR_MAP['display']
email_field = settings.AUTH_LDAP_USER_ATTR_MAP['email']
count = 0
try:
for user in ldapusers:
user_tag=user[0].split(',')
user_attr = user[1]
if user_tag and user_attr:
username = user_tag[0][user_tag[0].find('=')+1:].encode()
display = user_attr.get(display_field,['none'.encode(),])[0]
email = user_attr.get(email_field,['none'.encode(),])[0]
already_user = users.objects.filter(username=username.decode()).filter(is_ldapuser=True)
if len(already_user) == 0:
u = users(username=username.decode(), display=display.decode(), email=email.decode(), is_ldapuser=True,is_active=0)
u.save()
count += 1
except Exception as err:
result = {'msg': '用户{0}导入错误:{1}'.format(username,str(err))}
return HttpResponse(json.dumps(result))
else:
result = {'msg': '同步{}个用户.'.format(count)}
return HttpResponse(json.dumps(result), content_type='application/json')
#请求图表数据
@csrf_exempt
def getMonthCharts(request):
result = daoora.getWorkChartsByMonth()
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getPersonCharts(request):
result = daoora.getWorkChartsByPerson()
return HttpResponse(json.dumps(result), content_type='application/json')
def getSqlSHA1(workflowId):
"""调用django ORM从数据库里查出review_content,从其中获取sqlSHA1值"""
workflowDetail = get_object_or_404(workflow, pk=workflowId)
dictSHA1 = {}
# 使用json.loads方法,把review_content从str转成list,
listReCheckResult = json.loads(workflowDetail.review_content)
for rownum in range(len(listReCheckResult)):
id = rownum + 1
sqlSHA1 = listReCheckResult[rownum][10]
if sqlSHA1 != '':
dictSHA1[id] = sqlSHA1
if dictSHA1 != {}:
# 如果找到有sqlSHA1值,说明是通过pt-OSC操作的,将其放入缓存。
# 因为使用OSC执行的SQL占较少数,所以不设置缓存过期时间
sqlSHA1_cache[workflowId] = dictSHA1
return dictSHA1
@csrf_exempt
def getOscPercent(request):
"""获取该SQL的pt-OSC执行进度和剩余时间"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
dictSHA1 = {}
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
# cachehit = "已命中"
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
result = inceptionDao.getOscPercent(sqlSHA1) #成功获取到SHA1值,去inception里面查询进度
if result["status"] == 0:
# 获取到进度值
pctResult = result
else:
# result["status"] == 1, 未获取到进度值,需要与workflow.execute_result对比,来判断是已经执行过了,还是还未执行
execute_result = workflow.objects.get(id=workflowId).execute_result
try:
listExecResult = json.loads(execute_result)
except ValueError:
listExecResult = execute_result
if type(listExecResult) == list and len(listExecResult) >= sqlID-1:
if dictSHA1[sqlID] in listExecResult[sqlID-1][10]:
# 已经执行完毕,进度值置为100
pctResult = {"status":0, "msg":"ok", "data":{"percent":100, "timeRemained":""}}
else:
# 可能因为前一条SQL是DML,正在执行中;或者还没执行到这一行。但是status返回的是4,而当前SQL实际上还未开始执行。这里建议前端进行重试
pctResult = {"status":-3, "msg":"进度未知", "data":{"percent":-100, "timeRemained":""}}
elif dictSHA1 != {} and sqlID not in dictSHA1:
pctResult = {"status":4, "msg":"该行SQL不是由pt-OSC执行的", "data":""}
else:
pctResult = {"status":-2, "msg":"整个工单不由pt-OSC执行", "data":""}
return HttpResponse(json.dumps(pctResult), content_type='application/json')
@csrf_exempt
def getWorkflowStatus(request):
"""获取某个工单的当前状态"""
workflowId = request.POST['workflowid']
if workflowId == '' or workflowId is None :
context = {"status":-1 ,'msg': 'workflowId参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
workflowDetail = get_object_or_404(workflow, pk=workflowId)
workflowStatus = workflowDetail.status
result = {"status":workflowStatus, "msg":"", "data":""}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def stopOscProgress(request):
"""中止该SQL的pt-OSC进程"""
workflowId = request.POST['workflowid']
sqlID = request.POST['sqlID']
if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
context = {"status":-1 ,'msg': 'workflowId或sqlID参数为空.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
loginUser = request.session.get('login_username', False)
workflowDetail = workflow.objects.get(id=workflowId)
try:
listAllReviewMen = json.loads(workflowDetail.review_man)
except ValueError:
listAllReviewMen = (workflowDetail.review_man, )
#服务器端二次验证,当前工单状态必须为等待人工审核,正在执行人工审核动作的当前登录用户必须为审核人. 避免攻击或被接口测试工具强行绕过
if workflowDetail.status != Const.workflowStatus['executing']:
context = {"status":-1, "msg":'当前工单状态不是"执行中",请刷新当前页面!', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
if loginUser is None or loginUser not in listAllReviewMen:
context = {"status":-1 ,'msg': '当前登录用户不是审核人,请重新登录.', "data":""}
return HttpResponse(json.dumps(context), content_type='application/json')
workflowId = int(workflowId)
sqlID = int(sqlID)
if workflowId in sqlSHA1_cache:
dictSHA1 = sqlSHA1_cache[workflowId]
else:
dictSHA1 = getSqlSHA1(workflowId)
if dictSHA1 != {} and sqlID in dictSHA1:
sqlSHA1 = dictSHA1[sqlID]
optResult = inceptionDao.stopOscProgress(sqlSHA1)
else:
optResult = {"status":4, "msg":"不是由pt-OSC执行的", "data":""}
return HttpRespense(json.dumps(optResult), content_type='application/json')
@csrf_exempt
def manExec(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
with transaction.atomic():
try:
workflowDetail = workflow.objects.select_for_update().get(id=workflowId,status__in=(Const.workflowStatus['manreviewing'],Const.workflowStatus['autoreviewwrong'],))
except Exception:
result = {'msg': '已经在处理'}
return HttpResponse(json.dumps(result), content_type='application/json')
try:
reviewMen = json.loads(workflowDetail.review_man)
except Exception:
reviewMen = workflowDetail.review_man
if not loginUser in reviewMen:
result = {'msg': '你不在审核人之列'}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.status = Const.workflowStatus['manexec']
workflowDetail.operator = loginUser
try:
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '更改状态为手工执行'
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='application/json')
#获取当前请求url
def _getDetailUrl(request):
scheme = request.scheme
#host = request.META['HTTP_HOST']
host = getattr(settings,'WAN_HOST')
return "%s://%s/detail/" % (scheme, host)
@csrf_exempt
def manFinish(request):
loginUser = request.session.get('login_username')
workflowId = request.POST['workflowid']
executeStatus = request.POST['status']
executeResult = request.POST['content']
workflowDetail = workflow.objects.get(id=workflowId)
if loginUser != workflowDetail.operator:
result = {"status":-1,"msg":"需要处理人操作"}
return HttpResponse(json.dumps(result), content_type='application/json')
workflowDetail.execute_result = executeResult
if executeStatus == '0':
workflowDetail.status = Const.workflowStatus['manexcept']
elif executeStatus == '1':
workflowDetail.status = Const.workflowStatus['manfinish']
try:
workflowDetail.operator = loginUser
workflowDetail.finish_time = getNow()
workflowDetail.save()
except Exception as e:
status = -1
msg = str(e)
else:
status = 2
msg = '保存成功'
#如果执行完毕了,则根据settings.py里的配置决定是否给提交者和DBA一封邮件提醒.DBA需要知晓审核并执行过的单子
url = _getDetailUrl(request) + str(workflowId) + '/'
#给主、副审核人,申请人,DBA各发一封邮件
engineer = workflowDetail.engineer
operator = workflowDetail.operator
workflowStatus = workflowDetail.status
workflowName = workflowDetail.workflow_name
objEngineer = users.objects.get(username=engineer)
strTitle = "SQL上线工单执行完毕 # " + str(workflowId)
strContent = "发起人:" + engineer + "\n操作人:" + operator + "\n工单地址:" + url + "\n工单名称: " + workflowName +"\n执行结果:" + workflowStatus
mailDba.delay(strTitle, strContent, [objEngineer.email])
wechatDba.delay(strTitle, strContent,objEngineer.wechat_account)
dingDba.delay(strContent,objEngineer.mobile)
result = {"status":status,"msg":msg}
return HttpResponse(json.dumps(result), content_type='applicatio |
@csrf_exempt
def privMod(request,operation):
loginUser = request.session.get('login_username', False)
hasTableId = [tab.table_id for tab in ora_tab_privs.objects.filter(username = request.GET.get('username'))]
if operation == 'add':
tables = ora_tables.objects.all().exclude(id__in=hasTableId)
elif operation == 'delete':
tables = ora_tables.objects.all().filter(id__in=hasTableId)
clusterName = request.GET.get('cluster_name')
schema = request.GET.get('schema')
cluster_list = []
schema_list = []
table_list = []
table_dict = {}
if not clusterName and not schema:
cluster_list = [primary.cluster_name for primary in ora_primary_config.objects.all()]
cluster_list.sort()
elif clusterName and not schema:
instanceId = ora_primary_config.objects.get(cluster_name=clusterName).id
schema_list = list(set([table.schema_name for table in tables.filter(instance_id=instanceId)]))
schema_list.sort()
elif clusterName and schema:
instanceId = ora_primary_config.objects.get(cluster_name=clusterName).id
table_list = [table.table for table in tables.filter(instance_id=instanceId).filter(schema_name=schema)]
table_dict = {}
for table in tables.filter(instance_id=instanceId).filter(schema_name=schema).order_by('table'):
table_dict[table.id] = table.table
result = {'cluster_list':cluster_list,'schema_list':schema_list,'table_list':table_dict}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def privCommit(request,operation):
loginUser = request.session.get('login_username', False)
if loginUser != 'admin':
context = {'errMsg': '无权限访问该页面'}
return render(request, 'error.html', context)
username = request.POST.get('username')
table_list = request.POST.get('table_list')
if table_list:
oriTabList = json.loads(table_list)
else:
status = 'error'
msg = '选择为空'
result = {'status':status,'msg':msg}
return HttpResponse(json.dumps(result), content_type='application/json')
extra_inst_list = request.POST.get('extra_inst_list')
if extra_inst_list:
extraInstList = json.loads(extra_inst_list)
#if len(extraInstList) > 0:
extraTabList = []
for extraInst in extraInstList:
instance_id = ora_primary_config.objects.get(cluster_name=extraInst).id
for i in range(0,len(oriTabList)):
table_id = oriTabList[i]
oraTab = ora_tables.objects.get(id = int(table_id))
try:
extraId = ora_tables.objects.get(instance_id = instance_id,schema_name=oraTab.schema_name,table=oraTab.table).id
except Exception as err:
pass
else:
extraTabList.append(extraId)
tabList = oriTabList + extraTabList
status = 'saved'
msg = '保存成功'
for table_id in tabList:
try:
if operation == 'add':
p = ora_tab_privs.objects.filter(username = username,table = ora_tables(id = int(table_id)))
if p.count() != 0:
status = 'save failed'
msg = '请勿重复保存'
else:
ora_tab_privs(username = username,table = ora_tables(id = int(table_id))).save()
elif operation == 'delete':
p = ora_tab_privs.objects.filter(username = username,table = ora_tables(id = int(table_id)))
if len(p) == 0:
status = 'save failed'
msg = '请勿重复删除'
else:
p.delete()
except IntegrityError:
status = 'save failed'
msg = '请勿重复保存'
except Exception as e:
print(str(e))
status = 'save failed'
msg = '有数据保存/删除失败'
else:
continue
result = {'status':status,'msg':msg}
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getResult(request):
logon_user = request.session.get('login_username', False)
clusterName = request.POST.get('cluster_name')
sqlContent = request.POST.get('sql_content')
finalStatus,msg,headerList,queryResult = daoora.query(logon_user,clusterName,sqlContent)
paginator = Paginator(queryResult, 10)
after_range_num = 5
before_range_num = 4
try:
page = int(request.GET.get('page','1'))
if page < 1:
page = 1
except ValueError:
page = 1
try:
queryResultP = paginator.page(page)
except (EmptyPage,InvalidPage,PageNotAnInteger):
queryResultP = paginator.page(1)
if page >= after_range_num:
page_range = paginator.page_range[page-after_range_num:page+before_range_num]
else:
page_range = paginator.page_range[0:int(page)+before_range_num]
result = {'final_status':finalStatus,'msg':msg,'header_list':headerList,'query_result':queryResultP}
return HttpResponse(locals(), content_type='application/json')
| n/json')
| identifier_name |
consts.go | package renter
import (
"fmt"
"time"
"gitlab.com/NebulousLabs/Sia/build"
"gitlab.com/NebulousLabs/Sia/modules"
)
// Version and system parameters.
const (
// persistVersion defines the Sia version that the persistence was
// last updated
persistVersion = "1.4.2"
)
const (
// AlertMSGSiafileLowRedundancy indicates that a file is below 75% redundancy.
AlertMSGSiafileLowRedundancy = "The SiaFile mentioned in the 'Cause' is below 75% redundancy"
// AlertSiafileLowRedundancyThreshold is the health threshold at which we start
// registering the LowRedundancy alert for a Siafile.
AlertSiafileLowRedundancyThreshold = 0.75
)
// AlertCauseSiafileLowRedundancy creates a customized "cause" for a siafile
// with a certain path and health.
func AlertCauseSiafileLowRedundancy(siaPath modules.SiaPath, health, redundancy float64) string |
// Default redundancy parameters.
var (
// syncCheckInterval is how often the repair heap checks the consensus code
// to see if the renter is synced. This is created because the contractor
// may not update the synced channel until a block is received under some
// conditions.
syncCheckInterval = build.Select(build.Var{
Dev: time.Second * 3,
Standard: time.Second * 5,
Testing: time.Second,
}).(time.Duration)
// cachedUtilitiesUpdateInterval is how often the renter updates the
// cachedUtilities.
cachedUtilitiesUpdateInterval = build.Select(build.Var{
Dev: time.Minute,
Standard: time.Minute * 10,
Testing: time.Second * 3,
}).(time.Duration)
)
// Default memory usage parameters.
var (
// registryMemoryDefault establishes the default amount of memory that the
// renter will use when performing registry operations. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
registryMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userUploadMemoryDefault establishes the default amount of memory that the
// renter will use when performing user-initiated uploads. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
userUploadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userDownloadMemoryDefault establishes the default amount of memory that
// the renter will use when performing user-initiated downloads. The mapping
// is currently not perfect due to GC overhead and other places where we
// don't count all of the memory usage accurately.
userDownloadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// repairMemoryDefault establishes the default amount of memory that the
// renter will use when performing system-scheduld uploads and downloads.
// The mapping is currently not perfect due to GC overhead and other places
// where we don't count all of the memory usage accurately.
repairMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 31), // 2.0 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// registryMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
registryMemoryPriorityDefault = uint64(0)
// userUploadMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
userUploadMemoryPriorityDefault = uint64(0)
// userDownloadMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
userDownloadMemoryPriorityDefault = uint64(0)
// repairMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
repairMemoryPriorityDefault = repairMemoryDefault / 4
// gcMemoryThreshold is the amount of memory after which a memory manager
// triggers a garbage collection.
gcMemoryThreshold = uint64(1 << 28) // 256 MiB
// initialStreamerCacheSize defines the cache size that each streamer will
// start using when it is created. A lower initial cache size will mean that
// it will take more requests / round trips for the cache to grow, however
// the cache size gets set to at least 2x the minimum read size initially
// anyway, which means any application doing very large reads is going to
// automatically have the cache size stepped up without having to do manual
// growth.
initialStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 13), // 8 KiB
Standard: int64(1 << 19), // 512 KiB
Testing: int64(1 << 10), // 1 KiB
}).(int64)
// maxStreamerCacheSize defines the maximum cache size that each streamer
// will use before it no longer increases its own cache size. The value has
// been set fairly low because some applications like mpv will request very
// large buffer sizes, taking as much data as fast as they can. This results
// in the cache size on Sia's end growing to match the size of the
// requesting application's buffer, and harms seek times. Maintaining a low
// maximum ensures that runaway growth is kept under at least a bit of
// control.
//
// This would be best resolved by knowing the actual bitrate of the data
// being fed to the user instead of trying to guess a bitrate, however as of
// time of writing we don't have an easy way to get that information.
maxStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 20), // 1 MiB
Standard: int64(1 << 25), // 32 MiB
Testing: int64(1 << 13), // 8 KiB
}).(int64)
)
// Default bandwidth usage parameters.
const (
// DefaultMaxDownloadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxDownloadSpeed through the API
DefaultMaxDownloadSpeed = 0
// DefaultMaxUploadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxUploadSpeed through the API
DefaultMaxUploadSpeed = 0
)
// Naming conventions for code readability.
const (
// destinationTypeSeekStream is the destination type used for downloads
// from the /renter/stream endpoint.
destinationTypeSeekStream = "httpseekstream"
// memoryPriorityLow is used to request low priority memory
memoryPriorityLow = false
// memoryPriorityHigh is used to request high priority memory
memoryPriorityHigh = true
)
// Constants that tune the health and repair processes.
const (
// maxConsecutiveDirHeapFailures is the maximum number of consecutive times
// the repair heap is allowed to fail to get a directory from the Directory
// Heap
maxConsecutiveDirHeapFailures = 5
// maxRandomStuckChunksAddToHeap is the maximum number of random stuck
// chunks that the stuck loop will add to the uploadHeap at a time. Random
// stuck chunks are the stuck chunks chosen at random from the file system
// as opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksAddToHeap = 5
// maxRandomStuckChunksInHeap is the maximum number of random stuck chunks
// that the stuck loop will try to keep in the uploadHeap. Random stuck
// chunks are the stuck chunks chosen at random from the file system as
// opposed to stuck chunks chosen from previously successful file
maxRandomStuckChunksInHeap = 10
// maxStuckChunksInHeap is the maximum number of stuck chunks that the stuck
// loop will try to keep in the uploadHeap
maxStuckChunksInHeap = 25
)
var (
// healthCheckInterval defines the maximum amount of time that should pass
// in between checking the health of a file or directory.
healthCheckInterval = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 5 * time.Second,
}).(time.Duration)
// healthLoopErrorSleepDuration indicates how long the health loop should
// sleep before retrying if there is an error preventing progress.
healthLoopErrorSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 30 * time.Second,
Testing: 3 * time.Second,
}).(time.Duration)
// healthLoopNumBatchFiles defines the number of files the health loop will
// try to batch together in a subtree when updating the filesystem.
healthLoopNumBatchFiles = build.Select(build.Var{
Dev: uint64(1e3),
Standard: uint64(10e3),
Testing: uint64(5),
}).(uint64)
// healthLoopNumBatchSubDirs defines the number of sub directories the health
// loop will try to batch together in a subtree when updating the filesystem.
healthLoopNumBatchSubDirs = build.Select(build.Var{
Dev: uint64(100),
Standard: uint64(1e3),
Testing: uint64(2),
}).(uint64)
// maxRepairLoopTime indicates the maximum amount of time that the repair
// loop will spend popping chunks off of the repair heap.
maxRepairLoopTime = build.Select(build.Var{
Dev: 1 * time.Minute,
Standard: 15 * time.Minute,
Testing: 15 * time.Second,
}).(time.Duration)
// maxSuccessfulStuckRepairFiles is the maximum number of files that the
// stuck loop will track when there is a successful stuck chunk repair
maxSuccessfulStuckRepairFiles = build.Select(build.Var{
Dev: 3,
Standard: 20,
Testing: 2,
}).(int)
// maxUploadHeapChunks is the maximum number of chunks that we should add to
// the upload heap. This also will be used as the target number of chunks to
// add to the upload heap which which will mean for small directories we
// will add multiple directories.
maxUploadHeapChunks = build.Select(build.Var{
Dev: 25,
Standard: 250,
Testing: 5,
}).(int)
// minUploadHeapSize is the minimum number of chunks we want in the upload
// heap before trying to add more in order to maintain back pressure on the
// workers, repairs, and uploads.
minUploadHeapSize = build.Select(build.Var{
Dev: 5,
Standard: 20,
Testing: 1,
}).(int)
// numBubbleWorkerThreads is the number of threads used when using worker
// groups in various bubble methods
numBubbleWorkerThreads = 20
// offlineCheckFrequency is how long the renter will wait to check the
// online status if it is offline.
offlineCheckFrequency = build.Select(build.Var{
Dev: 3 * time.Second,
Standard: 10 * time.Second,
Testing: 250 * time.Millisecond,
}).(time.Duration)
// repairLoopResetFrequency is the frequency with which the repair loop will
// reset entirely, pushing the root directory back on top. This is a
// temporary measure to ensure that even if a user is continuously
// uploading, the repair heap is occasionally reset to push the root
// directory on top.
repairLoopResetFrequency = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 40 * time.Second,
}).(time.Duration)
// repairStuckChunkInterval defines how long the renter sleeps between
// trying to repair a stuck chunk. The uploadHeap prioritizes stuck chunks
// so this interval is to allow time for unstuck chunks to be repaired.
// Ideally the uploadHeap is spending 95% of its time repairing unstuck
// chunks.
repairStuckChunkInterval = build.Select(build.Var{
Dev: 90 * time.Second,
Standard: 10 * time.Minute,
Testing: 5 * time.Second,
}).(time.Duration)
// stuckLoopErrorSleepDuration indicates how long the stuck loop should
// sleep before retrying if there is an error preventing progress.
stuckLoopErrorSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 30 * time.Second,
Testing: 3 * time.Second,
}).(time.Duration)
// uploadAndRepairErrorSleepDuration indicates how long a repair process
// should sleep before retrying if there is an error fetching the metadata
// of the root directory of the renter's filesystem.
uploadAndRepairErrorSleepDuration = build.Select(build.Var{
Dev: 20 * time.Second,
Standard: 15 * time.Minute,
Testing: 3 * time.Second,
}).(time.Duration)
// snapshotSyncSleepDuration defines how long the renter sleeps between
// trying to synchronize snapshots across hosts.
snapshotSyncSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 5 * time.Minute,
Testing: 5 * time.Second,
}).(time.Duration)
)
// Constants that tune the worker swarm.
var (
// downloadFailureCooldown defines how long to wait for a worker after a
// worker has experienced a download failure.
downloadFailureCooldown = time.Second * 3
// maxConsecutivePenalty determines how many times the timeout/cooldown for
// being a bad host can be doubled before a maximum cooldown is reached.
maxConsecutivePenalty = build.Select(build.Var{
Dev: 4,
Standard: 10,
Testing: 3,
}).(int)
// uploadFailureCooldown is how long a worker will wait initially if an
// upload fails. This number is prime to increase the chance to avoid
// intersecting with regularly occurring events which may cause failures.
uploadFailureCooldown = build.Select(build.Var{
Dev: time.Second * 7,
Standard: time.Second * 61,
Testing: time.Second,
}).(time.Duration)
// workerPoolUpdateTimeout is the amount of time that can pass before the
// worker pool should be updated.
workerPoolUpdateTimeout = build.Select(build.Var{
Dev: 30 * time.Second,
Standard: 5 * time.Minute,
Testing: 3 * time.Second,
}).(time.Duration)
)
// Constants which don't fit into another category very well.
const (
// defaultFilePerm defines the default permissions used for a new file if no
// permissions are supplied.
defaultFilePerm = 0666
// PriceEstimationSafetyFactor is the factor of safety used in the price
// estimation to account for any missed costs
PriceEstimationSafetyFactor = 1.2
)
// Deprecated consts.
//
// TODO: Tear out all related code and drop these consts.
const (
// DefaultStreamCacheSize is the default cache size of the /renter/stream cache in
// chunks, the user can set a custom cache size through the API
DefaultStreamCacheSize = 2
)
| {
return fmt.Sprintf("Siafile '%v' has a health of %v and redundancy of %v", siaPath.String(), health, redundancy)
} | identifier_body |
consts.go | package renter
import (
"fmt"
"time"
"gitlab.com/NebulousLabs/Sia/build"
"gitlab.com/NebulousLabs/Sia/modules"
)
// Version and system parameters.
const (
// persistVersion defines the Sia version that the persistence was
// last updated
persistVersion = "1.4.2"
)
const (
// AlertMSGSiafileLowRedundancy indicates that a file is below 75% redundancy.
AlertMSGSiafileLowRedundancy = "The SiaFile mentioned in the 'Cause' is below 75% redundancy"
// AlertSiafileLowRedundancyThreshold is the health threshold at which we start
// registering the LowRedundancy alert for a Siafile.
AlertSiafileLowRedundancyThreshold = 0.75
)
// AlertCauseSiafileLowRedundancy creates a customized "cause" for a siafile
// with a certain path and health.
func | (siaPath modules.SiaPath, health, redundancy float64) string {
return fmt.Sprintf("Siafile '%v' has a health of %v and redundancy of %v", siaPath.String(), health, redundancy)
}
// Default redundancy parameters.
var (
// syncCheckInterval is how often the repair heap checks the consensus code
// to see if the renter is synced. This is created because the contractor
// may not update the synced channel until a block is received under some
// conditions.
syncCheckInterval = build.Select(build.Var{
Dev: time.Second * 3,
Standard: time.Second * 5,
Testing: time.Second,
}).(time.Duration)
// cachedUtilitiesUpdateInterval is how often the renter updates the
// cachedUtilities.
cachedUtilitiesUpdateInterval = build.Select(build.Var{
Dev: time.Minute,
Standard: time.Minute * 10,
Testing: time.Second * 3,
}).(time.Duration)
)
// Default memory usage parameters.
var (
// registryMemoryDefault establishes the default amount of memory that the
// renter will use when performing registry operations. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
registryMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userUploadMemoryDefault establishes the default amount of memory that the
// renter will use when performing user-initiated uploads. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
userUploadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userDownloadMemoryDefault establishes the default amount of memory that
// the renter will use when performing user-initiated downloads. The mapping
// is currently not perfect due to GC overhead and other places where we
// don't count all of the memory usage accurately.
userDownloadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// repairMemoryDefault establishes the default amount of memory that the
// renter will use when performing system-scheduld uploads and downloads.
// The mapping is currently not perfect due to GC overhead and other places
// where we don't count all of the memory usage accurately.
repairMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 31), // 2.0 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// registryMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
registryMemoryPriorityDefault = uint64(0)
// userUploadMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
userUploadMemoryPriorityDefault = uint64(0)
// userDownloadMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
userDownloadMemoryPriorityDefault = uint64(0)
// repairMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
repairMemoryPriorityDefault = repairMemoryDefault / 4
// gcMemoryThreshold is the amount of memory after which a memory manager
// triggers a garbage collection.
gcMemoryThreshold = uint64(1 << 28) // 256 MiB
// initialStreamerCacheSize defines the cache size that each streamer will
// start using when it is created. A lower initial cache size will mean that
// it will take more requests / round trips for the cache to grow, however
// the cache size gets set to at least 2x the minimum read size initially
// anyway, which means any application doing very large reads is going to
// automatically have the cache size stepped up without having to do manual
// growth.
initialStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 13), // 8 KiB
Standard: int64(1 << 19), // 512 KiB
Testing: int64(1 << 10), // 1 KiB
}).(int64)
// maxStreamerCacheSize defines the maximum cache size that each streamer
// will use before it no longer increases its own cache size. The value has
// been set fairly low because some applications like mpv will request very
// large buffer sizes, taking as much data as fast as they can. This results
// in the cache size on Sia's end growing to match the size of the
// requesting application's buffer, and harms seek times. Maintaining a low
// maximum ensures that runaway growth is kept under at least a bit of
// control.
//
// This would be best resolved by knowing the actual bitrate of the data
// being fed to the user instead of trying to guess a bitrate, however as of
// time of writing we don't have an easy way to get that information.
maxStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 20), // 1 MiB
Standard: int64(1 << 25), // 32 MiB
Testing: int64(1 << 13), // 8 KiB
}).(int64)
)
// Default bandwidth usage parameters.
const (
// DefaultMaxDownloadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxDownloadSpeed through the API
DefaultMaxDownloadSpeed = 0
// DefaultMaxUploadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxUploadSpeed through the API
DefaultMaxUploadSpeed = 0
)
// Naming conventions for code readability.
const (
// destinationTypeSeekStream is the destination type used for downloads
// from the /renter/stream endpoint.
destinationTypeSeekStream = "httpseekstream"
// memoryPriorityLow is used to request low priority memory
memoryPriorityLow = false
// memoryPriorityHigh is used to request high priority memory
memoryPriorityHigh = true
)
// Constants that tune the health and repair processes.
const (
// maxConsecutiveDirHeapFailures is the maximum number of consecutive times
// the repair heap is allowed to fail to get a directory from the Directory
// Heap
maxConsecutiveDirHeapFailures = 5
// maxRandomStuckChunksAddToHeap is the maximum number of random stuck
// chunks that the stuck loop will add to the uploadHeap at a time. Random
// stuck chunks are the stuck chunks chosen at random from the file system
// as opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksAddToHeap = 5
// maxRandomStuckChunksInHeap is the maximum number of random stuck chunks
// that the stuck loop will try to keep in the uploadHeap. Random stuck
// chunks are the stuck chunks chosen at random from the file system as
// opposed to stuck chunks chosen from previously successful file
maxRandomStuckChunksInHeap = 10
// maxStuckChunksInHeap is the maximum number of stuck chunks that the stuck
// loop will try to keep in the uploadHeap
maxStuckChunksInHeap = 25
)
var (
// healthCheckInterval defines the maximum amount of time that should pass
// in between checking the health of a file or directory.
healthCheckInterval = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 5 * time.Second,
}).(time.Duration)
// healthLoopErrorSleepDuration indicates how long the health loop should
// sleep before retrying if there is an error preventing progress.
healthLoopErrorSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 30 * time.Second,
Testing: 3 * time.Second,
}).(time.Duration)
// healthLoopNumBatchFiles defines the number of files the health loop will
// try to batch together in a subtree when updating the filesystem.
healthLoopNumBatchFiles = build.Select(build.Var{
Dev: uint64(1e3),
Standard: uint64(10e3),
Testing: uint64(5),
}).(uint64)
// healthLoopNumBatchSubDirs defines the number of sub directories the health
// loop will try to batch together in a subtree when updating the filesystem.
healthLoopNumBatchSubDirs = build.Select(build.Var{
Dev: uint64(100),
Standard: uint64(1e3),
Testing: uint64(2),
}).(uint64)
// maxRepairLoopTime indicates the maximum amount of time that the repair
// loop will spend popping chunks off of the repair heap.
maxRepairLoopTime = build.Select(build.Var{
Dev: 1 * time.Minute,
Standard: 15 * time.Minute,
Testing: 15 * time.Second,
}).(time.Duration)
// maxSuccessfulStuckRepairFiles is the maximum number of files that the
// stuck loop will track when there is a successful stuck chunk repair
maxSuccessfulStuckRepairFiles = build.Select(build.Var{
Dev: 3,
Standard: 20,
Testing: 2,
}).(int)
// maxUploadHeapChunks is the maximum number of chunks that we should add to
// the upload heap. This also will be used as the target number of chunks to
// add to the upload heap which which will mean for small directories we
// will add multiple directories.
maxUploadHeapChunks = build.Select(build.Var{
Dev: 25,
Standard: 250,
Testing: 5,
}).(int)
// minUploadHeapSize is the minimum number of chunks we want in the upload
// heap before trying to add more in order to maintain back pressure on the
// workers, repairs, and uploads.
minUploadHeapSize = build.Select(build.Var{
Dev: 5,
Standard: 20,
Testing: 1,
}).(int)
// numBubbleWorkerThreads is the number of threads used when using worker
// groups in various bubble methods
numBubbleWorkerThreads = 20
// offlineCheckFrequency is how long the renter will wait to check the
// online status if it is offline.
offlineCheckFrequency = build.Select(build.Var{
Dev: 3 * time.Second,
Standard: 10 * time.Second,
Testing: 250 * time.Millisecond,
}).(time.Duration)
// repairLoopResetFrequency is the frequency with which the repair loop will
// reset entirely, pushing the root directory back on top. This is a
// temporary measure to ensure that even if a user is continuously
// uploading, the repair heap is occasionally reset to push the root
// directory on top.
repairLoopResetFrequency = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 40 * time.Second,
}).(time.Duration)
// repairStuckChunkInterval defines how long the renter sleeps between
// trying to repair a stuck chunk. The uploadHeap prioritizes stuck chunks
// so this interval is to allow time for unstuck chunks to be repaired.
// Ideally the uploadHeap is spending 95% of its time repairing unstuck
// chunks.
repairStuckChunkInterval = build.Select(build.Var{
Dev: 90 * time.Second,
Standard: 10 * time.Minute,
Testing: 5 * time.Second,
}).(time.Duration)
// stuckLoopErrorSleepDuration indicates how long the stuck loop should
// sleep before retrying if there is an error preventing progress.
stuckLoopErrorSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 30 * time.Second,
Testing: 3 * time.Second,
}).(time.Duration)
// uploadAndRepairErrorSleepDuration indicates how long a repair process
// should sleep before retrying if there is an error fetching the metadata
// of the root directory of the renter's filesystem.
uploadAndRepairErrorSleepDuration = build.Select(build.Var{
Dev: 20 * time.Second,
Standard: 15 * time.Minute,
Testing: 3 * time.Second,
}).(time.Duration)
// snapshotSyncSleepDuration defines how long the renter sleeps between
// trying to synchronize snapshots across hosts.
snapshotSyncSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 5 * time.Minute,
Testing: 5 * time.Second,
}).(time.Duration)
)
// Constants that tune the worker swarm.
var (
// downloadFailureCooldown defines how long to wait for a worker after a
// worker has experienced a download failure.
downloadFailureCooldown = time.Second * 3
// maxConsecutivePenalty determines how many times the timeout/cooldown for
// being a bad host can be doubled before a maximum cooldown is reached.
maxConsecutivePenalty = build.Select(build.Var{
Dev: 4,
Standard: 10,
Testing: 3,
}).(int)
// uploadFailureCooldown is how long a worker will wait initially if an
// upload fails. This number is prime to increase the chance to avoid
// intersecting with regularly occurring events which may cause failures.
uploadFailureCooldown = build.Select(build.Var{
Dev: time.Second * 7,
Standard: time.Second * 61,
Testing: time.Second,
}).(time.Duration)
// workerPoolUpdateTimeout is the amount of time that can pass before the
// worker pool should be updated.
workerPoolUpdateTimeout = build.Select(build.Var{
Dev: 30 * time.Second,
Standard: 5 * time.Minute,
Testing: 3 * time.Second,
}).(time.Duration)
)
// Constants which don't fit into another category very well.
const (
// defaultFilePerm defines the default permissions used for a new file if no
// permissions are supplied.
defaultFilePerm = 0666
// PriceEstimationSafetyFactor is the factor of safety used in the price
// estimation to account for any missed costs
PriceEstimationSafetyFactor = 1.2
)
// Deprecated consts.
//
// TODO: Tear out all related code and drop these consts.
const (
// DefaultStreamCacheSize is the default cache size of the /renter/stream cache in
// chunks, the user can set a custom cache size through the API
DefaultStreamCacheSize = 2
)
| AlertCauseSiafileLowRedundancy | identifier_name |
consts.go | package renter
import (
"fmt"
"time"
"gitlab.com/NebulousLabs/Sia/build"
"gitlab.com/NebulousLabs/Sia/modules"
)
// Version and system parameters.
const (
// persistVersion defines the Sia version that the persistence was
// last updated
persistVersion = "1.4.2"
)
const (
// AlertMSGSiafileLowRedundancy indicates that a file is below 75% redundancy.
AlertMSGSiafileLowRedundancy = "The SiaFile mentioned in the 'Cause' is below 75% redundancy"
// AlertSiafileLowRedundancyThreshold is the health threshold at which we start
// registering the LowRedundancy alert for a Siafile.
AlertSiafileLowRedundancyThreshold = 0.75
)
// AlertCauseSiafileLowRedundancy creates a customized "cause" for a siafile
// with a certain path and health.
func AlertCauseSiafileLowRedundancy(siaPath modules.SiaPath, health, redundancy float64) string { | // syncCheckInterval is how often the repair heap checks the consensus code
// to see if the renter is synced. This is created because the contractor
// may not update the synced channel until a block is received under some
// conditions.
syncCheckInterval = build.Select(build.Var{
Dev: time.Second * 3,
Standard: time.Second * 5,
Testing: time.Second,
}).(time.Duration)
// cachedUtilitiesUpdateInterval is how often the renter updates the
// cachedUtilities.
cachedUtilitiesUpdateInterval = build.Select(build.Var{
Dev: time.Minute,
Standard: time.Minute * 10,
Testing: time.Second * 3,
}).(time.Duration)
)
// Default memory usage parameters.
var (
// registryMemoryDefault establishes the default amount of memory that the
// renter will use when performing registry operations. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
registryMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userUploadMemoryDefault establishes the default amount of memory that the
// renter will use when performing user-initiated uploads. The mapping is
// currently not perfect due to GC overhead and other places where we don't
// count all of the memory usage accurately.
userUploadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// userDownloadMemoryDefault establishes the default amount of memory that
// the renter will use when performing user-initiated downloads. The mapping
// is currently not perfect due to GC overhead and other places where we
// don't count all of the memory usage accurately.
userDownloadMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 29), // 0.5 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// repairMemoryDefault establishes the default amount of memory that the
// renter will use when performing system-scheduld uploads and downloads.
// The mapping is currently not perfect due to GC overhead and other places
// where we don't count all of the memory usage accurately.
repairMemoryDefault = build.Select(build.Var{
Dev: uint64(1 << 28), // 256 MiB
Standard: uint64(1 << 31), // 2.0 GiB
Testing: uint64(1 << 17), // 128 KiB - 4 KiB sector size, need to test memory exhaustion
}).(uint64)
// registryMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
registryMemoryPriorityDefault = uint64(0)
// userUploadMemoryPriorityDefault is the amount of memory that is held in reserve
// explicitly for priority actions.
userUploadMemoryPriorityDefault = uint64(0)
// userDownloadMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
userDownloadMemoryPriorityDefault = uint64(0)
// repairMemoryPriorityDefault is the amount of memory that is held in
// reserve explicitly for priority actions.
repairMemoryPriorityDefault = repairMemoryDefault / 4
// gcMemoryThreshold is the amount of memory after which a memory manager
// triggers a garbage collection.
gcMemoryThreshold = uint64(1 << 28) // 256 MiB
// initialStreamerCacheSize defines the cache size that each streamer will
// start using when it is created. A lower initial cache size will mean that
// it will take more requests / round trips for the cache to grow, however
// the cache size gets set to at least 2x the minimum read size initially
// anyway, which means any application doing very large reads is going to
// automatically have the cache size stepped up without having to do manual
// growth.
initialStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 13), // 8 KiB
Standard: int64(1 << 19), // 512 KiB
Testing: int64(1 << 10), // 1 KiB
}).(int64)
// maxStreamerCacheSize defines the maximum cache size that each streamer
// will use before it no longer increases its own cache size. The value has
// been set fairly low because some applications like mpv will request very
// large buffer sizes, taking as much data as fast as they can. This results
// in the cache size on Sia's end growing to match the size of the
// requesting application's buffer, and harms seek times. Maintaining a low
// maximum ensures that runaway growth is kept under at least a bit of
// control.
//
// This would be best resolved by knowing the actual bitrate of the data
// being fed to the user instead of trying to guess a bitrate, however as of
// time of writing we don't have an easy way to get that information.
maxStreamerCacheSize = build.Select(build.Var{
Dev: int64(1 << 20), // 1 MiB
Standard: int64(1 << 25), // 32 MiB
Testing: int64(1 << 13), // 8 KiB
}).(int64)
)
// Default bandwidth usage parameters.
const (
// DefaultMaxDownloadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxDownloadSpeed through the API
DefaultMaxDownloadSpeed = 0
// DefaultMaxUploadSpeed is set to zero to indicate no limit, the user
// can set a custom MaxUploadSpeed through the API
DefaultMaxUploadSpeed = 0
)
// Naming conventions for code readability.
const (
// destinationTypeSeekStream is the destination type used for downloads
// from the /renter/stream endpoint.
destinationTypeSeekStream = "httpseekstream"
// memoryPriorityLow is used to request low priority memory
memoryPriorityLow = false
// memoryPriorityHigh is used to request high priority memory
memoryPriorityHigh = true
)
// Constants that tune the health and repair processes.
const (
// maxConsecutiveDirHeapFailures is the maximum number of consecutive times
// the repair heap is allowed to fail to get a directory from the Directory
// Heap
maxConsecutiveDirHeapFailures = 5
// maxRandomStuckChunksAddToHeap is the maximum number of random stuck
// chunks that the stuck loop will add to the uploadHeap at a time. Random
// stuck chunks are the stuck chunks chosen at random from the file system
// as opposed to stuck chunks chosen from a previously successful file
maxRandomStuckChunksAddToHeap = 5
// maxRandomStuckChunksInHeap is the maximum number of random stuck chunks
// that the stuck loop will try to keep in the uploadHeap. Random stuck
// chunks are the stuck chunks chosen at random from the file system as
// opposed to stuck chunks chosen from previously successful file
maxRandomStuckChunksInHeap = 10
// maxStuckChunksInHeap is the maximum number of stuck chunks that the stuck
// loop will try to keep in the uploadHeap
maxStuckChunksInHeap = 25
)
var (
// healthCheckInterval defines the maximum amount of time that should pass
// in between checking the health of a file or directory.
healthCheckInterval = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 5 * time.Second,
}).(time.Duration)
// healthLoopErrorSleepDuration indicates how long the health loop should
// sleep before retrying if there is an error preventing progress.
healthLoopErrorSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 30 * time.Second,
Testing: 3 * time.Second,
}).(time.Duration)
// healthLoopNumBatchFiles defines the number of files the health loop will
// try to batch together in a subtree when updating the filesystem.
healthLoopNumBatchFiles = build.Select(build.Var{
Dev: uint64(1e3),
Standard: uint64(10e3),
Testing: uint64(5),
}).(uint64)
// healthLoopNumBatchSubDirs defines the number of sub directories the health
// loop will try to batch together in a subtree when updating the filesystem.
healthLoopNumBatchSubDirs = build.Select(build.Var{
Dev: uint64(100),
Standard: uint64(1e3),
Testing: uint64(2),
}).(uint64)
// maxRepairLoopTime indicates the maximum amount of time that the repair
// loop will spend popping chunks off of the repair heap.
maxRepairLoopTime = build.Select(build.Var{
Dev: 1 * time.Minute,
Standard: 15 * time.Minute,
Testing: 15 * time.Second,
}).(time.Duration)
// maxSuccessfulStuckRepairFiles is the maximum number of files that the
// stuck loop will track when there is a successful stuck chunk repair
maxSuccessfulStuckRepairFiles = build.Select(build.Var{
Dev: 3,
Standard: 20,
Testing: 2,
}).(int)
// maxUploadHeapChunks is the maximum number of chunks that we should add to
// the upload heap. This also will be used as the target number of chunks to
// add to the upload heap which which will mean for small directories we
// will add multiple directories.
maxUploadHeapChunks = build.Select(build.Var{
Dev: 25,
Standard: 250,
Testing: 5,
}).(int)
// minUploadHeapSize is the minimum number of chunks we want in the upload
// heap before trying to add more in order to maintain back pressure on the
// workers, repairs, and uploads.
minUploadHeapSize = build.Select(build.Var{
Dev: 5,
Standard: 20,
Testing: 1,
}).(int)
// numBubbleWorkerThreads is the number of threads used when using worker
// groups in various bubble methods
numBubbleWorkerThreads = 20
// offlineCheckFrequency is how long the renter will wait to check the
// online status if it is offline.
offlineCheckFrequency = build.Select(build.Var{
Dev: 3 * time.Second,
Standard: 10 * time.Second,
Testing: 250 * time.Millisecond,
}).(time.Duration)
// repairLoopResetFrequency is the frequency with which the repair loop will
// reset entirely, pushing the root directory back on top. This is a
// temporary measure to ensure that even if a user is continuously
// uploading, the repair heap is occasionally reset to push the root
// directory on top.
repairLoopResetFrequency = build.Select(build.Var{
Dev: 15 * time.Minute,
Standard: 1 * time.Hour,
Testing: 40 * time.Second,
}).(time.Duration)
// repairStuckChunkInterval defines how long the renter sleeps between
// trying to repair a stuck chunk. The uploadHeap prioritizes stuck chunks
// so this interval is to allow time for unstuck chunks to be repaired.
// Ideally the uploadHeap is spending 95% of its time repairing unstuck
// chunks.
repairStuckChunkInterval = build.Select(build.Var{
Dev: 90 * time.Second,
Standard: 10 * time.Minute,
Testing: 5 * time.Second,
}).(time.Duration)
// stuckLoopErrorSleepDuration indicates how long the stuck loop should
// sleep before retrying if there is an error preventing progress.
stuckLoopErrorSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 30 * time.Second,
Testing: 3 * time.Second,
}).(time.Duration)
// uploadAndRepairErrorSleepDuration indicates how long a repair process
// should sleep before retrying if there is an error fetching the metadata
// of the root directory of the renter's filesystem.
uploadAndRepairErrorSleepDuration = build.Select(build.Var{
Dev: 20 * time.Second,
Standard: 15 * time.Minute,
Testing: 3 * time.Second,
}).(time.Duration)
// snapshotSyncSleepDuration defines how long the renter sleeps between
// trying to synchronize snapshots across hosts.
snapshotSyncSleepDuration = build.Select(build.Var{
Dev: 10 * time.Second,
Standard: 5 * time.Minute,
Testing: 5 * time.Second,
}).(time.Duration)
)
// Constants that tune the worker swarm.
var (
// downloadFailureCooldown defines how long to wait for a worker after a
// worker has experienced a download failure.
downloadFailureCooldown = time.Second * 3
// maxConsecutivePenalty determines how many times the timeout/cooldown for
// being a bad host can be doubled before a maximum cooldown is reached.
maxConsecutivePenalty = build.Select(build.Var{
Dev: 4,
Standard: 10,
Testing: 3,
}).(int)
// uploadFailureCooldown is how long a worker will wait initially if an
// upload fails. This number is prime to increase the chance to avoid
// intersecting with regularly occurring events which may cause failures.
uploadFailureCooldown = build.Select(build.Var{
Dev: time.Second * 7,
Standard: time.Second * 61,
Testing: time.Second,
}).(time.Duration)
// workerPoolUpdateTimeout is the amount of time that can pass before the
// worker pool should be updated.
workerPoolUpdateTimeout = build.Select(build.Var{
Dev: 30 * time.Second,
Standard: 5 * time.Minute,
Testing: 3 * time.Second,
}).(time.Duration)
)
// Constants which don't fit into another category very well.
const (
// defaultFilePerm defines the default permissions used for a new file if no
// permissions are supplied.
defaultFilePerm = 0666
// PriceEstimationSafetyFactor is the factor of safety used in the price
// estimation to account for any missed costs
PriceEstimationSafetyFactor = 1.2
)
// Deprecated consts.
//
// TODO: Tear out all related code and drop these consts.
const (
// DefaultStreamCacheSize is the default cache size of the /renter/stream cache in
// chunks, the user can set a custom cache size through the API
DefaultStreamCacheSize = 2
) | return fmt.Sprintf("Siafile '%v' has a health of %v and redundancy of %v", siaPath.String(), health, redundancy)
}
// Default redundancy parameters.
var ( | random_line_split |
MADDPGAgent.py | import numpy as np
import random
from collections import namedtuple, deque
from MADDPG.Models.MADDPGCritic import Critic
from MADDPG.Models.MADDPGActor import Actor
import torch
import torch.optim as optim
import MADDPG.random_p as rm
from MADDPG.schedule import LinearSchedule
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
# TAU = 1e-3 # for soft update of target parameters
ACTOR_LR = 1e-3 # Actor network learning rate
CRITIC_LR = 1e-4 # Actor network learning rate
UPDATE_EVERY = 20 # how often to update the network (time step)
# UPDATE_TIMES = 5 # how many times to update in one go
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,
weight_decay=1.e-5):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.n_seed = np.random.seed(seed)
self.num_agents = num_agents
self.update_times = update_times
self.n_step = 0
self.TAU = 1e-3
self.noise = []
for i in range(num_agents):
self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))
# critic local and target network (Q-Learning)
self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target.load_state_dict(self.critic_local.state_dict())
# actor local and target network (Policy gradient)
self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target.load_state_dict(self.actor_local.state_dict())
# optimizer for critic and actor network
self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)
self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.a_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
for i in range(self.num_agents):
all_state = np.concatenate((state[i], state[1 - i]))
all_actions = np.concatenate((action[i], action[1 - i]))
all_next_state = np.concatenate((next_state[i], next_state[1 - i]))
self.memory.add(state[i], all_state, action[i], all_actions, reward[i], next_state[i], all_next_state,
done[i])
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
for i in range(self.update_times):
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, training=True):
"""Returns continous actions values for all action for given state as per current policy.
Params
======
state (array_like): current state
"""
|
epsilon = max((1500 - self.n_step) / 1500, .01)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state)
self.actor_local.train()
if training:
# return np.clip(actions.cpu().data.numpy()+np.random.uniform(-1,1,(2,2))*epsilon,-1,1) #adding noise to action space
r = np.random.random()
if r <= epsilon:
return np.random.uniform(-1, 1, (2, 2))
else:
return np.clip(actions.cpu().data.numpy(), -1, 1) # epsilon greedy policy
else:
return actions.cpu().data.numpy()
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences
batch_size = all_next_state.shape[0]
all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)
critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(
device)
with torch.no_grad():
Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])
Q_targets = rewards + (gamma * Q_target_next * (1 - dones))
critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)
Q_expected = self.critic_local(critic_local_input, action)
# critic loss
huber_loss = torch.nn.SmoothL1Loss()
loss = huber_loss(Q_expected, Q_targets.detach())
self.optimizer_critic.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.optimizer_critic.step()
# actor loss
action_pr_self = self.actor_local(states)
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()
# critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)
critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)
p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()
self.optimizer_actor.zero_grad()
p_loss.backward()
self.optimizer_actor.step()
# ------------------- update target network ------------------- #
self.TAU = min(5e-1, self.TAU * 1.001)
self.soft_update(self.critic_local, self.critic_target, self.TAU)
self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset_random(self):
for i in range(self.num_agents):
self.noise[i].reset_states()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "all_state", "action", "all_actions", "reward", "next_state",
"all_next_state", "done"])
self.seed = random.seed(seed)
def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):
"""Add a new experience to memory."""
e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
all_states = torch.from_numpy(np.vstack([e.all_state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
all_actions = torch.from_numpy(np.vstack([e.all_actions for e in experiences if e is not None])).float().to(
device)
# actions.requires_grad=True
# print(actions.requires_grad,"grad")
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(
device)
all_next_state = torch.from_numpy(
np.vstack([e.all_next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(
device)
return (states, all_states, actions, all_actions, rewards, next_states, all_next_state, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory) | state = torch.from_numpy(state).float().detach().to(device)
# print(state.shape,"act")
self.n_step += 1 | random_line_split |
MADDPGAgent.py | import numpy as np
import random
from collections import namedtuple, deque
from MADDPG.Models.MADDPGCritic import Critic
from MADDPG.Models.MADDPGActor import Actor
import torch
import torch.optim as optim
import MADDPG.random_p as rm
from MADDPG.schedule import LinearSchedule
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
# TAU = 1e-3 # for soft update of target parameters
ACTOR_LR = 1e-3 # Actor network learning rate
CRITIC_LR = 1e-4 # Actor network learning rate
UPDATE_EVERY = 20 # how often to update the network (time step)
# UPDATE_TIMES = 5 # how many times to update in one go
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,
weight_decay=1.e-5):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.n_seed = np.random.seed(seed)
self.num_agents = num_agents
self.update_times = update_times
self.n_step = 0
self.TAU = 1e-3
self.noise = []
for i in range(num_agents):
self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))
# critic local and target network (Q-Learning)
self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target.load_state_dict(self.critic_local.state_dict())
# actor local and target network (Policy gradient)
self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target.load_state_dict(self.actor_local.state_dict())
# optimizer for critic and actor network
self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)
self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.a_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
for i in range(self.num_agents):
all_state = np.concatenate((state[i], state[1 - i]))
all_actions = np.concatenate((action[i], action[1 - i]))
all_next_state = np.concatenate((next_state[i], next_state[1 - i]))
self.memory.add(state[i], all_state, action[i], all_actions, reward[i], next_state[i], all_next_state,
done[i])
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
for i in range(self.update_times):
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, training=True):
"""Returns continous actions values for all action for given state as per current policy.
Params
======
state (array_like): current state
"""
state = torch.from_numpy(state).float().detach().to(device)
# print(state.shape,"act")
self.n_step += 1
epsilon = max((1500 - self.n_step) / 1500, .01)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state)
self.actor_local.train()
if training:
# return np.clip(actions.cpu().data.numpy()+np.random.uniform(-1,1,(2,2))*epsilon,-1,1) #adding noise to action space
r = np.random.random()
if r <= epsilon:
return np.random.uniform(-1, 1, (2, 2))
else:
|
else:
return actions.cpu().data.numpy()
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences
batch_size = all_next_state.shape[0]
all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)
critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(
device)
with torch.no_grad():
Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])
Q_targets = rewards + (gamma * Q_target_next * (1 - dones))
critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)
Q_expected = self.critic_local(critic_local_input, action)
# critic loss
huber_loss = torch.nn.SmoothL1Loss()
loss = huber_loss(Q_expected, Q_targets.detach())
self.optimizer_critic.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.optimizer_critic.step()
# actor loss
action_pr_self = self.actor_local(states)
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()
# critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)
critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)
p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()
self.optimizer_actor.zero_grad()
p_loss.backward()
self.optimizer_actor.step()
# ------------------- update target network ------------------- #
self.TAU = min(5e-1, self.TAU * 1.001)
self.soft_update(self.critic_local, self.critic_target, self.TAU)
self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset_random(self):
for i in range(self.num_agents):
self.noise[i].reset_states()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "all_state", "action", "all_actions", "reward", "next_state",
"all_next_state", "done"])
self.seed = random.seed(seed)
def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):
"""Add a new experience to memory."""
e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
all_states = torch.from_numpy(np.vstack([e.all_state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
all_actions = torch.from_numpy(np.vstack([e.all_actions for e in experiences if e is not None])).float().to(
device)
# actions.requires_grad=True
# print(actions.requires_grad,"grad")
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(
device)
all_next_state = torch.from_numpy(
np.vstack([e.all_next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(
device)
return (states, all_states, actions, all_actions, rewards, next_states, all_next_state, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory) | return np.clip(actions.cpu().data.numpy(), -1, 1) # epsilon greedy policy | conditional_block |
MADDPGAgent.py | import numpy as np
import random
from collections import namedtuple, deque
from MADDPG.Models.MADDPGCritic import Critic
from MADDPG.Models.MADDPGActor import Actor
import torch
import torch.optim as optim
import MADDPG.random_p as rm
from MADDPG.schedule import LinearSchedule
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
# TAU = 1e-3 # for soft update of target parameters
ACTOR_LR = 1e-3 # Actor network learning rate
CRITIC_LR = 1e-4 # Actor network learning rate
UPDATE_EVERY = 20 # how often to update the network (time step)
# UPDATE_TIMES = 5 # how many times to update in one go
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,
weight_decay=1.e-5):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.n_seed = np.random.seed(seed)
self.num_agents = num_agents
self.update_times = update_times
self.n_step = 0
self.TAU = 1e-3
self.noise = []
for i in range(num_agents):
self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))
# critic local and target network (Q-Learning)
self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target.load_state_dict(self.critic_local.state_dict())
# actor local and target network (Policy gradient)
self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target.load_state_dict(self.actor_local.state_dict())
# optimizer for critic and actor network
self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)
self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.a_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
for i in range(self.num_agents):
all_state = np.concatenate((state[i], state[1 - i]))
all_actions = np.concatenate((action[i], action[1 - i]))
all_next_state = np.concatenate((next_state[i], next_state[1 - i]))
self.memory.add(state[i], all_state, action[i], all_actions, reward[i], next_state[i], all_next_state,
done[i])
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
for i in range(self.update_times):
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, training=True):
"""Returns continous actions values for all action for given state as per current policy.
Params
======
state (array_like): current state
"""
state = torch.from_numpy(state).float().detach().to(device)
# print(state.shape,"act")
self.n_step += 1
epsilon = max((1500 - self.n_step) / 1500, .01)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state)
self.actor_local.train()
if training:
# return np.clip(actions.cpu().data.numpy()+np.random.uniform(-1,1,(2,2))*epsilon,-1,1) #adding noise to action space
r = np.random.random()
if r <= epsilon:
return np.random.uniform(-1, 1, (2, 2))
else:
return np.clip(actions.cpu().data.numpy(), -1, 1) # epsilon greedy policy
else:
return actions.cpu().data.numpy()
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences
batch_size = all_next_state.shape[0]
all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)
critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(
device)
with torch.no_grad():
Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])
Q_targets = rewards + (gamma * Q_target_next * (1 - dones))
critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)
Q_expected = self.critic_local(critic_local_input, action)
# critic loss
huber_loss = torch.nn.SmoothL1Loss()
loss = huber_loss(Q_expected, Q_targets.detach())
self.optimizer_critic.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.optimizer_critic.step()
# actor loss
action_pr_self = self.actor_local(states)
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()
# critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)
critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)
p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()
self.optimizer_actor.zero_grad()
p_loss.backward()
self.optimizer_actor.step()
# ------------------- update target network ------------------- #
self.TAU = min(5e-1, self.TAU * 1.001)
self.soft_update(self.critic_local, self.critic_target, self.TAU)
self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset_random(self):
for i in range(self.num_agents):
self.noise[i].reset_states()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""In | def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):
"""Add a new experience to memory."""
e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
all_states = torch.from_numpy(np.vstack([e.all_state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
all_actions = torch.from_numpy(np.vstack([e.all_actions for e in experiences if e is not None])).float().to(
device)
# actions.requires_grad=True
# print(actions.requires_grad,"grad")
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(
device)
all_next_state = torch.from_numpy(
np.vstack([e.all_next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(
device)
return (states, all_states, actions, all_actions, rewards, next_states, all_next_state, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory) | itialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "all_state", "action", "all_actions", "reward", "next_state",
"all_next_state", "done"])
self.seed = random.seed(seed)
| identifier_body |
MADDPGAgent.py | import numpy as np
import random
from collections import namedtuple, deque
from MADDPG.Models.MADDPGCritic import Critic
from MADDPG.Models.MADDPGActor import Actor
import torch
import torch.optim as optim
import MADDPG.random_p as rm
from MADDPG.schedule import LinearSchedule
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 512 # minibatch size
GAMMA = 0.99 # discount factor
# TAU = 1e-3 # for soft update of target parameters
ACTOR_LR = 1e-3 # Actor network learning rate
CRITIC_LR = 1e-4 # Actor network learning rate
UPDATE_EVERY = 20 # how often to update the network (time step)
# UPDATE_TIMES = 5 # how many times to update in one go
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,
weight_decay=1.e-5):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
self.n_seed = np.random.seed(seed)
self.num_agents = num_agents
self.update_times = update_times
self.n_step = 0
self.TAU = 1e-3
self.noise = []
for i in range(num_agents):
self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))
# critic local and target network (Q-Learning)
self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)
self.critic_target.load_state_dict(self.critic_local.state_dict())
# actor local and target network (Policy gradient)
self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)
self.actor_target.load_state_dict(self.actor_local.state_dict())
# optimizer for critic and actor network
self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)
self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
self.a_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
for i in range(self.num_agents):
all_state = np.concatenate((state[i], state[1 - i]))
all_actions = np.concatenate((action[i], action[1 - i]))
all_next_state = np.concatenate((next_state[i], next_state[1 - i]))
self.memory.add(state[i], all_state, action[i], all_actions, reward[i], next_state[i], all_next_state,
done[i])
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
for i in range(self.update_times):
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def | (self, state, training=True):
"""Returns continous actions values for all action for given state as per current policy.
Params
======
state (array_like): current state
"""
state = torch.from_numpy(state).float().detach().to(device)
# print(state.shape,"act")
self.n_step += 1
epsilon = max((1500 - self.n_step) / 1500, .01)
self.actor_local.eval()
with torch.no_grad():
actions = self.actor_local(state)
self.actor_local.train()
if training:
# return np.clip(actions.cpu().data.numpy()+np.random.uniform(-1,1,(2,2))*epsilon,-1,1) #adding noise to action space
r = np.random.random()
if r <= epsilon:
return np.random.uniform(-1, 1, (2, 2))
else:
return np.clip(actions.cpu().data.numpy(), -1, 1) # epsilon greedy policy
else:
return actions.cpu().data.numpy()
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Variable]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, all_state, action, all_actions, rewards, next_state, all_next_state, dones = experiences
batch_size = all_next_state.shape[0]
all_next_actions = self.actor_target(all_next_state.view(batch_size * 2, -1)).view(batch_size, -1)
critic_target_input = torch.cat((all_next_state, all_next_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(
device)
with torch.no_grad():
Q_target_next = self.critic_target(critic_target_input, all_next_actions.view(batch_size * 2, -1)[::2])
Q_targets = rewards + (gamma * Q_target_next * (1 - dones))
critic_local_input = torch.cat((all_state, all_actions.view(batch_size * 2, -1)[1::2]), dim=1).to(device)
Q_expected = self.critic_local(critic_local_input, action)
# critic loss
huber_loss = torch.nn.SmoothL1Loss()
loss = huber_loss(Q_expected, Q_targets.detach())
self.optimizer_critic.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), 1)
self.optimizer_critic.step()
# actor loss
action_pr_self = self.actor_local(states)
action_pr_other = self.actor_local(all_next_state.view(batch_size * 2, -1)[1::2]).detach()
# critic_local_input2=torch.cat((all_state,torch.cat((action_pr_self,action_pr_other),dim=1)),dim=1)
critic_local_input2 = torch.cat((all_state, action_pr_other), dim=1)
p_loss = -self.critic_local(critic_local_input2, action_pr_self).mean()
self.optimizer_actor.zero_grad()
p_loss.backward()
self.optimizer_actor.step()
# ------------------- update target network ------------------- #
self.TAU = min(5e-1, self.TAU * 1.001)
self.soft_update(self.critic_local, self.critic_target, self.TAU)
self.soft_update(self.actor_local, self.actor_target, self.TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset_random(self):
for i in range(self.num_agents):
self.noise[i].reset_states()
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "all_state", "action", "all_actions", "reward", "next_state",
"all_next_state", "done"])
self.seed = random.seed(seed)
def add(self, states, all_state, action, all_actions, reward, next_state, all_next_state, done):
"""Add a new experience to memory."""
e = self.experience(states, all_state, action, all_actions, reward, next_state, all_next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
all_states = torch.from_numpy(np.vstack([e.all_state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
all_actions = torch.from_numpy(np.vstack([e.all_actions for e in experiences if e is not None])).float().to(
device)
# actions.requires_grad=True
# print(actions.requires_grad,"grad")
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(
device)
all_next_state = torch.from_numpy(
np.vstack([e.all_next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(
device)
return (states, all_states, actions, all_actions, rewards, next_states, all_next_state, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory) | act | identifier_name |
server.go | /*
Copyright 2015 Cesanta Software Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"encoding/base64"
"encoding/json" | "fmt"
"math/rand"
"net"
"net/http"
"regexp"
"sort"
"strings"
"time"
"github.com/casbin/casbin/v2"
"github.com/cesanta/glog"
"github.com/docker/distribution/registry/auth/token"
"github.com/cesanta/docker_auth/auth_server/api"
"github.com/cesanta/docker_auth/auth_server/authn"
"github.com/cesanta/docker_auth/auth_server/authz"
)
var (
hostPortRegex = regexp.MustCompile(`^(?:\[(.+)\]:\d+|([^:]+):\d+)$`)
scopeRegex = regexp.MustCompile(`([a-z0-9]+)(\([a-z0-9]+\))?`)
)
type AuthServer struct {
config *Config
authenticators []api.Authenticator
authorizers []api.Authorizer
ga *authn.GoogleAuth
gha *authn.GitHubAuth
oidc *authn.OIDCAuth
glab *authn.GitlabAuth
}
func NewAuthServer(c *Config) (*AuthServer, error) {
as := &AuthServer{
config: c,
authorizers: []api.Authorizer{},
}
if c.ACL != nil {
staticAuthorizer, err := authz.NewACLAuthorizer(c.ACL)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, staticAuthorizer)
}
if c.ACLMongo != nil {
mongoAuthorizer, err := authz.NewACLMongoAuthorizer(c.ACLMongo)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, mongoAuthorizer)
}
if c.ACLXorm != nil {
xormAuthorizer, err := authz.NewACLXormAuthz(c.ACLXorm)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, xormAuthorizer)
}
if c.ExtAuthz != nil {
extAuthorizer := authz.NewExtAuthzAuthorizer(c.ExtAuthz)
as.authorizers = append(as.authorizers, extAuthorizer)
}
if c.Users != nil {
as.authenticators = append(as.authenticators, authn.NewStaticUserAuth(c.Users))
}
if c.ExtAuth != nil {
as.authenticators = append(as.authenticators, authn.NewExtAuth(c.ExtAuth))
}
if c.GoogleAuth != nil {
ga, err := authn.NewGoogleAuth(c.GoogleAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ga)
as.ga = ga
}
if c.GitHubAuth != nil {
gha, err := authn.NewGitHubAuth(c.GitHubAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, gha)
as.gha = gha
}
if c.OIDCAuth != nil {
oidc, err := authn.NewOIDCAuth(c.OIDCAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, oidc)
as.oidc = oidc
}
if c.GitlabAuth != nil {
glab, err := authn.NewGitlabAuth(c.GitlabAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, glab)
as.glab = glab
}
if c.LDAPAuth != nil {
la, err := authn.NewLDAPAuth(c.LDAPAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, la)
}
if c.MongoAuth != nil {
ma, err := authn.NewMongoAuth(c.MongoAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ma)
}
if c.XormAuthn != nil {
xa, err := authn.NewXormAuth(c.XormAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, xa)
}
if c.PluginAuthn != nil {
pluginAuthn, err := authn.NewPluginAuthn(c.PluginAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, pluginAuthn)
}
if c.PluginAuthz != nil {
pluginAuthz, err := authz.NewPluginAuthzAuthorizer(c.PluginAuthz)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, pluginAuthz)
}
if c.CasbinAuthz != nil {
enforcer, err := casbin.NewEnforcer(c.CasbinAuthz.ModelFilePath, c.CasbinAuthz.PolicyFilePath)
if err != nil {
return nil, err
}
casbinAuthz, err := authz.NewCasbinAuthorizer(enforcer)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, casbinAuthz)
}
return as, nil
}
type authRequest struct {
RemoteConnAddr string
RemoteAddr string
RemoteIP net.IP
User string
Password api.PasswordString
Account string
Service string
Scopes []authScope
Labels api.Labels
}
type authScope struct {
Type string
Class string
Name string
Actions []string
}
type authzResult struct {
scope authScope
autorizedActions []string
}
func (ar authRequest) String() string {
return fmt.Sprintf("{%s:%s@%s %s}", ar.User, ar.Password, ar.RemoteAddr, ar.Scopes)
}
func parseRemoteAddr(ra string) net.IP {
hp := hostPortRegex.FindStringSubmatch(ra)
if hp != nil {
if hp[1] != "" {
ra = hp[1]
} else if hp[2] != "" {
ra = hp[2]
}
}
res := net.ParseIP(ra)
return res
}
func parseScope(scope string) (string, string, error) {
parts := scopeRegex.FindStringSubmatch(scope)
if parts == nil {
return "", "", fmt.Errorf("malformed scope request")
}
switch len(parts) {
case 3:
return parts[1], "", nil
case 4:
return parts[1], parts[3], nil
default:
return "", "", fmt.Errorf("malformed scope request")
}
}
func (as *AuthServer) ParseRequest(req *http.Request) (*authRequest, error) {
ar := &authRequest{RemoteConnAddr: req.RemoteAddr, RemoteAddr: req.RemoteAddr}
if as.config.Server.RealIPHeader != "" {
hv := req.Header.Get(as.config.Server.RealIPHeader)
ips := strings.Split(hv, ",")
realIPPos := as.config.Server.RealIPPos
if realIPPos < 0 {
realIPPos = len(ips) + realIPPos
if realIPPos < 0 {
realIPPos = 0
}
}
ar.RemoteAddr = strings.TrimSpace(ips[realIPPos])
glog.V(3).Infof("Conn ip %s, %s: %s, addr: %s", ar.RemoteAddr, as.config.Server.RealIPHeader, hv, ar.RemoteAddr)
if ar.RemoteAddr == "" {
return nil, fmt.Errorf("client address not provided")
}
}
ar.RemoteIP = parseRemoteAddr(ar.RemoteAddr)
if ar.RemoteIP == nil {
return nil, fmt.Errorf("unable to parse remote addr %s", ar.RemoteAddr)
}
user, password, haveBasicAuth := req.BasicAuth()
if haveBasicAuth {
ar.User = user
ar.Password = api.PasswordString(password)
} else if req.Method == "POST" {
// username and password could be part of form data
username := req.FormValue("username")
password := req.FormValue("password")
if username != "" && password != "" {
ar.User = username
ar.Password = api.PasswordString(password)
}
}
ar.Account = req.FormValue("account")
if ar.Account == "" {
ar.Account = ar.User
} else if haveBasicAuth && ar.Account != ar.User {
return nil, fmt.Errorf("user and account are not the same (%q vs %q)", ar.User, ar.Account)
}
ar.Service = req.FormValue("service")
if err := req.ParseForm(); err != nil {
return nil, fmt.Errorf("invalid form value")
}
// https://github.com/docker/distribution/blob/1b9ab303a477ded9bdd3fc97e9119fa8f9e58fca/docs/spec/auth/scope.md#resource-scope-grammar
if req.FormValue("scope") != "" {
for _, scopeValue := range req.Form["scope"] {
for _, scopeStr := range strings.Split(scopeValue, " ") {
parts := strings.Split(scopeStr, ":")
var scope authScope
scopeType, scopeClass, err := parseScope(parts[0])
if err != nil {
return nil, err
}
switch len(parts) {
case 3:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1],
Actions: strings.Split(parts[2], ","),
}
case 4:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1] + ":" + parts[2],
Actions: strings.Split(parts[3], ","),
}
default:
return nil, fmt.Errorf("invalid scope: %q", scopeStr)
}
sort.Strings(scope.Actions)
ar.Scopes = append(ar.Scopes, scope)
}
}
}
return ar, nil
}
func (as *AuthServer) Authenticate(ar *authRequest) (bool, api.Labels, error) {
for i, a := range as.authenticators {
result, labels, err := a.Authenticate(ar.Account, ar.Password)
glog.V(2).Infof("Authn %s %s -> %t, %+v, %v", a.Name(), ar.Account, result, labels, err)
if err != nil {
if err == api.NoMatch {
continue
} else if err == api.WrongPass {
glog.Warningf("Failed authentication with %s: %s", err, ar.Account)
return false, nil, nil
}
err = fmt.Errorf("authn #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", ar, err)
return false, nil, err
}
return result, labels, nil
}
// Deny by default.
glog.Warningf("%s did not match any authn rule", ar)
return false, nil, nil
}
func (as *AuthServer) authorizeScope(ai *api.AuthRequestInfo) ([]string, error) {
for i, a := range as.authorizers {
result, err := a.Authorize(ai)
glog.V(2).Infof("Authz %s %s -> %s, %s", a.Name(), *ai, result, err)
if err != nil {
if err == api.NoMatch {
continue
}
err = fmt.Errorf("authz #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", *ai, err)
return nil, err
}
return result, nil
}
// Deny by default.
glog.Warningf("%s did not match any authz rule", *ai)
return nil, nil
}
func (as *AuthServer) Authorize(ar *authRequest) ([]authzResult, error) {
ares := []authzResult{}
for _, scope := range ar.Scopes {
ai := &api.AuthRequestInfo{
Account: ar.Account,
Type: scope.Type,
Name: scope.Name,
Service: ar.Service,
IP: ar.RemoteIP,
Actions: scope.Actions,
Labels: ar.Labels,
}
actions, err := as.authorizeScope(ai)
if err != nil {
return nil, err
}
ares = append(ares, authzResult{scope: scope, autorizedActions: actions})
}
return ares, nil
}
// https://github.com/docker/distribution/blob/master/docs/spec/auth/token.md#example
func (as *AuthServer) CreateToken(ar *authRequest, ares []authzResult) (string, error) {
now := time.Now().Unix()
tc := &as.config.Token
// Sign something dummy to find out which algorithm is used.
_, sigAlg, err := tc.privateKey.Sign(strings.NewReader("dummy"), 0)
if err != nil {
return "", fmt.Errorf("failed to sign: %s", err)
}
header := token.Header{
Type: "JWT",
SigningAlg: sigAlg,
KeyID: tc.publicKey.KeyID(),
}
headerJSON, err := json.Marshal(header)
if err != nil {
return "", fmt.Errorf("failed to marshal header: %s", err)
}
claims := token.ClaimSet{
Issuer: tc.Issuer,
Subject: ar.Account,
Audience: ar.Service,
NotBefore: now - 10,
IssuedAt: now,
Expiration: now + tc.Expiration,
JWTID: fmt.Sprintf("%d", rand.Int63()),
Access: []*token.ResourceActions{},
}
for _, a := range ares {
ra := &token.ResourceActions{
Type: a.scope.Type,
Name: a.scope.Name,
Actions: a.autorizedActions,
}
if ra.Actions == nil {
ra.Actions = []string{}
}
sort.Strings(ra.Actions)
claims.Access = append(claims.Access, ra)
}
claimsJSON, err := json.Marshal(claims)
if err != nil {
return "", fmt.Errorf("failed to marshal claims: %s", err)
}
payload := fmt.Sprintf("%s%s%s", joseBase64UrlEncode(headerJSON), token.TokenSeparator, joseBase64UrlEncode(claimsJSON))
sig, sigAlg2, err := tc.privateKey.Sign(strings.NewReader(payload), 0)
if err != nil || sigAlg2 != sigAlg {
return "", fmt.Errorf("failed to sign token: %s", err)
}
glog.Infof("New token for %s %+v: %s", *ar, ar.Labels, claimsJSON)
return fmt.Sprintf("%s%s%s", payload, token.TokenSeparator, joseBase64UrlEncode(sig)), nil
}
func (as *AuthServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
glog.V(3).Infof("Request: %+v", req)
path_prefix := as.config.Server.PathPrefix
if as.config.Server.HSTS {
rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains")
}
switch {
case req.URL.Path == path_prefix+"/":
as.doIndex(rw, req)
case req.URL.Path == path_prefix+"/auth":
as.doAuth(rw, req)
case req.URL.Path == path_prefix+"/google_auth" && as.ga != nil:
as.ga.DoGoogleAuth(rw, req)
case req.URL.Path == path_prefix+"/github_auth" && as.gha != nil:
as.gha.DoGitHubAuth(rw, req)
case req.URL.Path == path_prefix+"/oidc_auth" && as.oidc != nil:
as.oidc.DoOIDCAuth(rw, req)
case req.URL.Path == path_prefix+"/gitlab_auth" && as.glab != nil:
as.glab.DoGitlabAuth(rw, req)
default:
http.Error(rw, "Not found", http.StatusNotFound)
return
}
}
// https://developers.google.com/identity/sign-in/web/server-side-flow
func (as *AuthServer) doIndex(rw http.ResponseWriter, req *http.Request) {
switch {
case as.ga != nil:
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(rw, "<h1>%s</h1>\n", as.config.Token.Issuer)
fmt.Fprint(rw, `<p><a href="/google_auth">Login with Google account</a></p>`)
case as.gha != nil:
url := as.config.Server.PathPrefix + "/github_auth"
http.Redirect(rw, req, url, 301)
case as.oidc != nil:
url := as.config.Server.PathPrefix + "/oidc_auth"
http.Redirect(rw, req, url, 301)
case as.glab != nil:
url := as.config.Server.PathPrefix + "/gitlab_auth"
http.Redirect(rw, req, url, 301)
default:
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(rw, "<h1>%s</h1>\n", as.config.Token.Issuer)
}
}
func (as *AuthServer) doAuth(rw http.ResponseWriter, req *http.Request) {
ar, err := as.ParseRequest(req)
ares := []authzResult{}
if err != nil {
glog.Warningf("Bad request: %s", err)
http.Error(rw, fmt.Sprintf("Bad request: %s", err), http.StatusBadRequest)
return
}
glog.V(2).Infof("Auth request: %+v", ar)
{
authnResult, labels, err := as.Authenticate(ar)
if err != nil {
http.Error(rw, fmt.Sprintf("Authentication failed (%s)", err), http.StatusInternalServerError)
return
}
if !authnResult {
glog.Warningf("Auth failed: %s", *ar)
rw.Header()["WWW-Authenticate"] = []string{fmt.Sprintf(`Basic realm="%s"`, as.config.Token.Issuer)}
http.Error(rw, "Auth failed.", http.StatusUnauthorized)
return
}
ar.Labels = labels
}
if len(ar.Scopes) > 0 {
ares, err = as.Authorize(ar)
if err != nil {
http.Error(rw, fmt.Sprintf("Authorization failed (%s)", err), http.StatusInternalServerError)
return
}
} else {
// Authentication-only request ("docker login"), pass through.
}
token, err := as.CreateToken(ar, ares)
if err != nil {
msg := fmt.Sprintf("Failed to generate token %s", err)
http.Error(rw, msg, http.StatusInternalServerError)
glog.Errorf("%s: %s", ar, msg)
return
}
// https://www.oauth.com/oauth2-servers/access-tokens/access-token-response/
// describes that the response should have the token in `access_token`
// https://docs.docker.com/registry/spec/auth/token/#token-response-fields
// the token should also be in `token` to support older clients
result, _ := json.Marshal(&map[string]string{"access_token": token, "token": token})
glog.V(3).Infof("%s", result)
rw.Header().Set("Content-Type", "application/json")
rw.Write(result)
}
func (as *AuthServer) Stop() {
for _, an := range as.authenticators {
an.Stop()
}
for _, az := range as.authorizers {
az.Stop()
}
glog.Infof("Server stopped")
}
// Copy-pasted from libtrust where it is private.
func joseBase64UrlEncode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
} | random_line_split | |
server.go | /*
Copyright 2015 Cesanta Software Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"encoding/base64"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/http"
"regexp"
"sort"
"strings"
"time"
"github.com/casbin/casbin/v2"
"github.com/cesanta/glog"
"github.com/docker/distribution/registry/auth/token"
"github.com/cesanta/docker_auth/auth_server/api"
"github.com/cesanta/docker_auth/auth_server/authn"
"github.com/cesanta/docker_auth/auth_server/authz"
)
var (
hostPortRegex = regexp.MustCompile(`^(?:\[(.+)\]:\d+|([^:]+):\d+)$`)
scopeRegex = regexp.MustCompile(`([a-z0-9]+)(\([a-z0-9]+\))?`)
)
type AuthServer struct {
config *Config
authenticators []api.Authenticator
authorizers []api.Authorizer
ga *authn.GoogleAuth
gha *authn.GitHubAuth
oidc *authn.OIDCAuth
glab *authn.GitlabAuth
}
func NewAuthServer(c *Config) (*AuthServer, error) {
as := &AuthServer{
config: c,
authorizers: []api.Authorizer{},
}
if c.ACL != nil {
staticAuthorizer, err := authz.NewACLAuthorizer(c.ACL)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, staticAuthorizer)
}
if c.ACLMongo != nil {
mongoAuthorizer, err := authz.NewACLMongoAuthorizer(c.ACLMongo)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, mongoAuthorizer)
}
if c.ACLXorm != nil {
xormAuthorizer, err := authz.NewACLXormAuthz(c.ACLXorm)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, xormAuthorizer)
}
if c.ExtAuthz != nil {
extAuthorizer := authz.NewExtAuthzAuthorizer(c.ExtAuthz)
as.authorizers = append(as.authorizers, extAuthorizer)
}
if c.Users != nil {
as.authenticators = append(as.authenticators, authn.NewStaticUserAuth(c.Users))
}
if c.ExtAuth != nil {
as.authenticators = append(as.authenticators, authn.NewExtAuth(c.ExtAuth))
}
if c.GoogleAuth != nil {
ga, err := authn.NewGoogleAuth(c.GoogleAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ga)
as.ga = ga
}
if c.GitHubAuth != nil {
gha, err := authn.NewGitHubAuth(c.GitHubAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, gha)
as.gha = gha
}
if c.OIDCAuth != nil {
oidc, err := authn.NewOIDCAuth(c.OIDCAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, oidc)
as.oidc = oidc
}
if c.GitlabAuth != nil {
glab, err := authn.NewGitlabAuth(c.GitlabAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, glab)
as.glab = glab
}
if c.LDAPAuth != nil {
la, err := authn.NewLDAPAuth(c.LDAPAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, la)
}
if c.MongoAuth != nil {
ma, err := authn.NewMongoAuth(c.MongoAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ma)
}
if c.XormAuthn != nil {
xa, err := authn.NewXormAuth(c.XormAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, xa)
}
if c.PluginAuthn != nil {
pluginAuthn, err := authn.NewPluginAuthn(c.PluginAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, pluginAuthn)
}
if c.PluginAuthz != nil {
pluginAuthz, err := authz.NewPluginAuthzAuthorizer(c.PluginAuthz)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, pluginAuthz)
}
if c.CasbinAuthz != nil {
enforcer, err := casbin.NewEnforcer(c.CasbinAuthz.ModelFilePath, c.CasbinAuthz.PolicyFilePath)
if err != nil {
return nil, err
}
casbinAuthz, err := authz.NewCasbinAuthorizer(enforcer)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, casbinAuthz)
}
return as, nil
}
type authRequest struct {
RemoteConnAddr string
RemoteAddr string
RemoteIP net.IP
User string
Password api.PasswordString
Account string
Service string
Scopes []authScope
Labels api.Labels
}
type authScope struct {
Type string
Class string
Name string
Actions []string
}
type authzResult struct {
scope authScope
autorizedActions []string
}
func (ar authRequest) String() string {
return fmt.Sprintf("{%s:%s@%s %s}", ar.User, ar.Password, ar.RemoteAddr, ar.Scopes)
}
func parseRemoteAddr(ra string) net.IP {
hp := hostPortRegex.FindStringSubmatch(ra)
if hp != nil {
if hp[1] != "" {
ra = hp[1]
} else if hp[2] != "" {
ra = hp[2]
}
}
res := net.ParseIP(ra)
return res
}
func parseScope(scope string) (string, string, error) {
parts := scopeRegex.FindStringSubmatch(scope)
if parts == nil {
return "", "", fmt.Errorf("malformed scope request")
}
switch len(parts) {
case 3:
return parts[1], "", nil
case 4:
return parts[1], parts[3], nil
default:
return "", "", fmt.Errorf("malformed scope request")
}
}
func (as *AuthServer) ParseRequest(req *http.Request) (*authRequest, error) {
ar := &authRequest{RemoteConnAddr: req.RemoteAddr, RemoteAddr: req.RemoteAddr}
if as.config.Server.RealIPHeader != "" {
hv := req.Header.Get(as.config.Server.RealIPHeader)
ips := strings.Split(hv, ",")
realIPPos := as.config.Server.RealIPPos
if realIPPos < 0 {
realIPPos = len(ips) + realIPPos
if realIPPos < 0 {
realIPPos = 0
}
}
ar.RemoteAddr = strings.TrimSpace(ips[realIPPos])
glog.V(3).Infof("Conn ip %s, %s: %s, addr: %s", ar.RemoteAddr, as.config.Server.RealIPHeader, hv, ar.RemoteAddr)
if ar.RemoteAddr == "" {
return nil, fmt.Errorf("client address not provided")
}
}
ar.RemoteIP = parseRemoteAddr(ar.RemoteAddr)
if ar.RemoteIP == nil {
return nil, fmt.Errorf("unable to parse remote addr %s", ar.RemoteAddr)
}
user, password, haveBasicAuth := req.BasicAuth()
if haveBasicAuth {
ar.User = user
ar.Password = api.PasswordString(password)
} else if req.Method == "POST" {
// username and password could be part of form data
username := req.FormValue("username")
password := req.FormValue("password")
if username != "" && password != "" {
ar.User = username
ar.Password = api.PasswordString(password)
}
}
ar.Account = req.FormValue("account")
if ar.Account == "" {
ar.Account = ar.User
} else if haveBasicAuth && ar.Account != ar.User {
return nil, fmt.Errorf("user and account are not the same (%q vs %q)", ar.User, ar.Account)
}
ar.Service = req.FormValue("service")
if err := req.ParseForm(); err != nil {
return nil, fmt.Errorf("invalid form value")
}
// https://github.com/docker/distribution/blob/1b9ab303a477ded9bdd3fc97e9119fa8f9e58fca/docs/spec/auth/scope.md#resource-scope-grammar
if req.FormValue("scope") != "" {
for _, scopeValue := range req.Form["scope"] {
for _, scopeStr := range strings.Split(scopeValue, " ") {
parts := strings.Split(scopeStr, ":")
var scope authScope
scopeType, scopeClass, err := parseScope(parts[0])
if err != nil {
return nil, err
}
switch len(parts) {
case 3:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1],
Actions: strings.Split(parts[2], ","),
}
case 4:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1] + ":" + parts[2],
Actions: strings.Split(parts[3], ","),
}
default:
return nil, fmt.Errorf("invalid scope: %q", scopeStr)
}
sort.Strings(scope.Actions)
ar.Scopes = append(ar.Scopes, scope)
}
}
}
return ar, nil
}
func (as *AuthServer) Authenticate(ar *authRequest) (bool, api.Labels, error) {
for i, a := range as.authenticators {
result, labels, err := a.Authenticate(ar.Account, ar.Password)
glog.V(2).Infof("Authn %s %s -> %t, %+v, %v", a.Name(), ar.Account, result, labels, err)
if err != nil {
if err == api.NoMatch {
continue
} else if err == api.WrongPass {
glog.Warningf("Failed authentication with %s: %s", err, ar.Account)
return false, nil, nil
}
err = fmt.Errorf("authn #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", ar, err)
return false, nil, err
}
return result, labels, nil
}
// Deny by default.
glog.Warningf("%s did not match any authn rule", ar)
return false, nil, nil
}
func (as *AuthServer) authorizeScope(ai *api.AuthRequestInfo) ([]string, error) {
for i, a := range as.authorizers {
result, err := a.Authorize(ai)
glog.V(2).Infof("Authz %s %s -> %s, %s", a.Name(), *ai, result, err)
if err != nil {
if err == api.NoMatch {
continue
}
err = fmt.Errorf("authz #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", *ai, err)
return nil, err
}
return result, nil
}
// Deny by default.
glog.Warningf("%s did not match any authz rule", *ai)
return nil, nil
}
func (as *AuthServer) Authorize(ar *authRequest) ([]authzResult, error) |
// https://github.com/docker/distribution/blob/master/docs/spec/auth/token.md#example
func (as *AuthServer) CreateToken(ar *authRequest, ares []authzResult) (string, error) {
now := time.Now().Unix()
tc := &as.config.Token
// Sign something dummy to find out which algorithm is used.
_, sigAlg, err := tc.privateKey.Sign(strings.NewReader("dummy"), 0)
if err != nil {
return "", fmt.Errorf("failed to sign: %s", err)
}
header := token.Header{
Type: "JWT",
SigningAlg: sigAlg,
KeyID: tc.publicKey.KeyID(),
}
headerJSON, err := json.Marshal(header)
if err != nil {
return "", fmt.Errorf("failed to marshal header: %s", err)
}
claims := token.ClaimSet{
Issuer: tc.Issuer,
Subject: ar.Account,
Audience: ar.Service,
NotBefore: now - 10,
IssuedAt: now,
Expiration: now + tc.Expiration,
JWTID: fmt.Sprintf("%d", rand.Int63()),
Access: []*token.ResourceActions{},
}
for _, a := range ares {
ra := &token.ResourceActions{
Type: a.scope.Type,
Name: a.scope.Name,
Actions: a.autorizedActions,
}
if ra.Actions == nil {
ra.Actions = []string{}
}
sort.Strings(ra.Actions)
claims.Access = append(claims.Access, ra)
}
claimsJSON, err := json.Marshal(claims)
if err != nil {
return "", fmt.Errorf("failed to marshal claims: %s", err)
}
payload := fmt.Sprintf("%s%s%s", joseBase64UrlEncode(headerJSON), token.TokenSeparator, joseBase64UrlEncode(claimsJSON))
sig, sigAlg2, err := tc.privateKey.Sign(strings.NewReader(payload), 0)
if err != nil || sigAlg2 != sigAlg {
return "", fmt.Errorf("failed to sign token: %s", err)
}
glog.Infof("New token for %s %+v: %s", *ar, ar.Labels, claimsJSON)
return fmt.Sprintf("%s%s%s", payload, token.TokenSeparator, joseBase64UrlEncode(sig)), nil
}
func (as *AuthServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
glog.V(3).Infof("Request: %+v", req)
path_prefix := as.config.Server.PathPrefix
if as.config.Server.HSTS {
rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains")
}
switch {
case req.URL.Path == path_prefix+"/":
as.doIndex(rw, req)
case req.URL.Path == path_prefix+"/auth":
as.doAuth(rw, req)
case req.URL.Path == path_prefix+"/google_auth" && as.ga != nil:
as.ga.DoGoogleAuth(rw, req)
case req.URL.Path == path_prefix+"/github_auth" && as.gha != nil:
as.gha.DoGitHubAuth(rw, req)
case req.URL.Path == path_prefix+"/oidc_auth" && as.oidc != nil:
as.oidc.DoOIDCAuth(rw, req)
case req.URL.Path == path_prefix+"/gitlab_auth" && as.glab != nil:
as.glab.DoGitlabAuth(rw, req)
default:
http.Error(rw, "Not found", http.StatusNotFound)
return
}
}
// https://developers.google.com/identity/sign-in/web/server-side-flow
func (as *AuthServer) doIndex(rw http.ResponseWriter, req *http.Request) {
switch {
case as.ga != nil:
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(rw, "<h1>%s</h1>\n", as.config.Token.Issuer)
fmt.Fprint(rw, `<p><a href="/google_auth">Login with Google account</a></p>`)
case as.gha != nil:
url := as.config.Server.PathPrefix + "/github_auth"
http.Redirect(rw, req, url, 301)
case as.oidc != nil:
url := as.config.Server.PathPrefix + "/oidc_auth"
http.Redirect(rw, req, url, 301)
case as.glab != nil:
url := as.config.Server.PathPrefix + "/gitlab_auth"
http.Redirect(rw, req, url, 301)
default:
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(rw, "<h1>%s</h1>\n", as.config.Token.Issuer)
}
}
func (as *AuthServer) doAuth(rw http.ResponseWriter, req *http.Request) {
ar, err := as.ParseRequest(req)
ares := []authzResult{}
if err != nil {
glog.Warningf("Bad request: %s", err)
http.Error(rw, fmt.Sprintf("Bad request: %s", err), http.StatusBadRequest)
return
}
glog.V(2).Infof("Auth request: %+v", ar)
{
authnResult, labels, err := as.Authenticate(ar)
if err != nil {
http.Error(rw, fmt.Sprintf("Authentication failed (%s)", err), http.StatusInternalServerError)
return
}
if !authnResult {
glog.Warningf("Auth failed: %s", *ar)
rw.Header()["WWW-Authenticate"] = []string{fmt.Sprintf(`Basic realm="%s"`, as.config.Token.Issuer)}
http.Error(rw, "Auth failed.", http.StatusUnauthorized)
return
}
ar.Labels = labels
}
if len(ar.Scopes) > 0 {
ares, err = as.Authorize(ar)
if err != nil {
http.Error(rw, fmt.Sprintf("Authorization failed (%s)", err), http.StatusInternalServerError)
return
}
} else {
// Authentication-only request ("docker login"), pass through.
}
token, err := as.CreateToken(ar, ares)
if err != nil {
msg := fmt.Sprintf("Failed to generate token %s", err)
http.Error(rw, msg, http.StatusInternalServerError)
glog.Errorf("%s: %s", ar, msg)
return
}
// https://www.oauth.com/oauth2-servers/access-tokens/access-token-response/
// describes that the response should have the token in `access_token`
// https://docs.docker.com/registry/spec/auth/token/#token-response-fields
// the token should also be in `token` to support older clients
result, _ := json.Marshal(&map[string]string{"access_token": token, "token": token})
glog.V(3).Infof("%s", result)
rw.Header().Set("Content-Type", "application/json")
rw.Write(result)
}
func (as *AuthServer) Stop() {
for _, an := range as.authenticators {
an.Stop()
}
for _, az := range as.authorizers {
az.Stop()
}
glog.Infof("Server stopped")
}
// Copy-pasted from libtrust where it is private.
func joseBase64UrlEncode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
| {
ares := []authzResult{}
for _, scope := range ar.Scopes {
ai := &api.AuthRequestInfo{
Account: ar.Account,
Type: scope.Type,
Name: scope.Name,
Service: ar.Service,
IP: ar.RemoteIP,
Actions: scope.Actions,
Labels: ar.Labels,
}
actions, err := as.authorizeScope(ai)
if err != nil {
return nil, err
}
ares = append(ares, authzResult{scope: scope, autorizedActions: actions})
}
return ares, nil
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.