text stringlengths 1 1.05M |
|---|
#!/bin/sh
# Remove the previous fingerprinted bundles so only the fresh build output
# remains, then run the CRA production build.
# -f: do not fail/complain when no previous build artifacts exist.
rm -f build/static/js/main.*
rm -f build/static/css/main.*
react-scripts build
<gh_stars>1-10
// Generates a dynamic Buildkite pipeline for the services that changed in the
// current commit and uploads it via the buildkite-agent CLI.
const { promisify } = require('util');
const execAsync = promisify(require('child_process').exec);
const {
parseJsonFile,
writeJsonFile,
} = require('./utils');
// CLI arguments: input changes list and output template filename, with defaults.
const [
CHANGES_LIST_FILENAME = 'changes.json',
TEMPLATE_OUTPUT_FILENAME = 'dynamic-template.json',
] = process.argv.slice(2);
// Provided by the Buildkite agent environment.
const {
BUILDKITE_COMMIT,
} = process.env;
// Top-level entry point; errors are handled inside generateDynamicPipeline.
generateDynamicPipeline(BUILDKITE_COMMIT, CHANGES_LIST_FILENAME, TEMPLATE_OUTPUT_FILENAME);
/**
 * Reads the list of changed services, builds a Buildkite pipeline template for
 * them, writes the template to disk, and uploads it to Buildkite.
 * On any failure the error is logged and the process exit code is set to 1
 * (we never throw out of the top level).
 *
 * @param {string} commit - commit SHA the pipeline is generated for
 * @param {string} changesFilename - JSON file containing the changed-service names
 * @param {string} outputFilename - path the generated template is written to
 */
async function generateDynamicPipeline(commit, changesFilename, outputFilename) {
  try {
    const changedServices = await parseJsonFile(changesFilename);
    if (changedServices.length === 0) {
      console.log('no changed services!');
      return;
    }
    const pipelineTemplate = await buildPipelineSteps(commit, changedServices);
    await writeJsonFile(outputFilename, pipelineTemplate);
    await uploadPipelineToBuildkite(outputFilename);
  } catch (error) {
    // Fail the build without throwing: log and flag a non-zero exit code.
    console.error(error);
    process.exitCode = 1;
  }
}
/**
 * Reads the per-service Buildkite step definitions for each common phase and
 * returns them flattened per phase.
 *
 * Fix: the previous version applied `.filter(Boolean)` to the array of
 * Promises produced by `COMMON_PHASES.map(...)`. Promises are always truthy,
 * so the filter was a no-op at best — and had it ever removed an element it
 * would have silently misaligned the positional destructuring below
 * (e.g. deploy steps landing in the `build` slot). It has been removed.
 *
 * @param {string[]} services - changed service directory names
 * @returns {Promise<{build: object[], deployStaging: object[], deployProd: object[]}>}
 */
async function readStepsForChangedServices(services) {
  // TODO: break this method down further
  const COMMON_PHASES = ['build', 'deploy-staging', 'deploy-prod'];
  // Read `<service>/.buildkite/<phase>.json` for every service, in parallel.
  const readServiceStepsForPhase = (phase) => Promise.all(
    services.map((service) => parseJsonFile(`${service}/.buildkite/${phase}.json`))
  );
  const [build, deployStaging, deployProd] = await Promise.all(
    COMMON_PHASES.map(readServiceStepsForPhase)
  );
  // Each parsed file is `{ steps: [...] }`; concatenate all steps of a phase.
  const flattenSteps = (phase) => phase.reduce((acc, { steps = [] }) => acc.concat(steps), []);
  return {
    build: flattenSteps(build),
    deployStaging: flattenSteps(deployStaging),
    deployProd: flattenSteps(deployProd),
  };
}
/**
 * Assembles the full dynamic pipeline: build steps, a wait gate, staging
 * deploys, staging e2e, a manual release block, prod deploys and prod e2e.
 *
 * Fixes: the empty-services guard previously ran AFTER awaiting
 * `readStepsForChangedServices`, so the per-service JSON files were read even
 * when there was nothing to build — the guard now runs first. The returned
 * object is also built fresh instead of via `Object.assign` into
 * `DEFAULT_TEMPLATE`, which mutated the template object.
 *
 * @param {string} commit - commit SHA (currently unused; kept for interface stability)
 * @param {string[]} [services] - changed service names
 * @returns {Promise<{env: object, steps: object[]}>}
 */
async function buildPipelineSteps(commit, services = []) {
  const DEFAULT_TEMPLATE = { env: {}, steps: [] };
  // Bail out before doing any file I/O when there is nothing to build.
  if (!services.length) { return DEFAULT_TEMPLATE; }
  const { build, deployStaging, deployProd } = await readStepsForChangedServices(services);
  const steps = build
    .concat(
      {
        type: "waiter"
      },
      deployStaging,
      {
        type: "waiter"
      },
      {
        "type": "script",
        "name": "e2e-staging :pray:",
        "command": "echo 'e2e'",
      },
      {
        block: "Release :red_button: :dragon:"
      },
      deployProd,
      {
        type: "waiter"
      },
      {
        "type": "script",
        "name": "e2e-prod :pray:",
        "command": "echo 'e2e'",
      }
    );
  // Copy the template so DEFAULT_TEMPLATE itself is never mutated.
  return { ...DEFAULT_TEMPLATE, steps };
}
/**
 * Pipes the generated template into `buildkite-agent pipeline upload`.
 * Fix: the file path is now quoted so a path containing spaces or shell
 * metacharacters cannot break (or inject into) the command line.
 *
 * @param {string} filePath - path to the generated pipeline JSON
 * @returns {Promise<{stdout: string, stderr: string}>}
 */
function uploadPipelineToBuildkite(filePath) {
  return execAsync(`cat "${filePath}" | buildkite-agent pipeline upload`);
}
|
public static int calculateEvenSum(int[] arr) {
int sum = 0;
for (int num : arr) {
if (num % 2 == 0) {
sum += num;
}
}
return sum;
} |
<gh_stars>0
/* Driver for the APA102 serial controllable LED */
#include <libopencm3/stm32/rcc.h>
#include <libopencm3/stm32/gpio.h>
#include <libopencm3/stm32/usart.h>
#include "vf-badge.h"
/* GPIO bank and pins used to drive the APA102 chain. */
#define APA102_PORT GPIOA
#define APA102_PIN_VREG GPIO0 /* gates the 5V regulator powering the LED */
#define APA102_PIN_TX GPIO2   /* data line */
#define APA102_PIN_CLK GPIO4  /* clock line */
/* Synchronous USART is not available on the TSSOP-20 package. */
//#define APA102_USART_RCC RCC_USART2
//#define APA102_USART USART2
/* Color calibration constants (fixed-point scale factors applied per channel
 * to balance the LED's raw channel intensities). */
#define CAL_RED (FIXED_ONE / 3)
#define CAL_GREEN (FIXED_ONE / 4)
#define CAL_BLUE (FIXED_ONE)
/* One-time hardware init for the APA102: powers the 5V regulator, then
 * configures either the synchronous USART (when available) or plain GPIO
 * outputs for bit-banging the clock/data lines. Must run before any
 * apa102_write* call. */
void
apa102_setup(void)
{
    /* Enable the 5V regulator */
    gpio_mode_setup(APA102_PORT, GPIO_MODE_OUTPUT, GPIO_PUPD_NONE, APA102_PIN_VREG);
    gpio_set(APA102_PORT, APA102_PIN_VREG);
#ifdef APA102_USART
    /* Enable clocks for the USART. */
    rcc_periph_clock_enable(APA102_USART_RCC);
    /* Setup GPIO pins for USART control */
    gpio_mode_setup(APA102_PORT, GPIO_MODE_AF, GPIO_PUPD_NONE, APA102_PIN_TX | APA102_PIN_CLK);
    gpio_set_af(APA102_PORT, GPIO_AF4, APA102_PIN_TX | APA102_PIN_CLK);
    /* Setup the USART for synchronous transmit: MSB first, clock enabled,
     * last-bit clock pulse, CPOL/CPHA set for the APA102's sampling edge. */
    usart_set_baudrate(APA102_USART, 100000);
    USART_CR2(APA102_USART) = USART_CR2_MSBFIRST | USART_CR2_LBCL | USART_CR2_CLKEN | USART_CR2_CPOL | USART_CR2_CPHA;
    USART_CR1(APA102_USART) = USART_CR1_TE | USART_CR1_UE;
#else
    /* Setup the clock and data pins as GPIO outputs for bitbashing the APA102. */
    gpio_mode_setup(APA102_PORT, GPIO_MODE_OUTPUT, GPIO_PUPD_NONE, APA102_PIN_TX | APA102_PIN_CLK);
    gpio_set(APA102_PORT, APA102_PIN_TX | APA102_PIN_CLK);
#endif
} /* apa102_setup */
/* Single-cycle delay between the clock-low and clock-high writes below. */
#define APA102_NOPS() __asm volatile("nop")
/* Shift one bit out to the APA102: the first BSRR write drives the clock low
 * and sets the data line to _bit_ in the same register access; after a short
 * delay the second write raises the clock, latching the bit. */
#define APA102_BITBANG(_bit_) \
do { GPIO_BSRR(APA102_PORT) = (APA102_PIN_CLK << 16) | (APA102_PIN_TX << ((_bit_) ? 0 : 16)); \
APA102_NOPS();\
GPIO_BSRR(APA102_PORT) = (APA102_PIN_CLK << 0); \
} while(0)
/* Shifts `len` bytes out to the APA102, MSB first, using either the USART
 * (when configured) or the bit-bang macro. The unrolled per-bit calls keep the
 * bit-bang path branch-free aside from the byte loop. */
static void
apa102_write(const uint8_t *data, unsigned int len)
{
#ifdef APA102_USART
    while (len--) {
        usart_send_blocking(APA102_USART, *data++);
    } /* while */
#else
    while (len--) {
        uint8_t byte = *data++;
        /* MSB-first, one macro invocation per bit. */
        APA102_BITBANG(byte & 0x80);
        APA102_BITBANG(byte & 0x40);
        APA102_BITBANG(byte & 0x20);
        APA102_BITBANG(byte & 0x10);
        APA102_BITBANG(byte & 0x08);
        APA102_BITBANG(byte & 0x04);
        APA102_BITBANG(byte & 0x02);
        APA102_BITBANG(byte & 0x01);
    } /* while */
    /* Leave the data line low when idle. */
    GPIO_BSRR(APA102_PORT) = (APA102_PIN_TX << 16);
#endif
} /* apa102_write */
/* Sends one APA102 frame for a single pixel: 5-byte start frame, a brightness
 * byte (upper 5 bits of `value`, high bits forced to 0b111 per the APA102
 * protocol), the three color channels in B/G/R wire order — each scaled by
 * its CAL_* fixed-point calibration constant when defined — and an end byte.
 * Also powers the 5V regulator down when value == 0 to save power.
 * NOTE(review): a single 0xff end byte is sufficient for one LED; longer
 * chains would need more end-frame clocks — confirm chain length if reused. */
void
apa102_write_rgb(uint8_t red, uint8_t green, uint8_t blue, uint8_t value)
{
    uint8_t data[] = {
        0x00, 0x00, 0x00, 0x00, 0x00, /* start frame */
        (value >> 3) | 0xE0, /* pixel brightness */
#ifdef CAL_BLUE
        FIXED_MUL(blue, CAL_BLUE),
#else
        blue,
#endif
#ifdef CAL_GREEN
        FIXED_MUL(green, CAL_GREEN),
#else
        green,
#endif
#ifdef CAL_RED
        FIXED_MUL(red, CAL_RED),
#else
        red,
#endif
        0xff, /* end frame. */
    };
    /* Only power the regulator while the pixel is actually lit. */
    if (value) {
        gpio_set(APA102_PORT, APA102_PIN_VREG);
    } else {
        gpio_clear(APA102_PORT, APA102_PIN_VREG);
    }
    apa102_write(data, sizeof(data));
} /* apa102_write_rgb */
/* Converts an HSV color to RGB using fixed-point sector math and forwards the
 * result to apa102_write_rgb.
 *   hue        - degrees; values >= 360 are wrapped via the sector modulo
 *   saturation - fixed-point fraction (0..FIXED_ONE assumed — TODO confirm)
 *   value      - pixel brightness, passed through unchanged */
void
apa102_write_hsv(unsigned int hue, uint8_t saturation, uint8_t value)
{
    /* Get sector and fraction of sector. */
    unsigned int sector = hue / 60;
    unsigned int fraction = (hue % 60) * (FIXED_ONE / 60);
    unsigned int p, q, t;
    unsigned int red, blue, green;
    /* Standard HSV intermediates in fixed point. */
    p = FIXED_ONE - saturation;
    q = FIXED_ONE - FIXED_MUL(saturation, fraction);
    /* t computed via the algebraic identity below to avoid a second multiply:
     * t = 1 - s*(1-f) = 1 + 1 - s - (1 - s*f) = 2 - s - q. */
    //t = FIXED_ONE - FIXED_MUL(saturation, (FIXED_ONE - fraction));
    t = FIXED_ONE + FIXED_ONE - saturation - q;
    /* Color sector determines the final result. */
    if (sector >= 6) {
        sector %= 6;
    }
    switch (sector) {
    case 0:
        red = FIXED_MAX;
        green = t;
        blue = p;
        break;
    case 1:
        red = q;
        green = FIXED_MAX;
        blue = p;
        break;
    case 2:
        red = p;
        green = FIXED_MAX;
        blue = t;
        break;
    case 3:
        red = p;
        green = q;
        blue = FIXED_MAX;
        break;
    case 4:
        red = t;
        green = p;
        blue = FIXED_MAX;
        break;
    case 5:
    default:
        red = FIXED_MAX;
        green = p;
        blue = q;
        break;
    } /* switch */
    /* Clamp before narrowing to the uint8_t channel arguments. */
    if (red > FIXED_MAX) red = FIXED_MAX;
    if (green > FIXED_MAX) green = FIXED_MAX;
    if (blue > FIXED_MAX) blue = FIXED_MAX;
    /* Output the RGB value to the LED. */
    apa102_write_rgb(red, green, blue, value);
} /* apa102_write_hsv */
|
#!/bin/bash
# Launches the DistMQ broker JVM.
#
# Fixes over the previous version:
#  - The classpath was passed twice (inside $JAVA_OPTS via $JAVA_CP and again
#    on the command line), and the unquoted `lib/*` in the expansion was
#    glob-expanded by the shell, corrupting the -cp argument. The classpath is
#    now quoted so the JVM performs the wildcard expansion itself, and is
#    passed exactly once.
#  - The GC log directory is created up front so -Xloggc does not fail on a
#    fresh checkout.
ulimit -n 65535
JMX_PORT=18501
GC_LOG=./logs/gc.log
mkdir -p "$(dirname "$GC_LOG")"
#jvm config
JAVA_BASE_OPTS=" -Djava.awt.headless=true -Dfile.encoding=UTF-8 "
JAVA_JMX_OPTS=" -Dcom.sun.management.jmxremote \
 -Dcom.sun.management.jmxremote.port=$JMX_PORT \
 -Dcom.sun.management.jmxremote.ssl=false \
 -Dcom.sun.management.jmxremote.authenticate=false "
JAVA_MEM_OPTS=" -server -Xms2g -Xmx2g -Xmn600m -XX:PermSize=128m \
 -XX:MaxPermSize=128m -Xss256K -XX:+DisableExplicitGC \
 -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled \
 -XX:+UseCMSCompactAtFullCollection -XX:LargePageSizeInBytes=128m \
 -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly \
 -XX:CMSInitiatingOccupancyFraction=70 "
JAVA_GC_OPTS=" -verbose:gc -Xloggc:$GC_LOG \
 -XX:+PrintGCDetails -XX:+PrintGCDateStamps "
# Quoted so the shell does not expand lib/*; the JVM expands cp wildcards.
CLASSPATH="conf:lib/*"
JAVA_OPTS=" $JAVA_BASE_OPTS $JAVA_MEM_OPTS $JAVA_JMX_OPTS $JAVA_GC_OPTS "
RUNJAVA="$JAVA_HOME/bin/java"
exec "$RUNJAVA" $JAVA_OPTS -cp "$CLASSPATH" com.github.wenweihu86.distmq.broker.BrokerMain
|
#!/bin/bash
# Optimizes the documentation's SVG and PNG assets in place.
#
# Fixes over the previous version:
#  - The shebang was #!/bin/sh while the script relies on bash-only features
#    (arrays, `[[ ]]`, pushd/popd); it now declares bash.
#  - `[ -z $MISSING_UTILS ]` only inspected the (unquoted) first array element;
#    the emptiness check now uses the array length.
set -e
cd "$(dirname "$0")/.."
MISSING_UTILS=()
if ! (which svgo >/dev/null); then
  echo 'svgo not found in $PATH' >&2
  MISSING_UTILS+=( svgo )
fi
if ! (which pngcrush >/dev/null); then
  echo 'pngcrush not found in $PATH' >&2
  MISSING_UTILS+=( pngcrush )
fi
if ! (which convert >/dev/null); then
  echo 'convert not found in $PATH' >&2
  MISSING_UTILS+=( imagemagick )
fi
if [ ${#MISSING_UTILS[@]} -gt 0 ]; then
  if [[ "$(uname)" = *Darwin* ]]; then
    echo 'try `brew install '"${MISSING_UTILS[@]}"'` on mac'
  fi
  exit 1
fi
pushd res >/dev/null
# crunch /docs/res/*.svg
for f in *.svg; do
  svgo --multipass -q "$f" &
done
# crunch /docs/res/icons/*.svg
for f in icons/*.svg; do
  svgo --multipass -q "$f" &
done
# crunch /docs/res/*.png
for f in *.png; do
  TMPNAME=.$f.tmp
  (pngcrush -q "$f" "$TMPNAME" && mv -f "$TMPNAME" "$f") &
done
popd >/dev/null
pushd samples/img >/dev/null
# crunch /docs/samples/img/*.png
for f in *.png; do
  TMPNAME=.$f.tmp
  if (echo "$f" | grep -q 'thumb'); then
    # Thumbnails are additionally flattened and color-reduced first.
    (convert "$f" -flatten -background white -colors 16 "$TMPNAME" && pngcrush -q "$TMPNAME" "$f") &
  else
    (pngcrush -q "$f" "$TMPNAME" && mv -f "$TMPNAME" "$f") &
  fi
done
popd >/dev/null
pushd samples/icons >/dev/null
# crunch /docs/samples/icons/*.svg
for f in *.svg; do
  svgo --multipass -q "$f" &
done
popd >/dev/null
# wait for all background processes to exit
wait
|
#include <iostream>
#include <memory>
#include <random>
#include <string>
#include <Eigen/Sparse>
#include "GeometricMultigridOperators.h"
#include "GeometricMultigridPoissonSolver.h"
#include "InitialMultigridTestDomains.h"
#include "Renderer.h"
#include "ScalarGrid.h"
#include "Transform.h"
#include "UniformGrid.h"
#include "Utilities.h"
using namespace FluidSim2D::RenderTools;
using namespace FluidSim2D::SimTools;
// Renderer kept at file scope so the GLUT-style run loop in main can reach it.
std::unique_ptr<Renderer> renderer;
// Test configuration: base grid resolution and which domain variant to build.
static constexpr int gridSize = 512;
static constexpr bool useComplexDomain = true;
static constexpr bool useSolidSphere = true;
// Symmetry test harness for the geometric multigrid Poisson solver.
// Each scoped section below verifies that an operator M is symmetric by
// checking <M a, b> == <M b, a> for random right-hand sides a, b:
// the Jacobi smoothers, the coarse direct solve, the restriction/prolongation
// transfer pair, a single two-level correction, and full MG v-cycles.
// Finally the domain labels are drawn so the setup can be inspected visually.
int main(int argc, char** argv)
{
    using namespace GeometricMultigridOperators;
    using StoreReal = double;
    using SolveReal = double;
    using Vector = std::conditional<std::is_same<SolveReal, float>::value, Eigen::VectorXf, Eigen::VectorXd>::type;
    UniformGrid<CellLabels> domainCellLabels;
    VectorGrid<StoreReal> boundaryWeights;
    int mgLevels;
    {
        UniformGrid<CellLabels> baseDomainCellLabels;
        VectorGrid<StoreReal> baseBoundaryWeights;
        // Complex domain set up
        if (useComplexDomain)
            buildComplexDomain(baseDomainCellLabels,
                baseBoundaryWeights,
                gridSize,
                useSolidSphere);
        // Simple domain set up
        else
            buildSimpleDomain(baseDomainCellLabels,
                baseBoundaryWeights,
                gridSize,
                1 /*dirichlet band*/);
        // Build expanded domain
        std::pair<Vec2i, int> mgSettings = buildExpandedDomain(domainCellLabels, boundaryWeights, baseDomainCellLabels, baseBoundaryWeights);
        mgLevels = mgSettings.second;
    }
    SolveReal dx = boundaryWeights.dx();
    // Two independent random right-hand sides used by every symmetry check.
    UniformGrid<StoreReal> rhsA(domainCellLabels.size(), 0);
    UniformGrid<StoreReal> rhsB(domainCellLabels.size(), 0);
    tbb::parallel_for(tbb::blocked_range<int>(0, domainCellLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int> &range)
    {
        std::default_random_engine generator;
        std::uniform_real_distribution<StoreReal> distribution(0, 1);
        for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
        {
            Vec2i cell = domainCellLabels.unflatten(cellIndex);
            if (domainCellLabels(cell) == CellLabels::INTERIOR_CELL ||
                domainCellLabels(cell) == CellLabels::BOUNDARY_CELL)
            {
                rhsA(cell) = distribution(generator);
                rhsB(cell) = distribution(generator);
            }
        }
    });
    Transform xform(dx, Vec2f(0));
    std::cout.precision(10);
    {
        // --- Jacobi smoother symmetry test ---
        UniformGrid<StoreReal> solutionA(domainCellLabels.size(), 0);
        UniformGrid<StoreReal> solutionB(domainCellLabels.size(), 0);
        std::vector<Vec2i> boundaryCells = buildBoundaryCells(domainCellLabels, 3);
        // Test Jacobi symmetry: boundary-interior-boundary smoothing applied
        // identically to both right-hand sides.
        boundaryJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, boundaryCells, dx, &boundaryWeights);
        boundaryJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, boundaryCells, dx, &boundaryWeights);
        interiorJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, dx, &boundaryWeights);
        interiorJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, dx, &boundaryWeights);
        boundaryJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, boundaryCells, dx, &boundaryWeights);
        boundaryJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, boundaryCells, dx, &boundaryWeights);
        SolveReal dotA = dotProduct<SolveReal>(solutionA, rhsB, domainCellLabels);
        SolveReal dotB = dotProduct<SolveReal>(solutionB, rhsA, domainCellLabels);
        std::cout << "Jacobi smoother symmetry test: " << dotA << ", " << dotB << std::endl;
        assert(fabs(dotA - dotB) / fabs(std::max(dotA, dotB)) < 1E-10);
    }
    {
        // Test direct solve symmetry
        Eigen::SimplicialCholesky<Eigen::SparseMatrix<SolveReal>> myCoarseSolver;
        Eigen::SparseMatrix<SolveReal> sparseMatrix;
        // Pre-build matrix at the coarsest level
        int interiorCellCount = 0;
        UniformGrid<int> directSolverIndices(domainCellLabels.size(), -1);
        {
            // Assign a dense solver index to every interior/boundary cell.
            forEachVoxelRange(Vec2i(0), domainCellLabels.size(), [&](const Vec2i &cell)
            {
                if (domainCellLabels(cell) == CellLabels::INTERIOR_CELL ||
                    domainCellLabels(cell) == CellLabels::BOUNDARY_CELL)
                    directSolverIndices(cell) = interiorCellCount++;
            });
            // Build rows
            std::vector<Eigen::Triplet<SolveReal>> sparseElements;
            SolveReal gridScale = 1. / sqr(dx);
            forEachVoxelRange(Vec2i(0), domainCellLabels.size(), [&](const Vec2i &cell)
            {
                if (domainCellLabels(cell) == CellLabels::INTERIOR_CELL)
                {
                    // Interior cells: full 5-point stencil, all face weights are 1.
                    int index = directSolverIndices(cell);
                    assert(index >= 0);
                    for (int axis : {0, 1})
                        for (int direction : {0, 1})
                        {
                            Vec2i adjacentCell = cellToCell(cell, axis, direction);
                            assert(domainCellLabels(adjacentCell) == CellLabels::INTERIOR_CELL ||
                                domainCellLabels(adjacentCell) == CellLabels::BOUNDARY_CELL);
                            Vec2i face = cellToFace(cell, axis, direction);
                            assert(boundaryWeights(face, axis) == 1);
                            int adjacentIndex = directSolverIndices(adjacentCell);
                            assert(adjacentIndex >= 0);
                            sparseElements.emplace_back(index, adjacentIndex, -gridScale);
                        }
                    sparseElements.emplace_back(index, index, 4. * gridScale);
                }
                else if (domainCellLabels(cell) == CellLabels::BOUNDARY_CELL)
                {
                    // Boundary cells: per-face weights; Dirichlet neighbours only
                    // contribute to the diagonal, exterior faces contribute nothing.
                    int index = directSolverIndices(cell);
                    assert(index >= 0);
                    SolveReal diagonal = 0;
                    for (int axis : {0, 1})
                        for (int direction : {0, 1})
                        {
                            Vec2i adjacentCell = cellToCell(cell, axis, direction);
                            if (domainCellLabels(adjacentCell) == CellLabels::INTERIOR_CELL)
                            {
                                int adjacentIndex = directSolverIndices(adjacentCell);
                                assert(adjacentIndex >= 0);
                                Vec2i face = cellToFace(cell, axis, direction);
                                assert(boundaryWeights(face, axis) == 1);
                                sparseElements.emplace_back(index, adjacentIndex, -gridScale);
                                ++diagonal;
                            }
                            else if (domainCellLabels(adjacentCell) == CellLabels::BOUNDARY_CELL)
                            {
                                int adjacentIndex = directSolverIndices(adjacentCell);
                                assert(adjacentIndex >= 0);
                                Vec2i face = cellToFace(cell, axis, direction);
                                SolveReal weight = boundaryWeights(face, axis);
                                sparseElements.emplace_back(index, adjacentIndex, -gridScale * weight);
                                diagonal += weight;
                            }
                            else if (domainCellLabels(adjacentCell) == CellLabels::DIRICHLET_CELL)
                            {
                                int adjacentIndex = directSolverIndices(adjacentCell);
                                assert(adjacentIndex == -1);
                                Vec2i face = cellToFace(cell, axis, direction);
                                SolveReal weight = boundaryWeights(face, axis);
                                diagonal += weight;
                            }
                            else
                            {
                                assert(domainCellLabels(adjacentCell) == CellLabels::EXTERIOR_CELL);
                                int adjacentIndex = directSolverIndices(adjacentCell);
                                assert(adjacentIndex == -1);
                                Vec2i face = cellToFace(cell, axis, direction);
                                assert(boundaryWeights(face, axis) == 0);
                            }
                        }
                    sparseElements.emplace_back(index, index, gridScale * diagonal);
                }
            });
            // Solve system
            sparseMatrix = Eigen::SparseMatrix<SolveReal>(interiorCellCount, interiorCellCount);
            sparseMatrix.setFromTriplets(sparseElements.begin(), sparseElements.end());
            sparseMatrix.makeCompressed();
            myCoarseSolver.compute(sparseMatrix);
            assert(myCoarseSolver.info() == Eigen::Success);
        }
        UniformGrid<StoreReal> solutionA(domainCellLabels.size(), 0);
        {
            Vector coarseRHSVector = Vector::Zero(interiorCellCount);
            // Copy to Eigen and direct solve
            tbb::parallel_for(tbb::blocked_range<int>(0, domainCellLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
            {
                for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
                {
                    Vec2i cell = domainCellLabels.unflatten(cellIndex);
                    if (domainCellLabels(cell) == CellLabels::INTERIOR_CELL ||
                        domainCellLabels(cell) == CellLabels::BOUNDARY_CELL)
                    {
                        int index = directSolverIndices(cell);
                        assert(index >= 0);
                        coarseRHSVector(index) = rhsA(cell);
                    }
                }
            });
            Vector directSolution = myCoarseSolver.solve(coarseRHSVector);
            // Copy solution back
            tbb::parallel_for(tbb::blocked_range<int>(0, domainCellLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
            {
                for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
                {
                    Vec2i cell = domainCellLabels.unflatten(cellIndex);
                    if (domainCellLabels(cell) == CellLabels::INTERIOR_CELL ||
                        domainCellLabels(cell) == CellLabels::BOUNDARY_CELL)
                    {
                        int index = directSolverIndices(cell);
                        assert(index >= 0);
                        solutionA(cell) = directSolution(index);
                    }
                }
            });
        }
        UniformGrid<StoreReal> solutionB(domainCellLabels.size(), 0);
        {
            Vector coarseRHSVector = Vector::Zero(interiorCellCount);
            // Copy to Eigen and direct solve
            tbb::parallel_for(tbb::blocked_range<int>(0, domainCellLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
            {
                for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
                {
                    Vec2i cell = domainCellLabels.unflatten(cellIndex);
                    if (domainCellLabels(cell) == CellLabels::INTERIOR_CELL ||
                        domainCellLabels(cell) == CellLabels::BOUNDARY_CELL)
                    {
                        int index = directSolverIndices(cell);
                        assert(index >= 0);
                        coarseRHSVector(index) = rhsB(cell);
                    }
                }
            });
            Vector directSolution = myCoarseSolver.solve(coarseRHSVector);
            // Copy solution back
            tbb::parallel_for(tbb::blocked_range<int>(0, domainCellLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
            {
                for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
                {
                    Vec2i cell = domainCellLabels.unflatten(cellIndex);
                    if (domainCellLabels(cell) == CellLabels::INTERIOR_CELL ||
                        domainCellLabels(cell) == CellLabels::BOUNDARY_CELL)
                    {
                        int index = directSolverIndices(cell);
                        assert(index >= 0);
                        solutionB(cell) = directSolution(index);
                    }
                }
            });
        }
        // Compute dot products
        SolveReal dotA = dotProduct<SolveReal>(solutionA, rhsB, domainCellLabels);
        SolveReal dotB = dotProduct<SolveReal>(solutionB, rhsA, domainCellLabels);
        std::cout << "Direct solver symmetry test: " << dotA << ", " << dotB << std::endl;
        assert(fabs(dotA - dotB) / fabs(std::max(dotA, dotB)) < 1E-10);
    }
    {
        // Test down and up sampling
        UniformGrid<CellLabels> coarseDomainLabels = buildCoarseCellLabels(domainCellLabels);
        assert(unitTestBoundaryCells<StoreReal>(coarseDomainLabels) && unitTestBoundaryCells<StoreReal>(domainCellLabels, &boundaryWeights));
        assert(unitTestExteriorCells(coarseDomainLabels) && unitTestExteriorCells(domainCellLabels));
        assert(unitTestCoarsening(coarseDomainLabels, domainCellLabels));
        UniformGrid<StoreReal> coarseRhs(coarseDomainLabels.size(), 0);
        UniformGrid<StoreReal> solutionA(domainCellLabels.size(), 0);
        {
            // Restrict then prolong rhsA: the composed transfer must be symmetric.
            downsample<SolveReal>(coarseRhs, rhsA, coarseDomainLabels, domainCellLabels);
            upsampleAndAdd<SolveReal>(solutionA, coarseRhs, domainCellLabels, coarseDomainLabels);
        }
        UniformGrid<StoreReal> solutionB(domainCellLabels.size(), 0);
        {
            downsample<SolveReal>(coarseRhs, rhsB, coarseDomainLabels, domainCellLabels);
            upsampleAndAdd<SolveReal>(solutionB, coarseRhs, domainCellLabels, coarseDomainLabels);
        }
        // Compute dot products
        SolveReal dotA = dotProduct<SolveReal>(solutionA, rhsB, domainCellLabels);
        SolveReal dotB = dotProduct<SolveReal>(solutionB, rhsA, domainCellLabels);
        std::cout << "Coarse transfer symmetry test: " << dotA << ", " << dotB << std::endl;
        assert(fabs(dotA - dotB) / fabs(std::max(dotA, dotB)) < 1E-10);
    }
    {
        // Test single level correction
        UniformGrid<CellLabels> coarseDomainLabels = buildCoarseCellLabels(domainCellLabels);
        assert(unitTestBoundaryCells<StoreReal>(coarseDomainLabels) && unitTestBoundaryCells<StoreReal>(domainCellLabels, &boundaryWeights));
        assert(unitTestExteriorCells(coarseDomainLabels) && unitTestExteriorCells(domainCellLabels));
        assert(unitTestCoarsening(coarseDomainLabels, domainCellLabels));
        Eigen::SimplicialCholesky<Eigen::SparseMatrix<SolveReal>> myCoarseSolver;
        Eigen::SparseMatrix<SolveReal> sparseMatrix;
        // Pre-build matrix at the coarsest level
        int interiorCellCount = 0;
        UniformGrid<int> directSolverIndices(coarseDomainLabels.size(), -1);
        {
            forEachVoxelRange(Vec2i(0), coarseDomainLabels.size(), [&](const Vec2i &cell)
            {
                if (coarseDomainLabels(cell) == CellLabels::INTERIOR_CELL ||
                    coarseDomainLabels(cell) == CellLabels::BOUNDARY_CELL)
                    directSolverIndices(cell) = interiorCellCount++;
            });
            // Build rows
            std::vector<Eigen::Triplet<SolveReal>> sparseElements;
            // Coarse level spacing is 2*dx.
            SolveReal gridScale = 1. / sqr(2. * dx);
            forEachVoxelRange(Vec2i(0), coarseDomainLabels.size(), [&](const Vec2i &cell)
            {
                if (coarseDomainLabels(cell) == CellLabels::INTERIOR_CELL)
                {
                    int index = directSolverIndices(cell);
                    assert(index >= 0);
                    for (int axis : {0, 1})
                        for (int direction : {0, 1})
                        {
                            Vec2i adjacentCell = cellToCell(cell, axis, direction);
                            auto adjacentLabels = coarseDomainLabels(adjacentCell);
                            assert(adjacentLabels == CellLabels::INTERIOR_CELL ||
                                adjacentLabels == CellLabels::BOUNDARY_CELL);
                            int adjacentIndex = directSolverIndices(adjacentCell);
                            assert(adjacentIndex >= 0);
                            sparseElements.emplace_back(index, adjacentIndex, -gridScale);
                        }
                    sparseElements.emplace_back(index, index, 4. * gridScale);
                }
                else if (coarseDomainLabels(cell) == CellLabels::BOUNDARY_CELL)
                {
                    SolveReal diagonal = 0;
                    int index = directSolverIndices(cell);
                    assert(index >= 0);
                    for (int axis : {0, 1})
                        for (int direction : {0, 1})
                        {
                            Vec2i adjacentCell = cellToCell(cell, axis, direction);
                            auto cellLabels = coarseDomainLabels(adjacentCell);
                            if (cellLabels == CellLabels::INTERIOR_CELL ||
                                cellLabels == CellLabels::BOUNDARY_CELL)
                            {
                                int adjacentIndex = directSolverIndices(adjacentCell);
                                assert(adjacentIndex >= 0);
                                sparseElements.emplace_back(index, adjacentIndex, -gridScale);
                                ++diagonal;
                            }
                            else if (cellLabels == CellLabels::DIRICHLET_CELL)
                                ++diagonal;
                        }
                    sparseElements.emplace_back(index, index, diagonal * gridScale);
                }
            });
            sparseMatrix = Eigen::SparseMatrix<SolveReal>(interiorCellCount, interiorCellCount);
            sparseMatrix.setFromTriplets(sparseElements.begin(), sparseElements.end());
            sparseMatrix.makeCompressed();
            myCoarseSolver.compute(sparseMatrix);
            assert(myCoarseSolver.info() == Eigen::Success);
        }
        // Transfer rhs to coarse rhs as if it was a residual with a zero initial guess
        UniformGrid<StoreReal> solutionA(domainCellLabels.size(), 0);
        {
            // Pre-smooth to get an initial guess
            std::vector<Vec2i> boundaryCells = buildBoundaryCells(domainCellLabels, 3);
            // Test Jacobi symmetry
            for (int iteration = 0; iteration < 3; ++iteration)
                boundaryJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, boundaryCells, dx, &boundaryWeights);
            interiorJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, dx, &boundaryWeights);
            for (int iteration = 0; iteration < 3; ++iteration)
                boundaryJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, boundaryCells, dx, &boundaryWeights);
            // Compute new residual
            UniformGrid<StoreReal> residualA(domainCellLabels.size(), 0);
            computePoissonResidual<SolveReal>(residualA, solutionA, rhsA, domainCellLabels, dx, &boundaryWeights);
            UniformGrid<StoreReal> coarseRhs(coarseDomainLabels.size(), 0);
            downsample<SolveReal>(coarseRhs, residualA, coarseDomainLabels, domainCellLabels);
            Vector coarseRHSVector = Vector::Zero(interiorCellCount);
            // Copy to Eigen and direct solve
            tbb::parallel_for(tbb::blocked_range<int>(0, coarseDomainLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
            {
                for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
                {
                    Vec2i cell = coarseDomainLabels.unflatten(cellIndex);
                    if (coarseDomainLabels(cell) == CellLabels::INTERIOR_CELL ||
                        coarseDomainLabels(cell) == CellLabels::BOUNDARY_CELL)
                    {
                        int index = directSolverIndices(cell);
                        assert(index >= 0);
                        coarseRHSVector(index) = coarseRhs(cell);
                    }
                }
            });
            UniformGrid<StoreReal> coarseSolution(coarseDomainLabels.size(), 0);
            Vector directSolution = myCoarseSolver.solve(coarseRHSVector);
            // Copy solution back
            tbb::parallel_for(tbb::blocked_range<int>(0, coarseDomainLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
            {
                for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
                {
                    Vec2i cell = coarseDomainLabels.unflatten(cellIndex);
                    if (coarseDomainLabels(cell) == CellLabels::INTERIOR_CELL ||
                        coarseDomainLabels(cell) == CellLabels::BOUNDARY_CELL)
                    {
                        int index = directSolverIndices(cell);
                        assert(index >= 0);
                        coarseSolution(cell) = directSolution(index);
                    }
                }
            });
            // Prolong the coarse correction and post-smooth (mirror of pre-smooth).
            upsampleAndAdd<SolveReal>(solutionA, coarseSolution, domainCellLabels, coarseDomainLabels);
            for (int iteration = 0; iteration < 3; ++iteration)
                boundaryJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, boundaryCells, dx, &boundaryWeights);
            interiorJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, dx, &boundaryWeights);
            for (int iteration = 0; iteration < 3; ++iteration)
                boundaryJacobiPoissonSmoother<SolveReal>(solutionA, rhsA, domainCellLabels, boundaryCells, dx, &boundaryWeights);
        }
        UniformGrid<StoreReal> solutionB(domainCellLabels.size(), 0);
        {
            // Pre-smooth to get an initial guess
            std::vector<Vec2i> boundaryCells = buildBoundaryCells(domainCellLabels, 3);
            // Test Jacobi symmetry
            for (int iteration = 0; iteration < 3; ++iteration)
                boundaryJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, boundaryCells, dx, &boundaryWeights);
            interiorJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, dx, &boundaryWeights);
            for (int iteration = 0; iteration < 3; ++iteration)
                boundaryJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, boundaryCells, dx, &boundaryWeights);
            // Compute new residual
            UniformGrid<StoreReal> residualB(domainCellLabels.size(), 0);
            computePoissonResidual<SolveReal>(residualB, solutionB, rhsB, domainCellLabels, dx, &boundaryWeights);
            UniformGrid<StoreReal> coarseRhs(coarseDomainLabels.size(), 0);
            downsample<SolveReal>(coarseRhs, residualB, coarseDomainLabels, domainCellLabels);
            Vector coarseRHSVector = Vector::Zero(interiorCellCount);
            // Copy to Eigen and direct solve
            tbb::parallel_for(tbb::blocked_range<int>(0, coarseDomainLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
            {
                for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
                {
                    Vec2i cell = coarseDomainLabels.unflatten(cellIndex);
                    if (coarseDomainLabels(cell) == CellLabels::INTERIOR_CELL ||
                        coarseDomainLabels(cell) == CellLabels::BOUNDARY_CELL)
                    {
                        int index = directSolverIndices(cell);
                        assert(index >= 0);
                        coarseRHSVector(index) = coarseRhs(cell);
                    }
                }
            });
            UniformGrid<StoreReal> coarseSolution(coarseDomainLabels.size(), 0);
            Vector directSolution = myCoarseSolver.solve(coarseRHSVector);
            // Copy solution back
            tbb::parallel_for(tbb::blocked_range<int>(0, coarseDomainLabels.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
            {
                for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
                {
                    Vec2i cell = coarseDomainLabels.unflatten(cellIndex);
                    if (coarseDomainLabels(cell) == CellLabels::INTERIOR_CELL ||
                        coarseDomainLabels(cell) == CellLabels::BOUNDARY_CELL)
                    {
                        int index = directSolverIndices(cell);
                        assert(index >= 0);
                        coarseSolution(cell) = directSolution(index);
                    }
                }
            });
            upsampleAndAdd<SolveReal>(solutionB, coarseSolution, domainCellLabels, coarseDomainLabels);
            for (int iteration = 0; iteration < 3; ++iteration)
                boundaryJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, boundaryCells, dx, &boundaryWeights);
            interiorJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, dx, &boundaryWeights);
            for (int iteration = 0; iteration < 3; ++iteration)
                boundaryJacobiPoissonSmoother<SolveReal>(solutionB, rhsB, domainCellLabels, boundaryCells, dx, &boundaryWeights);
        }
        SolveReal dotA = dotProduct<SolveReal>(solutionA, rhsB, domainCellLabels);
        SolveReal dotB = dotProduct<SolveReal>(solutionB, rhsA, domainCellLabels);
        std::cout << "One level correction symmetry: " << dotA << ", " << dotB << std::endl;
        assert(fabs(dotA - dotB) / fabs(std::max(dotA, dotB)) < 1E-10);
    }
    {
        // Pre-build multigrid preconditioner
        GeometricMultigridPoissonSolver mgSolver(domainCellLabels, boundaryWeights, mgLevels, dx);
        // Four v-cycles per side; subsequent cycles reuse the running solution.
        UniformGrid<StoreReal> solutionA(domainCellLabels.size(), 0);
        mgSolver.applyMGVCycle(solutionA, rhsA);
        mgSolver.applyMGVCycle(solutionA, rhsA, true);
        mgSolver.applyMGVCycle(solutionA, rhsA, true);
        mgSolver.applyMGVCycle(solutionA, rhsA, true);
        UniformGrid<StoreReal> solutionB(domainCellLabels.size(), 0);
        mgSolver.applyMGVCycle(solutionB, rhsB);
        mgSolver.applyMGVCycle(solutionB, rhsB, true);
        mgSolver.applyMGVCycle(solutionB, rhsB, true);
        mgSolver.applyMGVCycle(solutionB, rhsB, true);
        SolveReal dotA = dotProduct<SolveReal>(solutionA, rhsB, domainCellLabels);
        SolveReal dotB = dotProduct<SolveReal>(solutionB, rhsA, domainCellLabels);
        std::cout << "4 v-cycle symmetry: " << dotA << ", " << dotB << std::endl;
        assert(fabs(dotA - dotB) / fabs(std::max(dotA, dotB)) < 1E-10);
    }
    // Print domain labels to make sure they are set up correctly
    int pixelHeight = 1080;
    int pixelWidth = pixelHeight;
    renderer = std::make_unique<Renderer>("MG Symmetry Test", Vec2i(pixelWidth, pixelHeight), Vec2f(0), 1, &argc, argv);
    ScalarGrid<float> tempGrid(Transform(dx, Vec2f(0)), domainCellLabels.size());
    tbb::parallel_for(tbb::blocked_range<int>(0, tempGrid.voxelCount(), tbbLightGrainSize), [&](const tbb::blocked_range<int>& range)
    {
        for (int cellIndex = range.begin(); cellIndex != range.end(); ++cellIndex)
        {
            Vec2i cell = tempGrid.unflatten(cellIndex);
            tempGrid(cell) = float(domainCellLabels(cell));
        }
    });
    tempGrid.drawVolumetric(*renderer, Vec3f(0), Vec3f(1), float(CellLabels::INTERIOR_CELL), float(CellLabels::BOUNDARY_CELL));
    renderer->run();
}
<filename>torchmdnet/module.py
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.nn.functional import mse_loss, l1_loss
from pytorch_lightning import LightningModule
from torchmdnet.models.model import create_model, load_model
class LNNP(LightningModule):
def __init__(self, hparams, prior_model=None, mean=None, std=None):
    """Builds the Lightning module around a TorchMD-Net model.

    Args:
        hparams: hyperparameter namespace/dict; saved via
            ``save_hyperparameters`` so it is checkpointed and exposed
            as ``self.hparams``.
        prior_model: optional prior passed through to ``create_model``.
        mean: optional target mean passed through to ``create_model``.
        std: optional target std passed through to ``create_model``.
    """
    super(LNNP, self).__init__()
    self.save_hyperparameters(hparams)
    # Either resume from a saved checkpoint or build a fresh model from
    # the hyperparameters.
    if self.hparams.load_model:
        self.model = load_model(self.hparams.load_model, args=self.hparams)
    else:
        self.model = create_model(self.hparams, prior_model, mean, std)
    # initialize exponential smoothing
    self.ema = None
    self._reset_ema_dict()
    # initialize loss collection
    self.losses = None
    self._reset_losses_dict()
def configure_optimizers(self):
    """Create the AdamW optimizer and a ReduceLROnPlateau scheduler.

    The scheduler monitors ``val_loss`` once per epoch, shrinking the
    learning rate by ``lr_factor`` after ``lr_patience`` stale epochs,
    down to ``lr_min``.
    """
    optimizer = AdamW(
        self.model.parameters(),
        lr=self.hparams.lr,
        weight_decay=self.hparams.weight_decay,
    )
    plateau_scheduler = ReduceLROnPlateau(
        optimizer,
        "min",
        factor=self.hparams.lr_factor,
        patience=self.hparams.lr_patience,
        min_lr=self.hparams.lr_min,
    )
    lr_scheduler = dict(
        scheduler=plateau_scheduler,
        monitor="val_loss",
        interval="epoch",
        frequency=1,
    )
    return [optimizer], [lr_scheduler]
def forward(self, z, pos, batch=None):
    """Delegate to the wrapped model: predict from atomic numbers ``z``,
    positions ``pos`` and the optional graph ``batch`` assignment."""
    return self.model(z, pos, batch=batch)
def training_step(self, batch, batch_idx):
    """Training uses MSE loss under the "train" stage bookkeeping."""
    return self.step(batch, mse_loss, "train")
def validation_step(self, batch, batch_idx, *args):
    """Dispatch by dataloader index: index 0 (or no extra args) is the
    validation set (MSE loss); any other dataloader is treated as a test
    set (L1 loss)."""
    dataloader_is_validation = not args or args[0] == 0
    if dataloader_is_validation:
        # validation step
        return self.step(batch, mse_loss, "val")
    # test step
    return self.step(batch, l1_loss, "test")
def test_step(self, batch, batch_idx):
    """Testing uses L1 loss under the "test" stage bookkeeping."""
    return self.step(batch, l1_loss, "test")
def step(self, batch, loss_fn, stage):
with torch.set_grad_enabled(stage == "train" or self.hparams.derivative):
# TODO: the model doesn't necessarily need to return a derivative once
# Union typing works under TorchScript (https://github.com/pytorch/pytorch/pull/53180)
pred, deriv = self(batch.z, batch.pos, batch.batch)
loss_y, loss_dy = 0, 0
if self.hparams.derivative:
if "y" not in batch:
# "use" both outputs of the model's forward function but discard the first
# to only use the derivative and avoid 'Expected to have finished reduction
# in the prior iteration before starting a new one.', which otherwise get's
# thrown because of setting 'find_unused_parameters=False' in the DDPPlugin
deriv = deriv + pred.sum() * 0
# force/derivative loss
loss_dy = loss_fn(deriv, batch.dy)
if stage in ["train", "val"] and self.hparams.ema_alpha_dy < 1:
if self.ema[stage + "_dy"] is None:
self.ema[stage + "_dy"] = loss_dy.detach()
# apply exponential smoothing over batches to dy
loss_dy = (
self.hparams.ema_alpha_dy * loss_dy
+ (1 - self.hparams.ema_alpha_dy) * self.ema[stage + "_dy"]
)
self.ema[stage + "_dy"] = loss_dy.detach()
if self.hparams.force_weight > 0:
self.losses[stage + "_dy"].append(loss_dy.detach())
if "y" in batch:
if batch.y.ndim == 1:
batch.y = batch.y.unsqueeze(1)
# energy/prediction loss
loss_y = loss_fn(pred, batch.y)
if stage in ["train", "val"] and self.hparams.ema_alpha_y < 1:
if self.ema[stage + "_y"] is None:
self.ema[stage + "_y"] = loss_y.detach()
# apply exponential smoothing over batches to y
loss_y = (
self.hparams.ema_alpha_y * loss_y
+ (1 - self.hparams.ema_alpha_y) * self.ema[stage + "_y"]
)
self.ema[stage + "_y"] = loss_y.detach()
if self.hparams.energy_weight > 0:
self.losses[stage + "_y"].append(loss_y.detach())
# total loss
loss = loss_y * self.hparams.energy_weight + loss_dy * self.hparams.force_weight
self.losses[stage].append(loss.detach())
return loss
def optimizer_step(self, *args, **kwargs):
optimizer = kwargs["optimizer"] if "optimizer" in kwargs else args[2]
if self.trainer.global_step < self.hparams.lr_warmup_steps:
lr_scale = min(
1.0,
float(self.trainer.global_step + 1)
/ float(self.hparams.lr_warmup_steps),
)
for pg in optimizer.param_groups:
pg["lr"] = lr_scale * self.hparams.lr
super().optimizer_step(*args, **kwargs)
optimizer.zero_grad()
def training_epoch_end(self, training_step_outputs):
dm = self.trainer.datamodule
if hasattr(dm, "test_dataset") and len(dm.test_dataset) > 0:
should_reset = (
self.current_epoch % self.hparams.test_interval == 0
or (self.current_epoch - 1) % self.hparams.test_interval == 0
)
if should_reset:
# reset validation dataloaders before and after testing epoch, which is faster
# than skipping test validation steps by returning None
self.trainer.reset_val_dataloader(self)
def validation_epoch_end(self, validation_step_outputs):
if not self.trainer.running_sanity_check:
# construct dict of logged metrics
result_dict = {
"epoch": self.current_epoch,
"lr": self.trainer.optimizers[0].param_groups[0]["lr"],
"train_loss": torch.stack(self.losses["train"]).mean(),
"val_loss": torch.stack(self.losses["val"]).mean(),
}
# add test loss if available
if len(self.losses["test"]) > 0:
result_dict["test_loss"] = torch.stack(self.losses["test"]).mean()
# if prediction and derivative are present, also log them separately
if len(self.losses["train_y"]) > 0 and len(self.losses["train_dy"]) > 0:
result_dict["train_loss_y"] = torch.stack(self.losses["train_y"]).mean()
result_dict["train_loss_dy"] = torch.stack(
self.losses["train_dy"]
).mean()
result_dict["val_loss_y"] = torch.stack(self.losses["val_y"]).mean()
result_dict["val_loss_dy"] = torch.stack(self.losses["val_dy"]).mean()
if len(self.losses["test"]) > 0:
result_dict["test_loss_y"] = torch.stack(
self.losses["test_y"]
).mean()
result_dict["test_loss_dy"] = torch.stack(
self.losses["test_dy"]
).mean()
self.log_dict(result_dict, sync_dist=True)
self._reset_losses_dict()
def _reset_losses_dict(self):
self.losses = {
"train": [],
"val": [],
"test": [],
"train_y": [],
"val_y": [],
"test_y": [],
"train_dy": [],
"val_dy": [],
"test_dy": [],
}
def _reset_ema_dict(self):
self.ema = {"train_y": None, "val_y": None, "train_dy": None, "val_dy": None}
|
package vkdumper
import java.io.{FileOutputStream, PrintWriter}
import monix.execution.Scheduler
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.json4s.jackson.Serialization
import org.json4s.{DefaultReaders, Formats, NoTypeHints}
import vkdumper.Utils._
import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.concurrent.{Await, Awaitable}
import scala.runtime.ScalaRunTime
// Central execution-context holder: a single global Monix scheduler is used
// as the implicit ExecutionContext throughout the application.
object EC {
  //implicit val genericEC: ExecutionContext = ExecutionContext.global
  // - Monix scheduler provides EC
  implicit val genericSched: Scheduler = Scheduler.global
}
/** Console progress reporting for conversation updates and message dumping.
  * All output goes through the `con` helper; counters are width-aligned. */
class ProgressPrinter {

  def conv(curr: Int, total: Int): Unit = {
    val counterText = con.counter(total, curr + 1)
    con(s"[$counterText/$total] updating conversations...")
  }

  def convDone(total: Int): Unit =
    con(s"[$total/$total] conversation update done")

  // Shared prefix for the "conversation started" messages.
  private def msgStartText(peer: Int, c: String) =
    s"[$c 0%] conversation $peer"

  def msgStart(peer: Int, pos: ConvPos): Unit =
    con(msgStartText(peer, pos.cs))

  def msgStart(peer: Int, n: Int, total: Int): Unit =
    con(msgStartText(peer, s"${con.counter(total, n + 1)}/$total"))

  def msg(peer: Int, offset: Int, pos: ConvPos): Unit = {
    val total = pos.total
    val percent = con.counter(100, Math.round(100D / total * offset).toInt)
    val current = con.counter(total, offset + 1)
    con(s"[${pos.cs} $percent%] msg $current/$total, peer $peer")
  }

  def msgDone(peer: Int, pos: ConvPos): Unit = {
    val total = pos.total
    con(s"[${pos.cs} 100%] msg $total/$total, peer $peer")
  }
}
object Utils {
  // Handy unit value for expressions that must discard a result.
  val unit: Unit = ()

  // Shared progress-printer instance.
  object prog extends ProgressPrinter

  // Minimal console helper: plain / newline-prefixed / carriage-return
  // printing plus width-aligned counters.
  object con {
    def apply(): Unit = println()
    def apply(m: Any): Unit = println(m)
    def p(m: Any): Unit = print(m)
    def np(m: Any): Unit = print(s"\n$m")
    def rp(m: Any): Unit = print(s"\r$m")

    // Left-pads `c` with spaces to the digit width of `max`,
    // e.g. counter(100, 7) == "  7".
    def counter(max: Int, c: Int): String = {
      val ms = max.toString
      val ml = ms.length
      val cs = c.toString
      val pc = {
        val l = ml - cs.length
        if (l < 0) 0 else l
      }
      val pref = " " * pc
      s"$pref$cs"
    }
  }

  // Folds `f` over the list while it keeps returning Some; stops at the first
  // None and returns the accumulator consed onto the unconsumed remainder.
  @tailrec
  def foldList[T](src: List[T], acc: T)(f: (T, T) => Option[T]): List[T] = {
    if (src.isEmpty) return acc :: Nil
    f(acc, src.head) match {
      case Some(a) => foldList(src.tail, a)(f)
      case None => acc :: src
    }
  }

  // Inclusive integer range represented as (from, to).
  type Rng = (Int, Int)

  // Sanity check: every range must satisfy from <= to.
  def rngCheck(rng: Rng*): Unit = rng foreach {
    case (a, b) if a > b => throw new ArithmeticException("Bad range")
    case _ => ()
  }

  // Merges range `cr` into `list`, coalescing adjacent or overlapping ranges.
  // `hl` accumulates ranges already known to be disjoint from `cr`; the
  // result is sorted by range start.
  @tailrec
  def mergeRanges(cr: Rng, list: List[Rng], hl: List[Rng] = Nil): List[Rng] = {
    if (list.isEmpty) return (cr :: hl).sortBy(_._1)
    val (cf, ct) = cr
    val (e @ (ef, et)) :: rem = list
    val l = cf <= (et + 1) && cf >= ef // cr's start touches/overlaps e
    val r = (ct + 1) >= ef && ct <= et // cr's end touches/overlaps e
    val o = ef > cf && et < ct        // e lies strictly inside cr
    rngCheck(e, cr)
    if (o) mergeRanges(cr, rem, hl)   // e is swallowed by cr
    else
      (l, r) match {
        case (false, false) => mergeRanges(cr, rem, hl :+ e) // disjoint: keep e
        case (true, true)   => hl ::: list                   // cr contained in e
        case (true, _)      => mergeRanges(ef -> ct, rem, hl) // extend left edge
        case (_, true)      => mergeRanges(cf -> et, rem, hl) // extend right edge
      }
  }

  // Await helpers with a 60-second default timeout.
  def await[T](a: Awaitable[T]): T = awaitT(60.seconds, a)
  def awaitT[T](time: FiniteDuration, a: Awaitable[T]): T =
    Await.result(a, time)
  def awaitU(as: Awaitable[Any]*): Unit = as.foreach(await)
  def awaitU(time: FiniteDuration, as: Awaitable[Any]*): Unit =
    as.foreach(awaitT(time, _))

  // Print a message and terminate the JVM. The trailing throw only satisfies
  // the `Nothing` return type — System.exit never returns.
  def esc(m: String, code: Int = 1): Nothing = {
    println(m)
    System.exit(code)
    throw new Exception("trap")
  }
  def esc(code: Int): Nothing =
    esc("", code)

  // Write `content` (plus a newline) to `file`, optionally appending.
  def pwHelper(file: String, content: String, append: Boolean): Unit = {
    val o = new FileOutputStream(file, append)
    val pw = new PrintWriter(o)
    pw.println(content)
    pw.flush()
    pw.close()
  }

  // Gives a case-class-like toString to any Product mixer.
  trait ProductToString { this: Product =>
    override def toString = ScalaRunTime._toString(this)
  }

  // JSON (de)serialization helpers for CachedMsgProgress.
  object CMPUtils {
    implicit val formats: Formats = Serialization.formats(NoTypeHints)
    import DefaultReaders._

    // Parses {"r": [[from,to],...], "last": n} into a CachedMsgProgress.
    def fromString(str: String) = {
      val j = parse(str)
      CachedMsgProgress(
        (j \ "r").extract[List[List[Int]]].collect {
          case f :: t :: Nil => f -> t
        },
        (j \ "last").as[Int]
      )
    }
  }

  // Download progress for one conversation: merged message ranges plus the
  // last message id seen.
  case class CachedMsgProgress(ranges: List[Rng], lastMsgId: Int) {
    import CMPUtils._

    // Inverse of CMPUtils.fromString: compact JSON representation.
    def stringRepr: String = {
      val j = (
        "r" -> ranges.map { case (f, t) => List(f, t) }
      ) ~ ("last" -> lastMsgId)
      compact(render(j))
    }

    // End bound of the first range in `rng`, or 0 when empty.
    private def makeOffset(rng: List[Rng]): Int = rng match {
      case Nil         => 0
      case (_, o) :: _ => o
    }

    def lastOffset: Int = makeOffset(ranges.reverse)
    def leastOffset: Int = makeOffset(ranges)
  }
}
|
#!/bin/bash
set -ex

# Only set up podman when the CI matrix selected it as the container engine.
if [[ $ENGINE != "podman" ]]; then
  exit 0
fi

# Pull in VERSION_ID for the current Ubuntu release so the matching Kubic
# repository URL can be built below.
. /etc/os-release
# Register the openSUSE Kubic repository that publishes podman for Ubuntu,
# and trust its signing key.
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/testing/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
sudo apt-get update -qq
# uidmap + slirp4netns are required for rootless podman networking.
sudo apt-get -qq -y install podman uidmap slirp4netns
# Smoke test: fail the job early if podman is not functional.
podman version
|
#!/bin/bash
# Copyright 2019 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Record (rather than abort on) any command failure so the whole test run
# completes, then fail at the end if anything errored.
err=0
trap 'err=1' ERR

pytest lib/test

# Exit non-zero if the ERR trap fired at any point above.
test $err = 0
|
<filename>Viewer/ecflowUI/src/VAttributeType.hpp
//============================================================================
// Copyright 2009-2020 ECMWF.
// This software is licensed under the terms of the Apache Licence version 2.0
// which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
// In applying this licence, ECMWF does not waive the privileges and immunities
// granted to it by virtue of its status as an intergovernmental organisation
// nor does it submit to any jurisdiction.
//
//============================================================================
#ifndef VATTRIBUTETYPE_HPP_
#define VATTRIBUTETYPE_HPP_
#include <map>
#include <vector>
#include <string>
#include "VParam.hpp"
class AttributeFilter;
class VNode;
class VAttribute;
// Describes one kind of node attribute. Maintains a static registry of all
// types (searchable by name or numeric id) plus a persisted list of attribute
// names from the previous session. Types are loaded via VConfigLoader.
class VAttributeType : public VParam
{
public:
    ~VAttributeType() override = default;

    // --- static registry access ---
    static std::vector<VParam*> filterItems();
    static VAttributeType* find(const std::string& name);
    static VAttributeType* find(int id);
    static const std::vector<VAttributeType*>& types() {return types_;}

    int typeId() const {return typeId_;}

    // Map attribute data/search keys to indices into the attribute data vector.
    int keyToDataIndex(const std::string& key) const;
    int searchKeyToDataIndex(const std::string& key) const;
    QStringList searchKeys() const;

    // Default implementations return an empty string; subclasses override.
    virtual QString toolTip(QStringList d) const {return QString();}
    virtual QString definition(QStringList d) const {return QString();}

    // Collect the attributes of a node (across registered types) into v.
    static void scan(VNode* vnode,std::vector<VAttribute*>& v);

    // Per-type scanning callback.
    typedef void (*ScanProc) (VNode* vnode,std::vector<VAttribute*>& vec);
    ScanProc scanProc() {return scanProc_;}

    // Attribute names persisted between sessions.
    static const std::vector<std::string>& lastNames() {return lastNames_;}
    static void saveLastNames();
    static void initLastNames();

    //Called from VConfigLoader
    static void load(VProperty*);

protected:
    explicit VAttributeType(const std::string& name);

    typedef std::vector<VAttributeType*>::const_iterator TypeIterator;
    std::map<std::string,int> keyToData_;        // data key -> index
    std::map<std::string,int> searchKeyToData_;  // search key -> index
    int dataCount_;
    int typeId_;
    ScanProc scanProc_;
    static std::vector<std::string> lastNames_;

private:
    static std::map<std::string,VAttributeType*> typesMap_;  // name -> type
    static std::vector<VAttributeType*> types_;              // registration order
};
#endif
|
-- Basic customer contact record; one row per customer.
CREATE TABLE customer_info (
    customer_id INT PRIMARY KEY,          -- unique customer identifier
    first_name VARCHAR(50) NOT NULL,
    last_name VARCHAR(50) NOT NULL,
    email VARCHAR(255) NOT NULL,
    address VARCHAR(255) NOT NULL,
    phone_number VARCHAR(15) NOT NULL     -- text keeps leading zeros and '+'
);
def generate_embedded_code(ssid, password, uid):
    """Render Arduino-style C constant declarations for the WiFi credentials
    and the RFID card UID.

    Returns a single string with one declaration per line (each line
    newline-terminated), ready to be pasted into a sketch.
    """
    declarations = (
        f'const char *ssid = "{ssid}"; //Network SSID\n',
        f'const char *password = "{password}"; //Network PASSWORD\n',
        f'const char *UID = "{uid}"; //UID Card Code\n',
    )
    return "".join(declarations)
#!/bin/sh
# Run the solver against the small sample input; the `exit` below keeps the
# full contest datasets disabled until the sample output looks right.
cat sample.in | python solve_A.py
exit
# Unreachable while the `exit` above is in place: full contest datasets.
cat A-small.in | python solve_A.py
cat A-large.in | python solve_A.py
|
#!/usr/bin/env bash
# This script manages to deploy the infrastructure for the Atlassian Data Center products
#
# Usage: install.sh [-c <config_file>] [-h]
# -c <config_file>: Terraform configuration file. The default value is 'config.tfvars' if the argument is not provided.
# -f : Auto-approve
# -h : provides help to how executing this script.
# Fail fast on any error, including failures inside pipelines.
set -e
set -o pipefail

# Resolve paths relative to this script so it can be run from any directory.
ROOT_PATH=$(cd $(dirname "${0}"); pwd)
SCRIPT_PATH="${ROOT_PATH}/scripts"
# Timestamped log files for the main install and for the ASG tagging step.
LOG_FILE="${ROOT_PATH}/logs/terraform-dc-install_$(date '+%Y-%m-%d_%H-%M-%S').log"
LOG_TAGGING="${ROOT_PATH}/logs/terraform-dc-asg-tagging_$(date '+%Y-%m-%d_%H-%M-%S').log"

ENVIRONMENT_NAME=
OVERRIDE_CONFIG_FILE=
DIFFERENT_ENVIRONMENT=1

# Shared helpers — presumably provides log/get_variable/get_product used
# below; verify against scripts/common.sh.
source "${SCRIPT_PATH}/common.sh"
# Print usage information. When HELP_FLAG is set (-h) a longer description is
# shown first. Always terminates the script with exit status 2.
show_help(){
  if [ -n "${HELP_FLAG}" ]; then
cat << EOF
This script provisions the infrastructure for Atlassian Data Center products in AWS environment.
The infrastructure will be generated by terraform and state of the resources will be kept in a S3 bucket which will be provision by this script if is not existed.
Before installing the infrastructure make sure you have completed the configuration process and did all perquisites.
For more information visit https://github.com/atlassian-labs/data-center-terraform.
EOF
  fi
  echo
  echo "Usage: ./install.sh [-c <config_file>] [-h]"
  echo " -c <config_file>: Terraform configuration file. The default value is 'config.tfvars' if the argument is not provided."
  echo " -h : provides help to how executing this script."
  echo
  exit 2
}
# Extract arguments
CONFIG_FILE=
HELP_FLAG=
FORCE_FLAG=
while getopts hf?c: name ; do
  case $name in
  h) HELP_FLAG=1; show_help;; # Help
  c) CONFIG_FILE="${OPTARG}";; # Config file name to install - this overrides the default, 'config.tfvars'
  f) FORCE_FLAG="-f";; # Auto-approve
  ?) log "Invalid arguments." "ERROR" ; show_help
  esac
done

# Anything left after the parsed options is unexpected; kept for validation
# in process_arguments.
shift $((${OPTIND} - 1))
UNKNOWN_ARGS="$*"
# Check for prerequisite tooling
# https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/
# Aborts with an error unless every required CLI tool is available on PATH.
check_for_prerequisites() {
  local required_tool
  for required_tool in aws helm terraform; do
    if ! command -v "${required_tool}" &>/dev/null; then
      log "The required dependency [${required_tool}] could not be found. Please make sure that it is installed before continuing." "ERROR"
      exit 1
    fi
  done
}
# Validate the arguments.
# Resolves CONFIG_FILE (default: ./config.tfvars), converts it to an absolute
# path, builds the terraform -var-file override, and rejects leftover args.
process_arguments() {
  # set the default value for config file if is not provided
  if [ -z "${CONFIG_FILE}" ]; then
    CONFIG_FILE="${ROOT_PATH}/config.tfvars"
  else
    if [[ ! -f "${CONFIG_FILE}" ]]; then
      log "Terraform configuration file '${CONFIG_FILE}' not found!" "ERROR"
      show_help
    fi
  fi
  # Absolute path keeps terraform happy regardless of -chdir usage below.
  CONFIG_ABS_PATH="$(cd "$(dirname "${CONFIG_FILE}")"; pwd)/$(basename "${CONFIG_FILE}")"
  OVERRIDE_CONFIG_FILE="-var-file=${CONFIG_ABS_PATH}"

  log "Terraform will use '${CONFIG_ABS_PATH}' to install the infrastructure."

  if [ -n "${UNKNOWN_ARGS}" ]; then
    log "Unknown arguments: ${UNKNOWN_ARGS}" "ERROR"
    show_help
  fi
}
# Make sure the infrastructure config file is existed and contains the valid data
verify_configuration_file() {
  log "Verifying the config file."

  HAS_VALIDATION_ERR=
  # Make sure the config values are defined
  # Any non-comment line still containing '<' or '>' is an unfilled placeholder.
  set +e
  INVALID_CONTENT=$(grep -o '^[^#]*' "${CONFIG_ABS_PATH}" | grep '<\|>')
  set -e

  ENVIRONMENT_NAME=$(get_variable 'environment_name' "${CONFIG_ABS_PATH}")
  REGION=$(get_variable 'region' "${CONFIG_ABS_PATH}")

  # Environment name feeds into AWS resource names, so it must stay short.
  if [ "${#ENVIRONMENT_NAME}" -gt 24 ]; then
    log "The environment name '${ENVIRONMENT_NAME}' is too long(${#ENVIRONMENT_NAME} characters)." "ERROR"
    log "Please make sure your environment name is less than 24 characters."
    HAS_VALIDATION_ERR=1
  fi

  if [ -n "${INVALID_CONTENT}" ]; then
    log "Configuration file '${CONFIG_ABS_PATH##*/}' is not valid." "ERROR"
    log "Terraform uses this file to generate customised infrastructure for '${ENVIRONMENT_NAME}' on your AWS account."
    log "Please modify '${CONFIG_ABS_PATH##*/}' using a text editor and complete the configuration. "
    log "Then re-run the install.sh to deploy the infrastructure."
    log "${INVALID_CONTENT}"
    HAS_VALIDATION_ERR=1
  fi

  # Bamboo additionally needs a license and admin password, either in the
  # config file or exported as TF_VAR_* environment variables.
  INSTALL_BAMBOO=$(get_product "bamboo" "${CONFIG_ABS_PATH}")
  if [ -n "${INSTALL_BAMBOO}" ]; then
    # check license and admin password
    export POPULATED_LICENSE=$(grep -o '^[^#]*' "${CONFIG_ABS_PATH}" | grep 'bamboo_license')
    export POPULATED_ADMIN_PWD=$(grep -o '^[^#]*' "${CONFIG_ABS_PATH}" | grep 'bamboo_admin_password')

    if [ -z "${POPULATED_LICENSE}" ] && [ -z "${TF_VAR_bamboo_license}" ]; then
      log "License is missing. Please provide Bamboo license in config file, or export it to the environment variable 'TF_VAR_bamboo_license'." "ERROR"
      HAS_VALIDATION_ERR=1
    fi
    if [ -z "${POPULATED_ADMIN_PWD}" ] && [ -z "${TF_VAR_bamboo_admin_password}" ]; then
      log "Admin password is missing. Please provide Bamboo admin password in config file, or export it to the environment variable 'TF_VAR_bamboo_admin_password'." "ERROR"
      HAS_VALIDATION_ERR=1
    fi
  fi

  if [ -n "${HAS_VALIDATION_ERR}" ]; then
    log "There was a problem with the configuration file. Execution is aborted." "ERROR"
    exit 1
  fi
}
# Generates ./terraform-backend.tf and ./modules/tfstate/tfstate-local.tf using the content of local.tf and current aws account
generate_terraform_backend_variables() {
  log "${ENVIRONMENT_NAME}' infrastructure deployment is started using '${CONFIG_ABS_PATH##*/}'."

  log "Terraform state backend/variable files are to be created."
  bash "${SCRIPT_PATH}/generate-variables.sh" -c "${CONFIG_ABS_PATH}" "${FORCE_FLAG}"
  # The generated backend file names the S3 bucket used for terraform state.
  S3_BUCKET=$(get_variable 'bucket' "${ROOT_PATH}/terraform-backend.tf")
}
# Create S3 bucket, bucket key, and dynamodb table to keep state and manage lock if they are not created yet
create_tfstate_resources() {
  # Check if the S3 bucket is existed otherwise create the bucket to keep the terraform state
  log "Checking the terraform state."
  if ! test -d "${ROOT_PATH}/logs" ; then
    mkdir "${ROOT_PATH}/logs"
  fi
  touch "${LOG_FILE}"
  local STATE_FOLDER="${ROOT_PATH}/modules/tfstate"
  # head-bucket exits non-zero when the bucket is missing; don't abort on it.
  set +e
  aws s3api head-bucket --bucket "${S3_BUCKET}" 2>/dev/null
  S3_BUCKET_EXISTS=$?
  set -e
  if [ ${S3_BUCKET_EXISTS} -eq 0 ]
  then
    log "S3 bucket '${S3_BUCKET}' already exists."
  else
    # create s3 bucket to be used for keep state of the terraform project
    log "Creating '${S3_BUCKET}' bucket for storing the terraform state..."
    if ! test -d "${STATE_FOLDER}/.terraform" ; then
      terraform -chdir="${STATE_FOLDER}" init -no-color | tee -a "${LOG_FILE}"
    fi
    terraform -chdir="${STATE_FOLDER}" apply -auto-approve "${OVERRIDE_CONFIG_FILE}" | tee -a "${LOG_FILE}"
    # Brief pause so the freshly created bucket is usable by the next step.
    sleep 5
  fi
}
# Deploy the infrastructure if is not created yet otherwise apply the changes to existing infrastructure
create_update_infrastructure() {
  log "Starting to analyze the infrastructure..."
  if [ -n "${DIFFERENT_ENVIRONMENT}" ]; then
    # Move any local terraform state into the S3 backend before applying.
    log "Migrating the terraform state to S3 bucket..."
    terraform -chdir="${ROOT_PATH}" init -migrate-state -no-color | tee -a "${LOG_FILE}"
    terraform -chdir="${ROOT_PATH}" init -no-color | tee -a "${LOG_FILE}"
  fi
  terraform -chdir="${ROOT_PATH}" apply -auto-approve -no-color "${OVERRIDE_CONFIG_FILE}" | tee -a "${LOG_FILE}"
  # Export the terraform outputs for consumption by later steps/tooling.
  terraform -chdir="${ROOT_PATH}" output -json > outputs.json
}
# Apply the tags into ASG and EC2 instances created by ASG
add_tags_to_asg_resources() {
  log "Tagging Auto Scaling Group and EC2 instances. It may take a few minutes. Please wait..."
  TAG_MODULE_PATH="${ROOT_PATH}/modules/AWS/asg_ec2_tagging"
  # Runs in its own module folder; output goes to the dedicated tagging log.
  terraform -chdir="${TAG_MODULE_PATH}" init -no-color > "${LOG_TAGGING}"
  terraform -chdir="${TAG_MODULE_PATH}" apply -auto-approve -no-color "${OVERRIDE_CONFIG_FILE}" >> "${LOG_TAGGING}"
  log "Resource tags are applied to ASG and all EC2 instances."
}
# Point kubectl at the newly created EKS cluster (skipped when run with -f,
# i.e. by the e2e tests).
set_current_context_k8s() {
  local EKS_PREFIX="atlas-"
  local EKS_SUFFIX="-cluster"
  local EKS_CLUSTER_NAME=${EKS_PREFIX}${ENVIRONMENT_NAME}${EKS_SUFFIX}
  # Truncated to 38 characters — presumably to match the name produced by the
  # terraform EKS module; confirm against that module.
  local EKS_CLUSTER="${EKS_CLUSTER_NAME:0:38}"
  CONTEXT_FILE="${ROOT_PATH}/kubeconfig_${EKS_CLUSTER}"
  if [[ -f "${CONTEXT_FILE}" ]]; then
    log "EKS Cluster ${EKS_CLUSTER} in region ${REGION} is ready to use."
    log "Kubernetes config file could be found at '${CONTEXT_FILE}'"
    # No need to update Kubernetes context when run by e2e test
    if [ -z "${FORCE_FLAG}" ]; then
      aws --region "${REGION}" eks update-kubeconfig --name "${EKS_CLUSTER}"
    fi
  else
    log "Kubernetes context file '${CONTEXT_FILE}' could not be found."
  fi
}
# Resume a Bamboo server that was installed from an imported dataset (such
# installs come up paused). Looks up the admin credential in the config file,
# then the TF_VAR_* environment variables, then prompts interactively, and
# finally POSTs to Bamboo's REST resume endpoint.
resume_bamboo_server() {
  # Please note that if you import the dataset, make sure admin credential in config file (config.tfvars)
  # is matched with admin info stored in dataset you import.
  BAMBOO_DATASET=$(get_variable 'dataset_url' "${CONFIG_ABS_PATH}")
  INSTALL_BAMBOO=$(get_product "bamboo" "${CONFIG_ABS_PATH}")
  local SERVER_STATUS=
  # resume the server only if a dataset is imported
  if [ -n "${BAMBOO_DATASET}" ] && [ -n "${INSTALL_BAMBOO}" ]; then
    log "Resuming Bamboo server."
    ADMIN_USERNAME=$(get_variable 'bamboo_admin_username' "${CONFIG_ABS_PATH}")
    ADMIN_PASSWORD=$(get_variable 'bamboo_admin_password' "${CONFIG_ABS_PATH}")
    # Fall back to the environment variables consumed by terraform.
    if [ -z "${ADMIN_USERNAME}" ]; then
      ADMIN_USERNAME="${TF_VAR_bamboo_admin_username}"
    fi
    if [ -z "${ADMIN_PASSWORD}" ]; then
      ADMIN_PASSWORD="${TF_VAR_bamboo_admin_password}"
    fi
    # Last resort: ask the operator interactively.
    if [ -z "${ADMIN_USERNAME}" ]; then
      read -p "Please enter the bamboo administrator username: " ADMIN_USERNAME
    fi
    if [ -n "${ADMIN_USERNAME}" ]; then
      if [ -z "${ADMIN_PASSWORD}" ]; then
        echo "Please enter password of the Bamboo '${ADMIN_USERNAME}' user: "
        read -s ADMIN_PASSWORD
      fi
      bamboo_url=$(terraform output | grep '"bamboo" =' | sed -nE 's/^.*"(.*)".*$/\1/p')
      resume_bamboo_url="${bamboo_url}/rest/api/latest/server/resume"
      local RESULT=$(curl -s -u "${ADMIN_USERNAME}:${ADMIN_PASSWORD}" -X POST "${resume_bamboo_url}")
      # BUGFIX: substring glob matches only work inside [[ ]]. The previous
      # 'elif [ ... == *"AUTHENTICATED_FAILED"* ]' used single brackets, so
      # the auth-failure branch could never match.
      if [[ "${RESULT}" == *"RUNNING"* ]]; then
        SERVER_STATUS="RUNNING"
        log "Bamboo server was resumed and it is running successfully."
      elif [[ "${RESULT}" == *"AUTHENTICATED_FAILED"* ]]; then
        log "The provided admin username and password is not matched with the credential stored in the dataset." "ERROR"
      else
        log "Unexpected state when resuming Bamboo server, state: ${RESULT}" "ERROR"
      fi
    fi
    # Quoted to avoid a test-syntax error if SERVER_STATUS ever held spaces.
    if [ -z "${SERVER_STATUS}" ]; then
      log "We were not able to login into the Bamboo software to resume the server." "WARN"
      log "Please login into the Bamboo and 'RESUME' the server before start using the product."
    fi
  fi
}
# For Confluence installs without a custom domain, push the generated
# Synchrony URL into the already-deployed Helm release.
set_synchrony_url() {
  DOMAIN=$(get_variable 'domain' "${CONFIG_ABS_PATH}")
  INSTALL_CONFLUENCE=$(get_product "confluence" "${CONFIG_ABS_PATH}")
  if [ -z "${DOMAIN}" ] && [ -n "${INSTALL_CONFLUENCE}" ]; then
    log "Configuring the Synchrony service."
    # Extract the quoted synchrony_url value from the terraform outputs.
    SYNCHRONY_FULL_URL=$(terraform output | sed "s/ //g" | grep "synchrony_url=" | sed -nE 's/^.*"(.*)".*$/\1/p')
    helm upgrade confluence atlassian-data-center/confluence -n atlassian --reuse-values --set synchrony.ingressUrl="${SYNCHRONY_FULL_URL}" > /dev/null
    log "Synchrony URL is set to '${SYNCHRONY_FULL_URL}'."
  fi
}
# Update the current load balancer listener on port 7999 to use the TCP protocol
enable_ssh_tcp_protocol_on_lb_listener() {
  readonly SSH_TCP_PORT="7999"
  local install_bitbucket
  local region
  local load_balancer_dns
  local load_balancer_name
  local original_instance_port
  install_bitbucket=$(get_product "bitbucket" "${CONFIG_ABS_PATH}")
  if [ -n "${install_bitbucket}" ]; then
    region=$(get_variable 'region' "${CONFIG_ABS_PATH}")
    # The ELB name is the first '-'-separated token of its DNS hostname.
    load_balancer_dns=$(terraform output | grep '"load_balancer_hostname" =' | sed -nE 's/^.*"(.*)".*$/\1/p')
    load_balancer_name=$(echo "${load_balancer_dns}" | cut -d '-' -f 1)
    # Remember the instance port the 7999 listener currently forwards to so it
    # can be recreated with the same target.
    original_instance_port=$(aws elb describe-load-balancers --load-balancer-name ${load_balancer_name} --query 'LoadBalancerDescriptions[*].ListenerDescriptions[?Listener.LoadBalancerPort==`'"${SSH_TCP_PORT}"'`].Listener[].InstancePort | [0]' --region "${region}")
    log "Enabling SSH connectivity for Bitbucket. Updating load balancer [${load_balancer_dns}] listener protocol from HTTP to TCP on port ${SSH_TCP_PORT}..."
    describe_lb_listener "${load_balancer_name}" "${region}"
    # delete the current listener for port 7999 and re-create but using the TCP protocol instead
    if delete_lb_listener "${load_balancer_name}" "${region}" && create_lb_listener "${load_balancer_name}" "${original_instance_port}" "${region}"; then
      log "Load balancer listener protocol updated for ${load_balancer_dns}."
      describe_lb_listener "${load_balancer_name}" "${region}"
    else
      log "ERROR! There was an issue updating the load balancer [${load_balancer_dns}] listener protocol from HTTP to TCP on port ${SSH_TCP_PORT}. You may want to do this manually via the AWS Console."
    fi
  fi
}
# ---------------------------------------------------------------------------
# Main flow: validate inputs, provision state storage, deploy, post-configure.
# ---------------------------------------------------------------------------
# Check for prerequisite tooling
check_for_prerequisites
# Process the arguments
process_arguments
# Verify the configuration file
verify_configuration_file
# Generates ./terraform-backend.tf and ./modules/tfstate/tfstate-local.tf
generate_terraform_backend_variables
# Create S3 bucket and dynamodb table to keep state
create_tfstate_resources
# Deploy the infrastructure
create_update_infrastructure
# Manually add resource tags into ASG and EC2
add_tags_to_asg_resources
# Resume bamboo server if the credential is provided
resume_bamboo_server
# Print information about manually adding the new k8s context
set_current_context_k8s
# Set the correct Synchrony URL
set_synchrony_url
# To allow SSH connectivity for Bitbucket update the Load Balancer protocol for listener port 7999
enable_ssh_tcp_protocol_on_lb_listener
# Show the list of installed Helm charts
helm list --namespace atlassian
|
def num_common_words(str1, str2):
    """Return the number of distinct words that appear in both strings.

    Words are whitespace-separated tokens; comparison is exact (case- and
    punctuation-sensitive). Duplicates within a string count once, matching
    the original implementation's behavior.
    """
    # Set intersection replaces the original O(n^2) approach (a union set
    # followed by `word in list` membership scans) with an O(n) computation;
    # the result is identical for every input.
    return len(set(str1.split()) & set(str2.split()))
<gh_stars>0
# from https://realpython.com/intro-to-python-threading/#working-with-many-threads
import concurrent.futures
import logging
import time
def thread_function(name):
    """Worker body: log the start, simulate two seconds of work, log the end.

    The log lines make thread interleaving visible in the demo output.
    """
    logging.info("Thread %s: starting", name)
    time.sleep(2)
    logging.info("Thread %s: finishing", name)
if __name__ == "__main__":
    # Timestamped log lines show how the worker threads interleave.
    format = "%(asctime)s: %(message)s"
    logging.basicConfig(format=format, level=logging.INFO,
                        datefmt="%H:%M:%S")

    # map() submits one task per element; exiting the with-block waits for
    # all submitted tasks to finish.
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
        executor.map(thread_function, range(3))

    # Same pattern with an arbitrary iterable of names instead of a range.
    my_other_map = ["one", "two", "three"]
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
        executor.map(thread_function, my_other_map)
|
//============================================================================
// Copyright 2009-2018 ECMWF.
// This software is licensed under the terms of the Apache Licence version 2.0
// which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
// In applying this licence, ECMWF does not waive the privileges and immunities
// granted to it by virtue of its status as an intergovernmental organisation
// nor does it submit to any jurisdiction.
//
//============================================================================
#ifndef TIMELINEITEMWIDGET_HPP
#define TIMELINEITEMWIDGET_HPP
#include "InfoPanelItem.hpp"
#include "VInfo.hpp"
#include <QWidget>
class VNode;
class TimelineWidget;
// Info-panel item embedding a TimelineWidget. The delayedLoad_ flag suggests
// loading can be deferred until the panel becomes active — confirm in the .cpp.
class TimelineItemWidget : public QWidget, public InfoPanelItem
{
public:
    explicit TimelineItemWidget(QWidget *parent=nullptr);
    ~TimelineItemWidget() override;

    void reload(VInfo_ptr) override;
    QWidget* realWidget() override;
    void clearContents() override;
    bool hasSameContents(VInfo_ptr info) override;
    void notifyInfoChanged(const std::string& path) override;

    // Node/defs change notifications are intentionally no-ops here.
    void nodeChanged(const VNode*, const std::vector<ecf::Aspect::Type>&) override {}
    void defsChanged(const std::vector<ecf::Aspect::Type>&) override {}

    // Persist/restore widget state between sessions.
    void writeSettings(VComboSettings* vs) override;
    void readSettings(VComboSettings* vs) override;

protected:
    void updateState(const ChangeFlags&) override;
    void serverSyncFinished() override;
    void connectStateChanged() override;

private:
    void load();

    TimelineWidget* w_;   // embedded timeline view
    bool delayedLoad_;    // presumably marks a postponed load() — verify in .cpp
};
#endif // TIMELINEITEMWIDGET_HPP
|
<filename>Modules/Filtering/Convolution/include/otbConvolutionImageFilter.h<gh_stars>100-1000
/*
* Copyright (C) 2005-2020 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef otbConvolutionImageFilter_h
#define otbConvolutionImageFilter_h
#include "itkImageToImageFilter.h"
#include "itkImage.h"
#include "itkNumericTraits.h"
#include "itkArray.h"
#include "itkZeroFluxNeumannBoundaryCondition.h"
namespace otb
{
/** \class ConvolutionImageFilter
* \brief Applies a convolution filter to a mono channel image
*
* Computes an image which is the convolution of the input image
* with a filter.
*
* The radius of the input filter is provided by the \code SetInput() \endcode
* method and the filters coefficients are given by an itk::Array passed to the
* \code SetFilter() \endcode method.
*
* By default, the input filter is not normalized but it can be using the
* NormalizeFilterOn() method.
*
* This filter allows the user to choose the boundary conditions in the template parameters.
Default boundary conditions are zero flux Neumann boundary conditions.
*
* An optimized version of this filter using FFTW is available in the Orfeo ToolBox and
* will significantly improves performances especially for large kernels
* (see OverlapSaveConvolutionImageFilter).
*
* \sa Image
* \sa Neighborhood
* \sa NeighborhoodOperator
* \sa NeighborhoodIterator
* \sa ImageBoundaryCondition
* \sa ZeroFluxNeumannBoundaryCondition
* \sa OverlapSaveConvolutionImageFilter
*
* \ingroup IntensityImageFilters
* \ingroup Streamed
* \ingroup MultiThreaded
*
* \ingroup OTBConvolution
*/
template <class TInputImage, class TOutputImage, class TBoundaryCondition = itk::ZeroFluxNeumannBoundaryCondition<TInputImage>,
class TFilterPrecision = typename itk::NumericTraits<typename TInputImage::InternalPixelType>::RealType>
class ITK_EXPORT ConvolutionImageFilter : public itk::ImageToImageFilter<TInputImage, TOutputImage>
{
public:
/** Extract dimension from input and output image. */
itkStaticConstMacro(InputImageDimension, unsigned int, TInputImage::ImageDimension);
itkStaticConstMacro(OutputImageDimension, unsigned int, TOutputImage::ImageDimension);
/** Convenient typedefs for simplifying declarations. */
typedef TInputImage InputImageType;
typedef TOutputImage OutputImageType;
/** Standard class typedefs. */
typedef ConvolutionImageFilter Self;
typedef itk::ImageToImageFilter<InputImageType, OutputImageType> Superclass;
typedef itk::SmartPointer<Self> Pointer;
typedef itk::SmartPointer<const Self> ConstPointer;
/** Method for creation through the object factory. */
itkNewMacro(Self);
/** Run-time type information (and related methods). */
itkTypeMacro(ConvolutionImageFilter, ImageToImageFilter);
/** Image typedef support. */
typedef typename InputImageType::PixelType InputPixelType;
typedef typename OutputImageType::PixelType OutputPixelType;
typedef typename itk::NumericTraits<InputPixelType>::RealType InputRealType;
typedef typename InputImageType::RegionType InputImageRegionType;
typedef typename OutputImageType::RegionType OutputImageRegionType;
typedef typename InputImageType::SizeType InputSizeType;
typedef TFilterPrecision FilterPrecisionType;
typedef typename itk::Array<FilterPrecisionType> ArrayType;
typedef TBoundaryCondition BoundaryConditionType;
/** Set the radius of the neighborhood of the filter.
 *  Resizes the coefficient array to the product over all dimensions of
 *  (2*radius+1) and resets every coefficient to 1, so SetFilter() must be
 *  called again after changing the radius. No-op when the radius is unchanged. */
virtual void SetRadius(const InputSizeType rad)
{
  itkDebugMacro("setting radius to " << rad);
  if (this->m_Radius != rad)
  {
    this->m_Radius = rad;
    // Kernel size: product of (2*radius+1) across all image dimensions.
    unsigned int arraySize = 1;
    for (unsigned int i = 0; i < m_Radius.GetSizeDimension(); ++i)
    {
      arraySize *= 2 * this->m_Radius[i] + 1;
    }
    this->m_Filter.SetSize(arraySize);
    this->m_Filter.Fill(1);
    this->Modified();
  }
}
/** Get the radius of the neighborhood of the filter*/
itkGetConstReferenceMacro(Radius, InputSizeType);
/** Set the input filter */
virtual void SetFilter(ArrayType filter)
{
if (filter.Size() != m_Filter.Size())
{
itkExceptionMacro("Error in SetFilter, invalid filter size:" << filter.Size() << " instead of (2*m_Radius[0]+1)*(2*m_Radius[1]+1): " << m_Filter.Size());
}
else
{
m_Filter = filter;
}
this->Modified();
}
itkGetConstReferenceMacro(Filter, ArrayType);
/**
* Set/Get methods for the normalization of the filter
*/
itkSetMacro(NormalizeFilter, bool);
itkGetMacro(NormalizeFilter, bool);
itkBooleanMacro(NormalizeFilter);
#ifdef ITK_USE_CONCEPT_CHECKING
/** Begin concept checking */
itkConceptMacro(InputHasNumericTraitsCheck, (itk::Concept::HasNumericTraits<InputPixelType>));
/** End concept checking */
#endif
protected:
ConvolutionImageFilter();
~ConvolutionImageFilter() override
{
}
void PrintSelf(std::ostream& os, itk::Indent indent) const override;
/** ConvolutionImageFilter can be implemented as a multithreaded filter.
* Therefore, this implementation provides a ThreadedGenerateData()
* routine which is called for each processing thread. The output
* image data is allocated automatically by the superclass prior to
* calling ThreadedGenerateData(). ThreadedGenerateData can only
* write to the portion of the output image specified by the
* parameter "outputRegionForThread"
*
* \sa ImageToImageFilter::ThreadedGenerateData(),
* ImageToImageFilter::GenerateData() */
void ThreadedGenerateData(const OutputImageRegionType& outputRegionForThread, itk::ThreadIdType threadId) override;
/** ConvolutionImageFilter needs a larger input requested region than
* the output requested region. As such, ConvolutionImageFilter needs
* to provide an implementation for GenerateInputRequestedRegion()
* in order to inform the pipeline execution model.
*
* \sa ImageToImageFilter::GenerateInputRequestedRegion() */
void GenerateInputRequestedRegion() override;
private:
ConvolutionImageFilter(const Self&) = delete;
void operator=(const Self&) = delete;
/** Radius of the filter */
InputSizeType m_Radius;
/** Array containing the filter values */
ArrayType m_Filter;
/** Flag for filter coefficients normalization */
bool m_NormalizeFilter;
};
} // end namespace itk
#ifndef OTB_MANUAL_INSTANTIATION
#include "otbConvolutionImageFilter.hxx"
#endif
#endif
|
public class MainActivity extends AppCompatActivity {
private List<Memo> memos;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
memos = new ArrayList<>();
Button addButton = findViewById(R.id.add_button);
addButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(MainActivity.this, AddMemoActivity.class);
startActivityForResult(intent, REQUEST_ADD);
}
});
Button deleteButton = findViewById(R.id.delete_button);
deleteButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(MainActivity.this, DeleteMemoActivity.class);
startActivityForResult(intent, REQUEST_DELETE);
}
});
ListView memoListView = findViewById(R.id.memo_list);
MemoAdapter memoAdapter = new MemoAdapter(this, memos);
memoListView.setAdapter(memoAdapter);
}
// Other methods...
} |
package no.mnemonic.commons.container.plugins.impl;
import no.mnemonic.commons.component.LifecycleAspect;
import no.mnemonic.commons.container.ComponentContainer;
import no.mnemonic.commons.container.ContainerListener;
import org.junit.Before;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.inOrder;
public class ContainerListenerHandlerTest {

  @Mock
  private ContainerListener listener;
  @Mock
  private LifecycleAspect component;

  @Before
  public void setup(){
    MockitoAnnotations.initMocks(this);
  }

  /**
   * Starting a container must start its components before listeners are
   * notified that the container is up.
   */
  @Test
  public void testStartContainerWithListener() {
    ComponentContainer.create(component, listener).initialize();
    InOrder ordered = inOrder(component, listener);
    ordered.verify(component).startComponent();
    ordered.verify(listener).notifyContainerStarted(any());
  }

  /**
   * Destroying a container must notify listeners first, then stop components,
   * then signal final destruction.
   */
  @Test
  public void testStopContainerWithListener() {
    ComponentContainer container = ComponentContainer.create(component, listener).initialize();
    container.destroy();
    // Each mock is listed once; the original passed 'listener' twice, which is
    // redundant for Mockito's InOrder verification.
    InOrder ordered = inOrder(listener, component);
    ordered.verify(listener).notifyContainerDestroying(any());
    ordered.verify(component).stopComponent();
    ordered.verify(listener).notifyContainerDestroyed(any());
  }
}
|
<filename>App/src/main/java/com/honyum/elevatorMan/net/ChatListResponse.java
package com.honyum.elevatorMan.net;
import com.honyum.elevatorMan.net.base.Response;
import com.honyum.elevatorMan.net.base.ResponseHead;
import org.litepal.annotation.Column;
import org.litepal.crud.DataSupport;
import java.io.Serializable;
import java.util.List;
public class ChatListResponse extends Response {
    // Response envelope header (status / metadata).
    private ResponseHead head;
    // One entry per chat message in this response.
    private List<ChatListBody> body;

    @Override
    public ResponseHead getHead() {
        return head;
    }

    @Override
    public void setHead(ResponseHead head) {
        this.head = head;
    }

    public List<ChatListBody> getBody() {
        return body;
    }

    public void setBody(List<ChatListBody> body) {
        this.body = body;
    }

    /** A single chat message; persisted locally via LitePal and serializable. */
    public static class ChatListBody extends DataSupport implements Serializable{
        private Long code;
        private String content;
        private String alarmId;
        @Column(ignore = true)
        // The id field is excluded from persistence because it conflicts with
        // LitePal's own primary key: the id stored in the database is
        // auto-generated by LitePal rather than taken from the payload.
        private String id;
        private String sendTime;
        private String senderName;
        private String senderId;
        // Message type: 1 = text, 2 = voice, 3 = image, 4 = video
        private String type;
        // Duration in seconds — presumably only meaningful for voice/video; confirm.
        private int timeLength;
        // Whether the media payload has already been downloaded locally.
        private boolean isLoad =false;

        public int getTimeLength() {
            return timeLength;
        }
        public void setTimeLength(int timeLength) {
            this.timeLength = timeLength;
        }
        public String getSenderId() {
            return senderId;
        }
        public void setSenderId(String senderId) {
            this.senderId = senderId;
        }
        public Long getCode() {
            return code;
        }
        public void setCode(Long code) {
            this.code = code;
        }
        public String getContent() {
            return content;
        }
        public void setContent(String content) {
            this.content = content;
        }
        public String getAlarmId() {
            return alarmId;
        }
        public void setAlarmId(String alarmId) {
            this.alarmId = alarmId;
        }
        public String getId() {
            return id;
        }
        public void setId(String id) {
            this.id = id;
        }
        public String getSendTime() {
            return sendTime;
        }
        public void setSendTime(String sendTime) {
            this.sendTime = sendTime;
        }
        public String getSenderName() {
            return senderName;
        }
        public void setSenderName(String senderName) {
            this.senderName = senderName;
        }
        public String getType() {
            return type;
        }
        public void setType(String type) {
            this.type = type;
        }
        public boolean isLoad() {
            return isLoad;
        }
        public void setLoad(boolean load) {
            isLoad = load;
        }
    }

    /** Parses the given JSON payload into a ChatListResponse. */
    public static ChatListResponse getChatList(String json) {
        return (ChatListResponse) parseFromJson(ChatListResponse.class, json);
    }
}
|
#! /bin/bash
# Initialization of Script
# NOTE(review): 'gcloud init < a' feeds answers from a local file named 'a' —
# presumably a pre-recorded prompt script; confirm the file exists.
gcloud init < a
gcloud container clusters create io
cd kubernetes
# Create a Kubernetes cluster and launch Nginx container
if (kubectl create deployment nginx --image=nginx:1.10.0
kubectl expose deployment nginx --port 80 --type LoadBalancer)
then
printf "\n\e[1;96m%s\n\n\e[m" 'Created Kubernetes cluster & Nginx container: Checkpoint Completed (1/5)'
sleep 2.5
# Create Monolith pods and service
if (kubectl create -f pods/monolith.yaml
kubectl create secret generic tls-certs --from-file tls/
kubectl create configmap nginx-proxy-conf --from-file nginx/proxy.conf
kubectl create -f pods/secure-monolith.yaml
kubectl create -f services/monolith.yaml)
then
# Typo fixed: "Monlith" -> "Monolith".
printf "\n\e[1;96m%s\n\n\e[m" 'Monolith pods and service created: Checkpoint Completed (2/5)'
sleep 2.5
# Allow traffic to the monolith service on the exposed nodeport
if gcloud compute firewall-rules create allow-monolith-nodeport \
    --allow=tcp:31000
then
printf "\n\e[1;96m%s\n\n\e[m" 'Traffic allowed: Checkpoint Completed (3/5)'
sleep 2.5
# Adding Labels to Pods
if kubectl label pods secure-monolith 'secure=enabled'
then
printf "\n\e[1;96m%s\n\n\e[m" 'Label added: Checkpoint Completed (4/5)'
sleep 2.5
# Deploying Applications with Kubernetes
if (kubectl create -f deployments/auth.yaml
kubectl create -f services/auth.yaml
kubectl create -f deployments/hello.yaml
kubectl create -f services/hello.yaml
kubectl create configmap nginx-frontend-conf --from-file=nginx/frontend.conf
kubectl create -f deployments/frontend.yaml
kubectl create -f services/frontend.yaml)
then
printf "\n\e[1;96m%s\n\n\e[m" 'Applications Deployed: Checkpoint Completed (5/5)'
sleep 2.5
printf "\n\e[1;92m%s\n\n\e[m" 'Lab Completed'
fi
fi
fi
fi
fi
# Always drop credentials when the lab run ends, successful or not.
gcloud auth revoke --all
|
// ============================================================================
// Interview Problem: Constant Time Stack Max
// ============================================================================
//
// -------
// Prompt:
// -------
//
// Modify the definition of the Stack class provided to create an enhanced
// version of a Stack data structure called MinMaxStack.
//
// A MinMaxStack has all of the same behavior as a Stack, but can also return
// the node with the minimum or maximum value in constant time.
//
// You may alter any of the original Stack's methods, including the
// constructor.
//
// Values of nodes of the MinMaxStack are always guaranteed to be numbers.
//
//
// ------------
// Constraints:
// ------------
//
// (1) All MinMaxStack methods must run in constant time, O(1).
//
//
// --------
// Example:
// --------
//
// const minMaxStack = new MinMaxStack();
//
// minMaxStack.push(10);
// minMaxStack.push(12);
// minMaxStack.push(8);
// minMaxStack.push(2);
// minMaxStack.push(20);
//
// console.log(minMaxStack.min().value); => 2
// console.log(minMaxStack.max().value); => 20
//
// minMaxStack.pop();
// console.log(minMaxStack.min().value); => 2
// console.log(minMaxStack.max().value); => 12
//
// minMaxStack.pop();
// console.log(minMaxStack.min().value); => 8
// console.log(minMaxStack.max().value); => 12
//
// minMaxStack.pop();
// console.log(minMaxStack.min().value); => 10
// console.log(minMaxStack.max().value); => 12
//
// minMaxStack.pop();
// console.log(minMaxStack.min().value); => 10
// console.log(minMaxStack.max().value); => 10
//
// minMaxStack.pop();
// console.log(minMaxStack.min()); => null
// console.log(minMaxStack.max()); => null
//
//
// -----------
// Let's code!
// -----------
/** Singly-linked node used by the stack. */
class Node {
  constructor(val) {
    this.value = val;
    this.next = null;
  }
}

/**
 * A Stack that additionally answers min() and max() in O(1).
 *
 * Two auxiliary monotonic arrays remember which nodes were the running
 * minimum/maximum at the moment they were pushed; popping a node that sits on
 * top of one of those arrays retires it from that array as well.
 */
class MinMaxStack {
  constructor() {
    this.top = null;
    this.bottom = null;
    this.length = 0;
    this.minValueNodes = [];
    this.maxValueNodes = [];
  }

  /** Pushes val and returns the new size. O(1). */
  push(val) {
    const node = new Node(val);
    if (this.top === null) {
      this.top = node;
      this.bottom = node;
    } else {
      node.next = this.top;
      this.top = node;
    }
    const mins = this.minValueNodes;
    const maxs = this.maxValueNodes;
    // Only a strictly-smaller (larger) value becomes the new tracked extreme;
    // ties keep the deeper node, which identity-matching in pop() handles.
    if (mins.length === 0 || val < mins[mins.length - 1].value) mins.push(node);
    if (maxs.length === 0 || val > maxs[maxs.length - 1].value) maxs.push(node);
    this.length += 1;
    return this.length;
  }

  /** Removes and returns the top node, or null when empty. O(1). */
  pop() {
    const removed = this.top;
    if (removed === null) return null;
    if (removed === this.bottom) this.bottom = null;
    const mins = this.minValueNodes;
    const maxs = this.maxValueNodes;
    // Identity comparison retires the node from the extreme trackers.
    if (removed === mins[mins.length - 1]) mins.pop();
    if (removed === maxs[maxs.length - 1]) maxs.pop();
    this.top = removed.next;
    this.length -= 1;
    return removed;
  }

  /** Current number of nodes. */
  size() {
    return this.length;
  }

  /** Node holding the minimum value, or null when empty. O(1). */
  min() {
    return this.minValueNodes.length ? this.minValueNodes[this.minValueNodes.length - 1] : null;
  }

  /** Node holding the maximum value, or null when empty. O(1). */
  max() {
    return this.maxValueNodes.length ? this.maxValueNodes[this.maxValueNodes.length - 1] : null;
  }
}
// Export both classes so the test harness can import them.
exports.Node = Node;
exports.MinMaxStack = MinMaxStack;
|
// Single MySQL connection shared by every handler in this module.
// NOTE(review): credentials are hard-coded (root, empty password) and the
// database name "XXX" is a placeholder — presumably meant to come from
// configuration/environment; confirm before deploying.
var mysql = require('mysql');
var mySqlClient = mysql.createConnection({
    host :"localhost",
    user :"root",
    password :"",
    database :"XXX"
});
exports.addData = function(req, res){
console.log("données recus : ");
console.log(req.body);
var mName = req.body.name;
var mAge = req.body.age;
var insQuery = "INSERT INTO testJs (name, age) VALUES ('"+mName+"','"+mAge+"');";
mySqlClient.query(insQuery, function select(error, results, fields) {
if (error) {
console.log(error);
mySqlClient.end();
return;
}
res.send({message:"done", query:insQuery,result:results});
});
console.log("fin.");
};
exports.getData = function(req, res){
console.log("Obtention des données ... ");
var getQuery = "SELECT * FROM testJs;";
mySqlClient.query(getQuery, function select(error, results, fields) {
if (error) {
console.log(error);
mySqlClient.end();
return;
}
res.send({message:"done", query:getQuery, result:results});
});
console.log("fin.");
};
exports.getDataByField = function(req, res){
console.log("Obtention des données pour un field ... ");
console.log(req.query.age);
var mAge = req.query.age;
var getQuery = "SELECT * FROM testJs WHERE age ='"+mAge+"';";
mySqlClient.query(getQuery, function select(error, results, fields) {
if (error) {
console.log(error);
mySqlClient.end();
return;
}
res.send({message:"done", query:getQuery, result:results});
});
console.log("fin.");
};
exports.updateData = function(req, res){
console.log("données recus pour mise a jour : ");
console.log(req.body);
//if i used @query in retrofit then the data will be in req.query.things
var mId = req.body.id;
var mName = req.body.name;
var mAge = req.body.age;
var upQuery = "UPDATE testJs SET name = '"+mName+"' , age = '"+mAge+"' WHERE id = '"+mId+"';";
mySqlClient.query(upQuery, function select(error, results, fields) {
if (error) {
console.log(error);
mySqlClient.end();
return;
}
res.send({message:"done", query:upQuery,result:results});
});
console.log("fin.");
};
exports.deleteData = function(req, res){
console.log("Suppression en cours ... ");
console.log(req.body);
var mId = req.body.id;
var delQuery = "DELETE FROM testJs WHERE id = '"+mId+"';";
mySqlClient.query(delQuery, function select(error, results, fields) {
if (error) {
console.log(error);
mySqlClient.end();
return;
}
res.send({message:"done", query:delQuery,result:results});
});
console.log("fin.");
};
|
// Replace the global fetch with a no-op stub that returns undefined.
// NOTE(review): presumably a test shim to suppress real network calls; callers
// that expect fetch to return a Promise will break — confirm this is intended.
globalThis.fetch = function() {
}
package ch.sbb.maven.plugins.iib.mojos;
import static org.twdata.maven.mojoexecutor.MojoExecutor.artifactId;
import static org.twdata.maven.mojoexecutor.MojoExecutor.configuration;
import static org.twdata.maven.mojoexecutor.MojoExecutor.element;
import static org.twdata.maven.mojoexecutor.MojoExecutor.executeMojo;
import static org.twdata.maven.mojoexecutor.MojoExecutor.executionEnvironment;
import static org.twdata.maven.mojoexecutor.MojoExecutor.goal;
import static org.twdata.maven.mojoexecutor.MojoExecutor.groupId;
import static org.twdata.maven.mojoexecutor.MojoExecutor.name;
import static org.twdata.maven.mojoexecutor.MojoExecutor.plugin;
import static org.twdata.maven.mojoexecutor.MojoExecutor.version;
import java.io.File;
import org.apache.maven.execution.MavenSession;
import org.apache.maven.model.Plugin;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.BuildPluginManager;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugins.annotations.Component;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.project.MavenProject;
import org.codehaus.plexus.util.xml.Xpp3Dom;
import org.twdata.maven.mojoexecutor.MojoExecutor.Element;
import org.twdata.maven.mojoexecutor.MojoExecutor.ExecutionEnvironment;
/**
 * Installs a zipped-up, IIB9 project into the user's local maven repository.
 *
 * Delegates to maven-install-plugin's install-file goal, attaching the
 * project's pom.xml when present (otherwise a pom is generated).
 *
 * @author <NAME>, 2016
 */
@Mojo(name = "install-zip", defaultPhase = LifecyclePhase.INSTALL)
public class InstallZipMojo extends AbstractMojo {
    /**
     * The Maven Project Object
     */
    @Parameter(property = "project", required = true, readonly = true)
    protected MavenProject project;
    /**
     * The Maven Session Object
     */
    @Parameter(property = "session", required = true, readonly = true)
    protected MavenSession session;
    /**
     * The Maven PluginManager Object
     */
    @Component
    protected BuildPluginManager buildPluginManager;
    /** Location of the zip artifact to install. */
    @Parameter(property = "zipFilePath", required = true, defaultValue = "${project.build.directory}/${project.artifactId}-${project.version}.zip")
    protected String zipFilePath;

    public void execute() throws MojoFailureException, MojoExecutionException
    {
        try
        {
            File file = new File(zipFilePath);
            if (!file.exists())
            {
                String message = "The zip file '" + file.getAbsolutePath() + "' is missing";
                getLog().error(message);
                throw new MojoFailureException(message);
            }
            // attempt to attach the project's pom; generate one if it is absent
            Element pomElement = null;
            String pomFilePath = project.getBasedir().getAbsolutePath() + File.separator + "pom.xml";
            File pomFile = new File(pomFilePath);
            if (!pomFile.exists())
            {
                getLog().warn("no pom file could be located at " + pomFilePath);
                pomElement = element("generatePom", "true");
            }
            else
            {
                pomElement = element("pomFile", pomFilePath);
            }
            // delegate to maven-install-plugin:install-file with this project's coordinates
            Plugin plugin = plugin(groupId("org.apache.maven.plugins"), artifactId("maven-install-plugin"), version("2.5.2"));
            String goal = goal("install-file");
            Xpp3Dom xpp3Dom = configuration(
                    element(name("file"), zipFilePath),
                    element(name("repositoryLayout"), "default"),
                    element(name("artifactId"), project.getArtifactId()),
                    element(name("version"), project.getVersion()),
                    element(name("packaging"), "zip"),
                    element(name("groupId"), project.getGroupId()),
                    pomElement
                    );
            ExecutionEnvironment executionEnvironment = executionEnvironment(project, session, buildPluginManager);
            executeMojo(plugin, goal, xpp3Dom, executionEnvironment);
        } catch (MojoFailureException e)
        {
            // Don't re-wrap our own failure (the original code flattened it to
            // e.toString() and lost the stack trace).
            throw e;
        } catch (Exception e)
        {
            // Preserve the original exception as the cause instead of
            // collapsing it to a string.
            throw new MojoFailureException(e.toString(), e);
        }
    }
}
|
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import {humanizeFileSize} from './humanizeFileSize';
import {test} from 'rome';
test(
	'humanizeFileSize',
	(t) => {
		// Each case pairs a raw byte count with its expected human-readable
		// rendering; sizes sweep every SI magnitude from bytes to yottabytes.
		const testCases = [
			{input: 1, expected: '1B'},
			{input: 10, expected: '10B'},
			{input: 100, expected: '100B'},
			{input: 1_000, expected: '1kB'},
			{input: 10_000, expected: '10kB'},
			{input: 100_000, expected: '100kB'},
			{input: 1_000_000, expected: '1MB'},
			{input: 10_000_000, expected: '10MB'},
			{input: 100_000_000, expected: '100MB'},
			{input: 1_000_000_000, expected: '1GB'},
			{input: 10_000_000_000, expected: '10GB'},
			{input: 100_000_000_000, expected: '100GB'},
			{input: 1_000_000_000_000, expected: '1TB'},
			{input: 10_000_000_000_000, expected: '10TB'},
			{input: 100_000_000_000_000, expected: '100TB'},
			{input: 1_000_000_000_000_000, expected: '1PB'},
			{input: 10_000_000_000_000_000, expected: '10PB'},
			{input: 100_000_000_000_000_000, expected: '100PB'},
			{input: 1_000_000_000_000_000_000, expected: '1EB'},
			{input: 10_000_000_000_000_000_000, expected: '10EB'},
			{input: 100_000_000_000_000_000_000, expected: '100EB'},
			{input: 1e+21, expected: '1ZB'},
			{input: 1e+22, expected: '10ZB'},
			{input: 1e+23, expected: '100ZB'},
			{input: 1e+24, expected: '1YB'},
			{input: 1e+25, expected: '10YB'},
		];
		for (const {input, expected} of testCases) {
			t.is(humanizeFileSize(input), expected);
		}
	},
);
|
from Jumpscale import j
import gevent
import time
class SSHClientBase(j.baseclasses.object_config):
    """
    Base SSH client backed by a jumpscale config object.

    Connection parameters (address, port, login, key name, ...) are declared
    in _SCHEMATEXT; concrete transport behaviour (_execute, _client) is
    provided elsewhere — NOTE(review): `_execute` and the `_client` property
    are not defined in this class, presumably supplied by a subclass; confirm.
    """

    _SCHEMATEXT = """
    @url = jumpscale.sshclient.1
    name** = ""
    addr = "localhost"
    port = 22
    login = "root"
    passwd = ""
    sshkey_name = ""
    proxy = ""
    stdout = True (B)
    forward_agent = True (B)
    allow_agent = True (B)
    # client_type = "paramiko,pssh" (E)
    timeout = 60
    """

    def _init(self, **kwargs):
        # Lazily-created collaborators; populated on first property access.
        self._client_ = None
        self._executor = None
        self._wireguard = None
        self._init3()
        # Ensure the configured key is available in the ssh-agent.
        if self.sshkey_name and self.sshkey_name not in j.core.myenv.sshagent.key_names:
            j.core.myenv.sshagent.start()
            self.sshkey_obj.load()

    @property
    def executor(self):
        # Executor bound to this ssh connection; created on first use.
        if not self._executor:
            self._executor = j.tools.executor.ssh_get(self)
        return self._executor

    def reset(self):
        """Drop any live connection state and return to a pristine state."""
        if self._client_:
            # disconnect 2 possible ways on sshclient
            try:
                self._client_.disconnect()
            except:
                pass
            # NOTE(review): `_client` (no trailing underscore) is presumably a
            # property defined elsewhere; confirm this is not a typo for
            # `_client_`.
            try:
                self._client.close()
            except:
                pass
        self._init3()

    def _init3(self):
        # Per-connection caches, reset whenever the client is (re)initialised.
        self.async_ = False
        self._connected = None
        self._transport_ = None
        self._ftp = None
        self._syncer = None

    @property
    def uid(self):
        # Unique identifier for this configured connection.
        return "%s-%s-%s" % (self.addr, self.port, self.name)

    # def sftp_stat(self, path):
    # return self.sftp.stat(path)

    @property
    def sshkey_obj(self):
        """
        return right sshkey
        """
        if self.sshkey_name in [None, ""]:
            raise j.exceptions.Base("sshkeyname needs to be specified")
        return j.clients.sshkey.get(name=self.sshkey_name)

    @property
    def isconnected(self):
        # Cached TCP reachability probe (1s timeout); first call also resets
        # the cached client handles.
        if self._connected is None:
            self._connected = j.sal.nettools.tcpPortConnectionTest(self.addr, self.port, 1)
            self.active = True
            self._sshclient = None
            self._ftpclient = None
        return self._connected

    def ssh_authorize(self, pubkeys=None, homedir="/root", interactive=True):
        """add key to authorized users, if key is specified will get public key from sshkey client,
        or can directly specify the public key. If both are specified key name instance will override public key.
        :param pubkeys: public key(s) to authorize; a single string or a list.
            Defaults to this client's own configured key.
        :type pubkeys: str or list, optional
        :param homedir: remote home directory holding .ssh/authorized_keys
        :type homedir: str, optional
        :param interactive: run the remote command interactively
        :type interactive: bool, optional
        """
        if not pubkeys:
            pubkeys = [self.sshkey_obj.pubkey]
        if isinstance(pubkeys, str):
            pubkeys = [pubkeys]
        for sshkey in pubkeys:
            # TODO: need to make sure its only 1 time
            self.execute(
                'echo "{sshkey}" >> {homedir}/.ssh/authorized_keys'.format(**locals()), interactive=interactive
            )

    @property
    def syncer(self):
        """
        is a tool to sync local files to your remote ssh instance
        :return:
        """
        if self._syncer is None:
            self._syncer = j.tools.syncer.get(name=self.name, sshclient_names=[self.name])
        return self._syncer

    def portforward_to_local(self, remoteport=6379, localport=6380):
        """
        forward remote port on ssh host to the local one, so we can connect over localhost to the remote one
        e.g. expose your remote redis (in container) to your machine you're using this env from
        :param remoteport: the port to forward to local
        :param localport: the local tcp port to be used (will terminate on remote)
        :return:
        """
        # Any previous forward on this local port is killed first.
        self.portforward_kill(localport)
        C = f"ssh -4 -f -N -L {localport}:127.0.0.1:{remoteport} {self.login}@{self.addr} -p {self.port}"
        print(C)
        j.sal.process.execute(C)
        print("Test tcp port to:%s" % localport)
        # Wait up to 10s for the tunnel to come up.
        if not j.sal.nettools.waitConnectionTest("localhost", localport, 10):
            raise j.exceptions.Base("Cannot open ssh forward:%s_%s_%s" % (self, remoteport, localport))
        print("Connection ok")

    def portforward_to_remote(self, remoteport, localport, timeout=50):
        """
        forward local port to remote host port
        :param remoteport: port used on ssh host
        :param localport: the local tcp port to be used
        :return:
        """
        # The local service must already be listening before we expose it.
        if not j.sal.nettools.tcpPortConnectionTest("localhost", localport):
            raise j.exceptions.Base(
                "Cannot open ssh forward:%s_%s_%s (local port:%s)" % (self, remoteport, localport, localport)
            )
        # self.portforwardKill(localport)
        C = f"ssh -4 -R {remoteport}:127.0.0.1:{localport} {self.login}@{self.addr} -p {self.port}"
        print(C)
        # Run the reverse tunnel under a named tmux startup command so it
        # survives this process.
        key = f"{self.addr}_{self.port}_{remoteport}_{localport}"
        cmd = j.servers.startupcmd.get(name=key)
        cmd.cmd_start = C
        cmd.ports = []
        cmd.timeout = 20
        cmd.process_strings = []  # ["ssh -R 8112:localhost:8111 <EMAIL> -p 22"]
        cmd.executor = "tmux"
        cmd.start()
        # Poll the remote side until the forwarded port answers or we time out.
        start = j.data.time.epoch
        end = start + timeout
        while j.data.time.epoch < end:
            if self.tcp_remote_port_check("127.0.0.1", remoteport):
                self._log_info(f"Connection ok for {remoteport} to local:{localport}")
                return
            time.sleep(0.1)
        # if not cmd.is_running():
        #     raise j.exceptions.Base("could not start:%s in tmux" % C)
        raise j.exceptions.Base("could not start:%s in tmux" % C)

    def tcp_remote_port_check(self, addr="localhost", port=22):
        # Probe a port as seen from the REMOTE host (runs nc over ssh).
        cmd = f"nc -zv {addr} {port}"
        rc, _, _ = self.execute(cmd)
        if rc == 0:
            return True
        else:
            return False

    def portforward_kill(self, localport):
        """
        kill the forward
        :param localport:
        :return:
        """
        self._log_warning("kill portforward localport: %s" % localport)
        j.sal.process.killProcessByPort(localport)

    def upload(
        self,
        source,
        dest=None,
        recursive=True,
        createdir=True,
        rsyncdelete=True,
        ignoredir=None,
        ignorefiles=None,
        keepsymlinks=True,
        retry=4,
    ):
        """
        Upload a local file or directory tree to the remote host (rsync over ssh).
        :param source: local file or directory
        :param dest: remote destination path; defaults to the same path as source
        :param recursive:
        :param createdir:
        :param rsyncdelete:
        :param ignoredir: the following are always in, no need to specify ['.egg-info', '.dist-info', '__pycache__']
        :param ignorefiles: the following are always in, no need to specify: ["*.egg-info","*.pyc","*.bak"]
        :param keepsymlinks:
        :param retry: number of rsync retries
        :return:
        """
        if not dest:
            dest = source
        if not j.sal.fs.isDir(source):
            if j.sal.fs.isFile(source):
                return self.file_copy(source, dest)
            else:
                raise j.exceptions.Base("only support dir or file for upload")
        if dest[0] != "/":
            raise j.exceptions.RuntimeError("need / in beginning of dest path")
        # rsync semantics: trailing slashes make both sides refer to contents.
        if source[-1] != "/":
            source += "/"
        if dest[-1] != "/":
            dest += "/"
        if j.sal.fs.isDir(source):
            if source[-1] != "/":
                source += "/"
            if dest[-1] != "/":
                dest += "/"
            dest = "%s@%s:%s" % (self.login, self.addr, dest)
            j.sal.fs.copyDirTree(
                source,
                dest,
                keepsymlinks=keepsymlinks,
                deletefirst=False,
                overwriteFiles=rsyncdelete,
                ignoredir=ignoredir,
                ignorefiles=ignorefiles,
                rsync=True,
                ssh=True,
                sshport=self.port,
                recursive=recursive,
                createdir=createdir,
                rsyncdelete=rsyncdelete,
                showout=True,
                retry=retry,
            )
        self._cache.reset()

    def download(self, source, dest=None, ignoredir=None, ignorefiles=None, recursive=True):
        """
        Download a remote file or directory tree to the local machine.
        :param source: remote file or directory path
        :param dest: local destination; defaults to the same path as source
        :param recursive:
        :param ignoredir: the following are always in, no need to specify ['.egg-info', '.dist-info', '__pycache__']
        :param ignorefiles: the following are always in, no need to specify: ["*.egg-info","*.pyc","*.bak"]
        :return:
        """
        if not dest:
            dest = source
        if not self.executor.path_isdir(source):
            if self.executor.path_isfile(source):
                # Single file: scp it directly (gevent-based transfer).
                res = self._client.scp_recv(source, dest)
                gevent.joinall(res)
                self._log_info("Copied remote file %s to local destination %s for %s" % (dest, source, self))
                return
            else:
                if not self.executor.exists(source):
                    raise j.exceptions.Base("%s does not exists, cannot download" % source)
                raise j.exceptions.Base("src:%s needs to be dir or file" % source)
        else:
            # we know now its a dir
            if source[0] != "/":
                raise j.exceptions.RuntimeError("need / in beginning of source path")
            if source[-1] != "/":
                source += "/"
            if dest[-1] != "/":
                dest += "/"
            # NOTE(review): hard-codes user "root" here instead of self.login
            # (upload uses self.login) — confirm intended.
            source = "root@%s:%s" % (self.addr, source)
            j.sal.fs.copyDirTree(
                source,
                dest,
                keepsymlinks=True,
                deletefirst=False,
                overwriteFiles=True,
                ignoredir=ignoredir,
                ignorefiles=ignorefiles,
                rsync=True,
                ssh=True,
                sshport=self.port,
                recursive=recursive,
            )

    def execute(self, cmd, interactive=True, showout=True, die=True, timeout=None):
        """
        :param cmd: cmd to execute, can be multiline or even a script
        :param interactive: run in a way we can interact with the execution
        :param showout: show the stdout?
        :param die: die if error found
        :param timeout: timeout for execution in seconds
        :return: (returncode, stdout, stderr) — NOTE(review): presumed from
            tcp_remote_port_check's unpacking; confirm against _execute.
        """
        if not isinstance(cmd, str):
            raise j.exceptions.Base("cmd needs to be string")
        # Multiline payloads (or very large commands) must go through the
        # executor's script path, which is not implemented here.
        if "\n" in cmd or len(cmd) > 100000:
            raise RuntimeError(
                "NOT IMPLEMENTED, need to use execute on executor level which will convert from script to command"
            )
        elif interactive:
            return self._execute_interactive(cmd, showout=showout, die=die)
        else:
            return self._execute(cmd, showout=showout, die=die, timeout=timeout)

    def _execute_interactive(self, cmd, showout=False, replace=True, die=True):
        # Runs cmd through a real ssh subprocess with a TTY (-t) and agent
        # forwarding (-A); single quotes in cmd are flattened to double quotes
        # because the command is wrapped in single quotes below.
        if "\n" in cmd:
            raise j.exceptions.Base("cannot have \\n in cmd: %s" % cmd)
        if "'" in cmd:
            cmd = cmd.replace("'", '"')
        cmd2 = "ssh -oStrictHostKeyChecking=no -t {LOGIN}@{ADDR} -A -p {PORT} '%s'" % (cmd)
        cmd3 = j.core.tools.text_replace(cmd2, args={"LOGIN": self.login, "ADDR": self.addr, "PORT": self.port})
        return j.core.tools.execute(cmd3, interactive=True, showout=False, replace=False, asfile=True, die=die)

    def __repr__(self):
        return "SSHCLIENT ssh: %s (%s)" % (self.addr, self.port)

    __str__ = __repr__
|
import config from '../../config'
import ORM from '../../orm'
import Logic from '../../logic'
import {clearDatabase} from '../orm/helper'
// Shared fixtures for the voting test suite.
let orm: ORM;
let logic: Logic;

// Connect once for the whole suite; each test starts from an empty database.
beforeAll(async () => {
    orm = new ORM(config.mongoose);
    logic = new Logic(orm);
    await orm.connect();
});
beforeEach(async () => {
    await clearDatabase();
});
afterAll(async () => {
    await orm.disconnect();
});

// A fresh idea has no vote from the user; after voting 3 the user's own vote
// is recorded and the aggregate rating moves to 1.
test('vote for one idea', async () => {
    const user = new ORM.User({name: 'name', login: 'login'});
    await user.save();
    const idea = new ORM.Idea({title: 'title', author: user._id});
    await idea.save();
    const userVote1 = await logic.getIdeaById('my-realm', user, idea._id);
    expect(userVote1).toBeDefined();
    expect(userVote1.myVote).toBe(0);
    expect(userVote1.voteRating).toBe(0);
    await logic.voteAndReturnNewValues(user._id, idea._id, 3);
    const userVote2 = await logic.getIdeaById('my-realm', user, idea._id);
    expect(userVote2).toBeDefined();
    expect(userVote2.myVote).toBe(3);
    expect(userVote2.voteRating).toBe(1);
});

// Re-voting replaces the previous vote; per these expectations a vote of 4
// yields a rating of -1 (NOTE(review): rating semantics defined by Logic).
test('change vote', async () => {
    const user = new ORM.User({name: 'name', login: 'login'});
    await user.save();
    const idea = new ORM.Idea({title: 'title', author: user._id});
    await idea.save();
    const userVote1 = await logic.getIdeaById('my-realm', user, idea._id);
    expect(userVote1).toBeDefined();
    expect(userVote1.myVote).toBe(0);
    expect(userVote1.voteRating).toBe(0);
    await logic.voteAndReturnNewValues(user._id, idea._id, 3);
    const userVote2 = await logic.getIdeaById('my-realm', user, idea._id);
    expect(userVote2).toBeDefined();
    expect(userVote2.myVote).toBe(3);
    expect(userVote2.voteRating).toBe(1);
    await logic.voteAndReturnNewValues(user._id, idea._id, 4);
    const userVote3 = await logic.getIdeaById('my-realm', user, idea._id);
    expect(userVote3).toBeDefined();
    expect(userVote3.myVote).toBe(4);
    expect(userVote3.voteRating).toBe(-1);
});

// Votes on one idea must not leak into another idea's vote or rating.
test('vote for two ideas', async () => {
    const user = new ORM.User({name: 'name', login: 'login'});
    await user.save();
    const idea1 = new ORM.Idea({title: 'title', author: user._id});
    await idea1.save();
    const idea2 = new ORM.Idea({title: 'title', author: user._id});
    await idea2.save();
    const userVote1 = await logic.getIdeaById('my-realm', user, idea1._id);
    expect(userVote1).toBeDefined();
    expect(userVote1.myVote).toBe(0);
    expect(userVote1.voteRating).toBe(0);
    await logic.voteAndReturnNewValues(user._id, idea1._id, 3);
    const userVote2 = await logic.getIdeaById('my-realm', user, idea1._id);
    expect(userVote2).toBeDefined();
    expect(userVote2.myVote).toBe(3);
    expect(userVote2.voteRating).toBe(1);
    const userVote3 = await logic.getIdeaById('my-realm', user, idea2._id);
    expect(userVote3).toBeDefined();
    expect(userVote3.myVote).toBe(0);
    expect(userVote3.voteRating).toBe(0);
    await logic.voteAndReturnNewValues(user._id, idea2._id, 4);
    const userVote4 = await logic.getIdeaById('my-realm', user, idea1._id);
    expect(userVote4).toBeDefined();
    expect(userVote4.myVote).toBe(3);
    expect(userVote4.voteRating).toBe(1);
    const userVote5 = await logic.getIdeaById('my-realm', user, idea2._id);
    expect(userVote5).toBeDefined();
    expect(userVote5.myVote).toBe(4);
    expect(userVote5.voteRating).toBe(-1);
});
|
<gh_stars>0
// REST routes for Bill resources. Every route requires a valid JWT and an
// Admin or Customer role holding the matching permission flag
// (read / create / up).
const billRoute = require('express').Router();
const billCtrl = require('../controllers').controller.Bill;
const authenticate = require('../middleware/authenticate');
const role = require('../middleware/role');
// GET /s — list endpoint ('/s' presumably short for the plural collection;
// confirm against the API clients).
billRoute.get('/s', authenticate.authenticateJWT, role.checkRole(["Admin", "Customer"], { read: true }), billCtrl.getMany);
billRoute.get('/:_id', authenticate.authenticateJWT, role.checkRole(["Admin", "Customer"], { read: true }), billCtrl.getById);
billRoute.post('/', authenticate.authenticateJWT, role.checkRole(["Admin", "Customer"], { create: true }), billCtrl.createData);
billRoute.put('/:_id', authenticate.authenticateJWT, role.checkRole(["Admin", "Customer"], { up: true }), billCtrl.updateById);
// TODO: delete route is currently disabled.
// billRoute.delete('/:id', billCtrl.deleteById);
module.exports = billRoute;
|
def run_quiz(questions):
    """Run an interactive multiple-choice quiz on the console.

    Each entry in ``questions`` is a dict with keys:
      - ``"question"``: prompt text printed to the user
      - ``"options"``: list of choice strings (e.g. ``"A) London"``)
      - ``"answer"``: the correct choice letter, one of A-D

    The user is re-prompted until a recognized letter is entered.
    Prints per-question feedback and the final score.

    :param questions: list of question dicts as described above.
    :return: the number of correct answers (previously only printed;
        returning it lets callers use the result programmatically).
    """
    VALID_CHOICES = ("A", "B", "C", "D")
    score = 0
    for question in questions:
        print(question["question"])
        for option in question["options"]:
            print(option)
        user_answer = input("Enter your choice (A, B, C, or D): ").strip().upper()
        # Re-prompt until the user supplies one of the recognized letters.
        while user_answer not in VALID_CHOICES:
            print("Invalid choice. Please enter A, B, C, or D.")
            user_answer = input("Enter your choice (A, B, C, or D): ").strip().upper()
        if user_answer == question["answer"]:
            print("Correct!")
            score += 1
        else:
            print(f"Wrong! The correct answer is {question['answer']}.")
    print(f"You scored {score}/{len(questions)} in the quiz.")
    return score
# Example usage
questions = [
    {
        "question": "What is the capital of France?",
        "options": ["A) London", "B) Paris", "C) Berlin", "D) Madrid"],
        "answer": "B"
    },
    {
        "question": "Which planet is known as the Red Planet?",
        "options": ["A) Venus", "B) Mars", "C) Jupiter", "D) Saturn"],
        "answer": "B"
    },
    # Add more questions here
]

# Only start the interactive quiz when executed as a script, so that
# importing this module (e.g. to reuse run_quiz) has no side effects.
if __name__ == "__main__":
    run_quiz(questions)
# Import modules
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# Read the data
data = pd.read_csv('data.csv')

# Define the input features
features = ['Age', 'Gender', 'BloodPressure', 'Glucose', 'BMI', 'Pregnancies', 'Insulin']

# Define the target variable
target = 'DiabetesRiskScore'

# Encode the categorical Gender column numerically: LinearRegression only
# accepts numeric input, so fitting on the raw string column would raise.
# (Assumes the column holds 'Male'/'Female' values — TODO confirm against
# data.csv; any other value becomes NaN via .map.)
gender_codes = {'Male': 0, 'Female': 1}
X = data[features].copy()
X['Gender'] = X['Gender'].map(gender_codes)

# Create training and test datasets (20% held out, fixed seed for
# reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, data[target], test_size=0.2, random_state=0)

# Fit the model
model = LinearRegression()
model.fit(X_train, y_train)

# Make predictions
y_pred = model.predict(X_test)

# Evaluate the model: mean squared error (an error measure — lower is
# better — not an accuracy)
score = mean_squared_error(y_test, y_pred)

# Make a prediction for the given patient. Build a one-row DataFrame with
# the same column order and Gender encoding used for training, instead of
# passing a raw list containing the string 'Female'.
patient_data = pd.DataFrame(
    [[33, gender_codes['Female'], 140, 90, 25.5, 3, 80]],
    columns=features,
)
diagnosis = model.predict(patient_data)[0]
print('Diabetes risk score for given patient is: ', diagnosis)
package com.telenav.osv.data.user.model;
import androidx.annotation.Nullable;
import com.telenav.osv.application.PreferenceTypes;
import com.telenav.osv.data.KVBaseModel;
import com.telenav.osv.data.user.model.details.BaseUserDetails;
import com.telenav.osv.data.user.model.details.gamification.GamificationDetails;
/**
 * User model class for data related to the user.
 *
 * @author horatiuf
 */
public class User extends KVBaseModel {
    /**
     * Access token required for each online request. Defaults to the empty string.
     */
    private String accessToken = "";

    /**
     * Display name for the user in the Profile screen. Defaults to the empty string.
     */
    private String displayName = "";

    /**
     * The type representing the login authentication method. It can be:
     * <ul>
     * <li>Facebook</li>
     * <li>Google</li>
     * <li>OSM</li>
     * </ul>
     */
    private String loginType;

    /**
     * The username of the account.
     */
    private String userName;

    /**
     * The type of the user. The value can be:
     * <ul>
     * <li>{@link PreferenceTypes#USER_TYPE_UNKNOWN}</li>
     * <li>{@link PreferenceTypes#USER_TYPE_CONTRIBUTOR}</li>
     * <li>{@link PreferenceTypes#USER_TYPE_QA}</li>
     * </ul>
     */
    private int userType;

    /**
     * The details of the account which can be either:
     * <ul>
     * <li>{@link GamificationDetails}</li>
     * </ul>
     * In order to check the type, the value from {@link BaseUserDetails#getType()} can be used.
     */
    @Nullable
    private BaseUserDetails details;

    /**
     * User Id for Jarvis User
     */
    private int jarvisUserId;

    /**
     * User Name for Jarvis User
     */
    private String jarvisUserName;

    /**
     * Access token required for Jarvis API requests
     */
    private String jarvisAccessToken;

    /**
     * Refresh token for Jarvis
     */
    private String jarvisRefreshToken;

    /**
     * Default constructor for the base model class.
     * @param ID {@code String} representing User ID.
     * @param accessToken {@code String} representing {@link #accessToken}.
     * @param displayName {@code String} representing {@link #displayName}.
     * @param loginType {@code String} representing {@link #loginType}.
     * @param userName {@code String} representing {@link #userName}.
     * @param userType {@code int} representing {@link #userType}.
     * @param jarvisUserId {@code int} representing {@link #jarvisUserId}.
     * @param jarvisUserName {@code String} representing {@link #jarvisUserName}.
     * @param jarvisAccessToken {@code String} representing {@link #jarvisAccessToken}.
     * @param jarvisRefreshToken {@code String} representing {@link #jarvisRefreshToken}.
     * @param details {@code BaseUserDetails} representing {@link #details}.
     */
    public User(
            String ID,
            String accessToken,
            String displayName,
            String loginType,
            String userName,
            int userType,
            int jarvisUserId,
            @Nullable String jarvisUserName,
            @Nullable String jarvisAccessToken,
            @Nullable String jarvisRefreshToken,
            @Nullable BaseUserDetails details
    ) {
        super(ID);
        this.accessToken = accessToken;
        this.displayName = displayName;
        this.loginType = loginType;
        this.userName = userName;
        this.userType = userType;
        this.jarvisUserId = jarvisUserId;
        this.jarvisUserName = jarvisUserName;
        this.jarvisAccessToken = jarvisAccessToken;
        this.jarvisRefreshToken = jarvisRefreshToken;
        this.details = details;
    }

    /**
     * @return {@code String} representing {@link #accessToken}.
     */
    public String getAccessToken() {
        return accessToken;
    }

    /**
     * @return {@code String} representing {@link #displayName}.
     */
    public String getDisplayName() {
        return displayName;
    }

    /**
     * @return {@code String} representing {@link #loginType}.
     */
    public String getLoginType() {
        return loginType;
    }

    /**
     * @return {@code String} representing {@link #userName}.
     */
    public String getUserName() {
        return userName;
    }

    /**
     * @return {@code int} representing {@link #userType}.
     */
    public int getUserType() {
        return userType;
    }

    /**
     * @return {@code BaseUserDetails} representing {@link #details}.
     */
    @Nullable
    public BaseUserDetails getDetails() {
        return details;
    }

    /**
     * @param details the new {@code BaseUserDetails} to be set.
     */
    public void setDetails(@Nullable BaseUserDetails details) {
        this.details = details;
    }

    /**
     * @return {@code int} representing {@link #jarvisUserId}.
     */
    public int getJarvisUserId() {
        return jarvisUserId;
    }

    /**
     * @return {@code String} representing {@link #jarvisUserName}.
     */
    public String getJarvisUserName() {
        return jarvisUserName;
    }

    /**
     * @return {@code String} representing {@link #jarvisAccessToken}.
     */
    public String getJarvisAccessToken() {
        return jarvisAccessToken;
    }

    /**
     * @return {@code String} representing {@link #jarvisRefreshToken}.
     */
    public String getJarvisRefreshToken() {
        return jarvisRefreshToken;
    }
}
|
# frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "helper"
require "gapic/grpc/service_stub"
require "google/cloud/dialogflow/cx/v3beta1/test_case_pb"
require "google/cloud/dialogflow/cx/v3beta1/test_case_services_pb"
require "google/cloud/dialogflow/cx/v3beta1/test_cases"
class ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::ClientTest < Minitest::Test
# Minimal in-memory stand-in for Gapic::ServiceStub used by these tests.
# Records every RPC invocation so tests can assert on call counts, and
# returns the canned response/operation supplied at construction time.
class ClientStub
  attr_accessor :call_rpc_count, :requests

  # @param response canned response object returned from every call_rpc.
  # @param operation canned GRPC operation yielded alongside the response.
  # @param block optional per-call verification block; its return value is
  #   appended to +requests+.
  def initialize response, operation, &block
    @response = response
    @operation = operation
    @block = block
    @call_rpc_count = 0
    @requests = []
  end

  # Mimics Gapic::ServiceStub#call_rpc: counts the call, runs the
  # verification block with the call's arguments, yields the canned
  # response/operation to any given block, and returns the response.
  def call_rpc *args, **kwargs
    @call_rpc_count += 1
    @requests << @block&.call(*args, **kwargs)
    yield @response, @operation if block_given?
    @response
  end
end
# Exercises Client#list_test_cases via all five calling conventions
# (hash, keyword args, protobuf request, and both options-carrying
# forms); the stub verifies the request fields and a paged response
# wrapper is expected each time. Note: the page_token literal must match
# the stub's assertion ("hello world"); a corrupted placeholder here
# would make every invocation fail inside the stub.
def test_list_test_cases
  # Create GRPC objects.
  grpc_response = ::Google::Cloud::Dialogflow::CX::V3beta1::ListTestCasesResponse.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  parent = "hello world"
  page_size = 42
  page_token = "hello world"
  view = :TEST_CASE_VIEW_UNSPECIFIED

  list_test_cases_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :list_test_cases, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::ListTestCasesRequest, request
    assert_equal "hello world", request["parent"]
    assert_equal 42, request["page_size"]
    assert_equal "hello world", request["page_token"]
    assert_equal :TEST_CASE_VIEW_UNSPECIFIED, request["view"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, list_test_cases_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.list_test_cases({ parent: parent, page_size: page_size, page_token: page_token, view: view }) do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.list_test_cases parent: parent, page_size: page_size, page_token: page_token, view: view do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.list_test_cases ::Google::Cloud::Dialogflow::CX::V3beta1::ListTestCasesRequest.new(parent: parent, page_size: page_size, page_token: page_token, view: view) do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.list_test_cases({ parent: parent, page_size: page_size, page_token: page_token, view: view }, grpc_options) do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.list_test_cases(::Google::Cloud::Dialogflow::CX::V3beta1::ListTestCasesRequest.new(parent: parent, page_size: page_size, page_token: page_token, view: view), grpc_options) do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, list_test_cases_client_stub.call_rpc_count
  end
end
# Exercises Client#batch_delete_test_cases via all five calling
# conventions against a recording stub; the stub checks the request
# fields and exactly five RPCs are expected.
def test_batch_delete_test_cases
  # Create GRPC objects.
  grpc_response = ::Google::Protobuf::Empty.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  parent = "hello world"
  names = ["hello world"]

  batch_delete_test_cases_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :batch_delete_test_cases, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::BatchDeleteTestCasesRequest, request
    assert_equal "hello world", request["parent"]
    assert_equal ["hello world"], request["names"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, batch_delete_test_cases_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.batch_delete_test_cases({ parent: parent, names: names }) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.batch_delete_test_cases parent: parent, names: names do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.batch_delete_test_cases ::Google::Cloud::Dialogflow::CX::V3beta1::BatchDeleteTestCasesRequest.new(parent: parent, names: names) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.batch_delete_test_cases({ parent: parent, names: names }, grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.batch_delete_test_cases(::Google::Cloud::Dialogflow::CX::V3beta1::BatchDeleteTestCasesRequest.new(parent: parent, names: names), grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, batch_delete_test_cases_client_stub.call_rpc_count
  end
end
# Exercises Client#get_test_case via all five calling conventions; the
# stub verifies the request's name field. Note: the name literal must
# match the stub's assertion ("hello world"); a corrupted placeholder
# here would make every invocation fail inside the stub.
def test_get_test_case
  # Create GRPC objects.
  grpc_response = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCase.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  name = "hello world"

  get_test_case_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :get_test_case, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::GetTestCaseRequest, request
    assert_equal "hello world", request["name"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, get_test_case_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.get_test_case({ name: name }) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.get_test_case name: name do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.get_test_case ::Google::Cloud::Dialogflow::CX::V3beta1::GetTestCaseRequest.new(name: name) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.get_test_case({ name: name }, grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.get_test_case(::Google::Cloud::Dialogflow::CX::V3beta1::GetTestCaseRequest.new(name: name), grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, get_test_case_client_stub.call_rpc_count
  end
end
# Exercises Client#create_test_case via all five calling conventions;
# the stub verifies the parent string and that the empty test_case hash
# is coerced to a TestCase protobuf. Exactly five RPCs are expected.
def test_create_test_case
  # Create GRPC objects.
  grpc_response = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCase.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  parent = "hello world"
  test_case = {}

  create_test_case_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :create_test_case, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::CreateTestCaseRequest, request
    assert_equal "hello world", request["parent"]
    assert_equal Gapic::Protobuf.coerce({}, to: ::Google::Cloud::Dialogflow::CX::V3beta1::TestCase), request["test_case"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, create_test_case_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.create_test_case({ parent: parent, test_case: test_case }) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.create_test_case parent: parent, test_case: test_case do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.create_test_case ::Google::Cloud::Dialogflow::CX::V3beta1::CreateTestCaseRequest.new(parent: parent, test_case: test_case) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.create_test_case({ parent: parent, test_case: test_case }, grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.create_test_case(::Google::Cloud::Dialogflow::CX::V3beta1::CreateTestCaseRequest.new(parent: parent, test_case: test_case), grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, create_test_case_client_stub.call_rpc_count
  end
end
# Exercises Client#update_test_case via all five calling conventions;
# the stub verifies that the empty test_case and update_mask hashes are
# coerced to their protobuf types. Exactly five RPCs are expected.
def test_update_test_case
  # Create GRPC objects.
  grpc_response = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCase.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  test_case = {}
  update_mask = {}

  update_test_case_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :update_test_case, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::UpdateTestCaseRequest, request
    assert_equal Gapic::Protobuf.coerce({}, to: ::Google::Cloud::Dialogflow::CX::V3beta1::TestCase), request["test_case"]
    assert_equal Gapic::Protobuf.coerce({}, to: ::Google::Protobuf::FieldMask), request["update_mask"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, update_test_case_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.update_test_case({ test_case: test_case, update_mask: update_mask }) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.update_test_case test_case: test_case, update_mask: update_mask do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.update_test_case ::Google::Cloud::Dialogflow::CX::V3beta1::UpdateTestCaseRequest.new(test_case: test_case, update_mask: update_mask) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.update_test_case({ test_case: test_case, update_mask: update_mask }, grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.update_test_case(::Google::Cloud::Dialogflow::CX::V3beta1::UpdateTestCaseRequest.new(test_case: test_case, update_mask: update_mask), grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, update_test_case_client_stub.call_rpc_count
  end
end
# Exercises the long-running Client#run_test_case via all five calling
# conventions; responses are wrapped in Gapic::Operation. Exactly five
# RPCs are expected.
def test_run_test_case
  # Create GRPC objects.
  grpc_response = ::Google::Longrunning::Operation.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  name = "hello world"
  environment = "hello world"

  run_test_case_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :run_test_case, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::RunTestCaseRequest, request
    assert_equal "hello world", request["name"]
    assert_equal "hello world", request["environment"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, run_test_case_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.run_test_case({ name: name, environment: environment }) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.run_test_case name: name, environment: environment do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.run_test_case ::Google::Cloud::Dialogflow::CX::V3beta1::RunTestCaseRequest.new(name: name, environment: environment) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.run_test_case({ name: name, environment: environment }, grpc_options) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.run_test_case(::Google::Cloud::Dialogflow::CX::V3beta1::RunTestCaseRequest.new(name: name, environment: environment), grpc_options) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, run_test_case_client_stub.call_rpc_count
  end
end
# Exercises the long-running Client#batch_run_test_cases via all five
# calling conventions; responses are wrapped in Gapic::Operation.
# Exactly five RPCs are expected.
def test_batch_run_test_cases
  # Create GRPC objects.
  grpc_response = ::Google::Longrunning::Operation.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  parent = "hello world"
  environment = "hello world"
  test_cases = ["hello world"]

  batch_run_test_cases_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :batch_run_test_cases, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::BatchRunTestCasesRequest, request
    assert_equal "hello world", request["parent"]
    assert_equal "hello world", request["environment"]
    assert_equal ["hello world"], request["test_cases"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, batch_run_test_cases_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.batch_run_test_cases({ parent: parent, environment: environment, test_cases: test_cases }) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.batch_run_test_cases parent: parent, environment: environment, test_cases: test_cases do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.batch_run_test_cases ::Google::Cloud::Dialogflow::CX::V3beta1::BatchRunTestCasesRequest.new(parent: parent, environment: environment, test_cases: test_cases) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.batch_run_test_cases({ parent: parent, environment: environment, test_cases: test_cases }, grpc_options) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.batch_run_test_cases(::Google::Cloud::Dialogflow::CX::V3beta1::BatchRunTestCasesRequest.new(parent: parent, environment: environment, test_cases: test_cases), grpc_options) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, batch_run_test_cases_client_stub.call_rpc_count
  end
end
# Exercises Client#calculate_coverage via all five calling conventions;
# the stub verifies the agent string and coverage type enum. Exactly
# five RPCs are expected.
def test_calculate_coverage
  # Create GRPC objects.
  grpc_response = ::Google::Cloud::Dialogflow::CX::V3beta1::CalculateCoverageResponse.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  agent = "hello world"
  type = :COVERAGE_TYPE_UNSPECIFIED

  calculate_coverage_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :calculate_coverage, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::CalculateCoverageRequest, request
    assert_equal "hello world", request["agent"]
    assert_equal :COVERAGE_TYPE_UNSPECIFIED, request["type"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, calculate_coverage_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.calculate_coverage({ agent: agent, type: type }) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.calculate_coverage agent: agent, type: type do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.calculate_coverage ::Google::Cloud::Dialogflow::CX::V3beta1::CalculateCoverageRequest.new(agent: agent, type: type) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.calculate_coverage({ agent: agent, type: type }, grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.calculate_coverage(::Google::Cloud::Dialogflow::CX::V3beta1::CalculateCoverageRequest.new(agent: agent, type: type), grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, calculate_coverage_client_stub.call_rpc_count
  end
end
# Exercises the long-running Client#import_test_cases via all five
# calling conventions; the stub also checks that the gcs_uri member of
# the request's `source` oneof is set. Exactly five RPCs are expected.
def test_import_test_cases
  # Create GRPC objects.
  grpc_response = ::Google::Longrunning::Operation.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  parent = "hello world"
  gcs_uri = "hello world"

  import_test_cases_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :import_test_cases, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::ImportTestCasesRequest, request
    assert_equal "hello world", request["parent"]
    assert_equal "hello world", request["gcs_uri"]
    assert_equal :gcs_uri, request.source
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, import_test_cases_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.import_test_cases({ parent: parent, gcs_uri: gcs_uri }) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.import_test_cases parent: parent, gcs_uri: gcs_uri do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.import_test_cases ::Google::Cloud::Dialogflow::CX::V3beta1::ImportTestCasesRequest.new(parent: parent, gcs_uri: gcs_uri) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.import_test_cases({ parent: parent, gcs_uri: gcs_uri }, grpc_options) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.import_test_cases(::Google::Cloud::Dialogflow::CX::V3beta1::ImportTestCasesRequest.new(parent: parent, gcs_uri: gcs_uri), grpc_options) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, import_test_cases_client_stub.call_rpc_count
  end
end
# Exercises the long-running Client#export_test_cases via all five
# calling conventions; the stub also checks that the gcs_uri member of
# the request's `destination` oneof is set, plus the data_format enum
# and filter string. Exactly five RPCs are expected.
def test_export_test_cases
  # Create GRPC objects.
  grpc_response = ::Google::Longrunning::Operation.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  parent = "hello world"
  gcs_uri = "hello world"
  data_format = :DATA_FORMAT_UNSPECIFIED
  filter = "hello world"

  export_test_cases_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :export_test_cases, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::ExportTestCasesRequest, request
    assert_equal "hello world", request["parent"]
    assert_equal "hello world", request["gcs_uri"]
    assert_equal :gcs_uri, request.destination
    assert_equal :DATA_FORMAT_UNSPECIFIED, request["data_format"]
    assert_equal "hello world", request["filter"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, export_test_cases_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.export_test_cases({ parent: parent, gcs_uri: gcs_uri, data_format: data_format, filter: filter }) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.export_test_cases parent: parent, gcs_uri: gcs_uri, data_format: data_format, filter: filter do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.export_test_cases ::Google::Cloud::Dialogflow::CX::V3beta1::ExportTestCasesRequest.new(parent: parent, gcs_uri: gcs_uri, data_format: data_format, filter: filter) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.export_test_cases({ parent: parent, gcs_uri: gcs_uri, data_format: data_format, filter: filter }, grpc_options) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.export_test_cases(::Google::Cloud::Dialogflow::CX::V3beta1::ExportTestCasesRequest.new(parent: parent, gcs_uri: gcs_uri, data_format: data_format, filter: filter), grpc_options) do |response, operation|
      assert_kind_of Gapic::Operation, response
      assert_equal grpc_response, response.grpc_op
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, export_test_cases_client_stub.call_rpc_count
  end
end
def test_list_test_case_results
  # Create GRPC objects.
  grpc_response = ::Google::Cloud::Dialogflow::CX::V3beta1::ListTestCaseResultsResponse.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  # NOTE: page_token must match the stub's assertion below ("hello world");
  # the previous value was a redaction placeholder and made the test fail.
  parent = "hello world"
  page_size = 42
  page_token = "hello world"
  filter = "hello world"

  # Stub that verifies each RPC receives the expected request fields.
  list_test_case_results_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :list_test_case_results, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::ListTestCaseResultsRequest, request
    assert_equal "hello world", request["parent"]
    assert_equal 42, request["page_size"]
    assert_equal "hello world", request["page_token"]
    assert_equal "hello world", request["filter"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, list_test_case_results_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.list_test_case_results({ parent: parent, page_size: page_size, page_token: page_token, filter: filter }) do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.list_test_case_results parent: parent, page_size: page_size, page_token: page_token, filter: filter do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.list_test_case_results ::Google::Cloud::Dialogflow::CX::V3beta1::ListTestCaseResultsRequest.new(parent: parent, page_size: page_size, page_token: page_token, filter: filter) do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.list_test_case_results({ parent: parent, page_size: page_size, page_token: page_token, filter: filter }, grpc_options) do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.list_test_case_results(::Google::Cloud::Dialogflow::CX::V3beta1::ListTestCaseResultsRequest.new(parent: parent, page_size: page_size, page_token: page_token, filter: filter), grpc_options) do |response, operation|
      assert_kind_of Gapic::PagedEnumerable, response
      assert_equal grpc_response, response.response
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, list_test_case_results_client_stub.call_rpc_count
  end
end
def test_get_test_case_result
  # Create GRPC objects.
  grpc_response = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCaseResult.new
  grpc_operation = GRPC::ActiveCall::Operation.new nil
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
  grpc_options = {}

  # Create request parameters for a unary method.
  # NOTE: must match the stub's assertion below ("hello world"); the
  # previous value was a redaction placeholder and made the test fail.
  name = "hello world"

  # Stub that verifies the RPC receives the expected request fields.
  get_test_case_result_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
    assert_equal :get_test_case_result, name
    assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::GetTestCaseResultRequest, request
    assert_equal "hello world", request["name"]
    refute_nil options
  end

  Gapic::ServiceStub.stub :new, get_test_case_result_client_stub do
    # Create client
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end

    # Use hash object
    client.get_test_case_result({ name: name }) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use named arguments
    client.get_test_case_result name: name do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object
    client.get_test_case_result ::Google::Cloud::Dialogflow::CX::V3beta1::GetTestCaseResultRequest.new(name: name) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use hash object with options
    client.get_test_case_result({ name: name }, grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Use protobuf object with options
    client.get_test_case_result(::Google::Cloud::Dialogflow::CX::V3beta1::GetTestCaseResultRequest.new(name: name), grpc_options) do |response, operation|
      assert_equal grpc_response, response
      assert_equal grpc_operation, operation
    end

    # Verify method calls
    assert_equal 5, get_test_case_result_client_stub.call_rpc_count
  end
end
def test_configure
  # An insecure channel is enough to construct the client while the
  # service stub is replaced with nil.
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure

  client = nil
  Gapic::ServiceStub.stub :new, nil do
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end
  end

  # The object yielded to the configure block must be the very object
  # the method returns.
  block_config = nil
  config = client.configure do |c|
    block_config = c
  end

  assert_same block_config, config
  assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client::Configuration, config
end
def test_operations_client
  # Construct the client against a stubbed-out service stub; no real
  # credentials are needed for this check.
  grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure

  client = nil
  Gapic::ServiceStub.stub :new, nil do
    client = ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Client.new do |config|
      config.credentials = grpc_channel
    end
  end

  # The long-running-operations helper client must be exposed.
  assert_kind_of ::Google::Cloud::Dialogflow::CX::V3beta1::TestCases::Operations, client.operations_client
end
end
|
#!/usr/bin/env bash
# Known bugs:
# - Newlines in file/directory names break this script
# This is because we rely on `compgen -A`, which is broken like this.
# A fix would require implementing it ourselves, and no thanks!
# - `rgbasm --binary-digits=a` is treated the same as `rgbasm --binary-digits=` (for example)
# This is not our fault, Bash passes both of these identically.
# Maybe it could be worked around, but such a fix would likely be involved.
# The user can work around it by typing `--binary-digits ''` instead, for example.
# - Directories are not completed as such in "coalesced" short-opt arguments. For example,
# `rgbasm -M d<tab>` can autocomplete to `rgbasm -M dir/` (no space), but
# `rgbasm -Md<tab>` would autocomplete to `rgbasm -Mdir ` (trailing space) instead.
# This is because directory handling is performed by Readline, whom we can't tell about the short
# opt kerfuffle. The user can work around by separating the argument, as shown above.
# (Also, there might be more possible bugs if `-Mdir` is actually a directory. Ugh.)
# Something to note:
# `rgbasm --binary-digits=a` gets passed to us as ('rgbasm' '--binary-digits' '=' 'a')
# Thus, we don't need to do much to handle that form of argument passing: skip '=' after long opts.
_rgbasm_completions() {
    COMPREPLY=()

    # Format: "long_opt:state_after"
    # Empty long opt = it doesn't exist
    # See the `state` variable below for info about `state_after`
    declare -A opts=(
        [V]="version:normal"
        [E]="export-all:normal"
        [h]="halt-without-nop:normal"
        [L]="preserve-ld:normal"
        [v]="verbose:normal"
        [w]=":normal"
        [b]="binary-digits:unk"
        [D]="define:unk"
        [g]="gfx-chars:unk"
        [i]="include:dir"
        [M]="dependfile:glob-*.mk *.d"
        [o]="output:glob-*.o"
        [p]="pad-value:unk"
        [r]="recursion-depth:unk"
        [W]="warning:warning"
    )

    # Parse command-line up to current word
    local opt_ena=true
    # Possible states:
    # - normal = Well, normal. Options are parsed normally.
    # - unk = An argument that can't be completed, and should just be skipped.
    # - warning = A warning flag.
    # - dir = A directory path
    # - glob-* = A glob, after the dash is a whitespace-separated list of file globs to use
    local state=normal
    # The length of the option, used as a return value by the function below
    local optlen=0

    # $1: a short option word
    # `state` will be set to the parsing state after the last option character in the word. If
    # "normal" is not returned, `optlen` will be set to the length (dash included) of the "option"
    # part of the argument.
    parse_short_opt() {
        for (( i = 1; i < "${#1}"; i++ )); do
            # If the option is not known, assume it doesn't take an argument
            local opt="${opts["${1:$i:1}"]:-":normal"}"
            state="${opt#*:}"
            # If the option takes an argument, record the length and exit
            if [[ "$state" != 'normal' ]]; then
                let optlen="$i + 1"
                return
            fi
        done
        optlen=0
    }

    for (( i = 1; i < $COMP_CWORD; i++ )); do
        local word="${COMP_WORDS[$i]}"

        # If currently processing an argument, skip this word
        if [[ "$state" != 'normal' ]]; then
            state=normal
            continue
        fi

        if [[ "$word" = '--' ]]; then
            # Options stop being parsed after this
            opt_ena=false
            break
        fi

        # Check if it's a long option
        if [[ "${word:0:2}" = '--' ]]; then
            # If the option is unknown, assume it takes no arguments: keep the state at "normal"
            for long_opt in "${opts[@]}"; do
                if [[ "$word" = "--${long_opt%%:*}" ]]; then
                    state="${long_opt#*:}"
                    # Check if the next word is just '='; if so, skip it, the argument must follow
                    # (See "known bugs" at the top of this script)
                    let i++
                    if [[ "${COMP_WORDS[$i]}" != '=' ]]; then
                        let i--
                    fi
                    optlen=0
                    break
                fi
            done
        # Check if it's a short option
        elif [[ "${word:0:1}" = '-' ]]; then
            # The `-M?` ones are a mix of short and long, augh
            # They must match the *full* word, but only take a single dash
            # So, handle them here
            # FIX: compare the word being scanned, not "$1" — inside a
            # completion function "$1" is the command name ("rgbasm"), so the
            # old test could never match and -MG/-MP/-MT/-MQ were mishandled.
            if [[ "$word" = "-M"[GP] ]]; then
                state=normal
            elif [[ "$word" = "-M"[TQ] ]]; then
                state='glob-*.d *.mk *.o'
            else
                parse_short_opt "$word"
                # The last option takes an argument...
                if [[ "$state" != 'normal' ]]; then
                    if [[ "$optlen" -ne "${#word}" ]]; then
                        # If it's contained within the word, we won't complete it, revert to "normal"
                        state=normal
                    else
                        # Otherwise, complete it, but start at the beginning of *that* word
                        optlen=0
                    fi
                fi
            fi
        fi
    done

    # Parse current word
    # Careful that it might look like an option, so use `--` aggressively!
    local cur_word="${COMP_WORDS[$COMP_CWORD]}"

    # Process options, as short ones may change the state
    if $opt_ena && [[ "$state" = 'normal' && "${cur_word:0:1}" = '-' ]]; then
        # We might want to complete to an option or an arg to that option
        # Parse the option word to check
        # There's no whitespace in the option names, so we can ride a little dirty...

        # Is this a long option?
        if [[ "${cur_word:1:1}" = '-' ]]; then
            # It is, try to complete one
            COMPREPLY+=( $(compgen -W "${opts[*]%%:*}" -P '--' -- "${cur_word#--}") )
            return 0
        else
            # Short options may be grouped, parse them to determine what to complete
            # The `-M?` ones may not be followed by anything
            # FIX: test the word under the cursor, not "$1" (the command name)
            if [[ "$cur_word" != "-M"[GPTQ] ]]; then
                parse_short_opt "$cur_word"
                # We got some short options that behave like long ones
                COMPREPLY+=( $(compgen -W '-MG -MP -MT -MQ' -- "$cur_word") )
                if [[ "$state" = 'normal' ]]; then
                    COMPREPLY+=( $(compgen -W "${!opts[*]}" -P "$cur_word" '') )
                    return 0
                elif [[ "$optlen" = "${#cur_word}" && "$state" != "warning" ]]; then
                    # This short option group only awaits its argument!
                    # Post the option group as-is as a reply so that Readline inserts a space,
                    # so that the next completion request switches to the argument
                    # An exception is made for warnings, since it's idiomatic to stick them to the
                    # `-W`, and it doesn't break anything.
                    COMPREPLY+=( "$cur_word" )
                    return 0
                fi
            fi
        fi
    fi

    case "$state" in
        unk) # Return with no replies: no idea what to complete!
            ;;
        warning)
            COMPREPLY+=( $(compgen -W "
                assert
                backwards-for
                builtin-args
                charmap-redef
                div
                empty-data-directive
                empty-macro-arg
                empty-strrpl
                large-constant
                long-string
                macro-shift
                nested-comment
                obsolete
                shift
                shift-amount
                truncation
                user
                all
                extra
                everything
                error" -P "${cur_word:0:$optlen}" -- "${cur_word:$optlen}") )
            ;;
        normal) # Acts like a glob...
            state="glob-*.asm *.inc *.sm83"
            ;&
        glob-*)
            while read -r word; do
                COMPREPLY+=("${cur_word:0:$optlen}$word")
            done < <(for glob in ${state#glob-}; do compgen -A file -X \!"$glob" -- "${cur_word:$optlen}"; done)
            # Also complete directories
            ;&
        dir)
            while read -r word; do
                COMPREPLY+=("${cur_word:0:$optlen}$word")
            done < <(compgen -A directory -- "${cur_word:$optlen}")
            compopt -o filenames
            ;;
    esac
}
complete -F _rgbasm_completions rgbasm
|
from typing import Union, Tuple
import math
def calculate_positions_and_momenta(time_like: float, return_cartesian: bool) -> Tuple[float, float, float]:
    """Compute a demonstration position or momentum triple from a scalar.

    The previous annotation ``Union[Tuple[...], Tuple[...]]`` listed the
    same tuple type twice; both branches return a 3-tuple of floats.

    Args:
        time_like: Scalar parameter that all outputs are derived from.
        return_cartesian: If True, return Cartesian positions ``(x, y, z)``;
            otherwise return spherical-polar momenta ``(r, theta, phi)``.

    Returns:
        A 3-tuple of floats: ``(x, y, z)`` or ``(r, theta, phi)``.
    """
    if return_cartesian:
        # Cartesian branch: arbitrary linear scalings of the input.
        x = time_like * 2.0
        y = time_like * 3.0
        z = time_like * 4.0
        return x, y, z
    # Spherical-polar branch: radius scales with the input; angles are fixed.
    r = time_like * 1.5
    theta = math.radians(45)  # polar angle, radians
    phi = math.radians(60)    # azimuthal angle, radians
    return r, theta, phi
<filename>lib/rdfToJson.js
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const constants_1 = require("./constants");
/**
 * Report whether an RDF node identifier denotes a blank node
 * (blank-node ids use the `_:` prefix).
 * @param {string} id
 * @returns {boolean}
 */
function isBlank(id) {
    return id.startsWith('_:');
}
/**
 * Shorten a predicate IRI by stripping, in order, each type's namespace
 * prefix (the IRI up to its last `#` or `/`) and finally `base + '#'`.
 * Each prefix is applied to the result of the previous strip.
 * @param {string} name - full predicate IRI
 * @param {string[]} types - item type IRIs supplying candidate namespaces
 * @param {string} base - document base IRI
 * @returns {string} the shortened property name
 */
function resolveProperty(name, types, base) {
    const prefixes = types.map((type) => type.replace(/[^#/]*$/, ''));
    prefixes.push(base + '#');
    let resolved = name;
    for (const prefix of prefixes) {
        if (resolved.indexOf(prefix) === 0) {
            resolved = resolved.slice(prefix.length);
        }
    }
    return resolved;
}
/**
 * Replace any property value that also appears on its own ancestor chain
 * with the string 'ERROR', so the item tree can be serialized without
 * reference cycles. Mutates the items in place.
 *
 * @param {JsonItem[]} items - items to scan (modified in place)
 * @param {JsonPropertyValue[]} ancestors - chain of items enclosing `items`
 */
function removeCircular(items, ancestors = []) {
    for (const item of items) {
        const newAncestors = ancestors.concat([item]);
        for (const [property, values] of Object.entries(item.properties)) {
            // Any value already on the ancestor chain is a cycle: cut it.
            item.properties[property] = values.map((value) => {
                if (newAncestors.indexOf(value) >= 0) {
                    return 'ERROR';
                }
                else {
                    return value;
                }
            });
            // NOTE(review): `subItems` is taken from the pre-replacement
            // `values` array, so an object just replaced by 'ERROR' above is
            // still recursed into here — confirm this re-processing is
            // intended; recursion still terminates because the recursive
            // call re-reads the (already rewritten) properties.
            const subItems = values.filter((value) => typeof value === 'object');
            removeCircular(subItems, newAncestors);
        }
    }
}
/**
 * Group RDF triples by subject and convert them into a nested JSON item
 * tree. Blank-node objects are inlined into the item that references them
 * and removed from the top level; cycles created by that inlining are cut
 * by removeCircular.
 *
 * @param {import('./microdataToRdf').Triple[]} triples
 * @param {import('./index').Config} config - only `config.base` is read here
 * @returns {JsonResult}
 */
function rdfToJson(triples, config) {
    // One JSON item per distinct subject; blank-node subjects get no `id`.
    const itemMap = triples.reduce((res, triple) => {
        const id = triple.subject;
        if (!res[id]) {
            const item = { properties: {} };
            if (!isBlank(id)) {
                item.id = id;
            }
            res[id] = item;
        }
        return res;
    }, {});
    // Every subject starts out top-level; blank nodes referenced by another
    // item are filtered out of this list below.
    let topLevelItems = Object.keys(itemMap);
    triples.forEach(function (triple) {
        const base = config.base || '';
        const item = itemMap[triple.subject];
        if (triple.predicate === constants_1.RDF__TYPE) {
            // rdf:type triples populate `item.type` instead of `properties`.
            if (triple.object.id !== undefined) {
                const namedNode = triple.object;
                item.type = item.type || [];
                item.type.push(namedNode.id);
            }
        }
        else {
            // Shorten the predicate IRI against the item's types or the base.
            const property = resolveProperty(triple.predicate, item.type || [], base);
            const value = item.properties[property] || [];
            const object = triple.object;
            if (object.id !== undefined) {
                const namedNode = object;
                if (isBlank(namedNode.id)) {
                    // Inline the referenced blank-node item and demote it
                    // from the top level.
                    const refItem = itemMap[namedNode.id];
                    if (refItem) {
                        value.push(refItem);
                        topLevelItems = topLevelItems.filter(function (id) {
                            return id !== object.id;
                        });
                    }
                }
                else {
                    value.push(namedNode.id);
                }
            }
            else {
                // Literal object: keep just its lexical value.
                const literalNode = object;
                value.push(literalNode.value);
            }
            if (value.length > 0) {
                item.properties[property] = value;
            }
        }
    });
    const items = topLevelItems.map(function (id) {
        return itemMap[id];
    });
    // Cut any reference cycles introduced by blank-node inlining.
    removeCircular(items, []);
    return { items };
}
exports.default = rdfToJson;
//# sourceMappingURL=rdfToJson.js.map |
#!/bin/bash
# Provision a Raspberry Pi as a "bash bunny"-style HID/mass-storage gadget:
# install packages, build the payload image, and compile helper tools.
apt install git vim isc-dhcp-server exfat-fuse exfat-utils npm -y
npm install .

# Build a 512 MiB FAT image that will be exposed as USB mass storage,
# then seed it with the bashbunny payload collection.
dd if=/dev/zero of=/scripts/system.img bs=1M count=512
mkdosfs /scripts/system.img
mkdir -p /test
mount -o loop /scripts/system.img /test
cd /usr/src
git clone https://github.com/hak5/bashbunny-payloads.git
rsync -a /usr/src/bashbunny-payloads/payloads/ /test/
sync
umount /test

# GPIO helper used by the payload scripts.
cd /usr/src
git clone git://github.com/quick2wire/quick2wire-gpio-admin.git
cd quick2wire-gpio-admin
make
sudo make install

# Install the prepared system configuration and enable the service.
rsync -av /scripts/etc/ /etc/
rsync -av /scripts/boot/ /boot/
systemctl enable pibunny

# Build the HID-gadget helpers used to type out ducky payloads.
cd /usr/src
git clone https://github.com/theresalu/rspiducky.git
cd rspiducky
gcc usleep.c -o /home/pi/usleep
gcc hid-gadget-test.c -o /home/pi/hid-gadget-test
chmod +x /usr/src/rspiducky/duckpy.sh

#For my led script
sudo wget -O /usr/local/sbin/neouart https://github.com/bigjosh/NeoUart/releases/download/2/neouart
chmod +x /usr/local/sbin/neouart

# Symlink every helper in /scripts/bin into /bin.
# Glob instead of `ls` so file names with spaces survive word splitting.
for BINPATH in /scripts/bin/*
do
    ln -sf "$BINPATH" /bin
done
|
#!/usr/bin/env bash
# Prompt for a project name and scaffold a new React app with it.
uptask() {
    # -r keeps backslashes literal; quote "$prj" so names containing
    # spaces are passed to create-react-app as a single argument.
    read -r -p "React Project Name: " prj
    npx create-react-app "$prj"
}
|
#!/bin/bash
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
export LIBS="-lm"

# Strip -O2 from the CFLAGS for glibc and ARM builds. Workarounds for:
# https://code.google.com/p/nativeclient/issues/detail?id=3598 (glibc)
# https://code.google.com/p/nativeclient/issues/detail?id=3205 (arm)
# TODO: Remove when these are fixed.
# (The substitution is idempotent, so folding the two identical checks
# into one condition leaves the resulting environment unchanged.)
if [[ "$NACL_GLIBC" = "1" || "$NACL_ARCH" = "arm" ]]; then
    export NACLPORTS_CFLAGS="${NACLPORTS_CFLAGS//-O2/}"
fi
|
<reponame>iamKyun/golang-tutorial<filename>example/array/array.go<gh_stars>100-1000
package main
import "fmt"
func main() {
	// A zero-valued array of five ints; once declared, an array's length
	// and element type are fixed.
	var array1 [5]int
	fmt.Printf("array1: %d\n\n", array1)

	// An array initialized element by element; individual slots can
	// still be reassigned.
	array2 := [5]int{12, 123, 1234, 12345, 123456}
	array2[1] = 5000
	fmt.Printf("array2: %d\n\n", array2[1])

	// Fill a ten-element array with i + 100 ...
	var n [10]int
	for i := range n {
		n[i] = i + 100
	}
	// ... and print each element.
	for j, v := range n {
		fmt.Printf("Element[%d] = %d\n", j, v)
	}

	// A 5x2 two-dimensional array, printed row by row.
	a := [5][2]int{{0, 0}, {1, 2}, {2, 4}, {3, 6}, {4, 8}}
	for e := range a {
		for f := range a[e] {
			fmt.Printf("a[%d][%d] = %d\n", e, f, a[e][f])
		}
	}
}
# File: R (Python 2.4)
import math
from pandac.PandaModules import MouseButton
from pandac.PandaModules import ColorBlendAttrib
from direct.interval.IntervalGlobal import Sequence, Func, LerpScaleInterval
from direct.gui.DirectGui import DirectButton, DirectLabel, DGG
from direct.task import Task
from direct.fsm import FSM
from pandac.PandaModules import TransformState
from pandac.PandaModules import Texture
from pandac.PandaModules import TextureStage
from direct.interval.IntervalGlobal import *
from pirates.piratesgui.GuiPanel import *
import random
_activePosition = 0.5
class RepairLeak(DirectButton, FSM.FSM):
    """A clickable hull leak in the ship-repair pitching minigame.

    The button geometry shows an open hole (normal states) or a pitched
    patch (disabled state), with two scrolling water-texture planes
    pouring from it. States: Idle (hidden), Active (leaking, clickable),
    Patched (plays a drying animation and then destroys itself).
    """

    def __init__(self, name, parent, leakscale, **kw):
        self.name = name
        pitchingGui = loader.loadModel('models/gui/pir_m_gui_srp_pitching_main')
        self.hole = pitchingGui.find('**/hole')
        # Randomly pick one of two patch textures for visual variety.
        if random.random() > 0.5:
            self.holeFilled = pitchingGui.find('**/pitch1')
        else:
            self.holeFilled = pitchingGui.find('**/pitch2')
        # Fourth geom (disabled state) shows the patched hole.
        optiondefs = (('relief', None, None), ('geom', (self.hole, self.hole, self.hole, self.holeFilled), None), ('rolloverSound', None, None), ('clickSound', None, None))
        self.defineoptions(kw, optiondefs)
        # FIX: was `**None` (a decompilation artifact), which raises
        # TypeError at runtime; forward the caller's keyword options.
        DirectButton.__init__(self, parent = parent, **kw)
        self.initialiseoptions(RepairLeak)
        FSM.FSM.__init__(self, 'leak_%sFSM' % self.name)
        self.onCleanup = None
        self.leakScale = leakscale
        self.pitchingGame = parent
        self._initVars()
        self._initVisuals()
        self._initIntervals()
        self.fadeSequence = None
        self.request('Idle')

    def _initVars(self):
        # Seconds this leak has been in the Active state.
        self.timeActive = 0.0
        self.pulseScale = 0.59999999999999998

    def _initVisuals(self):
        # Two overlapping water planes, scrolled at slightly different
        # rates, fake a pouring-water effect; alpha textures fade the
        # streams in and out.
        textureCard = loader.loadModel('models/minigames/pir_m_gam_srp_water')
        self.waterStream = textureCard.find('**/waterPlane')
        tex = textureCard.findTexture('pir_t_gui_srp_waterDrops')
        textureCard2 = loader.loadModel('models/minigames/pir_m_gam_srp_water')
        self.waterStream2 = textureCard2.find('**/waterPlane')
        tex2 = textureCard2.findTexture('pir_t_gui_srp_waterDrops')
        alphaCard = loader.loadModel('models/minigames/pir_m_gui_srp_waterDropsAlpha')
        self.alphaWaterStream = textureCard.find('**/pir_t_gui_srp_waterDropsAlpha')
        alphatex = alphaCard.find('**/pir_t_gui_srp_waterDropsAlpha').findTexture('*')
        self.alphaWaterStream2 = textureCard.find('**/pir_t_gui_srp_waterDropsAlpha2')
        alphatex2 = alphaCard.find('**/pir_t_gui_srp_waterDropsAlpha2').findTexture('*')
        alphaCard2 = loader.loadModel('models/minigames/pir_m_gui_srp_waterDropsAlpha')
        self.alphaWaterStream3 = textureCard.find('**/pir_t_gui_srp_waterDropsAlpha')
        alphatex3 = alphaCard2.findTexture('*')
        self.alphaWaterStream4 = textureCard.find('**/pir_t_gui_srp_waterDropsAlpha2')
        alphatex4 = alphaCard2.findTexture('*')
        tex.setWrapU(Texture.WMRepeat)
        tex.setWrapV(Texture.WMRepeat)
        alphatex.setWrapU(Texture.WMRepeat)
        alphatex.setWrapV(Texture.WMRepeat)
        tex2.setWrapU(Texture.WMRepeat)
        tex2.setWrapV(Texture.WMRepeat)
        alphatex3.setWrapU(Texture.WMRepeat)
        alphatex3.setWrapV(Texture.WMRepeat)
        self.setScale(2.5 * self.leakScale)
        self.waterStream.setScale(self.leakScale)
        self.waterStream.setPos(self.getX(), 0.0, -0.5 * self.leakScale + self.getZ())
        self.waterStream2.setScale(self.leakScale * 0.80000000000000004, self.leakScale, self.leakScale * 1.2)
        self.waterStream2.setPos(self.getX(), 0.0, -0.59999999999999998 * self.leakScale + self.getZ())
        self.waterStream.setColor(0.69999999999999996, 0.84999999999999998, 1.0, 1.0)
        self.waterStream2.setColor(0.5, 0.59999999999999998, 0.90000000000000002, 1.0)
        self.waterStream2.reparentTo(self.pitchingGame)
        self.waterStream.reparentTo(self.pitchingGame)
        self.waterStream2.setBin('fixed', 42)
        self.waterStream.setBin('fixed', 40)
        # Per-stream scroll offsets and rates; rates shrink with leak size.
        self.textureYOffset = random.random()
        self.textureYDelta = 0.25 + 0.025000000000000001 / self.leakScale
        self.textureYOffset2 = random.random()
        self.textureYDelta2 = 0.25412353999999998 + 0.058754645634 / self.leakScale
        self.textureYOffsetAlpha = 0.0
        self.textureYDeltaAlpha = 0.25 + 0.025000000000000001 / self.leakScale
        self.textureYOffsetAlpha2 = 0.0
        self.textureYDeltaAlpha2 = 0.25412353999999998 + 0.058754645634 / self.leakScale
        self.textureStage = self.waterStream.findTextureStage('*')
        self.textureStage2 = self.waterStream2.findTextureStage('*')
        self.textureStage3 = TextureStage('alphaLayer')
        self.textureStage3.setMode(TextureStage.MModulate)
        self.textureStage3.setSort(1)
        self.waterStream.setTexture(self.textureStage3, alphatex)
        self.textureStage4 = TextureStage('alphaLayer2')
        self.textureStage4.setMode(TextureStage.MModulate)
        self.textureStage4.setSort(2)
        self.waterStream.setTexture(self.textureStage4, alphatex2)
        trans = TransformState.makePos((0, 0.47999999999999998, 0))
        self.waterStream.setTexTransform(self.textureStage4, trans)
        self.textureStage5 = TextureStage('alphaLayer3')
        self.textureStage5.setMode(TextureStage.MModulate)
        self.textureStage5.setSort(1)
        self.waterStream2.setTexture(self.textureStage5, alphatex3)
        self.textureStage6 = TextureStage('alphaLayer4')
        self.textureStage6.setMode(TextureStage.MModulate)
        self.textureStage6.setSort(2)
        self.waterStream2.setTexture(self.textureStage6, alphatex4)
        trans = TransformState.makePos((0, 0.47999999999999998, 0))
        self.waterStream2.setTexTransform(self.textureStage6, trans)

    def repositionTo(self, newX, newZ):
        # Move the leak and keep both water streams anchored under it.
        self.setPos(newX, 0.0, newZ)
        self.waterStream.setPos(self.getX(), 0.0, -0.5 * self.leakScale + self.getZ())
        self.waterStream2.setPos(self.getX(), 0.0, -0.59999999999999998 * self.leakScale + self.getZ())

    def _initIntervals(self):
        pass

    def destroy(self):
        # Stop animations, detach callbacks, and free scene-graph nodes.
        if self.fadeSequence is not None:
            self.fadeSequence.clearToInitial()
        self['extraArgs'] = None
        taskMgr.remove('RepairLeak_%s.update' % self.name)
        if self.onCleanup is not None:
            self.onCleanup(self)
        self.cleanup()
        self.waterStream.removeNode()
        self.waterStream2.removeNode()
        DirectButton.destroy(self)

    def update(self, task):
        # Per-frame task: scroll the water textures; while Active, slide
        # the alpha masks in; while Patched, slide them out and destroy
        # the leak once the second stream has fully dried up.
        dt = globalClock.getDt()
        self.timeActive += dt
        self.textureYOffset += self.textureYDelta * dt
        trans = TransformState.makePos((0, self.textureYOffset, 0))
        self.waterStream.setTexTransform(self.textureStage, trans)
        done = False
        if self.getCurrentOrNextState() == 'Active' and self.textureYOffsetAlpha < _activePosition:
            self.textureYOffsetAlpha += self.textureYDeltaAlpha * dt
            if self.textureYOffsetAlpha > _activePosition:
                self.textureYOffsetAlpha = _activePosition
            trans2 = TransformState.makePos((0, self.textureYOffsetAlpha, 0))
            self.waterStream.setTexTransform(self.textureStage3, trans2)
        if self.getCurrentOrNextState() == 'Patched':
            if self.textureYOffsetAlpha < _activePosition:
                self.textureYOffsetAlpha = 0.75 - self.textureYOffsetAlpha / 2.0
                trans2 = TransformState.makePos((0, self.textureYOffsetAlpha, 0))
                self.waterStream.setTexTransform(self.textureStage3, trans2)
            elif self.textureYOffsetAlpha < 1.0:
                self.textureYOffsetAlpha += self.textureYDeltaAlpha * dt
                trans2 = TransformState.makePos((0, self.textureYOffsetAlpha, 0))
                self.waterStream.setTexTransform(self.textureStage3, trans2)
        self.textureYOffset2 += self.textureYDelta2 * dt
        trans = TransformState.makePos((0, self.textureYOffset2, 0))
        self.waterStream2.setTexTransform(self.textureStage2, trans)
        if self.getCurrentOrNextState() == 'Active' and self.textureYOffsetAlpha2 < _activePosition:
            self.textureYOffsetAlpha2 += self.textureYDeltaAlpha2 * dt
            if self.textureYOffsetAlpha2 > _activePosition:
                self.textureYOffsetAlpha2 = _activePosition
            trans2 = TransformState.makePos((0, self.textureYOffsetAlpha2, 0))
            self.waterStream2.setTexTransform(self.textureStage5, trans2)
        if self.getCurrentOrNextState() == 'Patched':
            if self.textureYOffsetAlpha2 < _activePosition:
                self.textureYOffsetAlpha2 = 0.75 - self.textureYOffsetAlpha2 / 2.0
                trans2 = TransformState.makePos((0, self.textureYOffsetAlpha2, 0))
                self.waterStream2.setTexTransform(self.textureStage5, trans2)
            # NOTE(review): unlike the first stream this is `if`, not
            # `elif`, so the offset keeps advancing in the same frame —
            # preserved from the original; confirm before "fixing".
            if self.textureYOffsetAlpha2 < 1.0:
                self.textureYOffsetAlpha2 += self.textureYDeltaAlpha2 * dt
                trans2 = TransformState.makePos((0, self.textureYOffsetAlpha2, 0))
                self.waterStream2.setTexTransform(self.textureStage5, trans2)
            else:
                done = True
        if done:
            # Patch animation finished: hide streams, fade out, destroy.
            self.waterStream.stash()
            self.waterStream2.stash()
            self.fadeSequence = Sequence(LerpColorScaleInterval(self, duration = 2.0, colorScale = (1.0, 1.0, 1.0, 0.0)), Func(self.destroy))
            self.fadeSequence.start()
            return Task.done
        else:
            return Task.cont

    def setCommandButtons(self):
        self.guiItem.addClickButton(MouseButton.one())
        self.bind(DGG.B1PRESS, self.commandFunc)

    def enterIdle(self):
        # Hidden and not clickable.
        self.stash()
        self.waterStream.stash()
        self.waterStream2.stash()
        self['state'] = DGG.DISABLED

    def exitIdle(self):
        pass

    def enterPatched(self):
        # Show the patched geom; the update task handles the dry-out.
        self['state'] = DGG.DISABLED
        self.setScale(0.84999999999999998)

    def exitPatched(self):
        self.stash()
        self.waterStream.stash()
        self.waterStream2.stash()

    def enterActive(self):
        # Start leaking: show everything and accept clicks.
        taskMgr.add(self.update, 'RepairLeak_%s.update' % self.name)
        self.unstash()
        self.waterStream.unstash()
        self.waterStream2.unstash()
        self['state'] = DGG.NORMAL

    def exitActive(self):
        self['state'] = DGG.DISABLED
|
# NFF-Go / DPDK development environment, intended to be sourced on test VMs.
export DPDK_VERSION=18.02
export GOPATH="$HOME"/go
export GOROOT=/opt/go
export NFF_GO="$GOPATH"/src/github.com/intel-go/nff-go
export PATH="$GOPATH"/bin:"$GOROOT"/bin:"$PATH"
export MAKEFLAGS="-j 4"
# PCI addresses of the NICs handed over to DPDK by bindports().
export NFF_GO_CARDS="00:06.0 00:07.0"
# Distribution name (e.g. "Ubuntu", "Fedora"); selects the package manager below.
export DISTRO=$(lsb_release -i | cut -d: -f2 | sed s/'^\t'//)
# Kernel interface names of the two test NICs.
export CARD1=ens6
export CARD2=ens7
# Bind ports to DPDK driver
bindports ()
{
    sudo modprobe uio
    # igb_uio is built inside the NFF-Go DPDK tree, not shipped with the kernel.
    sudo insmod "$NFF_GO"/dpdk/dpdk-${DPDK_VERSION}/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
    # NFF_GO_CARDS is intentionally unquoted: it expands to several PCI addresses.
    sudo "$NFF_GO"/dpdk/dpdk-${DPDK_VERSION}/usertools/dpdk-devbind.py --bind=igb_uio $NFF_GO_CARDS
}
# Bind ports to Linux kernel driver
unbindports ()
{
    # Hand the cards back to the stock e1000 driver (unquoted: multiple addresses).
    sudo "$NFF_GO"/dpdk/dpdk-${DPDK_VERSION}/usertools/dpdk-devbind.py --bind=e1000 $NFF_GO_CARDS
}
# Run pktgen
runpktgen ()
{
    # Run in a subshell so the cd does not leak into the caller's shell.
    (cd "$NFF_GO"/dpdk; sudo ./pktgen -c 0xff -n 4 -- -P -m "[1:2].0, [3:4].1" -T)
    # pktgen can leave the terminal in a bad state; reset it only on clean exit.
    rc=$?; if [[ $rc == 0 ]]; then reset; fi
}
# Perform transient NAT client machine configuration. It initializes
# two network interfaces and sets up default routes to the server
# network.
natclient ()
{
    # Routes to the two server-side subnets via the NAT middle box.
    sudo ip route add 192.168.16.0/24 via 192.168.14.1 dev $CARD1
    sudo ip route add 192.168.26.0/24 via 192.168.24.1 dev $CARD2
}
# Perform one-time configuration needed for NAT client test
# machine. For it apache package is installed for apache benchmark
# program.
setupnatclient ()
{
    # Static addresses on both test NICs, then the transient routes.
    sudo nmcli c add type ethernet ifname $CARD1 con-name $CARD1 ip4 192.168.14.2/24
    sudo nmcli c add type ethernet ifname $CARD2 con-name $CARD2 ip4 192.168.24.2/24
    sudo nmcli c up $CARD1
    sudo nmcli c up $CARD2
    natclient
    # FIX: quote "$DISTRO" — with the unquoted form, an empty or multi-word
    # value (e.g. when lsb_release is missing) makes `[` fail with a syntax
    # error instead of simply not matching.
    if [ "$DISTRO" == Ubuntu ]; then
        sudo apt-get install -y apache2
    elif [ "$DISTRO" == Fedora ]; then
        sudo dnf -y install httpd
    fi
}
# Perform transient configuration for NAT middle machine. It
# initializes two first network interfaces for NFF-GO bindports
# command and initializes second interface pair for use with Linux
# NAT. In this setup enp0s16 is connected to server (public network)
# and enp0s9 is connected to client (private network).
natmiddle ()
{
    # Override the card set/names for this box, then bind to DPDK.
    export NFF_GO_CARDS="00:08.0 00:0a.0"
    export CARD1=ens7
    export CARD2=ens9
    bindports
    # Classic Linux masquerading NAT: CARD1 = private, CARD2 = public.
    sudo sysctl -w net.ipv4.ip_forward=1
    sudo iptables -t nat -A POSTROUTING -o $CARD2 -j MASQUERADE
    sudo iptables -A FORWARD -i $CARD2 -o $CARD1 -m state --state RELATED,ESTABLISHED -j ACCEPT
    sudo iptables -A FORWARD -i $CARD1 -o $CARD2 -j ACCEPT
}
# Perform one-time configuration needed for NAT middle machine. On
# Fedora we use firewall daemon to permanently record IP forwarding
# rules.
setupnatmiddle ()
{
    # Apply the transient NAT rules first, then persist the interface
    # addresses via NetworkManager.
    natmiddle
    sudo nmcli c add type ethernet ifname $CARD1 con-name $CARD1 ip4 192.168.24.1/24
    sudo nmcli c add type ethernet ifname $CARD2 con-name $CARD2 ip4 192.168.26.1/24
    sudo nmcli c up $CARD1
    sudo nmcli c up $CARD2
}
# Perform one-time configuration needed for NAT server side
# machine. It installs Apache web server.
setupnatserver ()
{
    sudo nmcli c add type ethernet ifname $CARD1 con-name $CARD1 ip4 192.168.16.2/24
    sudo nmcli c add type ethernet ifname $CARD2 con-name $CARD2 ip4 192.168.26.2/24
    sudo nmcli c up $CARD1
    sudo nmcli c up $CARD2
    # FIX: quote "$DISTRO" so an empty/multi-word value cannot break `[`.
    if [ "$DISTRO" == Ubuntu ]; then
        sudo apt-get install -y apache2
        sudo systemctl enable apache2
        sudo systemctl start apache2
    elif [ "$DISTRO" == Fedora ]; then
        sudo dnf -y install httpd
        sudo systemctl enable httpd
        sudo systemctl start httpd
    fi
    # Fixed-size zero-filled files served as HTTP benchmark payloads.
    sudo dd if=/dev/zero of=/var/www/html/10k.bin bs=1 count=10240
    sudo dd if=/dev/zero of=/var/www/html/100k.bin bs=1 count=102400
    sudo dd if=/dev/zero of=/var/www/html/1m.bin bs=1 count=1048576
}
# Set up docker daemon, this is needed for automated testing.
setupdocker ()
{
    # FIX: quote "$DISTRO" so an empty/multi-word value cannot break `[`.
    if [ "$DISTRO" == Ubuntu ]; then
        curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
        sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
        sudo apt-get update
        sudo apt-get install -y docker-ce
        sudo gpasswd -a ubuntu docker
        # Drop the -H fd:// default so the daemon.json "hosts" key applies.
        sudo sed -i -e 's,ExecStart=/usr/bin/dockerd -H fd://,ExecStart=/usr/bin/dockerd,' /lib/systemd/system/docker.service
    elif [ "$DISTRO" == Fedora ]; then
        sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
        sudo dnf -y install docker-ce
        sudo gpasswd -a vagrant docker
    fi
    # -p: do not fail if the package installation already created the dir.
    sudo mkdir -p /etc/docker
    # Expose the daemon on the local socket and on TCP for remote test control.
    sudo sh -c 'cat > /etc/docker/daemon.json <<EOF
{
    "hosts": ["unix:///var/run/docker.sock", "tcp://0.0.0.0:2375"]
}
EOF'
    sudo systemctl enable docker.service
    sudo systemctl daemon-reload
    sudo systemctl restart docker.service
}
|
# -*- coding: utf-8 -*-
import pycurl
import re
from module.common.json_layer import json_loads
from module.network.HTTPRequest import BadHeader
from module.plugins.internal.AdsCaptcha import AdsCaptcha
from module.plugins.internal.ReCaptcha import ReCaptcha
from module.plugins.internal.SolveMedia import SolveMedia
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class RapidgatorNet(SimpleHoster):
    __name__ = "RapidgatorNet"
    __type__ = "hoster"
    __version__ = "0.34"
    __pattern__ = r'http://(?:www\.)?(rapidgator\.net|rg\.to)/file/\w+'
    __config__ = [("use_premium", "bool", "Use premium account if available", True)]
    __description__ = """Rapidgator.net hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
                   ("chrox", None),
                   ("stickell", "l.stickell@yahoo.it"),
                   ("Walter Purcaro", "vuolter@gmail.com")]

    # REST endpoint used for premium (session-id based) operations.
    API_URL = "http://rapidgator.net/api/file"
    # Force English pages so the text patterns below match.
    COOKIES = [("rapidgator.net", "lang", "en")]
    NAME_PATTERN = r'<title>Download file (?P<N>.*)</title>'
    SIZE_PATTERN = r'File size:\s*<strong>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong>'
    OFFLINE_PATTERN = r'>(File not found|Error 404)'
    # Captures the JS variables the free-download flow needs (timer URLs, file id, wait secs).
    JSVARS_PATTERN = r'\s+var\s*(startTimerUrl|getDownloadUrl|captchaUrl|fid|secs)\s*=\s*\'?(.*?)\'?;'
    PREMIUM_ONLY_PATTERN = r'You can download files up to|This file can be downloaded by premium only<'
    ERROR_PATTERN = r'You have reached your (?:daily|hourly) downloads limit'
    WAIT_PATTERN = r'(Delay between downloads must be not less than|Try again in).+'
    LINK_FREE_PATTERN = r'return \'(http://\w+.rapidgator.net/.*)\';'
    RECAPTCHA_PATTERN = r'"http://api\.recaptcha\.net/challenge\?k=(.*?)"'
    ADSCAPTCHA_PATTERN = r'(http://api\.adscaptcha\.com/Get\.aspx[^"\']+)'
    SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.script\?k=(.*?)"'

    def setup(self):
        # A valid account session id (sid) implies premium mode; premium
        # downloads may resume and run in parallel, but stay single-chunk.
        if self.account:
            self.sid = self.account.getAccountInfo(self.user).get('sid', None)
        else:
            self.sid = None
        if self.sid:
            self.premium = True
        self.resumeDownload = self.multiDL = self.premium
        self.chunkLimit = 1

    def api_response(self, cmd):
        # Call API_URL/<cmd> with the session id; return the 'response'
        # payload on HTTP 200. On 423 the account is flagged empty; any
        # other status triggers a relogin and a delayed retry.
        try:
            json = self.load('%s/%s' % (self.API_URL, cmd),
                             get={'sid': self.sid,
                                  'url': self.pyfile.url}, decode=True)
            self.logDebug("API:%s" % cmd, json, "SID: %s" % self.sid)
            json = json_loads(json)
            status = json['response_status']
            msg = json['response_details']
        except BadHeader, e:
            # Non-2xx HTTP status: reuse the HTTP code as the API status.
            self.logError("API: %s" % cmd, e, "SID: %s" % self.sid)
            status = e.code
            msg = e
        if status == 200:
            return json['response']
        elif status == 423:
            self.account.empty(self.user)
            self.retry()
        else:
            self.account.relogin(self.user)
            self.retry(wait_time=60)

    def handlePremium(self, pyfile):
        # Premium flow: fetch metadata via the API, then request the direct link.
        self.api_data = self.api_response('info')
        self.api_data['md5'] = self.api_data['hash']
        pyfile.name = self.api_data['filename']
        pyfile.size = self.api_data['size']
        self.link = self.api_response('download')['url']

    def handleFree(self, pyfile):
        # Free flow: scrape the page's JS variables, run the AJAX timer
        # handshake, then solve the captcha (up to 5 attempts) to obtain
        # the download link.
        jsvars = dict(re.findall(self.JSVARS_PATTERN, self.html))
        self.logDebug(jsvars)
        self.req.http.lastURL = pyfile.url
        self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
        url = "http://rapidgator.net%s?fid=%s" % (
            jsvars.get('startTimerUrl', '/download/AjaxStartTimer'), jsvars['fid'])
        jsvars.update(self.getJsonResponse(url))
        # Site-imposed countdown before the download URL may be requested.
        self.wait(jsvars.get('secs', 45), False)
        url = "http://rapidgator.net%s?sid=%s" % (
            jsvars.get('getDownloadUrl', '/download/AjaxGetDownload'), jsvars['sid'])
        jsvars.update(self.getJsonResponse(url))
        self.req.http.lastURL = pyfile.url
        self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With:"])
        url = "http://rapidgator.net%s" % jsvars.get('captchaUrl', '/download/captcha')
        self.html = self.load(url)
        for _i in xrange(5):
            m = re.search(self.LINK_FREE_PATTERN, self.html)
            if m:
                self.link = m.group(1)
                break
            else:
                captcha = self.handleCaptcha()
                if not captcha:
                    self.error(_("Captcha pattern not found"))
                response, challenge = captcha.challenge()
                self.html = self.load(url, post={'DownloadCaptchaForm[captcha]': "",
                                                 'adcopy_challenge' : challenge,
                                                 'adcopy_response'  : response})
                if "The verification code is incorrect" in self.html:
                    self.invalidCaptcha()
                else:
                    self.correctCaptcha()
        else:
            self.error(_("Download link"))

    def handleCaptcha(self):
        # Return the first captcha service whose key is found in the page,
        # or None when no service matches.
        for klass in (AdsCaptcha, ReCaptcha, SolveMedia):
            inst = klass(self)
            if inst.detect_key():
                return inst

    def getJsonResponse(self, url):
        # Fetch `url` and parse it as JSON; retries when the reply is not
        # a JSON object (e.g. an HTML error page).
        res = self.load(url, decode=True)
        if not res.startswith('{'):
            self.retry()
        self.logDebug(url, res)
        return json_loads(res)


getInfo = create_getInfo(RapidgatorNet)
def filterVowels(sentence):
    """Return `sentence` with every ASCII vowel removed (case-insensitive)."""
    return "".join(ch for ch in sentence if ch.lower() not in "aeiou")
print(filterVowels("I want to learn programming."))
# Output: " wnt t lrn prgrmmng." (the space that followed the removed "I" remains)
#!/bin/bash
# Copy local config files into this repository. $1 selects cp's overwrite
# behaviour: -f (force), -n (no-clobber); anything else defaults to -i
# (interactive prompt before overwrite).
if [[ "$1" == "-f" ]] ;then
option="-f"
elif [[ "$1" == "-n" ]] ;then
option="-n"
else
option="-i"
fi
# cp $option -vu ~/.cheat/*.md cheat
cp $option -vu ~/.gdbinit gdb/gdbinit
cp $option -vu ~/.cgdb/cgdbrc gdb
# Directory trees are copied recursively without the overwrite option.
cp -vru ~/.local/share/fcitx5/rime/* rime-dict
cp -vru ~/.config/fcitx5/* fcitx5
cp $option -vu ~/.tmux.conf tmux/tmux.conf
cp $option -vu ~/.config/xfce4/terminal/terminalrc xfce4-terminal
cp $option -vu ~/.zshrc zsh/zshrc
cp -vu ~/.config/dconf/user gnome
cp $option -vu ~/.local/bin/{say,see,terminal-tmux.sh} bin
|
/*
* Copyright 2016-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.ui.impl;
/**
 * Base class for unit tests, providing simple stdout helpers.
 */
public class AbstractUiImplTest {

    /**
     * System agnostic end-of-line character.
     */
    protected static final String EOL = String.format("%n");

    /**
     * Prints the given string to stdout.
     *
     * @param s string to print
     */
    protected void print(String s) {
        System.out.println(s);
    }

    /**
     * Prints the toString() of the given object to stdout, or the literal
     * {@code <null>} when the reference is null.
     *
     * @param o object to print
     */
    protected void print(Object o) {
        print(o == null ? "<null>" : o.toString());
    }

    /**
     * Prints the formatted string to stdout.
     *
     * @param fmt format string
     * @param params parameters
     * @see String#format(String, Object...)
     */
    protected void print(String fmt, Object... params) {
        print(String.format(fmt, params));
    }

    /**
     * Prints a title banner, to delimit individual unit test output.
     *
     * @param s a title for the test
     */
    protected void title(String s) {
        print(EOL + "=== %s ===", s);
    }
}
|
<gh_stars>1-10
#include <iostream>
#include <cmath>
using namespace std;
int main() {
    // N pairs of (B, P) follow; CB and CP are the per-pack prices.
    int N, CB, CP;
    cin >> N >> CB >> CP;
    int sumB = 0, sumP = 0;
    for (int i = 0; i < N; ++i) {
        int B, P;
        cin >> B >> P;
        sumB += B;
        sumP += P;
    }
    // Each pack holds 10 units; a partial pack still costs a full pack.
    auto packs = [](int units) { return units / 10 + (units % 10 == 0 ? 0 : 1); };
    cout << packs(sumB) * CB + packs(sumP) * CP << endl;
    return 0;
}
|
#!/usr/bin/env bash
# Setup script environment
set -o errexit  #Exit immediately if a pipeline returns a non-zero status
set -o errtrace #Trap ERR from shell functions, command substitutions, and commands from subshell
set -o nounset  #Treat unset variables as an error
set -o pipefail #Pipe will exit with last non-zero status if applicable
shopt -s expand_aliases
# 'die' records the failing command's exit code and line before reporting.
alias die='EXIT=$? LINE=$LINENO error_exit'
trap die ERR
trap cleanup EXIT
# Print a formatted error, destroy any partially-created VM, then exit
# with the original failure code.
function error_exit() {
  trap - ERR
  local DEFAULT='Unknown failure occured.'
  local REASON="\e[97m${1:-$DEFAULT}\e[39m"
  local FLAG="\e[91m[ERROR] \e[93m$EXIT@$LINE"
  msg "$FLAG $REASON"
  [ ! -z ${VMID-} ] && cleanup_vmid
  exit $EXIT
}
# Print a yellow [WARNING] message.
function warn() {
  local REASON="\e[97m$1\e[39m"
  local FLAG="\e[93m[WARNING]\e[39m"
  msg "$FLAG $REASON"
}
# Print a cyan [INFO] message.
function info() {
  local REASON="$1"
  local FLAG="\e[36m[INFO]\e[39m"
  msg "$FLAG $REASON"
}
# Echo a message with escape sequences interpreted.
function msg() {
  local TEXT="$1"
  echo -e "$TEXT"
}
# Stop (if running) and destroy the VM identified by $VMID.
function cleanup_vmid() {
  if $(qm status $VMID &>/dev/null); then
    if [ "$(qm status $VMID | awk '{print $2}')" == "running" ]; then
      qm stop $VMID
    fi
    qm destroy $VMID
  fi
}
# Leave the temporary working directory and remove it (runs on EXIT).
function cleanup() {
  popd >/dev/null
  rm -rf $TEMP_DIR
}
TEMP_DIR=$(mktemp -d)
pushd $TEMP_DIR >/dev/null
# Select storage location
# Build one whiptail radiolist row per storage pool that can hold disk
# images, tracking the widest row so the dialog can be sized to fit.
while read -r line; do
  TAG=$(echo $line | awk '{print $1}')
  TYPE=$(echo $line | awk '{printf "%-10s", $2}')
  FREE=$(echo $line | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}')
  ITEM=" Type: $TYPE Free: $FREE "
  OFFSET=2
  if [[ $((${#ITEM} + $OFFSET)) -gt ${MSG_MAX_LENGTH:-} ]]; then
    MSG_MAX_LENGTH=$((${#ITEM} + $OFFSET))
  fi
  STORAGE_MENU+=( "$TAG" "$ITEM" "OFF" )
done < <(pvesm status -content images | awk 'NR>1')
# Zero pools: abort. One pool: use it. Several: let the user choose.
if [ $((${#STORAGE_MENU[@]}/3)) -eq 0 ]; then
  warn "'Disk image' needs to be selected for at least one storage location."
  die "Unable to detect valid storage location."
elif [ $((${#STORAGE_MENU[@]}/3)) -eq 1 ]; then
  STORAGE=${STORAGE_MENU[0]}
else
  while [ -z "${STORAGE:+x}" ]; do
    STORAGE=$(whiptail --title "Storage Pools" --radiolist \
    "Which storage pool you would like to use for the container?\n\n" \
    16 $(($MSG_MAX_LENGTH + 23)) 6 \
    "${STORAGE_MENU[@]}" 3>&1 1>&2 2>&3) || exit
  done
fi
info "Using '$STORAGE' for storage location."
# Get the next guest VM/LXC ID
# NOTE(review): the VM ID is hard-coded rather than queried from the
# cluster; creation fails if 300 is already taken.
VMID=300
info "Container ID is $VMID."
# Get latest Home Assistant disk image archive URL
msg "Getting URL for latest Home Assistant disk image..."
RELEASE_TYPE=vmdk
# Query the GitHub releases API for the newest non-prerelease asset whose
# name contains $RELEASE_TYPE; prints nothing on API error/rate limit.
URL=$(cat<<EOF | python3
import requests
url = "https://api.github.com/repos/home-assistant/operating-system/releases"
r = requests.get(url).json()
if "message" in r:
    exit()
for release in r:
    if release["prerelease"]:
        continue
    for asset in release["assets"]:
        if asset["name"].find("$RELEASE_TYPE") != -1:
            image_url = asset["browser_download_url"]
            print(image_url)
            exit()
EOF
)
if [ -z "$URL" ]; then
  die "Github has returned an error. A rate limit may have been applied to your connection."
fi
# Download Home Assistant disk image archive
msg "Downloading disk image..."
wget -q --show-progress $URL
echo -en "\e[1A\e[0K" #Overwrite output from wget
FILE=$(basename $URL)
# Extract Home Assistant disk image
msg "Extracting disk image..."
case $FILE in
  *"gz") gunzip -f $FILE;;
  *"zip") unzip -o $FILE;;
  *"xz") xz -d $FILE;;
  *) die "Unable to handle file extension '${FILE##*.}'.";;
esac
# Create variables for container disk
# Directory-backed storage needs an explicit qcow2 format and path prefix.
STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
if [ "$STORAGE_TYPE" = "dir" ]; then
  DISK_EXT=".qcow2"
  DISK_REF="$VMID/"
  IMPORT_OPT="-format qcow2"
fi
# DISK0 = EFI vars disk, DISK1 = imported OS disk.
for i in {0,1}; do
  disk="DISK$i"
  eval DISK${i}=vm-${VMID}-disk-${i}${DISK_EXT:-}
  eval DISK${i}_REF=${STORAGE}:${DISK_REF:-}${!disk}
done
# Create VM
msg "Creating VM..."
VM_NAME="home-assistant"
qm create $VMID -agent 1 -bios ovmf -name $VM_NAME -net0 virtio,bridge=vmbr0 \
  -onboot 1 -ostype l26 -scsihw virtio-scsi-pci
pvesm alloc $STORAGE $VMID $DISK0 128 1>&/dev/null
qm importdisk $VMID ${FILE%.*} $STORAGE ${IMPORT_OPT:-} 1>&/dev/null
qm set $VMID \
  -efidisk0 ${DISK0_REF},size=128K \
  -sata0 ${DISK1_REF},size=6G > /dev/null
qm set $VMID \
  -boot order=sata0 > /dev/null
# Add serial port and enable console output
# Run in a subshell with relaxed error trapping: a failure here only
# warns (serial console is optional) and removes the half-added port.
set +o errtrace
(
  msg "Adding serial port and configuring console..."
  trap '
    warn "Unable to configure serial port. VM is still functional."
    if [ "$(qm config $VMID | sed -n ''/serial0/p'')" != "" ]; then
      qm set $VMID --delete serial0 >/dev/null
    fi
    exit
  ' ERR
  if [ "$(command -v kpartx)" = "" ]; then
    msg "Installing 'kpartx'..."
    apt-get update >/dev/null
    apt-get -qqy install kpartx &>/dev/null
  fi
  DISK1_PATH="$(pvesm path $DISK1_REF)"
  DISK1_PART1="$(kpartx -al $DISK1_PATH | awk 'NR==1 {print $1}')"
  DISK1_PART1_PATH="/dev/mapper/$DISK1_PART1"
  TEMP_MOUNT="${TEMP_DIR}/mnt"
  trap '
    findmnt $TEMP_MOUNT >/dev/null && umount $TEMP_MOUNT
    command -v kpartx >/dev/null && kpartx -d $DISK1_PATH
  ' EXIT
  # Mount the OS image's boot partition and append the serial console
  # argument to the kernel command line.
  kpartx -a $DISK1_PATH
  mkdir $TEMP_MOUNT
  mount $DISK1_PART1_PATH $TEMP_MOUNT
  sed -i 's/$/ console=ttyS0/' ${TEMP_MOUNT}/cmdline.txt
  qm set $VMID -serial0 socket >/dev/null
)
info "Completed Successfully! New VM ID is \e[1m$VMID\e[0m."
|
<filename>fractal.rb<gh_stars>0
require 'chunky_png'

# Renders a Mandelbrot-set fractal into a PNG image using chunky_png.
class Fractal
  def initialize(width, height)
    @width = width
    @height = height
    @file = ChunkyPNG::Image.new(@width, @height, ChunkyPNG::Color::WHITE)
  end

  # Iterate over every pixel, map it into the complex plane
  # (re: -2..1, im: -1..1) and colour it black with an alpha derived
  # from how quickly the point escapes.
  def draw(iterations)
    pos_x = 0
    pos_y = 0
    print "Calculating...\n"
    while pos_y < @height
      while pos_x < @width
        part_re = pos_x / @width.to_f * 3 - 2.0 # -2 ... 1
        part_im = pos_y / @height.to_f * 2 - 1 # -1 ... 1
        alpha = calculate_color(part_re, part_im, iterations)
        @file[pos_x, pos_y] = ChunkyPNG::Color.rgba(0, 0, 0, alpha)
        pos_x += 1
      end
      pos_y += 1
      pos_x = 0
    end
  end

  # Write the rendered image to disk as an interlaced PNG.
  def save(filename)
    @file.save(filename, interlace: true)
    print 'Created ' + filename + "\n"
  end

  private

  # Mandelbrot escape-time test for the point (part_re, part_im).
  # Returns an alpha value that fades (from 255) the longer the point
  # takes to escape |z| > 2; points that never escape return 255.
  def calculate_color(part_re, part_im, iterations)
    i = 0
    x = 0
    y = 0
    alpha = 255
    while i < iterations
      xn = x * x - y * y + part_re
      yn = 2 * x * y + part_im
      x = xn
      y = yn
      return alpha if x * x + y * y > 4
      i += 1
      if alpha > 5
        alpha -= 10
      elsif alpha == 5
        alpha -= 5
      end
    end
    255
  end
end

fractal = Fractal.new(800, 600)
fractal.draw(600)
fractal.save('fractal.png')
|
<filename>__mocks__/styleMock.js
// Jest style mock: maps the tab component's CSS selectors to their
// declarations, so tests importing the stylesheet get this plain object
// instead of real CSS.
module.exports = {
  '.tabs, .tabs--outer-wrapper': {
    display: 'flex',
  },
  '.tabs--outer-wrapper': {
    margin: '0 -15px',
    'overflow-x': 'auto',
  },
  '@media (min-width: 540px)': {
    '.tabs--outer-wrapper': {
      margin: '0 -30px',
    },
  },
  '@media (min-width: 720px)': {
    '.tabs--outer-wrapper': {
      margin: 0,
    },
  },
  '.tabs': {
    padding: 0,
    'list-style-type': 'none',
    'box-shadow': 'inset 0 -1px 0 #e3e3e3',
    position: 'relative',
    flex: '1 1 0%',
  },
  '.tabs > :first-child': {
    'margin-left': 0,
  },
  '@media (min-width: 960px)': {
    '.tabs > *': {
      'margin-left': '15px',
      'margin-right': '15px',
    },
  },
  '.tabs > *': {
    '-webkit-box-flex': 0,
    '-ms-flex': '0 0 auto',
    flex: '0 0 auto',
    margin: '0 10px',
  },
  '.tabs--current-content': {
    'margin-top': '32px',
  },
  '.tab': {
    display: 'block',
    padding: 0,
    border: 0,
    'border-bottom': '2px solid transparent',
    'border-radius': 0,
    background: 'transparent',
    color: '#4d4d4d',
    'font-size': '14px',
    'line-height': '40px',
    'letter-spacing': '.01em',
    'text-align': 'center',
    'text-decoration': 'none',
    transition: 'background-color .1s ease-out',
    'user-select': 'none',
    'white-space': 'nowrap',
    cursor: 'pointer',
  },
  '.tab:hover': {
    color: '#0095ff',
  },
  '.is-active': {
    position: 'relative',
    'border-color': '#0095ff',
    color: '#0095ff',
    'font-weight': 700,
    'letter-spacing': 0,
  },
};
|
#!/usr/bin/env bash
# Mason build script for a single static Boost library (program_options).
BOOST_VERSION1="1.57.0"
BOOST_VERSION2="1_57_0"
BOOST_LIBRARY="program_options"
BOOST_TOOLSET="clang"
BOOST_ARCH="x86"

MASON_NAME=boost_lib${BOOST_LIBRARY}
MASON_VERSION=1.57.0
MASON_LIB_FILE=lib/libboost_${BOOST_LIBRARY}.a

. ${MASON_DIR}/mason.sh

# Download and unpack the pinned Boost source tarball.
function mason_load_source {
    mason_download \
        https://downloads.sourceforge.net/project/boost/boost/${BOOST_VERSION1}/boost_${BOOST_VERSION2}.tar.bz2 \
        397306fa6d0858c4885fbba7d43a0164dcb7f53e
    export MASON_BUILD_PATH=${MASON_ROOT}/.build/boost_${BOOST_VERSION2}
    mason_extract_tar_bz2
}

# Write a user-config.jam for toolset $1 using compiler $2, honouring
# $AR/$RANLIB overrides when they are set.
function gen_config() {
    echo "using $1 : : $(which $2)" > user-config.jam
    if [[ "${AR:-false}" != false ]] || [[ "${RANLIB:-false}" != false ]]; then
        echo ' : ' >> user-config.jam
        if [[ "${AR:-false}" != false ]]; then
            echo "<archiver>${AR} " >> user-config.jam
        fi
        if [[ "${RANLIB:-false}" != false ]]; then
            echo "<ranlib>${RANLIB} " >> user-config.jam
        fi
    fi
    echo ' ;' >> user-config.jam
}

# Bootstrap b2 if needed, build the static release library, and move it
# into the mason prefix.
function mason_compile {
    gen_config ${BOOST_TOOLSET} clang++
    if [[ ! -f ./b2 ]] ; then
        ./bootstrap.sh
    fi
    ./b2 \
        --with-${BOOST_LIBRARY} \
        --prefix=${MASON_PREFIX} \
        -j${MASON_CONCURRENCY} \
        -d0 \
        --ignore-site-config --user-config=user-config.jam \
        architecture="${BOOST_ARCH}" \
        toolset="${BOOST_TOOLSET}" \
        link=static \
        variant=release \
        linkflags="${LDFLAGS:-" "}" \
        cxxflags="${CXXFLAGS:-" "}" \
        stage
    mkdir -p $(dirname ${MASON_PREFIX}/${MASON_LIB_FILE})
    mv stage/${MASON_LIB_FILE} ${MASON_PREFIX}/${MASON_LIB_FILE}
}

# Linker flag consumers need to link against this library.
function mason_ldflags {
    echo "-lboost_${BOOST_LIBRARY}"
}

function mason_clean {
    make clean
}

mason_run "$@"
|
<filename>contracts/industry/parkingmeter/mbedParkingMeter/definitions.go
package main
type Device struct {
DeviceID *string `json:"deviceid,omitempty"`
MinimumUsageCost *float64 `json:"minimumusagecost,omitempty"`
MinimumUsageTime *int32 `json:"minimumusagetime,omitempty"`
OvertimeUsageCost *float64 `json:"overtimeusagecost,omitempty"`
OvertimeUsageTime *int32 `json:"overtimeusagetime,omitempty"`
Available *bool `json:"available,omitempty"`
}
type Usage struct {
DeviceID string `json:"deviceid,omitempty"`
StartTime string `json:"starttime,omitempty"`
EndTime string `json:"endtime,omitempty"` // current asset location
Duration int64 `json:"duration,omitempty"` // the name of the carrier
UsageCost float64 `json:"usagecost,omitempty"`
ActualEndtime string `json:"actualendtime,omitempty"` // celcius
OvertimeCost float64 `json:"overtimecost,omitempty"` // percent
TotalCost float64 `json:"totalcost,omitempty"` // percent
}
const CONTSTATEKEY string = "STATE"
const DEVICESKEY string = "DEVICES"
const USAGEKEY string = "USAGE"
const USAGEHIST string = "USAGEHIST"
const ALERTKEY string = "ALERT"
const LISTKEY string = "DEVLIST"
const MAXHIST int = 10
const BufferTime int = 2
type AlertLevels string
const (
Available AlertLevels = "available"
Confirm ="confirm"
HalfTime = "half-time"
Warning ="warning"
Overtime = "overtime"
)
const MYVERSION string = "1.0.0"
type ContractState struct {
Version string `json:"version"`
}
//Usage History
type UsageHistory struct {
History []string `json:"history"`
}
//Device List
type DevList struct {
Devices []string `json:"devices"`
}
var contractState = ContractState{MYVERSION}
|
<reponame>LoopSun/linux-client<filename>settings/server.py
#encoding=utf8
import os

# Address of the server the agent should report to (None when unset).
POST_SERVER = os.environ.get("POST_SERVER")

# Post interval in seconds, to avoid DDOSing the server.
# Environment values arrive as strings, so coerce to int for a consistent
# type whether the variable is set or the default (3) is used.
POST_INTERVAL = int(os.environ.get("POST_INTERVAL", 3))
|
#!/bin/bash
# Build all service images defined in docker-compose.yml.
docker-compose build
package test

import (
	"testing"
)

// Test the example in the example-with-consul-connect folder
// against the Ubuntu 16.04 based AMI.
func TestConsulConnectWithUbuntu16Ami(t *testing.T) {
	t.Parallel()
	runConsulConnectTest(t, "ubuntu16-ami", "examples/example-with-consul-connect", "../examples/consul-ami/consul.json", "ubuntu")
}

// Test the example in the example-with-consul-connect folder
// against the Ubuntu 18.04 based AMI.
func TestConsulConnectWithUbuntu18Ami(t *testing.T) {
	t.Parallel()
	runConsulConnectTest(t, "ubuntu18-ami", "examples/example-with-consul-connect", "../examples/consul-ami/consul.json", "ubuntu")
}

// Test the example in the example-with-consul-connect folder
// against the Amazon Linux 2 based AMI (note the ec2-user login).
func TestConsulConnectWithAmazonLinuxAmi(t *testing.T) {
	t.Parallel()
	runConsulConnectTest(t, "amazon-linux-2-ami", "examples/example-with-consul-connect", "../examples/consul-ami/consul.json", "ec2-user")
}
|
#include <iostream>
#include <unordered_map>
#include <vector>
using namespace std;
// Return the indices of the two entries of `arr` that sum to `target`,
// or {-1, -1} when no such pair exists. Single left-to-right pass with a
// value -> index hash map: O(n) time, O(n) space.
std::pair<int, int> twoSum(std::vector<int>& arr, int target) {
    std::unordered_map<int, int> seen;
    const int n = static_cast<int>(arr.size());
    for (int idx = 0; idx < n; ++idx) {
        const int need = target - arr[idx];
        auto hit = seen.find(need);
        if (hit != seen.end()) {
            return {hit->second, idx};
        }
        seen[arr[idx]] = idx;
    }
    return {-1, -1};
}
int main()
{
    // Demo: 1 + 8 == 9, found at indices 0 and 2.
    vector<int> arr = {1, 4, 8, 21, 45};
    int target = 9;
    pair<int, int> result = twoSum(arr, target);
    cout << result.first << " " << result.second << endl; // 0 2
    return 0;
}
#!/bin/bash
# Tear down the Secretless Broker quickstart container and drop its
# localhost SSH host key so a rebuilt container can be trusted again.
set -eou pipefail
echo "++++++++++++++++++++++++++++++"
echo "Killing Secretless Broker"
echo "Quickstart Container"
echo "++++++++++++++++++++++++++++++"
echo ""
set -x
docker rm -f secretless-quickstart
# NOTE(review): sed -i '' is the BSD/macOS form; on GNU sed this treats
# '' as the input file and fails — confirm the intended platform.
sed -i '' '/localhost/d' ~/.ssh/known_hosts
|
/////////////////////////////////////////////////////////////////////////////
//
// http.c
//
// MiniWeb HTTP authentication implementation
//
/////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "httppil.h"
#include "httpapi.h"
#include "httpint.h"
#ifdef HTTPAUTH
extern HttpParam g_httpParam;
////////////////////////////////////////////////////////////////////////////
// _mwCheckAuthentication
// Check if a connected peer is authenticated
////////////////////////////////////////////////////////////////////////////
// Returns TRUE when the socket does not require authentication, or when
// the peer address matches the currently authenticated node; on success
// the authentication window is extended by HTTPAUTHTIMEOUT seconds.
// NOTE(review): only one peer (dwAuthenticatedNode) can be authenticated
// at a time — confirm this single-session model is intended.
BOOL _mwCheckAuthentication(HttpSocket* phsSocket)
{
	if (!ISFLAGSET(phsSocket,FLAG_AUTHENTICATION))
		return TRUE; /* this socket does not require authentication */
	if (g_httpParam.dwAuthenticatedNode!=phsSocket->ipAddr.laddr) {
		// Not authenticated
		g_httpParam.stats.authFailCount++;
		return FALSE;
	}
	// Extend authentication period
	g_httpParam.tmAuthExpireTime = time(NULL) + HTTPAUTHTIMEOUT;
	return TRUE;
}
#endif
|
from typing import List, Union
def modify_patient_age(dni: str, new_age: int) -> Union[List[str], int]:
    """Update the age (index 2) of the patient identified by ``dni``.

    Returns the updated patient record on success, or -1 when the patient
    is not found or ``new_age`` is 17 or less.

    Note: reads the module-level ``lista_paciente`` list of records.
    """
    for record in lista_paciente:
        if record[0] != dni:
            continue
        if new_age <= 17:
            return -1
        record[2] = str(new_age)
        return record
    return -1
#!/bin/bash
# SLURM job: train DCGAN (disc_ratio 0.0001) on one V100 GPU.
#SBATCH --account=def-lombaert
#SBATCH --gres=gpu:v100l:1              # Number of GPUs (per node)
#SBATCH --cpus-per-task=8               # CPU cores/threads
#SBATCH --mem=92G                       # memory (per node)
#SBATCH --time=05-00:00                 # time (DD-HH:MM)
#SBATCH --mail-user=pierre-luc.delisle@live.com
#SBATCH --mail-type=ALL                 # ALL already covers BEGIN/END/FAIL/REQUEUE
#SBATCH --output=DCGAN_school_disc_ratio_0_0001.out
#SBATCH --job-name=DCGAN_school_disc_ratio_0_0001
# (The duplicate "--output=%x-%j.out" directive was removed: sbatch applies
# the last --output, so the first one was dead.)
# Record GPU state in the job log, then run training inside the venv.
nvidia-smi
source /home/pld2602/venv/bin/activate
CUDA_VISIBLE_DEVICES=0 python /project/def-lombaert/pld2602/code/deepNormalizev5/main_cc.py --config=/project/def-lombaert/pld2602/code/deepNormalizev5/deepNormalize/experiments/experiments_school/DCGAN/disc_ratio/config_disc_ratio_0.0001.yaml
<reponame>victorbuckservices/workshop-spring-4.0-to-4.2<gh_stars>1-10
package demo._42.transactional;

import org.junit.Assert;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.annotation.Commit;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.transaction.TestTransaction;
import org.springframework.transaction.annotation.Transactional;

import demo.AbstractTest;
import demo.domain.User;
import demo.repository.UserRepository;

import static demo.domain.User.newUser;

/**
 * Demonstrates Spring 4.2's programmatic {@link TestTransaction} control
 * together with the {@link Rollback} / {@link Commit} annotations that
 * replaced the deprecated {@code @TransactionConfiguration}.
 *
 * Created by nlabrot on 02/09/15.
 */
//Deprecated @TransactionConfiguration(defaultRollback=true)
@Transactional
public class TransactionalTest extends AbstractTest {

    @Autowired
    private UserRepository userRepository;

    /**
     * Saves a user, ends the test transaction with rollback (the default
     * indicated by {@code @Rollback}), and verifies nothing was persisted.
     */
    @Test
    @Rollback
    public void testRollback() throws Exception {
        User user = newUser().address("address").firstName("firstname").lastName("lastname").build();
        user = userRepository.save(user);
        TestTransaction.end();
        Assert.assertEquals(0 , userRepository.findAll().size());
    }

    /**
     * Saves a user, ends the test transaction with commit (forced by
     * {@code @Commit}), and verifies the row was persisted.
     */
    @Test
    @Commit
    public void testCommit() throws Exception {
        User user = newUser().address("address").firstName("firstname").lastName("lastname").build();
        user = userRepository.save(user);
        TestTransaction.end();
        Assert.assertEquals(1 , userRepository.findAll().size());
    }
}
|
import { debounceTime, interval } from '@tanbo/stream';
describe('debounceTime', () => {
  // Emissions every 5ms with a 10ms debounce window: each new value resets
  // the timer before it fires, so nothing is ever delivered.
  test('确保在忽略在等待时间内数据', done => {
    const arr: any[] = []
    const sub = interval(5).pipe(debounceTime(10)).subscribe(value => {
      arr.push(value)
    })
    setTimeout(() => {
      sub.unsubscribe()
      expect(arr).toEqual([]);
      done()
    }, 100)
  })
  // Emissions every 10ms with a 7ms debounce: each value's quiet period
  // elapses before the next emission, so values 0..3 arrive within 55ms.
  test('确保达到等等时间发送数据', done => {
    const arr: any[] = []
    const sub = interval(10).pipe(debounceTime(7)).subscribe(value => {
      arr.push(value)
    })
    setTimeout(() => {
      sub.unsubscribe()
      expect(arr).toEqual([0, 1, 2, 3]);
      done()
    }, 55)
  })
})
|
package com.haufe.cookbook;
/**
 * A food item in the cookbook.
 *
 * NOTE(review): the method names use PascalCase; Java convention is
 * camelCase (e.g. whatAmI / isEatableRaw), but renaming would break all
 * existing implementors.
 */
public interface Food {
    //everything is public by default
    /** @return a human-readable description of this food */
    String WhatAmI();
    /** @return whether this food can be eaten raw */
    boolean EatableRaw();
}
|
#!/bin/bash
#
# installer for cirrus
# Sets up working dir eg $HOME/.cirrus
# Clones latest stable tag of cirrus into it
# runs setup commands to build venv for cirrus
# installs git alias commands
# gets token for github access & updates .gitconfig

INSTALL_DIR="${HOME}/.cirrus"
DEFAULT_USER="${USER}"

# prerequisites are pip and virtualenv
# (exit code 127 == command not found)
pip --version 2>/dev/null
if [ $? -eq 127 ]; then
    echo "pip binary not found, cannot proceed"
    exit 127
fi
virtualenv --version 2>/dev/null
if [ $? -eq 127 ]; then
    echo "virtualenv binary not found, cannot proceed"
    exit 127
fi

# Let the user override the install location; default to $HOME/.cirrus.
read -p "Installation directory [${INSTALL_DIR}]: " LOCATION
LOCATION=${LOCATION:-$INSTALL_DIR}

echo "Installing cirrus in ${LOCATION}..."
mkdir -p $LOCATION
echo "Installing cirrus to LOCATION=${LOCATION}" > ${LOCATION}/install.log
cd $LOCATION

# Optional overrides: custom PyPI index and pinned cirrus version.
CUSTOM_PYPI_SERVER=${CIRRUS_PYPI_URL:-""}
CIRRUS_VERSION=${CIRRUS_VERSION_OVERRIDE:-""}
CIRRUS_PIP_REQ="cirrus-cli"
if [ "x$CIRRUS_PIP_REQ" != "x" ];then
    CIRRUS_PIP_REQ+="$CIRRUS_VERSION"
fi

# bootstrap virtualenv
virtualenv venv
. venv/bin/activate
if [ "x$CUSTOM_PYPI_SERVER" == "x" ];then
    pip install ${CIRRUS_PIP_REQ} 1>> ${LOCATION}/install.log
else
    pip install --index-url=${CUSTOM_PYPI_SERVER} ${CIRRUS_PIP_REQ} 1>> ${LOCATION}/install.log
fi

# 'selfsetup' is provided by the cirrus-cli package installed above.
export CIRRUS_HOME=$LOCATION
export VIRTUALENV_HOME=$LOCATION/venv
selfsetup
|
import json
data = '{"apple":["red","green"],"pear":["green"],"grapes":["green","red"],"banana":["yellow"]}'
json_data = json.loads(data)
# Print every fruit that lists an even number of colors.
for fruit, colors in json_data.items():
    if len(colors) % 2 == 0:
        print(fruit)
#!/bin/bash
# Build/install script for cri-tools, driven by the shared run() helper
# in config.sh (which dispatches to the functions defined below).
VERSION=1.9
NAME=cri-tools
BRANCH=release-1.9
REPO="https://github.com/kubernetes-incubator/cri-tools.git"

# Import variables and functions from config.sh
ScriptPath=`dirname $0`
source ${ScriptPath}/config.sh

# Uncomment variables below to overwrite global settings
#CC=
#CXX=
SRC_DIR=${GO_PATH}/src/github.com/kubernetes-incubator/cri-tools
#BIN_DIR=
LIB_DIR=${LIB_DIR}/cri-o

# Remove build artifacts from the source tree.
clean()
{
    cd ${SRC_DIR}
    make clean
}

# Clone/check out the pinned branch into SRC_DIR (via config.sh's _init_).
init()
{
    _init_ ${REPO} ${BRANCH} ${SRC_DIR}
}

# No configure step needed for this project.
config()
{
    return
}

# No update step needed for this project.
update()
{
    return
}

# Compile cri-tools; abort the script on build failure.
build()
{
    cd ${SRC_DIR}
    make
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to build cri-tools."
        exit 1
    fi
}

# Install the binaries into LIB_DIR.
install()
{
    cd ${SRC_DIR}
    make BINDIR=${LIB_DIR} install
}

# Remove the installed binaries from LIB_DIR.
uninstall()
{
    cd ${SRC_DIR}
    make BINDIR=${LIB_DIR} uninstall
}

# RUN (function defined in config.sh)
run $@
|
<reponame>moemaair/MyRestaurants-App
package com.moringaschool.myrestaurants;

/**
 * Placeholder instrumentation test class; no test cases have been added yet.
 */
public class MainActivityInstrumentationTest {
}
|
package controllers
import javax.inject._
import play.api.mvc.Action
import play.api.mvc.Results.Ok
import play.api.mvc.Controller
import play.api.libs.oauth.OAuthCalculator
import play.api.libs.oauth.{ ConsumerKey, RequestToken }
import play.api.Play.current
import scala.concurrent.Future
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.ws._
import play.api.libs.iteratee._
import play.api.Logger
import play.api.libs.json._
import play.extras.iteratees._
import play.api.mvc.WebSocket
import play.api.libs.streams.ActorFlow
import akka.actor.ActorSystem
import akka.stream.Materializer
import play.api.mvc.WebSocket.MessageFlowTransformer
import actor.TwitterStreamer
/**
 * This controller creates an `Action` to handle HTTP requests to the
 * application's home page, plus WebSocket/streaming endpoints that relay
 * the Twitter statuses/filter stream.
 */
@Singleton
class HomeController @Inject()(implicit system: ActorSystem, materializer: Materializer, configuration: play.api.Configuration) extends Controller {

  // WebSocket frames: incoming text, outgoing JSON.
  implicit val messageFlowTransformer = MessageFlowTransformer.jsonMessageFlowTransformer[String, JsValue]

  /**
   * Create an Action to render an HTML page with a welcome message.
   * The configuration in the `routes` file means that this method
   * will be called when the application receives a `GET` request with
   * a path of `/`.
   */
  def index = Action { implicit request =>
    Ok(views.html.index("Tweets"))
  }

  /*def tweets = play.api.mvc.Action.async {
    credentials.map {
      case (consumerKey, requestToken) =>
        WS
          .url("https://stream.twitter.com/1.1/statuses/filter.json")
          .sign(OAuthCalculator(consumerKey, requestToken))
          .withQueryString("track" -> "reactive")
          .get { response =>
            Logger.info("Status: " + response.status)
            loggingIteratee
          }.map { _ =>
            Ok("Stream closed")
          }
    }getOrElse {
      Future.successful(InternalServerError("No credentials are set"))
    }
  }*/

  // WebSocket endpoint: each connection gets a TwitterStreamer actor that
  // pushes tweet JSON to the client.
  def tweets = WebSocket.accept[String, JsValue] { implicit request =>
    println("here")
    ActorFlow.actorRef(out => TwitterStreamer.props(out))
  }

  /*
  def tweets2 = WebSocket.acceptWithActor[String, JsValue] {
    request => out => TwitterStreamer.props(out)
  }*/

  def replicateFeed = Action { implicit request => Ok
    // Ok.feed(TwitterStreamer.subscribeNode)
  }

  // Opens the Twitter filter stream over HTTP and logs each decoded JSON
  // object; responds once the upstream stream closes.
  def tweets2 = Action.async {
    credentials.map {
      case (consumerKey, requestToken) =>
        // Joined iteratee/enumerator pair: bytes fed to `iteratee` flow
        // out of `enumerator`, where they are decoded into JSON objects.
        val (iteratee, enumerator) = Concurrent.joined[Array[Byte]]
        val jsonStream: Enumerator[JsObject] =
          enumerator &>
          Encoding.decode() &>
          Enumeratee.grouped(JsonIteratees.jsSimpleObject)
        val loggingIteratee = Iteratee.foreach[JsObject] { value =>
          Logger.info(value.toString)
        }
        jsonStream run loggingIteratee
        WS
          .url("https://stream.twitter.com/1.1/statuses/filter.json")
          .sign(OAuthCalculator(consumerKey, requestToken))
          .withQueryString("track" -> "reactive")
          .get { response =>
            Logger.info("Status: " + response.status)
            iteratee
          }.map { _ =>
            Ok("Stream closed")
          }
    }getOrElse {
      Future.successful(InternalServerError("No credentials are set"))
    }
  }

  // Debug sink: log raw stream bytes as text.
  def loggingIteratee = Iteratee.foreach[Array[Byte]] { array => Logger.info(array.map(_.toChar).mkString) }

  // Twitter OAuth credentials from application.conf; None when any key is missing.
  def credentials: Option[(ConsumerKey, RequestToken)] = for {
    apiKey <- configuration.getString("twitter.apiKey")
    apiSecret <- configuration.getString("twitter.apiSecret")
    token <- configuration.getString("twitter.token")
    tokenSecret <- configuration.getString("twitter.tokenSecret")
  } yield (
    ConsumerKey(apiKey, apiSecret),
    RequestToken(token, tokenSecret))
}
<reponame>sedv8808/APIs_UBC_ML
# Put your API keys in place of 'your_password' below.
# NOTE(review): never commit real credentials; prefer loading these from
# environment variables or an ignored local config file.
translink = {'key':'your_password'}
splash = {'key':'your_password'}
github_api = {'secret': '<PASSWORD>_password'}
#!/bin/bash
# Launch the SageMaker RL Coach training worker, backed by a local redis
# instance used for inter-process communication.
echo "Starting sage-train.sh"
set -e
COACH_EXP_NAME=sagemaker_rl
cd /opt/amazon/
export PYTHONUNBUFFERED=1
# Start the redis server and Coach training worker
# (redis runs in the background; the worker waits 5s for it to come up).
redis-server /etc/redis/redis.conf & (sleep 5 && \
python markov/training_worker.py $@ 2>&1)
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import chrome from 'ui/chrome';
import { management } from 'ui/management';
import { i18n } from '@kbn/i18n';
import { BASE_PATH } from '../common/constants';
// Register the Index Lifecycle Policies app under the Elasticsearch
// management section, but only when the ILM UI is enabled via injected config.
const esSection = management.getSection('elasticsearch');
if (chrome.getInjected('ilmUiEnabled')) {
  esSection.register('index_lifecycle_policies', {
    visible: true,
    display: i18n.translate('xpack.indexLifecycleMgmt.appTitle', {
      defaultMessage: 'Index Lifecycle Policies',
    }),
    order: 2,
    url: `#${BASE_PATH}policies`,
  });
}
|
use futures::{future, Future, FutureExt, TryFutureExt};
use hyper::{Body, Request, Response};
type BoxBody = hyper::body::BoxBody;
type IntoServiceFuture = future::Map<BoxFuture<'static, Response<BoxBody>>, fn(Response<BoxBody>) -> Result<Response<BoxBody>, std::convert::Infallible>>;
fn process_request(request: Request<BoxBody>) -> IntoServiceFuture {
// Perform asynchronous processing of the request
let response_future = async {
// Simulate asynchronous processing with a delay
tokio::time::delay_for(std::time::Duration::from_secs(1)).await;
// Create a response based on the request
let response = Response::new(Body::from("Processed response"));
Ok(response)
};
// Map the future to handle the response
response_future.map(|res| Ok(res)).boxed()
} |
#include "Texture.h"
#include "Core/Resources/Resources.h"
#include <stb_image.h>
#include <stb_image_write.h>
// ------------------------------------------------------------------------------
// Creates an empty RGBA8 texture of the given size using DSA
// (glCreateTextures/glTextureStorage2D), with linear min / nearest mag
// filtering and edge clamping on all axes.
Texture::Texture(uint width, uint height) : m_Width(width), m_Height(height)
{
	// -- Set Parameters --
	m_InternalFormat = GL_RGBA8;
	m_DataFormat = GL_RGBA;

	// -- Create Texture ---
	glCreateTextures(GL_TEXTURE_2D, 1, &m_ID);
	//glBindTexture(GL_TEXTURE_2D, m_ID);
	glTextureStorage2D(m_ID, 1, GL_RGBA8, m_Width, m_Height);

	// -- Set Texture Parameters --
	glTextureParameteri(m_ID, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTextureParameteri(m_ID, GL_TEXTURE_MAG_FILTER, GL_NEAREST); // this gives error: GL_LINEAR_MIPMAP_LINEAR
	glTextureParameteri(m_ID, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
	glTextureParameteri(m_ID, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTextureParameteri(m_ID, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	// -- Mipmap & Unbind --
	// NOTE(review): glGenerateMipmap(GL_TEXTURE_2D) operates on the texture
	// bound to the active unit, but nothing is bound here (the bind above is
	// commented out) — likely intended: glGenerateTextureMipmap(m_ID). With
	// only 1 storage level it is a no-op anyway; confirm and remove or fix.
	glGenerateMipmap(GL_TEXTURE_2D);
	glBindTexture(GL_TEXTURE_2D, 0);
}
// Loads a texture from an image file on disk (3- or 4-channel images only).
// On decode failure the object is left without a GL texture and logs an error.
Texture::Texture(const std::string& path)
{
	// Flip vertically so row 0 matches GL's bottom-left origin.
	int w, h, channels;
	stbi_set_flip_vertically_on_load(1);
	stbi_uc* texture_data = stbi_load(path.c_str(), &w, &h, &channels, 0);

	// -- Check for Failure --
	if (!texture_data)
	{
		ENGINE_LOG("Failed to load texture data from path: %s\nAborting...", path.c_str());
		return;
	}

	// -- Set Parameters --
	m_Width = w; m_Height = h;
	m_Path = path;

	// Only 3- and 4-channel images are supported; anything else leaves both
	// formats at 0 and trips the ASSERT below.
	m_InternalFormat = 0;
	m_DataFormat = 0;
	if (channels == 4)
	{
		m_InternalFormat = GL_RGBA8;
		m_DataFormat = GL_RGBA;
	}
	else if (channels == 3)
	{
		m_InternalFormat = GL_RGB8;
		m_DataFormat = GL_RGB;
	}

	ASSERT(m_InternalFormat & m_DataFormat, "Image Format not Supported!"); // False (0) if either is 0

	// -- Create Texture & Set Parameters (DSA, no bind required) --
	glCreateTextures(GL_TEXTURE_2D, 1, &m_ID);
	glTextureStorage2D(m_ID, 1, m_InternalFormat, m_Width, m_Height);
	glTextureParameteri(m_ID, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTextureParameteri(m_ID, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	glTextureParameteri(m_ID, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
	glTextureParameteri(m_ID, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTextureParameteri(m_ID, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	// -- Upload Pixels & Generate Mipmaps --
	glTextureSubImage2D(m_ID, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, texture_data);
	// Fix: DSA mipmap generation for this texture; the old
	// glGenerateMipmap(GL_TEXTURE_2D) targeted whatever was bound (nothing here).
	glGenerateTextureMipmap(m_ID);
	glBindTexture(GL_TEXTURE_2D, 0);

	// -- Free STBI Image --
	stbi_image_free(texture_data);
}
// Releases the GPU texture object owned by this instance.
Texture::~Texture()
{
	glDeleteTextures(1, &m_ID);
}
// ------------------------------------------------------------------------------
void Texture::SetData(void* data, uint size)
{
uint bpp = m_DataFormat == GL_RGBA ? 4 : 3; // Bytes per pixel
ASSERT(size == m_Width * m_Height * bpp, "Data passed must be the same size than the entire texture size");
Bind();
//glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, data);
glTextureSubImage2D(m_ID, 0, 0, 0, m_Width, m_Height, m_DataFormat, GL_UNSIGNED_BYTE, data);
}
// Binds this texture to the given texture unit (default slot is declared
// in the header — presumably 0; confirm there).
void Texture::Bind(uint slot) const
{
	glActiveTexture(GL_TEXTURE0 + slot);
	glBindTexture(GL_TEXTURE_2D, m_ID);
}
// Unbinds whatever 2D texture is bound on the currently active unit.
void Texture::Unbind() const
{
	glBindTexture(GL_TEXTURE_2D, 0);
}
// ------------------------------------------------------------------------------
// ------------------------------------------------------------------------------
// ------------------------------------------------------------------------------
// Builds the cubemap from the default skybox face set and loads it immediately.
// NOTE(review): "bottom" precedes "top" here, which maps bottom onto the
// GL +Y face slot — verify this ordering is intentional for the flipped loads.
CubemapTexture::CubemapTexture()
{
	// Face order consumed by LoadTextures(): +X, -X, the two Y faces, +Z, -Z.
	m_TexturePaths =
	{
		"Resources/Textures/Skybox/right.jpg",
		"Resources/Textures/Skybox/left.jpg",
		"Resources/Textures/Skybox/bottom.jpg",
		"Resources/Textures/Skybox/top.jpg",
		"Resources/Textures/Skybox/front.jpg",
		"Resources/Textures/Skybox/back.jpg"
	};

	LoadTextures();
}
// Replaces a single cubemap face path after verifying the file exists and
// decodes, then rebuilds the whole cubemap.
// Fix: the probe-loaded pixels were leaked on the success path — the original
// only freed them when stbi_load FAILED (a no-op on nullptr).
void CubemapTexture::SetTexture(CUBEMAP_TEXTURE cubemap_texture_type, const std::string& filepath)
{
	if (std::filesystem::exists(filepath))
	{
		// Probe-load first so we never store a path that cannot be decoded.
		int w, h, channels;
		stbi_uc* data = stbi_load(filepath.c_str(), &w, &h, &channels, 0);
		if (!data)
		{
			ENGINE_LOG("Failed to load texture data from path: %s\nAborting...", filepath.c_str());
			return;
		}

		// The probe data itself is unused; LoadTextures() reloads all six faces.
		stbi_image_free(data);

		m_TexturePaths[(int)cubemap_texture_type] = filepath;
		LoadTextures();
	}
}
// (Re)creates the cubemap GL texture from the six stored face paths.
// NOTE(review): calling this again (e.g. from SetTexture) generates a new GL
// texture without deleting the previous m_ID — potential GPU-side leak; confirm
// ownership before adding a delete here.
void CubemapTexture::LoadTextures()
{
	// -- Set Variables --
	int w, h, channels;
	std::vector<stbi_uc*> texture_data;
	stbi_set_flip_vertically_on_load(1);

	// -- Create Cubemap Texture --
	glGenTextures(1, &m_ID);
	glBindTexture(GL_TEXTURE_CUBE_MAP, m_ID);

	// -- Load Cubemap Textures --
	for (uint i = 0; i < 6; ++i)
	{
		texture_data.push_back(stbi_load(m_TexturePaths[i].c_str(), &w, &h, &channels, 0));
		if (!texture_data[i])
		{
			ENGINE_LOG("Failed to load texture data from path: %s\nAborting...", m_TexturePaths[i].c_str());
			// Free everything loaded so far (stbi_image_free(nullptr) is a safe no-op).
			for (uint j = 0; j <= i; ++j)
				stbi_image_free(texture_data[j]);
			return;
		}
	}

	// -- Set Cubemap Textures --
	// NOTE(review): faces are uploaded as GL_RGB/GL_UNSIGNED_BYTE into a GL_RGB16F
	// store and assume every source image has 3 channels and the same dimensions
	// (w/h come from the LAST load) — confirm against the assets.
	m_Width = w; m_Height = h;
	for (uint i = 0; i < 6; ++i)
		glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB16F, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, texture_data[i]);

	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);

	// -- Unbind & Free --
	// Fix: unbind the CUBE_MAP target; the original unbound GL_TEXTURE_2D,
	// leaving this cubemap bound.
	glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
	for (uint i = 0; i < texture_data.size(); ++i)
		stbi_image_free(texture_data[i]);
}
|
#! /usr/bin/bash
# Builds the bluesky-dev image on top of bluesky-base: installs base pypi
# packages, then installs the current local checkouts of the target projects.
set -e
set -o xtrace

container=$(buildah from bluesky-base)

buildah run "$container" -- dnf -y install python3-pycurl

# install some base python packages from pypi
buildah run "$container" -- pip3 install caproto[standard] jupyter httpie ipython fastapi uvicorn python-jose[cryptography] passlib[bcrypt]
buildah run "$container" -- pip3 install git+https://github.com/pcdshub/happi.git@master#egg=happi

# copy in source and install the current state of your checkout
# (the suitcase-* glob expands at assignment time)
targets=( ../event-model ../bluesky ../ophyd ../databroker ../bluesky-adaptive ../bluesky-queueserver ../suitcase-* )

# Fix: quote expansions so directory names containing spaces do not word-split.
for t in "${targets[@]}"; do
    if test -f "$t/setup.py"; then
        t="$(basename -- "$t")"
        # move the source into the container
        buildah copy "$container" "../$t" "/src/$t"
        # run the install
        buildah run "$container" -- pip3 install "/src/$t"
        # nuke the source to save space?
        buildah run "$container" -- rm -rf "/src/$t"
    fi
done

# install everything else ;)
buildah run "$container" -- pip3 install nslsii
buildah run "$container" -- pip3 uninstall --yes pyepics

buildah unmount "$container"
buildah commit "$container" bluesky-dev
|
<filename>wechat-sdk/src/main/java/com/app/wechat/request/WxCondMenuDelRequest.java
/**
* Copyright (c) 2017. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
* Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan.
* Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna.
* Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus.
* Vestibulum commodo. Ut rhoncus gravida arcu.
*/
package com.app.wechat.request;
import com.app.wechat.domain.menu.WxMenuModel;
import com.app.wechat.internal.code.HttpMethod;
import com.app.wechat.internal.code.WxUrl;
import com.app.wechat.response.WxCondMenuDelResponse;
/**
* <p>功 能:删除个性化菜单API的请求信息</p>
* <p>版 权:Copyright (c) 2017</p>
* <p>创建时间:2017年7月6日 下午6:53:48</p>
* <p>
* <p>开发者可以通过以下条件来设置用户看到的菜单:</p>
* <ol>
* <li>用户标签(开发者的业务需求可以借助用户标签来完成)</li>
* <li>性别</li>
* <li>手机操作系统</li>
* <li>地区(用户在微信客户端设置的地区)</li>
* <li>语言(用户在微信客户端设置的语言)</li>
* </ol>
* <p>个性化菜单接口说明:</p>
* <ol>
* <li>个性化菜单要求用户的微信客户端版本在iPhone6.2.2,Android 6.2.4以上,暂时不支持其他版本微信</li>
* <li>菜单的刷新策略是,在用户进入公众号会话页或公众号profile页时,如果发现上一次拉取菜单的请求在5分钟以前,就会拉取一下菜单,如果菜单有更新,就会刷新客户端的菜单。测试时可以尝试取消关注公众账号后再次关注,则可以看到创建后的效果</li>
* <li>普通公众号的个性化菜单的新增接口每日限制次数为2000次,删除接口也是2000次,测试个性化菜单匹配结果接口为20000次</li>
* <li>出于安全考虑,一个公众号的所有个性化菜单,最多只能设置为跳转到3个域名下的链接</li>
* <li>创建个性化菜单之前必须先创建默认菜单(默认菜单是指使用普通自定义菜单创建接口创建的菜单)。如果删除默认菜单,个性化菜单也会全部删除</li>
* <li>个性化菜单接口支持用户标签,请开发者注意,当用户身上的标签超过1个时,以最后打上的标签为匹配</li>
* </ol>
* <p>个性化菜单匹配规则说明:</p>
* <ul>
* <li>个性化菜单的更新是会被覆盖的</li>
* <li>例如公众号先后发布了默认菜单,个性化菜单1,个性化菜单2,个性化菜单3。那么当用户进入公众号页面时,将从个性化菜单3开始匹配,如果个性化菜单3匹配成功,则直接返回个性化菜单3,否则继续尝试匹配个性化菜单2,直到成功匹配到一个菜单</li>
* <li>根据上述匹配规则,为了避免菜单生效时间的混淆,决定不予提供个性化菜单编辑API,开发者需要更新菜单时,需将完整配置重新发布一轮</li>
* </ul>
*
* @author 王建
* @version 1.0
*/
public class WxCondMenuDelRequest extends AbstractWxRequest<WxCondMenuDelResponse> {
private static final long serialVersionUID = 1L;
private WxMenuModel object;
public WxCondMenuDelRequest(WxMenuModel object) {
this.object = object;
}
public WxMenuModel getObject() {
return object;
}
public Class<WxCondMenuDelResponse> getResponseClass() {
return WxCondMenuDelResponse.class;
}
public String getUrl(String accessToken) {
return String.format(WxUrl.API_MENU_DELCONDITIONAL, accessToken);
}
public HttpMethod getMethod() {
return HttpMethod.POST;
}
} |
// Swap the first occurrence of "great" for "wonderful" and log the result.
const original = "It is a great day!";
const message = original.replace('great', 'wonderful');
console.log(message);
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for this app: creates the AppMaker model.

    The escaped ``verbose_name`` values below decode to Chinese UI labels and
    are kept byte-identical — altering a migration changes its recorded state.
    """

    # Requires the initial taskflow3 and core migrations (FK targets below).
    dependencies = [
        ("taskflow3", "0001_initial"),
        ("core", "0001_initial"),
    ]

    operations = [
        migrations.CreateModel(
            name="AppMaker",
            fields=[
                ("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
                ("name", models.CharField(max_length=255, verbose_name="APP\u540d\u79f0")),
                ("code", models.CharField(max_length=255, verbose_name="APP\u7f16\u7801")),
                (
                    "info",
                    models.CharField(max_length=255, null=True, verbose_name="APP\u57fa\u672c\u4fe1\u606f", blank=True),
                ),
                (
                    "desc",
                    models.CharField(max_length=255, null=True, verbose_name="APP\u63cf\u8ff0\u4fe1\u606f", blank=True),
                ),
                (
                    "logo_url",
                    models.TextField(
                        default="", verbose_name="\u8f7b\u5e94\u7528logo\u5b58\u653e\u5730\u5740", blank=True
                    ),
                ),
                ("link", models.URLField(max_length=255, verbose_name="gcloud\u94fe\u63a5")),
                ("creator", models.CharField(max_length=100, verbose_name="\u521b\u5efa\u4eba")),
                ("create_time", models.DateTimeField(auto_now_add=True, verbose_name="\u521b\u5efa\u65f6\u95f4")),
                ("editor", models.CharField(max_length=100, null=True, verbose_name="\u7f16\u8f91\u4eba", blank=True)),
                ("default_viewer", models.TextField(default="{}", verbose_name="\u53ef\u89c1\u8303\u56f4")),
                ("is_deleted", models.BooleanField(default=False, verbose_name="\u662f\u5426\u5220\u9664")),
                (
                    "business",
                    models.ForeignKey(
                        verbose_name="\u6240\u5c5e\u4e1a\u52a1", to="core.Business", on_delete=models.CASCADE
                    ),
                ),
                (
                    "task_flow",
                    models.ForeignKey(
                        verbose_name="\u5173\u8054\u4efb\u52a1",
                        to="taskflow3.TaskFlowInstance",
                        on_delete=models.CASCADE,
                    ),
                ),
            ],
            options={
                "ordering": ["-id"],
                "verbose_name": "\u3010APP:App_maker\u3011App_maker",
                "verbose_name_plural": "\u3010APP:App_maker\u3011App_maker",
            },
        ),
    ]
|
#!/bin/bash
# LinuxGSM info_distro.sh module
# Author: Daniel Gibbs
# Contributors: http://linuxgsm.com/contrib
# Website: https://linuxgsm.com
# Description: Variables providing useful info on the Operating System such as disk and performace info.
# Used for command_details.sh, command_debug.sh and alert.sh.
functionselfname="$(basename "$(readlink -f "${BASH_SOURCE[0]}")")"

### Game Server pid
# Resolve the game server's pid from its tmux session (only when the earlier
# status check reported the server as running).
if [ "${status}" == "1" ]; then
	gameserverpid=$(tmux list-sessions -F "#{session_name} #{pane_pid}" | grep "^${sessionname} " | awk '{print $NF}')
fi
### Distro information

## Distro
# Returns architecture, kernel and distro/os.
arch=$(uname -m)
kernel=$(uname -r)

# Distro Name - Ubuntu 16.04 LTS
# Distro Version - 16.04
# Distro ID - ubuntu
# Distro Codename - xenial
# Gathers distro info from various sources filling in missing gaps.
# Sources are checked in order; each later source only fills values still
# empty, so the more reliable earlier sources win.
distro_info_array=( os-release lsb_release hostnamectl debian_version redhat-release )
for distro_info in "${distro_info_array[@]}"; do
	if [ -f "/etc/os-release" ]&&[ "${distro_info}" == "os-release" ]; then
		distroname=$(grep PRETTY_NAME /etc/os-release | sed 's/PRETTY_NAME=//g' | tr -d '="' | sed 's/\"//g')
		distroversion=$(grep VERSION_ID /etc/os-release | sed 's/VERSION_ID=//g' | sed 's/\"//g')
		distroid=$(grep ID /etc/os-release | grep -v _ID | grep -v ID_ | sed 's/ID=//g' | sed 's/\"//g')
		distrocodename=$(grep VERSION_CODENAME /etc/os-release | sed 's/VERSION_CODENAME=//g' | sed 's/\"//g')
	elif [ "$(command -v lsb_release 2>/dev/null)" ]&&[ "${distro_info}" == "lsb_release" ]; then
		# Fix: independent if-blocks (not elif) so a single source can fill
		# SEVERAL missing fields, not just the first empty one.
		if [ -z "${distroname}" ]; then
			distroname=$(lsb_release -sd)
		fi
		if [ -z "${distroversion}" ]; then
			distroversion=$(lsb_release -sr)
		fi
		if [ -z "${distroid}" ]; then
			distroid=$(lsb_release -si)
		fi
		if [ -z "${distrocodename}" ]; then
			distrocodename=$(lsb_release -sc)
		fi
	elif [ "$(command -v hostnamectl 2>/dev/null)" ]&&[ "${distro_info}" == "hostnamectl" ]; then
		if [ -z "${distroname}" ]; then
			distroname=$(hostnamectl | grep "Operating System" | sed 's/Operating System: //g')
		fi
	elif [ -f "/etc/debian_version" ]&&[ "${distro_info}" == "debian_version" ]; then
		if [ -z "${distroname}" ]; then
			distroname="Debian $(cat /etc/debian_version)"
		fi
		if [ -z "${distroversion}" ]; then
			distroversion=$(cat /etc/debian_version)
		fi
		if [ -z "${distroid}" ]; then
			distroid="debian"
		fi
	elif [ -f "/etc/redhat-release" ]&&[ "${distro_info}" == "redhat-release" ]; then
		if [ -z "${distroname}" ]; then
			distroname=$(cat /etc/redhat-release)
		fi
		if [ -z "${distroversion}" ]; then
			distroversion=$(rpm -qa \*-release | grep -Ei "oracle|redhat|centos|fedora" | cut -d"-" -f3)
		fi
		if [ -z "${distroid}" ]; then
			distroid=$(awk '{print $1}' /etc/redhat-release)
		fi
	fi
done
## Glibc version
# e.g: 1.17
glibcversion=$(ldd --version | sed -n '1s/.* //p')

## tmux version
# e.g: tmux 1.6
# tmux older than 1.6 cannot pipe the console to a log file.
if [ ! "$(command -V tmux 2>/dev/null)" ]; then
	tmuxv="${red}NOT INSTALLED!${default}"
	tmuxvdigit="0"
else
	# Digits only (e.g. "16" for 1.6) for the numeric comparison below.
	tmuxvdigit="$(tmux -V | sed "s/tmux //" | sed -n '1 p' | tr -cd '[:digit:]')"
	if [ "${tmuxvdigit}" -lt "16" ]; then
		tmuxv="$(tmux -V) (>= 1.6 required for console log)"
	else
		tmuxv=$(tmux -V)
	fi
fi
## Uptime
# System uptime in whole seconds from /proc/uptime, split into days/hours/minutes.
uptime=$(</proc/uptime)
uptime=${uptime/[. ]*/}
minutes=$(( uptime/60%60 ))
hours=$(( uptime/60/60%24 ))
days=$(( uptime/60/60/24 ))

### Performance information

## Average server load
load=$(uptime|awk -F 'load average: ' '{ print $2 }')

## CPU information
cpumodel=$(awk -F: '/model name/ {name=$2} END {print name}' /proc/cpuinfo | sed 's/^[ \t]*//;s/[ \t]*$//')
cpucores=$(awk -F: '/model name/ {core++} END {print core}' /proc/cpuinfo)
# NOTE(review): "cpufreqency" is misspelled but kept — other modules may reference this variable name.
cpufreqency=$(awk -F: '/cpu MHz/ {freq=$2} END {print freq}' /proc/cpuinfo | sed 's/^[ \t]*//;s/[ \t]*$//')
# CPU usage of the game server pid
if [ -n "${gameserverpid}" ]; then
	cpuused=$(ps --forest -o pcpu -g "${gameserverpid}"|awk '{s+=$1} END {print s}')
	cpuusedmhz=$(echo "${cpufreqency} * ${cpuused} / 100" | bc )
fi
## Memory information
# Available RAM and swap.
# Newer distros can use numfmt to give more accurate results.
if [ "$(command -v numfmt 2>/dev/null)" ]; then
	# Issue #2005 - Kernel 3.14+ contains MemAvailable which should be used. All others will be calculated.
	# get the raw KB values of these fields.
	physmemtotalkb=$(grep MemTotal /proc/meminfo | awk '{print $2}')
	physmemfreekb=$(grep ^MemFree /proc/meminfo | awk '{print $2}')
	physmembufferskb=$(grep ^Buffers /proc/meminfo | awk '{print $2}')
	physmemcachedkb=$(grep ^Cached /proc/meminfo | awk '{print $2}')
	physmemreclaimablekb=$(grep ^SReclaimable /proc/meminfo | awk '{print $2}')
	# check if MemAvailable Exists.
	if grep -q ^MemAvailable /proc/meminfo; then
		physmemactualfreekb=$(grep ^MemAvailable /proc/meminfo | awk '{print $2}')
	else
		# Fallback estimate for older kernels: free + buffers + cached.
		physmemactualfreekb=$((physmemfreekb+physmembufferskb+physmemcachedkb))
	fi
	# Available RAM and swap.
	physmemtotalmb=$((physmemtotalkb/1024))
	physmemtotal=$(numfmt --to=iec --from=iec --suffix=B "${physmemtotalkb}K")
	physmemfree=$(numfmt --to=iec --from=iec --suffix=B "${physmemactualfreekb}K")
	physmemused=$(numfmt --to=iec --from=iec --suffix=B "$((physmemtotalkb-physmemfreekb-physmembufferskb-physmemcachedkb-physmemreclaimablekb))K")
	physmemavailable=$(numfmt --to=iec --from=iec --suffix=B "${physmemactualfreekb}K")
	physmemcached=$(numfmt --to=iec --from=iec --suffix=B "$((physmemcachedkb+physmemreclaimablekb))K")
	swaptotal=$(numfmt --to=iec --from=iec --suffix=B "$(grep ^SwapTotal /proc/meminfo | awk '{print $2}')K")
	swapfree=$(numfmt --to=iec --from=iec --suffix=B "$(grep ^SwapFree /proc/meminfo | awk '{print $2}')K")
	swapused=$(numfmt --to=iec --from=iec --suffix=B "$(($(grep ^SwapTotal /proc/meminfo | awk '{print $2}')-$(grep ^SwapFree /proc/meminfo | awk '{print $2}')))K")
	# RAM usage of the game server pid
	# MB
	if [ "${gameserverpid}" ]; then
		memused=$(ps --forest -o rss -g "${gameserverpid}" | awk '{s+=$1} END {print s}'| awk '{$1/=1024;printf "%.0f",$1}{print $2}')
		# %
		pmemused=$(ps --forest -o %mem -g "${gameserverpid}" | awk '{s+=$1} END {print s}')
	fi
else
	# Older distros will need to use free.
	# Older versions of free do not support -h option.
	if [ "$(free -h > /dev/null 2>&1; echo $?)" -ne "0" ]; then
		humanreadable="-m"
	else
		humanreadable="-h"
	fi
	physmemtotalmb=$(free -m | awk '/Mem:/ {print $2}')
	physmemtotal=$(free ${humanreadable} | awk '/Mem:/ {print $2}')
	physmemfree=$(free ${humanreadable} | awk '/Mem:/ {print $4}')
	physmemused=$(free ${humanreadable} | awk '/Mem:/ {print $3}')
	# Older "free" builds print a "-/+ buffers/cache:" row; detect that layout.
	oldfree=$(free ${humanreadable} | awk '/cache:/')
	if [ "${oldfree}" ]; then
		physmemavailable="n/a"
		physmemcached="n/a"
	else
		physmemavailable=$(free ${humanreadable} | awk '/Mem:/ {print $7}')
		physmemcached=$(free ${humanreadable} | awk '/Mem:/ {print $6}')
	fi
	swaptotal=$(free ${humanreadable} | awk '/Swap:/ {print $2}')
	swapused=$(free ${humanreadable} | awk '/Swap:/ {print $3}')
	swapfree=$(free ${humanreadable} | awk '/Swap:/ {print $4}')
fi
### Disk information

## Available disk space on the partition.
filesystem=$(LC_ALL=C df -hP "${rootdir}" | tail -n 1 | awk '{print $1}')
totalspace=$(LC_ALL=C df -hP "${rootdir}" | tail -n 1 | awk '{print $2}')
usedspace=$(LC_ALL=C df -hP "${rootdir}" | tail -n 1 | awk '{print $3}')
availspace=$(LC_ALL=C df -hP "${rootdir}" | tail -n 1 | awk '{print $4}')

## LinuxGSM used space total.
rootdirdu=$(du -sh "${rootdir}" 2> /dev/null | awk '{print $1}')
if [ -z "${rootdirdu}" ]; then
	rootdirdu="0M"
fi

## LinuxGSM used space in serverfiles dir.
serverfilesdu=$(du -sh "${serverfiles}" 2> /dev/null | awk '{print $1}')
if [ -z "${serverfilesdu}" ]; then
	serverfilesdu="0M"
fi

## LinuxGSM used space total minus backup dir.
rootdirduexbackup=$(du -sh --exclude="${backupdir}" "${serverfiles}" 2> /dev/null | awk '{print $1}')
if [ -z "${rootdirduexbackup}" ]; then
	rootdirduexbackup="0M"
fi

## Backup info
if [ -d "${backupdir}" ]; then
	# Used space in backups dir.
	backupdirdu=$(du -sh "${backupdir}" | awk '{print $1}')
	# If no backup dir, size is 0M.
	if [ -z "${backupdirdu}" ]; then
		backupdirdu="0M"
	fi
	# number of backups set to 0 by default.
	backupcount=0
	# If there are backups in backup dir.
	if [ "$(find "${backupdir}" -name "*.tar.gz" | wc -l)" -ne "0" ]; then
		# number of backups.
		# NOTE(review): this counts only top-level *.tar.gz via glob, while the
		# check above uses recursive find — they can disagree for nested backups.
		backupcount=$(find "${backupdir}"/*.tar.gz | wc -l)
		# most recent backup.
		lastbackup=$(ls -1t "${backupdir}"/*.tar.gz | head -1)
		# date of most recent backup.
		lastbackupdate=$(date -r "${lastbackup}")
		# no of days since last backup.
		lastbackupdaysago=$(( ( $(date +'%s') - $(date -r "${lastbackup}" +'%s') )/60/60/24 ))
		# size of most recent backup.
		lastbackupsize=$(du -h "${lastbackup}" | awk '{print $1}')
	fi
fi
# Network Interface name
netint=$(ip -o addr | grep "${ip}" | awk '{print $2}')
netlink=$(ethtool "${netint}" 2>/dev/null| grep Speed | awk '{print $2}')

# External IP address
# Cached in ${tmpdir}/extip.txt so a failed lookup can fall back to the last known IP.
if [ -z "${extip}" ]; then
	extip=$(curl --connect-timeout 10 -s https://api.ipify.org 2>/dev/null)
	exitcode=$?
	# Should the lookup service return an error, use the last known IP.
	if [ ${exitcode} -eq 0 ]; then
		# A DOCTYPE in the body means we received an HTML error page, not an IP.
		if [[ "${extip}" != *"DOCTYPE"* ]]; then
			echo -e "${extip}" > "${tmpdir}/extip.txt"
		else
			if [ -f "${tmpdir}/extip.txt" ]; then
				extip=$(cat "${tmpdir}/extip.txt")
			else
				fn_print_error_nl "Unable to get external IP"
			fi
		fi
	else
		if [ -f "${tmpdir}/extip.txt" ]; then
			extip=$(cat "${tmpdir}/extip.txt")
		else
			fn_print_error_nl "Unable to get external IP"
		fi
	fi
fi

# Alert IP address
# Preference order: user-configured displayip, then external IP, then internal IP.
if [ "${displayip}" ]; then
	alertip="${displayip}"
elif [ "${extip}" ]; then
	alertip="${extip}"
else
	alertip="${ip}"
fi
# Steam Master Server - checks if detected by master server.
# Requires jq to parse the Steam Web API JSON response.
if [ "$(command -v jq 2>/dev/null)" ]; then
	if [ "${ip}" ]&&[ "${port}" ]; then
		if [ "${steammaster}" == "true" ]; then
			# Will query server IP addresses first.
			for queryip in "${queryips[@]}"; do
				masterserver="$(curl --connect-timeout 10 -m 3 -s 'https://api.steampowered.com/ISteamApps/GetServersAtAddress/v0001?addr='${queryip}':'${port}'&format=json' | jq '.response.servers[]|.addr' | wc -l 2>/dev/null)"
			done
			# Should that not work it will try the external IP.
			if [ "${masterserver}" == "0" ]; then
				masterserver="$(curl --connect-timeout 10 -m 3 -s 'https://api.steampowered.com/ISteamApps/GetServersAtAddress/v0001?addr='${extip}':'${port}'&format=json' | jq '.response.servers[]|.addr' | wc -l 2>/dev/null)"
			fi
			# A non-zero line count means the master server lists this address.
			if [ "${masterserver}" == "0" ]; then
				displaymasterserver="false"
			else
				displaymasterserver="true"
			fi
		fi
	fi
fi
# Sets the SteamCMD glibc requirement if the game server requirement is less or not required.
# Fix: the version comparison previously spliced ${glibc} unquoted INTO the printf
# format string ('%s\n'${glibc}'\n'); build the two-line input explicitly instead.
if [ "${appid}" ]; then
	if [ "${glibc}" = "null" ]||[ -z "${glibc}" ]||[ "$(printf '%s\n%s\n' "${glibc}" "2.14" | sort -V | head -n 1)" != "2.14" ]; then
		# glibc sorts below 2.14 (or is unset/null): SteamCMD itself needs 2.14.
		glibc="2.14"
	fi
fi
|
import os
import sys
def main():
    """Demonstrate one-way IPC: parent sends a message to a forked child over a pipe."""
    r, w = os.pipe()
    pid = os.fork()
    if pid > 0:  # Parent process
        os.close(r)  # Parent only writes; close the unused read end
        message = "Hello, child process!"
        os.write(w, message.encode())  # Write the message to the pipe
        os.close(w)  # Closing the write end signals EOF to the child's read
        # Fix: reap the child so it does not linger as a zombie.
        os.waitpid(pid, 0)
    elif pid == 0:  # Child process
        os.close(w)  # Child only reads; close the unused write end
        message = os.read(r, 100)  # Read up to 100 bytes (enough for this message)
        print("Message from parent:", message.decode())
        os.close(r)
        sys.exit(0)  # Exit the child so it never runs past main()
    else:
        # Defensive only: CPython's os.fork raises OSError rather than returning < 0.
        print("Fork failed")
        sys.exit(1)


if __name__ == "__main__":
    main()
<reponame>terrygao88/FirstExample
// Grab the contact form once so the validity check below can reuse it.
const form = document.getElementById('contactForm');

/**
 * Checks the contact form's built-in constraint validation and
 * notifies the user when it passes.
 */
function myFunction() {
  if (form.checkValidity()) {
    // Fix: corrected user-facing typo "Succesful" -> "Successful".
    alert("Adding Successful!");
  }
}
#!/bin/bash
# Runs the lang4vis ablation for ctrl_visualbert (task 18, val split) using the
# xm-influence test-task config and a pretrained volta checkpoint, dumping
# results under the flickr30kentities_lang4vis output directory.

TASK=18
MODEL=ctrl_visualbert
MODEL_CONFIG=ctrl_visualbert_base
TASKS_CONFIG=xm-influence_test_tasks
PRETRAINED=/science/image/nlp-datasets/emanuele/checkpoints/mpre-unmasked/conceptual_captions_s1234/volta/ctrl_visualbert/ctrl_visualbert_base/pytorch_model_9.bin
OUTPUT_DIR=/science/image/nlp-datasets/emanuele/results/xm-influence/flickr30kentities_lang4vis/${MODEL}
# Phrase/region overlap threshold passed to --overlap_threshold.
THR=0.5

source activate /science/image/nlp-datasets/emanuele/envs/xm-influence

cd ../../../../volta
python ablate_lang4vis.py \
	--bert_model bert-base-uncased --config_file config/${MODEL_CONFIG}.json --from_pretrained ${PRETRAINED} \
	--tasks_config_file config_tasks/${TASKS_CONFIG}.yml --task $TASK --split val \
	--output_dir ${OUTPUT_DIR} --dump_results --masking phrase --overlap_threshold $THR

conda deactivate
|
import { NestFactory } from '@nestjs/core';
import { AppModule } from './app.module';
import { Logger } from '@nestjs/common';
/**
 * Creates the Nest application from the root module and binds it to port 8888.
 */
async function bootstrap() {
  const application = await NestFactory.create(AppModule);
  await application.listen(8888);
}

// Kick off startup; log success, and surface any unhandled startup failure.
bootstrap()
  .then(() => {
    Logger.log(`Started at http://localhost:8888 🧘`, 'Bootstrap');
  })
  .catch((error: Error) => {
    Logger.error('Unhandled Rejection', error.stack, 'Bootstrap');
  });
|
#!/bin/bash
# Builds and pushes a gpmdp build image from the given directory name.
# Usage: ./build.sh <name>
# Fix: abort on missing argument, and stop before pushing if the build fails
# (previously docker push ran even after a failed build).
set -e

name=$1
if [ -z "${name}" ]; then
	echo "Usage: $(basename "$0") <name>" >&2
	exit 1
fi

docker build "${name}" -t "gpmdp/build-${name}"
docker push "gpmdp/build-${name}"
/**
 * Returns the first element of an array (undefined when the array is empty).
 * @param {Array} arr - The array to read from.
 * @returns {*} The element at index 0.
 */
function getFirstElement(arr) {
  const firstElement = arr[0];
  return firstElement;
}
<filename>src/main/java/net/dean/jraw/http/AuthenticationMethod.java
package net.dean.jraw.http;
import net.dean.jraw.http.oauth.AppType;
/**
 * A list of ways a client can authenticate themselves using Reddit's API
 */
public enum AuthenticationMethod {
    /** Not yet authenticated. */
    NOT_YET,

    /** OAuth2 authentication on a website. See {@link AppType#WEB} for more. */
    WEBAPP,

    /** OAuth2 authentication on an installed app. See {@link AppType#INSTALLED} for more. */
    APP,

    /** OAuth2 authentication on a script. See {@link AppType#SCRIPT} for more. */
    SCRIPT,

    /** OAuth2 authentication without the context of a user. If this is a mobile app, use {@link #USERLESS_APP}. */
    USERLESS(true), // Either WEB or SCRIPT could be used, doesn't really matter

    /**
     * OAuth2 authentication without the context of a user. Use this over {@link #USERLESS} if this is being used on a
     * mobile app and thus cannot retain a secret.
     */
    USERLESS_APP(true);

    /** Whether this method authenticates without a user context; fixed at construction, so final. */
    private final boolean userless;

    /** Creates a method that requires a user context. */
    AuthenticationMethod() {
        this(false);
    }

    AuthenticationMethod(boolean userless) {
        this.userless = userless;
    }

    /**
     * Returns true if this AuthenticationMethod does not require a user
     */
    public boolean isUserless() {
        return userless;
    }
}
|
<gh_stars>1-10
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: <NAME>, <NAME>, <NAME>, <NAME>
// =============================================================================
//
// Tractor for the tractor-trailer vehicle model.
// Can be constructed either with solid-axle or with multi-link suspensions.
// Always uses a articulated rack-pinion steering and a 2WD driveline model.
//
// =============================================================================
#ifndef TT_TRACTOR_H
#define TT_TRACTOR_H
#include "chrono_vehicle/wheeled_vehicle/ChWheeledVehicle.h"
// Tractor unit of the tractor-trailer demo vehicle (see file header).
// Wraps ChWheeledVehicle with a fixed 2-axle configuration; the suspension
// type is chosen at construction.
class TT_Tractor : public chrono::vehicle::ChWheeledVehicle {
  public:
    // fixed: chassis welded to ground (for testing); suspType selects the
    // suspension model; contactMethod defaults to non-smooth contact (NSC).
    TT_Tractor(const bool fixed,
               chrono::vehicle::SuspensionType suspType,
               chrono::ChMaterialSurface::ContactMethod contactMethod = chrono::ChMaterialSurface::NSC);

    ~TT_Tractor() {}

    // Fixed geometry of this model (two axles, values in meters/radians).
    virtual int GetNumberAxles() const override { return 2; }
    virtual double GetWheelbase() const override { return 3.38; }
    virtual double GetMinTurningRadius() const override { return 7.7; }
    virtual double GetMaxSteeringAngle() const override { return 30 * chrono::CH_C_DEG_TO_RAD; }

    // Per-wheel suspension probes (axle index + vehicle side).
    double GetSpringForce(int axle, chrono::vehicle::VehicleSide side) const;
    double GetSpringLength(int axle, chrono::vehicle::VehicleSide side) const;
    double GetSpringDeformation(int axle, chrono::vehicle::VehicleSide side) const;

    double GetShockForce(int axle, chrono::vehicle::VehicleSide side) const;
    double GetShockLength(int axle, chrono::vehicle::VehicleSide side) const;
    double GetShockVelocity(int axle, chrono::vehicle::VehicleSide side) const;

    // Places the vehicle at the given chassis pose with optional forward speed.
    virtual void Initialize(const chrono::ChCoordsys<>& chassisPos, double chassisFwdVel = 0) override;

    // Log debugging information
    void LogHardpointLocations(); /// suspension hardpoints at design
    void DebugLog(int what);      /// shock forces and lengths, constraints, etc.

  private:
    chrono::vehicle::SuspensionType m_suspType;
};
#endif
|
<filename>back/src/main/java/com/java110/things/Controller/hardwareManufacturer/ManufacturerController.java<gh_stars>0
package com.java110.things.Controller.hardwareManufacturer;
import com.alibaba.fastjson.JSONObject;
import com.java110.things.Controller.BaseController;
import com.java110.things.entity.manufacturer.ManufacturerDto;
import com.java110.things.entity.response.ResultDto;
import com.java110.things.service.manufacturer.IManufacturerService;
import com.java110.things.util.Assert;
import com.java110.things.util.BeanConvertUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
/**
 * @ClassName ManufacturerController
 * @Description Hardware manufacturer controller (REST endpoints for querying
 * and starting manufacturers).
 * @Author wuxw
 * @Date 2020/5/16 10:36
 * @Version 1.0
 * add by wuxw 2020/5/16
 **/
@RestController
@RequestMapping(path = "/api/manufacturer")
public class ManufacturerController extends BaseController {

    @Autowired
    private IManufacturerService manufacturerServiceImpl;

    /**
     * Queries hardware manufacturers matching the given filters.
     * (The original javadoc said "add device" — a copy-paste slip; this is a GET query.)
     *
     * @param hmId   manufacturer ID (optional filter)
     * @param hmName manufacturer name (optional filter)
     * @param hmType manufacturer type (required filter)
     * @return HTTP response wrapping the query result
     * @throws Exception
     */
    @RequestMapping(path = "/getManufacturers", method = RequestMethod.GET)
    public ResponseEntity<String> getManufacturers(@RequestParam(name = "hmId", required = false) String hmId,
                                                   @RequestParam(name = "hmName", required = false) String hmName,
                                                   @RequestParam(name = "hmType", required = true) String hmType
    ) throws Exception {
        ManufacturerDto manufacturerDto = new ManufacturerDto();
        manufacturerDto.setHmId(hmId);
        manufacturerDto.setHmName(hmName);
        manufacturerDto.setHmType(hmType);
        ResultDto resultDto = manufacturerServiceImpl.getManufacturer(manufacturerDto);
        return super.createResponseEntity(resultDto);
    }

    /**
     * Starts (enables) a manufacturer.
     *
     * @param param JSON payload with manufacturer info; hmId and hmType are required
     * @return HTTP response indicating success or failure
     * @throws Exception
     */
    @RequestMapping(path = "/startManufacturer", method = RequestMethod.POST)
    public ResponseEntity<String> startManufacturers(@RequestBody String param
    ) throws Exception {
        JSONObject paramObj = super.getParamJson(param);
        Assert.hasKeyAndValue(paramObj, "hmId", "请求报文中未包含厂商编码");
        Assert.hasKeyAndValue(paramObj, "hmType", "请求报文中未包含厂商类型");
        ResultDto resultDto = manufacturerServiceImpl.startManufacturer(BeanConvertUtil.covertBean(paramObj, ManufacturerDto.class));
        return super.createResponseEntity(resultDto);
    }
}
|
// Log today's date as YYYY-M-D (month and day are intentionally not zero-padded).
const date = new Date();
const dateParts = [date.getFullYear(), date.getMonth() + 1, date.getDate()];
console.log(dateParts.join('-'));
TERMUX_PKG_HOMEPAGE=https://mediaarea.net/en/MediaInfo
TERMUX_PKG_DESCRIPTION="Library for reading information from media files"
TERMUX_PKG_LICENSE="BSD 2-Clause"
TERMUX_PKG_LICENSE_FILE="../../../LICENSE"
TERMUX_PKG_VERSION=20.09
TERMUX_PKG_SRCURL=https://mediaarea.net/download/source/libmediainfo/${TERMUX_PKG_VERSION}/libmediainfo_${TERMUX_PKG_VERSION}.tar.gz
TERMUX_PKG_SHA256=a3d51fae55d8838e372def5d3fd172a795fa93ffa3389214bedb89e9dec90154
TERMUX_PKG_DEPENDS="libcurl, libzen, zlib"
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="--enable-shared --enable-static --with-libcurl"

termux_step_pre_configure() {
	# The autotools project lives in a subdirectory of the extracted tarball;
	# repoint the src/build dirs and generate the configure script.
	TERMUX_PKG_SRCDIR="${TERMUX_PKG_SRCDIR}/Project/GNU/Library"
	TERMUX_PKG_BUILDDIR="${TERMUX_PKG_SRCDIR}"
	cd "${TERMUX_PKG_SRCDIR}"
	./autogen.sh
}

termux_step_post_make_install() {
	# Provide the versioned soname symlinks dependents expect.
	ln -sf "${TERMUX_PREFIX}/lib/libmediainfo.so" "${TERMUX_PREFIX}/lib/libmediainfo.so.0"
	ln -sf "${TERMUX_PREFIX}/lib/libmediainfo.so" "${TERMUX_PREFIX}/lib/libmediainfo.so.0.0"
}
|
// Package convert provides functions for type conversion.
package convert
// ToInteger converts its argument to an Integer.
// NOTE(review): stub — always returns 0 regardless of input; intended
// conversion semantics are not visible here.
func ToInteger(v interface{}) int64 {
	return 0
}
// ToByteArray converts its argument to a ByteArray.
// NOTE(review): stub — always returns nil regardless of input.
func ToByteArray(v interface{}) []byte {
	return nil
}
// ToBool converts its argument to a Boolean.
// NOTE(review): stub — always returns false regardless of input.
func ToBool(v interface{}) bool {
	return false
}
|
#!/bin/sh -e
set -x
# Clean up and format the codebase: strip unused imports/variables with
# autoflake, then apply black and isort. __init__.py files are excluded
# from the first two passes to preserve re-export-style imports.
autoflake --remove-all-unused-imports --recursive --remove-unused-variables --in-place run.py app cogs --exclude=__init__.py
black run.py app cogs --exclude=__init__.py
isort run.py app cogs
#!/bin/bash
# SLURM job: run fMRIPrep (20.2.1 LTS) for participant 7308293 of the CIMA-Q
# dataset, staging inputs on node-local scratch and copying results back.
#SBATCH --account=rrg-pbellec
#SBATCH --job-name=smriprep_sub-7308293.job
#SBATCH --output=/scratch/fnadeau/cima-q/1633992344/smriprep_sub-7308293.out
#SBATCH --error=/scratch/fnadeau/cima-q/1633992344/smriprep_sub-7308293.err
#SBATCH --time=24:00:00
#SBATCH --cpus-per-task=16
#SBATCH --mem-per-cpu=4096M
#SBATCH --mail-user=francois.nadeau.1@umontreal.ca
#SBATCH --mail-type=BEGIN
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL

# fMRIPrep needs a FreeSurfer license and a templateflow cache inside the container.
export SINGULARITYENV_FS_LICENSE=$HOME/.freesurfer.txt
export SINGULARITYENV_TEMPLATEFLOW_HOME=/templateflow
module load singularity/3.8

#copying input dataset into local scratch space
rsync -rltv --info=progress2 --exclude="sub*" --exclude="derivatives" /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q $SLURM_TMPDIR
rsync -rltv --info=progress2 /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/sub-7308293 $SLURM_TMPDIR/cima-q

# Run fMRIPrep for the single participant; the work dir stays on local scratch.
singularity run --cleanenv -B $SLURM_TMPDIR:/DATA -B /home/fnadeau/.cache/templateflow:/templateflow -B /etc/pki:/etc/pki/ -B /scratch/fnadeau/cima-q/1633992344:/OUTPUT /lustre03/project/6003287/containers/fmriprep-20.2.1lts.sif -w /DATA/fmriprep_work --participant-label 7308293 --bids-database-dir /DATA/cima-q/.pybids_cache --bids-filter-file /OUTPUT/bids_filters.json --output-spaces MNI152NLin2009cAsym MNI152NLin6Asym fsnative anat --output-layout bids --notrack --skip_bids_validation --write-graph --omp-nthreads 8 --nprocs 16 --mem_mb 65536 --resource-monitor /DATA/cima-q /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives participant
fmriprep_exitcode=$?

# On failure keep the work dir for debugging; on success save the resource
# monitor log and copy derivatives back to scratch.
if [ $fmriprep_exitcode -ne 0 ] ; then cp -R $SLURM_TMPDIR/fmriprep_work /scratch/fnadeau/cima-q/1633992344/smriprep_sub-7308293.workdir ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then cp $SLURM_TMPDIR/fmriprep_work/fmriprep_wf/resource_monitor.json /scratch/fnadeau/cima-q/1633992344/smriprep_sub-7308293_resource_monitor.json ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then mkdir -p /scratch/fnadeau/cima-q/1633992344//project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives-cima-q ; fi
if [ $fmriprep_exitcode -eq 0 ] ; then cp -R /project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives/* /scratch/fnadeau/cima-q/1633992344//project/rrg-pbellec/fnadeau/fnadeau_cimaq_preproc/data/cima-q/derivatives-cima-q/ ; fi
exit $fmriprep_exitcode
|
#!/usr/bin/env bash
#
# Start a validator
#
here=$(dirname "$0")
# shellcheck source=multinode-demo/common.sh
source "$here"/common.sh

# Arguments always passed to the validator binary.
args=(
  --max-genesis-archive-unpacked-size 1073741824
  --no-poh-speed-test
)

airdrops_enabled=1
# Default funding for the node's identity account: transaction fees and vote
# account rent exemption (ignored if airdrops_enabled=0).
# FIX: the rest of this script (the usage text, --node-sol parsing, and the
# setup_validator_accounts call) reads $node_sol, but the default used to be
# assigned only to the unused $node_anlog — leaving the default empty unless
# the flag was passed explicitly.
node_sol=500
node_anlog=$node_sol # kept for backward compatibility with the ANLOG naming
label=
identity=
vote_account=
no_restart=0
gossip_entrypoint=
ledger_dir=
maybe_allow_private_addr=
# Print an optional error message ($* if $1 is non-empty), then the help
# text, and exit non-zero. Note: the heredoc interpolates $node_sol, so the
# default funding amount must be assigned before usage can be called.
usage() {
  if [[ -n $1 ]]; then
    echo "$*"
    echo
  fi
  cat <<EOF
usage: $0 [OPTIONS] [cluster entry point hostname]
Start a validator with no stake
OPTIONS:
--ledger PATH - store ledger under this PATH
--init-complete-file FILE - create this file, if it doesn't already exist, once node initialization is complete
--label LABEL - Append the given label to the configuration files, useful when running
multiple validators in the same workspace
--node-anlog ANLOG - Number of ANLOG this node has been funded from the genesis config (default: $node_sol)
--no-voting - start node without vote signer
--rpc-port port - custom RPC port for this node
--no-restart - do not restart the node if it exits
--no-airdrop - The genesis config has an account for the node. Airdrops are not required.
EOF
  exit 1
}
maybeRequireTower=true
positional_args=()
# Argument parsing: wrapper-only options are consumed here; recognized
# analog-validator options are collected into the args array and forwarded.
while [[ -n $1 ]]; do
  if [[ ${1:0:1} = - ]]; then
    # validator.sh-only options
    if [[ $1 = --label ]]; then
      label="-$2"
      shift 2
    elif [[ $1 = --no-restart ]]; then
      no_restart=1
      shift
    elif [[ $1 = --node-anlog || $1 = --node-sol ]]; then
      # FIX: --node-anlog is the flag advertised in the usage text but was
      # previously rejected as unknown; --node-sol remains as a
      # backward-compatible alias.
      node_sol="$2"
      shift 2
    elif [[ $1 = --no-airdrop ]]; then
      airdrops_enabled=0
      shift
    # analog-validator options
    elif [[ $1 = --expected-genesis-hash ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --expected-shred-version ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --identity ]]; then
      identity=$2
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --authorized-voter ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --authorized-withdrawer ]]; then
      # Consumed locally (used for keypair generation and vote-account
      # creation below), not forwarded to the validator binary.
      authorized_withdrawer=$2
      shift 2
    elif [[ $1 = --vote-account ]]; then
      vote_account=$2
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --init-complete-file ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --ledger ]]; then
      ledger_dir=$2
      shift 2
    elif [[ $1 = --entrypoint ]]; then
      gossip_entrypoint=$2
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --no-snapshot-fetch ]]; then
      args+=("$1")
      shift
    elif [[ $1 = --no-voting ]]; then
      args+=("$1")
      shift
    elif [[ $1 = --dev-no-sigverify ]]; then
      args+=("$1")
      shift
    elif [[ $1 = --dev-halt-at-slot ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --rpc-port ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --rpc-faucet-address ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --accounts ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --gossip-port ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --dynamic-port-range ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --snapshot-interval-slots ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --maximum-snapshots-to-retain ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --limit-ledger-size ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --no-rocksdb-compaction ]]; then
      args+=("$1")
      shift
    elif [[ $1 = --enable-rpc-transaction-history ]]; then
      args+=("$1")
      shift
    elif [[ $1 = --enable-cpi-and-log-storage ]]; then
      args+=("$1")
      shift
    elif [[ $1 = --skip-poh-verify ]]; then
      args+=("$1")
      shift
    elif [[ $1 = --log ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --known-validator ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 = --halt-on-known-validators-accounts-hash-mismatch ]]; then
      args+=("$1")
      shift
    elif [[ $1 = --max-genesis-archive-unpacked-size ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 == --wait-for-supermajority ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 == --expected-bank-hash ]]; then
      args+=("$1" "$2")
      shift 2
    elif [[ $1 == --allow-private-addr ]]; then
      args+=("$1")
      # Also remembered separately so it can be passed to analog-gossip below.
      maybe_allow_private_addr=$1
      shift
    elif [[ $1 == --accounts-db-skip-shrink ]]; then
      args+=("$1")
      shift
    elif [[ $1 == --skip-require-tower ]]; then
      maybeRequireTower=false
      shift
    elif [[ $1 = -h ]]; then
      usage "$@"
    else
      echo "Unknown argument: $1"
      exit 1
    fi
  else
    positional_args+=("$1")
    shift
  fi
done
# Abort when the GPU probe (ANALOG_GPU_MISSING, set outside this script —
# presumably by common.sh; confirm) reports that no GPUs were found.
if [[ "$ANALOG_GPU_MISSING" -eq 1 ]]; then
  echo "Testnet requires GPUs, but none were found! Aborting..."
  exit 1
fi

# At most one positional argument (the entrypoint hostname) is accepted.
if [[ ${#positional_args[@]} -gt 1 ]]; then
  usage "$@"
fi

# When the caller demands an explicit ledger dir, require it and root the
# config directory there.
if [[ -n $REQUIRE_LEDGER_DIR ]]; then
  if [[ -z $ledger_dir ]]; then
    usage "Error: --ledger not specified"
  fi
  ANALOG_CONFIG_DIR="$ledger_dir"
fi
# When REQUIRE_KEYPAIRS is set by the caller, all three keypairs must be
# supplied explicitly rather than generated on the fly later in the script.
if [[ -n $REQUIRE_KEYPAIRS ]]; then
  if [[ -z $identity ]]; then
    usage "Error: --identity not specified"
  fi
  if [[ -z $vote_account ]]; then
    usage "Error: --vote-account not specified"
  fi
  if [[ -z $authorized_withdrawer ]]; then
    # FIX: the message previously said "--authorized_withdrawer" (with an
    # underscore), which is not the flag the parser actually accepts.
    usage "Error: --authorized-withdrawer not specified"
  fi
fi
# Default the ledger location when --ledger was not given.
if [[ -z "$ledger_dir" ]]; then
  ledger_dir="$ANALOG_CONFIG_DIR/validator$label"
fi
mkdir -p "$ledger_dir"

if [[ -n $gossip_entrypoint ]]; then
  # Prefer the --entrypoint argument if supplied...
  if [[ ${#positional_args[@]} -gt 0 ]]; then
    usage "$@"
  fi
else
  # ...but also support providing the entrypoint's hostname as the first
  # positional argument
  entrypoint_hostname=${positional_args[0]}
  if [[ -z $entrypoint_hostname ]]; then
    gossip_entrypoint=127.0.0.1:8001
  else
    gossip_entrypoint="$entrypoint_hostname":8001
  fi
fi
# The faucet is addressed on port 9900 of the entrypoint host.
faucet_address="${gossip_entrypoint%:*}":9900

# Fall back to keypair files stored inside the ledger directory.
: "${identity:=$ledger_dir/identity.json}"
: "${vote_account:=$ledger_dir/vote-account.json}"
: "${authorized_withdrawer:=$ledger_dir/authorized-withdrawer.json}"

# default_arg comes from common.sh — presumably it appends the flag only
# when the caller did not already supply it; TODO confirm.
default_arg --entrypoint "$gossip_entrypoint"
if ((airdrops_enabled)); then
  default_arg --rpc-faucet-address "$faucet_address"
fi
default_arg --identity "$identity"
default_arg --vote-account "$vote_account"
default_arg --ledger "$ledger_dir"
default_arg --log -
if [[ $maybeRequireTower = true ]]; then
  default_arg --require-tower
fi

# Choose the CUDA build of the validator when ANALOG_CUDA is set.
if [[ -n $ANALOG_CUDA ]]; then
  program=$analog_validator_cuda
else
  program=$analog_validator
fi

set -e
PS4="$(basename "$0"): "
pid=
# Terminate the validator process tracked in $pid (if any) and wait for it;
# clears $pid first so the monitor loop won't race on a dead process.
kill_node() {
  # Note: do not echo anything from this function to ensure $pid is actually
  # killed when stdout/stderr are redirected
  set +ex
  if [[ -n $pid ]]; then
    declare _pid=$pid
    pid=
    kill "$_pid" || true
    wait "$_pid" || true
  fi
}
# Stop the validator (if running), then terminate this script.
kill_node_and_exit() {
  kill_node
  exit
}

# Clean up the child validator on interrupt, termination, or error (set -e).
trap 'kill_node_and_exit' INT TERM ERR

# Run an analog CLI command as this validator's identity against the
# cluster's RPC endpoint; subshell keeps set -x local to the call.
wallet() {
  (
    set -x
    $analog_cli --keypair "$identity" --url "$rpc_url" "$@"
  )
}
# Fund the validator identity (via faucet transfer, when airdrops are
# enabled) and create its vote account if it does not already exist.
#   $1 - amount of ANLOG to transfer to the identity account
# Set SKIP_ACCOUNTS_CREATION to bypass account setup entirely.
setup_validator_accounts() {
  declare node_sol=$1

  if [[ -n "$SKIP_ACCOUNTS_CREATION" ]]; then
    return 0
  fi

  # A missing vote account implies a fresh validator: fund it, then create it.
  if ! wallet vote-account "$vote_account"; then
    if ((airdrops_enabled)); then
      echo "Adding $node_sol to validator identity account:"
      (
        set -x
        $analog_cli \
          --keypair "$ANALOG_CONFIG_DIR/faucet.json" --url "$rpc_url" \
          transfer --allow-unfunded-recipient "$identity" "$node_sol"
      ) || return $?
    fi

    echo "Creating validator vote account"
    wallet create-vote-account "$vote_account" "$identity" "$authorized_withdrawer" || return $?
  fi
  echo "Validator vote account configured"

  echo "Validator identity account balance:"
  wallet balance || return $?

  return 0
}
# shellcheck disable=SC2086 # Don't want to double quote "$maybe_allow_private_addr"
rpc_url=$($analog_gossip $maybe_allow_private_addr rpc-url --timeout 180 --entrypoint "$gossip_entrypoint")

# Generate any keypairs that don't exist yet (-so: no BIP39 passphrase,
# silent overwrite flags per analog-keygen; file is only created if missing).
[[ -r "$identity" ]] || $analog_keygen new --no-passphrase -so "$identity"
[[ -r "$vote_account" ]] || $analog_keygen new --no-passphrase -so "$vote_account"
[[ -r "$authorized_withdrawer" ]] || $analog_keygen new --no-passphrase -so "$authorized_withdrawer"

setup_validator_accounts "$node_sol"

# Launch the validator and keep it alive: unless --no-restart was given,
# poll once per second and relaunch whenever the process dies.
while true; do
  echo "$PS4$program ${args[*]}"
  $program "${args[@]}" &
  pid=$!
  echo "pid: $pid"

  if ((no_restart)); then
    wait "$pid"
    exit $?
  fi

  while true; do
    if [[ -z $pid ]] || ! kill -0 "$pid"; then
      echo "############## validator exited, restarting ##############"
      break
    fi
    sleep 1
  done

  kill_node
done
|
require('should');
const request = require('supertest');
const mongoose = require('mongoose');

// Select the test environment BEFORE loading the app: setting ENV after
// require('../app') is too late, since the app has presumably already read
// process.env.ENV during its module initialization.
process.env.ENV = 'Test';
const app = require('../app');

const Book = mongoose.model('Book');
const agent = request.agent(app);
describe('Book CRUD Test:', () => {
  it('should allow a book to be posted and return read and _id', (done) => {
    const testBook = {
      title: 'War and Peace',
      genre: 'Historical Fiction',
      author: '<NAME>',
      bookId: 656
    };
    agent.post('/api/books')
      .send(testBook)
      .expect(200)
      .end((err, results) => {
        // Surface transport/status failures instead of silently ignoring err.
        if (err) return done(err);
        results.body.should.have.property('_id');
        done();
      });
  });
  afterEach((done) => {
    // Wait for the deletion to complete before signalling done; the original
    // fired done() immediately, letting test data leak into the next test.
    Book.deleteMany({}).exec()
      .then(() => done())
      .catch(done);
  });
  after((done) => {
    mongoose.connection.close();
    // Pass `done` as the close callback. The original called done() —
    // invoking it immediately and handing close() its (undefined) return
    // value — so the hook ended before the server had actually closed.
    app.server.close(done);
  });
});
|
// Copyright (c) 2022 <NAME>. All Rights Reserved.
// https://github.com/cinar/indicatorts
import { deepStrictEqual } from 'assert';
import { roundDigitsAll } from '../../helper/numArray';
import { mstd } from './mstd';
describe('Standard deviation', () => {
  it('should be able to compute std', () => {
    // Moving standard deviation over a 4-element window, compared to
    // precomputed values rounded to 3 digits.
    const period = 4;
    const input = [2, 4, 6, 8, 12, 14, 16, 18, 20];
    const want = [0, 0, 0, 2.236, 2.958, 3.162, 2.958, 2.236, 2.236];

    const got = roundDigitsAll(3, mstd(period, input));
    deepStrictEqual(got, want);
  });
});
|
import {
GraphQLInt,
GraphQLObjectType,
GraphQLString,
} from 'graphql';
import { TypedFields } from '../../../lib/strongTypes';
/** Details about a single media item at a specific size. */
export interface MediaSize {
    /** File name including extension. */
    file: string;
    /** The height of the file. */
    height: number;
    /** The mime_type for the file. */
    mime_type: string;
    /** The name of the size. (eg. "thumbnail") */
    slug: string;
    /** The full URL for the file. */
    source_url: string;
    /** The width of the file. */
    width: number;
}
// GraphQL field config mirroring the MediaSize interface; TypedFields keeps
// the two declarations in sync, and each description matches the doc comment
// on the corresponding interface property.
const mediaSizeFields: TypedFields<MediaSize> = {
    file: {
        description: 'File name including extension.',
        type: GraphQLString,
    },
    height: {
        description: 'The height of the file.',
        type: GraphQLInt,
    },
    mime_type: {
        description: 'The mime_type for the file.',
        type: GraphQLString,
    },
    slug: {
        description: 'The name of the size. (eg. "thumbnail")',
        type: GraphQLString,
    },
    source_url: {
        description: 'The full URL for the file.',
        type: GraphQLString,
    },
    width: {
        description: 'The width of the file.',
        type: GraphQLInt,
    },
};
/**
 * GraphQL object type exposing MediaSize; field definitions are shared with
 * the TypeScript interface via mediaSizeFields.
 */
export default new GraphQLObjectType({
    name: 'MediaSize',
    description: 'Details about a single media item at a specific size.',
    // Thunk form defers field evaluation until schema construction.
    fields: () => ({
        ...mediaSizeFields,
    }),
});
|
#!/bin/bash
# MI05B: submit one performance-evaluation sbatch job per ensemble-model
# prediction file. $1 is a job-dependency spec forwarded to sbatch; the
# submitted job IDs are echoed (colon-separated) for the next pipeline step.
regenerate_performances=false
memory=2G

#generate file with list of ensemble models (avoids the trouble with parsing files with * character)
file_list_ensemble_models="../data/list_ensemble_models.txt"
ls ../data/Predictions_*_\*_* > $file_list_ensemble_models

#parse the file line by line to submit a job for each ensemble model
declare -a IDs=()
while IFS= read -r model
do
# Split the underscore-delimited file name into its hyperparameter fields.
IFS='_' read -ra PARAMETERS <<< ${model%".csv"}
pred_type="${PARAMETERS[1]}"
target="${PARAMETERS[2]}"
organ="${PARAMETERS[3]}"
view="${PARAMETERS[4]}"
transformation="${PARAMETERS[5]}"
architecture="${PARAMETERS[6]}"
n_fc_layers="${PARAMETERS[7]}"
n_fc_nodes="${PARAMETERS[8]}"
optimizer="${PARAMETERS[9]}"
learning_rate="${PARAMETERS[10]}"
weight_decay="${PARAMETERS[11]}"
dropout_rate="${PARAMETERS[12]}"
data_augmentation_factor="${PARAMETERS[13]}"
fold="${PARAMETERS[14]}"
version="${pred_type}_${target}_${organ}_${view}_${transformation}_${architecture}_${n_fc_layers}_${n_fc_nodes}_${optimizer}_${learning_rate}_${weight_decay}_${dropout_rate}_${data_augmentation_factor}_${fold}"
name=MI05B-"$version"
job_name="$name.job"
out_file="../eo/$name.out"
err_file="../eo/$name.err"
time=15 #5
#time=2 #debug mode
#allocate more time for the training fold because of the larger sample size
if [ $fold == "train" ]; then
time=$(( 8*$time ))
fi
#check if the predictions have already been generated. If not, do not run the model.
# NOTE(review): `break` aborts the entire loop here, so every model listed
# after the missing one is silently skipped as well — presumably `continue`
# (skip only this model) was intended; confirm before changing.
if ! test -f "../data/Predictions_${version}.csv"; then
echo The predictions at "../data/Predictions_${version}.csv" cannot be found. The job cannot be run.
break
fi
#if regenerate_performances option is on or if the performances have not yet been generated, run the job
if ! test -f "../data/Performances_${version}.csv" || $regenerate_performances; then
echo Submitting job for "$version"
ID=$(sbatch --dependency=$1 --error=$err_file --output=$out_file --job-name=$job_name --mem-per-cpu=$memory -t $time MI04A05B_Performances_generate.sh "${target}" "${organ}" "${view}" "${transformation}" "${architecture}" "${n_fc_layers}" "${n_fc_nodes}" "${optimizer}" "${learning_rate}" "${weight_decay}" "${dropout_rate}" "${data_augmentation_factor}" "${fold}" "${pred_type}")
IDs+=($ID)
#else
# echo Performance for $version have already been generated.
fi
done < "$file_list_ensemble_models"
# Produce the list of job dependencies for the next step
printf -v IDs_list '%s:' "${IDs[@]}"
dependencies="${IDs_list%:}"
echo $dependencies
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.