text stringlengths 1 1.05M |
|---|
<reponame>mokiat/bricksweep
'use strict';

// Fraction of a brick's width/height the user must drag before the drag is
// committed as an actual shift of the row/column segment.
const DISPLACE_THRESHOLD = 0.5;

// System implementing brick dragging. While a drag is in progress, the
// contiguous segment of displaceable bricks in the dragged row/column is
// visually offset, and a translucent "ghost" brick previews the brick that
// would wrap around to the other end. Once the drag exceeds
// DISPLACE_THRESHOLD of a brick dimension, the segment is actually rotated
// by one slot on the board (counted as one move).
class DisplaceSystem {
  // ecsManager: entity/component store; board: grid lookup and screen-space
  // math; facade: performs the actual brick swaps on the game model.
  constructor(ecsManager, board, facade) {
    this.ecsManager = ecsManager;
    this.board = board;
    this.facade = facade;
    // Preview entity showing the wrap-around brick; (re)created in reset().
    this.ghostBrick = ECS_INVALID_ENTITY;
    // Matches positioned bricks that support displacement.
    this.displacementQuery = new ECSQuery(
      (entity) => ecsManager.hasComponent(entity, 'position'),
      (entity) => ecsManager.hasComponent(entity, 'brick'),
      (entity) => ecsManager.hasComponent(entity, 'displacement')
    );
    // Screen coordinates where the current drag began.
    this.dragStart = new Vec2();
    this.reset();
  }

  // Clears the move counter and any drag state; recreates the ghost brick.
  reset() {
    this.moves = 0;
    this.selectedEntity = ECS_INVALID_ENTITY;
    if (this.ecsManager.hasEntity(this.ghostBrick)) {
      this.ecsManager.deleteEntity(this.ghostBrick);
    }
    this.ghostBrick = this._createGhostBrickEntity();
  }

  // Number of committed shifts since the last reset().
  getMoves() {
    return this.moves;
  }

  // Per-frame update: snaps every actively displaced brick to its ideal
  // board position plus the current displacement offset.
  update() {
    for (let entity of this.ecsManager.search(this.displacementQuery)) {
      const displacementComponent = this.ecsManager.getComponent(entity, 'displacement');
      if (!displacementComponent.active) {
        continue;
      }
      const positionComponent = this.ecsManager.getComponent(entity, 'position');
      const brickComponent = this.ecsManager.getComponent(entity, 'brick');
      const position = this.board.calculateBrickIdealScreenPosition(brickComponent.row, brickComponent.column);
      position.inc(displacementComponent.amount);
      positionComponent.coords.setTo(position);
    }
  }

  // Begins a drag if the press landed on a displaceable brick.
  onMouseDown(x, y) {
    // console.log('[displace system] mouse down: %d / %d', x, y);
    const mousePosition = new Vec2(x, y);
    for (let entity of this.ecsManager.search(this.displacementQuery)) {
      if (this._isEntityUnderMouse(entity, mousePosition)) {
        this.dragStart.setTo(mousePosition);
        this.selectedEntity = entity;
        break;
      }
    }
  }

  // Releasing the mouse always cancels any in-progress, uncommitted drag.
  onMouseUp(x, y) {
    // console.log('[displace system] mouse up: %d / %d', x, y);
    this._cancelDisplacement();
  }

  // Tracks a drag in progress. Returns true only when the drag crossed the
  // threshold and a shift was committed; false in every other case.
  onMouseMove(x, y) {
    this._resetDisplacedBricks();
    if (!this.ecsManager.hasEntity(this.selectedEntity)) {
      this.selectedEntity = ECS_INVALID_ENTITY;
      return false;
    }
    const mousePosition = new Vec2(x, y);
    // getDec presumably returns (mousePosition - dragStart) — the drag vector.
    const dragDistance = mousePosition.getDec(this.dragStart);
    const brickComponent = this.ecsManager.getComponent(this.selectedEntity, 'brick');
    // The dominant axis of the drag decides row vs. column movement.
    const isHorizontalDrag = Math.abs(dragDistance.x) > Math.abs(dragDistance.y);
    if (isHorizontalDrag) {
      // Past the threshold to the right: rotate the row segment right.
      if (dragDistance.x > BRICK_WIDTH * DISPLACE_THRESHOLD) {
        if (this._shiftBricksHorizontally(brickComponent.row, brickComponent.column, +1)) {
          this.moves++;
          return true;
        }
        this._cancelDisplacement();
        return false;
      }
      // Past the threshold to the left: rotate the row segment left.
      if (dragDistance.x < -BRICK_WIDTH * DISPLACE_THRESHOLD) {
        if (this._shiftBricksHorizontally(brickComponent.row, brickComponent.column, -1)) {
          this.moves++;
          return true;
        }
        this._cancelDisplacement();
        return false;
      }
      // Below the threshold: only preview the displacement.
      this._displaceBricksHorizontally(brickComponent.row, brickComponent.column, dragDistance.x);
    } else {
      if (dragDistance.y > BRICK_HEIGHT * DISPLACE_THRESHOLD) {
        if (this._shiftBricksVertically(brickComponent.row, brickComponent.column, +1)) {
          this.moves++;
          return true;
        }
        this._cancelDisplacement();
        return false;
      }
      if (dragDistance.y < -BRICK_HEIGHT * DISPLACE_THRESHOLD) {
        if (this._shiftBricksVertically(brickComponent.row, brickComponent.column, -1)) {
          this.moves++;
          return true;
        }
        this._cancelDisplacement();
        return false;
      }
      this._displaceBricksVertically(brickComponent.row, brickComponent.column, dragDistance.y);
    }
    return false;
  }

  // Creates the translucent preview entity. spriteClass is empty until a
  // drag determines which brick's appearance to mirror.
  _createGhostBrickEntity() {
    const entity = this.ecsManager.createEntity();
    const positionComponent = new PositionComponent();
    this.ecsManager.addComponent(entity, 'position', positionComponent);
    const spriteComponent = new SpriteComponent();
    spriteComponent.width = BRICK_WIDTH;
    spriteComponent.height = BRICK_HEIGHT;
    spriteComponent.opacity = 0.5;
    spriteComponent.spriteClass = '';
    // Drawn above regular bricks (which use depth 50; see _resetDisplacedBricks).
    spriteComponent.depth = 80;
    this.ecsManager.addComponent(entity, 'sprite', spriteComponent);
    return entity;
  }

  // Axis-aligned bounding-box hit test against the entity's brick rectangle.
  _isEntityUnderMouse(entity, mousePosition) {
    const positionComponent = this.ecsManager.getComponent(entity, 'position');
    const entityCoords = positionComponent.coords;
    return (entityCoords.x <= mousePosition.x) &&
      (entityCoords.y <= mousePosition.y) &&
      (entityCoords.x + BRICK_WIDTH > mousePosition.x) &&
      (entityCoords.y + BRICK_HEIGHT > mousePosition.y);
  }

  // Aborts the current drag: deselects, restores brick visuals, and hides
  // the ghost brick by clearing its sprite class.
  _cancelDisplacement() {
    this.selectedEntity = ECS_INVALID_ENTITY;
    this._resetDisplacedBricks();
    const spriteComponent = this.ecsManager.getComponent(this.ghostBrick, 'sprite');
    spriteComponent.spriteClass = '';
  }

  // Restores every displaceable brick to its resting visual state.
  _resetDisplacedBricks() {
    for (let entity of this.ecsManager.search(this.displacementQuery)) {
      const displacementComponent = this.ecsManager.getComponent(entity, 'displacement');
      displacementComponent.active = false;
      const spriteComponent = this.ecsManager.getComponent(entity, 'sprite');
      spriteComponent.opacity = 1.0;
      spriteComponent.depth = 50;
    }
  }

  // Previews a horizontal drag: offsets the contiguous segment around
  // (row, column) and positions the ghost brick one slot beyond the segment
  // on the side the wrap-around brick would enter from, copying the sprite
  // of the brick at the opposite end.
  _displaceBricksHorizontally(row, column, horizontalDistance) {
    const minColumn = this._findFurthestHorizontally(row, column, -1);
    const maxColumn = this._findFurthestHorizontally(row, column, +1);
    const displaceAmount = new Vec2(horizontalDistance, 0.0);
    if (horizontalDistance > 0) {
      // Dragging right: the brick at maxColumn would wrap to the left side.
      const ghostPositionComponent = this.ecsManager.getComponent(this.ghostBrick, 'position');
      ghostPositionComponent.coords.setTo(this.board.calculateBrickIdealScreenPosition(row, minColumn - 1));
      ghostPositionComponent.coords.inc(displaceAmount);
      const brick = this.board.getBrick(row, maxColumn);
      const brickSpriteComponent = this.ecsManager.getComponent(brick, 'sprite');
      const ghostSpriteComponent = this.ecsManager.getComponent(this.ghostBrick, 'sprite');
      ghostSpriteComponent.spriteClass = brickSpriteComponent.spriteClass;
    } else {
      // Dragging left: the brick at minColumn would wrap to the right side.
      const ghostPositionComponent = this.ecsManager.getComponent(this.ghostBrick, 'position');
      ghostPositionComponent.coords.setTo(this.board.calculateBrickIdealScreenPosition(row, maxColumn + 1));
      ghostPositionComponent.coords.inc(displaceAmount);
      const brick = this.board.getBrick(row, minColumn);
      const brickSpriteComponent = this.ecsManager.getComponent(brick, 'sprite');
      const ghostSpriteComponent = this.ecsManager.getComponent(this.ghostBrick, 'sprite');
      ghostSpriteComponent.spriteClass = brickSpriteComponent.spriteClass;
    }
    for (let c = minColumn; c <= maxColumn; c++) {
      this._displaceBrick(row, c, displaceAmount);
    }
  }

  // Vertical counterpart of _displaceBricksHorizontally.
  _displaceBricksVertically(row, column, verticalDistance) {
    const minRow = this._findFurthestVertically(row, column, -1);
    const maxRow = this._findFurthestVertically(row, column, +1);
    const displaceAmount = new Vec2(0.0, verticalDistance);
    if (verticalDistance > 0) {
      const ghostPositionComponent = this.ecsManager.getComponent(this.ghostBrick, 'position');
      ghostPositionComponent.coords.setTo(this.board.calculateBrickIdealScreenPosition(minRow - 1, column));
      ghostPositionComponent.coords.inc(displaceAmount);
      const brick = this.board.getBrick(maxRow, column);
      const brickSpriteComponent = this.ecsManager.getComponent(brick, 'sprite');
      const ghostSpriteComponent = this.ecsManager.getComponent(this.ghostBrick, 'sprite');
      ghostSpriteComponent.spriteClass = brickSpriteComponent.spriteClass;
    } else {
      const ghostPositionComponent = this.ecsManager.getComponent(this.ghostBrick, 'position');
      ghostPositionComponent.coords.setTo(this.board.calculateBrickIdealScreenPosition(maxRow + 1, column));
      ghostPositionComponent.coords.inc(displaceAmount);
      const brick = this.board.getBrick(minRow, column);
      const brickSpriteComponent = this.ecsManager.getComponent(brick, 'sprite');
      const ghostSpriteComponent = this.ecsManager.getComponent(this.ghostBrick, 'sprite');
      ghostSpriteComponent.spriteClass = brickSpriteComponent.spriteClass;
    }
    for (let r = minRow; r <= maxRow; r++) {
      this._displaceBrick(r, column, displaceAmount);
    }
  }

  // Marks the brick at (row, column) as displaced by `amount` and raises its
  // sprite above resting bricks. Silently skips empty board slots.
  _displaceBrick(row, column, amount) {
    const entity = this.board.getBrick(row, column);
    if (entity === ECS_INVALID_ENTITY) {
      return;
    }
    const displacementComponent = this.ecsManager.getComponent(entity, 'displacement');
    displacementComponent.active = true;
    displacementComponent.amount.setTo(amount);
    const spriteComponent = this.ecsManager.getComponent(entity, 'sprite');
    spriteComponent.opacity = 0.5;
    spriteComponent.depth = 80;
  }

  // Rotates the row segment containing (row, column) by one slot via
  // adjacent swaps. Returns false when the segment is a single brick.
  _shiftBricksHorizontally(row, column, direction) {
    const minColumn = this._findFurthestHorizontally(row, column, -1);
    const maxColumn = this._findFurthestHorizontally(row, column, +1);
    if (minColumn === maxColumn) {
      return false;
    }
    if (direction < 0) {
      for (let c = minColumn; c < maxColumn; c++) {
        this.facade.swapBricks(row, c, row, c + 1);
      }
    } else {
      for (let c = maxColumn; c > minColumn; c--) {
        this.facade.swapBricks(row, c, row, c - 1);
      }
    }
    return true;
  }

  // Vertical counterpart of _shiftBricksHorizontally.
  _shiftBricksVertically(row, column, direction) {
    const minRow = this._findFurthestVertically(row, column, -1);
    const maxRow = this._findFurthestVertically(row, column, +1);
    if (minRow === maxRow) {
      return false;
    }
    if (direction < 0) {
      for (let r = minRow; r < maxRow; r++) {
        this.facade.swapBricks(r, column, r + 1, column);
      }
    } else {
      for (let r = maxRow; r > minRow; r--) {
        this.facade.swapBricks(r, column, r - 1, column);
      }
    }
    return true;
  }

  // Walks from `column` in `direction` while neighboring slots hold
  // displaceable bricks; returns the last such column (inclusive).
  _findFurthestHorizontally(row, column, direction) {
    let entity = this.board.getBrick(row, column + direction);
    while (this.ecsManager.hasEntity(entity) && this.ecsManager.hasComponent(entity, 'displacement')) {
      column += direction;
      entity = this.board.getBrick(row, column + direction);
    }
    return column;
  }

  // Vertical counterpart of _findFurthestHorizontally.
  _findFurthestVertically(row, column, direction) {
    let entity = this.board.getBrick(row + direction, column);
    while (this.ecsManager.hasEntity(entity) && this.ecsManager.hasComponent(entity, 'displacement')) {
      row += direction;
      entity = this.board.getBrick(row + direction, column);
    }
    return row;
  }
}
|
#########################
#### Common ENV File ####
#########################
# Author: Dylan Thomas
# [NOTE]: This file should be POSIX compliant
# Common environment variables to be set in all shells
# Guard to prevent sourcing multiple times
# Guard to prevent sourcing multiple times.
# NOTE: `return` is only valid because this file is *sourced*, never executed.
[ -n "$_dt_shared_env_loaded" ] && return 0
############################
### XDG Base Directories ###
############################
# Provide defaults only when the variables are not already set.
[ -z "$XDG_CONFIG_HOME" ] && export XDG_CONFIG_HOME="$HOME/.config"
[ -z "$XDG_CACHE_HOME" ] && export XDG_CACHE_HOME="$HOME/.cache"
[ -z "$XDG_DATA_HOME" ] && export XDG_DATA_HOME="$HOME/.local/share"
#####################
### History Files ###
#####################
# Change history directory for less, redis, and sqlite
export LESSHISTFILE="$XDG_CACHE_HOME/less/history"
export SQLITE_HISTORY="$XDG_CACHE_HOME/sqlite/history"
export REDISCLI_HISTFILE="$XDG_CACHE_HOME/redis/history"
##############
### Python ###
##############
# Cache pip-installed packages to avoid re-downloading
# NOTE(review): PIP_DOWNLOAD_CACHE is ignored by modern pip (pip caches under
# XDG_CACHE_HOME by default) — confirm whether this is still needed.
export PIP_DOWNLOAD_CACHE="$XDG_CACHE_HOME/pip"
# Python startup file
export PYTHONSTARTUP="$XDG_CONFIG_HOME/python/pythonrc.py"
# Make Python use UTF-8 encoding for output to stdin, stdout, and stderr.
export PYTHONIOENCODING='UTF-8';
# Set conda config file location
export CONDARC="$XDG_CONFIG_HOME/conda/condarc"
# Set IPython directory
export IPYTHONDIR="$XDG_CONFIG_HOME/ipython"
# Set pylint home
export PYLINTHOME="$XDG_CACHE_HOME/pylint"
# Set jupyter config home
export JUPYTER_CONFIG_DIR="$XDG_CONFIG_HOME/jupyter"
#############
### C/C++ ###
#############
############
### Rust ###
############
# Rust-specific settings: keep cargo/rustup out of $HOME dotfiles.
export CARGO_HOME="$XDG_DATA_HOME/cargo"
export RUSTUP_HOME="$XDG_DATA_HOME/rustup"
##########
### Go ###
##########
# Go-specific settings: workspace in XDG data dir, binaries on the shared bin path.
export GOPATH="$XDG_DATA_HOME/go"
export GOBIN="$HOME/.local/bin"
#####################
### Miscellaneous ###
#####################
# Preferred editor for local and remote sessions
export EDITOR=vim
export VISUAL=vim
# Set the readline config file
export INPUTRC="$XDG_CONFIG_HOME/readline/inputrc"
# Path to manpages
export MANPATH="/usr/local/man:$MANPATH"
# Colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# You may need to manually set your language environment
export LANG=en_US.UTF-8
export LC_ALL='en_US.UTF-8';
###########################
### System/Library Path ###
###########################
# Save local bin and lib directories
export LOCAL_BIN="$HOME/.local/bin"
export LOCAL_LIB="$HOME/.local/lib"
## Path Manipulation. Allows adding to paths in POSIX shell without duplication.
## See: https://unix.stackexchange.com/a/124447
pathprepend() { case ":${PATH:=$1}:" in *:"$1":*) ;; *) PATH="$1:$PATH" ;; esac; }
pathappend() { case ":${PATH:=$1}:" in *:"$1":*) ;; *) PATH="$PATH:$1" ;; esac; }
# Add LOCAL_BIN to PATH if exists
[ -d "$LOCAL_BIN" ] && pathprepend "$LOCAL_BIN"
# Set lib PATH, and add LOCAL_LIB if it exists (same dedup idiom as pathprepend)
[ -z "$LD_LIBRARY_PATH" ] && export LD_LIBRARY_PATH="/usr/local/lib:/usr/lib"
case ":${LD_LIBRARY_PATH:=$LOCAL_LIB}:" in
    *:"$LOCAL_LIB":*) ;;
    *) LD_LIBRARY_PATH="$LOCAL_LIB:$LD_LIBRARY_PATH" ;;
esac
# Mark as loaded so the guard at the top short-circuits on re-source.
_dt_shared_env_loaded=y
|
<filename>raigad/src/main/java/com/netflix/raigad/monitoring/ThreadPoolStatsMonitor.java
/**
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.raigad.monitoring;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ESTransportClient;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.threadpool.ThreadPoolStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicReference;
@Singleton
/**
 * Scheduled task that samples Elasticsearch thread-pool statistics (index,
 * get, search, bulk pools) from the local node once a minute and exposes the
 * latest snapshot through Servo {@code @Monitor} getters.
 */
public class ThreadPoolStatsMonitor extends Task
{
    private static final Logger logger = LoggerFactory.getLogger(ThreadPoolStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_ThreadPoolMonitor";
    // Servo-registered reporter holding the most recent stats snapshot.
    private final Elasticsearch_ThreadPoolStatsReporter tpStatsReporter;

    @Inject
    public ThreadPoolStatsMonitor(IConfiguration config)
    {
        super(config);
        tpStatsReporter = new Elasticsearch_ThreadPoolStatsReporter();
        // Register once; Servo polls the @Monitor getters from then on.
        Monitors.registerObject(tpStatsReporter);
    }

    /**
     * Fetches thread-pool stats for the first local node and publishes them.
     * Any failure is logged and a (possibly empty) snapshot is still
     * published, so stale values do not linger after errors.
     */
    @Override
    public void execute() throws Exception {
        // If Elasticsearch is started then only start the monitoring
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        ThreadPoolStatsBean tpStatsBean = new ThreadPoolStatsBean();
        try
        {
            NodesStatsResponse ndsStatsResponse = ESTransportClient.getNodesStatsResponse(config);
            ThreadPoolStats tpstats = null;
            NodeStats ndStat = null;
            // Only the first node's stats are reported (Raigad runs one node per host).
            if (ndsStatsResponse.getNodes().length > 0) {
                ndStat = ndsStatsResponse.getAt(0);
            }
            if (ndStat == null) {
                logger.info("NodeStats is null,hence returning (No ThreadPoolStats).");
                return;
            }
            tpstats = ndStat.getThreadPool();
            if (tpstats == null) {
                logger.info("ThreadPoolStats is null,hence returning (No ThreadPoolStats).");
                return;
            }
            // Copy the four pools of interest into the snapshot bean;
            // other pools reported by Elasticsearch are ignored.
            Iterator<ThreadPoolStats.Stats> iter = tpstats.iterator();
            while( iter.hasNext() ) {
                ThreadPoolStats.Stats stat = iter.next();
                if( stat.getName().equals("index") ) {
                    tpStatsBean.indexThreads = stat.getThreads();
                    tpStatsBean.indexQueue = stat.getQueue();
                    tpStatsBean.indexActive = stat.getActive();
                    tpStatsBean.indexRejected = stat.getRejected();
                    tpStatsBean.indexLargest = stat.getLargest();
                    tpStatsBean.indexCompleted = stat.getCompleted();
                }
                else if( stat.getName().equals("get") ) {
                    tpStatsBean.getThreads = stat.getThreads();
                    tpStatsBean.getQueue = stat.getQueue();
                    tpStatsBean.getActive = stat.getActive();
                    tpStatsBean.getRejected = stat.getRejected();
                    tpStatsBean.getLargest = stat.getLargest();
                    tpStatsBean.getCompleted = stat.getCompleted();
                }
                else if( stat.getName().equals("search") ) {
                    tpStatsBean.searchThreads = stat.getThreads();
                    tpStatsBean.searchQueue = stat.getQueue();
                    tpStatsBean.searchActive = stat.getActive();
                    tpStatsBean.searchRejected = stat.getRejected();
                    tpStatsBean.searchLargest = stat.getLargest();
                    tpStatsBean.searchCompleted = stat.getCompleted();
                }
                else if( stat.getName().equals("bulk") ) {
                    tpStatsBean.bulkThreads = stat.getThreads();
                    tpStatsBean.bulkQueue = stat.getQueue();
                    tpStatsBean.bulkActive = stat.getActive();
                    tpStatsBean.bulkRejected = stat.getRejected();
                    tpStatsBean.bulkLargest = stat.getLargest();
                    tpStatsBean.bulkCompleted = stat.getCompleted();
                }
            }
        }
        catch(Exception e)
        {
            // Best-effort monitoring: log and fall through so the (default)
            // bean is still published below.
            logger.warn("failed to load Thread Pool stats data", e);
        }
        // Atomically swap in the new snapshot for the Servo getters.
        tpStatsReporter.threadPoolBean.set(tpStatsBean);
    }

    /**
     * Servo reporter. Each getter reads from the latest atomically-published
     * snapshot, so values within one Servo poll may span two samples but each
     * individual value is consistent.
     * NOTE(review): monitor name capitalization is inconsistent
     * ("IndexThreads"/"IndexQueue" vs "indexActive", ...) — left unchanged
     * because the names are the published metric identifiers.
     */
    public class Elasticsearch_ThreadPoolStatsReporter
    {
        private final AtomicReference<ThreadPoolStatsBean> threadPoolBean;

        public Elasticsearch_ThreadPoolStatsReporter()
        {
            threadPoolBean = new AtomicReference<ThreadPoolStatsBean>(new ThreadPoolStatsBean());
        }

        // --- "index" pool metrics ---
        @Monitor(name="IndexThreads", type=DataSourceType.GAUGE)
        public long getIndexThreads()
        {
            return threadPoolBean.get().indexThreads;
        }
        @Monitor(name="IndexQueue", type=DataSourceType.GAUGE)
        public long getIndexQueue()
        {
            return threadPoolBean.get().indexQueue;
        }
        @Monitor(name="indexActive", type=DataSourceType.GAUGE)
        public long getIndexActive()
        {
            return threadPoolBean.get().indexActive;
        }
        @Monitor(name="indexRejected", type=DataSourceType.COUNTER)
        public long getIndexRejected()
        {
            return threadPoolBean.get().indexRejected;
        }
        @Monitor(name="indexLargest", type=DataSourceType.GAUGE)
        public long getIndexLargest()
        {
            return threadPoolBean.get().indexLargest;
        }
        @Monitor(name="indexCompleted", type=DataSourceType.COUNTER)
        public long getIndexCompleted()
        {
            return threadPoolBean.get().indexCompleted;
        }

        // --- "get" pool metrics ---
        @Monitor(name="getThreads", type=DataSourceType.GAUGE)
        public long getGetThreads()
        {
            return threadPoolBean.get().getThreads;
        }
        @Monitor(name="getQueue", type=DataSourceType.GAUGE)
        public long getGetQueue()
        {
            return threadPoolBean.get().getQueue;
        }
        @Monitor(name="getActive", type=DataSourceType.GAUGE)
        public long getGetActive()
        {
            return threadPoolBean.get().getActive;
        }
        @Monitor(name="getRejected", type=DataSourceType.COUNTER)
        public long getGetRejected()
        {
            return threadPoolBean.get().getRejected;
        }
        @Monitor(name="getLargest", type=DataSourceType.GAUGE)
        public long getGetLargest()
        {
            return threadPoolBean.get().getLargest;
        }
        @Monitor(name="getCompleted", type=DataSourceType.COUNTER)
        public long getGetCompleted()
        {
            return threadPoolBean.get().getCompleted;
        }

        // --- "search" pool metrics ---
        @Monitor(name="searchThreads", type=DataSourceType.GAUGE)
        public long getSearchThreads()
        {
            return threadPoolBean.get().searchThreads;
        }
        @Monitor(name="searchQueue", type=DataSourceType.GAUGE)
        public long getSearchQueue()
        {
            return threadPoolBean.get().searchQueue;
        }
        @Monitor(name="searchActive", type=DataSourceType.GAUGE)
        public long getSearchActive()
        {
            return threadPoolBean.get().searchActive;
        }
        @Monitor(name="searchRejected", type=DataSourceType.COUNTER)
        public long getSearchRejected()
        {
            return threadPoolBean.get().searchRejected;
        }
        @Monitor(name="searchLargest", type=DataSourceType.GAUGE)
        public long getSearchLargest()
        {
            return threadPoolBean.get().searchLargest;
        }
        @Monitor(name="searchCompleted", type=DataSourceType.COUNTER)
        public long getSearchCompleted()
        {
            return threadPoolBean.get().searchCompleted;
        }

        // --- "bulk" pool metrics ---
        @Monitor(name="bulkThreads", type=DataSourceType.GAUGE)
        public long getBulkThreads()
        {
            return threadPoolBean.get().bulkThreads;
        }
        @Monitor(name="bulkQueue", type=DataSourceType.GAUGE)
        public long getBulkQueue()
        {
            return threadPoolBean.get().bulkQueue;
        }
        @Monitor(name="bulkActive", type=DataSourceType.GAUGE)
        public long getBulkActive()
        {
            return threadPoolBean.get().bulkActive;
        }
        @Monitor(name="bulkRejected", type=DataSourceType.COUNTER)
        public long getBulkRejected()
        {
            return threadPoolBean.get().bulkRejected;
        }
        @Monitor(name="bulkLargest", type=DataSourceType.GAUGE)
        public long getBulkLargest()
        {
            return threadPoolBean.get().bulkLargest;
        }
        @Monitor(name="bulkCompleted", type=DataSourceType.COUNTER)
        public long getBulkCompleted()
        {
            return threadPoolBean.get().bulkCompleted;
        }
    }

    /**
     * Immutable-by-convention snapshot of the sampled pool statistics;
     * written once in execute() and then published via the AtomicReference.
     */
    private static class ThreadPoolStatsBean
    {
        private long indexThreads;
        private long indexQueue;
        private long indexActive;
        private long indexRejected;
        private long indexLargest;
        private long indexCompleted;
        private long getThreads;
        private long getQueue;
        private long getActive;
        private long getRejected;
        private long getLargest;
        private long getCompleted;
        private long searchThreads;
        private long searchQueue;
        private long searchActive;
        private long searchRejected;
        private long searchLargest;
        private long searchCompleted;
        private long bulkThreads;
        private long bulkQueue;
        private long bulkActive;
        private long bulkRejected;
        private long bulkLargest;
        private long bulkCompleted;
    }

    /** Runs every 60 seconds. */
    public static TaskTimer getTimer(String name)
    {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName()
    {
        return METRIC_NAME;
    }
}
|
#!/bin/sh
# Deploy test sources, bundled Adafruit libraries, and the adafruit_blinka
# sources to a MicroPython board: directories are created with ampy, each
# .py file is cross-compiled to .mpy with mpy-cross, and the bytecode is
# uploaded over the serial port below.
# NOTE(review): PORT is hard-coded; compiled .mpy files are left behind in
# the source tree — confirm both are intended.
PORT=/dev/ttyUSB0
export MPYCROSS=`realpath ../../../micropython/mpy-cross/mpy-cross`
# switch to test sources
cd ../src
# create test source directories on board
find testing -type d | \
grep -v -E "(^./.git.*|^./.idea|^./.vscode|__pycache__)" | \
xargs -n1 -I {} sh -c "echo Creating directory {} ...; ampy --port ${PORT} mkdir --exists-okay {}"
# compile source .py files to .mpy
find . -type f -name '*.py' | \
xargs -n1 -I {} sh -c "echo compiling {} ...; ${MPYCROSS} {}"
# upload bytecode .mpy files
find . -type f -name '*.mpy' | \
xargs -n1 -I {} sh -c "echo uploading {} ...; ampy --port ${PORT} put {} {}"
# switch to test libraries
cd ../libraries/
# Compile adafruit libraries to bytecode and upload, one submodule at a time
for SUBMODULE in `find . -mindepth 1 -maxdepth 1 -type d `
do
cd ${SUBMODULE}
# create adafruit library directories on board (skip VCS/doc/example dirs)
find . -mindepth 1 -type d | \
grep -v -E "(^./.git.*|__pycache__|^./doc.*|^./example.*)" | \
xargs -n1 -I {} sh -c "echo Creating directory {} ...; ampy --port ${PORT} mkdir --exists-okay {}"
# compile adafruit library .py files to .mpy (skip setup/docs/examples)
find . -type f -name '*.py' | \
grep -v -E "(^./conf.py|^./docs/conf.py|^./setup.py|^./example.*)" | \
xargs -n1 -I {} sh -c "echo compiling {} ...; ${MPYCROSS} {}"
# upload adafruit library .mpy files
find . -type f -name '*.mpy' | \
xargs -n1 -I {} sh -c "echo uploading {} ...; ampy --port ${PORT} put {} {}"
cd ../
done
# switch to adafruit_blinka source
cd ../../src
find . -mindepth 1 -type d | \
grep -v -E "(^./.git.*|__pycache__)" | \
xargs -n1 -I {} sh -c "echo Creating directory {} ...; ampy --port ${PORT} mkdir --exists-okay {}"
# compile adafruit blinka .py files to .mpy
find . -type f -name '*.py' | \
xargs -n1 -I {} sh -c "echo compiling {} ...; ${MPYCROSS} {}"
xargs -n1 -I {} sh -c "echo uploading {} ...; ampy --port ${PORT} put {} {}" |
#!/usr/bin/env bash
# nbdkit
# Copyright (C) 2017-2020 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Red Hat nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# Since HAVE_PLUGINS is defined for this test, and since example1
# should always be compiled (as it's the simplest possible example
# with no dependencies), this is a fail-safe in case other basic tests
# don't work properly. We don't check the output since we are mainly
# interested in whether something crashes.
# Pull in the shared test helper functions.
source ./functions.sh
# Abort on any failing command and trace execution for easier debugging.
set -e
set -x
# Smoke test: just ask the always-available example1 plugin to dump its
# metadata; the output itself is not checked, only that nothing crashes.
nbdkit example1 --dump-plugin
|
package org.usfirst.frc.team2706.robot.commands;
import org.usfirst.frc.team2706.robot.Robot;
/**
 * Sets the lift to a specified height after the delay has passed and waits until it arrives to
 * complete
 */
public class SetLiftHeightBlockingAfterTime extends SetLiftHeightUntilCancelled {

    // Number of consecutive in-tolerance cycles required before finishing.
    private final int minDoneCycles;
    // Maximum acceptable distance from the setpoint.
    private final double error;
    // Consecutive cycles the lift has been within tolerance so far.
    private int currentCycles = 0;
    // Delay before the lift height is actually commanded, in milliseconds.
    private final long timeMs;
    // Wall-clock timestamp (ms) captured when this command was initialized.
    private long currentMs;

    /**
     * Sets the lift to a specified height after the delay has passed and waits until it arrives to
     * complete
     *
     * @param height The height to set the lift to
     * @param minDoneCycles The number of cycles that the lift is within the minimum error to
     *        complete
     * @param error The maximum acceptable distance from the setpoint
     * @param timeMs The time in milliseconds to delay setting the lift height
     */
    public SetLiftHeightBlockingAfterTime(double height, int minDoneCycles, double error,
                    long timeMs) {
        super(height);
        this.timeMs = timeMs;
        this.minDoneCycles = minDoneCycles;
        this.error = error;
    }

    @Override
    public void initialize() {
        // Start the delay clock; the parent's initialize is deferred to execute().
        currentMs = System.currentTimeMillis();
        currentCycles = 0;
    }

    @Override
    public void execute() {
        // XXX: Command will be initialized multiple times — once the delay has
        // elapsed, super.initialize() runs on *every* subsequent cycle.
        // NOTE(review): confirm repeated re-initialization of the parent is
        // intended rather than a one-shot.
        if (System.currentTimeMillis() - currentMs >= timeMs) {
            // Delay has passed so initialize the parent command (starts the lift move)
            super.initialize();
        }
    }

    @Override
    public boolean isFinished() {
        // Wait until command has started to check if the lift is at its destination
        return !(System.currentTimeMillis() - currentMs < timeMs) &&
                // Then check to see if the lift is in the correct position and for the minimum number of ticks
                Math.abs(Robot.lift.getPID().getSetpoint() - Robot.lift.getEncoderHeight()) < error
                && ++currentCycles >= minDoneCycles;
    }
}
|
from typing import List
def resolve_symbols(instance: dict, prefixes: List[str]) -> dict:
    """Expand symbolic self-references in an instance's string values.

    For every string value, each token of the form ``<prefix><key>`` (the
    value's own key, prepended with one of ``prefixes``) is replaced by the
    string form of that key's original value. Non-string values are left
    untouched, and the input dict is never modified.

    Note: each prefix is applied against the *original* value, so when
    several prefixes match, the last matching prefix wins.
    """
    result = dict(instance)
    for key, value in result.items():
        if not isinstance(value, str):
            continue
        replacement = str(instance.get(key, ""))
        for prefix in prefixes:
            token = prefix + key
            if token in value:
                result[key] = value.replace(token, replacement)
    return result
const path = require('path');
const { createFilePath } = require(`gatsby-source-filesystem`);
// Generate slug field for all markdown files
exports.onCreateNode = ({ node, getNode, actions }) => {
const { createNodeField } = actions;
if (node.internal.type === `MarkdownRemark`) {
const slug = createFilePath({ node, getNode, basePath: `content` });
createNodeField({
node,
name: `slug`,
value: slug
});
}
};
// Create pages from markdown files
exports.createPages = ({ graphql, actions }) => {
const { createPage } = actions;
return new Promise((resolve, reject) => {
resolve(
graphql(
`
query {
products: allMarkdownRemark(
filter: { fileAbsolutePath: { regex: "content/products\/.*/" } }
sort: { fields: [frontmatter___date], order: DESC }
) {
edges {
node {
id
excerpt
frontmatter {
title
date(formatString: "DD MMMM YYYY")
}
fields {
slug
}
}
}
}
services: allMarkdownRemark(
filter: { fileAbsolutePath: { regex: "content/services\/.*/" } }
sort: { fields: [frontmatter___date], order: DESC }
) {
edges {
node {
id
excerpt
frontmatter {
title
date(formatString: "DD MMMM YYYY")
}
fields {
slug
}
}
}
}
team: allMarkdownRemark(
filter: { fileAbsolutePath: { regex: "content/team\/.*/" } }
sort: { fields: [frontmatter___date], order: DESC }
) {
edges {
node {
id
excerpt
frontmatter {
title
promoted
image
date(formatString: "DD MMMM YYYY")
}
fields {
slug
}
}
}
}
basic: allMarkdownRemark(
filter: { fileAbsolutePath: { regex: "content/basic\/.*/" } }
) {
edges {
node {
id
excerpt
html
frontmatter {
title
path
template
}
fields {
slug
}
}
}
}
}
`,
).then(result => {
result.data.products.edges.forEach(({ node }) => {
const component = path.resolve('src/templates/product.js');
createPage({
path: node.frontmatter.path ? node.frontmatter.path : node.fields.slug,
component,
context: {
id: node.id
}
});
});
result.data.services.edges.forEach(({ node }) => {
const component = path.resolve('src/templates/service.js');
createPage({
path: node.frontmatter.path ? node.frontmatter.path : node.fields.slug,
component,
context: {
id: node.id
}
});
});
result.data.team.edges.forEach(({ node }) => {
const component = path.resolve('src/templates/team.js');
createPage({
path: node.frontmatter.path ? node.frontmatter.path : node.fields.slug,
component,
context: {
id: node.id
}
});
});
result.data.basic.edges.forEach(({ node }) => {
let component = path.resolve('src/templates/basic.js');
if (node.frontmatter.template) {
component = path.resolve(`src/templates/${node.frontmatter.template}.js`);
}
createPage({
path: node.frontmatter.path ? node.frontmatter.path : node.fields.slug,
component,
context: {
id: node.id
}
});
});
resolve();
}),
);
});
};
|
<reponame>luisventurae/genepass<filename>src/lib/permuter.ts<gh_stars>1-10
/**
 * Randomly permutes the characters of a word (Fisher-Yates shuffle).
 * @param {String} word Word whose characters are shuffled
 * @returns {String} A permutation of `word` with the same characters
 */
const _shuffle_ = (word: string): string => {
  const letters = word.split("");
  // Walk backwards, swapping each slot with a random slot at or before it.
  for (let i = letters.length - 1; i >= 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    [letters[i], letters[j]] = [letters[j], letters[i]];
  }
  return letters.join("");
};
export { _shuffle_ };
|
<reponame>bradmccoydev/keptn
package main
import (
"os"
"testing"
"time"
)
// Test_getDurationFromEnvVar verifies that getDurationFromEnvVar parses the
// LOG_TTL environment variable as a Go duration string and falls back to the
// supplied default (envVarLogsTTLDefault) when the value is empty or invalid.
func Test_getDurationFromEnvVar(t *testing.T) {
	type args struct {
		envVarValue string
	}
	tests := []struct {
		name string
		args args
		want time.Duration
	}{
		{
			// Empty variable -> the default of 432000s (5 days).
			name: "get default value",
			args: args{
				envVarValue: "",
			},
			want: 432000 * time.Second,
		},
		{
			name: "get configured value",
			args: args{
				envVarValue: "10s",
			},
			want: 10 * time.Second,
		},
		{
			name: "get configured value",
			args: args{
				envVarValue: "2m",
			},
			want: 120 * time.Second,
		},
		{
			// Compound duration strings are supported.
			name: "get configured value",
			args: args{
				envVarValue: "1h30m",
			},
			want: 5400 * time.Second,
		},
		{
			// Unparseable value -> fall back to the default.
			name: "get default value because of invalid config",
			args: args{
				envVarValue: "invalid",
			},
			want: 432000 * time.Second,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// NOTE(review): Setenv leaks into the process env across subtests;
			// t.Setenv would isolate it — confirm the toolchain is Go >= 1.17.
			os.Setenv("LOG_TTL", tt.args.envVarValue)
			if got := getDurationFromEnvVar("LOG_TTL", envVarLogsTTLDefault); got != tt.want {
				t.Errorf("getLogTTLDurationInSeconds() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
from typing import List
class BaseCommand:
    """Placeholder base class for commands managed by an Extension.

    Concrete commands are expected to expose at least a ``name`` attribute
    (relied on by Extension.remove_command and Extension.get_command_names).
    """
    pass  # Placeholder for the BaseCommand class
class Extension:
    """A named, mutable collection of commands.

    Commands are kept in insertion order and are identified by their
    ``name`` attribute when removed.
    """

    def __init__(self, name: str):
        # Route through the property setter so future validation applies.
        self.name = name
        self._commands = []

    @property
    def name(self) -> str:
        """The extension's name."""
        return self._name

    @name.setter
    def name(self, value: str) -> None:
        self._name = value

    @property
    def commands(self) -> List["BaseCommand"]:
        """Get the commands from this Extension."""
        return self._commands

    def add_command(self, command: "BaseCommand") -> None:
        """Add a command to the Extension."""
        self._commands.append(command)

    def remove_command(self, command_name: str) -> None:
        """Remove a command from the Extension based on the command name."""
        kept = []
        for existing in self._commands:
            if existing.name != command_name:
                kept.append(existing)
        self._commands = kept

    def get_command_names(self) -> List[str]:
        """Get a list of names of all commands associated with the Extension."""
        return [command.name for command in self._commands]
# Print a table N | SUM | AVG where, for each i in 1..6:
#   SUM = sum of the 2nd field of lines starting with '%' in l<i>.txt
#   AVG = SUM divided by i! (integer division, matching the original output)
printf "%-3s|%-6s|%s\n" N SUM AVG
FACT=1
for i in {1..6}
do
    FACT=$((FACT*i))
    # Single awk replaces the cat | grep | cut | awk pipeline.
    # NOTE(review): awk splits on whitespace runs, while the old `cut -d " " -f 2`
    # split on single spaces — identical unless the files contain doubled spaces.
    INF=$(awk '/^%/ {s+=$2} END {print s}' "l${i}.txt")
    AVG=$((INF/FACT))
    printf "%3d|%6d|%d\n" "$i" "$INF" "$AVG"
done
<reponame>mikitamironenka/job4j
package ru.job4j.sorting;
import java.util.Comparator;
//You need to implement a comparator for comparing two character sequences.
//Compare the two lists element by element, i.e. compare the elements of the
//two lists that sit at the same positions (under the same index).
//Comparison is in lexicographic order.
//In this task you may not use String.compareTo.
//You may use
//String.charAt(int index)
//Integer.compare(int left, int right),
//Character.compare(char left, char right);
public class StringCompare implements Comparator<String> {
    /**
     * Lexicographically compares two strings character by character without
     * using String.compareTo. If all shared positions match, the shorter
     * string is considered smaller.
     *
     * @param left  first string
     * @param right second string
     * @return negative, zero or positive per the Comparator contract
     */
    @Override
    public int compare(String left, String right) {
        int shared = Math.min(left.length(), right.length());
        for (int index = 0; index < shared; index++) {
            int diff = Character.compare(left.charAt(index), right.charAt(index));
            if (diff != 0) {
                return diff;
            }
        }
        // All compared characters equal: order by length.
        return Integer.compare(left.length(), right.length());
    }
}
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package com.microsoft.accessibilityinsightsforandroidservice;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import android.view.WindowManager;
import android.view.accessibility.AccessibilityEvent;
import android.view.accessibility.AccessibilityNodeInfo;
import java.util.Date;
import java.util.function.Consumer;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
/**
 * Unit tests for FocusVisualizerController: verifies that focus/redraw/app-change/
 * orientation events are forwarded to the FocusVisualizer only while the
 * visualization state is enabled, that events are suppressed shortly after an
 * orientation change (compared via the mocked DateProvider), and that
 * subscribing to state changes adds/removes the overlay view on the UI thread.
 */
@RunWith(MockitoJUnitRunner.class)
public class FocusVisualizerControllerTest {
  @Mock FocusVisualizer focusVisualizerMock;
  @Mock FocusVisualizationStateManager focusVisualizationStateManagerMock;
  @Mock AccessibilityEvent accessibilityEventMock;
  @Mock UIThreadRunner uiThreadRunner;
  @Mock WindowManager windowManager;
  @Mock LayoutParamGenerator layoutParamGenerator;
  @Mock FocusVisualizationCanvas focusVisualizationCanvas;
  @Mock WindowManager.LayoutParams layoutParams;
  @Mock AccessibilityNodeInfo accessibilityNodeInfo;
  @Mock DateProvider dateProvider;
  @Mock Date oldDateMock;
  @Mock Date newDateMock;
  // Captured state-change callback, populated by the doAnswer stubs below.
  Consumer<Boolean> listener;
  FocusVisualizerController testSubject;

  @Before
  public void prepare() {
    when(layoutParamGenerator.get()).thenReturn(layoutParams);
    listener = null;
    // The first dateProvider.get() happens during construction; tests that
    // need a "later" time reset the mock and return newDateMock instead.
    when(dateProvider.get()).thenReturn(oldDateMock);
    testSubject =
        new FocusVisualizerController(
            focusVisualizerMock,
            focusVisualizationStateManagerMock,
            uiThreadRunner,
            windowManager,
            layoutParamGenerator,
            focusVisualizationCanvas,
            dateProvider);
  }

  @Test
  public void exists() {
    Assert.assertNotNull(testSubject);
  }

  @Test
  public void onFocusEventDoesNotCallVisualizerIfStateIsFalse() {
    when(focusVisualizationStateManagerMock.getState()).thenReturn(false);
    testSubject.onFocusEvent(accessibilityEventMock);
    verify(focusVisualizerMock, times(0)).addNewFocusedElement(accessibilityNodeInfo);
  }

  @Test
  public void onFocusEventDoesNotCallVisualizerIfOrientationChangedRecently() {
    // Only 1 ms elapsed since construction (500 -> 501): within the debounce window.
    reset(dateProvider);
    when(dateProvider.get()).thenReturn(newDateMock);
    when(focusVisualizationStateManagerMock.getState()).thenReturn(true);
    when(oldDateMock.getTime()).thenReturn((long) 500);
    when(newDateMock.getTime()).thenReturn((long) 501);
    testSubject.onFocusEvent(accessibilityEventMock);
    verify(focusVisualizerMock, times(0)).addNewFocusedElement(accessibilityNodeInfo);
  }

  @Test
  public void onFocusEventCallsVisualizerIfStateIsTrueAndOrientationHasNotChangedRecently()
      throws Exception {
    // 9500 ms elapsed (500 -> 10000): outside the debounce window, so the
    // event's source node must be forwarded to the visualizer.
    reset(dateProvider);
    when(dateProvider.get()).thenReturn(newDateMock);
    when(focusVisualizationStateManagerMock.getState()).thenReturn(true);
    when(accessibilityEventMock.getSource()).thenReturn(accessibilityNodeInfo);
    when(oldDateMock.getTime()).thenReturn((long) 500);
    when(newDateMock.getTime()).thenReturn((long) 10000);
    testSubject.onFocusEvent(accessibilityEventMock);
    verify(focusVisualizerMock, times(1)).addNewFocusedElement(accessibilityNodeInfo);
  }

  @Test
  public void onRedrawEventDoesNotCallVisualizerIfStateIsFalse() {
    when(focusVisualizationStateManagerMock.getState()).thenReturn(false);
    testSubject.onRedrawEvent(accessibilityEventMock);
    verify(focusVisualizerMock, times(0)).refreshHighlights();
  }

  @Test
  public void onRedrawEventCallsVisualizerIfStateIsTrue() {
    when(focusVisualizationStateManagerMock.getState()).thenReturn(true);
    testSubject.onRedrawEvent(accessibilityEventMock);
    verify(focusVisualizerMock, times(1)).refreshHighlights();
  }

  @Test
  public void onAppChangeDoesNotCallVisualizerIfStateIsFalse() {
    when(focusVisualizationStateManagerMock.getState()).thenReturn(false);
    testSubject.onAppChanged(accessibilityNodeInfo);
    verify(focusVisualizerMock, times(0)).resetVisualizations();
  }

  @Test
  public void onAppChangeDoesCallVisualizerIfStateIsTrue() {
    when(focusVisualizationStateManagerMock.getState()).thenReturn(true);
    testSubject.onAppChanged(accessibilityNodeInfo);
    verify(focusVisualizerMock, times(1)).resetVisualizations();
  }

  @Test
  public void onOrientationChangeDoesNothingIfStateIsFalse() {
    when(focusVisualizationStateManagerMock.getState()).thenReturn(false);
    testSubject.onOrientationChanged(0);
    verify(focusVisualizerMock, times(0)).resetVisualizations();
    verify(windowManager, times(0)).updateViewLayout(focusVisualizationCanvas, layoutParams);
  }

  @Test
  public void onOrientationChangeUpdatesVisualizationAsNecessaryIfStateIsTrue() {
    when(focusVisualizationStateManagerMock.getState()).thenReturn(true);
    testSubject.onOrientationChanged(0);
    verify(focusVisualizerMock, times(1)).resetVisualizations();
    verify(windowManager, times(1)).updateViewLayout(focusVisualizationCanvas, layoutParams);
  }

  @Test
  public void onFocusVisualizationStateChangeToEnabledAddsVisualization() {
    // Invoke the state-change subscriber immediately with "enabled".
    doAnswer(
            invocation -> {
              listener = invocation.getArgument(0);
              listener.accept(true);
              return null;
            })
        .when(focusVisualizationStateManagerMock)
        .subscribe(any());
    // Run UI-thread work inline so the overlay is added synchronously.
    doAnswer(
            invocation -> {
              Runnable runnable = invocation.getArgument(0);
              runnable.run();
              return null;
            })
        .when(uiThreadRunner)
        .run(any());
    testSubject =
        new FocusVisualizerController(
            focusVisualizerMock,
            focusVisualizationStateManagerMock,
            uiThreadRunner,
            windowManager,
            layoutParamGenerator,
            focusVisualizationCanvas,
            dateProvider);
    verify(windowManager).addView(focusVisualizationCanvas, layoutParams);
  }

  @Test
  public void onFocusVisualizationStateChangeToEnabledAddsVisualizationWithLastEventSource() {
    when(focusVisualizationStateManagerMock.getState()).thenReturn(false);
    when(accessibilityEventMock.getSource()).thenReturn(accessibilityNodeInfo);
    // Capture the subscriber without invoking it, so we can fire it after a
    // focus event has been observed while the state was disabled.
    doAnswer(
            invocation -> {
              listener = invocation.getArgument(0);
              return null;
            })
        .when(focusVisualizationStateManagerMock)
        .subscribe(any());
    doAnswer(
            invocation -> {
              Runnable runnable = invocation.getArgument(0);
              runnable.run();
              return null;
            })
        .when(uiThreadRunner)
        .run(any());
    testSubject =
        new FocusVisualizerController(
            focusVisualizerMock,
            focusVisualizationStateManagerMock,
            uiThreadRunner,
            windowManager,
            layoutParamGenerator,
            focusVisualizationCanvas,
            dateProvider);
    testSubject.onFocusEvent(accessibilityEventMock);
    listener.accept(true);
    verify(windowManager).addView(focusVisualizationCanvas, layoutParams);
    verify(focusVisualizerMock).addNewFocusedElement(accessibilityNodeInfo);
  }

  @Test
  public void onFocusVisualizationStateChangToDisabledRemovesVisualizations() {
    // Invoke the state-change subscriber immediately with "disabled".
    doAnswer(
            invocation -> {
              listener = invocation.getArgument(0);
              listener.accept(false);
              return null;
            })
        .when(focusVisualizationStateManagerMock)
        .subscribe(any());
    doAnswer(
            invocation -> {
              Runnable runnable = invocation.getArgument(0);
              runnable.run();
              return null;
            })
        .when(uiThreadRunner)
        .run(any());
    testSubject =
        new FocusVisualizerController(
            focusVisualizerMock,
            focusVisualizationStateManagerMock,
            uiThreadRunner,
            windowManager,
            layoutParamGenerator,
            focusVisualizationCanvas,
            dateProvider);
    verify(focusVisualizerMock).resetVisualizations();
  }
}
|
// Barrel file: re-exports every model module so consumers can import all
// model types from a single entry point.
export * from './crl.models';
export * from './dcc.models';
export * from './person.models';
export * from './recovery.models';
export * from './rule.models';
export * from './test.models';
export * from './vaccine.models';
export * from './validator.models';
|
#!/bin/bash
# NOTE(review): shebang changed from /bin/sh — this script uses bash-only
# features (the RSYNC_PROTECT_TMP_FILES array and [[ ]] tests below), which
# are not POSIX sh and would fail under a strict sh like dash.
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a framework from the build products into the app bundle's
# Frameworks folder, resolving symlinks, re-signing if required, and (for
# Xcode < 7) embedding the Swift runtime dylibs the binary links against.
install_framework()
{
  # Resolve the source: prefer the full path under BUILT_PRODUCTS_DIR, then
  # just the basename there, then the literal argument (vendored framework).
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi
  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi
  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
  # Locate the framework's main binary (or a bare dylib copied directly).
  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  fi
  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi
  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"
  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies the dSYM of a vendored framework
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DWARF_DSYM_FOLDER_PATH}"
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/YouTubePlayer-Swift/YouTubePlayer_Swift.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/YouTubePlayer-Swift/YouTubePlayer_Swift.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
-- Product catalogue table.
CREATE TABLE products (
  id    INT AUTO_INCREMENT PRIMARY KEY,  -- surrogate key
  name  VARCHAR(255)   NOT NULL,         -- product display name
  price DECIMAL(10, 2) NOT NULL          -- unit price, two decimal places
);
/*
 * findIndex - linear search.
 *
 * arr : the input array
 * num : the element to be searched
 * n   : number of elements in arr
 *
 * Returns the index of the first occurrence of num, or -1 if absent.
 */
int findIndex(int arr[], int num, int n)
{
    int pos = 0;
    while (pos < n) {
        if (arr[pos] == num) {
            return pos;
        }
        ++pos;
    }
    /* Exhausted the array without a match. */
    return -1;
}
#! /bin/bash
# Thin wrapper: delegates deployment to the package.json "deploy" script.
npm run deploy
# Smoke test for the helpers in ../file-util.sh (create_temp_file,
# create_temp_dir) and ../util.sh (trap_add): creates a plain file plus two
# temp files and a temp dir, printing whether each exists.
set -e
set -u
#set -x
source ../util.sh
source ../file-util.sh
touch myfile.txt
if [ -e "myfile.txt" ]; then
  echo "File myfile.txt exists"
else
  echo "File myfile.txt does not exist"
fi
# Register cleanup without clobbering any EXIT traps the helpers installed.
trap_add "rm -f myfile.txt" EXIT
# create_temp_file writes the generated path into the named variable.
declare temp_file_name
create_temp_file temp_file_name
echo "temp_file_name=${temp_file_name}"
if [ -e "${temp_file_name}" ]; then
  echo "File ${temp_file_name} exists"
else
  echo "File ${temp_file_name} does not exist"
fi
# A second temp file must get a distinct path.
declare temp_file_name2
create_temp_file temp_file_name2
echo "temp_file_name2=${temp_file_name2}"
if [ -e "${temp_file_name2}" ]; then
  echo "File ${temp_file_name2} exists"
else
  echo "File ${temp_file_name2} does not exist"
fi
declare my_temp_dir
create_temp_dir my_temp_dir
echo "my_temp_dir=${my_temp_dir}"
if [ -d "${my_temp_dir}" ]; then
  echo "Directory ${my_temp_dir} exists"
else
  echo "Directory ${my_temp_dir} does not exist"
fi
# end with different results and verify file does not exist
# exit with error condition
# exit 1
# exit with success condition
exit 0
|
"use strict";

// Auto-generated icon definition (Material Design "folder special", two-tone
// variant): SVG viewBox plus path children in the shape consumed by
// react-icons-kit style renderers. Do not edit the path data by hand.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.ic_folder_special_twotone = void 0;
var ic_folder_special_twotone = {
  "viewBox": "0 0 24 24",
  "children": [{
    "name": "path",
    "attribs": {
      "d": "M0 0h24v24H0V0z",
      "fill": "none"
    },
    "children": []
  }, {
    "name": "path",
    "attribs": {
      "d": "M11.17 8l-2-2H4v12h16V8h-8.83zM15 9l1.19 2.79 3.03.26-2.3 1.99.69 2.96L15 15.47 12.39 17l.69-2.96-2.3-1.99 3.03-.26L15 9z",
      "opacity": ".3"
    },
    "children": []
  }, {
    "name": "path",
    "attribs": {
      "d": "M20 6h-8l-2-2H4c-1.1 0-2 .9-2 2v12c0 1.1.9 2 2 2h16c1.1 0 2-.9 2-2V8c0-1.1-.9-2-2-2zm0 12H4V6h5.17l2 2H20v10zm-6.92-3.96L12.39 17 15 15.47 17.61 17l-.69-2.96 2.3-1.99-3.03-.26L15 9l-1.19 2.79-3.03.26z"
    },
    "children": []
  }]
};
exports.ic_folder_special_twotone = ic_folder_special_twotone;
// See
// https://nodejs.dev/accept-input-from-the-command-line-in-nodejs
// https://stackoverflow.com/a/18267308/6928824
// https://stackoverflow.com/a/16048083/6928824
const readline = require('readline')
const {KeyboardInterrupt} = require('./_errors')
module.exports = (prompt='', async=true) => {
process.stdin.setEncoding('utf8')
rl = readline.createInterface({
input: process.stdin,
output: process.stdout,
})
if (async) {
return new Promise((resolve, reject) => {
try {
rl.question(prompt, answer => {
rl.close()
resolve(answer)
})
rl.on('SIGINT', () => {
rl.close()
reject(new KeyboardInterrupt())
})
}
catch (error) {
reject(error)
}
})
}
else {
let response
rl.question(prompt, answer => {
rl.close()
response = answer
})
let sigint = false
rl.on('SIGINT', () => {
rl.close()
sigint = true
})
let sleep
try {
sleep = require('system-sleep')
}
catch (error) {
sleep = () => {}
}
while (response === undefined) {
// (semi) busy waiting
if (sigint) {
throw new KeyboardInterrupt()
}
sleep(200)
}
return response
}
}
|
#!/bin/bash
# Small script to install the proper config for Odoo
# zf190410.0730
# To install Docker and Docker Compose on an Ubuntu machine, see:
# https://github.com/zuzu59/docker_demo
# -p makes the script re-runnable: no error if the directories already exist.
mkdir -p config
mkdir -p addons
cp odoo.conf config/
# Guard the cd so a failure doesn't unzip into the wrong directory.
cd addons || exit 1
# source: https://apps.odoo.com/apps/modules/12.0/om_account_accountant/
# The URL is quoted because it contains '?', which the shell would otherwise
# treat as a glob character.
wget -O compta.zip "https://apps.odoo.com/loempia/download/om_account_accountant/12.0.1.0.0/4x9rMIZVlNa6Ps0P6MdN94.zip?deps"
unzip compta.zip
|
<reponame>mc18g13/teensy-drone<filename>test-cli/lib/MotorNoiseTest/MotorNoiseTest.h
#ifndef MOTOR_NOISE_TEST_HANDLER
#define MOTOR_NOISE_TEST_HANDLER

#include "MenuOptionHandler.h"
#include "Constants.h"
#include "MARG.h"
// #include "MotorControlManager.h"
// #include "MotorSignalController.h"
#include "DShotMotorController.h"

#include <arm_math.h>

/**
 * Menu option that runs the motors while sampling a MARG sensor, used to
 * measure motor-induced noise on the sensor readings (see runTestOnMotors,
 * which takes a pointer-to-member selecting which sensor to read).
 */
class MotorNoiseTest : public MenuOptionHandler {
private:
  long timer = 0;   // was uninitialized; zero-init avoids reading garbage
  // Owned: deleted in the destructor. TODO(review): confirm the constructor
  // (defined elsewhere) allocates m_marg with `new` and nothing else owns it.
  MARG* m_marg;
  DShotMotorController m_motorController;

public:
  MotorNoiseTest();
  ~MotorNoiseTest() {
    delete m_marg;
  }

  // The destructor frees m_marg, so a copy would double-delete (Rule of Five):
  // explicitly forbid copying.
  MotorNoiseTest(const MotorNoiseTest&) = delete;
  MotorNoiseTest& operator=(const MotorNoiseTest&) = delete;

  virtual void setup() override;
  virtual void printTitle() override;

  // Initial throttle used by the noise sweep.
  float32_t m_speed = THROTTLE_MAP_START;

  void runTestOnMotors(Vector (MARG::*sensor)());
  void runAllMotorTest();
};

#endif
const httpStatus = require('http-status');
const ConsulService = require('../services/consul.service');
const ConsulUtils = require('../utils/consul.utils');
/**
 * List consul keys under the prefix in res.locals.key.
 * Each key is stripped of surrounding slashes and the service root, then
 * empty and secret (dot-prefixed) keys are filtered out.
 * @param req Express request (unused)
 * @param res Express response; res.locals.key holds the consul key prefix
 * @returns {*}
 */
exports.keys = (req, res) => {
  const key = res.locals.key;
  ConsulService.keys(key)
    .then((keys) => {
      keys = keys.map(key => ConsulUtils.stripKeySlashes(key))
        .map(key => ConsulUtils.stripSvcRoot(key))
        .filter(key => !!key)
        .filter(key => !ConsulUtils.isSecretKey(key));
      res.status(httpStatus.OK).json({
        result: true,
        data: keys
      });
    })
    // if no keys, return empty array
    .catch(() => res.status(httpStatus.NO_CONTENT).json({
      result: true,
      data: []
    }));
};
/**
 * Fetch the value of a single consul key (res.locals.key).
 * Rejects secret keys (leading dot) and folder keys (trailing slash) by
 * delegating to the error-handling middleware via next().
 * @param req Express request (unused)
 * @param res Express response; res.locals.key holds the consul key
 * @param next Express next — called with an Error on invalid key or consul failure
 * @returns {*}
 */
exports.get = (req, res, next) => {
  const key = res.locals.key;
  // if secret key, don't
  if (ConsulUtils.isSecretKey(key)) {
    return next(new Error('Key can not start with a dot (.)'));
  }
  // if key is folder, set value to null,
  // to avoid unexpected results
  if (ConsulUtils.isLastSlash(key)) {
    return next(new Error('Key can not end with a slash (/)'));
  }
  ConsulService.get(key)
    .then((config) => {
      res.status(httpStatus.OK).json({
        result: true,
        data: config
      });
    })
    .catch(() => next(new Error('Error getting keys')));
};
/**
 * Write a value (req.body.config) to a consul key (res.locals.key).
 * Empty and secret keys are rejected; a folder key (trailing slash) has its
 * value forced to null to avoid unexpected results. On success, records an
 * audit event in res.locals.event and calls next() so the parse-consul-key
 * middleware can run.
 * @param req Express request; req.body.config holds the value to store
 * @param res Express response; res.locals.key and res.locals.project are read
 * @param next Express next — error handler on failure, next middleware on success
 * @returns {*}
 */
exports.set = (req, res, next) => {
  const key = res.locals.key;
  let value = req.body.config;
  // if no key, don't
  if (ConsulUtils.isEmptyKey(key)) {
    return next(new Error('Key is required'));
  }
  // if secret key, don't
  if (ConsulUtils.isSecretKey(key)) {
    return next(new Error('Key can not start with a dot (.)'));
  }
  // if key is folder, set value to null,
  // to avoid unexpected results
  if (ConsulUtils.isLastSlash(key) && value) {
    value = null;
  }
  ConsulService.set(key, value)
    .then(() => {
      res.status(httpStatus.OK).json({
        result: true,
        data: true
      });
      res.locals.event = {
        type: 'updated',
        what: `config ${key}`,
        project: res.locals.project
      };
      // next middleware: parse-consul-key
      next();
    })
    .catch(() => next(new Error('Error setting key')));
};
/**
 * Delete a consul key (res.locals.key).
 * Empty and secret keys are rejected. On success, records an audit event in
 * res.locals.event and calls next() for the parse-consul-key middleware.
 * @param req Express request (unused)
 * @param res Express response; res.locals.key and res.locals.project are read
 * @param next Express next — error handler on failure, next middleware on success
 * @returns {*}
 */
exports.del = (req, res, next) => {
  const key = res.locals.key;
  // if no key, don't
  if (ConsulUtils.isEmptyKey(key)) {
    return next(new Error('Key is required'));
  }
  // if secret key, don't
  if (ConsulUtils.isSecretKey(key)) {
    return next(new Error('Key can not start with a dot (.)'));
  }
  ConsulService.del(key)
    .then(() => {
      res.status(httpStatus.OK).json({
        result: true,
        data: true
      });
      res.locals.event = {
        type: 'deleted',
        what: `config ${key}`,
        project: res.locals.project
      };
      // next middleware: parse-consul-key
      next();
    })
    .catch(() => next(new Error('Error deleting key')));
};
|
/**
 * CodeIgniter model for persisting and querying customer-survey responses.
 * All three methods are currently stubs; the commented examples show the
 * intended query-builder calls against the survey_responses table.
 */
class Cs_model extends CI_Model {
    // Database table name for storing survey responses
    private $surveyTable = 'survey_responses';

    /**
     * Persist one survey response.
     * @param array $response associative array of survey response data
     */
    public function storeResponse($response) {
        // Assume $response is an associative array containing survey response data
        // Implement logic to store $response in the database table $surveyTable
        // Example: $this->db->insert($this->surveyTable, $response);
    }

    /**
     * Fetch every stored survey response.
     * @return array rows from the survey table (once implemented)
     */
    public function getSurveyData() {
        // Implement logic to retrieve all survey data from the database table $surveyTable
        // Example: $query = $this->db->get($this->surveyTable);
        // return $query->result_array();
    }

    /**
     * Compute the mean of the `score` column across all responses.
     * @return float average score (once implemented)
     */
    public function calculateAverageScore() {
        // Implement logic to calculate the average score from the survey responses
        // Example: $query = $this->db->select_avg('score')->get($this->surveyTable);
        // $result = $query->row_array();
        // return $result['score'];
    }
}
package output
import (
"fmt"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/output/writer"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/util/aws/session"
"github.com/Jeffail/benthos/v3/lib/util/retries"
"github.com/Jeffail/benthos/v3/lib/x/docs"
)
//------------------------------------------------------------------------------
// init registers the SQS output type with the global Constructors registry,
// wiring its constructor, user-facing documentation, field specs, and the
// batching-aware config sanitiser.
func init() {
	Constructors[TypeSQS] = TypeSpec{
		constructor: NewAmazonSQS,
		Summary: `
Sends messages to an SQS queue.`,
		Description: `
Metadata values are sent along with the payload as attributes with the data type
String. If the number of metadata values in a message exceeds the message
attribute limit (10) then the top ten keys ordered alphabetically will be
selected.
The fields ` + "`message_group_id` and `message_deduplication_id`" + ` can be
set dynamically using
[function interpolations](/docs/configuration/interpolation#functions), which are
resolved individually for each message of a batch.
### Credentials
By default Benthos will use a shared credentials file when connecting to AWS
services. It's also possible to set them explicitly at the component level,
allowing you to transfer data across accounts. You can find out more
[in this document](/docs/guides/aws).`,
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			return sanitiseWithBatch(conf.SQS, conf.SQS.Batching)
		},
		Async:   true,
		Batches: true,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("url", "The URL of the target SQS queue."),
			docs.FieldCommon("message_group_id", "An optional group ID to set for messages.").SupportsInterpolation(false),
			docs.FieldCommon("message_deduplication_id", "An optional deduplication ID to set for messages.").SupportsInterpolation(false),
			docs.FieldCommon("max_in_flight", "The maximum number of messages to have in flight at a given time. Increase this to improve throughput."),
			batch.FieldSpec(),
		}.Merge(session.FieldSpecs()).Merge(retries.FieldSpecs()),
	}
}
//------------------------------------------------------------------------------
// NewAmazonSQS creates a new AmazonSQS output type. It builds the underlying
// SQS writer, wraps it in a synchronous writer when max_in_flight is 1 (or an
// async writer otherwise), and finally wraps that in a Batcher when a
// non-noop batching policy is configured.
func NewAmazonSQS(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	s, err := writer.NewAmazonSQS(conf.SQS, log, stats)
	if err != nil {
		return nil, err
	}
	var w Type
	if conf.SQS.MaxInFlight == 1 {
		w, err = NewWriter(
			TypeSQS, s, log, stats,
		)
	} else {
		w, err = NewAsyncWriter(
			TypeSQS, conf.SQS.MaxInFlight, s, log, stats,
		)
	}
	// Only layer batching on top when the writer was built successfully and
	// the configured policy actually does something.
	if bconf := conf.SQS.Batching; err == nil && !bconf.IsNoop() {
		// Note: err here is a new, if-scoped variable (shadowing), checked
		// and returned immediately so the outer err is unaffected.
		policy, err := batch.NewPolicy(bconf, mgr, log.NewModule(".batching"), metrics.Namespaced(stats, "batching"))
		if err != nil {
			return nil, fmt.Errorf("failed to construct batch policy: %v", err)
		}
		w = NewBatcher(policy, w, log, stats)
	}
	return w, err
}
//------------------------------------------------------------------------------
|
def downgrade():
    """Alembic downgrade: drop the two columns the matching upgrade added
    to the ``blueprints`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('blueprints', 'preallocated_credits')
    op.drop_column('blueprints', 'cost_multiplier')
-- Return the single most expensive product (ties broken arbitrarily by the
-- engine, since only Cost is ordered on).
SELECT Name, Cost
FROM Product
ORDER BY Cost DESC
LIMIT 1;
<reponame>kawael/optapy
from functools import reduce
from optapy import get_class, solve
from optapy.types import SolverConfig, Duration
from domain import TimeTable, Lesson, generate_problem
from constraints import define_constraints
def print_timetable(timetable: TimeTable):
    """Pretty-print a (possibly partially) solved timetable as an ASCII grid:
    rooms as columns, timeslots as rows, with three lines per slot (subject,
    teacher, student group). Lessons missing a timeslot or room are listed
    separately at the end."""
    room_list = timetable.room_list
    lesson_list = timetable.lesson_list
    # Keep only fully assigned lessons, paired with their slot and room.
    timeslot_room_lesson_triple_list = list(map(lambda the_lesson: (the_lesson.timeslot, the_lesson.room, the_lesson),
                                                filter(lambda the_lesson:
                                                       the_lesson.timeslot is not None and
                                                       the_lesson.room is not None,
                                                       lesson_list)))
    # Build a nested index: timeslot -> room -> [lessons in that cell].
    lesson_map = dict()
    for timeslot, room, lesson in timeslot_room_lesson_triple_list:
        if timeslot in lesson_map:
            if room in lesson_map[timeslot]:
                lesson_map[timeslot][room].append(lesson)
            else:
                lesson_map[timeslot][room] = [lesson]
        else:
            lesson_map[timeslot] = {room: [lesson]}
    # Header row: one 10-character column per room.
    print("|" + ("------------|" * (len(room_list) + 1)))
    print(reduce(lambda a, b: a + b + " | ",
                 map(lambda the_room: "{:<10}".format(the_room.name)[0:10], room_list),
                 "| | "))
    print("|" + ("------------|" * (len(room_list) + 1)))
    for timeslot in timetable.timeslot_list:
        cell_list = list(map(lambda the_room: lesson_map.get(timeslot, {}).get(the_room, []),
                             room_list))
        # Line 1: day + start time, then comma-joined subjects per cell
        # (each cell truncated to 10 chars).
        out = "| " + (timeslot.day_of_week[0:3] + " " + str(timeslot.start_time))[0:10] + " | "
        for cell in cell_list:
            if len(cell) == 0:
                out += " | "
            else:
                out += "{:<10}".format(reduce(lambda a, b: a + "," + b,
                                              map(lambda assigned_lesson: assigned_lesson.subject,
                                                  cell)))[0:10] + " | "
        print(out)
        # Line 2: comma-joined teachers per cell.
        out = "| | "
        for cell in cell_list:
            if len(cell) == 0:
                out += " | "
            else:
                out += "{:<10}".format(reduce(lambda a, b: a + "," + b,
                                              map(lambda assigned_lesson: assigned_lesson.teacher,
                                                  cell)))[0:10] + " | "
        print(out)
        # Line 3: comma-joined student groups per cell.
        out = "| | "
        for cell in cell_list:
            if len(cell) == 0:
                out += " | "
            else:
                out += "{:<10}".format(reduce(lambda a, b: a + "," + b,
                                              map(lambda assigned_lesson: assigned_lesson.student_group,
                                                  cell)))[0:10] + " | "
        print(out)
        print("|" + ("------------|" * (len(room_list) + 1)))
    # Anything the solver could not place gets listed below the grid.
    unassigned_lessons = list(
        filter(lambda unassigned_lesson: unassigned_lesson.timeslot is None or unassigned_lesson.room is None,
               lesson_list))
    if len(unassigned_lessons) > 0:
        print()
        print("Unassigned lessons")
        for lesson in unassigned_lessons:
            print(" " + lesson.subject + " - " + lesson.teacher + " - " + lesson.student_group)
# Configure the OptaPy solver: planning entity (Lesson), solution class
# (TimeTable), the constraint provider, and a 30-second time budget.
solver_config = SolverConfig().withEntityClasses(get_class(Lesson)) \
    .withSolutionClass(get_class(TimeTable)) \
    .withConstraintProviderClass(get_class(define_constraints)) \
    .withTerminationSpentLimit(Duration.ofSeconds(30))

# Solve the generated demo problem and print the resulting timetable.
solution = solve(solver_config, generate_problem())
print_timetable(solution)
|
#!/bin/sh -e
#
# Copyright (c) 2012-2014 Robert Nelson <robertcnelson@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Resolve runtime context: caller's working directory, host CPU architecture,
# build date, and the omap-image-builder checkout root (two levels above this
# script).
DIR=$PWD
host_arch="$(uname -m)"
time=$(date +%Y-%m-%d)
OIB_DIR="$(dirname "$( cd "$(dirname "$0")" ; pwd -P )" )"
abi=aa
# Pull in per-build configuration (tempdir, deb_*, rfs_*, ... variables).
. ${DIR}/.project
# Validates the variables sourced from .project: aborts on missing required
# settings (tempdir, export_filename, deb_distribution, deb_codename,
# deb_arch), fills distribution-specific defaults (components, mirror), and
# backfills rfs_* / package-list variables from their legacy names.
check_defines () {
	if [ ! "${tempdir}" ] ; then
		echo "scripts/deboostrap_first_stage.sh: Error: tempdir undefined"
		exit 1
	fi
	if [ ! "${export_filename}" ] ; then
		echo "scripts/deboostrap_first_stage.sh: Error: export_filename undefined"
		exit 1
	fi
	if [ ! "${deb_distribution}" ] ; then
		echo "scripts/deboostrap_first_stage.sh: Error: deb_distribution undefined"
		exit 1
	fi
	if [ ! "${deb_codename}" ] ; then
		echo "scripts/deboostrap_first_stage.sh: Error: deb_codename undefined"
		exit 1
	fi
	if [ ! "${deb_arch}" ] ; then
		echo "scripts/deboostrap_first_stage.sh: Error: deb_arch undefined"
		exit 1
	fi
	if [ ! "${apt_proxy}" ] ; then
		apt_proxy=""
	fi
	# Distribution-specific defaults; ${var:-default} keeps explicit settings.
	case "${deb_distribution}" in
	debian)
		deb_components=${deb_components:-"main contrib non-free"}
		deb_mirror=${deb_mirror:-"ftp.us.debian.org/debian/"}
		;;
	ubuntu)
		deb_components=${deb_components:-"main universe multiverse"}
		deb_mirror=${deb_mirror:-"ports.ubuntu.com/ubuntu-ports/"}
		;;
	esac
	if [ ! "${rfs_username}" ] ; then
		##Backwards compat pre variables.txt doc
		if [ "${user_name}" ] ; then
			rfs_username="${user_name}"
		else
			rfs_username="${deb_distribution}"
			echo "rfs_username: undefined using: [${rfs_username}]"
		fi
	fi
	if [ ! "${rfs_fullname}" ] ; then
		##Backwards compat pre variables.txt doc
		if [ "${full_name}" ] ; then
			rfs_fullname="${full_name}"
		else
			rfs_fullname="Demo User"
			echo "rfs_fullname: undefined using: [${rfs_fullname}]"
		fi
	fi
	if [ ! "${rfs_password}" ] ; then
		##Backwards compat pre variables.txt doc
		if [ "${password}" ] ; then
			rfs_password="${password}"
		else
			rfs_password="temppwd"
			echo "rfs_password: undefined using: [${rfs_password}]"
		fi
	fi
	if [ ! "${rfs_hostname}" ] ; then
		##Backwards compat pre variables.txt doc
		if [ "${image_hostname}" ] ; then
			rfs_hostname="${image_hostname}"
		else
			rfs_hostname="arm"
			echo "rfs_hostname: undefined using: [${rfs_hostname}]"
		fi
	fi
	# Normalise comma-separated package lists into space-separated ones.
	if [ "x${deb_additional_pkgs}" = "x" ] ; then
		##Backwards compat pre configs
		if [ ! "x${base_pkg_list}" = "x" ] ; then
			deb_additional_pkgs="$(echo ${base_pkg_list} | sed 's/,/ /g')"
		fi
	else
		deb_additional_pkgs="$(echo ${deb_additional_pkgs} | sed 's/,/ /g')"
	fi
	# Optionally append the rcn-ee repo package list.
	if [ "x${repo_rcnee}" = "xenable" ] ; then
		if [ ! "x${repo_rcnee_pkg_list}" = "x" ] ; then
			deb_additional_pkgs="${deb_additional_pkgs} ${repo_rcnee_pkg_list}"
		fi
	fi
}
# Logs the on-disk size of the chroot at ${tempdir}.
report_size () {
	# $() instead of backticks; path quoted so directories with spaces work;
	# du errors (e.g. transient files) are suppressed as before.
	size=$(du -sh "${tempdir}" 2>/dev/null | awk '{print $1}')
	echo "Log: Size of: [${tempdir}]: ${size}"
}
# Idempotently bind the pseudo-filesystems (sysfs, proc, devpts) into the
# chroot at ${tempdir}; each mount is skipped if it is already present.
chroot_mount () {
	if [ "$(mount | grep ${tempdir}/sys | awk '{print $3}')" != "${tempdir}/sys" ] ; then
		sudo mount -t sysfs sysfs ${tempdir}/sys
	fi
	if [ "$(mount | grep ${tempdir}/proc | awk '{print $3}')" != "${tempdir}/proc" ] ; then
		sudo mount -t proc proc ${tempdir}/proc
	fi
	if [ ! -d ${tempdir}/dev/pts ] ; then
		sudo mkdir -p ${tempdir}/dev/pts || true
	fi
	if [ "$(mount | grep ${tempdir}/dev/pts | awk '{print $3}')" != "${tempdir}/dev/pts" ] ; then
		sudo mount -t devpts devpts ${tempdir}/dev/pts
	fi
}
# Unmount the chroot pseudo-filesystems in reverse order of chroot_mount
# (devpts first, sysfs last); each umount is skipped if not mounted.
chroot_umount () {
	if [ "$(mount | grep ${tempdir}/dev/pts | awk '{print $3}')" = "${tempdir}/dev/pts" ] ; then
		sudo umount -f ${tempdir}/dev/pts
	fi
	if [ "$(mount | grep ${tempdir}/proc | awk '{print $3}')" = "${tempdir}/proc" ] ; then
		sudo umount -f ${tempdir}/proc
	fi
	if [ "$(mount | grep ${tempdir}/sys | awk '{print $3}')" = "${tempdir}/sys" ] ; then
		sudo umount -f ${tempdir}/sys
	fi
}
check_defines
# On non-ARM hosts, copy the qemu user-mode emulator into the chroot so
# ARM binaries can run via binfmt_misc during the second stage.
if [ "x${host_arch}" != "xarmv7l" ] ; then
sudo cp $(which qemu-arm-static) ${tempdir}/usr/bin/
fi
echo "Log: Running: debootstrap second-stage in [${tempdir}]"
sudo chroot ${tempdir} debootstrap/debootstrap --second-stage
echo "Log: Complete: [sudo chroot ${tempdir} debootstrap/debootstrap --second-stage]"
report_size
# Optional slimming pass: strip locales/man/docs and configure dpkg/apt to
# keep them out of packages installed later.
if [ "x${chroot_very_small_image}" = "xenable" ] ; then
#so debootstrap just extracts the *.deb's, so lets clean this up hackish now,
#but then allow dpkg to delete these extra files when installed later..
sudo rm -rf ${tempdir}/usr/share/locale/* || true
sudo rm -rf ${tempdir}/usr/share/man/* || true
sudo rm -rf ${tempdir}/usr/share/doc/* || true
#dpkg 1.15.8++, No Docs...
sudo mkdir -p ${tempdir}/etc/dpkg/dpkg.cfg.d/ || true
echo "# Delete locales" > /tmp/01_nodoc
echo "path-exclude=/usr/share/locale/*" >> /tmp/01_nodoc
echo "" >> /tmp/01_nodoc
echo "# Delete man pages" >> /tmp/01_nodoc
echo "path-exclude=/usr/share/man/*" >> /tmp/01_nodoc
echo "" >> /tmp/01_nodoc
echo "# Delete docs" >> /tmp/01_nodoc
echo "path-exclude=/usr/share/doc/*" >> /tmp/01_nodoc
echo "path-include=/usr/share/doc/*/copyright" >> /tmp/01_nodoc
echo "" >> /tmp/01_nodoc
sudo mv /tmp/01_nodoc ${tempdir}/etc/dpkg/dpkg.cfg.d/01_nodoc
sudo mkdir -p ${tempdir}/etc/apt/apt.conf.d/ || true
#apt: no local cache
echo "Dir::Cache {" > /tmp/02nocache
echo " srcpkgcache \"\";" >> /tmp/02nocache
echo " pkgcache \"\";" >> /tmp/02nocache
echo "}" >> /tmp/02nocache
sudo mv /tmp/02nocache ${tempdir}/etc/apt/apt.conf.d/02nocache
#apt: drop translations...
echo "Acquire::Languages \"none\";" > /tmp/02translations
sudo mv /tmp/02translations ${tempdir}/etc/apt/apt.conf.d/02translations
echo "Log: after locale/man purge"
report_size
fi
# Always neuter flash-kernel: its postinst hooks would try to write to a boot
# device that does not exist inside the build chroot.
sudo mkdir -p ${tempdir}/etc/dpkg/dpkg.cfg.d/ || true
echo "# neuter flash-kernel" > /tmp/01_noflash_kernel
echo "path-exclude=/usr/share/flash-kernel/db/all.db" >> /tmp/01_noflash_kernel
echo "path-exclude=/etc/initramfs/post-update.d/flash-kernel" >> /tmp/01_noflash_kernel
echo "path-exclude=/etc/kernel/postinst.d/zz-flash-kernel" >> /tmp/01_noflash_kernel
echo "path-exclude=/etc/kernel/postrm.d/zz-flash-kernel" >> /tmp/01_noflash_kernel
echo "" >> /tmp/01_noflash_kernel
sudo mv /tmp/01_noflash_kernel ${tempdir}/etc/dpkg/dpkg.cfg.d/01_noflash_kernel
sudo mkdir -p ${tempdir}/usr/share/flash-kernel/db/ || true
sudo cp -v ${OIB_DIR}/target/other/rcn-ee.db ${tempdir}/usr/share/flash-kernel/db/
# Debian-only apt tweaks to minimize writes/wasted space on flash media.
if [ "x${deb_distribution}" = "xdebian" ] ; then
#generic apt.conf tweaks for flash/mmc devices to save on wasted space...
sudo mkdir -p ${tempdir}/etc/apt/apt.conf.d/ || true
#apt: emulate apt-get clean:
echo '#Custom apt-get clean' > /tmp/02apt-get-clean
echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' >> /tmp/02apt-get-clean
echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' >> /tmp/02apt-get-clean
echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' >> /tmp/02apt-get-clean
sudo mv /tmp/02apt-get-clean ${tempdir}/etc/apt/apt.conf.d/02apt-get-clean
#apt: drop translations
echo 'Acquire::Languages "none";' > /tmp/02-no-languages
sudo mv /tmp/02-no-languages ${tempdir}/etc/apt/apt.conf.d/02-no-languages
#apt: /var/lib/apt/lists/, store compressed only
echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > /tmp/02compress-indexes
sudo mv /tmp/02compress-indexes ${tempdir}/etc/apt/apt.conf.d/02compress-indexes
#apt: make sure apt-cacher-ng doesn't break oracle-java8-installer
echo 'Acquire::http::Proxy::download.oracle.com "DIRECT";' > /tmp/03-proxy-oracle
sudo mv /tmp/03-proxy-oracle ${tempdir}/etc/apt/apt.conf.d/03-proxy-oracle
fi
#set initial 'seed' time...
sudo sh -c "date --utc \"+%4Y%2m%2d%2H%2M\" > ${tempdir}/etc/timestamp"
# Build /etc/apt/sources.list for the target: main repo, codename-specific
# updates/security/backports entries, then optional external and rcn-ee repos.
wfile="/tmp/sources.list"
echo "deb http://${deb_mirror} ${deb_codename} ${deb_components}" > ${wfile}
echo "#deb-src http://${deb_mirror} ${deb_codename} ${deb_components}" >> ${wfile}
echo "" >> ${wfile}
case "${deb_codename}" in
stretch|buster|sid)
echo "#deb http://${deb_mirror} ${deb_codename}-updates ${deb_components}" >> ${wfile}
echo "##deb-src http://${deb_mirror} ${deb_codename}-updates ${deb_components}" >> ${wfile}
;;
*)
echo "deb http://${deb_mirror} ${deb_codename}-updates ${deb_components}" >> ${wfile}
echo "#deb-src http://${deb_mirror} ${deb_codename}-updates ${deb_components}" >> ${wfile}
;;
esac
case "${deb_codename}" in
wheezy)
echo "" >> ${wfile}
echo "deb http://security.debian.org/ ${deb_codename}/updates ${deb_components}" >> ${wfile}
echo "#deb-src http://security.debian.org/ ${deb_codename}/updates ${deb_components}" >> ${wfile}
echo "" >> ${wfile}
if [ "x${chroot_enable_debian_backports}" = "xenable" ] ; then
echo "deb http://ftp.debian.org/debian ${deb_codename}-backports ${deb_components}" >> ${wfile}
echo "#deb-src http://ftp.debian.org/debian ${deb_codename}-backports ${deb_components}" >> ${wfile}
else
echo "#deb http://ftp.debian.org/debian ${deb_codename}-backports ${deb_components}" >> ${wfile}
echo "##deb-src http://ftp.debian.org/debian ${deb_codename}-backports ${deb_components}" >> ${wfile}
fi
;;
jessie)
echo "" >> ${wfile}
echo "#deb http://security.debian.org/ ${deb_codename}/updates ${deb_components}" >> ${wfile}
echo "##deb-src http://security.debian.org/ ${deb_codename}/updates ${deb_components}" >> ${wfile}
echo "" >> ${wfile}
;;
esac
# Optional third-party repository (arch-pinned so apt only fetches one arch).
if [ "x${repo_external}" = "xenable" ] ; then
echo "" >> ${wfile}
echo "deb [arch=${repo_external_arch}] ${repo_external_server} ${repo_external_dist} ${repo_external_components}" >> ${wfile}
echo "#deb-src [arch=${repo_external_arch}] ${repo_external_server} ${repo_external_dist} ${repo_external_components}" >> ${wfile}
fi
# Optional repos.rcn-ee.net repo; its signing key is staged into /tmp so the
# generated chroot script can apt-key add it later.
if [ "x${repo_rcnee}" = "xenable" ] ; then
#no: precise
echo "" >> ${wfile}
echo "#Kernel source (repos.rcn-ee.net) : https://github.com/RobertCNelson/linux-stable-rcn-ee" >> ${wfile}
echo "#" >> ${wfile}
echo "#git clone https://github.com/RobertCNelson/linux-stable-rcn-ee" >> ${wfile}
echo "#cd ./linux-stable-rcn-ee" >> ${wfile}
echo "#git checkout \`uname -r\` -b tmp" >> ${wfile}
echo "#" >> ${wfile}
echo "deb [arch=armhf] http://repos.rcn-ee.net/${deb_distribution}/ ${deb_codename} main" >> ${wfile}
echo "#deb-src [arch=armhf] http://repos.rcn-ee.net/${deb_distribution}/ ${deb_codename} main" >> ${wfile}
sudo cp -v ${OIB_DIR}/target/keyring/repos.rcn-ee.net-archive-keyring.asc ${tempdir}/tmp/repos.rcn-ee.net-archive-keyring.asc
fi
if [ -f /tmp/sources.list ] ; then
sudo mv /tmp/sources.list ${tempdir}/etc/apt/sources.list
fi
if [ "x${repo_external}" = "xenable" ] ; then
if [ ! "x${repo_external_key}" = "x" ] ; then
sudo cp -v ${OIB_DIR}/target/keyring/${repo_external_key} ${tempdir}/tmp/${repo_external_key}
fi
fi
# Optional apt proxy (e.g. apt-cacher-ng) for all in-chroot downloads.
if [ "${apt_proxy}" ] ; then
echo "Acquire::http::Proxy \"http://${apt_proxy}\";" > /tmp/apt.conf
sudo mv /tmp/apt.conf ${tempdir}/etc/apt/apt.conf
fi
# Seed hostname resolution so tools like sudo don't stall inside the chroot.
echo "127.0.0.1 localhost" > /tmp/hosts
echo "127.0.1.1 ${rfs_hostname}" >> /tmp/hosts
sudo mv /tmp/hosts ${tempdir}/etc/hosts
echo "${rfs_hostname}" > /tmp/hostname
sudo mv /tmp/hostname ${tempdir}/etc/hostname
# Install distro-specific boot scripts: sysvinit style for Debian, upstart
# style for Ubuntu. Also sets ${distro} for later use in the chroot script.
case "${deb_distribution}" in
debian)
sudo cp ${OIB_DIR}/target/init_scripts/generic-${deb_distribution}.sh ${tempdir}/etc/init.d/generic-boot-script.sh
sudo cp ${OIB_DIR}/target/init_scripts/capemgr-${deb_distribution}.sh ${tempdir}/etc/init.d/capemgr.sh
sudo cp ${OIB_DIR}/target/init_scripts/capemgr ${tempdir}/etc/default/
distro="Debian"
;;
ubuntu)
sudo cp ${OIB_DIR}/target/init_scripts/generic-${deb_distribution}.conf ${tempdir}/etc/init/generic-boot-script.conf
sudo cp ${OIB_DIR}/target/init_scripts/capemgr-${deb_distribution}.sh ${tempdir}/etc/init/capemgr.sh
sudo cp ${OIB_DIR}/target/init_scripts/capemgr ${tempdir}/etc/default/
distro="Ubuntu"
if [ -f ${tempdir}/etc/init/failsafe.conf ] ; then
#Ubuntu: with no ethernet cable connected it can take up to 2 mins to login, removing upstart sleep calls..."
sudo sed -i -e 's:sleep 20:#sleep 20:g' ${tempdir}/etc/init/failsafe.conf
sudo sed -i -e 's:sleep 40:#sleep 40:g' ${tempdir}/etc/init/failsafe.conf
sudo sed -i -e 's:sleep 59:#sleep 59:g' ${tempdir}/etc/init/failsafe.conf
fi
;;
esac
#Backward compatibility, as setup_sdcard.sh expects [lsb_release -si > /etc/rcn-ee.conf]
echo "distro=${distro}" > /tmp/rcn-ee.conf
echo "rfs_username=${rfs_username}" >> /tmp/rcn-ee.conf
echo "release_date=${time}" >> /tmp/rcn-ee.conf
echo "third_party_modules=${third_party_modules}" >> /tmp/rcn-ee.conf
echo "abi=${abi}" >> /tmp/rcn-ee.conf
sudo mv /tmp/rcn-ee.conf ${tempdir}/etc/rcn-ee.conf
#use /etc/dogtag for all:
if [ ! "x${rts_etc_dogtag}" = "x" ] ; then
echo "${rts_etc_dogtag} ${time}" > /tmp/dogtag
sudo mv /tmp/dogtag ${tempdir}/etc/dogtag
fi
# Generate the provisioning script that will run INSIDE the chroot via
# [sudo chroot ${tempdir} /bin/sh -e chroot_script.sh]. Unescaped ${vars}
# are expanded NOW on the host; \${vars} survive into the generated script
# and are evaluated inside the chroot. The heredoc body below is emitted
# verbatim as program data, so it must not be edited for style.
cat > ${DIR}/chroot_script.sh <<-__EOF__
#!/bin/sh -e
export LC_ALL=C
export DEBIAN_FRONTEND=noninteractive
dpkg_check () {
unset pkg_is_not_installed
LC_ALL=C dpkg --list | awk '{print \$2}' | grep "^\${pkg}$" >/dev/null || pkg_is_not_installed="true"
}
dpkg_package_missing () {
echo "Log: (chroot) package [\${pkg}] was not installed... (add to deb_include if functionality is really needed)"
}
is_this_qemu () {
unset warn_qemu_will_fail
if [ -f /usr/bin/qemu-arm-static ] ; then
warn_qemu_will_fail=1
fi
}
qemu_warning () {
if [ "\${warn_qemu_will_fail}" ] ; then
echo "Log: (chroot) Warning, qemu can fail here... (run on real armv7l hardware for production images)"
echo "Log: (chroot): [\${qemu_command}]"
fi
}
stop_init () {
cat > /usr/sbin/policy-rc.d <<EOF
#!/bin/sh
exit 101
EOF
chmod +x /usr/sbin/policy-rc.d
#set distro:
. /etc/rcn-ee.conf
if [ "x\${distro}" = "xUbuntu" ] ; then
dpkg-divert --local --rename --add /sbin/initctl
ln -s /bin/true /sbin/initctl
fi
}
install_pkg_updates () {
if [ "x${repo_rcnee}" = "xenable" ] ; then
apt-key add /tmp/repos.rcn-ee.net-archive-keyring.asc
rm -f /tmp/repos.rcn-ee.net-archive-keyring.asc || true
fi
if [ "x${repo_external}" = "xenable" ] ; then
apt-key add /tmp/${repo_external_key}
rm -f /tmp/${repo_external_key} || true
fi
apt-get update
apt-get upgrade -y --force-yes
}
install_pkgs () {
if [ ! "x${deb_additional_pkgs}" = "x" ] ; then
#Install the user choosen list.
echo "Log: (chroot) Installing: ${deb_additional_pkgs}"
apt-get -y --force-yes install ${deb_additional_pkgs}
fi
if [ ! "x${repo_rcnee_pkg_version}" = "x" ] ; then
echo "Log: (chroot) Installing modules for: ${repo_rcnee_pkg_version}"
apt-get -y --force-yes install mt7601u-modules-${repo_rcnee_pkg_version} || true
depmod -a ${repo_rcnee_pkg_version}
update-initramfs -u -k ${repo_rcnee_pkg_version}
fi
if [ "x${chroot_enable_debian_backports}" = "xenable" ] ; then
if [ ! "x${chroot_debian_backports_pkg_list}" = "x" ] ; then
echo "Log: (chroot) Installing (from backports): ${chroot_debian_backports_pkg_list}"
sudo apt-get -y --force-yes install ${chroot_debian_backports_pkg_list}
fi
fi
if [ ! "x${repo_external_pkg_list}" = "x" ] ; then
echo "Log: (chroot) Installing (from external repo): ${repo_external_pkg_list}"
apt-get -y --force-yes install ${repo_external_pkg_list}
fi
}
system_tweaks () {
echo "[options]" > /etc/e2fsck.conf
echo "broken_system_clock = true" >> /etc/e2fsck.conf
if [ -f /etc/systemd/systemd-journald.conf ] ; then
sed -i -e 's:#SystemMaxUse=:SystemMaxUse=8M:g' /etc/systemd/systemd-journald.conf
fi
}
set_locale () {
pkg="locales"
dpkg_check
if [ "x\${pkg_is_not_installed}" = "x" ] ; then
if [ ! "x${rfs_default_locale}" = "x" ] ; then
case "\${distro}" in
Debian)
echo "Log: (chroot) Debian: setting up locales: [${rfs_default_locale}]"
sed -i -e 's:# ${rfs_default_locale} UTF-8:${rfs_default_locale} UTF-8:g' /etc/locale.gen
locale-gen
;;
Ubuntu)
echo "Log: (chroot) Ubuntu: setting up locales: [${rfs_default_locale}]"
locale-gen ${rfs_default_locale}
;;
esac
echo "LANG=${rfs_default_locale}" > /etc/default/locale
fi
else
dpkg_package_missing
fi
}
run_deborphan () {
apt-get -y --force-yes install deborphan
# Prevent deborphan from removing explicitly required packages
deborphan -A ${deb_additional_pkgs} ${repo_external_pkg_list} ${deb_include}
deborphan | xargs apt-get -y remove --purge
# Purge keep file
deborphan -Z
#FIXME, only tested on wheezy...
apt-get -y remove deborphan dialog gettext-base libasprintf0c2 --purge
apt-get clean
}
dl_kernel () {
wget --no-verbose --directory-prefix=/tmp/ \${kernel_url}
#This should create a list of files on the server
#<a href="file"></a>
cat /tmp/index.html | grep "<a href=" > /tmp/temp.html
#Note: cat drops one \...
#sed -i -e "s/<a href/\\n<a href/g" /tmp/temp.html
sed -i -e "s/<a href/\\\n<a href/g" /tmp/temp.html
sed -i -e 's/\"/\"><\/a>\n/2' /tmp/temp.html
cat /tmp/temp.html | grep href > /tmp/index.html
deb_file=\$(cat /tmp/index.html | grep linux-image)
deb_file=\$(echo \${deb_file} | awk -F ".deb" '{print \$1}')
deb_file=\${deb_file##*linux-image-}
kernel_version=\$(echo \${deb_file} | awk -F "_" '{print \$1}')
echo "Log: Using: \${kernel_version}"
deb_file="linux-image-\${deb_file}.deb"
wget --directory-prefix=/tmp/ \${kernel_url}\${deb_file}
unset dtb_file
dtb_file=\$(cat /tmp/index.html | grep dtbs.tar.gz | head -n 1)
dtb_file=\$(echo \${dtb_file} | awk -F "\"" '{print \$2}')
if [ "\${dtb_file}" ] ; then
wget --directory-prefix=/boot/ \${kernel_url}\${dtb_file}
fi
dpkg -x /tmp/\${deb_file} /
if [ "x\${third_party_modules}" = "xenable" ] ; then
unset thirdparty_file
thirdparty_file=\$(cat /tmp/index.html | grep thirdparty)
thirdparty_file=\$(echo \${thirdparty_file} | awk -F "\"" '{print \$2}')
if [ "\${thirdparty_file}" ] ; then
wget --directory-prefix=/tmp/ \${kernel_url}\${thirdparty_file}
if [ -f /tmp/thirdparty ] ; then
/bin/sh /tmp/thirdparty
fi
fi
fi
pkg="initramfs-tools"
dpkg_check
if [ "x\${pkg_is_not_installed}" = "x" ] ; then
depmod \${kernel_version} -a
update-initramfs -c -k \${kernel_version}
else
dpkg_package_missing
fi
unset source_file
source_file=\$(cat /tmp/index.html | grep .diff.gz | head -n 1)
source_file=\$(echo \${source_file} | awk -F "\"" '{print \$2}')
if [ "\${source_file}" ] ; then
wget --directory-prefix=/opt/source/ \${kernel_url}\${source_file}
fi
rm -f /tmp/index.html || true
rm -f /tmp/temp.html || true
rm -f /tmp/\${deb_file} || true
rm -f /boot/System.map-\${kernel_version} || true
mv /boot/config-\${kernel_version} /opt/source || true
rm -rf /usr/src/linux-headers* || true
}
add_user () {
groupadd -r admin || true
groupadd -r spi || true
cat /etc/group | grep ^i2c || groupadd -r i2c || true
cat /etc/group | grep ^kmem || groupadd -r kmem || true
cat /etc/group | grep ^netdev || groupadd -r netdev || true
cat /etc/group | grep ^systemd-journal || groupadd -r systemd-journal || true
cat /etc/group | grep ^weston-launch || groupadd -r weston-launch || true
cat /etc/group | grep ^xenomai || groupadd -r xenomai || true
echo "KERNEL==\"spidev*\", GROUP=\"spi\", MODE=\"0660\"" > /etc/udev/rules.d/50-spi.rules
default_groups="admin,adm,dialout,i2c,kmem,spi,cdrom,floppy,audio,dip,video,netdev,plugdev,users,systemd-journal,weston-launch,xenomai"
pkg="sudo"
dpkg_check
if [ "x\${pkg_is_not_installed}" = "x" ] ; then
echo "Log: (chroot) adding admin group to /etc/sudoers"
echo "%admin ALL=(ALL) ALL" >>/etc/sudoers
else
dpkg_package_missing
if [ "x${rfs_disable_root}" = "xenable" ] ; then
echo "Log: (Chroot) WARNING: sudo not installed and no root user"
fi
fi
pass_crypt=\$(perl -e 'print crypt(\$ARGV[0], "rcn-ee-salt")' ${rfs_password})
useradd -G "\${default_groups}" -s /bin/bash -m -p \${pass_crypt} -c "${rfs_fullname}" ${rfs_username}
grep ${rfs_username} /etc/passwd
mkdir -p /home/${rfs_username}/bin
chown ${rfs_username}:${rfs_username} /home/${rfs_username}/bin
echo "default username:password is [${rfs_username}:${rfs_password}]" >> /etc/issue
echo "" >> /etc/issue
case "\${distro}" in
Debian)
if [ "x${rfs_disable_root}" = "xenable" ] ; then
passwd -l root || true
else
passwd <<-EOF
root
root
EOF
fi
sed -i -e 's:#EXTRA_GROUPS:EXTRA_GROUPS:g' /etc/adduser.conf
sed -i -e 's:dialout:dialout i2c spi:g' /etc/adduser.conf
sed -i -e 's:#ADD_EXTRA_GROUPS:ADD_EXTRA_GROUPS:g' /etc/adduser.conf
;;
Ubuntu)
passwd -l root || true
;;
esac
}
debian_startup_script () {
if [ "x${rfs_startup_scripts}" = "xenable" ] ; then
if [ -f /etc/init.d/generic-boot-script.sh ] ; then
chown root:root /etc/init.d/generic-boot-script.sh
chmod +x /etc/init.d/generic-boot-script.sh
insserv generic-boot-script.sh || true
fi
if [ -f /etc/init.d/capemgr.sh ] ; then
chown root:root /etc/init.d/capemgr.sh
chown root:root /etc/default/capemgr
chmod +x /etc/init.d/capemgr.sh
insserv capemgr.sh || true
fi
fi
}
ubuntu_startup_script () {
if [ "x${rfs_startup_scripts}" = "xenable" ] ; then
if [ -f /etc/init/generic-boot-script.conf ] ; then
chown root:root /etc/init/generic-boot-script.conf
fi
fi
#Not Optional...
#(protects your kernel, from Ubuntu repo which may try to take over your system on an upgrade)...
if [ -f /etc/flash-kernel.conf ] ; then
chown root:root /etc/flash-kernel.conf
fi
}
startup_script () {
case "\${distro}" in
Debian)
debian_startup_script
;;
Ubuntu)
ubuntu_startup_script
;;
esac
if [ ! "x${rfs_opt_scripts}" = "x" ] ; then
if [ -f /usr/bin/git ] ; then
mkdir -p /opt/scripts/ || true
qemu_command="git clone ${rfs_opt_scripts} /opt/scripts/ --depth 1 || true"
qemu_warning
git clone ${rfs_opt_scripts} /opt/scripts/ --depth 1 || true
sync
if [ -f /opt/scripts/.git/config ] ; then
echo "/opt/scripts/ : ${rfs_opt_scripts}" >> /opt/source/list.txt
chown -R ${rfs_username}:${rfs_username} /opt/scripts/
fi
fi
fi
}
cleanup () {
mkdir -p /boot/uboot/
if [ -f /etc/apt/apt.conf ] ; then
rm -rf /etc/apt/apt.conf || true
fi
apt-get clean
rm -rf /var/lib/apt/lists/*
if [ -d /var/cache/ti-c6000-cgt-v8.0.x-installer/ ] ; then
rm -rf /var/cache/ti-c6000-cgt-v8.0.x-installer/ || true
fi
if [ -d /var/cache/ti-pru-cgt-installer/ ] ; then
rm -rf /var/cache/ti-pru-cgt-installer/ || true
fi
rm -f /usr/sbin/policy-rc.d
if [ "x\${distro}" = "xUbuntu" ] ; then
rm -f /sbin/initctl || true
dpkg-divert --local --rename --remove /sbin/initctl
fi
#left over from init/upstart scripts running in chroot...
if [ -d /var/run/ ] ; then
rm -rf /var/run/* || true
fi
}
#cat /chroot_script.sh
is_this_qemu
stop_init
install_pkg_updates
install_pkgs
system_tweaks
set_locale
if [ "x${chroot_very_small_image}" = "xenable" ] ; then
run_deborphan
fi
add_user
mkdir -p /opt/source || true
touch /opt/source/list.txt
startup_script
pkg="wget"
dpkg_check
if [ "x\${pkg_is_not_installed}" = "x" ] ; then
if [ "${rfs_kernel}" ] ; then
for kernel_url in ${rfs_kernel} ; do dl_kernel ; done
fi
else
dpkg_package_missing
fi
cleanup
rm -f /chroot_script.sh || true
__EOF__
sudo mv ${DIR}/chroot_script.sh ${tempdir}/chroot_script.sh
# Optionally seed /lib/firmware from a local linux-firmware checkout so wifi
# and similar devices work on first boot without network access.
if [ "x${include_firmware}" = "xenable" ] ; then
if [ ! -d ${tempdir}/lib/firmware/ ] ; then
sudo mkdir -p ${tempdir}/lib/firmware/ || true
fi
if [ -d ${DIR}/git/linux-firmware/brcm/ ] ; then
sudo mkdir -p ${tempdir}/lib/firmware/brcm
sudo cp -v ${DIR}/git/linux-firmware/LICENCE.broadcom_bcm43xx ${tempdir}/lib/firmware/
sudo cp -v ${DIR}/git/linux-firmware/brcm/* ${tempdir}/lib/firmware/brcm
fi
if [ -f ${DIR}/git/linux-firmware/carl9170-1.fw ] ; then
sudo cp -v ${DIR}/git/linux-firmware/carl9170-1.fw ${tempdir}/lib/firmware/
fi
if [ -f ${DIR}/git/linux-firmware/htc_9271.fw ] ; then
sudo cp -v ${DIR}/git/linux-firmware/LICENCE.atheros_firmware ${tempdir}/lib/firmware/
sudo cp -v ${DIR}/git/linux-firmware/htc_9271.fw ${tempdir}/lib/firmware/
fi
if [ -d ${DIR}/git/linux-firmware/rtlwifi/ ] ; then
sudo mkdir -p ${tempdir}/lib/firmware/rtlwifi
sudo cp -v ${DIR}/git/linux-firmware/LICENCE.rtlwifi_firmware.txt ${tempdir}/lib/firmware/
sudo cp -v ${DIR}/git/linux-firmware/rtlwifi/* ${tempdir}/lib/firmware/rtlwifi
fi
if [ -d ${DIR}/git/linux-firmware/ti-connectivity/ ] ; then
sudo mkdir -p ${tempdir}/lib/firmware/ti-connectivity
sudo cp -v ${DIR}/git/linux-firmware/LICENCE.ti-connectivity ${tempdir}/lib/firmware/
sudo cp -v ${DIR}/git/linux-firmware/ti-connectivity/* ${tempdir}/lib/firmware/ti-connectivity
fi
fi
# Optional host-side hook that runs against the rootfs BEFORE the main chroot
# script; it receives ${tempdir} as its only argument.
if [ -n "${early_chroot_script}" -a -r "${DIR}/target/chroot/${early_chroot_script}" ] ; then
report_size
echo "Calling early_chroot_script script: ${early_chroot_script}"
sudo cp -v ${DIR}/.project ${tempdir}/etc/oib.project
sudo /bin/sh -e ${DIR}/target/chroot/${early_chroot_script} ${tempdir}
early_chroot_script=""
sudo rm -f ${tempdir}/etc/oib.project || true
fi
# Run the generated provisioning script inside the chroot (pseudo-filesystems
# must be mounted first; they are unmounted much later via chroot_umount).
chroot_mount
sudo chroot ${tempdir} /bin/sh -e chroot_script.sh
echo "Log: Complete: [sudo chroot ${tempdir} /bin/sh -e chroot_script.sh]"
# Fail the build if the in-chroot git clone of rfs_opt_scripts silently failed
# (the clone itself is "|| true" inside the chroot script).
if [ ! "x${rfs_opt_scripts}" = "x" ] ; then
if [ ! -f ${tempdir}/opt/scripts/.git/config ] ; then
echo "Log: ERROR: git clone of ${rfs_opt_scripts} failed.."
exit 1
fi
fi
# Optional user hook sourced in the current shell (can mutate build variables).
if [ -n "${chroot_before_hook}" -a -r "${DIR}/${chroot_before_hook}" ] ; then
report_size
echo "Calling chroot_before_hook script: ${chroot_before_hook}"
. "${DIR}/${chroot_before_hook}"
chroot_before_hook=""
fi
# Optional second in-chroot script (project-specific finalization).
if [ -n "${chroot_script}" -a -r "${DIR}/target/chroot/${chroot_script}" ] ; then
report_size
echo "Calling chroot_script script: ${chroot_script}"
sudo cp -v ${DIR}/.project ${tempdir}/etc/oib.project
sudo cp -v ${DIR}/target/chroot/${chroot_script} ${tempdir}/final.sh
sudo chroot ${tempdir} /bin/sh -e final.sh
sudo rm -f ${tempdir}/final.sh || true
sudo rm -f ${tempdir}/etc/oib.project || true
chroot_script=""
if [ -f ${tempdir}/npm-debug.log ] ; then
echo "Log: ERROR: npm error in script, review log [cat ${tempdir}/npm-debug.log]..."
exit 1
fi
fi
##Building final tar file...
if [ -d ${DIR}/deploy/${export_filename}/ ] ; then
rm -rf ${DIR}/deploy/${export_filename}/ || true
fi
mkdir -p ${DIR}/deploy/${export_filename}/ || true
cp -v ${DIR}/.project ${DIR}/deploy/${export_filename}/image-builder.project
if [ -n "${chroot_after_hook}" -a -r "${DIR}/${chroot_after_hook}" ] ; then
report_size
echo "Calling chroot_after_hook script: ${chroot_after_hook}"
. "${DIR}/${chroot_after_hook}"
chroot_after_hook=""
fi
#add /boot/uEnv.txt update script
if [ -d ${tempdir}/etc/kernel/postinst.d/ ] ; then
sudo cp -v ${OIB_DIR}/target/other/zz-uenv_txt ${tempdir}/etc/kernel/postinst.d/
sudo chmod +x ${tempdir}/etc/kernel/postinst.d/zz-uenv_txt
fi
# The emulator was only needed while running ARM binaries during the build;
# it must not ship in the final image.
if [ -f ${tempdir}/usr/bin/qemu-arm-static ] ; then
sudo rm -f ${tempdir}/usr/bin/qemu-arm-static || true
fi
# Copy kernel/initrd/dtbs artifacts out of the rootfs into the deploy dir.
# NOTE(review): the inner [ "${rfs_kernel}" ] test is redundant — it is
# already guaranteed true by the enclosing if.
if [ "${rfs_kernel}" ] ; then
if ls ${tempdir}/boot/vmlinuz-* >/dev/null 2>&1 ; then
sudo cp -v ${tempdir}/boot/vmlinuz-* ${DIR}/deploy/${export_filename}/
else
if [ "${rfs_kernel}" ] ; then
echo "Log: ERROR: kernel install failure..."
exit 1
fi
fi
if ls ${tempdir}/boot/initrd.img-* >/dev/null 2>&1 ; then
sudo cp -v ${tempdir}/boot/initrd.img-* ${DIR}/deploy/${export_filename}/
fi
if ls ${tempdir}/boot/*dtbs.tar.gz >/dev/null 2>&1 ; then
sudo cp -v ${tempdir}/boot/*dtbs.tar.gz ${DIR}/deploy/${export_filename}/
fi
fi
# Record the generated credentials alongside the image for the user.
echo "${rfs_username}:${rfs_password}" > /tmp/user_password.list
sudo mv /tmp/user_password.list ${DIR}/deploy/${export_filename}/user_password.list
#Fixes:
if [ -d ${tempdir}/etc/ssh/ -a "x${keep_ssh_keys}" = "x" ] ; then
#Remove pre-generated ssh keys, these will be regenerated on first bootup...
sudo rm -rf ${tempdir}/etc/ssh/ssh_host_* || true
sudo touch ${tempdir}/etc/ssh/ssh.regenerate || true
fi
#extra home, from chroot machine when running npm install xyz:
unset extra_home
extra_home=$(ls -lh ${tempdir}/home/ | grep -v ${rfs_username} | awk '{print $9}' | tail -1 || true)
if [ ! "x${extra_home}" = "x" ] ; then
if [ -d ${tempdir}/home/${extra_home}/ ] ; then
sudo rm -rf ${tempdir}/home/${extra_home}/ || true
fi
fi
#ID.txt:
if [ -f ${tempdir}/etc/dogtag ] ; then
sudo cp ${tempdir}/etc/dogtag ${DIR}/deploy/${export_filename}/ID.txt
fi
report_size
chroot_umount
# Optionally ship setup_sdcard.sh plus hwpack/uEnv templates next to the image.
if [ "x${chroot_COPY_SETUP_SDCARD}" = "xenable" ] ; then
echo "Log: copying setup_sdcard.sh related files"
sudo cp ${DIR}/tools/setup_sdcard.sh ${DIR}/deploy/${export_filename}/
sudo mkdir -p ${DIR}/deploy/${export_filename}/hwpack/
sudo cp ${DIR}/tools/hwpack/*.conf ${DIR}/deploy/${export_filename}/hwpack/
if [ -n "${chroot_uenv_txt}" -a -r "${OIB_DIR}/target/boot/${chroot_uenv_txt}" ] ; then
sudo cp "${OIB_DIR}/target/boot/${chroot_uenv_txt}" ${DIR}/deploy/${export_filename}/uEnv.txt
fi
if [ -n "${chroot_flasher_uenv_txt}" -a -r "${OIB_DIR}/target/boot/${chroot_flasher_uenv_txt}" ] ; then
sudo cp "${OIB_DIR}/target/boot/${chroot_flasher_uenv_txt}" ${DIR}/deploy/${export_filename}/eMMC-flasher.txt
fi
if [ -n "${chroot_post_uenv_txt}" -a -r "${OIB_DIR}/target/boot/${chroot_post_uenv_txt}" ] ; then
sudo cp "${OIB_DIR}/target/boot/${chroot_post_uenv_txt}" ${DIR}/deploy/${export_filename}/post-uEnv.txt
fi
fi
# Deliver the rootfs either as a plain directory or as a numeric-owner tar.
if [ "x${chroot_directory}" = "xenable" ]; then
echo "Log: moving rootfs to directory: [${deb_arch}-rootfs-${deb_distribution}-${deb_codename}]"
sudo mv -v ${tempdir} ${DIR}/deploy/${export_filename}/${deb_arch}-rootfs-${deb_distribution}-${deb_codename}
du -h --max-depth=0 ${DIR}/deploy/${export_filename}/${deb_arch}-rootfs-${deb_distribution}-${deb_codename}
else
cd ${tempdir}
echo "Log: packaging rootfs: [${deb_arch}-rootfs-${deb_distribution}-${deb_codename}.tar]"
sudo LANG=C tar --numeric-owner -cf ${DIR}/deploy/${export_filename}/${deb_arch}-rootfs-${deb_distribution}-${deb_codename}.tar .
cd ${DIR}/
ls -lh ${DIR}/deploy/${export_filename}/${deb_arch}-rootfs-${deb_distribution}-${deb_codename}.tar
fi
sudo chown -R ${USER}:${USER} ${DIR}/deploy/${export_filename}/
# Optional outer tarball wrapping the whole deploy directory.
if [ "x${chroot_tarball}" = "xenable" ] ; then
echo "Compressing ${export_filename}"
cd ${DIR}/deploy/
tar cvf ${export_filename}.tar ./${export_filename}
cd ${DIR}/
fi
#
|
'use strict';
function makeService(deps) {
    // Injected collaborators; QueueManager is re-exposed as-is on the API.
    const { QueueService, ValidatorService, logger, QueueManager } = deps;

    return {
        /**
         * Set the initial configuration for the library.
         * @param {Object} queueConfig - The queue configuration object
         * @param {Object} [extraOptions] - The extra options for library configuration
         * @param {Object} [extraOptions.exchangeOptions] - Exchange configuration overriding the defaults
         * @param {Object} [extraOptions.queueOptions] - Consumer-queue configuration overriding the defaults
         * @param {Object} [extraOptions.publisherOptions] - Publisher configuration overriding the defaults
         * @param {Object} [extraOptions.retryOptions] - Publishing retry policy overriding the defaults
         * @param {Object} routes - The file with all routes declared
         * @returns {Promise<Object>} thin facade over the started queue manager
         */
        async create({queueConfig, extraOptions, routes}) {
            const validation = ValidatorService.validateConfigSchema(queueConfig);
            if (validation.error !== null) {
                throw 'Bad configuration object: ' + JSON.stringify(validation.error.details);
            }
            // Feed the (possibly normalized) validated value back into the service.
            QueueService.setQueueOptions(validation.value);
            QueueService.setExtraOptions(extraOptions);
            const manager = await QueueService.start(routes);
            logger.info('Queue library initialized correctly');
            return {
                publish: (key, msg) => manager.getQueue().publishToTopic({ key, msg }),
                publishHTTP: (key, msg) => manager.getQueue().publishHTTPToTopic(key, msg),
                publishDelayedHTTP: (key, msg, delay) => manager.getQueue().publishDelayedHTTPToTopic(key, msg, delay),
                setQueueOptions: (queueOptions) => QueueService.setQueueOptions(queueOptions),
                closeConnection: () => manager.close(),
                queueManager: QueueManager
            };
        }
    };
}
module.exports = makeService;
|
<reponame>secondsun/quarkus-operator-sdk
/**
* Copyright 2021 Red Hat, Inc. and/or its affiliates.
*
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.quarkiverse.operatorsdk.runtime;
import java.util.Optional;
import io.javaoperatorsdk.operator.api.config.RetryConfiguration;
/**
* @author <a href="<EMAIL>"><NAME></a>
*/
public class RetryConfigurationResolver implements RetryConfiguration {
// Effective configuration: an adapter over the user-supplied external config
// when present, otherwise the SDK's built-in RetryConfiguration.DEFAULT.
private final RetryConfiguration delegate;
private RetryConfigurationResolver(Optional<ExternalRetryConfiguration> retry) {
delegate = retry
.<RetryConfiguration> map(ExternalRetryConfigurationAdapter::new)
.orElse(RetryConfiguration.DEFAULT);
}
// Resolves the optional external configuration into a concrete
// RetryConfiguration. The values are copied into a PlainRetryConfiguration
// snapshot rather than returning the resolver itself, so callers get a
// plain value object decoupled from the Optional-based plumbing.
public static RetryConfiguration resolve(Optional<ExternalRetryConfiguration> retry) {
final var delegate = new RetryConfigurationResolver(retry);
return new PlainRetryConfiguration(
delegate.getMaxAttempts(),
delegate.getInitialInterval(),
delegate.getIntervalMultiplier(),
delegate.getMaxInterval());
}
@Override
public int getMaxAttempts() {
return delegate.getMaxAttempts();
}
@Override
public long getInitialInterval() {
return delegate.getInitialInterval();
}
@Override
public double getIntervalMultiplier() {
return delegate.getIntervalMultiplier();
}
@Override
public long getMaxInterval() {
return delegate.getMaxInterval();
}
// Adapts the externally-provided (property-driven) configuration to the
// RetryConfiguration interface, filling any absent field with the
// corresponding value from RetryConfiguration.DEFAULT.
private static class ExternalRetryConfigurationAdapter implements RetryConfiguration {
private final int maxAttempts;
private final IntervalConfigurationAdapter interval;
public ExternalRetryConfigurationAdapter(ExternalRetryConfiguration config) {
maxAttempts = config.maxAttempts.orElse(RetryConfiguration.DEFAULT.getMaxAttempts());
interval = config.interval
.map(IntervalConfigurationAdapter::new)
.orElse(new IntervalConfigurationAdapter());
}
@Override
public int getMaxAttempts() {
return maxAttempts;
}
@Override
public long getInitialInterval() {
return interval.initial;
}
@Override
public double getIntervalMultiplier() {
return interval.multiplier;
}
@Override
public long getMaxInterval() {
return interval.max;
}
}
// Immutable holder for the three interval-related settings; the no-arg
// constructor yields the SDK defaults.
private static class IntervalConfigurationAdapter {
private final long initial;
private final double multiplier;
private final long max;
IntervalConfigurationAdapter(ExternalIntervalConfiguration config) {
initial = config.initial.orElse(RetryConfiguration.DEFAULT.getInitialInterval());
multiplier = config.multiplier
.orElse(RetryConfiguration.DEFAULT.getIntervalMultiplier());
max = config.max.orElse(RetryConfiguration.DEFAULT.getMaxInterval());
}
IntervalConfigurationAdapter() {
this.initial = RetryConfiguration.DEFAULT.getInitialInterval();
this.multiplier = RetryConfiguration.DEFAULT.getIntervalMultiplier();
this.max = RetryConfiguration.DEFAULT.getMaxInterval();
}
}
}
|
# Pre-pull the Kubernetes add-on images (dashboard, helm tiller, flannel CNI,
# nginx ingress) so cluster bring-up does not depend on registry access.
# Commented-out lines preserve the previously used versions.
#docker pull k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
docker pull k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0
#docker pull gcr.io/kubernetes-helm/tiller:v2.9.1
docker pull gcr.io/kubernetes-helm/tiller:v2.11.0
docker pull quay.io/coreos/flannel:v0.10.0-amd64
# For Ingress
docker pull quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.14.0
docker pull k8s.gcr.io/defaultbackend:1.3
|
#!/usr/bin/env bash
# Download the monolithic BWIPP postscriptbarcode release and unpack just the
# files elaphe needs (barcode.ps plus license/readme) into the package tree.
set -euo pipefail
IFS=$'\n\t'
version='2017-04-04'
curl -fSL https://github.com/bwipp/postscriptbarcode/releases/download/$version/postscriptbarcode-monolithic-package-$version.tgz -o psbc.tgz
# --strip=2 drops the top-level directories; --no-anchored matches the three
# wanted file names anywhere in the archive.
tar -zxC ./elaphe/postscriptbarcode/ -f psbc.tgz --strip=2 --no-anchored barcode.ps LICENSE README
rm psbc.tgz
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package brooklyn.util.stream;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
public class ReaderInputStream extends InputStream {
/** Source Reader */
private Reader in;
private String encoding = System.getProperty("file.encoding");
private byte[] slack;
private int begin;
/**
* Construct a <{@link ReaderInputStream}
* for the specified {@link Reader}.
*
* @param reader {@link Reader}; must not be {@code null}.
*/
public ReaderInputStream(Reader reader) {
in = reader;
}
/**
* Construct a {@link ReaderInputStream}
* for the specified {@link Reader},
* with the specified encoding.
*
* @param reader non-null {@link Reader}.
* @param encoding non-null {@link String} encoding.
*/
public ReaderInputStream(Reader reader, String encoding) {
this(reader);
if (encoding == null) {
throw new IllegalArgumentException("encoding must not be null");
} else {
this.encoding = encoding;
}
}
/**
* Reads from the {@link Reader}, returning the same value.
*
* @return the value of the next character in the {@link Reader}.
*
* @exception IOException if the original {@link Reader} fails to be read
*/
public synchronized int read() throws IOException {
if (in == null) {
throw new IOException("Stream Closed");
}
byte result;
if (slack != null && begin < slack.length) {
result = slack[begin];
if (++begin == slack.length) {
slack = null;
}
} else {
byte[] buf = new byte[1];
if (read(buf, 0, 1) <= 0) {
result = -1;
}
result = buf[0];
}
if (result < -1) {
result += 256;
}
return result;
}
/**
* Reads from the {@link Reader} into a byte array
*
* @param b the byte array to read into
* @param off the offset in the byte array
* @param len the length in the byte array to fill
* @return the actual number read into the byte array, -1 at
* the end of the stream
* @exception IOException if an error occurs
*/
public synchronized int read(byte[] b, int off, int len)
throws IOException {
if (in == null) {
throw new IOException("Stream Closed");
}
while (slack == null) {
char[] buf = new char[len]; // might read too much
int n = in.read(buf);
if (n == -1) {
return -1;
}
if (n > 0) {
slack = new String(buf, 0, n).getBytes(encoding);
begin = 0;
}
}
if (len > slack.length - begin) {
len = slack.length - begin;
}
System.arraycopy(slack, begin, b, off, len);
if ((begin += len) >= slack.length) {
slack = null;
}
return len;
}
/**
* Marks the read limit of the StringReader.
*
* @param limit the maximum limit of bytes that can be read before the
* mark position becomes invalid
*/
public synchronized void mark(final int limit) {
try {
in.mark(limit);
} catch (IOException ioe) {
throw new RuntimeException(ioe.getMessage());
}
}
/**
* @return the current number of bytes ready for reading
* @exception IOException if an error occurs
*/
public synchronized int available() throws IOException {
if (in == null) {
throw new IOException("Stream Closed");
}
if (slack != null) {
return slack.length - begin;
}
if (in.ready()) {
return 1;
} else {
return 0;
}
}
/**
* @return false - mark is not supported
*/
public boolean markSupported () {
return false; // would be imprecise
}
/**
* Resets the StringReader.
*
* @exception IOException if the StringReader fails to be reset
*/
public synchronized void reset() throws IOException {
if (in == null) {
throw new IOException("Stream Closed");
}
slack = null;
in.reset();
}
/**
* Closes the Stringreader.
*
* @exception IOException if the original StringReader fails to be closed
*/
public synchronized void close() throws IOException {
if (in != null) {
in.close();
slack = null;
in = null;
}
}
} |
package fwcd.fructose.chiffre.huffman;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.function.BooleanSupplier;
import fwcd.fructose.structs.TreeNode;
/**
* A binary huffman tree.
*
* @param <T> - The item data type
*/
public class HuffmanTree implements Comparable<HuffmanTree>, TreeNode, Serializable {
    private static final long serialVersionUID = -8986725416846768964L;

    /** True for a leaf node holding a single character. */
    private final boolean isLeaf;
    /** Subtree reached by a 0 bit (null on leaves). */
    private HuffmanTree zero = null;
    /** Subtree reached by a 1 bit (null on leaves). */
    private HuffmanTree one = null;
    /** Occurrence probability of this node's character set. */
    private double probability;
    /** The character stored in a leaf (unused on inner nodes). */
    private char character;

    /** Creates a leaf node for {@code character} with the given probability. */
    public HuffmanTree(char character, double probability) {
        this.probability = probability;
        this.character = character;
        isLeaf = true;
    }

    /** Creates an inner node whose probability is the sum of both children's. */
    public HuffmanTree(HuffmanTree zero, HuffmanTree one) {
        this.zero = Objects.requireNonNull(zero);
        this.one = Objects.requireNonNull(one);
        probability = zero.probability + one.probability;
        isLeaf = false;
    }

    @Override
    public boolean isLeaf() {
        return isLeaf;
    }

    /**
     * Computes the bit path encoding {@code c}, or {@code null} when the
     * character does not occur anywhere in this subtree.
     */
    public boolean[] encode(char c) {
        if (isLeaf) {
            if (c == character) {
                return new boolean[0];
            }
            return null;
        }
        if (zero != null) {
            boolean[] viaZero = zero.encode(c);
            if (viaZero != null) {
                return concat(new boolean[] {false}, viaZero);
            }
        }
        if (one != null) {
            boolean[] viaOne = one.encode(c);
            if (viaOne == null) {
                return null;
            }
            return concat(new boolean[] {true}, viaOne);
        }
        throw new IllegalStateException("Node can't have zero childs and be non-leaf.");
    }

    /** Joins two bit arrays into a new array, head first. */
    private boolean[] concat(boolean[] head, boolean[] tail) {
        boolean[] joined = Arrays.copyOf(head, head.length + tail.length);
        System.arraycopy(tail, 0, joined, head.length, tail.length);
        return joined;
    }

    /** Returns the child selected by a single bit (true = one, false = zero). */
    public HuffmanTree getChild(boolean bit) {
        if (bit) {
            return one;
        }
        return zero;
    }

    public double getProbability() {
        return probability;
    }

    public char getCharacter() {
        return character;
    }

    @Override
    public int compareTo(HuffmanTree o) {
        return Double.compare(probability, o.probability);
    }

    /** Walks the tree, consuming one bit per level, until a leaf is reached. */
    public char decode(BooleanSupplier bitStream) {
        HuffmanTree node = this;
        while (!node.isLeaf) {
            node = node.getChild(bitStream.getAsBoolean());
        }
        return node.character;
    }

    @Override
    public List<? extends TreeNode> getChildren() {
        return Arrays.asList(zero, one);
    }
}
|
package io.opensphere.core.projection;
import org.junit.Test;
import io.opensphere.core.math.Vector3d;
import io.opensphere.core.model.Altitude.ReferenceLevel;
import io.opensphere.core.model.GeographicPosition;
import io.opensphere.core.model.LatLonAlt;
import io.opensphere.core.util.lang.UnexpectedEnumException;
import org.junit.Assert;
/** Test for {@link SphereBody}. */
public class SphereBodyTest
{
    /**
     * Test {@link SphereBody#convertToModel(GeographicPosition, Vector3d)}.
     */
    @Test
    public void testConvertToModel()
    {
        // Non-unit radius so a missing scale factor would be caught.
        double r = 5.3;
        SphereBody body = new SphereBody(r);
        // Cardinal points on the equator, including negative-longitude aliases.
        Assert.assertEquals(new Vector3d(r, 0., 0.),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(0., 0.)), Vector3d.ORIGIN));
        Assert.assertEquals(new Vector3d(0., r, 0.),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(0., 90.)), Vector3d.ORIGIN));
        Assert.assertEquals(new Vector3d(-r, 0., 0.),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(0., 180.)), Vector3d.ORIGIN));
        Assert.assertEquals(new Vector3d(0., -r, 0.),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(0., 270.)), Vector3d.ORIGIN));
        Assert.assertEquals(new Vector3d(0., -r, 0.),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(0., -90.)), Vector3d.ORIGIN));
        Assert.assertEquals(new Vector3d(-r, 0., 0.),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(0., -180.)), Vector3d.ORIGIN));
        // The poles.
        Assert.assertEquals(new Vector3d(0., 0., r),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(90., 0.)), Vector3d.ORIGIN));
        Assert.assertEquals(new Vector3d(0., 0., -r),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(-90., 0.)), Vector3d.ORIGIN));
        // An off-axis point (45/45) exercising both angles at once.
        Assert.assertEquals(new Vector3d(r / 2., r / 2., r / Math.sqrt(2)),
                body.convertToModel(new GeographicPosition(LatLonAlt.createFromDegrees(45., 45.)), Vector3d.ORIGIN));
    }

    /**
     * Test {@link SphereBody#convertToPosition(Vector3d, ReferenceLevel)}.
     */
    @Test
    public void testConvertToPosition()
    {
        double r = 5.3;
        SphereBody body = new SphereBody(r);
        // Relative to the ELLIPSOID, points on the sphere surface have altitude 0.
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(0., 0., 0., ReferenceLevel.ELLIPSOID)),
                body.convertToPosition(new Vector3d(r, 0., 0.), ReferenceLevel.ELLIPSOID));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(0., 90., 0., ReferenceLevel.ELLIPSOID)),
                body.convertToPosition(new Vector3d(0., r, 0.), ReferenceLevel.ELLIPSOID));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(0., 180., 0., ReferenceLevel.ELLIPSOID)),
                body.convertToPosition(new Vector3d(-r, 0., 0.), ReferenceLevel.ELLIPSOID));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(0., -90., 0., ReferenceLevel.ELLIPSOID)),
                body.convertToPosition(new Vector3d(0., -r, 0.), ReferenceLevel.ELLIPSOID));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(90., 0., 0., ReferenceLevel.ELLIPSOID)),
                body.convertToPosition(new Vector3d(0., 0., r), ReferenceLevel.ELLIPSOID));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(-90., 0., 0., ReferenceLevel.ELLIPSOID)),
                body.convertToPosition(new Vector3d(0., 0., -r), ReferenceLevel.ELLIPSOID));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(45., 45., 0., ReferenceLevel.ELLIPSOID)),
                body.convertToPosition(new Vector3d(r / 2., r / 2., r / Math.sqrt(2)), ReferenceLevel.ELLIPSOID));
        // Relative to the ORIGIN, the same surface points have altitude r.
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(0., 0., r, ReferenceLevel.ORIGIN)),
                body.convertToPosition(new Vector3d(r, 0., 0.), ReferenceLevel.ORIGIN));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(0., 90., r, ReferenceLevel.ORIGIN)),
                body.convertToPosition(new Vector3d(0., r, 0.), ReferenceLevel.ORIGIN));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(0., 180., r, ReferenceLevel.ORIGIN)),
                body.convertToPosition(new Vector3d(-r, 0., 0.), ReferenceLevel.ORIGIN));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(0., -90., r, ReferenceLevel.ORIGIN)),
                body.convertToPosition(new Vector3d(0., -r, 0.), ReferenceLevel.ORIGIN));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(90., 0., r, ReferenceLevel.ORIGIN)),
                body.convertToPosition(new Vector3d(0., 0., r), ReferenceLevel.ORIGIN));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(-90., 0., r, ReferenceLevel.ORIGIN)),
                body.convertToPosition(new Vector3d(0., 0., -r), ReferenceLevel.ORIGIN));
        Assert.assertEquals(new GeographicPosition(LatLonAlt.createFromDegreesMeters(45., 45., r, ReferenceLevel.ORIGIN)),
                body.convertToPosition(new Vector3d(r / 2., r / 2., r / Math.sqrt(2)), ReferenceLevel.ORIGIN));
    }

    /**
     * Test {@link SphereBody#convertToPosition(Vector3d, ReferenceLevel)} with
     * a bad {@link ReferenceLevel}..
     */
    @Test(expected = UnexpectedEnumException.class)
    public void testConvertToPositionBadReferenceLevel()
    {
        double r = 5.3;
        SphereBody body = new SphereBody(r);
        // TERRAIN is not supported by SphereBody and must raise.
        body.convertToPosition(new Vector3d(r, 0., 0.), ReferenceLevel.TERRAIN);
    }

    /**
     * Test {@link SphereBody#getDefaultNormalAtPosition(GeographicPosition)}.
     */
    @Test
    public void testGetDefaultNormalAtPosition()
    {
        // Surface normals are unit vectors, so these mirror testConvertToModel
        // with r == 1.
        Assert.assertEquals(new Vector3d(1., 0., 0.),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(0., 0.))));
        Assert.assertEquals(new Vector3d(0., 1., 0.),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(0., 90.))));
        Assert.assertEquals(new Vector3d(-1., 0., 0.),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(0., 180.))));
        Assert.assertEquals(new Vector3d(0., -1., 0.),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(0., 270.))));
        Assert.assertEquals(new Vector3d(0., -1., 0.),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(0., -90.))));
        Assert.assertEquals(new Vector3d(-1., 0., 0.),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(0., -180.))));
        Assert.assertEquals(new Vector3d(0., 0., 1.),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(90., 0.))));
        Assert.assertEquals(new Vector3d(0., 0., -1.),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(-90., 0.))));
        Assert.assertEquals(new Vector3d(.5, .5, 1. / Math.sqrt(2)),
                SphereBody.getDefaultNormalAtPosition(new GeographicPosition(LatLonAlt.createFromDegrees(45., 45.))));
    }

    /** Test constructor. */
    @SuppressWarnings("unused")
    @Test(expected = IllegalArgumentException.class)
    public void testSphereBody()
    {
        // A negative radius is invalid and must be rejected.
        new SphereBody(-1.);
    }
}
|
<filename>src/domain/repository/command/File.ts<gh_stars>1-10
import { FileEntity } from "../../entity/File"
import { FileId } from "../../types"
/**
 * Write-side (command) repository contract for file entities.
 */
export interface IFileCommandRepository {
    /** Persists a new file entity and resolves with its identifier. */
    add(file: FileEntity): Promise<FileId>
    /** Removes the given file entity; resolves with the success flag. */
    delete(file: FileEntity): Promise<boolean>
    /** Updates an existing file entity; resolves with the success flag. */
    update(file: FileEntity): Promise<boolean>
}
|
<reponame>janScheible/stjs-vuejs
package org.stjs.bridge.vuejs;
import org.stjs.bridge.vuejs.annotation.Lifecycle;
/**
*
* @author sj
*/
/**
 * Base class for ST-JS Vue.js components. Subclasses override the
 * {@link Lifecycle}-annotated hooks below; the empty default bodies make
 * every hook optional.
 */
public abstract class VueComponent {
    /** Called before the component instance is created. */
    @Lifecycle
    public void beforeCreate() {
    }

    /** Called after the component instance has been created. */
    @Lifecycle
    public void created() {
    }

    /** Called before the component is mounted into the DOM. */
    @Lifecycle
    public void beforeMount() {
    }

    /** Called after the component has been mounted into the DOM. */
    @Lifecycle
    public void mounted() {
    }

    /** Called before a reactive update re-renders the component. */
    @Lifecycle
    public void beforeUpdate() {
    }

    /** Called after a reactive update has re-rendered the component. */
    @Lifecycle
    public void updated() {
    }

    /** Called before the component is destroyed. */
    @Lifecycle
    public void beforeDestroy() {
    }

    /** Called after the component has been destroyed. */
    @Lifecycle
    public void destroyed() {
    }

    /** Emits a custom event with a payload; bridged to Vue's $emit. */
    public void $emit(String eventName, Object data) {
    }
}
|
<reponame>mykaelandrade/fiscal4j
package br.indie.fiscal4j.nfe310.classes.lote.consulta;
import br.indie.fiscal4j.DFAmbiente;
import br.indie.fiscal4j.DFBase;
import br.indie.fiscal4j.DFUnidadeFederativa;
import br.indie.fiscal4j.nfe310.classes.NFProtocolo;
import org.simpleframework.xml.*;
import java.time.ZonedDateTime;
import java.util.List;
@Root(name = "retConsReciNFe")
@Namespace(reference = "http://www.portalfiscal.inf.br/nfe")
public class NFLoteConsultaRetorno extends DFBase {
    private static final long serialVersionUID = -4164491132370082153L;

    // XML-bound fields; element/attribute names mirror the SEFAZ
    // "retConsReciNFe" schema and must not be changed.

    /** Layout version of the response ("versao" attribute). */
    @Attribute(name = "versao")
    private String versao;

    /** Environment the lot was processed in (production/homologation). */
    @Element(name = "tpAmb")
    private DFAmbiente ambiente;

    /** Version of the application that processed the lot. */
    @Element(name = "verAplic")
    private String versaoAplicacao;

    /** Receipt number of the queried lot; absent on some error responses. */
    @Element(name = "nRec", required = false)
    private String numeroRecibo;

    /** Processing status code ("cStat"). */
    @Element(name = "cStat")
    private String status;

    /** Timestamp at which the lot was received. */
    @Element(name = "dhRecbto")
    private ZonedDateTime dataHoraRecebimento;

    /** Human-readable description of the status code. */
    @Element(name = "xMotivo")
    private String motivo;

    /** Federative unit (state) that answered the query. */
    @Element(name = "cUF")
    private DFUnidadeFederativa uf;

    /** Optional auxiliary message code. */
    @Element(name = "cMsg", required = false)
    private String codigoMessage;

    /** Optional auxiliary message text. */
    @Element(name = "xMsg", required = false)
    private String mensagem;

    /** Per-invoice protocols returned for the lot, one per processed NF-e. */
    @ElementList(entry = "protNFe", inline = true, required = false)
    protected List<NFProtocolo> protocolos;

    public String getVersao() {
        return this.versao;
    }

    public void setVersao(final String versao) {
        this.versao = versao;
    }

    public DFAmbiente getAmbiente() {
        return this.ambiente;
    }

    public void setAmbiente(final DFAmbiente ambiente) {
        this.ambiente = ambiente;
    }

    public String getVersaoAplicacao() {
        return this.versaoAplicacao;
    }

    public void setVersaoAplicacao(final String versaoAplicacao) {
        this.versaoAplicacao = versaoAplicacao;
    }

    public String getNumeroRecibo() {
        return this.numeroRecibo;
    }

    public void setNumeroRecibo(final String numeroRecibo) {
        this.numeroRecibo = numeroRecibo;
    }

    public String getStatus() {
        return this.status;
    }

    public void setStatus(final String status) {
        this.status = status;
    }

    public String getMotivo() {
        return this.motivo;
    }

    public void setMotivo(final String motivo) {
        this.motivo = motivo;
    }

    public DFUnidadeFederativa getUf() {
        return this.uf;
    }

    public void setUf(final DFUnidadeFederativa uf) {
        this.uf = uf;
    }

    public List<NFProtocolo> getProtocolos() {
        return this.protocolos;
    }

    public void setProtocolos(final List<NFProtocolo> protocolos) {
        this.protocolos = protocolos;
    }

    public String getCodigoMessage() {
        return this.codigoMessage;
    }

    public void setCodigoMessage(final String codigoMessage) {
        this.codigoMessage = codigoMessage;
    }

    public String getMensagem() {
        return this.mensagem;
    }

    public void setMensagem(final String mensagem) {
        this.mensagem = mensagem;
    }

    public ZonedDateTime getDataHoraRecebimento() {
        return this.dataHoraRecebimento;
    }

    public void setDataHoraRecebimento(final ZonedDateTime dataHoraRecebimento) {
        this.dataHoraRecebimento = dataHoraRecebimento;
    }
}
class Constant:
    """Stores a generic robot parameter that has a constant value.

    The value may be defined using several components, one per P object.
    """

    def __init__(self, pObjects=None, values=None):
        """Create the constant parameter.

        :param pObjects: initially an array of strings, later cross-referenced
            to form an array of Pep P objects
        :param values: constant value components, one per P object
        """
        self.pObjects = pObjects
        # BUG FIX: the previous default of ``values=[]`` was a mutable default
        # argument, so every instance created without an explicit ``values``
        # shared (and could corrupt) the same list object.
        self.currentValue = values if values is not None else []

    def restorePobject(self):
        """Restore each P object's constant value.

        The value could have been reset to 0 if the object was part of a
        production function.
        """
        for i, pObject in enumerate(self.pObjects):
            pObject.value = self.currentValue[i]
    # end restorePobject()
// https://open.kattis.com/problems/drmmessages
#include <iostream>
#include <vector>
using namespace std;
typedef vector<int> vi;
// Solves Kattis "drmmessages": split the message in half, shift each half by
// letter values, rotate both halves by the combined sums, and add pairwise.
int main() {
    std::string message;
    std::cin >> message;

    const int half = static_cast<int>(message.size()) / 2;
    std::vector<int> firstHalf(half);
    std::vector<int> secondHalf(half);
    int firstSum = 0;
    int secondSum = 0;

    // Convert each half to 0-based letter values and accumulate their sums.
    for (int i = 0; i < half; ++i) {
        firstHalf[i] = message[i] - 'A';
        secondHalf[i] = message[i + half] - 'A';
        firstSum += firstHalf[i];
        secondSum += secondHalf[i];
    }

    // Rotating by its own sum and then adding the halves is equivalent to the
    // combined (a[i] + b[i] + c + d) mod 26 formula.
    for (int i = 0; i < half; ++i) {
        std::cout << static_cast<char>('A' + (firstHalf[i] + secondHalf[i] + firstSum + secondSum) % 26);
    }
    std::cout << std::endl;
}
|
import { PassportStrategy } from '@nestjs/passport';
import { Injectable, Logger, NotImplementedException, UnauthorizedException } from '@nestjs/common';
import { Strategy } from 'passport-github';
import { AccountsService } from '../accounts.service';
import { ConsulConfig, InjectConfig } from '@nestcloud/config';
import { LoginServiceTypes, LoginRequest, CreateRequest } from '@ultimatebackend/proto-schema/account';
@Injectable()
// Passport strategy that authenticates users via GitHub OAuth and resolves
// (or lazily creates) the matching account through AccountsService.
export class GithubStrategy extends PassportStrategy(Strategy) {
    logger = new Logger(this.constructor.name);

    constructor(
        @InjectConfig() private readonly config: ConsulConfig,
        private readonly accountService: AccountsService,
    ) {
        // OAuth app credentials are read from Consul under app.auth.github.*.
        super({
            clientID: config.get<string>('app.auth.github.clientID'),
            clientSecret: config.get<string>('app.auth.github.clientSecret'),
            callbackURL: config.get<string>('app.auth.github.callbackURL'),
            profileFields: ['id', 'email', 'read:user', 'user:email'],
        });
    }

    // Invoked by passport after GitHub redirects back with a token/profile.
    // Requires at least one email on the profile; otherwise bails out below.
    async validate(accessToken, refreshToken, profile, done): Promise<any> {
        if (profile && profile.emails.length > 0) {
            // NOTE(review): "<PASSWORD>" below is a redacted placeholder left by
            // sanitization — restore the real password handling before use.
            const logCmd: LoginRequest = {
                service: LoginServiceTypes.Github,
                params: {
                    accessToken,
                    userId: profile.id,
                    email: profile.emails[0].value,
                    password: <PASSWORD>,
                },
            };
            // Split display name into first/last; assumes "First Last" shape —
            // TODO confirm against profiles with a single-word display name.
            const names = profile.displayName.split(' ');
            const regCmd: CreateRequest = {
                service: LoginServiceTypes.Github,
                tokens: {
                    accessToken,
                    userId: profile.id,
                },
                email: profile.emails[0].value,
                firstname: names[0],
                lastname: names[1],
                password: <PASSWORD>,
                username: profile.username,
            };
            // Login if the account exists, otherwise register it in one call.
            const user = await this.accountService.validateOrCreateUser(logCmd, regCmd);
            if (!user) {
                throw new UnauthorizedException();
            }
            return user;
        }
        // Profiles without a public email are not supported.
        throw new NotImplementedException();
    }
}
|
<reponame>luoyhang003/sql-ddl-sync
const test = require('test')
test.setup()
var index = require("../../lib");
// Smoke tests for the library entry point: verifies the public exports and
// that each supported SQL dialect exposes the full expected API surface.
describe("index", function () {
    describe("exports", function () {
        it("should expose Sync function", function () {
            assert.exist(index.Sync)
            assert.equal(typeof index.Sync, 'function');
        });
        it("should expose dialect function", function () {
            assert.exist(index.dialect)
            assert.equal(typeof index.dialect, 'function');
        });
    });
    describe("#dialect", function () {
        ['mysql', 'postgresql', 'sqlite'].forEach(function (dialectName) {
            describe("should expose " + dialectName + " dialect", function () {
                var dialect = index.dialect(dialectName);
                it(`dialect ${dialectName} exists`, () => {
                    assert.exist(dialect);
                });
                // Functions every dialect must implement.
                ;[
                    'hasCollection',
                    'addPrimaryKey',
                    'dropPrimaryKey',
                    'addForeignKey',
                    'dropForeignKey',
                    'getCollectionProperties',
                    'createCollection',
                    'dropCollection',
                    'addCollectionColumn',
                    'renameCollectionColumn',
                    'modifyCollectionColumn',
                    'dropCollectionColumn',
                    'getCollectionIndexes',
                    'addIndex',
                    'removeIndex',
                    'getType',
                ]
                // sqlite has two extra dialect-specific helpers.
                .concat(
                    dialectName === 'sqlite' ? [
                        'processKeys',
                        'supportsType'
                    ] : []
                )
                .forEach(dialect_func => {
                    it(`should be function: ${dialect_func}`, () => {
                        assert.isFunction(dialect[dialect_func])
                    })
                })
            });
        });
    });
});
// Allow running this file directly instead of through the suite runner.
if (require.main === module) {
    test.run(console.DEBUG)
}
def isAnagram(s1, s2):
    """Return True when the two strings are anagrams of each other.

    Spaces are ignored and the comparison is case-insensitive.
    """
    normalized_first = sorted(s1.replace(" ", "").lower())
    normalized_second = sorted(s2.replace(" ", "").lower())
    return normalized_first == normalized_second
/**
 * Removes every occurrence of `string_to_remove` from `full_string`.
 *
 * BUG FIX: the previous implementation built a RegExp directly from the raw
 * input, so regex metacharacters ("." "*" "(" "+" ...) were interpreted as a
 * pattern instead of literal text (e.g. removing "." deleted every
 * character). Splitting on the literal substring removes exactly the given
 * text.
 *
 * @param {string} full_string - text to clean
 * @param {string} string_to_remove - exact substring to delete
 * @returns {string} the input with all occurrences removed
 */
function removeAllOccurrences(full_string, string_to_remove) {
    return full_string.split(string_to_remove).join("");
}
<reponame>bchu7796/plaRPC
package com.plarpc.implementation;
import com.google.protobuf.ByteString;
import com.plarpc.api.GrpcClientApi;
import com.plarpc.api.PlaRpcClientApi;
import com.plarpc.api.SerializationToolApi;
import java.lang.reflect.*;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Client-side dynamic proxy that turns local method calls on an interface
 * into remote gRPC invocations: arguments are serialized, shipped to
 * {@code host:port}, and the deserialized result is returned to the caller.
 */
public class PlaRpcClientImpl<T> implements PlaRpcClientApi, InvocationHandler{
    private static final Logger logger = Logger.getLogger(PlaRpcClientImpl.class.getName());

    /** Constructor of the generated proxy class, taking an InvocationHandler. */
    private Constructor<?> proxyConstructor;
    /** Fully-qualified name of the target interface. */
    private String className;
    /** Remote endpoint host. */
    private String host;
    /** Remote endpoint port. */
    private int port;

    /**
     * Initialize the proxy constructor with the class we wish to send the RPC to.
     *
     * @param clazz Class we wish to send the RPC to.
     *
     */
    public PlaRpcClientImpl(Class<T> clazz, String host, int port) {
        this.className = clazz.getName();
        this.host = host;
        this.port = port;
        try {
            // Generate (or look up) a proxy class implementing clazz and keep
            // its InvocationHandler constructor for rpc() to call later.
            this.proxyConstructor = Proxy.getProxyClass(clazz.getClassLoader(),
                    new Class[] { clazz }).getConstructor(InvocationHandler.class);
        } catch (NoSuchMethodException | SecurityException | IllegalArgumentException e) {
            logger.log(Level.SEVERE, "Cannot create PlaRpcHandlerClientImpl instance: ", e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Things to do when the target class method is invoked.
     *
     * @param proxy Proxy class instance.
     * @param method The method that is invoked.
     * @param args The arguments that is passed.
     *
     * @throws Throwable When serializing arguments or invoke method
     *                   through gRPC.
     *
     */
    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        String targetClassName = this.className;
        /* Serialize arguments */
        List<ByteString> serializedObjects = new ArrayList<>();
        if(args != null) {
            SerializationToolApi serializationTool = new SerializationToolImpl();
            try {
                for (int i = 0; i < args.length; i++) {
                    serializedObjects.add(serializationTool.toString(args[i]));
                }
            } catch (Exception e) {
                logger.log(Level.WARNING, "Failed to serialize objects: ", e);
                throw new RuntimeException(e);
            }
        }
        /* Invoke the method through gRPC */
        // A fresh channel per invocation; shutdown() in finally guarantees the
        // channel is released even when the remote call fails.
        GrpcClientApi grpcClient = new GrpcClientImpl(this.host, this.port);
        Object returnObject = null;
        try {
            returnObject = grpcClient.callMethod(method.getName(), serializedObjects);
        } catch (Exception e){
            logger.log(Level.WARNING, "Failed to invoke gRPC method: ", e);
            throw new RuntimeException(e);
        } finally{
            grpcClient.shutdown();
        }
        return returnObject;
    }

    /**
     * Build a proxy for the class we wish to RPC to.
     *
     * @return a proxy instance
     *
     */
    @Override
    public T rpc() {
        try{
            // Every call on the returned proxy routes through invoke() above.
            return (T) proxyConstructor.newInstance(new Object[] {this});
        } catch (InstantiationException | IllegalAccessException |
                IllegalArgumentException | InvocationTargetException e) {
            logger.log(Level.WARNING, "Cannot create proxy instance: ", e);
            throw new RuntimeException(e);
        }
    }
}
|
class Person:
    """Represents a person identified by a name and a job title."""

    def __init__(self, name, job):
        """Store the person's name and job."""
        self.name = name
        self.job = job

    def display_info(self):
        """Print a one-line summary of this person."""
        summary = f"Name: {self.name}, Job: {self.job}"
        print(summary)


# Creating an instance of the Person class
object3 = Person("John Doe", "Software Engineer")
# Calling the display_info method to print the person's information
object3.display_info()
<reponame>YourBetterAssistant/yourbetterassistant
import {
Client,
CommandInteraction,
GuildMember,
MessageEmbed,
} from "discord.js";
import serverConfSchema from "../Schemas/serverConfSchema";
// Per-guild cache of the configured admin/member/owner role ids.
const roles: { [key: string]: any } = {};
// Slash command that ends a user's mute by restoring the configured member role.
module.exports = {
    name: "unmute",
    description: "unmute",
    options: [{ name: "user", description: "user", type: 6, required: true }],
    run: async (client: Client, interaction: CommandInteraction) => {
        if (!interaction.guild) return;
        let user = interaction.options.getMember("user") as GuildMember;
        // Load the guild's role configuration from the database.
        let result = await serverConfSchema.findOne({
            _id: interaction.guild.id,
        });
        let admin = result?.adminroleID;
        let member = result?.memberroleID;
        let owner = result?.ownerroleID;
        roles[interaction.guild.id] = { admin, member, owner };
        let memberrole = roles[interaction.guild.id].member;
        // A member role is mandatory: unmuting works by replacing the user's
        // roles with it.
        if (!memberrole)
            return interaction.reply({
                content:
                    "I cannot unmute without a member role, please do b!serverconfig to set up `ROLES` so that i can give the user the specified roles for member when their unmute is up",
                ephemeral: true,
            });
        // NOTE(review): permission check compares against the stringified
        // bitfield — verify this matches PermissionFlags semantics.
        if (!interaction?.member?.permissions.toString().includes("MANAGE_ROLES"))
            return interaction.reply({
                content: "Invalid Permissions, Expected Perms `MANAGE_ROLES`",
                ephemeral: true,
            });
        // Refuse to act on privileged users.
        if (user.permissions.has("MANAGE_GUILD"))
            return interaction.reply({
                content:
                    "The user who you attempted to unmute has the permissions `MANAGE_GUILD` I am not allowed to unmute people with such permissions",
                ephemeral: true,
            });
        // Replace ALL of the user's roles with just the member role.
        user.roles.set([memberrole]);
        let embed = new MessageEmbed()
            .setTitle("Unmute")
            .setDescription(`${user}'s mute has ended`)
            .setColor("RANDOM");
        interaction?.channel?.send({ embeds: [embed] });
    },
};
//https://api.weky.xyz/canvas/whodidthis?image=${img}
|
package com.benmu.framework.manager.impl;
import android.app.Activity;
import android.app.AlertDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.os.Looper;
import android.text.TextUtils;
import android.util.Log;
import android.view.Gravity;
import android.widget.Toast;
import com.benmu.framework.R;
import com.benmu.framework.activity.AbstractWeexActivity;
import com.benmu.framework.manager.Manager;
import com.benmu.widget.view.BMAlert;
import com.benmu.widget.view.BMGridDialog;
import com.benmu.widget.view.BMLoding;
import java.util.List;
/**
* Created by Carry on 2017/8/7.
*/
/**
 * Collection of UI helpers (alert, loading spinner, toast, share dialog) used
 * by the framework; each nested class wraps one dialog type and guards
 * against showing over a finishing Activity or from a background thread.
 */
public class ModalManager extends Manager {
    /** Alert dialog helper with configurable title/message alignment and an optional prompt input. */
    public static class BmAlert {
        //        private static AlertDialog mBmAlert = null;
        private static BMAlert mBmAlert = null;

        public static void showAlert(Context context, String title, String message, String okBtn,
                                     DialogInterface.OnClickListener okListenner, String cancelBtn,
                                     DialogInterface.OnClickListener cancelListenner, String
                                             titleAlign, String contentAlign, boolean isPrompt,
                                     String promptType, String promptPlaceholder) {
            //        AlertDialog.Builder builder = new AlertDialog.Builder(context);
            BMAlert.Builder builder = new BMAlert.Builder(context);
            builder.setTitle(title).setMessage(message).setPositiveButton(okBtn, okListenner)
                    .setTitleAlign(titleAlign).setMessageAlign(contentAlign).setIsPrompt(isPrompt)
                    .setPromptType(promptType).setPromptPlaceholder(promptPlaceholder);
            //        builder.setTitle(title).setMessage(message).setPositiveButton(okBtn, okListenner);
            // The cancel button is optional.
            if (!TextUtils.isEmpty(cancelBtn)) {
                builder.setNegativeButton(cancelBtn, cancelListenner);
            }
            mBmAlert = builder.create();
            // Never show over an Activity that is already finishing.
            if (mBmAlert != null && !mBmAlert.isShowing() && !((Activity) context).isFinishing()) {
                mBmAlert.show();
            }
        }
    }

    /** Loading-spinner helper; delegates to the hosting AbstractWeexActivity. */
    public static class BmLoading {
        private static BMLoding mBmLoading = null;

        public static void showLoading(Context context, final String message, boolean
                canWatchOutsideTouch) {
            if (context instanceof AbstractWeexActivity) {
                final AbstractWeexActivity activity = (AbstractWeexActivity) context;
                if (activity.isFinishing()) return;
                // Hop to the main thread when called from a worker thread.
                if (Looper.myLooper() == Looper.getMainLooper()) {
                    activity.showLoadingDialog(message);
                } else {
                    activity.runOnUiThread(new Runnable() {
                        @Override
                        public void run() {
                            activity.showLoadingDialog(message);
                        }
                    });
                }
            }
        }

        public static void dismissLoading(Context context) {
            if (context instanceof AbstractWeexActivity) {
                final AbstractWeexActivity activity = (AbstractWeexActivity) context;
                if (activity.isFinishing()) return;
                // NOTE(review): closeDialog() is invoked unconditionally here and
                // again via runOnUiThread on worker threads — confirm the double
                // call is intended.
                activity.closeDialog();
                if (Looper.myLooper() == Looper.getMainLooper()) {
                } else {
                    activity.runOnUiThread(new Runnable() {
                        @Override
                        public void run() {
                            activity.closeDialog();
                        }
                    });
                }
            }
        }
    }

    /** Toast helper reusing a single Toast instance; main-thread only. */
    public static class BmToast {
        private static Toast mToast = null;

        private static void makeToast(Context context, String message, int duration) {
            if (TextUtils.isEmpty(message) || context == null) {
                return;
            }
            if (Looper.myLooper() == Looper.getMainLooper()) {
                // Lazily create one Toast and update it on later calls.
                if (mToast == null) {
                    mToast = Toast.makeText(context, message, duration);
                    //mToast.setGravity(Gravity.CENTER, 0, 0);
                }
                mToast.setDuration(duration);
                mToast.setText(message);
                mToast.show();
            } else {
                Log.i("BMModalManager", "toast can not show in child thread");
            }
        }

        public static void toast(Context context, String message, int duration) {
            makeToast(context, message, duration);
        }
    }

    /** Bottom-sheet share dialog showing items in a 4-column grid. */
    public static class BmShareDialog {
        private static BMGridDialog mDialog;

        public static void show(Activity activity, List<BMGridDialog.GridItem> list, BMGridDialog
                .OnItemClickListener onItemClickListener) {
            if (list == null) return;
            BMGridDialog.Builder builder = new BMGridDialog.Builder(activity, R.style
                    .ActionSheetDialogStyle);
            mDialog = builder.setGravity(Gravity.BOTTOM).setNegativeButton("取消",
                    null).setAdapter(new
                    BMGridDialog.Adapter(activity, list, 4)).setOnItemClickListenner
                    (onItemClickListener).build();
            mDialog.show();
        }

        public static void dismiss() {
            if (mDialog != null) {
                mDialog.hide();
            }
        }
    }
}
|
/**
* @jest-environment ./prisma/prisma-test-environment.js
*/
import { v4 as uuid } from 'uuid'
import { prisma } from '@infra/prisma/client'
import { redisConnection } from '@infra/redis/connection'
import { makeUnsubscribeUserHandler } from '../factories/UnsubscribeUserHandlerFactory'
// System under test: Kafka handler that removes a contact's subscriptions.
const unsubscribeUserHandler = makeUnsubscribeUserHandler()
describe('Unsubscribe User Handler (Kafka)', () => {
  // Release external connections so jest can exit cleanly.
  afterAll(async () => {
    redisConnection.disconnect()
    await prisma.$disconnect()
  })

  it('should be able to unsubscribe the user from a team', async () => {
    // Seed a contact subscribed to one team tag.
    await prisma.contact.create({
      data: {
        id: uuid(),
        name: '<NAME>',
        email: '<EMAIL>',
        integration_id: 'user-integration-id',
        subscriptions: {
          create: {
            id: uuid(),
            tag: {
              create: {
                id: uuid(),
                title: 'Tag 01',
                integration_id: 'team-integration-id',
              },
            },
          },
        },
      },
    })

    // Exercise the handler with the seeded user/team integration ids.
    await unsubscribeUserHandler.handle({
      userId: 'user-integration-id',
      teamsIds: ['team-integration-id'],
    })

    const contactInDatabase = await prisma.contact.findUnique({
      where: {
        integration_id: 'user-integration-id',
      },
      include: {
        subscriptions: {
          include: {
            tag: true,
          },
        },
      },
    })

    // The contact must remain but hold no subscriptions anymore.
    expect(contactInDatabase.integration_id).toEqual('user-integration-id')
    expect(contactInDatabase.subscriptions.length).toEqual(0)
  })
})
|
<filename>index.js
const core = require('@actions/core');
const exec = require('@actions/exec');
const github = require('@actions/github');
const io = require('@actions/io');
const ioUtil = require('@actions/io/lib/io-util');
const { readdirSync } = require('fs');
const path = require('path');
/**
 * GitHub Action entry point: packages the helm charts found in the source
 * repository and pushes the packaged charts (plus a regenerated index) to a
 * destination chart repository.
 */
async function run() {
  try {
    const accessToken = core.getInput('access-token');
    const sourceRepo = `${github.context.repo.owner}/${github.context.repo.repo}`;
    // Normalize the ref into a bare branch/tag name.
    let sourceBranch = github.context.ref.replace('refs/heads/', '')
    sourceBranch = sourceBranch.replace('refs/tags/', '')
    const sourceChartsDir = core.getInput('source-charts-folder') ? core.getInput('source-charts-folder') : 'charts';
    const destinationRepo = core.getInput('destination-repo');
    const destinationBranch = core.getInput('destination-branch') ? core.getInput('destination-branch') : 'master'
    const destinationChartsDir = core.getInput('destination-charts-folder') ?core.getInput('destination-charts-folder') : 'charts';
    // Helm v3 is the default unless helm-version explicitly says otherwise.
    let useHelm3 = true;
    if (!core.getInput('helm-version')) {
      useHelm3 = true
    }
    else useHelm3 = core.getInput('helm-version') === 'v3' ? true : false;
    console.log('Running Push Helm Chart job with:')
    console.log('Source Branch:' + sourceBranch)
    console.log('Source Charts Directory:' + sourceChartsDir)
    console.log('Destination Repo:' + destinationRepo)
    console.log('Destination Branch:' + destinationBranch)
    console.log('Destination Charts Directory:' + destinationChartsDir)
    // Both inputs below are mandatory; fail fast with a clear message.
    if (!accessToken) {
      core.setFailed(
        'No personal access token found. Please provide one by setting the `access-token` input for this action.'
      );
      return;
    }
    if (!destinationRepo) {
      core.setFailed(
        'No destination repository found. Please provide one by setting the `destination-repos` input for this action.'
      );
      return;
    }
    if (useHelm3) {
      await InstallHelm3Latest();
    }
    // Clone both repos, package charts from source into destination, rebuild
    // the chart index, then commit and push the destination repo.
    await ConfigureGit()
    await CloneGitRepo(sourceRepo, sourceBranch, accessToken, 'sourceRepo')
    await CloneGitRepo(destinationRepo, destinationBranch, accessToken, 'destinationRepo')
    await PackageHelmCharts(`./sourceRepo/${sourceChartsDir}`, `../../destinationRepo/${destinationChartsDir}`)
    await GenerateIndex()
    await AddCommitPushToGitRepo(`./destinationRepo`, `${github.context.sha}`, destinationBranch)
  } catch (error) {
    core.setFailed(error.message);
  }
}
const getDirectories = fileName =>
readdirSync(fileName, {
withFileTypes: true,
})
.filter(dirent => dirent.isDirectory())
.filter(dirent => !(/(^|\/)\.[^\/\.]/g).test(dirent))
.map(dirent => dirent.name);
// Downloads and runs the official Helm 3 installer script, then prints the
// installed version to the action log. Each step runs in the workspace root.
const InstallHelm3Latest = async () => {
  const steps = [
    `curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3`,
    `chmod 700 get_helm.sh`,
    `./get_helm.sh`,
    `helm version`,
  ];
  for (const command of steps) {
    await exec.exec(command, [], { cwd: `./` });
  }
}
// Sets the global git identity used for the publishing commit.
// The user name is the actor that triggered the workflow.
const ConfigureGit = async () => {
  await exec.exec(`git config --global user.name`, [github.context.actor], {
    cwd: './',
  });
  // NOTE(review): `${<EMAIL>` below is anonymization residue and is not valid
  // JavaScript as-is — presumably it was an expression building the actor's
  // noreply e-mail address. Restore the original expression before shipping.
  await exec.exec(
    `git config --global user.email`,
    [`${<EMAIL>`],
    { cwd: './' }
  );
}
// Clones a single branch of the given GitHub repository into cloneDirectory,
// authenticating via the personal access token embedded in the clone URL.
const CloneGitRepo = async (repoName, branchName, accessToken, cloneDirectory) => {
  const authenticatedURL = `https://${accessToken}@github.com/${repoName}.git`;
  const cloneArgs = ['-b', branchName, authenticatedURL, cloneDirectory];
  await exec.exec(`git clone`, cloneArgs, { cwd: './' });
}
// Packages every chart found directly under chartsDir into destinationChartsDir:
// for each chart directory, refresh its dependencies, then run `helm package`.
// NOTE(review): destinationChartsDir is resolved relative to chartsDir (the cwd
// of `helm package`) — hence the `../../destinationRepo/...` path the caller passes.
const PackageHelmCharts = async (chartsDir, destinationChartsDir) => {
  const chartDirectories = getDirectories(path.resolve(chartsDir));
  console.log('Charts dir content');
  // List the charts folder in the action log (hidden entries excluded).
  await exec.exec(`ls`, ['-I ".*"'], { cwd: chartsDir });
  for (const chartDirname of chartDirectories) {
    console.log(`Resolving helm chart dependency in directory ${chartDirname}`);
    await exec.exec(
      `helm dependency update`,
      [],
      { cwd: `${chartsDir}/${chartDirname}` }
    );
    console.log(`Packaging helm chart in directory ${chartDirname}`);
    await exec.exec(
      `helm package`,
      [chartDirname, '--destination', destinationChartsDir],
      { cwd: chartsDir }
    );
  }
  console.log('Packaged all helm charts.');
}
// Regenerates the Helm repository index (index.yaml) in the destination repo.
const GenerateIndex = async () => {
  console.log(`Building index.yaml`);
  // Fixed: exec.exec(commandLine, args, options) expects args as a string[];
  // the directory was previously passed as a bare string in the args position.
  await exec.exec(`helm repo index`, [`./destinationRepo`]);
  console.log(`Successfully generated index.yaml.`);
}
// Stages everything in workingDir, commits it with a message referencing the
// triggering commit SHA, and pushes the branch to origin.
const AddCommitPushToGitRepo = async (workingDir, gitSha, branch) => {
  const gitOptions = { cwd: workingDir };
  await exec.exec(`git status`, [], gitOptions);
  await exec.exec(`git add`, ['.'], gitOptions);
  await exec.exec(`git status`, [], gitOptions);
  const commitMessage = `Deployed via Helm Publish Action for ${gitSha}`;
  await exec.exec(`git commit`, ['-m', commitMessage], gitOptions);
  await exec.exec(`git push`, ['-u', 'origin', `${branch}`], gitOptions);
  console.log(`Pushed to ${workingDir}`);
}
// Kick off the action.
run();
|
// A single bank account. The identifying fields are immutable; the balance is
// the only mutable state and starts at zero.
class Account {
    let accountNumber: Int
    let accountHolder: String
    var balance: Double

    init(accountNumber: Int, accountHolder: String) {
        self.accountNumber = accountNumber
        self.accountHolder = accountHolder
        self.balance = 0
    }

    // Adds the given amount to the balance.
    func deposit(amount: Double) {
        balance += amount
    }

    // Withdraws the amount if funds are sufficient; returns whether it succeeded.
    func withdraw(amount: Double) -> Bool {
        guard amount <= balance else {
            return false
        }
        balance -= amount
        return true
    }

    // Returns the current balance.
    func checkBalance() -> Double {
        return balance
    }
}
// Manages a collection of accounts keyed by a monotonically increasing
// account number.
class Bank {
    var accounts: [Int: Account] = [:]
    var nextAccountNumber = 1

    // Opens a new account for the holder and returns its account number.
    func createAccount(accountHolder: String) -> Int {
        let number = nextAccountNumber
        nextAccountNumber += 1
        let account = Account(accountNumber: number, accountHolder: accountHolder)
        accounts[number] = account
        return number
    }

    // Deposits into the account if it exists; unknown accounts are ignored.
    func deposit(accountNumber: Int, amount: Double) {
        guard let account = accounts[accountNumber] else { return }
        account.deposit(amount: amount)
    }

    // Withdraws from the account; false for unknown accounts or insufficient funds.
    func withdraw(accountNumber: Int, amount: Double) -> Bool {
        guard let account = accounts[accountNumber] else { return false }
        return account.withdraw(amount: amount)
    }

    // Returns the account balance, or 0 for unknown accounts.
    func checkBalance(accountNumber: Int) -> Double {
        return accounts[accountNumber]?.checkBalance() ?? 0
    }
}
// Example usage
let bank = Bank()
let accountNumber1 = bank.createAccount(accountHolder: "Alice")
let accountNumber2 = bank.createAccount(accountHolder: "Bob")
bank.deposit(accountNumber: accountNumber1, amount: 1000)
bank.deposit(accountNumber: accountNumber2, amount: 500)
// The Bool results of withdraw are deliberately ignored in this demo.
bank.withdraw(accountNumber: accountNumber1, amount: 200)
bank.withdraw(accountNumber: accountNumber2, amount: 100)
print(bank.checkBalance(accountNumber: accountNumber1)) // Output: 800.0
print(bank.checkBalance(accountNumber: accountNumber2)) // Output: 400.0
import random
# Create class for environment
class TicTacToeEnv():
    """Minimal tic-tac-toe environment with a gym-style reset/step API.

    The board is a flat list of 9 cells ('X', 'O' or ' ').  Players alternate
    automatically: ``step`` places the current player's mark and returns
    ``(state, reward, done, info)``.
    """

    def __init__(self):
        self.state = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
        self.player = 'X'      # mark placed on the next step
        self.player_turn = 0   # number of moves already played

    def reset(self):
        """Clear the board, restore X as the first player, return the state."""
        self.state = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
        self.player = 'X'
        self.player_turn = 0
        return self.state

    def step(self, action):
        """Place the current player's mark on cell ``action`` (0-8).

        Returns ``(state, reward, done, info)`` where reward is 1 for a win,
        0.5 for a draw and 0 otherwise.
        NOTE(review): occupied cells are silently overwritten (preserved
        original behavior); callers must avoid illegal moves themselves.
        """
        # Set player mark
        self.state[action] = self.player
        # Check for winner
        winner = self.check_winner()
        done = False
        if winner == 'X' or winner == 'O':
            reward = 1
            done = True
        else:
            reward = 0
        # Switch players
        if self.player == 'X':
            self.player = 'O'
        else:
            self.player = 'X'
        # Draw: board full with no winner.  Bug fix: previously this branch
        # ran unconditionally on the 9th move and overwrote a winning reward
        # of 1 with the draw reward of 0.5 when the final move completed a line.
        if self.player_turn == 8 and not done:
            done = True
            reward = 0.5
        self.player_turn += 1
        # Return state, reward, done, info
        return self.state, reward, done, None

    def check_winner(self):
        """Return 'X' or 'O' if either has three in a line, else None."""
        win_state = [[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8],[0,4,8],[2,4,6]]
        for i in win_state:
            if self.state[i[0]] == self.state[i[1]] == self.state[i[2]] == 'X':
                return 'X'
            if self.state[i[0]] == self.state[i[1]] == self.state[i[2]] == 'O':
                return 'O'
        return None
# Play a game
env = TicTacToeEnv()
env.reset()
done = False
while not done:
    # NOTE(review): actions are sampled uniformly over all 9 cells, so an
    # occupied cell may be chosen and overwritten (see step()).
    action = random.randint(0, 8)
    state, reward, done, _ = env.step(action)
    print(env.state)
    if reward == 1 or reward == 0.5:
        # reward 1 -> a player completed a line; 0.5 -> board filled (draw).
        print("Winner: "+str(reward))
<reponame>billionare/FPSLighting
//--------------------------------------------------------------------------------------
// File: LoadSceneFromX.cpp
//
// Enables the sample to build a scene from an x-file ('scene.x').
// The x-file has been extended to include custom templates for specifying mesh filenames
// and camera objects within frames.
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//--------------------------------------------------------------------------------------
#include "DXUT.h"
#pragma warning(disable: 4995)
#include "LoadSceneFromX.h"
#include <d3dx9xof.h>
#pragma warning(default: 4995)
//--------------------------------------------------------------------------------------
// Forward declaration
//--------------------------------------------------------------------------------------
HRESULT ProcessFrame( ID3DXFileData* pFrame,
D3DXMATRIX* pParentMatrix,
vector <FRAMENODE>& vecFrameNodes );
//--------------------------------------------------------------------------------------
// The scene is generated from an xfile ('scene.x') that contains a hierarchy of
// transformations and mesh file refernces.
// Two private xfile templates are used, extending the xfile format to provide support
// for such features. These templates are found within a 'frame' datatype, which
// are organized into a hierarchy. For the purposes of this sample, the hierarchy
// is immediately collapsed, as there is need to animate objects within the scene.
// Template FrameMeshName includes:
// RenderPass: Default Render Pass, in which the mesh is to be rendered in
// FileName: Filename of a mesh that exists at this frame location
// Template FrameCamera includes:
// RotationScaler: Sets the speed by which the camera rotates
// MoveScaler: Sets the speed at which the camera moves
//--------------------------------------------------------------------------------------
// X-file template definitions registered with ID3DXFile before parsing.
// The header "xof 0303txt 0032" identifies a text-format x-file, version 3.3.
// NOTE: each GUID embedded in a template below must match the corresponding
// C GUID constant defined after this string — keep them in sync.
static const CHAR szTemplates[] = "xof 0303txt 0032\
template FrameMeshName { \
<c2a50aed-0ee9-4d97-8732-c14a2d8a7825> \
DWORD RenderPass;STRING FileName;} \
template FrameCamera { \
<f96e7de6-40ce-4847-b7e9-5875232e5201> \
FLOAT RotationScaler;FLOAT MoveScaler;} \
template Frame { \
<3d82ab46-62da-11cf-ab39-0020af71e433> \
[...] } \
template Matrix4x4 { \
<f6f23f45-7686-11cf-8f52-0040333594a3> \
array FLOAT matrix[16]; } \
template FrameTransformMatrix { \
<f6f23f41-7686-11cf-8f52-0040333594a3> \
Matrix4x4 frameMatrix; \
}";

//--------------------------------------------------------------------------------------
// GUIDS, corresponding to the above templates
//--------------------------------------------------------------------------------------
static const GUID gFrameMeshName =
{
    0xc2a50aed, 0xee9, 0x4d97, { 0x87, 0x32, 0xc1, 0x4a, 0x2d, 0x8a, 0x78, 0x25 }
};
static const GUID gCamera =
{
    0xf96e7de6, 0x40ce, 0x4847, { 0xb7, 0xe9, 0x58, 0x75, 0x23, 0x2e, 0x52, 0x1 }
};
static const GUID gFrameTransformMatrix =
{
    0xf6f23f41, 0x7686, 0x11cf, { 0x8f, 0x52, 0x0, 0x40, 0x33, 0x35, 0x94, 0xa3 }
};
static const GUID gFrame =
{
    0x3d82ab46, 0x62da, 0x11cf, { 0xab, 0x39, 0x0, 0x20, 0xaf, 0x71, 0xe4, 0x33 }
};
//--------------------------------------------------------------------------------------
// Reads the scene x-file, and adds the collapsed frame hierarchy to the vecFrameNodes Hierarchy.
//--------------------------------------------------------------------------------------
// Reads the scene x-file and appends the collapsed frame hierarchy to
// vecFrameNodes. Returns S_OK on success or the failing HRESULT.
// Fixed: removed the unused local `vector<FRAMENODE> vecNodes` — it was never
// written to or read (all frames go straight into vecFrameNodes) and its
// comment misleadingly claimed otherwise.
HRESULT LoadSceneFromX( vector <FRAMENODE>& vecFrameNodes, LPWSTR wszFileName )
{
    HRESULT hr;

    ID3DXFile* pXFile = NULL;
    ID3DXFileEnumObject* pEnum = NULL;

    // To begin reading the x-file, a ID3DXFile interface must be created
    V_RETURN( D3DXFileCreate( &pXFile ) );

    // To 'understand' the x-file, templates that are used within it must be registered.
    // NOTE(review): V_RETURN returns early on failure, which would leak pXFile
    // here (and pEnum below). Acceptable for sample code; a COM smart pointer
    // would make these paths leak-free.
    V_RETURN( pXFile->RegisterTemplates( szTemplates, sizeof( szTemplates ) - 1 ) );

    // Creating an ID3DXFileEnumObject allows the app to enumerate top-level data objects
    V_RETURN( pXFile->CreateEnumObject( wszFileName, D3DXF_FILELOAD_FROMWFILE, &pEnum ) );

    // Because the enum object was successfully created, the ID3DXFile interface pointer can be released
    SAFE_RELEASE( pXFile );

    SIZE_T toplevel_children = 0;

    // Retrieving the number of children allows the app to iterate across each child in a loop.
    V_RETURN( pEnum->GetChildren( &toplevel_children ) );

    for( SIZE_T i = 0; i < toplevel_children; i++ )
    {
        GUID guid;
        ID3DXFileData* pChild = NULL;

        // To read the data object, a pointer to its ID3DXFileData interface is obtained
        V_RETURN( pEnum->GetChild( i, &pChild ) );

        // The guid corresponding to the type of the data object can be obtained via GetType.
        // For the purposes of this sample, if the top-level data object is not a frame, it is ignored.
        // Any frames containing mesh filename references will be added to vecFrameNodes.
        V_RETURN( pChild->GetType( &guid ) );

        // Add any frames containing meshes to the vector of frames, vecFrameNodes
        if( guid == gFrame )
            ProcessFrame( pChild, NULL, vecFrameNodes );

        SAFE_RELEASE( pChild );
    }

    SAFE_RELEASE( pEnum );
    return S_OK;
}
//--------------------------------------------------------------------------------------
// Invoked by LoadSceneFromX - Process one frame located within the scene xfile
// Reads any meshes, or cameras, that may exist within this frame.
// Additionally, the frame's transform is collapsed (its matrix is concatenated with that
// of it's parent).
// Note: Assumes the parent node has been collapsed
//--------------------------------------------------------------------------------------
// Processes one 'Frame' data object: reads its transform, mesh references and
// cameras into a FRAMENODE, appends it to vecFrameNodes, then recurses into
// child frames with the collapsed (parent-concatenated) matrix.
HRESULT ProcessFrame( ID3DXFileData* pFrame, D3DXMATRIX* pParentMatrix, vector <FRAMENODE>& vecFrameNodes )
{
    HRESULT hr = S_OK;
    SIZE_T children = 0;
    FRAMENODE node;

    // In the event no corresponding frame transform matrix is located within the frame,
    // consider it to be identity.
    // Use the collapsed value of the parent matrix if it exists.
    // If no parent matrix exists, this is a top-level data frame.
    if( pParentMatrix )
        node.mat = *pParentMatrix;
    else
        D3DXMatrixIdentity( &node.mat );

    // For the purposes of this sample, the frame hierarchy is collapsed in-place as each frame is encountered.
    // A typical application may have a 'scene graph' arrangement of transformations, which are collapsed
    // as-needed at *runtime*.
    // However, since the hierarchy *is* collaped in-place, it must be ensured that the frame's transform matrix
    // has been updated and collapsed BEFORE processing child frames.
    // To defer processing of child frames, they are placed into the vecChildFrames container.
    vector <ID3DXFileData*> vecChildFrames;

    // Retrieving the number of children allows the app to iterate across each child in a loop.
    V_RETURN( pFrame->GetChildren( &children ) );

    for( SIZE_T i = 0; i < children; i++ )
    {
        ID3DXFileData* pChild = NULL;
        GUID guid;
        SIZE_T data_size = 0;
        LPCVOID pData = NULL;

        // To read the data object, a pointer to its ID3DXFileData interface is obtained
        V_RETURN( pFrame->GetChild( i, &pChild ) );

        // The guid corresponding to the type of the data object can be obtained via GetType.
        V_RETURN( pChild->GetType( &guid ) );

        // The child data object is a transformation matrix -- collapse it in place, and store
        // the collapsed matrix in node.mat
        if( guid == gFrameTransformMatrix )
        {
            // ID3DXFileData::Lock allows the app to read the actual data
            // If the data size of the object does not match the expectation, it is discarded
            if( SUCCEEDED( hr ) && SUCCEEDED( hr = pChild->Lock( &data_size, &pData ) ) )
            {
                if( sizeof( D3DXMATRIX ) == data_size )
                {
                    // Collapse the matrix
                    // If the frame has a parent, the collapsed matrix is the product of this frame and the collapsed matrix
                    // of the parent frame.
                    // Otherwise, the collapsed value is the matrix itself
                    if( pParentMatrix )
                        D3DXMatrixMultiply( &node.mat, ( D3DXMATRIX* )pData, pParentMatrix );
                    else
                        node.mat = *( D3DXMATRIX* )pData;
                }
                // Having read the required data, it can now be unlocked with ID3DXFileData::Unlock
                hr = pChild->Unlock();
            }
        }
        // If the child data is a mesh file name, the mesh is added to the frame's MESH_REFERENCE container
        else if( guid == gFrameMeshName )
        {
            if( SUCCEEDED( hr ) && SUCCEEDED( hr = pChild->Lock( &data_size, &pData ) ) )
            {
                // Layout per the FrameMeshName template: a DWORD RenderPass
                // followed immediately by the inline FileName string; the +4
                // offset assumes a 4-byte DWORD.
                if( sizeof( DWORD ) < data_size )
                    node.meshes.push_back( MESH_REFERENCE( *( DWORD* )pData, ( LPSTR )pData + 4 ) );
                hr = pChild->Unlock();
            }
        }
        // Processing the children must be delayed until it can be guaranteed that any
        // transform matrices have been applied. (eg, until the matrix stack has been collapsed).
        else if( guid == gFrame )
        {
            // A child frame has been found
            // Keep the data object around by adding a reference to it.
            // The child will eventually be released when it is processed by iterating the container
            // of child frames (vecChildFrames).
            pChild->AddRef();
            vecChildFrames.push_back( pChild );
        }
        // A camera object was found within the frame
        else if( guid == gCamera )
        {
            // Layout per the FrameCamera template: two FLOATs
            // (RotationScaler, MoveScaler).
            if( SUCCEEDED( hr ) && SUCCEEDED( hr = pChild->Lock( &data_size, &pData ) ) )
            {
                if( 2 * sizeof( FLOAT ) == data_size )
                    node.cameras.push_back( CAMERA_REFERENCE( *( FLOAT* )pData, *( ( FLOAT* )pData + 1 ) ) );
                hr = pChild->Unlock();
            }
        }

        // Now that the Child Data Object has been read, it can be released.
        // Exception: child 'Frame' objects have been AddRef'd, to defer processing
        SAFE_RELEASE( pChild );
    }

    // Add the Frame to the collapsed node container
    vecFrameNodes.push_back( node );

    // Each child frame that was deferred can now be processed
    // This occurs by recursively invoking ProcessFrame, once for each child frame
    for( vector <ID3DXFileData*>::iterator it = vecChildFrames.begin();
         it != vecChildFrames.end();
         it++
       )
    {
        if( SUCCEEDED( hr ) )
        {
            // Recurse into ProcessFrame for this child
            hr = ProcessFrame( *it, &node.mat, vecFrameNodes );
        }
        // Done processing the child 'Frame' data object, it can be released
        SAFE_RELEASE( *it );
    }

    return hr;
}
|
/*
Copyright 2021 The OpenYurt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package edgenode
const (
	// Paths on the edge node touched during yurthub conversion.
	KubeletSvcPath = "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
	OpenyurtDir    = "/var/lib/openyurt"
	StaticPodPath  = "/etc/kubernetes/manifests"
	// NOTE(review): "Condfig" is a typo, kept because the identifier is
	// exported and may be referenced by other packages.
	KubeCondfigPath = "/etc/kubernetes/kubelet.conf"
	KubeCaFile      = "/etc/kubernetes/pki/ca.crt"
	YurthubYamlName = "yurt-hub.yaml"
	KubeletConfName = "kubelet.conf"
	// Format string; %s is the path of the file being backed up.
	KubeletSvcBackup = "%s.bk"
	Hostname         = "/etc/hostname"
	// Regular expressions used to locate kubelet settings in config files.
	KubeletHostname        = "--hostname-override=[^\"\\s]*"
	KubeletEnvironmentFile = "EnvironmentFile=.*"
	// Shell commands run to apply kubelet changes.
	DaemonReload      = "systemctl daemon-reload"
	RestartKubeletSvc = "systemctl restart kubelet"
	// Local yurthub health endpoint probed after conversion.
	ServerHealthzServer  = "127.0.0.1:10267"
	ServerHealthzURLPath = "/v1/healthz"
	// Kubeconfig pointing the kubelet at the local yurthub proxy.
	// NOTE(review): indentation inside the templates below was reconstructed
	// to standard YAML layout; the source paste had lost all whitespace.
	OpenyurtKubeletConf = `
apiVersion: v1
clusters:
- cluster:
    server: http://127.0.0.1:10261
  name: default-cluster
contexts:
- context:
    cluster: default-cluster
    namespace: default
    user: default-auth
  name: default-context
current-context: default-context
kind: Config
preferences: {}
`
	// Static pod manifest for yurt-hub; __placeholders__ are substituted
	// before the file is written to StaticPodPath.
	YurthubTemplate = `
apiVersion: v1
kind: Pod
metadata:
  labels:
    k8s-app: yurt-hub
  name: yurt-hub
  namespace: kube-system
spec:
  volumes:
  - name: hub-dir
    hostPath:
      path: /var/lib/yurthub
      type: DirectoryOrCreate
  - name: kubernetes
    hostPath:
      path: /etc/kubernetes
      type: Directory
  - name: pem-dir
    hostPath:
      path: /var/lib/kubelet/pki
      type: Directory
  containers:
  - name: yurt-hub
    image: __yurthub_image__
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: hub-dir
      mountPath: /var/lib/yurthub
    - name: kubernetes
      mountPath: /etc/kubernetes
    - name: pem-dir
      mountPath: /var/lib/kubelet/pki
    command:
    - yurthub
    - --v=2
    - --server-addr=__kubernetes_service_addr__
    - --node-name=$(NODE_NAME)
    - --join-token=__join_token__
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /v1/healthz
        port: 10267
      initialDelaySeconds: 300
      periodSeconds: 5
      failureThreshold: 3
    resources:
      requests:
        cpu: 150m
        memory: 150Mi
      limits:
        memory: 300Mi
    securityContext:
      capabilities:
        add: ["NET_ADMIN", "NET_RAW"]
    env:
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
  hostNetwork: true
  priorityClassName: system-node-critical
  priority: 2000001000
`
	// RBAC granting yurt-hub read access to events, nodepools and its config map.
	YurthubClusterRole = `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: yurt-hub
rules:
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - get
- apiGroups:
  - apps.openyurt.io
  resources:
  - nodepools
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - configmaps
  resourceNames:
  - yurt-hub-cfg
  verbs:
  - list
  - watch
`
	// Binds the yurt-hub ClusterRole to every node (group system:nodes).
	YurthubClusterRoleBinding = `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: yurt-hub
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: yurt-hub
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
`
)
|
#!/bin/bash
# Bootstrap helper for deploying an OpenShift cluster: configures DNS, a load
# balancer and an HTTP server on this node, then launches the cluster machines
# through the cloud endpoint API.
cd $(dirname $0)
# Eleven positional parameters are expected (the guard checks for at least 10).
[ $# -lt 10 ] && echo "$0 <cluster_id> <cluster_name> <base_domain> <endpoint> <cookie> <ha_flag> <nworkers> <version> <virtual_type> <lb_external_ip> <host_record>" && exit 1
cluster_id=$1
cluster_name=$2
base_domain=$3
endpoint=$4
cookie=$5
haflag=$6
nworkers=$7
version=$8
virt_type=$9
lb_ext_ip=${10}
host_rec=${11}
# Upper bound for worker index loops.
seq_max=100
# First sudo-capable cloud-init user; its SSH key is embedded in the install config.
cloud_user=$(cat /etc/sudoers.d/*-cloud-init-users | grep NOPASSWD:ALL | tail -1 | cut -d' ' -f1)
declare -a workers_res
declare -a workers_ip
# setup_dns: assign the external floating IP to this instance, then install and
# configure dnsmasq to serve the cluster's A and etcd SRV records.
# NOTE(review): relies on $local_ip, $bstrap_ip, $master_*_ip and workers_ip[]
# being set elsewhere (launch_cluster) — confirm the call ordering.
function setup_dns()
{
    instID=$(cat /var/lib/cloud/data/instance-id | cut -d'-' -f2)
    count=0
    # Retry the floating-IP assignment up to 10 times.
    while [ -z "$public_ip" -a $count -lt 10 ]; do
        data=$(curl -k -XPOST $endpoint/floatingips/assign --cookie "$cookie" --data "instance=$instID" --data "floatingIP=$lb_ext_ip")
        public_ip=$(jq -r .networks[0].ip_address <<< $data)
        let count=$count+1
        sleep 1
    done
    #[ -z "$public_ip" ] && public_ip=$local_ip
    # Use the first upstream resolver; fall back to 8.8.8.8 when resolv.conf
    # is empty or points back at localhost.
    dns_server=$(grep '^nameserver' /etc/resolv.conf | head -1 | awk '{print $2}')
    if [ -z "$dns_server" -o "$dns_server" = "127.0.0.1" ]; then
        dns_server=8.8.8.8
    fi
    # Extra host records arrive colon-separated; convert to "IP name" pairs.
    [ -n "$host_rec" ] && host_rec="$(echo $host_rec | tr ':' ' ')"
    yum install -y dnsmasq
    cp /etc/dnsmasq.conf /etc/dnsmasq.conf.bak
    cat > /etc/dnsmasq.conf <<EOF
no-resolv
server=$dns_server
local=/${cluster_name}.${base_domain}/
address=/apps.${cluster_name}.${base_domain}/$local_ip
srv-host=_etcd-server-ssl._tcp.${cluster_name}.${base_domain},etcd-0.${cluster_name}.${base_domain},2380,0,10
EOF
    # HA clusters get SRV records for all three etcd members.
    if [ "$haflag" = "yes" ]; then
        cat >> /etc/dnsmasq.conf <<EOF
srv-host=_etcd-server-ssl._tcp.${cluster_name}.${base_domain},etcd-1.${cluster_name}.${base_domain},2380,0,10
srv-host=_etcd-server-ssl._tcp.${cluster_name}.${base_domain},etcd-2.${cluster_name}.${base_domain},2380,0,10
EOF
    fi
    cat >> /etc/dnsmasq.conf <<EOF
no-hosts
addn-hosts=/etc/dnsmasq.openshift.addnhosts
conf-dir=/etc/dnsmasq.d,.rpmnew,.rpmsave,.rpmorig
EOF
    # Host records for the API/LB endpoints, bootstrap and master nodes.
    cat > /etc/dnsmasq.openshift.addnhosts <<EOF
$host_rec
$local_ip dns.${cluster_name}.${base_domain}
$local_ip loadbalancer.${cluster_name}.${base_domain} api.${cluster_name}.${base_domain} lb.${cluster_name}.${base_domain}
$local_ip api-int.${cluster_name}.${base_domain}
$bstrap_ip bootstrap.${cluster_name}.${base_domain}
$master_0_ip master-0.${cluster_name}.${base_domain} etcd-0.${cluster_name}.${base_domain}
$master_1_ip master-1.${cluster_name}.${base_domain} etcd-1.${cluster_name}.${base_domain}
$master_2_ip master-2.${cluster_name}.${base_domain} etcd-2.${cluster_name}.${base_domain}
EOF
    # Append one record per launched worker.
    for i in $(seq 0 $seq_max); do
        if [ -n "${workers_ip[$i]}" ]; then
            cat >> /etc/dnsmasq.openshift.addnhosts <<EOF
${workers_ip[$i]} worker-$i.${cluster_name}.${base_domain}
EOF
        fi
    done
    # Point this host at the local dnsmasq instance.
    echo -e "nameserver 127.0.0.1\nsearch ${cluster_name}.${base_domain}" > /etc/resolv.conf
    systemctl restart dnsmasq
    systemctl enable dnsmasq
}
# setup_lb: install haproxy and load-balance the cluster endpoints:
# Kubernetes API (6443), machine-config server (22623) and the ingress
# routers (443/80) across the worker nodes.
function setup_lb()
{
    yum install -y haproxy
    haconf=/etc/haproxy/haproxy.cfg
    cp $haconf ${haconf}.bak
    cat > $haconf <<EOF
global
    log 127.0.0.1 local2 info
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon
defaults
    timeout connect 5s
    timeout client 30s
    timeout server 30s
    log global
frontend kubernetes_api
    bind 0.0.0.0:6443
    default_backend kubernetes_api
frontend machine_config
    bind 0.0.0.0:22623
    default_backend machine_config
frontend router_https
    bind 0.0.0.0:443
    default_backend router_https
frontend router_http
    mode http
    option httplog
    bind 0.0.0.0:80
    default_backend router_http
backend kubernetes_api
    balance roundrobin
    option ssl-hello-chk
    server bootstrap bootstrap.${cluster_name}.${base_domain}:6443 check
    server master-0 master-0.${cluster_name}.${base_domain}:6443 check
    server master-1 master-1.${cluster_name}.${base_domain}:6443 check
    server master-2 master-2.${cluster_name}.${base_domain}:6443 check
backend machine_config
    balance roundrobin
    option ssl-hello-chk
    server bootstrap bootstrap.${cluster_name}.${base_domain}:22623 check
    server master-0 master-0.${cluster_name}.${base_domain}:22623 check
    server master-1 master-1.${cluster_name}.${base_domain}:22623 check
    server master-2 master-2.${cluster_name}.${base_domain}:22623 check
backend router_https
    balance roundrobin
    option ssl-hello-chk
EOF
    # One HTTPS router backend entry per worker.
    for i in $(seq 0 $seq_max); do
        if [ $i -lt $nworkers ]; then
            cat >> $haconf <<EOF
    server worker-$i worker-$i.${cluster_name}.${base_domain}:443 check
EOF
        fi
    done
    cat >> $haconf <<EOF
backend router_http
    mode http
    balance roundrobin
EOF
    # One HTTP router backend entry per worker.
    for i in $(seq 0 $seq_max); do
        if [ $i -lt $nworkers ]; then
            cat >> $haconf <<EOF
    server worker-$i worker-$i.${cluster_name}.${base_domain}:80 check
EOF
        fi
    done
    systemctl restart haproxy
    systemctl enable haproxy
}
# setup_nginx: serve static install artifacts (CoreOS images, ignition configs)
# over HTTP on port 8080. Assumes nginx is already installed (the yum line is
# intentionally commented out).
function setup_nginx()
{
    #yum install -y nginx
    cp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bak
    # Dollar signs inside the heredoc are escaped so nginx variables are
    # written literally instead of being expanded by the shell.
    cat > /etc/nginx/nginx.conf <<EOF
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    log_format main '\$remote_addr - \$remote_user [\$time_local] "\$request" '
                    '\$status \$body_bytes_sent "\$http_referer" '
                    '"\$http_user_agent" "\$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    include /etc/nginx/conf.d/*.conf;
    server {
        listen 8080 default_server;
        listen [::]:8080 default_server;
        server_name _;
        root /usr/share/nginx/html;
        # Load configuration files for the default server block.
        include /etc/nginx/default.d/*.conf;
        location / {
        }
        error_page 404 /404.html;
        location = /40x.html {
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
        }
    }
}
EOF
    systemctl restart nginx
    systemctl enable nginx
}
# download_pkgs: fetch the version/virt-type specific ocd.conf from the
# endpoint, then download the CoreOS images into the nginx docroot and the
# openshift installer/client tarballs into /opt.
# NOTE(review): $coreos_image_url, $coreos_rootfs_url, $openshift_installer and
# $openshift_client are expected to be defined by the sourced ocd.conf.
function download_pkgs()
{
    cd /opt
    conf_url=$endpoint/misc/openshift/ocd.conf
    [ -n "$version" ] && conf_url=${conf_url}.${version}
    [ -n "$virt_type" ] && conf_url=${conf_url}.${virt_type}
    wget --no-check-certificate $conf_url -O ocd.conf
    source ocd.conf
    wget --no-check-certificate -O /usr/share/nginx/html/rhcos.raw.gz $coreos_image_url
    wget --no-check-certificate -O /usr/share/nginx/html/rhcos-rootfs.img $coreos_rootfs_url
    wget --no-check-certificate -O openshift-install-linux.tgz $openshift_installer
    wget --no-check-certificate -O openshift-client-linux.tgz $openshift_client
    tar -zxf openshift-install-linux.tgz
    tar -zxf openshift-client-linux.tgz
    cp kubectl oc /usr/bin/
}
# ignite_files: build install-config.yaml, generate the manifests and ignition
# configs with openshift-install, and publish the .ign files via nginx.
# NOTE(review): `cat | base64 -d` reads extra install-config fragments (e.g.
# pullSecret) from this function's stdin — confirm how the caller feeds it.
function ignite_files()
{
    echo "~~~~~~~~~start to ignite file~~~~~~~~~"
    pwd
    parts=$(cat | base64 -d | sed -s 's/\r//')
    ssh_key=$(cat /home/$cloud_user/.ssh/authorized_keys | tail -1)
    #rm -rf $cluster_name
    #mkdir $cluster_name
    # Single-master unless HA was requested.
    mreplica=1
    [ "$haflag" = "yes" ] && mreplica=3
    cat > install-config.yaml <<EOF
apiVersion: v1
baseDomain: $base_domain
compute:
- hyperthreading: Enabled
  name: worker
  replicas: 0
  architecture: amd64
controlPlane:
  hyperthreading: Enabled
  name: master
  replicas: $mreplica
  architecture: amd64
metadata:
  name: $cluster_name
networking:
  clusterNetwork:
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  networkType: OpenShiftSDN
  serviceNetwork:
  - 172.30.0.0/16
platform:
  none: {}
fips: false
sshKey: '$ssh_key'
$parts
EOF
    sed -i "/^$/d" install-config.yaml
    #sed -i "/^{}/d" install-config.yaml
    echo "start to backup"
    # openshift-install consumes install-config.yaml; keep a copy for debugging.
    mkdir /opt/backup
    cp install-config.yaml /opt/backup
    cd /opt/$cluster_name
    echo " create manifests now" >> /tmp/cloudland.log
    ../openshift-install create manifests
    # Keep user workloads off the masters.
    sed -i "s/mastersSchedulable: true/mastersSchedulable: false/" manifests/cluster-scheduler-02-config.yml
    echo " manifests running completed " >> /tmp/cloudland.log
    cp -rf ../$cluster_name /opt/backup
    pwd
    echo " starting to create ignition-configs file " >> /tmp/cloudland.log
    ../openshift-install create ignition-configs
    echo " ignition-configs file completed" >> /tmp/cloudland.log
    cp -rf ../$cluster_name /opt/backup
    # Publish the ignition configs so booting nodes can fetch them over HTTP.
    ignite_dir=/usr/share/nginx/html/ignition
    rm -rf $ignite_dir
    mkdir $ignite_dir
    cp *.ign $ignite_dir
    chmod a+r $ignite_dir/*
    echo "copy ignition file completed " >> /tmp/cloudland.log
    # Convenience shell setup for root and the cloud user.
    cat >>/root/.bashrc <<EOF
export KUBECONFIG=/opt/$cluster_name/auth/kubeconfig
export PS1='[\u@\h.$cluster_name \w]\\$ '
EOF
    cat >>/home/$cloud_user/.bashrc <<EOF
export PS1='[\u@\h.$cluster_name \w]\\$ '
EOF
}
# setup_nfs_pv: export a local directory over NFS and register it as a
# PersistentVolume, then point the internal image registry at PVC storage.
# NOTE(review): the 192.168.91.* addresses are hard-coded to this deployment's
# network — confirm they match the actual cluster subnet.
function setup_nfs_pv()
{
    cd /opt/$cluster_name
    mkdir data
    chmod a+rw data
    yum -y install nfs-utils nfs-utils-lib
    systemctl start rpcbind
    systemctl start nfs
    systemctl start nfslock
    systemctl enable rpcbind
    systemctl enable nfs
    systemctl enable nfslock
    cat >/etc/exports <<EOF
/opt/$cluster_name/data 192.168.91.0/24(rw,sync,no_root_squash,no_subtree_check,insecure)
EOF
    exportfs -a
    cat >nfs-pv.yaml <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv
spec:
  capacity:
    storage: 100Gi
  accessModes:
  - ReadWriteMany
  nfs:
    path: /opt/$cluster_name/data
    server: 192.168.91.8
  persistentVolumeReclaimPolicy: Recycle
EOF
    ../oc create -f nfs-pv.yaml
    ../oc patch configs.imageregistry/cluster --type merge --patch '{"spec":{"storage":{"pvc":{"claim":""}}}}'
    cd -
}
# create_storage: register a GlusterFS-backed StorageClass named gp2.
# NOTE(review): the resturl points at a hard-coded heketi endpoint — confirm
# it exists in the target environment.
function create_storage()
{
    cd /opt/$cluster_name
    cat >storage.yaml <<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: gp2
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: 'http://192.168.91.199:8080'
reclaimPolicy: Delete
volumeBindingMode: Immediate
EOF
    ../oc create -f storage.yaml
    cd -
}
# set_autocsr: install an hourly cron job that auto-approves any pending
# certificate signing requests (needed as nodes join or renew kubelet certs).
function set_autocsr()
{
    cat >/etc/cron.hourly/50autocsr <<EOF
#!/bin/bash
export KUBECONFIG=/opt/$cluster_name/auth/kubeconfig
/opt/oc get csr -ojson | jq -r '.items[] | select(.status == {} ) | .metadata.name' | xargs /opt/oc adm certificate approve
EOF
    chmod +x /etc/cron.hourly/50autocsr
}
# launch_cluster: create the bootstrap, master and worker instances through the
# cloud endpoint API and record their IP addresses (used later by setup_dns).
# Fixes: log writes after the initial `date >` now append with '>>' (they
# previously truncated /tmp/cloudland.log on every write, so even the header
# written one line earlier was lost), and array elements in log lines use the
# ${workers_ip[i]} form ($workers_ip[0] expanded element 0 plus a literal "[0]").
function launch_cluster()
{
    if [ ! -d $cluster_name ];then
        mkdir $cluster_name
    fi
    cd /opt/$cluster_name
    bstrap_res=$(curl -k -XPOST $endpoint/openshifts/$cluster_id/launch --cookie $cookie --data "hostname=bootstrap.${cluster_name}.${base_domain}&ipaddr=${local_ip}")
    bstrap_interfaces=$(curl -k -s -H "X-Json-Format: yes" -XGET $endpoint/instances?q=bootstrap.${cluster_name}.${base_domain} --cookie $cookie)
    # Start a fresh log, then append to it for the rest of the run.
    date > /tmp/cloudland.log
    echo $bstrap_interfaces >> /tmp/cloudland.log
    bstrap_ID=$(jq -r .'instances[0].ID' <<< $bstrap_interfaces)
    bstrap_ip=$(jq -r .'instances[0].Interfaces[0].Address.Address' <<< $bstrap_interfaces)
    bstrap_ip=${bstrap_ip%/*}
    echo " boostrapIP is $bstrap_ip" >> /tmp/cloudland.log
    echo "~~~~~~~++++~~~~~~"
    curl -k -XPOST $endpoint/openshifts/$cluster_id/state --cookie $cookie --data "status=bootstrap"
    sleep 3
    curl -k -XPOST $endpoint/openshifts/$cluster_id/state --cookie $cookie --data "status=masters"
    master_0_res=$(curl -k -XPOST $endpoint/openshifts/$cluster_id/launch --cookie $cookie --data "hostname=master-0.${cluster_name}.${base_domain}&ipaddr=${local_ip}")
    master_0_interfaces=$(curl -k -s -H "X-Json-Format: yes" -XGET $endpoint/instances?q=master-0.${cluster_name}.${base_domain} --cookie $cookie)
    master_0_ip=$(jq -r .'instances[0].Interfaces[0].Address.Address' <<< $master_0_interfaces)
    master_0_ip=${master_0_ip%/*}
    echo " master_0_ip is $master_0_ip" >> /tmp/cloudland.log
    sleep 5
    # HA clusters get two additional masters.
    if [ "$haflag" = "yes" ]; then
        master_1_res=$(curl -k -XPOST $endpoint/openshifts/$cluster_id/launch --cookie $cookie --data "hostname=master-1.${cluster_name}.${base_domain}&ipaddr=${local_ip}")
        master_1_interfaces=$(curl -k -s -H "X-Json-Format: yes" -XGET $endpoint/instances?q=master-1.${cluster_name}.${base_domain} --cookie $cookie)
        master_1_ip=$(jq -r .'instances[0].Interfaces[0].Address.Address' <<< $master_1_interfaces)
        master_1_ip=${master_1_ip%/*}
        echo " master_1_ip is $master_1_ip" >> /tmp/cloudland.log
        sleep 5
        master_2_res=$(curl -k -XPOST $endpoint/openshifts/$cluster_id/launch --cookie $cookie --data "hostname=master-2.${cluster_name}.${base_domain}&ipaddr=${local_ip}")
        master_2_interfaces=$(curl -k -s -H "X-Json-Format: yes" -XGET $endpoint/instances?q=master-2.${cluster_name}.${base_domain} --cookie $cookie)
        master_2_ip=$( jq -r .'instances[0].Interfaces[0].Address.Address' <<< $master_2_interfaces)
        master_2_ip=${master_2_ip%/*}
        echo " master_2_ip is $master_2_ip" >> /tmp/cloudland.log
        sleep 5
    fi
    # start worker
    curl -k -XPOST $endpoint/openshifts/$cluster_id/state --cookie $cookie --data "status=workers"
    workers_res[0]=$(curl -k -XPOST $endpoint/openshifts/$cluster_id/launch --cookie $cookie --data "hostname=worker-0.${cluster_name}.${base_domain}&ipaddr=${local_ip}")
    workers_ip[0]=$(curl -k -s -H "X-Json-Format: yes" -XGET $endpoint/instances?q=worker-0.${cluster_name}.${base_domain} --cookie $cookie | jq -r .'instances[0].Interfaces[0].Address.Address')
    workers_ip[0]=${workers_ip[0]%/*}
    echo " worker_0_ip is ${workers_ip[0]}" >> /tmp/cloudland.log
    sleep 5
    workers_res[1]=$(curl -k -XPOST $endpoint/openshifts/$cluster_id/launch --cookie $cookie --data "hostname=worker-1.${cluster_name}.${base_domain}&ipaddr=${local_ip}")
    workers_ip[1]=$(curl -k -s -H "X-Json-Format: yes" -XGET $endpoint/instances?q=worker-1.${cluster_name}.${base_domain} --cookie $cookie | jq -r .'instances[0].Interfaces[0].Address.Address')
    workers_ip[1]=${workers_ip[1]%/*}
    echo "~~~~~~~~+++++++~~~~~~~~"
    echo "worker_1_ip is ${workers_ip[1]}" >> /tmp/cloudland.log
    # Launch any remaining workers beyond the first two.
    # Note: $more is intentionally global — wait_ocd reads it later.
    let more=$nworkers-2
    for i in $(seq 1 $more); do
        let index=$i+1
        let last=$index+20
        workers_res[$index]=$(curl -k -XPOST $endpoint/openshifts/$cluster_id/launch --cookie $cookie --data "hostname=worker-$index.${cluster_name}.${base_domain}&ipaddr=${local_ip}")
        workers_ip[$index]=$(curl -k -s -H "X-Json-Format: yes" -XGET $endpoint/instances?q=worker-$index.${cluster_name}.${base_domain} --cookie $cookie | jq -r .'instances[0].Interfaces[0].Address.Address')
        workers_ip[$index]=${workers_ip[$index]%/*}
        echo " worker_($index)_ip is ${workers_ip[$index]}" >> /tmp/cloudland.log
        sleep 5
    done
}
# Wait for the OpenShift installation to complete, then tear down the
# temporary bootstrap node and approve node CSRs until the expected node
# count is Ready.
# Globals read: bstrap_ip, bstrap_ID, endpoint, cookie, cluster_id, haflag, more
# Side effects: deletes the bootstrap instance (twice, idempotently), edits
# /etc/haproxy/haproxy.cfg, approves pending CSRs, calls set_autocsr.
function wait_ocd()
{
# Block until the bootstrap API server answers on port 6443.
while true; do
sleep 5
nc -zv $bstrap_ip 6443
[ $? -eq 0 ] && break
done
../openshift-install wait-for bootstrap-complete --log-level debug
echo "bootstrap-complete runnning completed " >> /tmp/cloudland.log
# Bootstrap node is no longer needed once bootstrap-complete returns.
curl -k -XDELETE $endpoint/instances/$bstrap_ID --cookie $cookie
sleep 5
# delete bootstrap record in haproxy
haconf=/etc/haproxy/haproxy.cfg
systemctl stop haproxy
sed -i "/bootstrap/d" $haconf
systemctl start haproxy
# Expected Ready nodes: 1 master + 2 workers, or 3 masters + 2 workers for HA.
nodes=3
[ "$haflag" = "yes" ] && nodes=5
export KUBECONFIG=auth/kubeconfig
# Approve pending CSRs (status == {}) until enough nodes report Ready.
while true; do
../oc get csr -ojson | jq -r '.items[] | select(.status == {} ) | .metadata.name' | xargs ../oc adm certificate approve
sleep 5
count=$(../oc get nodes | grep -c Ready)
[ "$count" -ge "$nodes" ] && break
done
sleep 60
# Keep approving CSRs until the image-registry cluster operator exists.
while true; do
sleep 5
../oc get csr -ojson | jq -r '.items[] | select(.status == {} ) | .metadata.name' | xargs ../oc adm certificate approve
../oc get clusteroperators image-registry
[ $? -eq 0 ] && break
done
#setup_nfs_pv
../openshift-install wait-for install-complete
echo "install-complete" >> /tmp/cloudland.log
curl -k -XPOST $endpoint/openshifts/$cluster_id/state --cookie $cookie --data "status=complete"
curl -k -XDELETE $endpoint/instances/$bstrap_ID --cookie $cookie
# Account for the extra workers launched beyond the first two ($more).
let nodes=$nodes+$more
while true; do
sleep 5
../oc get csr -ojson | jq -r '.items[] | select(.status == {} ) | .metadata.name' | xargs ../oc adm certificate approve
count=$(../oc get nodes | grep -c Ready)
[ "$count" -ge "$nodes" ] && break
done
set_autocsr
# create_storage
}
# --- Top-level script body: host preparation and cluster bring-up ---
setenforce Permissive
# Install the LB yum repo only when it exists on the endpoint
# (grep of the HEAD response detects a 404; $? -ne 0 means "not 404").
curl -I -k $endpoint/misc/openshift/ocd_lb_yum.repo --cookie $cookie | grep '404 Not Found'
[ $? -ne 0 ] && curl -k $endpoint/misc/openshift/ocd_lb_yum.repo -o /etc/yum.repos.d/oc.repo
# Make the permissive SELinux mode persistent across reboots.
sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
[ $(uname -m) != s390x ] && yum -y install epel-release
# s390x hosts need rngd for entropy during crypto-heavy installation.
[ "$(uname -m)" = "s390x" ] && yum -y install rng-tools && systemctl start rngd
# First IPv4 address with a broadcast entry = this host's primary address.
local_ip=$(ip addr | grep "inet .*brd" | head -1 | awk '{print $2}' | cut -d'/' -f1)
systemctl stop firewalld
systemctl disable firewalld
systemctl mask firewalld
yum -y install wget jq nc nginx
# Drive the installation end-to-end; these helpers are defined earlier in
# the file.
download_pkgs
launch_cluster
setup_dns
setup_lb
setup_nginx
ignite_files
wait_ocd
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2011:0920
#
# Security announcement date: 2011-07-05 18:20:06 UTC
# Script generation date: 2017-01-01 21:13:11 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - krb5-appl-clients.x86_64:1.0.1-2.el6_1.1
# - krb5-appl-debuginfo.x86_64:1.0.1-2.el6_1.1
# - krb5-appl-servers.x86_64:1.0.1-2.el6_1.1
#
# Last versions recommanded by security team:
# - krb5-appl-clients.x86_64:1.0.1-2.el6_1.3
# - krb5-appl-debuginfo.x86_64:1.0.1-2.el6_1.3
# - krb5-appl-servers.x86_64:1.0.1-2.el6_1.3
#
# CVE List:
# - CVE-2011-1526
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# NOTE(review): "name.arch-version" is not a standard yum package spec;
# presumably "krb5-appl-clients-1.0.1-2.el6_1.3.x86_64" (or just
# "krb5-appl-clients.x86_64") was intended — verify against the RHSA-2011:0920
# advisory before relying on this generated script.
sudo yum install krb5-appl-clients.x86_64-1.0.1 -y
sudo yum install krb5-appl-debuginfo.x86_64-1.0.1 -y
sudo yum install krb5-appl-servers.x86_64-1.0.1 -y
|
#!/bin/bash
# Bootstrap / reconfigure a MongoDB replica set from the MONGO_SERVERS
# environment variable (comma-separated host:port list).
#MONGO_SERVERS=172.19.57.79:27017,172.19.57.66:27017,172.19.57.73:27017
# Space-separated "host:port" list derived from the comma-separated env var.
servers=${MONGO_SERVERS//,/ }
# Current primary; defaults to the local mongod until find_master runs.
master_host=127.0.0.1
master_port=27017
# Number of configured servers; recomputed by the connectivity loop below.
server_count=0
# Allow helper binaries shipped next to this script to be found.
export PATH=$PATH:./
# Initialise a brand-new replica set: rs.initiate() on the local node,
# register every configured member with rs.add(), then locate the elected
# primary. Sleeps give mongod time to settle between membership changes.
function cluster_init {
    sleep 10
    mongo --eval "printjson(rs.initiate())"
    for server in ${servers}; do
        mongo --eval "printjson(rs.add('${server}'))"
        sleep 5
    done
    sleep 10
    find_master
}
# Reconcile the live replica-set membership with the desired $servers list.
# Compares the desired count ($server_count) with the current count from
# rs.config(), then builds one mongo --eval script that rewrites existing
# member hosts in place, splices off surplus members, forces a reconfig,
# and finally rs.add()s any new members. Ends by re-locating the primary.
function cluster_reconfig {
# Number of existing members whose host entry is rewritten in place.
server_update_count=$server_count
server_add_count=0
server_del_count=0
# Count "host" lines in rs.config() output to learn the current member count.
server_cur_count=$(mongo --host ${master_host} --port ${master_port} --eval "printjson(rs.config());" | grep host | wc -l )
if [ $server_count -eq $server_cur_count ]; then
echo "eq: $server_count"
fi
if [ $server_count -lt $server_cur_count ]; then
# Fewer desired than present: trailing members must be removed.
((server_del_count=$server_cur_count - $server_count))
echo "lt: new=$server_count, old=$server_cur_count"
fi
if [ $server_count -gt $server_cur_count ]; then
# More desired than present: rewrite what exists, add the rest.
server_update_count=$server_cur_count
((server_add_count=$server_count - $server_cur_count))
echo "gt: new=$server_count, old=$server_cur_count"
fi
config_script="var config=rs.config();";
config_add_script="";
update_cursor=0
for server in $servers; do
if [ $update_cursor -ge $server_update_count ]; then
# Past the in-place-update window: schedule an rs.add() instead.
config_add_script="$config_add_script rs.add(\"${server}\");"
else
config_script="$config_script config.members[$update_cursor].host = \"$server\";"
fi
((update_cursor=$update_cursor + 1))
done
if [ $server_del_count -gt 0 ]; then
# Drop the surplus members from the tail of the members array.
((cursor=$server_cur_count - $server_del_count))
config_script="$config_script config.members.splice($cursor, $server_del_count);"
fi
# force:true lets the reconfig proceed even without a healthy primary.
config_script="$config_script rs.reconfig(config, {\"force\":true});"
# rs.add() calls must run after the forced reconfig.
config_script="$config_script $config_add_script"
#config=$(mongo --host ${master_host} --port ${master_port} --eval "printjson(rs.config());")
#echo "$config_script"
mongo --host ${master_host} --port ${master_port} --eval "$config_script"
sleep 10
find_master
}
# Run one-time init payloads from /docker-entrypoint-initdb.d against the
# current primary: *.sh files are sourced into this shell, *.js files are
# fed to the mongo shell, anything else is ignored. (Presumably adapted
# from the official mongo docker entrypoint — verify before changing.)
function collection_init {
# Reusable mongo invocation targeting the primary.
mongo=( mongo --host ${master_host} --port ${master_port} --quiet )
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.js) echo "$0: running $f"; "${mongo[@]}" "$f"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
}
# Locate the replica-set primary among $servers.
# On success exports master_host/master_port and returns 0; returns 1 when
# no member reports ismaster:true.
function find_master {
for server in $servers; do
# Split "host:port" into an array: server[0]=host, server[1]=port.
server=${server//:/ }
server=($server)
# Grab the 'ismaster' line of db.isMaster() from this member.
IS_MASTER=$(mongo --host ${server[0]} --port ${server[1]} --eval "printjson(db.isMaster())" | grep 'ismaster')
if echo $IS_MASTER | grep "true"; then
master_host=${server[0]}
master_port=${server[1]}
export master_host
export master_port
return 0
fi
done
return 1
}
# --- Top-level driver ---
# Poll (up to $tries rounds, 2s apart) until every configured server accepts
# connections, counting them into server_count along the way.
tries=300
while true; do
success="true"
server_count=0
for server in $servers; do
server=${server//:/ }
server=($server)
mongo=( mongo --host ${server[0]} --port ${server[1]} --quiet )
if "${mongo[@]}" 'admin' --eval 'quit(0)' &> /dev/null; then
echo "${server[0]}:${server[1]} connected!"
else
echo "${server[0]}:${server[1]} connect fail!"
success="false"
fi
((server_count=$server_count+1))
done
if [ "x$success" = "xtrue" ]; then
break
fi
(( tries-- ))
if [ "$tries" -le 0 ]; then
echo >&2
# NOTE(review): $originalArgOne is never set in this script — presumably a
# leftover from the official mongo entrypoint; the message will render an
# empty name. Verify before relying on it.
echo >&2 "error: $originalArgOne does not appear to have accepted connections quickly enough -- perhaps it had an error?"
echo >&2
exit 1
fi
sleep 2
done
echo "find server count: ${server_count}"
# NOTE(review): the message says 5s but the sleep below is 3s — confirm intent.
echo 'wait 5s, then find master...'
sleep 3
find_master
# A primary exists -> just reconfigure; otherwise initiate first.
if [ $? -eq 0 ]; then
echo "Find Master: ${master_host}:${master_port}"
cluster_reconfig
else
echo 'Initiating the cluster!'
cluster_init
cluster_reconfig
fi
sleep 2
find_master
collection_init
module.exports = {
getReadingTime(text) {
const wordsPerMinute = 180;
const numberOfWords = text.split(/\s/g).length;
return Math.ceil(numberOfWords / wordsPerMinute);
},
formatReadingTime(text) {
const wordsPerMinute = 180;
const numberOfWords = text.split(/\s/g).length;
const readingTime = Math.ceil(numberOfWords / wordsPerMinute);
return `${new Array( Math.round(readingTime / 5) || 1).fill('🧁').join('')}`;
},
getReadableDate(dateStr) {
const dateObj = new Date(dateStr);
return dateObj.toGMTString().slice(5,16);
},
getHtmlDate(dateStr) {
const dateObj = new Date(dateStr);
return `${dateObj.getUTCFullYear()}-${dateObj.getUTCMonth()}-${dateObj.getUTCDate()}`;
}
};
|
# Minimal scikit-learn example: construct and fit a decision-tree classifier.
from sklearn import tree
clf = tree.DecisionTreeClassifier()
# NOTE(review): feature_x and labels are not defined anywhere in this
# snippet — presumably supplied by surrounding code; verify before running.
clf.fit(feature_x, labels)
<gh_stars>10-100
package ru.ispras.pu4spark
import org.apache.logging.log4j.LogManager
import org.apache.spark.ml.classification.{ProbabilisticClassificationModel, ProbabilisticClassifier}
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._
/**
* Modified Positive-Unlabeled learning algorithm; main idea is to gradually refine set of positive examples<br>
*
* Pseudocode was taken from:
* <NAME>., <NAME>., <NAME>., & <NAME>. (2015).
* Detecting positive and negative deceptive opinions using PU-learning.
* Information Processing & Management, 51(4), 433-443.
*
* @author <NAME> (<EMAIL>)
*/
class GradualReductionPULearner[
E <: ProbabilisticClassifier[Vector, E, M],
M <: ProbabilisticClassificationModel[Vector, M]](
relNegThreshold: Double,
classifier: ProbabilisticClassifier[Vector, E, M]) extends TwoStepPULearner[E,M](classifier) {
val log = LogManager.getLogger(getClass)
// Iteratively refines the reliable-negative set: starting from the
// zero-step weighting, each round trains a classifier on the currently
// labelled rows, re-scores the full DataFrame, and demotes
// low-probability rows (score < relNegThreshold) to reliable negatives,
// stopping when the gain stalls or negatives would outnumber positives.
override def weight(df: DataFrame, labelColumnName: String, featuresColumnName: String, finalLabel: String): DataFrame = {
// Initial positive-probability weighting; drop classifier bookkeeping cols.
val oneStepPUDF: DataFrame = zeroStep(df, labelColumnName, featuresColumnName, finalLabel)
.drop("probability").drop("prediction").drop("rawPrediction").drop(ProbabilisticClassifierConfig.labelName)
val prevLabel = "prevLabel"
val curLabel = "curLabel"
// Unlabeled (zero) rows get the undefLabel sentinel so they can be told
// apart from genuine reliable negatives in later iterations.
var curDF = replaceZerosByUndefLabel(oneStepPUDF, labelColumnName, prevLabel, GradualReductionPULearner.undefLabel)
val confAdder = new GradRelNegConfidenceThresholdAdder(relNegThreshold, GradualReductionPULearner.undefLabel)
//replace weights by binary column for further learning (induce labels for curLabDF)
val curLabelColumn = confAdder.binarizeUDF(curDF(finalLabel), curDF(prevLabel))
curDF = curDF.withColumn(curLabel, curLabelColumn).cache()
var newRelNegCount = curDF
//unlabeled in previous iterations && negative in current iteration
.filter(curDF(prevLabel) === GradualReductionPULearner.undefLabel && curDF(curLabel) === GradualReductionPULearner.relNegLabel)
.count()
log.debug(s"newRelNegCount: $newRelNegCount")
var prevNewRelNegCount = newRelNegCount
val totalPosCount = curDF.filter(curDF(curLabel) === GradualReductionPULearner.posLabel).count()
var totalRelNegCount = curDF.filter(curDF(curLabel) === GradualReductionPULearner.relNegLabel).count()
// Gain tracking drives the stopping condition of the refinement loop.
var prevGain = Long.MaxValue
var curGain = newRelNegCount
do {
//learn new classifier
// Train only on rows that currently carry a definite label.
val curLabDF = curDF.filter(curDF(curLabel) !== GradualReductionPULearner.undefLabel)
val newPreparedDf = indexLabelColumn(curLabDF, curLabel, ProbabilisticClassifierConfig.labelName,
Seq(GradualReductionPULearner.relNegLabel.toString, GradualReductionPULearner.posLabel.toString))
val model = classifier.fit(newPreparedDf)
//apply classifier to all data (however, we are interested in ReliableNegatives data only, see confAdder)
val labUnlabDF = model.transform(curDF)
curDF = labUnlabDF.withColumn(finalLabel, getPOne(labUnlabDF("probability")))
.drop("probability").drop("prediction").drop("rawPrediction").drop(ProbabilisticClassifierConfig.labelName)
// Shift labels: this iteration's labels become the next round's "previous".
curDF = curDF.drop(prevLabel)
.withColumnRenamed(curLabel, prevLabel)
val innerConfAdder = new GradRelNegConfidenceThresholdAdder(relNegThreshold, GradualReductionPULearner.relNegLabel)
val curLabelColumn = innerConfAdder.binarizeUDF(curDF(finalLabel), curDF(prevLabel))
curDF = curDF.withColumn(curLabel, curLabelColumn).cache()
prevNewRelNegCount = newRelNegCount
newRelNegCount = curDF
//negative in current iteration
.filter(curDF(curLabel) === GradualReductionPULearner.relNegLabel)
.count()
totalRelNegCount = curDF.filter(curDF(curLabel) === GradualReductionPULearner.relNegLabel).count()
prevGain = curGain
curGain = prevNewRelNegCount - totalRelNegCount
log.debug(s"newRelNegCount: $newRelNegCount, prevNewRelNegCount: $prevNewRelNegCount, totalRelNegCount: $totalRelNegCount")
log.debug(s"curGain: $curGain, prevGain: $prevGain")
// Stop when the gain vanishes, stops shrinking, or negatives catch up
// with positives.
} while (curGain > 0 && curGain < prevGain && totalPosCount < totalRelNegCount)
curDF
}
}
/**
 * Supplies a "binarize" UDF for the gradual-reduction learner: rows whose
 * previous label equals `labelToConsider` are re-labelled as
 * reliable-negative when the predicted positive probability falls below
 * `threshold`, or reset to the undefined label otherwise; all other rows
 * keep their previous label unchanged.
 */
private class GradRelNegConfidenceThresholdAdder(threshold: Double, labelToConsider: Int) extends Serializable {
  def binarize(probPred: Double, prevLabel: Int): Int =
    if (prevLabel != labelToConsider) {
      prevLabel // only rows carrying the considered label are re-examined
    } else if (probPred < threshold) {
      GradualReductionPULearner.relNegLabel
    } else {
      GradualReductionPULearner.undefLabel
    }
  val binarizeUDF = udf(binarize(_: Double, _: Int))
}
/** Shared label constants for the PU-learning pipeline. */
object GradualReductionPULearner {
// Example is considered a reliable negative.
val relNegLabel = 0
// Example is a known positive.
val posLabel = 1
// Label is unknown / not yet decided.
val undefLabel = -1
}
// Configuration wrapper that instantiates a GradualReductionPULearner for
// the concrete classifier described by `classifierConfig`.
case class GradualReductionPULearnerConfig(relNegThreshold: Double = 0.5,
classifierConfig: ProbabilisticClassifierConfig) extends PositiveUnlabeledLearnerConfig {
override def build(): PositiveUnlabeledLearner = {
// NOTE(review): this match is non-exhaustive — any other
// ProbabilisticClassifierConfig subtype will throw a MatchError at runtime.
classifierConfig match {
case lrc: LogisticRegressionConfig => new GradualReductionPULearner(relNegThreshold, lrc.build())
case rfc: RandomForestConfig => new GradualReductionPULearner(relNegThreshold, rfc.build())
}
}
}
|
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Returns the total number of Linux and Windows nodes in the cluster.
#
# Vars assumed:
#   NUM_NODES
#   NUM_WINDOWS_NODES
function get-num-nodes {
  local -r total=$((NUM_NODES + NUM_WINDOWS_NODES))
  echo "${total}"
}
# Suggests a master machine size (core count) for the given cluster size.
# Thresholds are checked from largest to smallest; the first one crossed
# wins, which matches the original ascending if-chain.
#
# Vars assumed:
#   NUM_NODES
#   NUM_WINDOWS_NODES
function get-master-size {
  local -r nodes="$(get-num-nodes)"
  local size=1
  if [[ "${nodes}" -gt "3000" ]]; then
    size=64
  elif [[ "${nodes}" -gt "500" ]]; then
    size=32
  elif [[ "${nodes}" -gt "250" ]]; then
    size=16
  elif [[ "${nodes}" -gt "100" ]]; then
    size=8
  elif [[ "${nodes}" -gt "10" ]]; then
    size=4
  elif [[ "${nodes}" -gt "5" ]]; then
    size=2
  fi
  echo "${size}"
}
# Suggests a master root-disk size proportional to the cluster size.
#
# Vars assumed:
#   NUM_NODES
#   NUM_WINDOWS_NODES
function get-master-root-disk-size() {
  local -r nodes="$(get-num-nodes)"
  local disk="20GB"
  if [[ "${nodes}" -gt "3000" ]]; then
    disk="500GB"
  elif [[ "${nodes}" -gt "500" ]]; then
    disk="100GB"
  fi
  echo "${disk}"
}
# Suggests a master data-disk size proportional to the cluster size.
#
# Vars assumed:
#   NUM_NODES
#   NUM_WINDOWS_NODES
function get-master-disk-size() {
  local -r nodes="$(get-num-nodes)"
  local disk="20GB"
  if [[ "${nodes}" -gt "3000" ]]; then
    disk="200GB"
  elif [[ "${nodes}" -gt "500" ]]; then
    disk="100GB"
  fi
  echo "${disk}"
}
# Suggests the node IP range. A user-provided NODE_IP_RANGE wins outright;
# otherwise the prefix length shrinks as the cluster grows.
function get-node-ip-range {
  if [[ -n "${NODE_IP_RANGE:-}" ]]; then
    >&2 echo "Using user provided NODE_IP_RANGE: ${NODE_IP_RANGE}"
    echo "${NODE_IP_RANGE}"
    return
  fi
  local -r nodes="$(get-num-nodes)"
  local range="10.40.0.0/22"
  if [[ "${nodes}" -gt 4000 ]]; then
    range="10.40.0.0/19"
  elif [[ "${nodes}" -gt 2000 ]]; then
    range="10.40.0.0/20"
  elif [[ "${nodes}" -gt 1000 ]]; then
    range="10.40.0.0/21"
  fi
  echo "${range}"
}
# Suggests the cluster (pod) IP range; the prefix length shrinks as the
# cluster grows.
function get-cluster-ip-range {
  local -r nodes="$(get-num-nodes)"
  local range="10.64.0.0/14"
  if [[ "${nodes}" -gt 4000 ]]; then
    range="10.64.0.0/11"
  elif [[ "${nodes}" -gt 2000 ]]; then
    range="10.64.0.0/12"
  elif [[ "${nodes}" -gt 1000 ]]; then
    range="10.64.0.0/13"
  fi
  echo "${range}"
}
# Calculate the ip-alias range size based on the max number of pods.
# Returns (32 - pow) where pow is the smallest integer with 2^pow >= $1 * 2.
#
# $1: The number of max pods limitation.
function get-alias-range-size() {
  local pow
  for pow in {0..31}; do
    if (( (1 << pow) >= $1 * 2 )); then
      echo $((32 - pow))
      return 0
    fi
  done
}
# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
# Below exported vars are used in cluster/gce/util.sh (or maybe somewhere else),
# please remove those vars when not needed any more.
# Root directory for Kubernetes files on Windows nodes.
# NOTE(review): WINDOWS_K8S_DIR itself is not exported — the derived vars
# below capture its value at assignment time, which suffices here.
WINDOWS_K8S_DIR="C:\etc\kubernetes"
# Directory where Kubernetes binaries will be installed on Windows nodes.
export WINDOWS_NODE_DIR="${WINDOWS_K8S_DIR}\node\bin"
# Directory where Kubernetes log files will be stored on Windows nodes.
export WINDOWS_LOGS_DIR="${WINDOWS_K8S_DIR}\logs"
# Directory where CNI binaries will be stored on Windows nodes.
export WINDOWS_CNI_DIR="${WINDOWS_K8S_DIR}\cni"
# Directory where CNI config files will be stored on Windows nodes.
export WINDOWS_CNI_CONFIG_DIR="${WINDOWS_K8S_DIR}\cni\config"
# CNI storage path for Windows nodes
export WINDOWS_CNI_STORAGE_PATH="https://storage.googleapis.com/k8s-artifacts-cni/release"
# CNI version for Windows nodes (pinned).
export WINDOWS_CNI_VERSION="v0.8.5"
# Pod manifests directory for Windows nodes on Windows nodes.
export WINDOWS_MANIFESTS_DIR="${WINDOWS_K8S_DIR}\manifests"
# Directory where cert/key files will be stores on Windows nodes.
export WINDOWS_PKI_DIR="${WINDOWS_K8S_DIR}\pki"
# Location of the certificates file on Windows nodes.
export WINDOWS_CA_FILE="${WINDOWS_PKI_DIR}\ca-certificates.crt"
# Path for kubelet config file on Windows nodes.
export WINDOWS_KUBELET_CONFIG_FILE="${WINDOWS_K8S_DIR}\kubelet-config.yaml"
# Path for kubeconfig file on Windows nodes.
export WINDOWS_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubelet.kubeconfig"
# Path for bootstrap kubeconfig file on Windows nodes.
export WINDOWS_BOOTSTRAP_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubelet.bootstrap-kubeconfig"
# Path for kube-proxy kubeconfig file on Windows nodes.
export WINDOWS_KUBEPROXY_KUBECONFIG_FILE="${WINDOWS_K8S_DIR}\kubeproxy.kubeconfig"
# Pause (infra) container image for Windows containers (pinned).
export WINDOWS_INFRA_CONTAINER="gcr.io/gke-release/pause-win:1.2.1"
|
import axios from 'axios'
// Nuxt/Vuex store state for the GitHub trending-repositories UI.
export const state = () => ({
daily: [], // trending repos fetched for the 'daily' period
weekly: [], // trending repos fetched for the 'weekly' period
monthly: [], // trending repos fetched for the 'monthly' period
repositories: [], // accumulated repositories (see PUSH_REPOSITORY)
repository: null, // currently selected repository detail
isOpening: true, // initial-open UI flag
isLoading: false, // request-in-flight UI flag
isOpen: false, // detail-view open flag
type: 'daily', // currently selected period
types: [
'daily',
'weekly',
'monthly'
],
})
// Synchronous state mutations; each sets exactly one field.
export const mutations = {
// Toggle the detail-view open flag.
SET_IS_OPEN (state, flag) {
state.isOpen = flag
},
// Toggle the initial-open flag.
SET_IS_OPENING (state, flag) {
state.isOpening = flag
},
// Toggle the loading spinner flag.
SET_IS_LOADING (state, flag) {
state.isLoading = flag
},
// Replace the daily trending list.
SET_DAILY (state, repositories) {
state.daily = repositories
},
// Replace the weekly trending list.
SET_WEEKLY (state, repositories) {
state.weekly = repositories
},
// Replace the monthly trending list.
SET_MONTHLY (state, repositories) {
state.monthly = repositories
},
// Select a repository for the detail view.
SET_REPOSITORY (state, repository) {
state.repository = repository
},
// Switch the active period ('daily' | 'weekly' | 'monthly').
SET_TYPE (state, type) {
state.type = type
},
// Append one repository to the accumulated list.
PUSH_REPOSITORY (state, repository) {
state.repositories.push(repository)
},
}
// Async actions: fetch trending lists per period from the public
// github-trending-api, plus repo details/readme from the GitHub REST API.
export const actions = {
async fetchDaily ({commit}) {
const res = await axios.get('https://github-trending-api.now.sh/repositories?language=javascript&since=daily')
commit('SET_DAILY', res.data)
},
async fetchWeekly ({commit}) {
const res = await axios.get('https://github-trending-api.now.sh/repositories?language=javascript&since=weekly')
commit('SET_WEEKLY', res.data)
},
async fetchMonthly ({commit}) {
const res = await axios.get('https://github-trending-api.now.sh/repositories?language=javascript&since=monthly')
commit('SET_MONTHLY', res.data)
},
// NOTE(review): the two actions below never use `commit` — they return the
// payload to the caller instead of mutating the store; confirm intended.
async fetchRepository ({commit}, {author, name}) {
const res = await axios.get(`https://api.github.com/repos/${author}/${name}`)
return res.data
},
async fetchReadMe ({commit}, {author, name}){
const res = await axios.get(`https://api.github.com/repos/${author}/${name}/readme`)
return res.data
}
}
def top_three(lst):
    """Return the three largest values of lst as a (first, second, third)
    tuple. Slots with no candidate remain float('-inf'); duplicates fill
    separate slots (e.g. [5, 5, 3] -> (5, 5, 3))."""
    best = [float('-inf')] * 3
    for value in lst:
        if value > best[0]:
            # New maximum: shift the old first and second down one slot.
            best = [value, best[0], best[1]]
        elif value > best[1]:
            # New runner-up: old second drops to third.
            best = [best[0], value, best[1]]
        elif value > best[2]:
            best[2] = value
    return best[0], best[1], best[2]
# Blue/green static-site disk swap: point nginx-red at the disk blue/green
# share, refresh the *other* disk with ./public, then repoint nginx-red at
# the freshly-built disk.
export BLUE_DISK=`kubectl get deployment nginx-blue -o json | jq -r '.spec.template.spec.volumes | .[] | select( .name | contains("site-source")) | .gcePersistentDisk.pdName'`
export GREEN_DISK=`kubectl get deployment nginx-green -o json | jq -r '.spec.template.spec.volumes | .[] | select( .name | contains("site-source")) | .gcePersistentDisk.pdName'`
export RED_DISK=`kubectl get deployment nginx-red -o json | jq -r '.spec.template.spec.volumes | .[] | select( .name | contains("site-source")) | .gcePersistentDisk.pdName'`
# Quoted comparisons so empty/failed lookups compare cleanly instead of
# producing a [ syntax error.
if [ "$BLUE_DISK" != "$GREEN_DISK" ]
then
echo "Blue and green aren't working from the same disk. Make that the case, then try again"
exit 99
fi
# Make sure red is using the disk blue and green are using. This means that the other disk will be unused.
kubectl patch deployment nginx-red --patch '{"spec": {"template": {"spec": { "volumes": [{"name":"site-source", "gcePersistentDisk": {"fsType":"ext4", "pdName": "'$BLUE_DISK'", "readOnly": true}}]}}}}'
# Wait until only one nginx-red pod
NPODS=`kubectl get pods | grep nginx-red | wc -l`
while [ "$NPODS" -ne 1 ]
do
echo "Waiting for nginx-red pods to sort themselves out..."
sleep 5
NPODS=`kubectl get pods | grep nginx-red | wc -l`
done
# Pick whichever of the two known disks is NOT currently serving traffic.
if [ "$BLUE_DISK" = "site-source" ]
then
NEW_DISK="site-source-2"
elif [ "$BLUE_DISK" = "site-source-2" ]
then
NEW_DISK="site-source"
else
# Bug fix: the original interpolated the undefined variable $OLD_DISK here,
# so the error message never showed the offending disk name.
echo "Unrecognised old disk: $BLUE_DISK"
exit 99
fi
# Retry attach until it succeeds (the disk may still be detaching).
until gcloud compute instances attach-disk gke-colony-default-pool-7f86a12c-hkwv --disk $NEW_DISK --device-name $NEW_DISK
do
echo "Try attach again"
done
# Mount the spare disk on the node, replace its content with ./public,
# lock the files down, then unmount and detach.
gcloud compute ssh --command="mkdir ~/$NEW_DISK" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute ssh --command="mkdir ~/site" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute ssh --command="sudo mount /dev/disk/by-id/google-$NEW_DISK ~/$NEW_DISK" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute ssh --command="sudo rm -r ~/site/*" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute ssh --command="sudo rm -r ~/$NEW_DISK/*" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute scp --recurse ./public/* gke-colony-default-pool-7f86a12c-hkwv:~/site/
gcloud compute ssh --command="sudo mv ~/site/* ~/$NEW_DISK/" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute ssh --command="sudo chown -R root ~/$NEW_DISK/*" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute ssh --command="sudo chgrp -R root ~/$NEW_DISK/*" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute ssh --command="sudo chmod -R -w ~/$NEW_DISK/*" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute ssh --command="sudo umount ~/$NEW_DISK/" gke-colony-default-pool-7f86a12c-hkwv
gcloud compute instances detach-disk gke-colony-default-pool-7f86a12c-hkwv --disk $NEW_DISK
kubectl patch deployment nginx-red --patch '{"spec": {"template": {"spec": { "volumes": [{"name":"site-source", "gcePersistentDisk": {"fsType":"ext4", "pdName": "'$NEW_DISK'", "readOnly": true}}]}}}}'
# Should also patch nginx-blue and nginx-green.... but not yet while we're testing!
#!/bin/bash
# Test harness for the dcl binary.
#   $1 - directory containing dcl, ref and gen.py
#   $2 - directory containing *.key test inputs
# Each test feeds $test's content as argv, the matching .a file on stdin,
# and compares the exit code against the matching .exit file.
testsCounter=0
passedTestsCounter=0
echo -e "\nRunning tests..."
for test in $2/*.key
do
echo -e "=========================================="
echo "test: $test"
./$1/dcl $(cat $test) < ${test%key}a > out
echo $? > code.exit
# Pass = actual exit code matches the expected .exit file.
if diff code.exit ${test%key}exit >/dev/null 2>&1
then
echo -e "\nexit code OK"
((passedTestsCounter++))
else
echo -e "\nexit code WA"
fi
((testsCounter++))
done
echo -e "=========================================="
echo -e "=========================================="
echo "Passed $passedTestsCounter / $testsCounter my tests"
echo -e "=========================================="
echo -e "=========================================="
echo -e "Python tests:\n"
echo -e "=========================================="
# NOTE(review): the label says "n = 100 000" but the command passes -n 1000 —
# confirm which value is intended.
echo -e "n = 100 000, l = 30"
python3 $1/gen.py -n 1000 -l 30 imm ./$1/dcl ./$1/ref
echo -e "=========================================="
echo -e "n = 1 000, l = 1 000"
python3 $1/gen.py -n 1000 -l 1000 imm ./$1/dcl ./$1/ref
echo -e "=========================================="
echo -e "n = 10, l = 1 000 000"
python3 $1/gen.py -n 10 -l 1000000 imm ./$1/dcl ./$1/ref
package arouter.dawn.zju.edu.module_forum.config;
/**
 * @Author: Dawn
 * @Date: 2018/11/22 22:01
 * @Description: Event-bus event codes for forum-list scroll events.
 */
public interface EventBusCode {
    // Forum list scrolled towards the bottom.
    int FORUM_LIST_SCROLL_DOWN = 0;
    // Forum list scrolled towards the top.
    int FORUM_LIST_SCROLL_UP = 1;
}
|
def palindrome(str):
    """Return True when str reads the same forwards and backwards."""
    return str == str[::-1]

# main
string = "madam"
print("Yes" if palindrome(string) else "No")
def binomial_coefficient(n, k):
    """Compute the binomial coefficient C(n, k).

    Replaces the original exponential Pascal's-rule recursion with an
    O(min(k, n-k)) multiplicative loop, and returns 0 for out-of-range k
    (the recursion produced meaningless values when k > n).
    """
    if k < 0 or k > n:
        return 0
    k = min(k, n - k)  # exploit symmetry: C(n, k) == C(n, n-k)
    result = 1
    for i in range(1, k + 1):
        # Multiply before dividing: the running product is always an exact
        # multiple of i, so integer division stays exact.
        result = result * (n - k + i) // i
    return result

print("The value of C(5, 3) is", binomial_coefficient(5, 3))
<filename>server/routes/profile.js
// Express routes for user-profile endpoints; all handler logic lives in
// ../controllers/profile/*.
const express = require('express')
const getProfileController = require('../controllers/profile/getProfileController')
const getProfileImageController = require('../controllers/profile/getProfileImageController')
const checkHandleTakenController = require('../controllers/profile/checkHandleTakenController')
const updateHandleController = require('../controllers/profile/updateHandleController')
const updateProfileController = require('../controllers/profile/updateProfileController')
const router = express.Router()
router.get('/profile/:id', getProfileController) // fetch a profile by id
router.get('/profileImage', getProfileImageController) // fetch profile image
router.get('/checkHandleTaken', checkHandleTakenController) // handle availability
router.patch('/updateHandle', updateHandleController) // change handle
router.patch('/updateProfile', updateProfileController) // change profile fields
module.exports = router
<filename>old/fbotp3.py
#!/usr/bin/python
#DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
#Copyright (C) 2004 <NAME> <<EMAIL>>
#Everyone is permitted to copy and distribute verbatim or modified
#copies of this license document, and changing it is allowed as long
#as the name is changed.
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
#1. You just DO WHAT THE FUCK YOU WANT TO.
#
# ///\\\ ( Have Fun )
# / ^ ^ \ /
# __\ __ /__
# / _ `----' _ \
# \__\ _ |__\
# (..) _| _ (..)
# |____(___| Mynameisv
#_ __ _ (____)____) _ _________________________________ _'
#
##
###
###########################################################
# Python Imports
#############################
from naiveotp3 import ObjNaiveOtp
import base64
import sys
import time
#
##
###
###########################################################
# Functions
#############################
def printHelp():
    """Print usage information for the OTP generator and terminate."""
    usage_lines = (
        '',
        'Facebook OTP Generator v0.1',
        '',
        'Dirty command line with the secret in clear as a parameter O_o',
        ' # python fbotp.py <base32 encoded secret key>',
        '',
        'Example:',
        ' # python fbotp.py ABCDEFGHIJKLMNOP',
        '',
    )
    for line in usage_lines:
        print(line)
    sys.exit()
#end - printHelp
#
##
###
###########################################################
# Main
#############################
def main():
    """Entry point: take a Base32-encoded secret from argv[1] and print the
    current 6-digit, 30-second OTP via the project-local ObjNaiveOtp."""
    if len(sys.argv)!=2:
        printHelp()
    else:
        sKeyBase32 = sys.argv[1].strip()
        #Check that the key is Base32 encoded
        try:
            sKeyBin = base64.b32decode(sKeyBase32)
        except:
            # NOTE(review): bare except — printHelp() exits the process, so
            # control does not fall through, but narrowing to
            # binascii.Error would be cleaner.
            print(" [!] Argument is not Base32")
            printHelp()
        # Generate the OTP
        oOtp = ObjNaiveOtp()
        # Convert the Base32 secret into the hex form the OTP object expects.
        sKeyHex = oOtp.convertFacebookKeyToHex(sKeyBase32)
        oOtp.setKey(sKeyHex)
        oOtp.setAlgo('sha1')
        oOtp.setOtpLen(6)
        oOtp.setOtpValidity(30)
        # Align the counter to the current 30-second window.
        oOtp.doTimeCurrent()
        oOtp.doTimeRangeFloor()
        print(str(oOtp.genOtp()))
        print('Sleep 10')
        time.sleep(10)
#end - main
# Run main() only when executed as a script.
# Bug fix: the original had an `else: main()` branch, which ran the OTP
# generator (including its 10-second sleep) as a side effect of merely
# importing this module.
if __name__ == '__main__':
    main()
<reponame>danielmlc/blogServer
import { Module } from '@nestjs/common';
import { TypeOrmModule } from '@nestjs/typeorm';
import { JwtModule } from '@nestjs/jwt';
import { PassportModule } from '@nestjs/passport';
import { Article } from './article.entity';
import { ArticleService } from './article.service';
import { ArticleController } from './article.controller';
/**
 * Feature module for blog articles: wires the Article entity repository,
 * JWT auth via the passport 'jwt' strategy, and the article REST controller.
 */
@Module({
imports: [
TypeOrmModule.forFeature([Article]),
PassportModule.register({ defaultStrategy: 'jwt' }),
JwtModule.register({
// NOTE(review): hard-coded signing secret — move to configuration /
// environment before production use.
secretOrPrivateKey: 'secretKey',
signOptions: {
// Token lifetime in seconds.
expiresIn: 3600,
},
}),
],
controllers: [ArticleController],
providers: [ArticleService],
})
export class ArticleModule {}
#!/usr/bin/python
from crawler import Crawler
if __name__ == '__main__':
    # Solidity crawler
    # Instantiate the project-local Crawler for the "ethereum" corpus
    # and run the crawl.
    my_crawler = Crawler("ethereum")
    my_crawler.crawl()
<filename>TMDbISEL/app/src/main/java/pt/isel/pdm/li51n/g4/tmdbisel/data/provider/MovieInfoProvider.java
package pt.isel.pdm.li51n.g4.tmdbisel.data.provider;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import java.io.IOException;
import java.util.List;
import pt.isel.pdm.li51n.g4.tmdbisel.data.models.TMDB.Movie;
import pt.isel.pdm.li51n.g4.tmdbisel.data.models.TMDB.Review;
import pt.isel.pdm.li51n.g4.tmdbisel.data.provider.TMDb.TMDbAPIConnectionException;
import pt.isel.pdm.li51n.g4.tmdbisel.data.provider.TMDb.TMDbAPIRateLimitException;
import pt.isel.pdm.li51n.g4.tmdbisel.workers.DownloadThread;
public interface MovieInfoProvider {
    /**
     * Asynchronous method to get the movie list of the selected type.
     *
     * @param type               Movie list type
     * @param language           Movie list language
     * @param query              Text to search (may be null)
     * @param completionCallback The callback to be executed once the operation is completed, either successfully or in failure.
     */
    void getMovieListInfoAsync(@NonNull String type, @NonNull String language, @Nullable String query, @NonNull DownloadThread.Callback completionCallback);
    /**
     * Method to get the movie list of the selected type.
     *
     * @param type       Movie list type
     * @param language   Movie list language
     * @param query      Text to search (may be null)
     * @param pageNumber Page of results to fetch
     * @return The resulting movie list
     * @throws TMDbAPIRateLimitException If the API rate limit was reached
     * @throws IOException               If an error occurred while fetching the movie list information
     */
    List<Movie> getMovieListInfo(@NonNull String type, @NonNull String language, @Nullable String query, int pageNumber) throws TMDbAPIRateLimitException, IOException;
    /**
     * Asynchronous method to get the movie info for the given id.
     *
     * @param id                 The movie id
     * @param language           Movie language
     * @param completionCallback The callback to be executed once the operation is completed, either successfully or in failure.
     */
    void getMovieInfoAsync(@NonNull String id, @NonNull String language, @NonNull DownloadThread.Callback completionCallback);
    /**
     * Method to get the movie info for the given id.
     *
     * @param id       The movie id
     * @param language Movie language
     * @return The resulting movie information
     * @throws TMDbAPIRateLimitException  If the API rate limit was reached
     * @throws IOException                If an error occurred while fetching the movie information
     * @throws TMDbAPIConnectionException If the TMDb API could not be reached
     */
    Movie getMovieInfo(@NonNull String id, @NonNull String language) throws TMDbAPIRateLimitException, IOException, TMDbAPIConnectionException;
    /**
     * Method to get the movie reviews.
     *
     * @param id       The movie id
     * @param language Movie language
     * @return The movie reviews
     * @throws IOException                If an error occurred while fetching the reviews
     * @throws TMDbAPIRateLimitException  If the API rate limit was reached
     * @throws TMDbAPIConnectionException If the TMDb API could not be reached
     */
    List<Review> getReviewsByMovieId(@NonNull String id, @NonNull String language) throws IOException, TMDbAPIRateLimitException, TMDbAPIConnectionException;
}
|
<reponame>VGLoic/permissioning-smart-contracts<filename>app/src/components/AppBar/AppBar.js
// Libs
import React from "react";
// Rimble Components
import { Flex, Heading } from "rimble-ui";
// Styles
import styles from "./styles.module.scss";
// Sticky, full-width top application bar showing the product title.
const AppBar = () => (
<Flex
position="sticky"
top="0"
height="100px"
alignItems="center"
justifyContent="space-between"
bg="white"
className={styles.appBar}
pl={4}
pr={5}
width="100%"
zIndex="2"
>
<Heading.h3>Pantheon Node Permissioning</Heading.h3>
</Flex>
);
export default AppBar;
|
<filename>app/src/main/java/com/likewater/articleone/MainActivity.java
package com.likewater.articleone;
import android.content.Intent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import android.graphics.Typeface;
import android.widget.Toast;
import butterknife.Bind;
import butterknife.ButterKnife;
public class MainActivity extends AppCompatActivity implements View.OnClickListener {
    // Views bound via ButterKnife.
    @Bind(R.id.findRepsButton) Button mFindRepsButton;
    @Bind(R.id.locationEditText) EditText mLocationEditText;
    @Bind(R.id.articleOneTextView) TextView mArticleOneTextView;
    @Bind(R.id.articleOneTextView2) TextView mArticleOneTextView2;
    @Bind(R.id.findAboutPageButton) Button mFindAboutPageButton;

    /**
     * Inflates the layout, binds views, applies the OpenSans typeface to the
     * article text views and registers this activity as the click listener
     * for both buttons.
     */
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        ButterKnife.bind(this);
        Typeface openSans = Typeface.createFromAsset(getAssets(),
                "fonts/opensans-regular.ttf");
        mArticleOneTextView.setTypeface(openSans);
        mArticleOneTextView2.setTypeface(openSans);
        mFindRepsButton.setOnClickListener(this);
        mFindAboutPageButton.setOnClickListener(this);
    }

    /**
     * Handles clicks on both buttons.
     *
     * For the reps button the input must be a five-character all-digit zip
     * code. BUG FIX: the original validated digit-by-digit inside the loop
     * and started RepListActivity once per valid digit (up to five launches)
     * while also showing one Toast per invalid digit; the input is now fully
     * validated first and the activity is launched exactly once.
     */
    @Override
    public void onClick(View v) {
        if (v == mFindRepsButton) {
            String location = mLocationEditText.getText().toString();
            if (location.length() == 5) {
                boolean allDigits = true;
                for (int i = 0; i < location.length(); i++) {
                    char c = location.charAt(i);
                    if (c < '0' || c > '9') {
                        allDigits = false;
                        break;
                    }
                }
                if (allDigits) {
                    Intent intent = new Intent(MainActivity.this, RepListActivity.class);
                    intent.putExtra("location", location);
                    startActivity(intent);
                } else {
                    Toast.makeText(MainActivity.this, "Please Enter Only Numbers", Toast.LENGTH_LONG).show();
                }
            } else {
                Toast.makeText(MainActivity.this, "Please Enter A Five Digit Zip Code", Toast.LENGTH_LONG).show();
            }
        }
        if (v == mFindAboutPageButton) {
            Intent intent = new Intent(MainActivity.this, AboutActivity.class);
            startActivity(intent);
        }
    }
}
|
# setAcl.sh <feedName>(/<entryName>)* ({c|r|u|d}{+|-}<principal>,)+
#
# e.g.: setAcl.sh Cafe r-one@example.com,crud+admin@example.com
# e.g.: setAcl.sh Cafe/deli r+one@example.com,crud-admin@example.com
source ./setupEnv.sh
# All expansions are quoted so classpaths, URLs and ACL strings containing
# spaces survive word splitting (the original passed them unquoted).
java -cp "$FSCT_CLASSPATH" com.google.feedserver.tools.FeedServerClientAclTool \
  -op setAcl -url "$FSCT_FEED_BASE/acl" -username "$FSCT_USER_NAME" \
  -serviceName "$SERVICE_NAME" -authnURLProtocol "$AUTHN_URL_PROTOCOL" \
  -authnURL "$AUTHN_URL" -resource "$1" -acl "$2"
|
# Compile the main TypeScript project and the ts2hc tool, then build each
# per-language SDK tool bundle; abort on the first failing step.
./node_modules/.bin/tsc || exit 1
./node_modules/.bin/tsc -p sdk-tools/ts2hc/tsconfig.json || exit 1
node scripts/buildLua52SdkTool || exit 1
node scripts/buildDartSdkTools "$@" || exit 1
node scripts/buildTsSdkTools || exit 1
#!/usr/bin/env bash
# Reset the three archive fixture files to valid, empty zstd frames.
set -e
DIR=$(dirname "${BASH_SOURCE[0]}")
cd "$DIR"
cd "../.."
VALID_ARCHIVES="data/valid_archives.zst"
INVALID_ARCHIVES="data/invalid_archives.zst"
VOLATILE_ARCHIVES="data/volatile_archives.zst"
FILES=(
"$VALID_ARCHIVES"
"$INVALID_ARCHIVES"
"$VOLATILE_ARCHIVES"
)
# Ensure the target directory exists; the original redirect failed outright
# on a fresh checkout without data/.
mkdir -p data
for file in "${FILES[@]}"; do
# Compress empty input so each file becomes a minimal valid zstd frame,
# truncating any previous content.
echo -n "" | zstd -c > "$file"
done
|
<filename>node_modules/react-icons-kit/ionicons/androidColorPalette.js
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.androidColorPalette = void 0;
var androidColorPalette = {
"viewBox": "0 0 512 512",
"children": [{
"name": "g",
"attribs": {
"id": "Icon_12_"
},
"children": [{
"name": "g",
"attribs": {},
"children": [{
"name": "g",
"attribs": {},
"children": [{
"name": "path",
"attribs": {
"d": "M256,64C150.401,64,64,150.401,64,256c0,105.604,86.401,192,192,192c18.136,0,32-13.864,32-32\r\n\t\t\tc0-8.531-3.198-16-8.531-21.333c-5.333-5.334-8.531-12.803-8.531-21.334c0-18.135,13.864-32,32-32h38.396\r\n\t\t\tc58.667,0,106.667-48,106.667-106.666C448,140.802,361.604,64,256,64z M138.667,256c-18.136,0-32-13.864-32-32s13.864-32,32-32\r\n\t\t\tc18.135,0,32,13.864,32,32S156.802,256,138.667,256z M202.667,170.667c-18.136,0-32-13.865-32-32c0-18.136,13.864-32,32-32\r\n\t\t\tc18.135,0,32,13.864,32,32C234.667,156.802,220.802,170.667,202.667,170.667z M309.333,170.667c-18.135,0-32-13.865-32-32\r\n\t\t\tc0-18.136,13.865-32,32-32c18.136,0,32,13.864,32,32C341.333,156.802,327.469,170.667,309.333,170.667z M373.333,256\r\n\t\t\tc-18.135,0-32-13.864-32-32s13.865-32,32-32c18.136,0,32,13.864,32,32S391.469,256,373.333,256z"
},
"children": [{
"name": "path",
"attribs": {
"d": "M256,64C150.401,64,64,150.401,64,256c0,105.604,86.401,192,192,192c18.136,0,32-13.864,32-32\r\n\t\t\tc0-8.531-3.198-16-8.531-21.333c-5.333-5.334-8.531-12.803-8.531-21.334c0-18.135,13.864-32,32-32h38.396\r\n\t\t\tc58.667,0,106.667-48,106.667-106.666C448,140.802,361.604,64,256,64z M138.667,256c-18.136,0-32-13.864-32-32s13.864-32,32-32\r\n\t\t\tc18.135,0,32,13.864,32,32S156.802,256,138.667,256z M202.667,170.667c-18.136,0-32-13.865-32-32c0-18.136,13.864-32,32-32\r\n\t\t\tc18.135,0,32,13.864,32,32C234.667,156.802,220.802,170.667,202.667,170.667z M309.333,170.667c-18.135,0-32-13.865-32-32\r\n\t\t\tc0-18.136,13.865-32,32-32c18.136,0,32,13.864,32,32C341.333,156.802,327.469,170.667,309.333,170.667z M373.333,256\r\n\t\t\tc-18.135,0-32-13.864-32-32s13.865-32,32-32c18.136,0,32,13.864,32,32S391.469,256,373.333,256z"
},
"children": []
}]
}]
}]
}]
}]
};
exports.androidColorPalette = androidColorPalette; |
#!/bin/sh
###############################################################
# SPDX-License-Identifier: BSD-2-Clause-Patent
# Copyright (c) 2019 Tomer Eliyahu (Intel)
# This code is subject to the terms of the BSD+Patent license.
# See LICENSE file for more details.
###############################################################
# Echo each command before running it; abort on the first failure.
run() {
echo "$*"
"$@" || exit $?
}
# Positional args: $1 = bridge ip/prefix, $2 = base MAC prefix; the rest are
# forwarded to the final bash invocation.
bridge_ip="$1"; shift
base_mac="$1"; shift
run ip link add br-lan address "${base_mac}:00:00" type bridge
run ip link add wlan0 address "${base_mac}:00:10" type dummy
run ip link add wlan2 address "${base_mac}:00:20" type dummy
run ip link set dev eth0 master br-lan
run ip link set dev wlan0 master br-lan
run ip link set dev wlan2 master br-lan
run ip address flush dev eth0
run ip link set dev wlan0 up
run ip link set dev wlan2 up
run ip address add dev br-lan "$bridge_ip"
run ip link set dev br-lan up
# Quoted + checked: the original unquoted 'cd ${INSTALL_DIR}' dropped the
# argument entirely when INSTALL_DIR was unset and silently cd'd to $HOME.
run cd "${INSTALL_DIR}"
exec /bin/bash "$@"
|
<filename>src/infrastructure/mongoose/repository/index.ts
// Barrel module: re-exports the mongoose-backed repositories under their
// public (pluralised) names so consumers import from this single entry point.
export { LoginCredentialQueryRepository as LoginCredentialsQueryRepository } from "./query/LoginCredential"
export { LoginCredentialCommandRepository as LoginCredentialsCommandRepository } from "./command/LoginCredential"
export { LoginSessionQueryRepository as LoginSessionsQueryRepository } from "./query/LoginSession"
export { LoginSessionCommandRepository as LoginSessionsCommandRepository } from "./command/LoginSession"
export { UserQueryRepository as UsersQueryRepository } from "./query/User"
export { UserCommandRepository as UsersCommandRepository } from "./command/User"
export { TransactionRepository } from "./Transaction"
|
# Read a count followed by a line of integers; for each of the first
# `item_count` values print value // 2 + 1.
item_count = int(input())
numbers = list(map(int, input().split()))
for idx in range(item_count):
    print(numbers[idx] // 2 + 1)
#!/usr/bin/env bash
# Apply the persistent-volume and claim manifests found under the directory
# passed as the first argument.
DIR1=$1
if [[ -n "$DIR1" ]]; then
# Quoted: a path containing spaces would otherwise be split into several args.
kubectl apply -f "$DIR1/persistent-volume/k8sPersistentVolume.yaml"
kubectl apply -f "$DIR1/persistent-volume/k8sPersistentVolumeClaim.yaml"
else
# Report to stderr and fail; the original printed to stdout and exited 0.
echo "argument error" >&2
exit 1
fi
|
#!/bin/bash
# Development runner: bundle the server with webpack, then keep a watch build
# and nodemon running side by side until interrupted.
set -eu
# BASH_SOURCE[0] spelled explicitly (bare ${BASH_SOURCE} relies on implicit
# element-0 expansion) and quoted for paths containing spaces.
PROJECT_DIR=$(dirname "${BASH_SOURCE[0]}")
cd "$PROJECT_DIR"
export NODE_ENV="development"
export PORT=${PORT:-"8000"}
rm -rf dist
# Bundle server once before starting nodemon
yarn run webpack --display=errors-only
yarn run webpack --display=errors-only --watch &
PID[0]=$!
yarn run nodemon dist/server.js &
PID[1]=$!
# Kill both background children when the user hits Ctrl-C.
trap "kill ${PID[0]} ${PID[1]}; exit 1" INT
wait
|
import $ from 'jquery';
import axios from 'axios';
import * as sections from '@shopify/theme-sections';
// Set by the 'transitionend' handlers wired up in check(); shift() only
// triggers its animation once both clock hands have finished transitioning.
let hourFinished = false;
let minuteFinished = false;
// Add the 'animated' class to the hero image and shift container, but only
// after both clock-hand transitions have completed.
const shift = () => {
  if (!(hourFinished && minuteFinished)) {
    return;
  }
  $('.hero__image').addClass('animated');
  $('.hero__shift-container').addClass('animated');
}
// Listen for the end of each clock hand's CSS transition; record completion
// and attempt to run the hero shift animation.
const check = () => {
  $('.hero__shift-hour').on('transitionend', () => {
    hourFinished = true;
    shift();
  });
  $('.hero__shift-minute').on('transitionend', () => {
    minuteFinished = true;
    shift();
  });
}
// Wire the day/night controls: clicking either one swaps the corresponding
// time__day / time__night class on <body>.
const toggle = () => {
  const bindTimeSwitch = (trigger, removeCls, addCls) => {
    $(trigger).on('click', () => {
      $('body').removeClass(removeCls).addClass(addCls);
    });
  };
  bindTimeSwitch('.hero__shift-day', 'time__night', 'time__day');
  bindTimeSwitch('.hero__shift-night', 'time__day', 'time__night');
}
// Register the 'hero' Shopify section; when it loads, attach the
// transitionend listeners and the day/night click toggles.
sections.register('hero', {
onLoad: () => {
check();
toggle();
}
});
#!/usr/bin/env python
import logging
import ROOT as r
__version__ = "0.0.2"
def dummy_file_to_dataset(file):
    """Resolve the dataset name for an input file.

    Placeholder implementation: every file maps to the dataset "dummy".
    A real implementation could parse the file name, query a database, etc.

    :param file: Path of the input file
    :type file: string
    :return: Dataset name associated to that file
    :rtype: string
    """
    dataset_name = "dummy"
    return dataset_name
class AnalyzerBase(object):
    """Base class for a simple ROOT-based file-loop analyzer.

    Loops over input files, groups them by dataset (via a pluggable
    file-to-dataset function) and books/writes one histogram set per dataset
    into ``outfile.root``. Subclasses override :meth:`_create_histograms`
    and :meth:`_analyze`.
    """

    def __init__(self, files):
        ### Internal
        # Files to run over
        self._files = files
        # Output file; opened immediately here, closed by _finish()
        self._outfile = r.TFile("outfile.root", "RECREATE")
        # Current TDirectory in the output file
        self._curdir = None
        # Histograms for the current dataset, keyed by tag
        self._histos = None
        # The dataset currently being analyzed
        self._dataset = None
        # Function that matches file names to datasets
        self._file_to_dataset = dummy_file_to_dataset

    def _create_histograms(self):
        """Book and return the histogram dictionary for a fresh dataset.

        Meant to be overridden; the base implementation books nothing.

        :return: Mapping of histogram tag -> ROOT histogram
        :rtype: dict
        """
        histos = {}
        # Initialize your histograms here, e.g.:
        # histos["ptj"] = ROOT.TH1D("ptj", "ptj", 200, 0, 2000)
        return histos

    def _change_dataset(self, new_dataset):
        """Perform all actions associated to a change in dataset.

        Necessary actions are:
        1. Change of TDirectory where histograms are saved
        2. Change of histograms to fill

        For both actions, it is first checked whether the dataset folder and
        histograms already exist in the output file. If so, they are loaded
        from file. If not, they are created.
        (Docstring fix: the original advertised a bool return, but the method
        has always returned None.)

        :param new_dataset: New dataset to change to
        :type new_dataset: str
        """
        if new_dataset in self._outfile.GetListOfKeys():
            logging.debug("Found existing folder for dataset '{}'.".format(new_dataset))
            logging.debug("Loading histograms from file.")
            directory = self._outfile.Get(new_dataset)
            histos = {}
            for key in directory.GetListOfKeys():
                histos[key.GetName()] = directory.Get(key.GetName())
        else:
            logging.debug("No folder found for dataset '{}'.".format(new_dataset))
            directory = self._outfile.mkdir(new_dataset)
            directory.cd()
            histos = self._create_histograms()
        # Update everything
        self._curdir = directory
        self._histos = histos
        self._dataset = new_dataset

    def _write_histos(self):
        """Write the current histogram set into the current TDirectory."""
        logging.debug("Writing histograms to file.")
        if self._curdir:
            self._curdir.cd()
            # Guard against a dataset with no histograms booked yet.
            for tag, histogram in (self._histos or {}).items():
                # Typo fix: original log message read "historam".
                logging.debug("Writing histogram '{}'.".format(tag))
                histogram.Write("", r.TObject.kOverwrite)

    def _finish(self):
        """Perform all actions necessary to end a run.

        Necessary actions are
        1. Writing histograms to file
        2. Closing the file
        """
        self._write_histos()
        self._outfile.Close()

    def _file_loop(self):
        """Loop over files and analyze each one."""
        nfiles = len(self._files)
        for i, file in enumerate(self._files):
            logging.info("File {} of {}: {}".format(i + 1, nfiles, file))
            dataset = self._file_to_dataset(file)
            if dataset != self._dataset:
                # _change_dataset() already updates self._dataset; the
                # original's extra assignment afterwards was redundant.
                self._change_dataset(dataset)
            self._analyze(file)

    def run(self):
        """Run the analysis over all input files."""
        logging.info("Starting analyzer run.")
        self._file_loop()
        logging.info("Finished analyzing files.")
        self._finish()

    def _analyze(self, file):
        """Analyze a single file; override in subclasses.

        :param file: Path of the file to analyze
        :return: True on success
        :rtype: bool
        """
        # Do your analysis task
        return True
|
#!/bin/bash
set -u ## Error if variables are used without being defined first
#set -x
#Do not use Hungarian notation
#Do not use a prefix for member variables (_, m_, s_, etc.). If you want to distinguish between local and member variables you should use 'this. in C# and 'Me.' in VB.NET.
#Do use camelCasing for member variables
#Do use camelCasing for parameters
#Do use camelCasing for local variables
#Do use PascalCasing for function, property, event, and class names
#Do prefix interfaces names with 'I'
#Do not prefix enums, classes, or delegates with any letter
SetupInputOptions() {
## Populate the global inputOptions array from this function's arguments.
## Other functions invoked later by this script read inputOptions directly.
## Seed with one empty entry so 'set -u' never sees an unbound array.
inputOptions=('')
local arg
for arg in "$@"
do
inputOptions+=("$arg")
done
}
Local_HandleInputOptions() {
## Parse the global inputOptions array (built by SetupInputOptions) and
## publish read-only globals: install_base, action, archive_dir, max_parallel.
## Exits 9 on invalid/missing values; with --test it logs and exits 0.
usage() {
echo "
This script is for backup and restore of data on a path ( --install-base)
For backup:
This script performs backup of a path ( --install-base).
It will creaate separate tar file for any filesystem under that path.
It will also split large directories into smaller tar files.
valid options to be picked up by this script are
--install-base : Install base.
--action : backup or restore
--output-dir : Output directory
[--max-parallel <value> ] : Maximum parallel backups to run. Defaults to number of cpus/2
"
}
## NOTE(review): the usage text above advertises --output-dir (and contains the
## typo 'creaate'), but the parser below handles --archive-dir — confirm and align.
## Check original shell option ( -u state is restored at the end )
orig_options_for_u=${-//[^u]/}
set +u
## elements in array
paramCount=${#inputOptions[@]}
for ((i = 0 ; i < ${paramCount} ; i++));
do
case ${inputOptions[${i}]} in
--install-base)
## This is an example of an option that has a value
install_base_supplied='TRUE'
typeset __install_base ## Create the variable
SetVariableFromInputOptionValue __install_base ## Set variables value in using the function
typeset -r __install_base ## Set it as read only ( keeping the value )
;;
--action)
## This is an example of an option that has a value
typeset __action ## Create the variable
SetVariableFromInputOptionValue __action ## Set variables value in using the function
typeset -r __action ## Set it as read only ( keeping the value )
;;
--archive-dir)
## This is an example of an option that has a value
typeset __archive_dir ## Create the variable
SetVariableFromInputOptionValue __archive_dir ## Set variables value in using the function
typeset -r __archive_dir ## Set it as read only ( keeping the value )
;;
--max-parallel)
## This is an example of an option that has a value
typeset __max_parallel ## Create the variable
SetVariableFromInputOptionValue __max_parallel ## Set variables value in using the function
typeset -r __max_parallel ## Set it as read only ( keeping the value )
;;
--test)
inputOptions[${i}]='' # remove input option once it is handled here
typeset -r __test='true'
;;
-h | --help ) usage
exit
;;
--*) LogMessage "ERROR: unhandled --parameter ${inputOptions[${i}]}"
usage
exit
;;
*) continue;; ## allow unhandled options to remain. They will be checked by the standard function HandleInputOptions()
esac
done
if [[ "${orig_options_for_u}" == "u" ]]
then
set -u
fi
# This script will use the shared functions to do the full end to end install of DataStage
typeset -r -g install_base=$( echo "${__install_base:-}" | sed 's!/\s*$!!' ) ## remove any trailing '/'
if [[ ( ! -z ${install_base} ) && -d ${install_base} ]]
then
LogMessage "INFO: --install-base ${install_base} is valid and exists."
else
LogMessage "ERROR: --install-base ${install_base} is not valid."
usage
exit 9
fi
typeset -r -g action=${__action:-}
if [[ ( "${action}" == "backup" ) || ( "${action}" == "restore" ) ]]
then
LogMessage "INFO: --action ${action} is valid."
else
LogMessage "ERROR: --action ${action} is not valid."
usage
exit 9
fi
typeset -r -g archive_dir=${__archive_dir:-}
if [[ ( ! -z ${archive_dir} ) && -d ${archive_dir} ]]
then
LogMessage "INFO: --archive-dir ${archive_dir} is valid and exists."
else
LogMessage "ERROR: --archive-dir ${archive_dir} is not valid. It must be supplied and it must exist."
usage
exit 9
fi
## Derive the parallelism cap from the host's processor count.
local number_of_processors=$( grep -c ^processor /proc/cpuinfo )
local our_recommended_max_limit=${number_of_processors} ## just a guess..not really tested.
local default_max_parallel=$(( ${number_of_processors}/ 2 )) ## just a guess..not really tested.
local temp_max_parallel=${__max_parallel:-${default_max_parallel}}
if [[ "${temp_max_parallel}" -ge ${our_recommended_max_limit} ]]
then
LogMessage "INFO: --max-parallel was provided as ${temp_max_parallel}, but we're limiting it to ${our_recommended_max_limit} on this environment, as it has ${number_of_processors} processors available ."
temp_max_parallel=${our_recommended_max_limit}
fi
typeset -g -r max_parallel=${temp_max_parallel}
if [[ "${__test:-}" == "true" ]]
then
messageType="TEST"
else
messageType="INFO"
fi
LogMessage "${messageType}: Running with --install-base ${install_base} "
LogMessage "${messageType}: Running with --action ${action} "
LogMessage "${messageType}: Running with --archive-dir is set to ${archive_dir} "
LogMessage "${messageType}: Running with --max-parallel is set to ${max_parallel} "
if [[ "${__test:-}" == "true" ]]; then exit 0 ; fi
}
#### Already shared
#!/bin/bash
SetupLog() {
## Initialise logging: resolve the script's name/dir and create a per-run log
## file under /tmp, unless a logFile has already been configured by a caller.
typeset -g thisScriptDir=$( cd "$(dirname "$0")" ; pwd; cd - >/dev/null)
typeset -g thisScript=$(basename "$0")
## If logFile is already set, continue using same value:
if [[ -z "${logFile:-}" ]]
then
vTimeStamp=$(date '+%Y%m%d_%H%M%S')
# Create log files directory. mkdir -p replaces the original's
# test-then-create, which raced with concurrent runs.
logFileDir=/tmp/Logs_$(whoami)_$(date '+%Y%m%d')
mkdir -p "${logFileDir}"
chmod 775 "${logFileDir}"
logFileBaseName=${thisScript}
logFileName=${logFileBaseName}_${vTimeStamp}.log
typeset -g logFile=${logFileDir}/${logFileName}
>"${logFile}"
chmod 755 "${logFile}"
# Scratch file for captured stderr (fixed: original prefixed a stray '/',
# producing a //tmp/... path).
tmpERR=${logFileDir}/tmpERR-$$
>"${tmpERR}"
chmod 755 "${tmpERR}"
fi
LogMessage "INFO: Log messages will be written to ${logFile}"
}
LogMessage () {
## Write a timestamped message to stderr, and also append it to ${logFile}
## once SetupLog has configured one.
logDate=$(date '+%y/%m/%d %H:%M:%S')
message="$1"
message_plus="${message} - ( Message from ${FUNCNAME[@]} - Script name : $0 )" ## Could build this up by looping through the FUNCNAME array to get full list of functions.
## ${logFile:-} guards against 'unbound variable' aborts: this script runs
## under 'set -u' and LogMessage may be called before SetupLog.
if [ -z "${logFile:-}" ]
then
echo "[ ${logDate} ] - ${message_plus} " >&2
else
echo "[ ${logDate} ] - ${message_plus}" | tee -a "${logFile}" >&2
fi
}
SetVariableFromInputOptionValue(){
## Consume the option at inputOptions[i] plus its following value, appending
## the value to the variable named in $1 (via eval). Both entries are blanked
## once handled; i is advanced (or rewound) in the CALLER's scope.
## This function will work on the inputOptions options array that is already set up in the calling function
## and uses the value of i controlled in the calling function
if [[ -z "${inputOptions[@]:-}" ]]
then
LogMessage "ERROR: inputOptions array does not exist"
return 9
#else
# LogMessage "INFO : inputOptions array is "${inputOptions[@]}
fi
variable_to_set=$1
optName="${inputOptions[${i}]}"
inputOptions[${i}]='' # remove input option once it is handled here
i=$(( i + 1 )) ##i++ # move to next entry, which is the value
## code to handle empty value being passed
typeset tmp=${inputOptions[${i}]-}
if [[ "${tmp}" =~ ^- ]]
then
##invalid value supplied ( next token is another option, not a value )
LogMessage "INFO: Assuming that ${optName} is being passed as empty. Value can not start with '-'"
tmp=""
i=$(( i - 1 )) ## go back , to this can be processed again as a new -- variable
else
##value supplied is ok
## keep 'tmp' as it is
inputOptions[${i}]='' # remove input option once it is handled here
fi
## Set variable to contents of 'tmp'
eval "${variable_to_set}+=(\"${tmp}\")" ## Treat it like an array. Works for simple strings too. ( just initialise variable before calling this function )
}
################### MOVE TO SHARED ??
CreateDir() {
## Ensure a directory (and its parents) exists, created with a 022 umask so
## it is world-readable; the caller's umask is restored afterwards.
typeset targetDirectory=$1
LogMessage "INFO: Creating dir ${targetDirectory}"
# Make sure target directory exists ( paths quoted so names containing
# spaces are not word-split, which the original did not guard against )
if [[ ! -d "${targetDirectory}" ]]
then
orig_umask=$(umask)
umask 022
mkdir -p "${targetDirectory}"
umask "${orig_umask}"
fi
}
TimeRun(){
## Run a command and echo only the 'real' wall-clock time it took, while the
## command's own stdout/stderr pass through untouched (via saved FDs 3 and 4).
## Capture time taken to run something. ( Why is this so fiddly?)
local command="${1}"
# Keep both stdout and stderr unmolested.
# http://mywiki.wooledge.org/BashFAQ/032
# The shell's original FD 1 is saved in FD 3, which is inherited by the subshell.
# Inside the innermost block, we send the time command's stdout to FD 3.
# Inside the innermost block, we send the time command's stderr to FD 4.
exec 3>&1 4>&2
local time_output=$( { time ${command} 1>&3 2>&4; } 2>&1 ) # Captures time only.
exec 3>&- 4>&-
## Strip everything but the duration from the 'real ...' line of time's output.
real_time=$( echo -e "${time_output}" | grep real | sed 's/real//' | tr -d '\t' | tr -s ' ' )
echo "${real_time}"
}
BackupFolder(){
## Drive a backup of ${1}: generate a de-duplicated list of tar commands into
## a temp script via GenerateTarScript, then execute each command, recording
## per-command return codes. Relies on globals set by the option parser
## ( max_parallel ) and on LogMessage.
local folder_path="$1" #The source folder you are backing up.
local archive_path="$2"
local additional_exclude_options=${3:-}
## Create a file to be the temp tar script.
local temp_script_file=$( mktemp --tmpdir=/localwork2/temp ds_backup_tar_commands.XXX.tmp )
GenerateTarScript "${folder_path}" "${archive_path}" "${temp_script_file}"
vRC=$?
LogMessage "INFO: tar script created at ${temp_script_file}"
LogMessage "INFO: running tar commands."
OLDIFS="$IFS"
IFS=$'\n'
ok_to_continue='false'
number_of_commands_to_run=$( cat ${temp_script_file} | sort | uniq | wc -l )
LogMessage "INFO: About to run ${number_of_commands_to_run} tar commands."
return_code_array=()
counter=0
for tar_command in $( cat ${temp_script_file} | sort | uniq )
do
## Throttle: wait until fewer than max_parallel background jobs are running.
## NOTE(review): ok_to_continue is never reset to 'false' inside this for
## loop, so the throttle appears to apply only before the first command —
## confirm whether that is intended.
while [[ "${ok_to_continue}" == 'false' ]]
do
counter=$(( counter + 1 ))
#number_of_background_jobs=$(( $( jobs | wc -l ) -1 )) # take one off, as you always need one background proceses just to be in here.
number_of_background_jobs=$( jobs | wc -l )
#number_of_background_jobs=$( pstree -aup ${current_pid} | grep ' tar ' | grep -v grep | wc -l )
if [[ ${number_of_background_jobs} -ge ${max_parallel} ]]
then
LogMessage "DEBUG: Already have ${number_of_background_jobs} background jobs running, so sleep for 10s and check again."
local current_jobs=$( jobs )
LogMessage "DEBUG: Shown current jobs. ${current_jobs} "
## sleep 10 seconds and then test again
sleep 10
#LogMessage "DEBUG: current pid is ${current_pid} "
#pstree -aup ${current_pid}
continue
else
LogMessage "DEBUG: Currrent number of background jobs is number_of_background_jobs is ${number_of_background_jobs}. ok to continue "
ok_to_continue='true'
break
fi
done
# Run the tar command ( in the current shell; the braces only group it )
{
${tar_command}
vRC=$?
return_code_array[${counter}]=${vRC}
}
done
IFS="$OLDIFS"
## Check through return code array
counter=0
for return_code in "${return_code_array[@]}"
do
counter=$(( counter + 1 ))
LogMessage "INFO: return code for command ${counter} : ${return_code}"
done
}
GenerateTarScript() {
## Append tar command lines for ${1} to the script file ${3}. Large
## subobjects ( >= split_size_bytes ) and nested filesystems are excluded
## from the parent tar and handled by recursive background calls to this
## function. Relies on globals: install_base, archive_dir, max_parallel,
## and top_level_path ( set here on the first, top-level call ).
## Probably want to have a level above this..which decides whether to split into subfolders or not.
## Or maybe decide by size of folder?
## check if its got filesystems mounted on it... and warn/exit if it does , or split into separate filesystems?
local folder_path="$1" #The source folder you are backing up.
local archive_path="$2"
local temp_script_file=${3}
local split_size_bytes=500000000
#for funcname in "${FUNCNAME[@]}"
#do
# LogMessage "TEMP: funcname ${funcname}"
#done
if [[ "${install_base}" == "${folder_path}" ]]
then
local top_level='true'
typeset -g top_level_path=${folder_path}
LogMessage "INFO: This is the top level call to BackupFolder . Top level path is ${top_level_path}"
else
local top_level='false'
LogMessage "INFO: This is a recursive call to BackupFolder . Top level path is ${top_level_path} "
## Need to remember to add the original part to the name of the archive file.
fi
local additional_exclude_options=" --exclude=${folder_path}/IISBackup " ## allows you to backup /iis/dtxx without IISBackup
local folder_size_bytes
local folder_size_bytes=$( du -s -b "${folder_path}" | cut -f 1 )
## Maybe put some logic in here.. to say if certain size, then do each subfolder as separate backup.
## ie add it to the exclude options and the list of additional things to process, like you do with fs.
local objects_to_backup_separately=() ## use this array to store names of objects that we exclude from this backup, and then backup separately.
if [[ ${folder_size_bytes} -ge ${split_size_bytes} ]]
then
LogMessage "INFO: Folder size is greater than split size."
## Checking for subfolders or files
local folder_name
local folder_size
for object_size in $( du -bs ${folder_path}/* | sed 's/\t/~/' )
do
folder_name=$( echo ${object_size} | cut -d '~' -f 2 )
folder_size=$( echo ${object_size} | cut -d '~' -f 1 )
#LogMessage "INFO: ${folder_name} ${folder_size} "
## If any individual object in this dir is greater than the split size, then back that up separately.
if [[ ${folder_size} -ge ${split_size_bytes} ]]
then
LogMessage "INFO: ${folder_name} is larger than the split size, so will be backed up separately."
objects_to_backup_separately+=(${folder_name})
fi
done
else
LogMessage "INFO: Folder size ( ${folder_size_bytes} ) is less than split size ( ${split_size_bytes} )."
fi
LogMessage "INFO: Backup up ${folder_path} to ${archive_path}"
LogMessage "INFO: ${folder_path} has size ${folder_size_bytes} (bytes)."
LogMessage "TEMP: running df -hP | grep \" ${folder_path}/\" | awk '{print \$NF}' "
## Checking if any filesytems are mounted under this location
## Any nested filesystems should be excluded from this backup, and then backed up explicitly
local nested_filesystems=$( df -hP | grep " ${folder_path}/" | awk '{print $NF}' )
LogMessage "INFO: Filesytems found under this location. ${nested_filesystems}"
local exclude_options=" --exclude=./.snapshot "
exclude_options+=" --exclude=./IISBackup "
## build up exclude options for filesystems and objects we are backing up separately.
LogMessage "DEBUG: ${objects_to_backup_separately[@]:-Nothing in objects} when backing up ${folder_path} "
for filesystem in ${nested_filesystems[@]} ${objects_to_backup_separately[@]:-}
do
## Need to exclude the path of the filesystem relative to where we're backing up.
## If backing up from /iis/dt7g and you want to exclude /iis/dt7g/IISBackup , then --exclude=./IISBackup
## So for the filestem paths, we want to replace the /iis/dt7g/ with './'
local path_to_exclude=$( echo ${filesystem} | sed "s!${folder_path}/!./!" )
LogMessage "TEMP: path_to_exclude is ${path_to_exclude}. filesystem ${filesystem} folder path ${folder_path} "
exclude_options+=" --exclude=${path_to_exclude}"
done
local folder_name=$( basename ${folder_path} )
local relative_folder_path=$( echo ${folder_path} | sed "s!${top_level_path}!.!" )
#local relative_folder_path_as_line_separated=$( echo ${relative_folder_path} | sed 's!/!_!g' | sed 's/^.//' | sed 's/^_//' ) ## e.g change /my/path to to my_path
LogMessage "TEMP: relative_folder_path is ${relative_folder_path}"
local relative_folder_path_as_line_separated=$( echo ${relative_folder_path} | sed 's!^./!!' | sed 's!/!_!g' ) ## e.g change /my/path to to my_path
## Name for top level, should just be the last part of top level
base_top=$( basename ${top_level_path} )
## If it's not the top level call, then we need to add the top level path to here
if [[ "${top_level}" == 'true' ]]
then
LogMessage "DEBUG: This is top level, so no need to add base name to archive file path. ${install_base} matches ${folder_path} "
LogMessage "DEBUG: top_level is ${top_level} "
local archive_file_path=${archive_path}/${folder_name}.tar.z
else
LogMessage "DEBUG: This is recursive call, so base name added to archive file path. ${install_base} does not match ${folder_path} "
LogMessage "DEBUG: top_level is ${top_level} "
local archive_file_path=${archive_path}/${base_top}_${relative_folder_path_as_line_separated}.tar.z
fi
CreateDir "${archive_path}"
## Keep any previous archive by renaming it with a timestamp + pid suffix.
if [[ -e ${archive_file_path} ]]
then
local backup=${archive_file_path}_$(date +'%Y%m%d_%H%M%S')_$$
LogMessage "INFO: Moving existing archive at ${archive_file_path} to ${backup}"
mv ${archive_file_path} ${backup}
fi
LogMessage "INFO: Starting tar command"
LogMessage "DEBUG: Running tar --directory ${top_level_path} --gzip ${exclude_options} -cf ${archive_file_path} ${relative_folder_path}" ##--directory = work from this directory; . backup from the directory you're working in. Means it gives relative path.
#tar --directory ${top_level_path} --gzip ${exclude_options} -cf ${archive_file_path} ${relative_folder_path} ##--directory = work from this directory; . backup from the directory you're working in. Means it gives relative path.
echo "tar --directory ${top_level_path} --gzip ${exclude_options} -cf ${archive_file_path} ${relative_folder_path} \# " >> ${temp_script_file} ##--directory = work from this directory; . backup from the directory you're working in. Means it gives relative path.
local counter=0
## This is now backing up the things we excluded from original backup. ie. separate filesystems or objects that are greater than the split size.
local filesystem
## remove duplicates across the two arrays
OLDIFS="$IFS"
IFS=$'\n'
combined_object_list=(`for R in "${nested_filesystems[@]:-}" "${objects_to_backup_separately[@]:-}" ; do echo "$R" ; done | sort -du`)
## TEMP###
## temp output list to file
tempfile=$( mktemp --tmpdir=/localwork2/temp ds_backup.XXX.tmp )
for line in "${combined_object_list[@]:-}"
do
echo "${line}" >> ${tempfile}
done
###
IFS="$OLDIFS"
#for filesystem in ${nested_filesystems[@]} ${objects_to_backup_separately[@]:-}
LogMessage " "
LogMessage " "
LogMessage " "
LogMessage " "
LogMessage " "
LogMessage "INFO: This call to BackupFolder should create ${#combined_object_list[@]:-} background jobs."
for filesystem in ${combined_object_list[@]:-}
do
counter=$(( counter + 1 ))
if [[ "${filesystem}" =~ IISBackup ]]
then
LogMessage "INFO: Skipping ${filesystem} because it looks like IISBackup . "
continue
fi
LogMessage "INFO: Doing tar of nested filesystem ${filesystem}"
ok_to_continue='false'
#number_of_tar_commands_running=$( pstree -aup | grep ' tar ' | wc -l )
# using number of tar commands instead of number of jobs
#number_of_background_jobs=${number_of_tar_commands_running}
## nb
current_pid=$$
LogMessage "DEBUG: current pid is ${current_pid} "
## Throttle recursion: count running tar processes under this pid and wait
## until fewer than max_parallel before spawning the next background call.
while [[ "${ok_to_continue}" == 'false' ]]
do
#number_of_background_jobs=$(( $( jobs | wc -l ) -1 )) # take one off, as you always need one background proceses just to be in here.
#number_of_background_jobs=$( jobs | wc -l )
number_of_background_jobs=$( pstree -aup ${current_pid} | grep ' tar ' | grep -v grep | wc -l )
if [[ ${number_of_background_jobs} -ge ${max_parallel} ]]
then
LogMessage "DEBUG: Already have ${number_of_background_jobs} background jobs running, so sleep for 10s and check again."
local current_jobs=$( jobs )
LogMessage "DEBUG: Shown current jobs. ${current_jobs} "
## sleep 10 seconds and then test again
sleep 10
#LogMessage "DEBUG: current pid is ${current_pid} "
pstree -aup ${current_pid}
continue
else
LogMessage "DEBUG: Currrent number of background jobs is number_of_background_jobs is ${number_of_background_jobs}. ok to continue "
ok_to_continue='true'
break
fi
done
#RunParallel() {
# local time_taken=$( TimeRun "BackupFolder ${filesystem} ${archive_dir}" )
# LogMessage "INFO: BackupFolder ${filesystem} ${archive_dir} took ${time_taken}"
#}
#RunParallel &
{
#local time_taken=$( TimeRun "BackupFolder ${filesystem} ${archive_dir}" )
local time_taken=$( TimeRun "GenerateTarScript ${filesystem} ${archive_dir} ${temp_script_file}" )
LogMessage "INFO: BackupFolder ${filesystem} ${archive_dir} took ${time_taken}"
} & ## run this in background to allow parallel running.
number_of_background_jobs=$( jobs | wc -l )
LogMessage "INFO: Number of backgound jobs running is ${number_of_background_jobs} ."
LogMessage "INFO: Counter is ${counter} ."
#if [[ ${counter} -ge 3 ]]
#then
# break
#fi
done
LogMessage "INFO: Waiting for parallel backups to complete"
wait
}
########################
#### MAIN PROGRAM #########
## Entry point: set up logging, parse options, then dispatch on --action.
thisScriptDir=$( cd $(dirname $0) ; pwd; cd - >/dev/null)
typeset -i error_count=0
## Standard Functions are in the SubScripts directory
. ${thisScriptDir}/StandardFunctions.env
SetupLog;
echo logfile is ${logFile}
LogMessage "INFO: Detailed Log file is ${logFile}"
SetupInputOptions "$@"
Local_HandleInputOptions ${inputOptions} ;vRC=$?; if [ ${vRC} -ne 0 ] ; then LogMessage "Installation abandoned with rc=${vRC}"; funcTidyUp; exit 3; fi
cat /dev/null > /localwork2/stetempscript.sh
case ${action} in
backup)
## NOTE(review): vRC is assigned inside the $( ) command substitution, i.e.
## in a subshell — the value logged two lines below is NOT the backup's
## return code. Confirm and capture the status outside the substitution.
time_taken=$( TimeRun "BackupFolder ${install_base} ${archive_dir}"; vRC=$? )
LogMessage "INFO: Total backup time taken : ${time_taken}"
LogMessage "INFO: BackupFileSystems finished with return code ${vRC} ."
;;
restore)
LogMessage "INFO: Not coded yet."
;;
*)
LogMessage "ERROR: Invalid action ${action}"
;;
esac
LogMessage "INFO: Detailed Log file is ${logFile}"
LogMessage "INFO: End of Script."
|
// Thin wrapper around document.cookie providing get/set/delete by name.
window.cookieStorage = {
    // Return the value of the cookie named `cname`, or "" when absent.
    get: (cname) => {
        const prefix = cname + "=";
        const entries = decodeURIComponent(document.cookie).split(';');
        for (let entry of entries) {
            // Strip the leading spaces left by the "; " separator.
            while (entry.charAt(0) == ' ') {
                entry = entry.substring(1);
            }
            if (entry.indexOf(prefix) == 0) {
                return entry.substring(prefix.length, entry.length);
            }
        }
        return "";
    },
    // Store a raw cookie string (caller supplies name=value and attributes).
    set: (cookie) => {
        document.cookie = cookie;
    },
    // Expire the named cookie by setting a date in the past.
    delete: (cookieName) => {
        document.cookie = cookieName + '=; expires=Thu, 01 Jan 1970 00:00:01 GMT;';
    }
};
// JS interop surface for the hosting app (DOM access + classic form POST).
window.interop = {
    // Value of the first element with the given name attribute, or "".
    getElementByName: (name) => {
        const matches = document.getElementsByName(name);
        return matches.length ? matches[0].value : "";
    },
    SetFocus: (id) => { document.getElementById(id).focus(); },
    // Builds a hidden form with one input per own key of `fields` and POSTs it
    // to `path` — triggers a full-page navigation, unlike fetch/XHR.
    submitForm: (path, fields) => {
        const form = document.createElement('form');
        form.method = 'post';
        form.action = path;
        Object.keys(fields).forEach((key) => {
            const hiddenField = document.createElement('input');
            hiddenField.type = 'hidden';
            hiddenField.name = key;
            hiddenField.value = fields[key];
            form.appendChild(hiddenField);
        });
        document.body.appendChild(form);
        form.submit();
    }
}
package com.netcracker.ncstore.util.enumeration;
/**
 * Sort orders a product listing can be requested in.
 * Semantics of each rule are applied by the query layer that consumes this enum.
 */
public enum EProductSortRule {
    DEFAULT,
    POPULAR,
    RATING,
    PRICE,
    DATE,
    DISCOUNT
}
|
<reponame>anticore/automaton
import { Reducer } from 'redux';
import { produce } from 'immer';
// == state ========================================================================================
/** Workspace UI state: which editor view is currently shown. */
export interface State {
  mode: 'dope' | 'channel' | 'curve';
}
/** Initial workspace state — the dope sheet is shown first. */
export const initialState: Readonly<State> = {
  mode: 'dope'
};
// == action =======================================================================================
/** The only action this slice handles: switch the active editor view. */
export type Action = {
  type: 'Workspace/ChangeMode';
  mode: 'dope' | 'channel' | 'curve';
};
// == reducer ======================================================================================
/**
 * Workspace reducer. Only 'Workspace/ChangeMode' mutates state; any other
 * action returns the existing state object unchanged (immer returns the
 * original object when the draft is untouched, so the early return below is
 * behaviorally identical).
 */
export const reducer: Reducer<State, Action> = ( state = initialState, action ) => {
  if ( action.type !== 'Workspace/ChangeMode' ) { return state; }
  return produce( state, ( draft: State ) => {
    draft.mode = action.mode;
  } );
};
|
<filename>Documentation/_verification_helpers_8cpp.js<gh_stars>0
// Navigation index data — appears auto-generated by Doxygen for
// VerificationHelpers.cpp; do not edit by hand (regenerate instead).
// Each entry: [ display name, target page/anchor, children ].
var _verification_helpers_8cpp =
[
    [ "CheckValidSize", "_verification_helpers_8cpp.xhtml#a97dc68ae76f04b81c833184724836c9a", null ],
    [ "NonNegative", "_verification_helpers_8cpp.xhtml#ab075020544612cd151ebdd08db537396", null ],
    [ "VerifyInt32", "_verification_helpers_8cpp.xhtml#a2e0aa273755368a1bf5fc65102df4a92", null ]
];
<reponame>JonathanLoscalzo/RepairTracking<filename>src/RepairTracking.Web/ClientApp/src/modules/task/update/index.js
import { replace } from 'react-router-redux'
import { toast } from 'react-toastify';
import { change } from 'redux-form';
import * as _ from 'lodash'
import api from '../../common/api';
// Action types for the task-update screen.
export const LOAD_TASK = "TASKS/UPDATE/LOAD_CREATE_TASK"
export const LOADED_TASK = "TASKS/UPDATE/LOADED_CREATE_TASK"
export const REQUEST_UPDATE_TASK = "TASKS/UPDATE/REQUEST_TASKS"
export const RESPONSE_UPDATE_TASKS = "TASKS/UPDATE/RESPONSE_TASKS"
export const ERROR_UPDATE_TASKS = "TASKS/UPDATE/ERROR_TASKS"
const ADD_AUTOCOMPLETE_TASK = "TASKS/UPDATE/ADD_TASK"
const REMOVE_AUTOCOMPLETE_TASK = "TASKS/UPDATE/REMOVE_TASK"
// Initial slice state. `tasks` (autocomplete candidates) must start as an
// array: the ADD case below spreads it, and spreading `undefined` throws.
let initialState = {
    task: null,
    tasks: [],
    loading: true,
    error: null
}
/**
 * Reducer for the task-update slice.
 * - LOAD/REQUEST flip the loading flag on; LOADED/RESPONSE store the task.
 * - ADD/REMOVE maintain the `tasks` autocomplete list (guarded with `|| []`
 *   so pre-existing persisted state without the field cannot crash).
 * - ERROR records the failure and clears loading.
 */
export default function reducer(state = initialState, action = {}) {
    switch (action.type) {
        case LOAD_TASK:
            return { ...state, loading: true }
        case LOADED_TASK:
            return { ...state, loading: false, task: action.payload }
        case ADD_AUTOCOMPLETE_TASK:
            return { ...state, tasks: [...(state.tasks || []), action.payload] }
        case REMOVE_AUTOCOMPLETE_TASK:
            // Drop the entry with the same id (stdlib filter replaces the
            // previous lodash differenceWith with an identical comparator).
            return { ...state, tasks: (state.tasks || []).filter(t => t.id !== action.payload.id) }
        case REQUEST_UPDATE_TASK:
            return { ...state, loading: true }
        case RESPONSE_UPDATE_TASKS:
            return { ...state, loading: false, task: action.payload }
        case ERROR_UPDATE_TASKS:
            return { ...state, loading: false, error: action.error }
        default:
            return state;
    }
}
/**
 * Thunk: appends the autocomplete task chosen in the 'task_selectables'
 * dropdown to the redux-form `fields` array, then resets the dropdown.
 * `id` arrives as a <select> value (a string), so the loose != / ==
 * comparisons against numeric ids are deliberate.
 */
export const addTask = (fields, id) => (dispatch, state) => {
    if (id != -1) {
        const item = state().task.update.tasks.find(x => x.id == id);
        if (item) {
            // Fix: push only when a matching task exists — previously the
            // push ran unconditionally, appending `undefined` for stale ids.
            fields.push(item);
            dispatch(change('task/update', 'task_selectables', '-1'))
            //dispatch({ type: REMOVE_AUTOCOMPLETE_TASK, payload: item })
        }
    }
}
/**
 * Thunk: persists `task` via PUT task/:id. On success stores the server
 * response, navigates back to the task list and toasts; on failure records
 * the error and toasts.
 */
export const update = (task) => (dispatch) => {
    const onSuccess = (response) => {
        dispatch({ type: RESPONSE_UPDATE_TASKS, payload: response.data })
        dispatch(replace('/task'));
        toast.success("Pedido Modificado")
    }
    const onFailure = (error) => {
        dispatch({ type: ERROR_UPDATE_TASKS, error: error })
        toast.error("Error al modificar pedido")
    }
    dispatch({ type: REQUEST_UPDATE_TASK })
    api.put(`task/${task.id}`, task).then(onSuccess).catch(onFailure)
}
/**
 * Thunk: seeds the edit screen with the task `id` taken from the already
 * loaded list slice. If the task is not cached there, bail out to the list
 * with a warning instead of fetching.
 */
export const load = (id) => (dispatch, state) => {
    dispatch({ type: LOAD_TASK })
    const cached = state().task.list.tasks.find(t => t.id === id);
    if (!cached) {
        dispatch(replace('/task'));
        toast.warn("No se puede editar el pedido seleccionado")
        return
    }
    dispatch({ type: LOADED_TASK, payload: cached })
}
// Thunk: abandon the edit — return to the task list and notify the user.
export const goBack = () => dispatch => {
    dispatch(replace('/task'));
    toast.info("Modificación Cancelada")
}
-- List every employee's name alongside the name of their department
-- (inner join: employees without a matching department are omitted).
SELECT e.name, d.department
FROM employees e
JOIN departments d ON d.id = e.department_id;
<reponame>shainyoreng/go-figure-web<gh_stars>10-100
import { Point2D } from '@app/structures/point';
export class Vector {
start: Point2D;
end: Point2D;
constructor(start?: Point2D, end?: Point2D) {
this.start = start || new Point2D;
this.end = end || new Point2D;
}
length(): number {
return Math.sqrt(Math.pow(this.end.x - this.start.x, 2) + Math.pow(this.end.y - this.start.y, 2));
}
direction(): number {
let angle = Math.atan((this.end.y - this.start.y)/(this.end.x - this.start.x));
if (this.end.x < this.start.x)
angle += Math.PI;
return angle;
}
getReverseVector(): Vector {
let v = new Vector;
v.start = this.end;
v.end = this.start;
return v;
}
} |
#!/bin/sh
# Fail fast: abort on any error, on unset variables, and on pipeline failures.
# NOTE(review): the `function` keyword, the ERR trap and arrays below are
# bash features, not POSIX sh — presumably this is always executed by bash
# despite the shebang; confirm before changing the interpreter.
set -e
set -u
set -o pipefail
# Report the script path and line number of any unexpected failure.
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Mirror the contents of $1 into $2 with rsync, deleting stale files but
# protecting in-flight rsync temp files and skipping VCS metadata.
copy_dir()
{
  local source="$1"
  local destination="$2"
  # Use filter instead of exclude so missing patterns don't throw errors.
  # First echo the exact command for the build log, then run it.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" \"${source}*\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" "${source}"/* "${destination}"
}
# "Return value" of select_slice — shell functions cannot return strings, so
# the chosen slice path is published through this global.
SELECT_SLICE_RETVAL=""
# Pick the .xcframework slice directory (from the candidate paths in "$@")
# that matches every architecture in $ARCHS and the current platform variant
# (device / simulator / maccatalyst). Leaves the result in SELECT_SLICE_RETVAL.
select_slice() {
  local paths=("$@")
  # Locate the correct slice of the .xcframework for the current architectures
  local target_path=""
  # Split archs on space so we can find a slice that has all the needed archs
  local target_archs=$(echo $ARCHS | tr " " "\n")
  # Derive the platform variant from Xcode's environment variables.
  local target_variant=""
  if [[ "$PLATFORM_NAME" == *"simulator" ]]; then
    target_variant="simulator"
  fi
  if [[ ! -z ${EFFECTIVE_PLATFORM_NAME+x} && "$EFFECTIVE_PLATFORM_NAME" == *"maccatalyst" ]]; then
    target_variant="maccatalyst"
  fi
  for i in ${!paths[@]}; do
    local matched_all_archs="1"
    for target_arch in $target_archs
    do
      # Verifies that the path contains the variant string (simulator or
      # maccatalyst) if the variant is set. (When target_variant is empty the
      # *""* pattern matches everything, so this check is a no-op.)
      if ! [[ "${paths[$i]}" == *"$target_variant"* ]]; then
        matched_all_archs="0"
        break
      fi
      # Conversely, when building for a plain device (no variant), reject any
      # slice whose name carries a variant suffix.
      if [[ -z "$target_variant" && ("${paths[$i]}" == *"simulator"* || "${paths[$i]}" == *"maccatalyst"*) ]]; then
        matched_all_archs="0"
        break
      fi
      # This regex matches all possible variants of the arch in the folder name:
      # Let's say the folder name is: ios-armv7_armv7s_arm64_arm64e/CoconutLib.framework
      # We match the following: -armv7_, _armv7s_, _arm64_ and _arm64e/.
      # If we have a specific variant: ios-i386_x86_64-simulator/CoconutLib.framework
      # We match the following: -i386_ and _x86_64-
      # When the .xcframework wraps a static library, the folder name does not include
      # any .framework. In that case, the folder name can be: ios-arm64_armv7
      # We also match _armv7$ to handle that case.
      local target_arch_regex="[_\-]${target_arch}([\/_\-]|$)"
      if ! [[ "${paths[$i]}" =~ $target_arch_regex ]]; then
        matched_all_archs="0"
        break
      fi
    done
    if [[ "$matched_all_archs" == "1" ]]; then
      # Found a matching slice
      echo "Selected xcframework slice ${paths[$i]}"
      SELECT_SLICE_RETVAL=${paths[$i]}
      break
    fi
  done
}
# Copy the architecture-appropriate slice of an .xcframework into the Pods
# build dir. $1 = path to the .xcframework, $2 = product name, $3 = package
# type (informational), remaining args = candidate slice subdirectories.
install_xcframework() {
  local basepath="$1"
  local name="$2"
  local package_type="$3"
  local paths=("${@:4}")
  # Locate the correct slice of the .xcframework for the current architectures
  select_slice "${paths[@]}"
  local target_path="$SELECT_SLICE_RETVAL"
  # No matching slice is a warning, not an error — the build may still link
  # against a previously copied framework.
  if [[ -z "$target_path" ]]; then
    echo "warning: [CP] Unable to find matching .xcframework slice in '${paths[@]}' for the current build architectures ($ARCHS)."
    return
  fi
  local source="$basepath/$target_path"
  local destination="${PODS_XCFRAMEWORKS_BUILD_DIR}/${name}"
  if [ ! -d "$destination" ]; then
    mkdir -p "$destination"
  fi
  copy_dir "$source/" "$destination"
  echo "Copied $source to $destination"
}
# Install the UMReactNativeAdapter xcframework (device + simulator slices).
install_xcframework "${PODS_ROOT}/../../node_modules/@unimodules/react-native-adapter/ios/UMReactNativeAdapter.xcframework" "UMReactNativeAdapter" "framework" "ios-arm64" "ios-arm64_x86_64-simulator"
|
'use strict'
const test = require('tap').test
const build = require('..')
// A schema whose definition references itself via $ref must not send the
// serializer-compiler into infinite recursion, and nested levels must
// serialize with the same shape as the root.
test('can stringify recursive directory tree (issue #181)', (t) => {
  t.plan(1)
  const schema = {
    definitions: {
      directory: {
        type: 'object',
        properties: {
          name: { type: 'string' },
          subDirectories: {
            type: 'array',
            items: { $ref: '#/definitions/directory' },
            default: []
          }
        }
      }
    },
    type: 'array',
    items: { $ref: '#/definitions/directory' }
  }
  const stringify = build(schema)
  t.equal(stringify([
    { name: 'directory 1', subDirectories: [] },
    {
      name: 'directory 2',
      subDirectories: [
        { name: 'directory 2.1', subDirectories: [] },
        { name: 'directory 2.2', subDirectories: [] }
      ]
    }
  ]), '[{"name":"directory 1","subDirectories":[]},{"name":"directory 2","subDirectories":[{"name":"directory 2.1","subDirectories":[]},{"name":"directory 2.2","subDirectories":[]}]}]')
})
// Recursion through an EXTERNAL schema: `person` references itself with
// $ref '#', and the main schema pulls it in via the `schema` build option.
test('can stringify when recursion in external schema', t => {
  t.plan(1)
  const referenceSchema = {
    $id: 'person',
    type: 'object',
    properties: {
      name: { type: 'string' },
      children: {
        type: 'array',
        items: { $ref: '#' }
      }
    }
  }
  const schema = {
    $id: 'mainSchema',
    type: 'object',
    properties: {
      people: {
        $ref: 'person'
      }
    }
  }
  const stringify = build(schema, {
    schema: {
      [referenceSchema.$id]: referenceSchema
    }
  })
  const value = stringify({ people: { name: 'Elizabeth', children: [{ name: 'Charles' }] } })
  t.equal(value, '{"people":{"name":"Elizabeth","children":[{"name":"Charles"}]}}')
})
// Two self-recursive external schemas in the same build: each '#' $ref must
// resolve against ITS OWN schema ($ids 'person' vs 'directory'), not the
// other's — note `directory` applies its `default: []` where `person` omits
// absent children entirely.
test('use proper serialize function', t => {
  t.plan(1)
  const personSchema = {
    $id: 'person',
    type: 'object',
    properties: {
      name: { type: 'string' },
      children: {
        type: 'array',
        items: { $ref: '#' }
      }
    }
  }
  const directorySchema = {
    $id: 'directory',
    type: 'object',
    properties: {
      name: { type: 'string' },
      subDirectories: {
        type: 'array',
        items: { $ref: '#' },
        default: []
      }
    }
  }
  const schema = {
    $id: 'mainSchema',
    type: 'object',
    properties: {
      people: { $ref: 'person' },
      directory: { $ref: 'directory' }
    }
  }
  const stringify = build(schema, {
    schema: {
      [personSchema.$id]: personSchema,
      [directorySchema.$id]: directorySchema
    }
  })
  const value = stringify({
    people: {
      name: 'Elizabeth',
      children: [{
        name: 'Charles',
        children: [{ name: 'William', children: [{ name: 'George' }, { name: 'Charlotte' }] }, { name: 'Harry' }]
      }]
    },
    directory: {
      name: 'directory 1',
      subDirectories: [
        { name: 'directory 1.1', subDirectories: [] },
        {
          name: 'directory 1.2',
          subDirectories: [{ name: 'directory 1.2.1' }, { name: 'directory 1.2.2' }]
        }
      ]
    }
  })
  t.equal(value, '{"people":{"name":"Elizabeth","children":[{"name":"Charles","children":[{"name":"William","children":[{"name":"George"},{"name":"Charlotte"}]},{"name":"Harry"}]}]},"directory":{"name":"directory 1","subDirectories":[{"name":"directory 1.1","subDirectories":[]},{"name":"directory 1.2","subDirectories":[{"name":"directory 1.2.1","subDirectories":[]},{"name":"directory 1.2.2","subDirectories":[]}]}]}}')
})
// Recursive $ref nested inside plain object properties (not arrays):
// each `parent` level must recurse through the same definition.
test('can stringify recursive references in object types (issue #365)', t => {
  t.plan(1)
  const schema = {
    type: 'object',
    definitions: {
      parentCategory: {
        type: 'object',
        properties: {
          parent: {
            $ref: '#/definitions/parentCategory'
          }
        }
      }
    },
    properties: {
      category: {
        type: 'object',
        properties: {
          parent: {
            $ref: '#/definitions/parentCategory'
          }
        }
      }
    }
  }
  const stringify = build(schema)
  const data = {
    category: {
      parent: {
        parent: {
          parent: {
            parent: {}
          }
        }
      }
    }
  }
  const value = stringify(data)
  t.equal(value, '{"category":{"parent":{"parent":{"parent":{"parent":{}}}}}}')
})
|
<gh_stars>0
// #include <common.h>
// #include <game.h>
// #include <g3dhax.h>
// #include <daYoshi_c.h>
// #include "yoshiFly.h"
// extern int getNybbleValue(u32 settings, int fromNybble, int toNybble);
// dYoshiWingsRenderer_c *dYoshiWingsRenderer_c::build() {
// return new dYoshiWingsRenderer_c;
// }
// dYoshiWingsRenderer_c::dYoshiWingsRenderer_c() { } dYoshiWingsRenderer_c::~dYoshiWingsRenderer_c() {
// }
// void dYoshiWingsRenderer_c::setup(dPlayerModelHandler_c *handler) {
// setup(handler, 0);
// }
// void dYoshiWingsRenderer_c::setup(dPlayerModelHandler_c *handler, int sceneID) {
// OSReport("Starting setup\n");
// yoshi = (dPlayerModel_c*)handler->mdlClass;
// allocator.link(-1, GameHeaps[0], 0, 0x20);
// nw4r::g3d::ResFile rf(getResource("Y_TexGreen", "g3d/wing.brres"));
// nw4r::g3d::ResMdl rm = rf.GetResMdl("wing");
// wings.setup(rm, &allocator, 0, 1, 0);
// SetupTextures_Enemy(&wings, sceneID);
// allocator.unlink();
// yoshiModel = &yoshi->models[0].body;
// nw4r::g3d::ResMdl *yoshiResMdl =
// (nw4r::g3d::ResMdl*)(((u32)yoshiModel->scnObj) + 0xE8);
// nw4r::g3d::ResNode spin = yoshiResMdl->GetResNode("spin");
// spinNodeID = spin.GetID();
// OSReport("Ending setup\n");
// }
// void dYoshiWingsRenderer_c::draw() {
// OSReport("Starting draw\n");
// daYoshi_c *CuteYoshi = dAcPy_c::findByID(yoshi->player_id_1)->getYoshi();
// if (!(getNybbleValue(CuteYoshi->settings, 12, 12) == 4 && getNybbleValue(CuteYoshi->settings, 5, 5) == 1))
// return;
// Mtx rootMtx;
// yoshiModel->getMatrixForNode(spinNodeID, &rootMtx);
// wings.setDrawMatrix(&rootMtx);
// wings.setScale(1.0f, 1.0f, 1.0f);
// wings.calcWorld(false);
// wings.scheduleForDrawing();
// OSReport("Ending draw\n");
// }
#include <common.h>
#include <game.h>
#include <profile.h>
#include <daYoshi_c.h>
// Resource archives this actor needs loaded; null-terminated list.
const char *YoshiWingsFileList[] = {"Y_TexGreen", 0};
// Cosmetic "wings" actor attached to a Yoshi; renders a wing model that
// tracks the owning Yoshi's position each frame (see updateModelMatrices).
class daEnYoshiWings_c : public dEn_c {
public:
	// dEn_c lifecycle hooks.
	int onCreate();
	int onExecute();
	int onDelete();
	int onDraw();
	mHeapAllocator_c allocator;      // heap link used while building the model
	nw4r::g3d::ResFile resFile;      // wing.brres resource file
	m3d::mdl_c bodyModel;            // the wing model instance
	m3d::anmChr_c animationChr;      // "wing_pata" flap animation
	daYoshi_c *CuteYoshi;            // Yoshi this actor follows (set in onCreate)
	static dActor_c *build();
	void updateModelMatrices();
	// Collision callbacks — all intentionally inert; this actor is decorative
	// and must not react to (or damage from) any contact.
	void playerCollision(ActivePhysics *apThis, ActivePhysics *apOther);
	void yoshiCollision(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCat7_GroundPound(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCat7_GroundPoundYoshi(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCatD_Drill(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCatA_PenguinMario(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCat1_Fireball_E_Explosion(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCat2_IceBall_15_YoshiIce(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCat9_RollingObject(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCat13_Hammer(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCat14_YoshiFire(ActivePhysics *apThis, ActivePhysics *apOther);
	bool collisionCat3_StarPower(ActivePhysics *apThis, ActivePhysics *apOther);
};
// Sprite metadata and profile registration wiring the actor into the game's
// spawn tables under the EN_YOSHIWINGS ids.
const SpriteData YoshiWingsSpriteData = { ProfileId::EN_YOSHIWINGS, 0, 0, 0, 0, 0x100, 0x100, 0, 0, 0, 0, 0 };
Profile YoshiWingsProfile(&daEnYoshiWings_c::build, SpriteId::EN_YOSHIWINGS, &YoshiWingsSpriteData, ProfileId::EN_YOSHIWINGS, ProfileId::EN_YOSHIWINGS, "daEnYoshiWings_c", YoshiWingsFileList);
u8 hijackMusicWithSongName(const char *songName, int themeID, bool hasFast, int channelCount, int trackCount, int *wantRealStreamID);
// --- Collision handlers -----------------------------------------------------
// Every handler below is deliberately a no-op / `return false`: the wings are
// purely visual, so no attack or contact category may affect them.
void daEnYoshiWings_c::playerCollision(ActivePhysics *apThis, ActivePhysics *apOther) {
}
void daEnYoshiWings_c::yoshiCollision(ActivePhysics *apThis, ActivePhysics *apOther) {
}
bool daEnYoshiWings_c::collisionCat7_GroundPound(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCat7_GroundPoundYoshi(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCatD_Drill(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCatA_PenguinMario(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCat1_Fireball_E_Explosion(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCat2_IceBall_15_YoshiIce(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCat9_RollingObject(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCat13_Hammer(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCat14_YoshiFire(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
bool daEnYoshiWings_c::collisionCat3_StarPower(ActivePhysics *apThis, ActivePhysics *apOther) {
	return false;
}
// Factory used by the Profile table: the engine owns actor memory, so the
// object is placement-new'd into a game-heap allocation rather than using
// plain operator new.
dActor_c *daEnYoshiWings_c::build() {
	void *buffer = AllocFromGameHeap1(sizeof(daEnYoshiWings_c));
	daEnYoshiWings_c *c = new(buffer) daEnYoshiWings_c;
	return c;
}
extern int getNybbleValue(u32 settings, int fromNybble, int toNybble);
// Builds the wing model + flap animation and binds this actor to its Yoshi.
// The actor's sprite `settings` field carries the Yoshi's actor id.
int daEnYoshiWings_c::onCreate() {
	this->deleteForever = true;
	// Model creation
	allocator.link(-1, GameHeaps[0], 0, 0x20);
	this->resFile.data = getResource("Y_TexGreen", "g3d/wing.brres");
	nw4r::g3d::ResMdl mdl = this->resFile.GetResMdl("wing");
	bodyModel.setup(mdl, &allocator, 0x224, 1, 0);
	SetupTextures_Enemy(&bodyModel, 0);
	// Bind the "wing_pata" (flap) animation to the model.
	// NOTE(review): `ret` is assigned but never checked — a failed setup is
	// silently ignored; confirm whether that is acceptable here.
	bool ret;
	nw4r::g3d::ResAnmChr anmChr = this->resFile.GetResAnmChr("wing_pata");
	ret = this->animationChr.setup(mdl, anmChr, &this->allocator, 0);
	this->animationChr.bind(&this->bodyModel, anmChr, 1);
	this->bodyModel.bindAnim(&this->animationChr, 0);
	this->animationChr.setUpdateRate(1.0);
	allocator.unlink();
	// Physics deliberately disabled — the wings are decoration only.
	// ActivePhysics::Info HitMeBaby;
	// HitMeBaby.xDistToCenter = 0.0;
	// HitMeBaby.yDistToCenter = 0.0;
	// HitMeBaby.xDistToEdge = 7.5;
	// HitMeBaby.yDistToEdge = 7.5;
	// HitMeBaby.category1 = 0x3;
	// HitMeBaby.category2 = 0x0;
	// HitMeBaby.bitfield1 = 0x6F;
	// HitMeBaby.bitfield2 = 0xffbafffe;
	// HitMeBaby.unkShort1C = 0;
	// HitMeBaby.callback = &dEn_c::collisionCallback;
	// this->aPhysics.initWithStruct(this, &HitMeBaby);
	// this->aPhysics.addToList();
	// Resolve the owning Yoshi and initialise transform state; pos.z pushes
	// the wings behind/in front per the engine's depth convention.
	CuteYoshi = (daYoshi_c *)Actor_SearchByID(this->settings);
	this->scale = (Vec){1.0, 1.0, 1.0};
	this->rot.x = 0;
	this->rot.y = 0;
	this->rot.z = 0;
	this->pos.z = 4000;
	// Run one update immediately so the first drawn frame is already placed.
	this->onExecute();
	return true;
}
// Nothing to release explicitly; members clean up via their destructors.
int daEnYoshiWings_c::onDelete() {
	return true;
}
// Queue the wing model for this frame's render pass.
int daEnYoshiWings_c::onDraw() {
	bodyModel.scheduleForDrawing();
	return true;
}
// extern nw4r::g3d::ResMdl *yoshiResMDL;
// Place the wing model relative to the owning Yoshi each frame.
// (The commented-out code was an earlier attempt to attach to a skeleton
// node; the current approach uses a fixed offset instead.)
void daEnYoshiWings_c::updateModelMatrices() {
	// dPlayerModel_c *yoshi = (dPlayerModel_c*)(CuteYoshi->modelhandler->mdlClass);
	// m3d::mdl_c *yoshiModel = &yoshi->models[0].body;
	// // nw4r::g3d::ResMdl *yoshiResMdl =
	// // (nw4r::g3d::ResMdl*)(((u32)yoshiModel->scnObj) + 0xE8);
	// nw4r::g3d::ResNode spin = yoshiResMDL->GetResNode("skl_root");
	// u32 spinNodeID = spin.GetID();
	// Mtx rootMtx;
	// yoshiModel->getMatrixForNode(spinNodeID, &rootMtx);
	// Offset: ±4 horizontally depending on facing direction, +11 up, +5 depth
	// — presumably tuned by eye to sit on Yoshi's back; confirm before changing.
	matrix.translation(CuteYoshi->pos.x + ((CuteYoshi->direction) ? -4 : 4), CuteYoshi->pos.y + 11, CuteYoshi->pos.z + 5);
	matrix.applyRotationYXZ(&CuteYoshi->rot.x, &CuteYoshi->rot.y, &CuteYoshi->rot.z);
	bodyModel.setDrawMatrix(matrix);
	bodyModel.setScale(&scale);
	bodyModel.calcWorld(false);
}
// Per-frame update: advance the model, track Yoshi, and flap only while the
// player holds button "two"; the animation loops by rewinding when done.
int daEnYoshiWings_c::onExecute() {
	bodyModel._vf1C();
	updateModelMatrices();
	// Flap while the button is held, freeze the animation otherwise.
	if(CuteYoshi->input.getHeldTwo()) {
		this->animationChr.setUpdateRate(1.0);
	}
	else {
		this->animationChr.setUpdateRate(0.0);
	}
	// Loop the flap animation.
	if(this->animationChr.isAnimationDone()) {
		this->animationChr.setCurrentFrame(0.0);
	}
	return true;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.