text stringlengths 1 1.05M |
|---|
package io.sniffy.influxdb.lineprotocol;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Unit tests for the primitive-type conversions of {@code FieldIntegerValue}.
 */
public class FieldIntegerValueTest {

    /** Shared fixtures: the two values every conversion test exercises. */
    private static final FieldIntegerValue ONE = new FieldIntegerValue(1);
    private static final FieldIntegerValue ZERO = new FieldIntegerValue(0);

    @Test
    public void testAsBoolean() {
        // Non-zero is truthy, zero is falsy.
        assertTrue(ONE.asBoolean());
        assertFalse(ZERO.asBoolean());
    }

    @Test
    public void testAsLong() {
        assertEquals(1L, ONE.asLong());
        assertEquals(0L, ZERO.asLong());
    }

    @Test
    public void testAsDouble() {
        // Same 0.1 tolerance as the original assertions.
        assertEquals(1., ONE.asDouble(), 0.1);
        assertEquals(0., ZERO.asDouble(), 0.1);
    }

    @Test
    public void testAsString() {
        assertEquals("1", ONE.asString());
        assertEquals("0", ZERO.asString());
    }
}
|
package com.copperware;
import java.util.Date;
/** A single race: its participants, track and timing. */
public class Race {
    Driver winner; // driver who won; presumably null until the race finishes -- TODO confirm
    Map map;       // NOTE(review): raw type -- presumably the track/map raced on, not java.util.Map; confirm import
    Kart karts[];  // karts taking part in the race
    Date start;    // wall-clock time the race started
    Date end;      // wall-clock time the race ended

    /** Starts the race. Currently a stub -- no behavior implemented yet. */
    public void startRace() {
    }
}
|
import { Module } from '@nestjs/common';
import { JwtModule } from '@nestjs/jwt';
import { JwtConfiguartionService } from './jwt-configuartion.service';
import { JwtGuard } from './guard/jwt.guard';
import { JwtStrategy } from './strategy/jwt.strategy';
import { RoleGuard } from './guard/role.guard';
@Module({
imports:[JwtModule.registerAsync({useClass:JwtConfiguartionService})],
providers:[JwtModule,JwtStrategy,RoleGuard]
})
export class AuthModule {}
|
<reponame>dreamncn/time
// Once the DOM is ready, wire up the theme widgets.
$(document).ready(function () {
    // fancybox is optional: only register it when enabled in the theme config.
    if (themeConfig.fancybox.enable) {
        Theme.fancybox.register();
    }
    // The back-to-top button is always registered.
    Theme.backToTop.register();
});
|
<filename>snail/src/main/java/com/acgist/snail/net/torrent/peer/PeerUploaderGroup.java
package com.acgist.snail.net.torrent.peer;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.acgist.snail.config.PeerConfig;
import com.acgist.snail.config.SystemConfig;
import com.acgist.snail.context.SystemThreadContext;
import com.acgist.snail.pojo.session.PeerSession;
import com.acgist.snail.pojo.session.TorrentSession;
/**
 * <p>PeerUploader group.</p>
 * <dl>
 *  <dt>Manages the PeerUploader connections</dt>
 *  <dd>removes poor-quality peers</dd>
 *  <dd>limits the number of connections</dd>
 * </dl>
 *
 * @author acgist
 */
public final class PeerUploaderGroup {

    private static final Logger LOGGER = LoggerFactory.getLogger(PeerUploaderGroup.class);

    /**
     * <p>Queue of PeerUploader connections; also used as the lock guarding all group state.</p>
     */
    private final BlockingQueue<PeerUploader> peerUploaders = new LinkedBlockingQueue<>();
    /**
     * <p>BT task information.</p>
     */
    private final TorrentSession torrentSession;

    /**
     * @param torrentSession BT task information
     */
    private PeerUploaderGroup(TorrentSession torrentSession) {
        this.torrentSession = torrentSession;
    }

    /**
     * <p>Create a PeerUploader group.</p>
     *
     * @param torrentSession BT task information
     *
     * @return the PeerUploader group
     */
    public static final PeerUploaderGroup newInstance(TorrentSession torrentSession) {
        return new PeerUploaderGroup(torrentSession);
    }

    /**
     * <p>Start downloading.</p>
     * <p>Sends a download request to every incoming peer that supports downloading.</p>
     */
    public void download() {
        synchronized (this.peerUploaders) {
            this.peerUploaders.forEach(PeerUploader::download);
        }
    }

    /**
     * <p>Create an incoming peer connection.</p>
     *
     * @param peerSession peer information
     * @param peerSubMessageHandler peer message handler
     *
     * @return the incoming peer connection, or {@code null} when the connection is refused
     */
    public PeerUploader newPeerUploader(PeerSession peerSession, PeerSubMessageHandler peerSubMessageHandler) {
        synchronized (this.peerUploaders) {
            LOGGER.debug("Peer接入:{}-{}", peerSession.host(), peerSession.port());
            if(!this.connectable(peerSession)) {
                LOGGER.debug("Peer接入失败:{}-{}", peerSession.host(), peerSession.port());
                return null;
            }
            final PeerUploader peerUploader = PeerUploader.newInstance(peerSession, this.torrentSession, peerSubMessageHandler);
            peerSession.status(PeerConfig.STATUS_UPLOAD);
            this.offer(peerUploader);
            return peerUploader;
        }
    }

    /**
     * <dl>
     *  <dt>Decides whether a new connection is allowed</dt>
     *  <dd>the peer is currently downloading, or</dd>
     *  <dd>the current connection count is below the maximum</dd>
     * </dl>
     *
     * @param peerSession peer information
     *
     * @return true - allowed; false - refused;
     *
     * TODO: most data is usually obtained from incoming peers; consider relaxing the incoming-connection limit
     */
    private boolean connectable(PeerSession peerSession) {
        if(peerSession != null && peerSession.downloading()) {
            return true;
        } else {
            return this.peerUploaders.size() < SystemConfig.getPeerSize();
        }
    }

    /**
     * <p>Optimize the PeerUploader connections.</p>
     */
    public void optimize() {
        LOGGER.debug("优化PeerUploader");
        synchronized (this.peerUploaders) {
            try {
                this.inferiorPeerUploaders();
            } catch (Exception e) {
                LOGGER.error("优化PeerUploader异常", e);
            }
        }
    }

    /**
     * <p>Release resources.</p>
     * <p>Releases every PeerUploader.</p>
     */
    public void release() {
        LOGGER.debug("释放PeerUploaderGroup");
        synchronized (this.peerUploaders) {
            // release asynchronously so one slow peer cannot block the whole group
            this.peerUploaders.forEach(uploader -> SystemThreadContext.submit(uploader::release));
            this.peerUploaders.clear();
        }
    }

    /**
     * <p>Remove useless incoming connections.</p>
     * <ul>
     *  <li>unavailable connections</li>
     *  <li>connections without requests for a long time</li>
     *  <li>connections exceeding the maximum connection count</li>
     * </ul>
     */
    private void inferiorPeerUploaders() {
        LOGGER.debug("剔除无效PeerUploader");
        int index = 0;
        int offerSize = 0; // number of connections kept so far
        long uploadMark;
        long downloadMark;
        PeerUploader tmpUploader;
        // snapshot the size: each iteration polls one uploader and either
        // re-offers it or discards it, so we walk the queue exactly once
        final int size = this.peerUploaders.size();
        final int maxSize = SystemConfig.getPeerSize();
        while(index++ < size) {
            tmpUploader = this.peerUploaders.poll();
            if(tmpUploader == null) {
                break;
            }
            // drop immediately when the connection is unavailable
            if(!tmpUploader.available()) {
                LOGGER.debug("剔除无效PeerUploader(不可用)");
                this.inferiorPeerUploader(tmpUploader);
                continue;
            }
            // read the marks (reading also resets them)
            uploadMark = tmpUploader.uploadMark(); // upload mark
            downloadMark = tmpUploader.downloadMark(); // download mark
            // keep peers with a positive download mark
            if(downloadMark > 0L) {
                offerSize++;
                this.offer(tmpUploader);
                continue;
            }
            // keep peers that are currently downloading
            if(tmpUploader.peerSession().downloading()) {
                offerSize++;
                this.offer(tmpUploader);
                continue;
            }
            if(uploadMark <= 0L) {
                // no mark at all: the connection has been idle
                LOGGER.debug("剔除无效PeerUploader(没有评分)");
                this.inferiorPeerUploader(tmpUploader);
            } else if(offerSize > maxSize) {
                // over the maximum peer count
                LOGGER.debug("剔除无效PeerUploader(超过最大数量)");
                this.inferiorPeerUploader(tmpUploader);
            } else {
                offerSize++;
                this.offer(tmpUploader);
            }
        }
    }

    /**
     * <p>Put a PeerUploader back into the queue.</p>
     *
     * @param peerUploader PeerUploader
     */
    private void offer(PeerUploader peerUploader) {
        final var success = this.peerUploaders.offer(peerUploader);
        if(!success) {
            LOGGER.warn("PeerUploader丢失:{}", peerUploader);
        }
    }

    /**
     * <p>Discard an inferior peer.</p>
     *
     * @param peerUploader the inferior peer
     */
    private void inferiorPeerUploader(PeerUploader peerUploader) {
        if(peerUploader != null) {
            final PeerSession peerSession = peerUploader.peerSession();
            LOGGER.debug("剔除无效PeerUploader:{}-{}", peerSession.host(), peerSession.port());
            // release asynchronously, same as release()
            SystemThreadContext.submit(() -> peerUploader.release());
        }
    }
}
|
<reponame>raminious/valeedator<gh_stars>0
const assert = require('assert')
const validate = function(field, value, options) {
assert(typeof value === 'number', `${field} is not number`)
if (typeof options.min !== 'undefined') {
assert(value >= options.min, `${field} must greater than ${options.min}`)
}
if (typeof options.max !== 'undefined') {
assert(value <= options.max, `${field} must lower than ${options.max}`)
}
}
module.exports = validate
|
<gh_stars>0
exports.seed = function(knex) {
return knex("ingredients")
.truncate()
.then(() => {
return knex("ingredients").insert([
{
ingredient_name: "tomatoes"
},
{ ingredient_name: "milk" },
{ ingredient_name: "cheese" }
]);
});
};
|
<filename>dbflute-runtime/src/main/java/org/dbflute/jdbc/ExecutionTimeInfo.java
/*
* Copyright 2014-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.dbflute.jdbc;
import org.dbflute.util.DfTraceViewUtil;
/**
 * Snapshot of the times measured around a behavior command and its SQL fire.
 * All values are epoch milliseconds; each may be null when the corresponding
 * phase did not complete (command failure, in SqlFireHook, no-modified-column
 * update, and so on).
 * @author jflute
 */
public class ExecutionTimeInfo {

    /** Millisecond time just before command invoking. (NullAllowed) */
    protected final Long _commandBeforeTimeMillis;
    /** Millisecond time just after command invoking. (NullAllowed) */
    protected final Long _commandAfterTimeMillis;
    /** Millisecond time just before SQL fire. (NullAllowed) */
    protected final Long _sqlBeforeTimeMillis;
    /** Millisecond time just after SQL fire. (NullAllowed) */
    protected final Long _sqlAfterTimeMillis;

    /**
     * @param commandBeforeTimeMillis time before command invoking (NullAllowed)
     * @param commandAfterTimeMillis time after command invoking (NullAllowed)
     * @param sqlBeforeTimeMillis time before SQL fire (NullAllowed)
     * @param sqlAfterTimeMillis time after SQL fire (NullAllowed)
     */
    public ExecutionTimeInfo(Long commandBeforeTimeMillis, Long commandAfterTimeMillis, Long sqlBeforeTimeMillis, Long sqlAfterTimeMillis) {
        _commandBeforeTimeMillis = commandBeforeTimeMillis;
        _commandAfterTimeMillis = commandAfterTimeMillis;
        _sqlBeforeTimeMillis = sqlBeforeTimeMillis;
        _sqlAfterTimeMillis = sqlAfterTimeMillis;
    }

    /**
     * The performance view of command invoking. e.g. 01m40s012ms
     * @return The view string of command invoking. (NotNull: if command failure, in SqlFireHook, and so on..., returns "*No time")
     */
    public String toCommandPerformanceView() {
        return hasCommandTimeMillis()
                ? convertToPerformanceView(_commandAfterTimeMillis - _commandBeforeTimeMillis)
                : "*No time";
    }

    /**
     * The performance view of SQL fire. e.g. 01m40s012ms <br>
     * When batch execution, all statements are contained in the time.
     * @return The view string of SQL fire. (NotNull: if no-modified-column update, SQL failure, and so on..., returns "*No time")
     */
    public String toSqlPerformanceView() {
        return hasSqlTimeMillis()
                ? convertToPerformanceView(_sqlAfterTimeMillis - _sqlBeforeTimeMillis)
                : "*No time";
    }

    /**
     * Convert a millisecond difference to the performance view string.
     * @param after_minus_before The difference between before time and after time.
     * @return The view string to show performance. e.g. 01m40s012ms (NotNull)
     */
    protected String convertToPerformanceView(long after_minus_before) {
        return DfTraceViewUtil.convertToPerformanceView(after_minus_before);
    }

    /**
     * Does it have both times of the behavior command?
     * @return The determination, true or false. (basically true but no guarantee)
     */
    public boolean hasCommandTimeMillis() {
        return _commandBeforeTimeMillis != null && _commandAfterTimeMillis != null;
    }

    /**
     * Does it have both times of the SQL fire? <br>
     * Basically true but no guarantee: e.g. a no-modified-column update has no SQL fire.
     * @return The determination, true or false. (basically true but no guarantee)
     */
    public boolean hasSqlTimeMillis() {
        return _sqlBeforeTimeMillis != null && _sqlAfterTimeMillis != null;
    }

    @Override
    public String toString() {
        // Same rendering as before: "{commandBefore=..., commandAfter=..., sqlBefore=..., sqlAfter=...}"
        return "{commandBefore=" + _commandBeforeTimeMillis
                + ", commandAfter=" + _commandAfterTimeMillis
                + ", sqlBefore=" + _sqlBeforeTimeMillis
                + ", sqlAfter=" + _sqlAfterTimeMillis + "}";
    }

    /**
     * Get the time as millisecond before command invoking (before building SQL clause).
     * @return The long value of millisecond. (NullAllowed: when command failure, in SqlFireHook, and so on...)
     */
    public Long getCommandBeforeTimeMillis() {
        return _commandBeforeTimeMillis;
    }

    /**
     * Get the time as millisecond after command invoking (after mapping to entity).
     * @return The long value of millisecond. (NullAllowed: when command failure, in SqlFireHook, and so on...)
     */
    public Long getCommandAfterTimeMillis() {
        return _commandAfterTimeMillis;
    }

    /**
     * Get the time as millisecond before SQL fire (after building SQL clause).
     * @return The long value of millisecond. (NullAllowed: when no-modified-column update, SQL failure, and so on...)
     */
    public Long getSqlBeforeTimeMillis() {
        return _sqlBeforeTimeMillis;
    }

    /**
     * Get the time as millisecond after SQL fire (before mapping to entity).
     * @return The long value of millisecond. (NullAllowed: when no-modified-column update, SQL failure, and so on...)
     */
    public Long getSqlAfterTimeMillis() {
        return _sqlAfterTimeMillis;
    }
}
|
[[ -s $HOME/.rvm/scripts/rvm]] && source $HOME/.rvm/scripts/rvm
|
def my_sqrt(x):
    """Approximate the square root of x with Newton's method.

    Iterates until abs(guess * guess - x) <= 0.001 (the original absolute
    stopping rule, kept for backward compatibility).

    Args:
        x: A non-negative number.

    Returns:
        A number r with abs(r * r - x) <= 0.001.

    Raises:
        ValueError: If x is negative (the original looped forever here,
            because Newton's iteration never converges for negative x).
    """
    if x < 0:
        raise ValueError("cannot take the square root of a negative number")
    # Initial guess: x / 2 (for x == 0 this is 0 and the loop exits at once).
    current = x / 2
    # Newton step: average the guess with x / guess until within tolerance.
    while abs(current * current - x) > 0.001:
        current = 0.5 * (current + x / current)
    return current
# Driver code: prints an approximation of sqrt(4) (about 2.0).
print(my_sqrt(4))
<filename>src/widgets/subs.ts
import { Subscription } from '../types/subscription';
import { Config } from '../types/config';
import { WidgetData } from '../types/widgetData';
import { getNormalizedValues, startTask, stopTask } from '../utils';
const io = require("socket.io/client-dist/socket.io.min"); // use socket.io/client-dist instead of socket.io-client because streamlabs requires older client version
// TODO reduce code duplication with timer history if possible
// background tasks
let intervalIdDrawLine: NodeJS.Timer; // handle of the periodic redraw task
// graph
let subs: Subscription[] = []; // latest subscription history received from the server
const canvas = document.getElementsByTagName('canvas')[0];
const ctx = canvas.getContext('2d');
ctx.lineWidth = 5; // TODO reduce width if timespan greater than x?
ctx.lineJoin = 'bevel';
// register events
const socket = io("/subs");
socket.on('connect', () => {
    console.log('Connected to timer.');
    // clear any previous error message
    document.querySelector('span').innerText = "";
});
// On disconnect: show an error, stop the redraw task and blank the canvas.
socket.on('disconnect', () => {
    console.log("Connection to timer lost.")
    document.querySelector('span').innerText = "Not connected to timer";
    document.querySelector('p').innerText = "";
    intervalIdDrawLine = stopTask(intervalIdDrawLine);
    ctx.clearRect(0, 0, canvas.width, canvas.height);
});
socket.on('error', (msg: string) => {
    document.querySelector('span').innerText = msg;
});
socket.on('config', (newConfig: Config) => updateConfig(newConfig));
socket.on('update', (data: WidgetData) => setSubCount(data.totalSubs));
// Full history push from the server: cache it and redraw immediately.
socket.on('subs-data', (subsData: Subscription[]) => {
    subs = subsData
    drawLine(subs);
});
// Export request: temporarily paint an opaque background *behind* the graph
// (destination-over), return the canvas as a data URL, restore composite mode.
socket.on('subs-export', (bgColor: string, callback: (dataUrl: string) => void) => {
    const globalCompositeOperation = ctx.globalCompositeOperation;
    ctx.globalCompositeOperation = 'destination-over';
    ctx.fillStyle = bgColor;
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    callback(canvas.toDataURL());
    ctx.globalCompositeOperation = globalCompositeOperation;
});
/**
 * Update the configuration based on the provided config.
 *
 * Applies display options (visibility, colors) and restarts the periodic
 * redraw task with the configured refresh interval.
 *
 * @param config the config containing the new values that should be used
 */
const updateConfig = (config: Config) => {
    document.querySelector('p').hidden = !config.subHistoryShowTotal;
    document.querySelector('div').style.background = config.bgColor;
    document.querySelector('p').style.color = config.timerColor;
    document.querySelector('p').style.textShadow = `0px 0px 10px ${config.timerShadowColor}`;
    ctx.strokeStyle = config.lineColor;
    // redraw right away with the new style, then (re)schedule the redraw task
    drawLine(subs);
    intervalIdDrawLine = startTask(intervalIdDrawLine, () => drawLine(subs), config.subHistoryRefresh);
};
/**
 * Update the displayed sub count.
 *
 * @param count the new sub count
 */
const setSubCount = (count: number) => {
    // the <p> element is the total-subs display (hidden via config)
    document.querySelector('p').innerText = `${count}`;
};
/**
 * Draw the line graph of the sub count history of the provided data.
 *
 * Renders a step graph: x is the (normalized) sub timestamp, y is the
 * cumulative sub count scaled to the canvas height.
 *
 * @param data the graph data
 */
const drawLine = (data: any[]) => { // TODO replace by Subscription[]?
    // TODO add possibility to limit the timespan of the graph
    // TODO use same start time as history (could get from widgetData); ties in with todo above
    data = data
        .map(item => {
            const timestamp = Date.parse(item.timestamp); // parse date to a number
            return { ...item, timestamp };
        })
        .filter(item => Number.isInteger(item.timestamp)) // remove invalid data points
        .sort((a, b) => a.timestamp - b.timestamp); // sort by timestamp in case json file is weird
    let dates = data.map(value => value.timestamp);
    dates.push(Date.now()); // add current timestamp to stretch the graph to current time instead of last sub
    dates = getNormalizedValues(dates); // presumably scales the timestamps into [0, 1] -- confirm in utils
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    // calc offsets so the line is not cut off at the edge
    const offset = ctx.lineWidth / 2;
    const width = canvas.width - ctx.lineWidth;
    const height = canvas.height - ctx.lineWidth;
    ctx.beginPath();
    // start at the bottom-left corner (count 0)
    ctx.moveTo(0, height + offset);
    // one horizontal then one vertical segment per sub (step graph)
    for (let i = 0; i < data.length; i++) { // important: use 'data' not 'dates' as 'dates' contains current time additionally which isn't a sub
        let subCount = i + 1; // data is 0-indexed so subCount is 1 higher than index
        ctx.lineTo(width * dates[i] + offset, height * (1 - (i / data.length)) + offset); // i is also equal to the previous subCount
        ctx.lineTo(width * dates[i] + offset, height * (1 - (subCount / data.length)) + offset);
    }
    ctx.lineTo(canvas.width, (data.length === 0 ? height : 0) + offset); // finish line to current timestamp (end of graph, subCount is either at 0 or max)
    ctx.stroke();
};
|
/** Utility for generating a bounded random integer. */
public class RandomNumber {

    /**
     * Returns a uniformly distributed random integer in the inclusive
     * range [10, 24] (same range as the original {@code nextInt(15) + 10}).
     *
     * @return a random int between 10 and 24, inclusive
     */
    public static int generateRandomNumber() {
        // Fix: the original used Random without any import in this file and
        // allocated a new instance per call. ThreadLocalRandom needs no
        // import when fully qualified and avoids the per-call allocation.
        return java.util.concurrent.ThreadLocalRandom.current().nextInt(10, 25);
    }
}
#!/bin/bash
# Generate man pages for the Dalcoin binaries using help2man.
TOPDIR=${TOPDIR:-$(git rev-parse --show-toplevel)}
SRCDIR=${SRCDIR:-$TOPDIR/src}
MANDIR=${MANDIR:-$TOPDIR/doc/man}

DalcoinD=${DalcoinD:-$SRCDIR/Dalcoind}
DalcoinCLI=${DalcoinCLI:-$SRCDIR/Dalcoin-cli}
DalcoinTX=${DalcoinTX:-$SRCDIR/Dalcoin-tx}
DalcoinQT=${DalcoinQT:-$SRCDIR/qt/Dalcoin-qt}

[ ! -x $DalcoinD ] && echo "$DalcoinD not found or not executable." && exit 1

# The autodetected version git tag can screw up manpage output a little bit
# DALVER[0]/DALVER[1] are fields 6 and 7 of the first --version line --
# presumably the version number and the git commit suffix; confirm format.
DALVER=($($DalcoinCLI --version | head -n1 | awk -F'[ -]' '{ print $6, $7 }'))

# Create a footer file with copyright content.
# This gets autodetected fine for bitcoind if --version-string is not set,
# but has different outcomes for bitcoin-qt and bitcoin-cli.
echo "[COPYRIGHT]" > footer.h2m
$DalcoinD --version | sed -n '1!p' >> footer.h2m

# Produce one man page per binary, then strip the git-commit suffix from it.
for cmd in $DalcoinD $DalcoinCLI $DalcoinTX $DalcoinQT; do
    cmdname="${cmd##*/}"
    help2man -N --version-string=${DALVER[0]} --include=footer.h2m -o ${MANDIR}/${cmdname}.1 ${cmd}
    sed -i "s/\\\-${DALVER[1]}//g" ${MANDIR}/${cmdname}.1
done

rm -f footer.h2m
|
package org.jaudiotagger.audio.asf.tag;
import junit.framework.TestCase;
import org.jaudiotagger.tag.asf.AsfTagCoverField;
/**
 * Tests basic behavior of {@link AsfTagCoverField}.
 * <p/>
 * Date: 10/19/12
 *
 * @author <a href="mailto:<EMAIL>"><NAME></a>
 */
public class AsfTagCoverFieldTest extends TestCase {

    /**
     * Tests the standard constructor: every argument must be exposed
     * unchanged through the corresponding getter.
     */
    public void testConstructor() {
        final byte[] imageData = new byte[1024];
        final AsfTagCoverField cover = new AsfTagCoverField(imageData, 11, "description", "image/jpeg");
        assertEquals(imageData.length, cover.getImageDataSize());
        assertEquals(11, cover.getPictureType());
        assertEquals("image/jpeg", cover.getMimeType());
        assertEquals("description", cover.getDescription());
    }
}
|
#! /bin/sh
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is the build configuration utility of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL21$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see http://www.qt.io/terms-conditions. For further
## information use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## As a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## $QT_END_LICENSE$
##
#############################################################################
# This is a small script to copy the required files from a freetype tarball
# into 3rdparty/freetype/ . Documentation, tests, demos etc. are not imported.
# Require exactly two arguments: the freetype tarball dir and the target dir.
if [ $# -ne 2 ]; then
    echo "Usage: $0 freetype_tarball_dir/ \$QTDIR/src/3rdparty/freetype/"
    exit 1
fi
FT_DIR=$1
TARGET_DIR=$2
# Both directories must exist with suitable read/write permissions.
if [ ! -d "$FT_DIR" -o ! -r "$FT_DIR" -o ! -d "$TARGET_DIR" -o ! -w "$TARGET_DIR" ]; then
    echo "Either the freetype source dir or the target dir do not exist,"
    echo "are not directories or have the wrong permissions."
    exit 2
fi
# with 1 argument, copies FT_DIR/$1 to TARGET_DIR/$1
# with 2 arguments, copies FT_DIR/$1 to TARGET_DIR/$2
copy_file_or_dir() {
    if [ $# -lt 1 -o $# -gt 2 ]; then
        echo "Wrong number of arguments to copy_file_or_dir"
        exit 3
    fi
    SOURCE_FILE=$1
    if [ -n "$2" ]; then
        DEST_FILE=$2
    else
        DEST_FILE=$1
    fi
    # recreate the directory structure in the target before copying
    mkdir -p "$TARGET_DIR/$(dirname "$SOURCE_FILE")"
    cp -R "$FT_DIR/$SOURCE_FILE" "$TARGET_DIR/$DEST_FILE"
}
# Whitelist of files/directories imported from the freetype tarball.
FILES="
README
builds/unix/ftsystem.c
docs/CHANGES
docs/CUSTOMIZE
docs/DEBUG
docs/PROBLEMS
docs/TODO
docs/FTL.TXT
docs/GPLv2.TXT
docs/LICENSE.TXT
include/
src/
"
for i in $FILES; do
    copy_file_or_dir "$i"
done
|
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This script runs or (given -n) prints suggested commands to generate files for
# the Architecture/OS specified by the GOARCH and GOOS environment variables.
# See README.md for more information about how the build system works.
# Target platform, e.g. "linux_amd64"; taken from the GOOS/GOARCH env vars.
GOOSARCH="${GOOS}_${GOARCH}"
# defaults
mksyscall="./mksyscall.pl"
mkerrors="./mkerrors.sh"
zerrors="zerrors_$GOOSARCH.go"
mksysctl=""
zsysctl="zsysctl_$GOOSARCH.go"
mksysnum=
mktypes=
run="sh"
cmd=""
# Flag handling: -syscalls regenerates from existing files, -n only prints.
case "$1" in
-syscalls)
    for i in zsyscall*go
    do
        # Run the command line that appears in the first line
        # of the generated file to regenerate it.
        sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
        rm _$i
    done
    exit 0
    ;;
-n)
    run="cat"
    cmd="echo"
    shift
esac
# After flag handling no positional arguments are allowed.
case "$#" in
0)
    ;;
*)
    echo 'usage: mkall.sh [-n]' 1>&2
    exit 2
esac
if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
    # Use the new build system
    # Files generated through docker (use $cmd so you can Ctl-C the build or run)
    $cmd docker build --tag generate:$GOOS $GOOS
    $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS
    exit
fi
GOOSARCH_in=syscall_$GOOSARCH.go
# Select the generator commands and flags for the target platform.
case "$GOOSARCH" in
_* | *_ | _)
    echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
    exit 1
    ;;
darwin_386)
    mkerrors="$mkerrors -m32"
    mksyscall="./mksyscall.pl -l32"
    mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
darwin_amd64)
    mkerrors="$mkerrors -m64"
    mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
darwin_arm)
    mkerrors="$mkerrors"
    mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
darwin_arm64)
    mkerrors="$mkerrors -m64"
    mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
dragonfly_386)
    mkerrors="$mkerrors -m32"
    mksyscall="./mksyscall.pl -l32 -dragonfly"
    mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
dragonfly_amd64)
    mkerrors="$mkerrors -m64"
    mksyscall="./mksyscall.pl -dragonfly"
    mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
freebsd_386)
    mkerrors="$mkerrors -m32"
    mksyscall="./mksyscall.pl -l32"
    mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
freebsd_amd64)
    mkerrors="$mkerrors -m64"
    mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
freebsd_arm)
    mkerrors="$mkerrors"
    mksyscall="./mksyscall.pl -l32 -arm"
    mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
    # Let the type of C char be signed for making the bare syscall
    # API consistent across platforms.
    mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
    ;;
linux_sparc64)
    GOOSARCH_in=syscall_linux_sparc64.go
    unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h
    mkerrors="$mkerrors -m64"
    mksysnum="./mksysnum_linux.pl $unistd_h"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
netbsd_386)
    mkerrors="$mkerrors -m32"
    mksyscall="./mksyscall.pl -l32 -netbsd"
    mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
netbsd_amd64)
    mkerrors="$mkerrors -m64"
    mksyscall="./mksyscall.pl -netbsd"
    mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
netbsd_arm)
    mkerrors="$mkerrors"
    mksyscall="./mksyscall.pl -l32 -netbsd -arm"
    mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
    # Let the type of C char be signed for making the bare syscall
    # API consistent across platforms.
    mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
    ;;
openbsd_386)
    mkerrors="$mkerrors -m32"
    mksyscall="./mksyscall.pl -l32 -openbsd"
    mksysctl="./mksysctl_openbsd.pl"
    zsysctl="zsysctl_openbsd.go"
    mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
openbsd_amd64)
    mkerrors="$mkerrors -m64"
    mksyscall="./mksyscall.pl -openbsd"
    mksysctl="./mksysctl_openbsd.pl"
    zsysctl="zsysctl_openbsd.go"
    mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
openbsd_arm)
    mkerrors="$mkerrors"
    mksyscall="./mksyscall.pl -l32 -openbsd -arm"
    mksysctl="./mksysctl_openbsd.pl"
    zsysctl="zsysctl_openbsd.go"
    mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
    # Let the type of C char be signed for making the bare syscall
    # API consistent across platforms.
    mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
    ;;
solaris_amd64)
    mksyscall="./mksyscall_solaris.pl"
    mkerrors="$mkerrors -m64"
    mksysnum=
    mktypes="GOARCH=$GOARCH go tool cgo -godefs"
    ;;
*)
    echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
    exit 1
    ;;
esac
# Emit the generation command lines; $run is "sh" normally, "cat" with -n.
(
    if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
    case "$GOOS" in
    *)
        syscall_goos="syscall_$GOOS.go"
        case "$GOOS" in
        darwin | dragonfly | freebsd | netbsd | openbsd)
            syscall_goos="syscall_bsd.go $syscall_goos"
            ;;
        esac
        if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
        ;;
    esac
    if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
    if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
    if [ -n "$mktypes" ]; then
        echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go";
    fi
) | $run
|
// MovingBox class: a point mass whose velocity is changed by attractors.
class MovingBox {
    private double x, y; // position
    private double vx, vy; // velocity

    public MovingBox() {
        // initialize position and velocity
    }

    /**
     * Accumulates the acceleration contributed by every attractor into the
     * velocity, then advances the position by one step.
     * NOTE(review): there is no explicit time step (dt = 1 implicitly), and
     * if the box coincides with an attractor, distance == 0 causes a
     * division by zero -- confirm inputs can never overlap.
     */
    public void updatePosition(List<Attractor> attractors) {
        // Update position based on attraction to the attractors
        for (Attractor attractor : attractors) {
            double dx = attractor.getX() - x;
            double dy = attractor.getY() - y;
            double distance = Math.sqrt(dx * dx + dy * dy);
            double force = attractor.getStrength() / (distance * distance); // Simplified inverse square law
            double ax = force * dx / distance; // dividing by distance normalizes (dx, dy) to a direction
            double ay = force * dy / distance;
            vx += ax;
            vy += ay;
        }
        // integrate velocity into position (Euler step)
        x += vx;
        y += vy;
    }

    // Other methods and properties as needed
}
// Attractor class: a fixed point that pulls MovingBox instances toward it.
class Attractor {
    private double x, y; // position
    private double strength; // strength of attraction

    public Attractor() {
        // initialize position and strength
    }

    // Getters and setters for position and strength
    // Other methods and properties as needed
}
// Simulation method: advances every box by one step under the attractors' pull.
public void simulateInteraction(List<MovingBox> boxes, List<Attractor> attractors) {
    // Update positions of moving boxes based on attraction to the attractors
    for (MovingBox box : boxes) {
        box.updatePosition(attractors);
    }
}
<gh_stars>10-100
/*******************************************************************************
* This file is part of the Symfony eclipse plugin.
*
* (c) <NAME> <<EMAIL>>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
******************************************************************************/
package com.dubture.symfony.ui.preferences;
import org.eclipse.core.runtime.preferences.AbstractPreferenceInitializer;
import com.dubture.symfony.ui.PreferenceConstants;
/** Eclipse preference initializer for the Symfony plugin. */
public class PreferenceInitializer extends AbstractPreferenceInitializer {

    /**
     * Called by the Eclipse preference framework the first time the
     * preference store is accessed; delegates to PreferenceConstants.
     */
    @Override
    public void initializeDefaultPreferences() {
        PreferenceConstants.initializeDefaultValues();
    }
}
|
<gh_stars>0
import { NgModule } from '@angular/core';
import { RouterModule } from '@angular/router';
import { FormsModule, ReactiveFormsModule } from '@angular/forms';
import { CommonModule } from '@angular/common';
import { NEW_LEAGUE_COMPONENTS } from './new';
import { EDIT_LEAGUE_COMPONENTS } from './edit';
import { JOIN_LEAGUE_COMPONENTS } from './join';
import { VIEW_LEAGUE_COMPONENTS } from './view';
import { PICK_COMPONENTS } from './pick';
import { LEAGUE_LIST_COMPONENTS } from './list';
import { ROUTES } from './league.routes';
import { DatePickerComponent } from '../../components/datepicker.component';
/**
 * Angular feature module for leagues: bundles the new/edit/join/view/pick
 * and list screens plus the shared date picker, and registers the league routes.
 */
@NgModule({
    declarations: [
        ...NEW_LEAGUE_COMPONENTS,
        ...EDIT_LEAGUE_COMPONENTS,
        ...JOIN_LEAGUE_COMPONENTS,
        ...VIEW_LEAGUE_COMPONENTS,
        ...PICK_COMPONENTS,
        ...LEAGUE_LIST_COMPONENTS,
        DatePickerComponent
    ],
    imports: [
        FormsModule,
        ReactiveFormsModule,
        CommonModule,
        // child routes for this feature (see league.routes)
        RouterModule.forChild(ROUTES)
    ]
})
export class LeagueModule{}
|
<gh_stars>1-10
package jenkinsci
import (
// "io/ioutil"
// "crypto/tls"
// "crypto/x509"
// "net/http"
jenkins "github.com/DanielMabbett/gojenkins"
)
// Config is the set of parameters needed to configure the JenkinsCI provider.
type Config struct {
	jenkinsEndpoint      string // base URL of the Jenkins server
	jenkinsAdminUsername string // admin user for API authentication
	jenkinsAdminPassword string // admin password; keep out of logs
	insecure             bool   // NOTE(review): not referenced by Client() -- presumably meant for TLS verification; confirm
}
// Client builds a Jenkins API client from the Config and verifies
// connectivity by initialising it; returns the Init error otherwise.
func (c *Config) Client() (*jenkins.Jenkins, error) {
	client := jenkins.CreateJenkins(nil, c.jenkinsEndpoint, c.jenkinsAdminUsername, c.jenkinsAdminPassword)
	// Init performs the first request, so auth/connection failures surface here.
	_, err := client.Init()
	if err != nil {
		return nil, err
	}
	return client, nil
}
|
import React from "react";
import { useState } from "react";
// Context carrying the shared job-board UI state (search text, filters, sorts).
const JobsContext = React.createContext();

// Provider component: owns the job-board state and exposes it, together with
// its setters, to every descendant through JobsContext.
function JobsContextProvider(props) {
  // Free-text search query.
  const [search, setSearch] = useState("");
  // Currently selected filter values, keyed by filter category.
  const [selectedFilters, setSelectedFilters] = useState({
    job_type: [],
    department: [],
    work_schedule: [],
    experience: [],
  });
  // Active sort configuration; its shape is decided by consumers — TODO confirm.
  const [sorts, setSorts] = useState({});
  return (
    // Both the values and their setters are published so consumers can read
    // and update the shared state.
    <JobsContext.Provider
      value={{
        search,
        setSearch,
        selectedFilters,
        setSelectedFilters,
        sorts,
        setSorts,
      }}
    >
      {props.children}
    </JobsContext.Provider>
  );
}

export { JobsContextProvider, JobsContext };
|
#!/bin/bash
# Copyright (c) 2013 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file.

# Launch a JCEF test application against a native build.
# Usage: run.sh <linux32|linux64> <Debug|Release> <detailed|simple> [java args...]

cd .. || exit 1

if [ -z "$1" ]; then
  echo "ERROR: Please specify a target platform: linux32 or linux64"
else
  if [ -z "$2" ]; then
    echo "ERROR: Please specify a build type: Debug or Release"
  elif [ -z "$3" ]; then
    echo "ERROR: Please specify a run type: detailed or simple"
  else
    export OUT_PATH="./out/$1"
    # Quote the command substitution so build paths containing spaces survive.
    export LIB_PATH="$(readlink -f "./jcef_build/native/$2")"
    if [ ! -d "$LIB_PATH" ]; then
      echo "ERROR: Native build output path does not exist"
      exit 1
    fi
    export CLS_PATH="./third_party/jogamp/jar/*:$OUT_PATH"
    export RUN_TYPE="$3"

    # Necessary for jcef_helper to find libcef.so.
    export LD_LIBRARY_PATH="$LIB_PATH"

    # Remove the first three params ($1, $2 and $3) and pass the rest to java.
    shift 3

    # $LIB_PATH expansions are quoted; the original left them unquoted and
    # would word-split on paths containing whitespace.
    LD_PRELOAD="$LIB_PATH/libcef.so" java -cp "$CLS_PATH" -Djava.library.path="$LIB_PATH" tests.$RUN_TYPE.MainFrame "$@"
  fi
fi

cd tools || exit 1
|
#!/bin/sh -v
# Join a worker node to the cluster by running the saved `kubeadm join`
# command (join.cmd) on it over SSH.
#
# Fixes vs. original: KEYFILE was assigned the broken relative path
# "home/ubuntu/.ssh/kube_aws_rsa" (missing leading '/') and then never used —
# the ssh line hard-coded $HOME/.ssh/kube_aws_rsa instead. Derive the key
# from $HOME once and use it consistently; quote expansions.
KEYFILE="$HOME/.ssh/kube_aws_rsa"
NODEIP=$1
# run init on master
# NOTE: $(cat join.cmd) is intentionally unquoted — the join command must be
# word-split into the command and its arguments.
ssh -o StrictHostKeyChecking=no -i "$KEYFILE" ubuntu@"$NODEIP" sudo $(cat join.cmd) > "kubeadm-$NODEIP.log"
exit 0
|
<gh_stars>1-10
import { css, StyleSheet } from 'aphrodite'
import * as React from 'react'
import CircularButton from './CircularButton'
// Aphrodite style sheet for the separator: a thin 2px line on either side of
// a circular expand/collapse button, laid out vertically or horizontally.
const styles = StyleSheet.create({
  // Outer container when the separator runs vertically (fixed 44px width).
  separatorVContainer: {
    display: 'flex',
    flexDirection: 'column',
    justifyContent: 'center',
    width: 44,
    padding: '0 8px'
  },
  // Outer container when the separator runs horizontally (fixed 44px height).
  separatorHContainer: {
    display: 'flex',
    flexDirection: 'row',
    justifyContent: 'center',
    height: 44,
    padding: '8px 0'
  },
  // Flexible wrapper around each line segment (vertical orientation).
  lineVContainer: {
    display: 'flex',
    flexDirection: 'row',
    justifyContent: 'center',
    flex: 1,
    padding: '10px'
  },
  // Flexible wrapper around each line segment (horizontal orientation).
  lineHContainer: {
    display: 'flex',
    flexDirection: 'column',
    justifyContent: 'center',
    flex: 1,
    padding: '10px'
  },
  // Shared line color; vLine/hLine give it its 2px thickness.
  line: {
    backgroundColor: '#333'
  },
  vLine: {
    width: 2
  },
  hLine: {
    height: 2
  }
})
interface LineProps {
  isVertical: boolean
}

// A single 2px line segment, oriented according to isVertical.
const Line = ({ isVertical }: LineProps) =>
  <div
    className={css(styles.line, isVertical ? styles.vLine : styles.hLine)}
  />

// Props for Separator: orientation, the button's toggle state, and the
// expand/collapse callbacks driven by the current `expanded` flag.
export interface SeparatorProps {
  isVertical: boolean
  isButtonToggled: boolean
  expanded: boolean
  shrink: () => void
  expand: () => void
}
const Separator = ({
isVertical,
isButtonToggled,
expanded,
shrink,
expand
}: SeparatorProps) =>
<div
className={css(
isVertical ? styles.separatorVContainer : styles.separatorHContainer
)}
>
<div
className={css(
isVertical ? styles.lineVContainer : styles.lineHContainer
)}
>
<Line isVertical={isVertical} />
</div>
<CircularButton
onClick={expanded ? shrink : expand}
toggled={isButtonToggled}
vertical={isVertical}
/>
<div
className={css(
isVertical ? styles.lineVContainer : styles.lineHContainer
)}
>
<Line isVertical={isVertical} />
</div>
</div>
export default Separator
|
#!/bin/sh
echo "WARNING: DELETING SUBVERSION FILES"
sleep 10
# Use find -exec instead of an unquoted command substitution: the original
# `rm -fr \`find ...\`` word-split its output and broke on any path containing
# whitespace (potentially deleting the wrong files). -prune stops find from
# descending into the .svn directory it is about to delete.
find . -type d -name .svn -prune -exec rm -fr {} +
# Delete regular files larger than 1M. The original's bare `rm -fv` could not
# remove directories anyway (no -r), so restricting to -type f preserves the
# effective behavior while silencing the errors.
find . -type f -size +1M -exec rm -fv {} +
|
/*
* Copyright (c) 2018 https://www.reactivedesignpatterns.com/
*
* Copyright (c) 2018 https://rdp.reactiveplatform.xyz/
*
*/
public class SequentialExecution {
public static class ReplyA {}
public static class ReplyB {}
public static class ReplyC {}
public static class Result {
final ReplyA replyA;
final ReplyB replyB;
final ReplyC replyC;
public Result(ReplyA replyA, ReplyB replyB, ReplyC replyC) {
this.replyA = replyA;
this.replyB = replyB;
this.replyC = replyC;
}
}
public static Result aggregate(ReplyA replyA, ReplyB replyB, ReplyC replyC) {
return new Result(replyA, replyB, replyC);
}
public static ReplyA computeA() {
return new ReplyA(); // return from compute
}
public static ReplyB computeB() {
return new ReplyB(); // return from compute
}
public static ReplyC computeC() {
return new ReplyC(); // return from compute
}
public static void main(String[] args) {
// #snip
final ReplyA a = computeA();
final ReplyB b = computeB();
final ReplyC c = computeC();
final Result r = aggregate(a, b, c);
// #snip
System.out.println(r);
}
}
|
<filename>enrichment-coordinator-service/src/main/java/org/oransc/enrichment/repository/EiType.java
/*-
* ========================LICENSE_START=================================
* O-RAN-SC
* %%
* Copyright (C) 2019 Nordix Foundation
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ========================LICENSE_END===================================
*/
package org.oransc.enrichment.repository;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import lombok.Getter;
/**
 * An Enrichment Information type together with the producers registered for
 * it. All access to the internal producer map is synchronized on this
 * instance.
 */
public class EiType {
    @Getter
    private final String id;

    @Getter
    private final Object jobDataSchema;

    // Producers able to produce this type, keyed by producer ID.
    private final Map<String, EiProducer> producers = new HashMap<>();

    public EiType(String id, Object jobDataSchema) {
        this.id = id;
        this.jobDataSchema = jobDataSchema;
    }

    /**
     * @return an unmodifiable snapshot of the registered producers.
     */
    public synchronized Collection<EiProducer> getProducers() {
        // Fix: copy under the lock instead of returning an unmodifiable *view*
        // of the live map. A view can throw ConcurrentModificationException
        // when a caller iterates it while another thread adds or removes a
        // producer, despite the synchronized getter.
        return Collections.unmodifiableCollection(new ArrayList<>(producers.values()));
    }

    /**
     * @return an unmodifiable snapshot of the registered producer IDs.
     */
    public synchronized Collection<String> getProducerIds() {
        return Collections.unmodifiableCollection(new ArrayList<>(producers.keySet()));
    }

    /** Registers (or replaces) a producer of this type, keyed by its ID. */
    public synchronized void addProducer(EiProducer producer) {
        this.producers.put(producer.getId(), producer);
    }

    /**
     * Deregisters a producer.
     *
     * @return the previously registered producer, or null if none was present.
     */
    public synchronized EiProducer removeProducer(EiProducer producer) {
        return this.producers.remove(producer.getId());
    }
}
|
#!/bin/bash
# 1g20 is as 1g but adding the option "--constrained false" to --egs.opts.
# This is the new 'unconstrained egs' code where it uses the e2e examples.
#
# local/chain/compare_wer.sh exp/chain/tdnn1g_sp exp/chain/tdnn1g20_sp
# System tdnn1g_sp tdnn1g20_sp
#WER dev_clean_2 (tgsmall) 13.55 13.55
#WER dev_clean_2 (tglarge) 9.74 9.66
# Final train prob -0.0454 -0.0318
# Final valid prob -0.0920 -0.0800
# Final train prob (xent) -1.1679 -1.1831
# Final valid prob (xent) -1.4506 -1.5074
# Num-params 6227338 6227338
# 1g is as 1f but adding dropout (well, something like dropout-- the mask
# is shared across time and it's continuous rather than zero-one), increasing
# the hidden dimension, and training for more epochs.
# local/chain/compare_wer.sh --online exp/chain/tdnn1f_sp exp/chain/tdnn1g_sp
# System tdnn1f_sp tdnn1g_sp
#WER dev_clean_2 (tgsmall) 14.21 13.76
# [online:] 14.18 13.72
#WER dev_clean_2 (tglarge) 10.32 9.65
# [online:] 10.25 9.85
# Final train prob -0.0507 -0.0453
# Final valid prob -0.0912 -0.0892
# Final train prob (xent) -1.3550 -1.1694
# Final valid prob (xent) -1.6018 -1.4486
# Num-params 4205322 6227338
# steps/info/chain_dir_info.pl exp/chain/tdnn1{f,g}_sp
# exp/chain/tdnn1f_sp: num-iters=17 nj=2..5 num-params=4.2M dim=40+100->2309 combine=-0.060->-0.060 (over 1) xent:train/valid[10,16,final]=(-1.61,-1.41,-1.36/-1.82,-1.66,-1.60) logprob:train/valid[10,16,final]=(-0.067,-0.057,-0.051/-0.106,-0.097,-0.091)
# exp/chain/tdnn1g_sp: num-iters=25 nj=2..5 num-params=6.2M dim=40+100->2309 combine=-0.054->-0.053 (over 2) xent:train/valid[15,24,final]=(-1.49,-1.22,-1.17/-1.75,-1.51,-1.45) logprob:train/valid[15,24,final]=(-0.063,-0.050,-0.045/-0.106,-0.096,-0.089)
# Set -e here so that we catch if any executable fails immediately
set -euo pipefail

# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=0
decode_nj=10
train_set=train_clean_5
test_sets=dev_clean_2
gmm=tri3b
nnet3_affix=

# The rest are configs specific to this script. Most of the parameters
# are just hardcoded at this level, in the commands below.
affix=1g20 # affix for the TDNN directory name
tree_affix=
# NOTE(review): train_stage/get_egs_stage/decode_iter are settable via
# parse_options below; get_egs_stage and decode_iter are not referenced in
# the visible part of this script — confirm they are still needed.
train_stage=-10
get_egs_stage=-10
decode_iter=

# training options
# training chunk-options
chunk_width=140,100,160
# we don't need extra left/right context for TDNN systems.
chunk_left_context=0
chunk_right_context=0
dropout_schedule='0,0@0.20,0.3@0.50,0'
common_egs_dir=
xent_regularize=0.1

# training options
srand=0
remove_egs=true
reporting_email=

#decode options
test_online_decoding=true # if true, it will run the last decoding stage.

# End configuration section.
echo "$0 $@" # Print the command line for logging

# Standard Kaldi environment setup; parse_options.sh allows any variable
# above to be overridden from the command line (e.g. --stage, --gmm).
. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh

if ! cuda-compiled; then
cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi

# The iVector-extraction and feature-dumping parts are the same as the standard
# nnet3 setup, and you can skip them by setting "--stage 11" if you have already
# run those things.
local/nnet3/run_ivector_common.sh --stage $stage \
                                  --train-set $train_set \
                                  --gmm $gmm \
                                  --nnet3-affix "$nnet3_affix" || exit 1;

# Problem: We have removed the "train_" prefix of our training set in
# the alignment directory names! Bad!
gmm_dir=exp/$gmm
ali_dir=exp/${gmm}_ali_${train_set}_sp
tree_dir=exp/chain${nnet3_affix}/tree_sp${tree_affix:+_$tree_affix}
lang=data/lang_chain
lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_lats
dir=exp/chain${nnet3_affix}/tdnn${affix}_sp
train_data_dir=data/${train_set}_sp_hires
lores_train_data_dir=data/${train_set}_sp
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires

# Sanity check: all inputs produced by earlier stages must already exist.
for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
    $lores_train_data_dir/feats.scp $ali_dir/ali.1.gz; do
  [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done
# Stage 10: build the chain-topology lang directory.
if [ $stage -le 10 ]; then
  echo "$0: creating lang directory $lang with chain-type topology"
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file. [note, it really has two states.. the first one is only repeated
  # once, the second one has zero or more repeats.]
  if [ -d $lang ]; then
    if [ $lang/L.fst -nt data/lang/L.fst ]; then
      echo "$0: $lang already exists, not overwriting it; continuing"
    else
      echo "$0: $lang already exists and seems to be older than data/lang..."
      echo " ... not sure what to do. Exiting."
      exit 1;
    fi
  else
    cp -r data/lang $lang
    silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
    nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
    # Use our special topology... note that later on may have to tune this
    # topology.
    steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
  fi
fi

# Stage 11: generate training lattices from the GMM system.
if [ $stage -le 11 ]; then
  # Get the alignments as lattices (gives the chain training more freedom).
  # use the same num-jobs as the alignments
  steps/align_fmllr_lats.sh --nj 75 --cmd "$train_cmd" ${lores_train_data_dir} \
    data/lang $gmm_dir $lat_dir
  rm $lat_dir/fsts.*.gz # save space
fi

# Stage 12: build the context-dependency tree for the chain model.
if [ $stage -le 12 ]; then
  # Build a tree using our new topology. We know we have alignments for the
  # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
  # those. The num-leaves is always somewhat less than the num-leaves from
  # the GMM baseline.
  if [ -f $tree_dir/final.mdl ]; then
    echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
    exit 1;
  fi
  steps/nnet3/chain/build_tree.sh \
    --frame-subsampling-factor 3 \
    --context-opts "--context-width=2 --central-position=1" \
    --cmd "$train_cmd" 3500 ${lores_train_data_dir} \
    $lang $ali_dir $tree_dir
fi
# Stage 13: write the xconfig network description and compile it to configs.
if [ $stage -le 13 ]; then
  mkdir -p $dir
  echo "$0: creating neural net configs using the xconfig parser";

  num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
  # NOTE(review): this relies on Python 2 print-statement syntax; it fails if
  # `python` resolves to Python 3 — TODO confirm the expected environment.
  learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
  opts="l2-regularize=0.05 dropout-per-dim-continuous=true"
  output_opts="l2-regularize=0.02 bottleneck-dim=192"

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
  input dim=100 name=ivector
  input dim=40 name=input
  # please note that it is important to have input layer with the name=input
  # as the layer immediately preceding the fixed-affine-layer to enable
  # the use of short notation for the descriptor
  fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
  # the first splicing is moved before the lda layer, so no splicing here
  relu-batchnorm-dropout-layer name=tdnn1 $opts dim=512
  relu-batchnorm-dropout-layer name=tdnn2 $opts dim=512 input=Append(-1,0,1)
  relu-batchnorm-dropout-layer name=tdnn3 $opts dim=512
  relu-batchnorm-dropout-layer name=tdnn4 $opts dim=512 input=Append(-1,0,1)
  relu-batchnorm-dropout-layer name=tdnn5 $opts dim=512
  relu-batchnorm-dropout-layer name=tdnn6 $opts dim=512 input=Append(-3,0,3)
  relu-batchnorm-dropout-layer name=tdnn7 $opts dim=512 input=Append(-3,0,3)
  relu-batchnorm-dropout-layer name=tdnn8 $opts dim=512 input=Append(-6,-3,0)
  ## adding the layers for chain branch
  relu-batchnorm-layer name=prefinal-chain $opts dim=512
  output-layer name=output include-log-softmax=false $output_opts dim=$num_targets max-change=1.5
  # adding the layers for xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models... this
  # has the effect of regularizing the hidden parts of the model.  we use
  # 0.5 / args.xent_regularize as the learning rate factor- the factor of
  # 0.5 / args.xent_regularize is suitable as it means the xent
  # final-layer learns at a rate independent of the regularization
  # constant; and the 0.5 was tuned so as to make the relative progress
  # similar in the xent and regular final layers.
  relu-batchnorm-layer name=prefinal-xent input=tdnn8 $opts dim=512
  output-layer name=output-xent $output_opts dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
# Stage 14: train the chain model.
if [ $stage -le 14 ]; then
  # CLSP-grid-specific: spread the egs over several disks.
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
     /export/b0{3,4,5,6}/$USER/kaldi-data/egs/mini_librispeech-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
  fi

  # Note the "--constrained false" inside --egs.opts: this is the
  # 'unconstrained egs' variant that distinguishes 1g20 from 1g (see the
  # comparison tables in the header).
  steps/nnet3/chain/train.py --stage=$train_stage \
    --cmd="$decode_cmd" \
    --feat.online-ivector-dir=$train_ivector_dir \
    --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
    --chain.xent-regularize $xent_regularize \
    --chain.leaky-hmm-coefficient=0.1 \
    --chain.l2-regularize=0.00005 \
    --chain.apply-deriv-weights=false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --trainer.dropout-schedule $dropout_schedule \
    --trainer.srand=$srand \
    --trainer.max-param-change=2.0 \
    --trainer.num-epochs=15 \
    --trainer.frames-per-iter=3000000 \
    --trainer.optimization.num-jobs-initial=2 \
    --trainer.optimization.num-jobs-final=5 \
    --trainer.optimization.initial-effective-lrate=0.001 \
    --trainer.optimization.final-effective-lrate=0.0001 \
    --trainer.optimization.shrink-value=1.0 \
    --trainer.num-chunk-per-minibatch=256,128,64 \
    --trainer.optimization.momentum=0.0 \
    --egs.chunk-width=$chunk_width \
    --egs.chunk-left-context=$chunk_left_context \
    --egs.chunk-right-context=$chunk_right_context \
    --egs.chunk-left-context-initial=0 \
    --egs.chunk-right-context-final=0 \
    --egs.dir="$common_egs_dir" \
    --egs.opts="--frames-overlap-per-eg 0 --constrained false" \
    --cleanup.remove-egs=$remove_egs \
    --use-gpu=true \
    --reporting.email="$reporting_email" \
    --feat-dir=$train_data_dir \
    --tree-dir=$tree_dir \
    --lat-dir=$lat_dir \
    --dir=$dir || exit 1;
fi
# Stage 15: compile the small-LM decoding graph.
if [ $stage -le 15 ]; then
  # Note: it's not important to give mkgraph.sh the lang directory with the
  # matched topology (since it gets the topology file from the model).
  utils/mkgraph.sh \
    --self-loop-scale 1.0 data/lang_test_tgsmall \
    $tree_dir $tree_dir/graph_tgsmall || exit 1;
fi

# Stage 16: decode the test sets (small LM), then rescore with the large LM.
if [ $stage -le 16 ]; then
  frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
  rm $dir/.error 2>/dev/null || true
  # One background subshell per test set; failures are signaled via $dir/.error.
  for data in $test_sets; do
    (
      nspk=$(wc -l <data/${data}_hires/spk2utt)
      steps/nnet3/decode.sh \
          --acwt 1.0 --post-decode-acwt 10.0 \
          --extra-left-context $chunk_left_context \
          --extra-right-context $chunk_right_context \
          --extra-left-context-initial 0 \
          --extra-right-context-final 0 \
          --frames-per-chunk $frames_per_chunk \
          --nj $nspk --cmd "$decode_cmd" --num-threads 4 \
          --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
          $tree_dir/graph_tgsmall data/${data}_hires ${dir}/decode_tgsmall_${data} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_{tgsmall,tglarge} \
       data/${data}_hires ${dir}/decode_{tgsmall,tglarge}_${data} || exit 1
    ) || touch $dir/.error &
  done
  wait
  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
fi

# Not testing the 'looped' decoding separately, because for
# TDNN systems it would give exactly the same results as the
# normal decoding.

# Stage 17 (optional): repeat decoding with the online-decoding setup.
if $test_online_decoding && [ $stage -le 17 ]; then
  # note: if the features change (e.g. you add pitch features), you will have to
  # change the options of the following command line.
  steps/online/nnet3/prepare_online_decoding.sh \
    --mfcc-config conf/mfcc_hires.conf \
    $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online

  rm $dir/.error 2>/dev/null || true
  for data in $test_sets; do
    (
      nspk=$(wc -l <data/${data}_hires/spk2utt)
      # note: we just give it "data/${data}" as it only uses the wav.scp, the
      # feature type does not matter.
      steps/online/nnet3/decode.sh \
        --acwt 1.0 --post-decode-acwt 10.0 \
        --nj $nspk --cmd "$decode_cmd" \
        $tree_dir/graph_tgsmall data/${data} ${dir}_online/decode_tgsmall_${data} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_{tgsmall,tglarge} \
        data/${data}_hires ${dir}_online/decode_{tgsmall,tglarge}_${data} || exit 1
    ) || touch $dir/.error &
  done
  wait
  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
fi

exit 0;
|
package app.habitzl.elasticsearch.status.monitor.tool.client;
import app.habitzl.elasticsearch.status.monitor.tool.analysis.ElasticsearchClient;
import app.habitzl.elasticsearch.status.monitor.tool.client.connection.MonitoringRestClient;
import app.habitzl.elasticsearch.status.monitor.tool.client.data.cluster.ClusterInfo;
import app.habitzl.elasticsearch.status.monitor.tool.client.data.cluster.ClusterSettings;
import app.habitzl.elasticsearch.status.monitor.tool.client.data.connection.ConnectionInfo;
import app.habitzl.elasticsearch.status.monitor.tool.client.data.connection.ConnectionStatus;
import app.habitzl.elasticsearch.status.monitor.tool.client.data.node.NodeInfo;
import app.habitzl.elasticsearch.status.monitor.tool.client.data.shard.UnassignedShardInfo;
import app.habitzl.elasticsearch.status.monitor.tool.client.params.ClusterAllocationParams;
import app.habitzl.elasticsearch.status.monitor.tool.client.params.ClusterHealthParams;
import app.habitzl.elasticsearch.status.monitor.tool.client.params.ClusterSettingsParams;
import app.habitzl.elasticsearch.status.monitor.tool.client.params.ClusterStateParams;
import app.habitzl.elasticsearch.status.monitor.tool.client.params.EndpointVersionParams;
import app.habitzl.elasticsearch.status.monitor.tool.client.params.GeneralParams;
import app.habitzl.elasticsearch.status.monitor.tool.client.params.NodeInfoParams;
import app.habitzl.elasticsearch.status.monitor.tool.client.params.NodeStatsParams;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import javax.inject.Inject;
import javax.net.ssl.SSLHandshakeException;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
/**
 * Default {@link ElasticsearchClient} implementation backed by the low-level
 * Elasticsearch {@link RestClient}. Each query method issues one or more GET
 * requests, maps the JSON body via {@link ResponseMapper} and {@code InfoMapper},
 * and converts failures into empty/error results (logging them) instead of
 * propagating exceptions to the caller.
 */
public class DefaultElasticsearchClient implements ElasticsearchClient {
    private static final Logger LOG = LogManager.getLogger(DefaultElasticsearchClient.class);

    static final String METHOD_GET = "GET";
    static final String HEADER_ACCEPT = "accept";
    static final String CONTENT_TYPE_APPLICATION_JSON = "application/json";
    private static final int HTTP_STATUS_BAD_REQUEST = 400;

    // Low-level REST client, injected pre-configured for the monitored cluster.
    private final RestClient client;
    private final ResponseMapper responseMapper;
    private final InfoMapper infoMapper;

    @Inject
    public DefaultElasticsearchClient(
            final @MonitoringRestClient RestClient client,
            final ResponseMapper responseMapper,
            final InfoMapper infoMapper) {
        this.client = client;
        this.responseMapper = responseMapper;
        this.infoMapper = infoMapper;
    }

    /**
     * Probes the cluster via the root version endpoint and classifies the
     * outcome (success, HTTP error, SSL handshake failure, or unreachable).
     * Never throws; all failures are folded into the returned ConnectionInfo.
     */
    @Override
    public ConnectionInfo checkConnection() {
        ConnectionInfo connectionInfo;
        Request request = new Request(METHOD_GET, EndpointVersionParams.API_ENDPOINT);
        setAcceptedContentToJSON(request);
        try {
            Response response = client.performRequest(request);
            ConnectionStatus status = ConnectionStatus.fromHttpCode(response.getStatusLine().getStatusCode());
            connectionInfo = status == ConnectionStatus.SUCCESS
                    ? ConnectionInfo.success()
                    : ConnectionInfo.error(status, response.getStatusLine().toString());
        } catch (final ResponseException e) {
            // The server answered, but with an error status code.
            logError(e);
            Response response = e.getResponse();
            ConnectionStatus status = ConnectionStatus.fromHttpCode(response.getStatusLine().getStatusCode());
            connectionInfo = ConnectionInfo.error(status, response.getStatusLine().toString());
        } catch (final SSLHandshakeException e) {
            // Must be caught before IOException (it is a subclass) to report
            // TLS problems distinctly from generic connectivity failures.
            logError(e);
            connectionInfo = ConnectionInfo.error(ConnectionStatus.SSL_HANDSHAKE_FAILURE, e.getMessage());
        } catch (final IOException e) {
            logError(e);
            connectionInfo = ConnectionInfo.error(ConnectionStatus.NOT_FOUND, e.getMessage());
        }

        return connectionInfo;
    }

    /**
     * Fetches the cluster settings (including defaults).
     *
     * @return the mapped settings, or empty if the request or mapping failed.
     */
    @Override
    public Optional<ClusterSettings> getClusterSettings() {
        ClusterSettings clusterSettings;
        Request request = new Request(METHOD_GET, ClusterSettingsParams.API_ENDPOINT);
        setAcceptedContentToJSON(request);
        request.addParameter(ClusterSettingsParams.PARAM_INCLUDE_DEFAULTS, "true");
        try {
            Response response = client.performRequest(request);
            String result = responseMapper.getContentAsString(response);
            clusterSettings = infoMapper.mapClusterSettings(result);
            LOG.debug("Mapped cluster settings: {}", clusterSettings);
        } catch (final IOException e) {
            logError(e);
            clusterSettings = null;
        }

        return Optional.ofNullable(clusterSettings);
    }

    /**
     * Combines the cluster-health and cluster-state endpoints into one
     * ClusterInfo.
     *
     * @return the mapped info, or empty if either request failed.
     */
    @Override
    public Optional<ClusterInfo> getClusterInfo() {
        ClusterInfo clusterInfo;
        Request clusterHealthRequest = new Request(METHOD_GET, ClusterHealthParams.API_ENDPOINT);
        setAcceptedContentToJSON(clusterHealthRequest);
        Request clusterStateRequest = new Request(METHOD_GET, ClusterStateParams.API_ENDPOINT);
        setAcceptedContentToJSON(clusterStateRequest);
        try {
            Response clusterHealthResponse = client.performRequest(clusterHealthRequest);
            String clusterHealthResult = responseMapper.getContentAsString(clusterHealthResponse);
            Response clusterStateResponse = client.performRequest(clusterStateRequest);
            String clusterStateResult = responseMapper.getContentAsString(clusterStateResponse);
            clusterInfo = infoMapper.mapClusterInfo(clusterHealthResult, clusterStateResult);
            LOG.debug("Mapped cluster info: {}", clusterInfo);
        } catch (final IOException e) {
            logError(e);
            clusterInfo = null;
        }

        return Optional.ofNullable(clusterInfo);
    }

    /**
     * Collects per-node information from the master-node, node-info and
     * node-stats endpoints.
     *
     * @return the mapped node infos, or an empty list if any request failed.
     */
    @Override
    public List<NodeInfo> getNodeInfo() {
        List<NodeInfo> nodeInfos = List.of();
        Request masterNodeRequest = new Request(METHOD_GET, ClusterStateParams.onlyRequestMasterNode());
        setAcceptedContentToJSON(masterNodeRequest);
        Request nodeInfoRequest = new Request(METHOD_GET, NodeInfoParams.API_ENDPOINT);
        setAcceptedContentToJSON(nodeInfoRequest);
        Request nodeStatsRequest = new Request(METHOD_GET, NodeStatsParams.API_ENDPOINT);
        setAcceptedContentToJSON(nodeStatsRequest);
        nodeStatsRequest.addParameter(NodeStatsParams.PARAM_METRIC, NodeStatsParams.allMetrics());
        try {
            Response masterNodeResponse = client.performRequest(masterNodeRequest);
            String masterNodeResult = responseMapper.getContentAsString(masterNodeResponse);
            Response nodeInfoResponse = client.performRequest(nodeInfoRequest);
            String nodeInfoResult = responseMapper.getContentAsString(nodeInfoResponse);
            Response nodeStatsResponse = client.performRequest(nodeStatsRequest);
            String nodeStatsResult = responseMapper.getContentAsString(nodeStatsResponse);
            nodeInfos = infoMapper.mapNodeInfo(masterNodeResult, nodeInfoResult, nodeStatsResult);
            LOG.debug("Mapped node infos: {}", nodeInfos);
        } catch (final IOException e) {
            logError(e);
        }

        return nodeInfos;
    }

    /**
     * Queries the cluster-allocation-explain endpoint. Elasticsearch answers
     * HTTP 400 when there are no unassigned shards, so 400 is treated as
     * "nothing unassigned" rather than as an error.
     *
     * @return the mapped info, or empty if none exists or the request failed.
     */
    @Override
    public Optional<UnassignedShardInfo> getUnassignedShardInfo() {
        UnassignedShardInfo unassignedShardInfo;
        Request request = new Request(METHOD_GET, ClusterAllocationParams.API_ENDPOINT);
        setAcceptedContentToJSON(request);
        try {
            Response response = client.performRequest(request);
            if (response.getStatusLine().getStatusCode() != HTTP_STATUS_BAD_REQUEST) {
                String result = responseMapper.getContentAsString(response);
                unassignedShardInfo = infoMapper.mapUnassignedShardInfo(result);
                LOG.debug("Mapped unassigned shard info: {}", unassignedShardInfo);
            } else {
                unassignedShardInfo = null;
                LOG.debug("No unassigned shards found.");
            }
        } catch (final ResponseException e) {
            unassignedShardInfo = null;
            if (e.getResponse().getStatusLine().getStatusCode() == HTTP_STATUS_BAD_REQUEST) {
                LOG.debug("No unassigned shards found.");
            } else {
                logError(e);
            }
        } catch (final IOException e) {
            logError(e);
            unassignedShardInfo = null;
        }

        return Optional.ofNullable(unassignedShardInfo);
    }

    /**
     * Either setting the "accept" header or the "format" parameter would also work alone.
     */
    private void setAcceptedContentToJSON(final Request request) {
        RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
        builder.addHeader(HEADER_ACCEPT, CONTENT_TYPE_APPLICATION_JSON);
        request.setOptions(builder);
        request.addParameter(GeneralParams.PARAM_FORMAT, "json");
    }

    // Central error logging so every catch branch reports uniformly.
    private void logError(final Exception e) {
        LOG.error("Failed to request Elasticsearch data!", e);
    }
}
|
import asyncio
async def process_message(base_url, message):
    """Relay a message: send its text back to the chat it came from.

    NOTE(review): relies on ``send_message`` being defined/imported
    elsewhere — it is not visible in this module.
    """
    await send_message(base_url, message['chat_id'], message['text'])
# Example usage
async def main():
    """Fetch pending updates once and process each contained message.

    NOTE(review): relies on ``get_updates`` (and, via process_message,
    ``send_message``) being defined elsewhere; neither is visible in this
    module — confirm they are imported before running.
    """
    base_url = 'https://api.example.com'
    offset = 0  # presumably an update-offset cursor (Telegram-style) — TODO confirm
    updates = await get_updates(base_url, offset)
    for update in updates:
        message = update['message']  # assumes every update carries a 'message' key
        await process_message(base_url, message)

asyncio.run(main())
EMBED_SIZE=273
HIDDEN_SIZE=273
CHECKPOINT_PATH=checkpoints/lstm_all_dim_273_share_all

# Train a 3-layer LSTM seq2seq model (no decoder attention) with fairseq,
# sharing all embeddings (which requires matching embed/output dims).
python train.py temp_single_vocab/bpe_single_dict/bin \
    --save-dir $CHECKPOINT_PATH --arch lstm \
    --dropout 0.2 \
    --optimizer adam --lr 0.005 --lr-shrink 0.5 \
    --max-tokens 8000 --task translation \
    --bpe subword_nmt \
    --encoder-embed-dim $EMBED_SIZE \
    --decoder-embed-dim $EMBED_SIZE \
    --encoder-hidden-size $HIDDEN_SIZE \
    --decoder-hidden-size $HIDDEN_SIZE \
    --encoder-layers 3 \
    --decoder-layers 3 \
    --update-freq 8 \
    --warmup-updates 200 \
    --decoder-attention False \
    --max-epoch 15 \
    --weight-decay 0.1 \
    --share-all-embeddings \
    --decoder-out-embed-dim $EMBED_SIZE \
    --fp16
# BUGFIX: the original passed the literal string "EMBED_SIZE" (missing '$')
# for --decoder-out-embed-dim instead of the value 273.

#--ddp-backend no_c10d \
# --keep-best-checkpoints 1 \
# --patience 8 \
package m.co.rh.id.anavigator;
import android.app.Activity;
import android.os.Handler;
import android.os.Looper;
import android.view.View;
import android.view.animation.AlphaAnimation;
import android.view.animation.Animation;
import android.view.animation.AnimationSet;
import android.view.animation.DecelerateInterpolator;
import android.view.animation.LinearInterpolator;
import android.view.animation.TranslateAnimation;
import java.io.File;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import javax.crypto.Cipher;
import javax.crypto.NullCipher;
import m.co.rh.id.anavigator.component.StatefulViewFactory;
@SuppressWarnings("rawtypes")
public class NavConfiguration<ACT extends Activity, SV extends StatefulView> {
    // Route pushed first when the navigator starts.
    private String initialRouteName;
    // Route name -> factory producing the StatefulView for that route.
    private Map<String, StatefulViewFactory<ACT, SV>> navMap;
    // Default push/pop transition animations.
    private Animation defaultEnterAnimation;
    private Animation defaultExitAnimation;
    private Animation defaultPopEnterAnimation;
    private Animation defaultPopExitAnimation;
    // Animations used when a route is re-built.
    private Animation defaultReBuildEnterAnimation;
    private Animation defaultReBuildExitAnimation;
    // Optional file used to persist navigation state via Java serialization.
    private File saveStateFile;
    // Cipher pair protecting the persisted state; see setSaveStateCipher.
    private Cipher saveStateEncryptCipher;
    private Cipher saveStateDecryptCipher;
    // Component exposed to routes; exact use is not visible in this chunk.
    private Object requiredComponent;
    private boolean enableAnnotationInjection;
    private ThreadPoolExecutor threadPoolExecutor;
    private Handler mainHandler;
    private View loadingView;
    /**
     * @param initialRouteName route shown first; must be non-null
     * @param navMap           route-name to view-factory mapping; must be non-null and non-empty
     * @throws IllegalStateException if either argument is missing/empty
     */
    private NavConfiguration(String initialRouteName, Map<String, StatefulViewFactory<ACT, SV>> navMap) {
        if (initialRouteName == null) {
            throw new IllegalStateException("initial route name must not null!");
        }
        if (navMap == null || navMap.isEmpty()) {
            throw new IllegalStateException("navMap must not null or empty!");
        }
        this.initialRouteName = initialRouteName;
        this.navMap = navMap;
    }
    public String getInitialRouteName() {
        return initialRouteName;
    }

    public Map<String, StatefulViewFactory<ACT, SV>> getNavMap() {
        return navMap;
    }

    // NOTE(review): these two push-animation getters are package-private while
    // the pop/re-build getters below are public — confirm the asymmetry is
    // intentional.
    Animation getDefaultEnterAnimation() {
        return defaultEnterAnimation;
    }

    Animation getDefaultExitAnimation() {
        return defaultExitAnimation;
    }

    public Animation getDefaultPopEnterAnimation() {
        return defaultPopEnterAnimation;
    }

    public Animation getDefaultPopExitAnimation() {
        return defaultPopExitAnimation;
    }

    public Animation getDefaultReBuildEnterAnimation() {
        return defaultReBuildEnterAnimation;
    }

    public Animation getDefaultReBuildExitAnimation() {
        return defaultReBuildExitAnimation;
    }

    /** @return file used to persist navigation state, or null if not set. */
    public File getSaveStateFile() {
        return saveStateFile;
    }

    public Cipher getSaveStateEncryptCipher() {
        return saveStateEncryptCipher;
    }

    public Cipher getSaveStateDecryptCipher() {
        return saveStateDecryptCipher;
    }

    public Object getRequiredComponent() {
        return requiredComponent;
    }

    public boolean isEnableAnnotationInjection() {
        return enableAnnotationInjection;
    }
    /**
     * Set the cipher pair used to protect the persisted navigation state.
     *
     * @param encrypt initialized cipher used to encrypt the saved state
     * @param decrypt initialized cipher used to decrypt the saved state
     * @throws NullPointerException if either encrypt or decrypt cipher is null
     */
    public void setSaveStateCipher(Cipher encrypt, Cipher decrypt) {
        if (encrypt == null || decrypt == null) {
            throw new NullPointerException("Encrypt and Decrypt ciphers MUST NOT NULL");
        }
        saveStateEncryptCipher = encrypt;
        saveStateDecryptCipher = decrypt;
    }
    /** @return configured executor, or null — its use is not visible in this chunk. */
    public ThreadPoolExecutor getThreadPoolExecutor() {
        return threadPoolExecutor;
    }

    /** @return configured handler, presumably bound to the main looper — confirm. */
    public Handler getMainHandler() {
        return mainHandler;
    }

    /** @return view configured to show while loading, or null if not set. */
    public View getLoadingView() {
        return loadingView;
    }
public static class Builder<ACT extends Activity, SV extends StatefulView> {
        // Staging copies of NavConfiguration's options; presumably transferred
        // to the built instance by a build() method outside this chunk.
        private String initialRouteName;
        private Map<String, StatefulViewFactory<ACT, SV>> navMap;
        private Animation enterAnimation;
        private Animation exitAnimation;
        private Animation popEnterAnimation;
        private Animation popExitAnimation;
        private Animation reBuildEnterAnimation;
        private Animation reBuildExitAnimation;
        private File saveStateFile;
        private Cipher saveStateEncryptCipher;
        private Cipher saveStateDecryptCipher;
        private Object requiredComponent;
        // Annotation injection is enabled by default, unlike the other options.
        private boolean enableAnnotationInjection = true;
        private ThreadPoolExecutor threadPoolExecutor;
        private Handler mainHandler;
        private View loadingView;
        /**
         * @param initialRouteName initial route to be pushed to navigator
         * @param navMap           mapping of the routes for this navigator
         *                         (no validation here; NavConfiguration's
         *                         constructor rejects null/empty values)
         */
        public Builder(String initialRouteName, Map<String, StatefulViewFactory<ACT, SV>> navMap) {
            this.initialRouteName = initialRouteName;
            this.navMap = navMap;
        }
/**
* Set default animation for this navigator
*
* @param enterAnimation Animation when next view showing
* @param exitAnimation Animation when current view exiting
* @param popEnterAnimation Animation when navigator pop and previous view showing
* @param popExitAnimation Animation when navigator pop and current view exiting
*/
public Builder setAnimation(Animation enterAnimation, Animation exitAnimation, Animation popEnterAnimation, Animation popExitAnimation) {
this.enterAnimation = enterAnimation;
this.exitAnimation = exitAnimation;
this.popEnterAnimation = popEnterAnimation;
this.popExitAnimation = popExitAnimation;
return this;
}
/**
* Set default animation for this navigator when reBuildRoute is invoked
*
* @param enterAnimation Animation when next view showing
* @param exitAnimation Animation when current view exiting
*/
public Builder setReBuildAnimation(Animation enterAnimation, Animation exitAnimation) {
this.reBuildEnterAnimation = enterAnimation;
this.reBuildExitAnimation = exitAnimation;
return this;
}
/**
* Provide file to save state, this file will be re-created and deleted as necessary.
* <p>
* the StatefulView states will be stored in this file by relying on java object serialization mechanism.
* Use SealedObject class instead of default Serializable fields if you need to secure/encrypt them.
* Or set the cipher {@link #setSaveStateCipher(Cipher, Cipher)} to automatically encrypt navigation state
* <p>
* When app gets killed and re-opened, navigator will handle state restoration,
* see https://developer.android.com/topic/libraries/architecture/saving-states
* This saving states behave the same as Saved instance state option.
* The states will be cleared only when activity is finishing properly.
* <p>
* NOTE: Make sure you have decent java serialization knowledge before using this.
* Saving state can be quiet tricky to handle.
*/
public Builder setSaveStateFile(File file) {
this.saveStateFile = file;
return this;
}
/**
* Encrypt navigation state cipher.
* Encryption will not happen if either or both is null
*
* @param encrypt cipher to be used to encrypt, make sure it was initialized before set
* @param decrypt cipher to be used to decryot, make sure it was initialized before set
*/
public Builder setSaveStateCipher(Cipher encrypt, Cipher decrypt) {
this.saveStateEncryptCipher = encrypt;
this.saveStateDecryptCipher = decrypt;
return this;
}
/**
* Set required component to be injected on StatefulViews that implements RequireComponent
*/
public Builder setRequiredComponent(Object component) {
this.requiredComponent = component;
return this;
}
/**
* This will disable all annotation based injection.
* Default value is true which means enabled
* <p>
* Reflection can be slow especially in smartphone devices where resources sometimes limited.
* If you decide to disable this functionality you could manually use API INavigator.injectRequired,
* to manually inject the components into your reusable StatefulView.
* Or set manually by implementing RequireNavigator, RequireNavRoute, RequireComponent.
* <p>
* NOTE: Always measure the performance first before decide to disable this.
* Reflection in this framework is relatively fast due to concurrent thread processing.
*
* @param enable true if enabled, false if disabled
*/
public Builder setEnableAnnotationInjection(boolean enable) {
this.enableAnnotationInjection = enable;
return this;
}
/**
* Set ThreadPoolExecutor to be used for this navigator
* <p>
* NOTE: DO NOT use shared ThreadPoolExecutor instance that is used for other purpose other than for Navigator instances.
* Use ThreadPoolExecutor instance that is shared across Navigator instances only.
* OR just create ThreadPoolExecutor instance exclusively for this navigator instance.
*/
public Builder setThreadPoolExecutor(ThreadPoolExecutor threadPoolExecutor) {
this.threadPoolExecutor = threadPoolExecutor;
return this;
}
/**
* Set Main Handler to be used for this navigator
*/
public Builder setMainHandler(Handler handler) {
this.mainHandler = handler;
return this;
}
/**
* Set Loading view to be shown when navigator loading state
*/
public Builder setLoadingView(View view) {
this.loadingView = view;
return this;
}
public NavConfiguration<ACT, SV> build() {
NavConfiguration<ACT, SV> navConfiguration = new NavConfiguration<>(initialRouteName, navMap);
if (enterAnimation == null) {
AnimationSet inAnimationSet = new AnimationSet(true);
inAnimationSet.setInterpolator(new DecelerateInterpolator());
inAnimationSet.setDuration(200);
inAnimationSet.addAnimation(new AlphaAnimation(0, 1));
inAnimationSet.addAnimation(new TranslateAnimation(0, 0, 100, 0));
enterAnimation = inAnimationSet;
}
if (exitAnimation == null) {
AnimationSet outAnimationSet = new AnimationSet(true);
outAnimationSet.setInterpolator(new LinearInterpolator());
outAnimationSet.setDuration(200);
outAnimationSet.addAnimation(new AlphaAnimation(0.5f, 0));
exitAnimation = outAnimationSet;
}
if (popEnterAnimation == null) {
AnimationSet inAnimationSet = new AnimationSet(true);
inAnimationSet.setInterpolator(new LinearInterpolator());
inAnimationSet.setDuration(200);
inAnimationSet.addAnimation(new AlphaAnimation(0, 1));
popEnterAnimation = inAnimationSet;
}
if (popExitAnimation == null) {
AnimationSet outAnimationSet = new AnimationSet(true);
outAnimationSet.setInterpolator(new LinearInterpolator());
outAnimationSet.setDuration(200);
outAnimationSet.addAnimation(new AlphaAnimation(0.5f, 0));
outAnimationSet.addAnimation(new TranslateAnimation(0, 0, 0, 100));
popExitAnimation = outAnimationSet;
}
if (reBuildEnterAnimation == null) {
Animation inAnimation = new AlphaAnimation(0, 1);
inAnimation.setDuration(200);
reBuildEnterAnimation = inAnimation;
}
if (reBuildExitAnimation == null) {
Animation outAnimation = new AlphaAnimation(1, 0);
outAnimation.setDuration(200);
reBuildExitAnimation = outAnimation;
}
navConfiguration.defaultEnterAnimation = enterAnimation;
navConfiguration.defaultExitAnimation = exitAnimation;
navConfiguration.defaultPopEnterAnimation = popEnterAnimation;
navConfiguration.defaultPopExitAnimation = popExitAnimation;
navConfiguration.defaultReBuildEnterAnimation = reBuildEnterAnimation;
navConfiguration.defaultReBuildExitAnimation = reBuildExitAnimation;
navConfiguration.saveStateFile = saveStateFile;
if (saveStateEncryptCipher == null || saveStateDecryptCipher == null) {
navConfiguration.saveStateEncryptCipher = new NullCipher();
navConfiguration.saveStateDecryptCipher = new NullCipher();
} else {
navConfiguration.saveStateEncryptCipher = saveStateEncryptCipher;
navConfiguration.saveStateDecryptCipher = saveStateDecryptCipher;
}
navConfiguration.requiredComponent = requiredComponent;
navConfiguration.enableAnnotationInjection = enableAnnotationInjection;
if (threadPoolExecutor == null) {
int maxThread = Runtime.getRuntime().availableProcessors();
ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(
maxThread, maxThread, 30, TimeUnit.SECONDS
, new LinkedBlockingQueue<>());
threadPoolExecutor.allowCoreThreadTimeOut(true);
threadPoolExecutor.prestartAllCoreThreads();
navConfiguration.threadPoolExecutor = threadPoolExecutor;
} else {
navConfiguration.threadPoolExecutor = threadPoolExecutor;
}
if (mainHandler == null) {
navConfiguration.mainHandler = new Handler(Looper.getMainLooper());
} else {
navConfiguration.mainHandler = mainHandler;
}
navConfiguration.loadingView = loadingView;
return navConfiguration;
}
}
}
|
def fibonacci(n):
    """Return the n-th Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    Iterative implementation: O(n) time and O(1) space, replacing the naive
    double recursion, which is O(2^n) and recurses forever for negative n.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("n must be non-negative")
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
    return previous


n = 7
print("The Fibonacci number for n =", n, "is:", fibonacci(n))
<reponame>mhs1314/allPay<gh_stars>0
package com.qht.services;
import com.qht.RequestObject;
import com.qht.ResultObject;
import com.qht.dto.*;
import org.springframework.web.bind.annotation.RequestBody;
import java.util.List;
/**
 * Course Q&A service: student/teacher question-and-answer flows for courses.
 *
 * @author yangtonggan
 * @email <EMAIL>
 * @date 2018-11-05 18:55:41
 */
public interface AnswerService {

    /**
     * Student side - personal center - home page - course Q&A list.
     *
     * @param requestObject request carrying the query parameters
     * @return list of the student's course Q&A entries
     */
    public ResultObject<List<MyIndexCourseAnswerDto>> myIndexCourseAnswer(@RequestBody RequestObject<MyIndexCourseAnswerParameter> requestObject);

    /**
     * App: "my course Q&A" list for the current user.
     *
     * @param requestObject carries the user id and tenant id
     * @return the user's Q&A entries
     */
    public ResultObject<List<IndexMyAnswerDto>> indexMyAnswer(@RequestBody RequestObject<UidAndTenantID> requestObject);

    /**
     * App: "my course Q&A" - detail view of a single Q&A thread.
     *
     * @param requestObject carries the user id and tenant id
     * @return the Q&A thread details
     */
    public ResultObject<IndexAnswerDetailsDto> indexAnswerDetails(@RequestBody RequestObject<UidAndTenantID> requestObject);

    /**
     * My course Q&A - detail view - post a follow-up answer.
     *
     * @param requestObject carries the follow-up answer payload
     * @return empty result on success
     */
    public ResultObject<Void> indexAnswerDetailsAppendAnswer(@RequestBody RequestObject<IndexAnswerDetailsAppendAnswerParameter> requestObject);

    /**
     * App: teacher-side Q&A list.
     *
     * @param requestObject carries the user id and tenant id
     * @return the teacher's Q&A entries
     */
    public ResultObject<List<AppSelectAnwerListDto>> appSelectAnwerList(@RequestBody RequestObject<UidAndTenantID> requestObject);

    /**
     * App: student posts a follow-up question on an existing answer.
     */
    public ResultObject<Void> appUpdateStudentAnswer(@RequestBody RequestObject<UidAndTenantID>requestObject);

    /**
     * App: teacher posts a follow-up answer on an existing thread.
     */
    public ResultObject<Void> appUpdateTeacherAnswer(@RequestBody RequestObject<UidAndTenantID>requestObject);

    /**
     * App: teacher creates a new answer.
     */
    public ResultObject<Void> appInsertTeacherAnser(@RequestBody RequestObject<AppInsertTeacherAnswerParameter>requestObject);
}
|
<gh_stars>1-10
// Copyright 2020 <NAME> and <NAME>
#pragma once
#include <array>
#include <random>
#include <vector>

#include "universe.hpp"
#include "observable.hpp"
// Static driver for a Monte Carlo simulation over the Universe, measuring
// registered Observables while sweeping.
class Simulation {
public:
    // Coupling constant shared by the moves.
    static double lambda;

    // Run the simulation: `sweeps` sweeps at coupling `lambda_`, steering the
    // volume towards `targetVolume_`; `seed` seeds the RNG (presumably 0
    // selects a default seed -- confirm in the implementation).
    static void start(int sweeps, double lambda_, int targetVolume_, int seed = 0);

    // Register an observable to be measured. Caller retains ownership; the
    // referenced object must outlive the run (a raw pointer is stored).
    static void addObservable(Observable& o) {
        observables.push_back(&o);
    }

    static bool pinch;
    // Relative attempt frequencies for the two move categories.
    static std::array<int, 2> moveFreqs;

    // Attempt a single move; the meaning of the returned int is defined in
    // the implementation file.
    static int attemptMove();

private:
    static std::default_random_engine rng;
    static int targetVolume;
    static double epsilon;
    // True while observables are being recorded.
    static bool measuring;
    static std::vector<Observable*> observables;

    static void sweep();
    static bool moveAdd();
    static bool moveDelete();
    static bool moveFlip();
    static void prepare();
    // tuning isn't used in the current setup
    // static void tune();
    static void grow();
    static void thermalize();
};
|
/*
Project Euler: Problem 76: Counting summations
It is possible to write five as a sum in exactly six different ways:
4 + 1
3 + 2
3 + 1 + 1
2 + 2 + 1
2 + 1 + 1 + 1
1 + 1 + 1 + 1 + 1
How many different ways can one hundred be written as a sum of at least two positive integers?
*/
/**
 * Project Euler 76: count the ways to write `total` as a sum of at least two
 * positive integers, i.e. p(total) - 1 where p is the partition function.
 *
 * Bottom-up coin-counting DP over parts 1..total-1; excluding the part equal
 * to `total` itself enforces "at least two summands".
 *
 * @param {number} [total=100] target sum (defaults to the problem's 100)
 * @returns {number} number of partitions of `total` into >= 2 parts
 */
function countingSummations(total = 100) {
  // ways[s] = number of ways to build the sum s from the parts seen so far.
  const ways = new Array(total + 1).fill(0);
  ways[0] = 1;
  for (let part = 1; part < total; part++) {
    for (let s = part; s <= total; s++) {
      ways[s] += ways[s - part];
    }
  }
  return ways[total];
}
countingSummations();
|
#include <iostream>
#include <string>
// Combined configuration + logging facade. Still scaffolding: the actual
// settings, fields and logging methods are to be filled in (see the
// example comments below).
class ConfigAndLogger {
private:
    // Configuration settings
    // Assume configAndLogger is an object that contains configuration settings
    // Example: configAndLogger.configSetting1, configAndLogger.configSetting2, etc.

    // Logger settings
    // Assume configAndLogger is an object that contains logger settings
    // Example: configAndLogger.logLevel, configAndLogger.logFile, etc.

public:
    // Default constructor. Previously the copy constructor was the ONLY
    // constructor, so no first instance could ever be created.
    ConfigAndLogger() = default;

    // Copy constructor to initialize with another configAndLogger
    ConfigAndLogger(const ConfigAndLogger& configAndLogger) {
        // Initialize configuration and logger settings based on configAndLogger
        // Example: this->configSetting1 = configAndLogger.configSetting1;
        // Example: this->logLevel = configAndLogger.logLevel;
    }

    // Method to access configuration settings
    // Example: int getConfigSetting1() { return this->configSetting1; }

    // Method to modify configuration settings
    // Example: void setConfigSetting1(int value) { this->configSetting1 = value; }

    // Method to log messages with different log levels
    // Example: void logInfo(const std::string& message) { /* Log message with info level */ }
    // Example: void logWarning(const std::string& message) { /* Log message with warning level */ }
    // Example: void logError(const std::string& message) { /* Log message with error level */ }
};
// Demonstrates intended usage of ConfigAndLogger (still scaffolding).
int main() {
    // Sample usage of ConfigAndLogger class
    // NOTE(review): once the comment is stripped this line reads
    // `ConfigAndLogger configAndLogger();` which C++ parses as a FUNCTION
    // DECLARATION (most vexing parse) -- no object is created, and the class
    // has no default constructor to call anyway. Confirm and fix when the
    // scaffolding is filled in.
    ConfigAndLogger configAndLogger(/* provide configAndLogger object */);

    // Access and modify configuration settings
    // Example: int settingValue = configAndLogger.getConfigSetting1();
    // Example: configAndLogger.setConfigSetting1(10);

    // Log messages with different log levels
    // Example: configAndLogger.logInfo("Information message");
    // Example: configAndLogger.logWarning("Warning message");
    // Example: configAndLogger.logError("Error message");

    return 0;
}
<gh_stars>100-1000
/*
* Copyright (c) 2020 - 2021 Legacy Fabric
* Copyright (c) 2016 - 2021 FabricMC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.legacyfabric.fabric.impl.client.rendering;
import java.util.HashMap;
import java.util.function.BiConsumer;
import java.util.function.Function;
import net.minecraft.block.entity.BlockEntity;
import net.minecraft.client.render.block.entity.BlockEntityRenderDispatcher;
import net.minecraft.client.render.block.entity.BlockEntityRenderer;
import net.legacyfabric.fabric.api.client.rendering.v1.BlockEntityRendererRegistry;
/**
 * Default {@link BlockEntityRendererRegistry} implementation backed by a
 * static map.
 *
 * <p>Registrations made before the game wires in the vanilla handler are
 * buffered in {@code map}; {@link #setup} replays the buffered entries into
 * the vanilla handler and routes all later registrations straight through.
 */
public class BlockEntityRendererRegistryImpl implements BlockEntityRendererRegistry {
	private static final HashMap<Class<? extends BlockEntity>, Function<BlockEntityRenderDispatcher, ? extends BlockEntityRenderer<?>>> map = new HashMap<>();
	// Buffers into `map` until setup() swaps in the vanilla handler
	// (method reference replaces the equivalent hand-written lambda).
	private static BiConsumer<Class<? extends BlockEntity>, Function<BlockEntityRenderDispatcher, ? extends BlockEntityRenderer<?>>> handler = map::put;

	@Override
	public <E extends BlockEntity> void register(Class<E> clazz, Function<BlockEntityRenderDispatcher, BlockEntityRenderer<E>> blockEntityRenderer) {
		handler.accept(clazz, blockEntityRenderer);
	}

	/**
	 * Replay buffered registrations into the vanilla handler and delegate all
	 * subsequent registrations to it.
	 */
	public static void setup(BiConsumer<Class<? extends BlockEntity>, Function<BlockEntityRenderDispatcher, ? extends BlockEntityRenderer<?>>> vanillaHandler) {
		map.forEach(vanillaHandler);
		handler = vanillaHandler;
	}
}
|
<gh_stars>1-10
/**
* The MIT License
* Copyright (c) 2014 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package cormoran.pepper.io;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.nio.file.Path;
import org.junit.Assert;
import org.junit.Test;
/** Unit tests for PepperFileHelper's path, whitespace and jar utilities. */
public class TestPepperFileHelper {
	/** createTempPath must return a path WITHOUT creating the file. */
	@Test
	public void testCreateTempPath() throws IOException {
		Path tmpFile = PepperFileHelper.createTempPath("apex.test", ".csv", true);

		// Check the path does not exist
		Assert.assertFalse(tmpFile.toFile().exists());
	}

	/** Tabs and carriage returns are collapsed into single spaces. */
	@Test
	public void testNoNewLine() {
		Assert.assertEquals("A B C D", PepperFileHelper.cleanWhitespaces("A\tB C\rD"));
	}

	/** Expanding a jar to disk must recreate its internal file tree. */
	@Test
	public void testExpandJarToDisk() throws IOException, URISyntaxException {
		// Choose a class in a small jar so the test remains fast
		String pathToResourceInJar = "/org/slf4j/Logger.class";
		URL resource = PepperFileHelper.getResourceURL(pathToResourceInJar);

		Path jarPath = PepperFileHelper.getHoldingJarPath(resource.toURI()).get();

		Path tmpPath = PepperFileHelper.createTempPath("apex", "testExpandJarToDisk", true);

		PepperFileHelper.expandJarToDisk(jarPath, tmpPath);

		// The resource extracted from the jar must now exist on disk.
		Assert.assertTrue(new File(tmpPath.toFile(), pathToResourceInJar).exists());
	}

	/** Round-tripping File <-> URI <-> URL must survive '@' in the file name. */
	@Test
	public void testURISpecialCharacters() throws IOException, URISyntaxException {
		// '@' is a special characters leading to issues when converting back and forth to URL
		Path file = File.createTempFile("TestApexAgentHelper", "special@char").toPath();
		URI asURI = file.toUri();
		URL asURL = asURI.toURL();

		File backToFile = new File(asURI);
		File backToFile2 = new File(asURI.getPath());
		File backToFile3 = new File(asURL.toURI().getPath());

		Assert.assertEquals(file, backToFile.toPath());
		Assert.assertEquals(file, backToFile2.toPath());
		Assert.assertEquals(file, backToFile3.toPath());
	}
}
|
package com.datasift.client.pylon;
import com.fasterxml.jackson.annotation.JsonProperty;
/** Jackson-mapped analysis entry of a Pylon result. */
public class PylonResultAnalysis {
    @JsonProperty("analysis_type")
    protected String analysisType;
    @JsonProperty
    protected PylonParametersData parameters;
    @JsonProperty("results")
    protected PylonResultEntryList resultList;

    /** @return the analysis type identifier. */
    public String getAnalysisType() {
        return this.analysisType;
    }

    /** @return the parameters this analysis was run with. */
    public PylonParametersData getParameters() {
        return this.parameters;
    }

    /** @return the list of result entries. */
    public PylonResultEntryList getResults() {
        return this.resultList;
    }
}
|
<reponame>anchorchat/anchor-ui<gh_stars>10-100
import isFinite from 'lodash/isFinite';
import colors from '../settings/colors';
import combineStyles from '../internal/combine-styles';
import styles from './styles';
// Root style: base, plus the disabled overlay when applicable, plus any
// caller-supplied overrides (which always win).
const root = (overrideStyle, disabled) => {
  const base = disabled ? combineStyles(styles.root, styles.disabled) : styles.root;
  return combineStyles(base, overrideStyle);
};
// Label style: base label plus caller overrides.
const label = overrideStyle => combineStyles(styles.label, overrideStyle);

// Style for the filled portion of the bar. `percentage` is a 0..1 fraction;
// non-finite or non-positive values render as 0% width.
const filled = (color = colors.theme, percentage, overrideStyle) => {
  const style = combineStyles(
    styles.filled,
    {
      width: isFinite(percentage) && percentage > 0
        ? `${percentage * 100}%`
        : '0%',
      backgroundColor: color
    }
  );

  return combineStyles(style, overrideStyle);
};
// Style for the unfilled remainder of the bar. Guards non-finite
// percentages the same way sibling `filled` and `button` do, so the width
// never renders as "NaN%" (non-finite -> full width, matching filled's 0%).
const remaining = (percentage, overrideStyle) => {
  const width = isFinite(percentage) ? `${(1 - percentage) * 100}%` : '100%';
  const style = combineStyles(styles.remaining, { width });
  return combineStyles(style, overrideStyle);
};
// Style for the drag handle, positioned at the filled/remaining boundary.
// Non-finite or non-positive percentages pin it to the left edge.
const button = (color = colors.theme, percentage, overrideStyle) => {
  let style = combineStyles(
    styles.button,
    {
      left: isFinite(percentage) && percentage > 0
        ? `${percentage * 100}%`
        : '0%'
    }
  );

  // At exactly 0 the handle keeps the base (uncolored) appearance.
  if (percentage !== 0) {
    style = combineStyles(style, { backgroundColor: color, border: `2px solid ${color}` });
  }

  return combineStyles(style, overrideStyle);
};

// Error text style: base error plus caller overrides.
const error = overrideStyle => combineStyles(styles.error, overrideStyle);
export default {
root,
label,
filled,
remaining,
button,
error
};
|
<gh_stars>0
// Sync the visibility of the sigma (error-bar) series with the "#btnSigma"
// button label: "Hide sigma" on the button means sigma should be VISIBLE.
// Series layout: data at even indexes (0=N, 2=E, 4=U), sigma at odd indexes.
function neuChartSigmaVisibility(){
    if($('#btnSigma').html() == "Hide sigma"){
        // Only show a sigma series when its parent data series is visible.
        if(window.chart.series[0].visible){
            window.chart.series[1].show();
        }
        // NOTE(review): east (series[3]) and up (series[5]) sigma are hidden
        // in the else-branch below but never re-shown here -- confirm whether
        // these commented blocks should be restored.
        // if(window.chart.series[2].visible){
        //     window.chart.series[3].show();
        // }
        // if(window.chart.series[4].visible){
        //     window.chart.series[5].show();
        // }
    }
    else{
        window.chart.series[1].hide();
        window.chart.series[3].hide();
        window.chart.series[5].hide();
    }
}
function updateChart(){
$.ajax({
type: "POST",
url: 'serv-retrive-neu-series.php',
async: 'false',
datatype: 'json',
contentType: "application/json; charset=utf-8",
cache: false,
success: function(data) {
var epochs = JSON.parse(data);
var datetime = epochs[localStorage['stationLabel']].map(epoch => epoch.datetime);
var north = epochs[localStorage['stationLabel']].map(epoch => epoch.north);
var east = epochs[localStorage['stationLabel']].map(epoch => epoch.east);
var up = epochs[localStorage['stationLabel']].map(epoch => epoch.up);
var sigmaNorth = epochs[localStorage['stationLabel']].map(epoch => epoch['sigma-north']);
var sigmaEast = epochs[localStorage['stationLabel']].map(epoch => epoch['sigma-east']);
var sigmaUp = epochs[localStorage['stationLabel']].map(epoch => epoch['sigma-up']);
if(window.chart == undefined){
window.chart = Highcharts.chart('neu-chart', {
chart: {
zoomType: 'xy'
},
title: {
text: null
},
xAxis: [{
categories: datetime,
tickInterval: 10
}],
yAxis: [{
labels: {
format: '{value} m',
style: {
color: Highcharts.getOptions().colors[1]
}
},
title: {
text: 'Meter',
style: {
color: Highcharts.getOptions().colors[1]
}
}
}],
tooltip: {
shared: true
},
series: [{
name: 'North',
type: 'line',
data: north,
tooltip: {
pointFormat: '<span style="font-weight: bold; color: {series.color}">{series.name}</span>: <b>{point.y:.3f}m</b><br>'
}
},
{
name: 'North sigma',
type: 'errorbar',
visible: false,
data: sigmaNorth,
tooltip: {
pointFormatter: function() {
return '(Sigma: ±' + ((this.low + this.high)/2).toFixed(3) + 'm<br/>';
}
}
},
{
name: 'East',
type: 'line',
data: east,
tooltip: {
pointFormat: '<span style="font-weight: bold; color: {series.color}">{series.name}</span>: <b>{point.y:.3f}m</b><br>'
}
},
{
name: 'East sigma',
type: 'errorbar',
visible: false,
data: sigmaEast,
tooltip: {
pointFormatter: function() {
return '(Sigma: ±' + ((this.low + this.high)/2).toFixed(3) + 'm<br/>';
}
}
},
{
name: 'Up',
type: 'line',
data: up,
tooltip: {
pointFormat: '<span style="font-weight: bold; color: {series.color}">{series.name}</span>: <b>{point.y:.3f}m</b><br>'
}
},
{
name: 'Up sigma',
type: 'errorbar',
visible: false,
data: sigmaUp,
tooltip: {
pointFormatter: function() {
return '(Sigma: ±' + ((this.low + this.high)/2).toFixed(3) + 'm<br/>';
}
}
}]
});
neuChartSigmaVisibility();
}
else{
window.chart.xAxis[0].update({categories: datetime});
window.chart.series[0].setData(north);
window.chart.series[1].setData(sigmaNorth);
window.chart.series[2].setData(east);
window.chart.series[3].setData(sigmaEast);
window.chart.series[4].setData(up);
window.chart.series[5].setData(sigmaUp);
}
}
});
}
// Refresh the Leaflet map markers and the per-station status table using the
// latest NEU solution of every station. The map itself is created once; the
// markers live on window.layer, which refresh() removes before each update.
function updateMap(){
    // Central coordinates of the state of São Paulo (Bauru).
    var latSP = -21.5; // latitude of the SP center (Bauru)
    var longSP = -51; // longitude of the SP center (Bauru)
    if(window.map == undefined){
        window.map = L.map('stations-map').setView([latSP, longSP], 6.75);
        L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token=<KEY>', {
            maxZoom: 18,
            attribution: 'Map data &copy; <a href="https://www.openstreetmap.org/">OpenStreetMap</a> contributors, ' + '<a href="https://creativecommons.org/licenses/by-sa/2.0/">CC-BY-SA</a>, ' + 'Imagery &copy; <a href="https://www.mapbox.com/">Mapbox</a>',
            id: 'mapbox.streets'
        }).addTo(window.map);
        L.control.scale().addTo(window.map);
    }
    window.layer = L.layerGroup();
    window.layer.addTo(window.map);
    var CustomIcon = L.Icon.extend({});
    // Green marker: station OK; orange marker: station in WARNING state.
    var suitableStIcon= new L.icon({
        iconUrl: 'javascript/lib/leaflet/markers/marker-icon-green.png',
        iconAnchor: [10, 41]
    });
    var notSuitableStIcon= new L.icon({
        iconUrl: 'javascript/lib/leaflet/markers/marker-icon-orange.png',
        iconAnchor: [10, 41]
    });
    $.ajax({
        type: "POST",
        url: 'serv-retrive-neu-last.php',
        // NOTE(review): 'false' is a truthy string, so the request is
        // actually asynchronous -- confirm intent.
        async: 'false',
        datatype: 'json',
        contentType: "application/json; charset=utf-8",
        cache: false,
        success: function(data) {
            var epochs = JSON.parse(data);
            $.each(epochs, function(key, station){
                $("#"+ key +"-last-solution").text(station.datetime);
                if(station.status == 1){
                    $("#"+ key +"-status").text("OK");
                    L.marker([station.lat, station.long], {icon: suitableStIcon}).bindPopup(key).addTo(window.layer);
                }
                else{
                    $("#"+ key +"-status").text("WARNING");
                    L.marker([station.lat, station.long], {icon: notSuitableStIcon}).bindPopup(key).addTo(window.layer);
                }
                // Highlight rows whose last solution is stale.
                if(station.old == 1){
                    $("#"+key + "-tr").addClass("old-data-row");
                }
                else{
                    // (removed a stray bare `console.log` expression that was
                    // left here as a no-op)
                    $("#"+key + "-tr").removeClass("old-data-row");
                }
                // Availability percentage for the window selected in #cbTime.
                var percentage = 0;
                var minutes = $("#cbTime").val();
                if(minutes == 30){
                    percentage = station.min30 * 100;
                }
                else if(minutes == 60){
                    percentage = station.min60 * 100;
                }
                else if(minutes == 120){
                    percentage = station.min120 * 100;
                }
                else{
                    percentage = station.minAll * 100;
                }
                percentage = percentage.toFixed(2);
                $("#" + key + "-percent").text(percentage+"%");
            });
        }
    });
}
// Poll loop: redraw the map and the chart now, then reschedule itself every
// 5 seconds, clearing the marker layer first so updateMap() can repopulate it.
function refresh(){
    updateMap();
    updateChart();
    window.timer = setTimeout(function(){
        if(window.layer != undefined){
            window.layer.remove();
        }
        refresh();
    }, 5000);
}
// Page bootstrap: restore the previously selected station, wire up the
// controls, and start the 5-second refresh loop.
$(document).ready(function(){
    if(localStorage['stationLabel'] == undefined){
        localStorage['stationLabel'] = "PPTE0";
    }
    $("#cbStation").val(localStorage['stationLabel']);

    // Switching stations destroys the chart so the next refresh() rebuilds it
    // (updateChart's loose `== undefined` check also matches the null set here).
    $("#cbStation").change(function(){
        localStorage['stationLabel'] = $(this).children("option:selected").val();
        window.chart.destroy();
        window.chart = null;
        clearTimeout(window.timer);
        refresh();
    });

    // Toggle the button label first; neuChartSigmaVisibility() reads it.
    $('#btnSigma').click(function(){
        if($('#btnSigma').html() == "Show sigma"){
            $('#btnSigma').html("Hide sigma");
        }
        else{
            $('#btnSigma').html("Show sigma");
        }
        neuChartSigmaVisibility();
    });

    // The availability window only affects the map/table percentages.
    $("#cbTime").change(function(){
        updateMap();
    });

    refresh();
});
/**
 * Recursively evaluate a binary expression tree.
 *
 * Leaves carry a numeric `val`; internal nodes carry an operator string
 * ('+', '-', '*', '/') in `val` plus `left`/`right` subtrees.
 *
 * @param {{val: (number|string), left?: object, right?: object}} node
 * @returns {number} the value of the expression rooted at `node`
 * @throws {Error} on an unrecognized operator (previously this case fell
 *   through and silently returned undefined)
 */
const evaluate = (node) => {
  // Base case: a leaf holds the operand itself.
  if (!node.left && !node.right) {
    return node.val;
  }

  // Recursive case: evaluate both subtrees, then apply the operator.
  const left = evaluate(node.left);
  const right = evaluate(node.right);

  switch (node.val) {
    case '+':
      return left + right;
    case '-':
      return left - right;
    case '*':
      return left * right;
    case '/':
      return left / right;
    default:
      throw new Error(`Unknown operator: ${node.val}`);
  }
};
package net.nokok.testdata;
/** Test-data type exposing a repository's URL. */
public interface Repository {
    /** @return the repository's URL. */
    String getUrl();
}
|
def fibonacci(n):
    """Return the n-th Fibonacci number, 1-indexed to match the original
    list-based convention: fibonacci(1) == 0, fibonacci(2) == 1,
    fibonacci(3) == 1, fibonacci(4) == 2, ...

    Fixes two off-by-one defects in the previous version: the loop ran one
    step too far, returning fibonacci(n + 1) for every n > 2 (e.g.
    fibonacci(3) gave 2 instead of 1), and n == 0 incorrectly returned 1.
    Like the original, invalid input prints a message and returns None.
    """
    if n < 1:
        print("Incorrect input")
        return None
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return previous
#! /bin/bash
# Compile each of the four players' AI sources into capi binaries and report
# an aggregate compile status to the local judge service.
# WORKDIR /usr/local/
cd ./CAPI || exit 1
i=1
flag=1
while (( i <= 4 ))
do
    cp -f ../mnt/player$i.cpp ./API/src
    mv ./API/src/player$i.cpp ./API/src/AI.cpp
    cmake ./CMakeLists.txt && make >build.log 2>&1
    # Capture the build status NOW: the mv line below used to overwrite $?,
    # so the old check tested whether the mv succeeded, not the build.
    build_status=$?
    mv ./build.log ../mnt/build_log$i && mv ./capi ../mnt/capi$i
    if [ $build_status -ne 0 ]; then
        flag=0
    fi
    let "i++"
done
# Report "compiled" only when every player built successfully.
if [ $flag -eq 1 ]; then
    curl http://localhost:28888/code/compileInfo -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer ${COMPILER_TOKEN}" -d '{"compile_status":"compiled"}'
else
    curl http://localhost:28888/code/compileInfo -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer ${COMPILER_TOKEN}" -d '{"compile_status":"failed"}'
fi
<gh_stars>0
import { getInput } from './utils';
const commands = getInput(2)
.split('\n')
.map(l => {
const [direction, value] = l.split(' ');
return {
direction,
value: parseInt(value),
};
});
let horiz = 0;
let depth1 = 0;
let depth2 = 0;
let aim = 0;
commands.forEach(({ direction, value }) => {
if (direction === 'forward') {
horiz += value;
depth2 += aim * value;
} else if (direction === 'up') {
depth1 -= value;
aim -= value;
} else if (direction === 'down') {
depth1 += value;
aim += value;
}
});
console.log(`Part 1: ${horiz * depth1}`);
console.log(`Part 2: ${horiz * depth2}`); |
<gh_stars>0
package ca.damocles.Items.Factories;
import org.bukkit.Material;
import org.bukkit.inventory.ItemFlag;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.Damageable;
import org.bukkit.inventory.meta.ItemMeta;
import ca.damocles.Items.ItemType;
import ca.damocles.Items.Types.Armor;
import ca.damocles.Runes.Rune;
/**
 * Fluent factory assembling custom armor pieces backed by diamond armor items.
 */
public class ArmorFactory {

	/** Armor piece under construction; finalized by {@link #build()}. */
	Armor armor;

	/**
	 * Create a factory for an armor piece of the given slot type.
	 *
	 * @param customModelID value written into the item's damage field
	 *                      (presumably consumed as a custom model id -- confirm
	 *                      against the resource pack)
	 * @param type armor slot to create (HELMET, CHESTPLATE, LEGS or BOOTS)
	 * @throws IllegalArgumentException if {@code type} is not an armor slot type
	 */
	public ArmorFactory(int customModelID, ItemType type) {
		ItemStack item;
		switch(type) {
		case BOOTS:
			item = new ItemStack(Material.DIAMOND_BOOTS, 1);
			break;
		case CHESTPLATE:
			item = new ItemStack(Material.DIAMOND_CHESTPLATE, 1);
			break;
		case HELMET:
			item = new ItemStack(Material.DIAMOND_HELMET, 1);
			break;
		case LEGS:
			item = new ItemStack(Material.DIAMOND_LEGGINGS, 1);
			break;
		default:
			// Previously left `item` null and crashed below with a bare
			// NullPointerException; fail fast with a clear message instead.
			throw new IllegalArgumentException("Not an armor ItemType: " + type);
		}
		ItemMeta meta = item.getItemMeta();
		((Damageable)meta).setDamage(customModelID);
		// Hide vanilla tooltips; durability is managed by the Armor wrapper.
		meta.addItemFlags(ItemFlag.HIDE_ATTRIBUTES, ItemFlag.HIDE_UNBREAKABLE, ItemFlag.HIDE_ENCHANTS);
		meta.setUnbreakable(true);
		item.setItemMeta(meta);
		armor = new Armor(item, type);
	}

	/** Set the piece's maximum durability. @return this factory for chaining */
	public ArmorFactory setMaxDurability(int maxDurability) {
		armor.setMaxDurability(maxDurability);
		return this;
	}

	/** Set the piece's current durability. @return this factory for chaining */
	public ArmorFactory setDurability(int durability) {
		armor.setDurability(durability);
		return this;
	}

	/** Set the armor value. @return this factory for chaining */
	public ArmorFactory setArmor(double value) {
		armor.setArmor(value);
		return this;
	}

	/** Set the armor toughness value. @return this factory for chaining */
	public ArmorFactory setToughness(double value) {
		armor.setToughness(value);
		return this;
	}

	/** Attach a rune at the given level. @return this factory for chaining */
	public ArmorFactory addRune(Rune rune, int level) {
		armor.addRune(rune, level);
		return this;
	}

	/** @return the finished ItemStack produced by the Armor wrapper. */
	public ItemStack build() {
		return armor.finish();
	}
}
|
/**
 * Return the longest contiguous substring shared by string1 and string2.
 *
 * Replaces the previous two-pointer scan, which advanced through string2
 * only on mismatch resets and therefore missed matches entirely (e.g.
 * ('abc', 'xabc') returned '' instead of 'abc'). This version uses the
 * standard dynamic-programming recurrence over common-suffix lengths:
 * O(len1 * len2) time, O(len2) space.
 *
 * @param {string} string1
 * @param {string} string2
 * @returns {string} the longest common substring ('' when there is none)
 */
function longestCommonSubstring(string1, string2) {
  let bestLength = 0;
  let bestEnd = 0; // exclusive end index of the best match within string1
  // previous[j] = length of the common suffix of string1[..i-1] / string2[..j]
  let previous = new Array(string2.length + 1).fill(0);
  for (let i = 1; i <= string1.length; i++) {
    const current = new Array(string2.length + 1).fill(0);
    for (let j = 1; j <= string2.length; j++) {
      if (string1[i - 1] === string2[j - 1]) {
        current[j] = previous[j - 1] + 1;
        if (current[j] > bestLength) {
          bestLength = current[j];
          bestEnd = i;
        }
      }
    }
    previous = current;
  }
  return string1.slice(bestEnd - bestLength, bestEnd);
}

// Demo (the old call referenced undefined globals and threw a ReferenceError).
console.log(longestCommonSubstring('abcdf', 'xbcdy')); // "bcd"
from typing import List, Tuple
import os
def traverse_directory(directory_path: str) -> Tuple[List[str], int]:
    """Recursively collect every file underneath ``directory_path``.

    Returns:
        A tuple ``(paths, total_size)`` where ``paths`` lists the joined path
        of each file found and ``total_size`` is their combined size in bytes.
    """
    collected: List[str] = []
    size_in_bytes = 0
    for current_dir, _subdirs, filenames in os.walk(directory_path):
        # Join once per file, then reuse for both the listing and the size sum.
        paths = [os.path.join(current_dir, name) for name in filenames]
        collected.extend(paths)
        size_in_bytes += sum(os.path.getsize(path) for path in paths)
    return collected, size_in_bytes
# Data cleaning and outliers detecting
def detect_outliers(data):
    """Return the values of ``data`` lying outside Tukey's inner fences,
    i.e. more than 1.5 * IQR below the first or above the third quartile.
    Order of the returned values follows their order in ``data``.
    """
    q1, _median, q3 = np.percentile(data, [25, 50, 75])
    spread = q3 - q1
    low_fence = q1 - 1.5 * spread
    high_fence = q3 + 1.5 * spread
    return [value for value in data if value > high_fence or value < low_fence]
outliers = detect_outliers(arr) |
<gh_stars>10-100
/*
* File : main.c
* This file is part of RT-Thread RTOS
* COPYRIGHT (C) 2017, RT-Thread Development Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Change Logs:
* Date Author Notes
* 2017-5-30 Bernard the first version
*/
#include "rtthread.h"
#include "include.h"
#include "driver_pub.h"
#include "func_pub.h"
#include "app.h"
#include "ate_app.h"
#include "shell.h"
static int wlan_app_init(void);
/*
 * Application entry point: delegates all start-up work to wlan_app_init().
 *
 * NOTE(review): wlan_app_init() is declared above unconditionally but only
 * DEFINED under BEKEN_USING_WLAN (see below); without that macro this call
 * fails to link -- confirm the build always defines it.
 */
int main(int argc, char **argv)
{
    wlan_app_init();
    return 0;
}
extern void rt_hw_wdg_start(int argc, char **argv);
/* Post-boot user hook: currently only starts the hardware watchdog. */
void user_app_start(void)
{
    rt_hw_wdg_start(0, NULL);
}
#ifdef BEKEN_USING_WLAN
extern void ate_app_init(void);
extern void ate_start(void);
/*
 * Select the boot mode: if the device is flagged for automatic test (ATE)
 * mode, quiet the shell and enter the ATE loop; otherwise start the normal
 * application followed by the user hook. Always returns 0.
 */
static int wlan_app_init(void)
{
    /* init ate mode check. */
    ate_app_init();

    if (get_ate_mode_state())
    {
        rt_kprintf("\r\n\r\nEnter automatic test mode...\r\n\r\n");
        /* Disable echo and shorten the prompt for machine-driven testing. */
        finsh_set_echo(0);
        finsh_set_prompt("#");
        ate_start();
    }
    else
    {
        rt_kprintf("Enter normal mode...\r\n\r\n");
        app_start();
        user_app_start();
    }

    return 0;
}
#endif
|
#ifdef MRT
// Multiple render targets: write the lit color plus G-buffer attachments
// consumed by later passes.
gl_FragData[0] = vec4(emission.rgb + diffuse, alpha);
// Specular color plus metallic factor.
gl_FragData[1] = vec4(specular, material.metallic);
// Normal remapped from [-1,1] to [0,1] for storage, plus roughness.
gl_FragData[2] = vec4(normalize(fragment.normal) * 0.5 + 0.5, material.roughness);
#ifdef SUBSURFACE
// Optional subsurface-scattering attachment.
gl_FragData[3] = subsurface;
#endif
#else
// Single render target: combine all lighting terms directly.
gl_FragColor = vec4(emission.rgb + diffuse + specular, alpha);
// Debug visualizations (intentionally left commented out):
//gl_FragColor = vec4(normalize(fragment.normal) * 0.5 + 0.5, 1.0);
//gl_FragColor = vec4(ibl_diffuse(fragment.normal) * material.albedo, 1.0);
//gl_FragColor = vec4(v_color.rgb, 1.0);
#endif
|
// Copyright (C) (See commit logs on github.com/robhz786/strf)
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <strf/to_string.hpp>

#include <cassert>
namespace xxx {

// Minimal value wrapper used to demonstrate strf's printing customization:
// stores a copy of the wrapped value.
template <typename T>
struct base {
    base(const T& t) : value(t) {}
    T value;
};

} // namespace xxx
namespace strf {

// Print-traits customization for xxx::base<T>. `override_tag` makes this
// override also apply to anything convertible to const xxx::base<T>&
// (e.g. publicly derived classes); printing simply forwards the wrapped
// value to the default printer for T.
template <typename T>
struct base_printing {
    using override_tag = const xxx::base<T>&;
    using forwarded_type = const xxx::base<T>&;

    template <typename CharT, typename Preview, typename FPack>
    static auto make_printer_input
        ( strf::tag<CharT>
        , Preview& preview
        , const FPack& fp
        , forwarded_type x ) noexcept
    {
        // Delegate to the default printer input for the wrapped value.
        return strf::make_default_printer_input<CharT>(preview, fp, x.value);
    }
};

// Hook base_printing into strf's tag_invoke-based print-traits lookup.
template <typename T>
inline base_printing<T> tag_invoke(strf::print_traits_tag, const xxx::base<T>&)
{ return {}; }

} // namespace strf
namespace yyy {

// Derived type with no print traits of its own: it is printed through the
// base_printing override registered for xxx::base<T>.
template <typename T>
struct derived: xxx::base<T> {
    derived(const T& t): xxx::base<T>{t} {}
};

} // namespace yyy
// Smoke test: printing a derived object must go through the base class's
// registered print traits and render the wrapped value.
int main()
{
    yyy::derived<int> b{55};
    auto s = strf::to_string(b);
    assert(s == "55");   // requires <cassert>; previously relied on a
                         // transitive include, which is not guaranteed
    (void)s;             // avoid unused-variable warning when NDEBUG strips assert
    return 0;
}
|
import React from 'react';
import { shallowWithTheme } from '../util-test';
import Card from './Card';
describe('<Card />', () => {
  test('rendering', () => {
    // Shallow-render with the theme injected so the withStyles `classes`
    // prop is populated.
    const wrapper = shallowWithTheme(
      <Card>
        <p id="t1">Test</p>
      </Card>
    );
    const classes = wrapper.prop('classes');
    // dive() unwraps the styling HOC to reach the rendered element.
    expect(wrapper.dive().name()).toBe('div');
    expect(wrapper.dive().hasClass(classes.root)).toBe(true);
    // Children are passed through unchanged.
    expect(wrapper.find('#t1').text()).toBe('Test');
  });

  test('rendering with custom component', () => {
    // The `component` prop swaps the root element type.
    const wrapper = shallowWithTheme(<Card component="main" />);
    expect(wrapper.dive().name()).toBe('main');
  });

  test('inversing color', () => {
    // The `inverse` flag adds the inverse color class.
    const wrapper = shallowWithTheme(<Card inverse />);
    const classes = wrapper.prop('classes');
    expect(wrapper.dive().hasClass(classes.inverse)).toBe(true);
  });

  test('spreading custom props', () => {
    // Unknown props are forwarded to the underlying element.
    const wrapper = shallowWithTheme(<Card data-test="test" />);
    expect(wrapper.prop('data-test')).toBe('test');
  });
});
import React from 'react';
import {connect} from 'react-redux';
import {EditorModal} from "../../dumb/editor/EditorModal";
import {getCustomFieldEditorFormValidationResult, getCustomFieldEditorState, getTaskEditorState} from "../../selectors/ui";
import {CustomFieldEditorContent} from "./CustomFieldEditorContent";
import {closeCustomFieldEditor} from "../../actions/customFieldEditor";
import {saveEditedCustomField} from "../../actions/taskEditor";
const mapStateToProps = (state) => {
const saveButtonDisabled = !getCustomFieldEditorFormValidationResult(state).result;
const {errorMessage} = getCustomFieldEditorFormValidationResult(state);
const {isNewCustomField, customField} = getCustomFieldEditorState(state);
const {isNewTask, task, customFields} = getTaskEditorState(state);
const header = `
${isNewTask ? 'New task' : task.name}
:
${isNewCustomField
? 'New custom field'
: customFields && customFields.find(x => x.id === customField.id).data.label}
`;
return {
isActive: true,
header: header,
tabs: null,
content: <CustomFieldEditorContent/>,
saveButtonDisabled,
errorMessage
};
};
// Wire the modal's buttons to store actions: saving persists the edited
// field and then closes the editor; closing just dismisses it.
const mapDispatchToProps = (dispatch) => ({
    onSaveClick: () => {
        dispatch(saveEditedCustomField());
        dispatch(closeCustomFieldEditor());
    },
    onCloseClick: () => dispatch(closeCustomFieldEditor())
});
// Connected modal for creating/editing a task's custom field; all display
// state and button callbacks are injected from the Redux store.
export const CustomFieldEditor = connect(
    mapStateToProps,
    mapDispatchToProps
)(EditorModal);
const winston = require('winston')
module.exports = winston.createLogger({
transports: [
new winston.transports.Console({ level: process.env.NODE_ENV === 'test' ? 'error' : 'info' })
],
format: winston.format.simple()
})
|
<gh_stars>0
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.aurora.scheduler.storage.mem;
import javax.inject.Singleton;
import com.google.common.annotations.VisibleForTesting;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.PrivateModule;
import com.google.inject.TypeLiteral;
import org.apache.aurora.common.inject.Bindings.KeyFactory;
import org.apache.aurora.common.quantity.Amount;
import org.apache.aurora.common.quantity.Time;
import org.apache.aurora.common.stats.StatsProvider;
import org.apache.aurora.scheduler.storage.AttributeStore;
import org.apache.aurora.scheduler.storage.CronJobStore;
import org.apache.aurora.scheduler.storage.JobUpdateStore;
import org.apache.aurora.scheduler.storage.QuotaStore;
import org.apache.aurora.scheduler.storage.SchedulerStore;
import org.apache.aurora.scheduler.storage.Storage;
import org.apache.aurora.scheduler.storage.Storage.Volatile;
import org.apache.aurora.scheduler.storage.TaskStore;
import org.apache.aurora.scheduler.storage.mem.MemTaskStore.SlowQueryThreshold;
import org.apache.aurora.scheduler.testing.FakeStatsProvider;
import static java.util.Objects.requireNonNull;
/**
 * Binding module for in-memory stores.
 * <p>
 * NOTE: These stores are being phased out in favor of database-backed stores.
 */
public final class MemStorageModule extends PrivateModule {

  // Produces the key under which the top-level Storage binding is exposed,
  // letting callers install this module under a qualified key.
  private final KeyFactory keyFactory;

  public MemStorageModule() {
    this(KeyFactory.PLAIN);
  }

  public MemStorageModule(KeyFactory keyFactory) {
    this.keyFactory = requireNonNull(keyFactory);
  }

  /**
   * Binds {@code impl} as the singleton implementation of {@code binding},
   * and additionally exposes it under the {@link Volatile} qualifier to mark
   * the store as non-durable.
   */
  private <T> void bindStore(Class<T> binding, Class<? extends T> impl) {
    bind(binding).to(impl);
    bind(impl).in(Singleton.class);
    Key<T> key = Key.get(binding, Volatile.class);
    bind(key).to(impl);
    expose(key);
    expose(binding);
  }

  @Override
  protected void configure() {
    // Task-store queries slower than this threshold are treated as slow.
    bind(new TypeLiteral<Amount<Long, Time>>() { }).annotatedWith(SlowQueryThreshold.class)
        .toInstance(Amount.of(25L, Time.MILLISECONDS));
    bindStore(TaskStore.Mutable.class, MemTaskStore.class);
    bindStore(CronJobStore.Mutable.class, MemCronJobStore.class);
    bindStore(AttributeStore.Mutable.class, MemAttributeStore.class);
    bindStore(QuotaStore.Mutable.class, MemQuotaStore.class);
    bindStore(SchedulerStore.Mutable.class, MemSchedulerStore.class);
    bindStore(JobUpdateStore.Mutable.class, MemJobUpdateStore.class);

    // The Storage facade itself is exposed under the caller-supplied key.
    Key<Storage> storageKey = keyFactory.create(Storage.class);
    bind(storageKey).to(MemStorage.class);
    bind(MemStorage.class).in(Singleton.class);
    expose(storageKey);
  }

  /**
   * Creates a new empty in-memory storage for use in testing.
   */
  @VisibleForTesting
  public static Storage newEmptyStorage() {
    Injector injector = Guice.createInjector(
        new MemStorageModule(),
        new AbstractModule() {
          @Override
          protected void configure() {
            // Tests don't need real metrics; stub out the stats provider.
            bind(StatsProvider.class).to(FakeStatsProvider.class);
            bind(FakeStatsProvider.class).in(Singleton.class);
          }
        });
    Storage storage = injector.getInstance(Storage.class);
    storage.prepare();
    return storage;
  }
}
|
def add_component_to_masters(glyph_name, component_name, position):
    """Add a component to every master layer of a glyph.

    Args:
        glyph_name: name of the glyph (looked up in the module-level
            ``font``) to modify.
        component_name: name of the component glyph; uppercased before use.
        position: position at which the component is placed.

    Returns:
        The module-level ``font`` object, for chaining.

    NOTE(review): relies on the Glyphs.app scripting globals ``font`` and
    ``GSComponent`` being in scope — confirm in the macro environment.
    """
    # BUG FIX: the original iterated over the masters but appended every
    # component to ``font.glyphs[glyph_name].components`` (the enumerate
    # index was unused), stacking duplicates on a single list. In the
    # Glyphs API components live on each layer, keyed by the master id.
    glyph = font.glyphs[glyph_name]
    comp_name = component_name.upper()  # hoisted: identical for every master
    for master in font.masters:
        # A fresh GSComponent per layer; layers must not share one instance.
        glyph.layers[master.id].components.append(GSComponent(comp_name, position))
    return font
#!/bin/bash
# Snapshot the stockdb user data and price history as xz-compressed JSON
# files under /var/backups/stockdb, timestamped to the second.

mkdir -p /var/backups/stockdb

# Only run when the stockdb CLI is installed and executable.
if [ -x /usr/bin/stockdb ]; then
    DATE=$(date +%Y-%m-%d-%H%M%S)

    UF="/var/backups/stockdb/$DATE-userdata.json.xz"
    # Run the export as the stockdb service user; compress on the fly.
    sudo -u stockdb bash -c "stockdb export" | xz -z > "$UF"
    # NOTE(review): world-writable backups are a security risk — consider 640.
    chmod 777 "$UF"
    echo "Exported database to $UF"

    DF="/var/backups/stockdb/$DATE-prices.json.xz"
    sudo -u stockdb bash -c "stockdb data --export" | xz -z > "$DF"
    # NOTE(review): see above.
    chmod 777 "$DF"
    echo "Exported database to $DF"
fi
|
# Ask for the user's name on stdin and print a greeting.
name = input('What is your name? ')
print('Hello {}!'.format(name))
#!/bin/bash
# Mirror a container image (argument 1) into the local registry at
# <host>:5000, preserving all architectures, authenticated with the
# OpenShift pull secret.

image=$1
if [ -z "$image" ]; then
    echo "usage: $(basename "$0") <image>" >&2
    exit 1
fi

# Second address reported by `hostname -I` is taken as the routable IP.
# NOTE(review): assumes a fixed NIC ordering — confirm on target hosts.
IP=$(hostname -I | awk -F' ' '{print $2}')

# Reverse-resolve it and strip the last label to derive the registry host name.
REVERSE_NAME=$(dig -x "$IP" +short | sed 's/\.[^\.]*$//')

# IPv6 addresses need an IPv6-capable resolver from resolv.conf for the PTR lookup.
echo "$IP" | grep -q ':' && SERVER6=$(grep : /etc/resolv.conf | grep -v fe80 | cut -d" " -f2) && REVERSE_NAME=$(dig -6x "$IP" +short @"$SERVER6" | sed 's/\.[^\.]*$//')

# Fall back to the local FQDN when reverse resolution yields nothing.
REGISTRY_NAME=${REVERSE_NAME:-$(hostname -f)}
REGISTRY=$REGISTRY_NAME:5000
PULL_SECRET="/root/openshift_pull.json"

# Drop the source registry host (first path component) when pushing.
skopeo copy "docker://$image" "docker://$REGISTRY/$(echo "$image" | cut -d'/' -f 2-)" --all --authfile "$PULL_SECRET"
|
#!/usr/bin/env bash

# Copyright 2021 Adevinta

# Force-stop all docker-compose services, then remove their containers
# without prompting. Volumes and networks are left in place.
docker-compose kill
docker-compose rm -f
|
<reponame>alairjt/modular-pkg-1
/**
* Simulate 3rd party
*/
// Load the app module, the service under test and the shared unit-test
// factory, then run the generic service test suite against regServices.
define(['app', 'regServices', 'test/unit/factory/utestProvider' ], function (app, regServices, unitestFactory) {
    console.log("* Running regServices.spec.js");
    unitestFactory(regServices);
});
|
<reponame>holt/syringe-todos<filename>lib/syringe/syringe.mixins.js
// > http://syringejs.org
// > syringe.mixins.js v0.0.2. Copyright (c) 2013 <NAME>
// > holt.org. Distributed under the MIT License
/* jshint forin:true, noarg:true, noempty:true, eqeqeq:true, bitwise:false, strict:true,
undef:true, unused:true, curly:true, browser:true, indent:4, maxerr:50, laxcomma:true,
forin:false, curly:false, evil: true, laxbreak:true, multistr: true */
window.Syringe.mixin({

    // Register DOM nodes carrying a `data-syringe-<action>` attribute with
    // the Syringe container.
    //
    //   props.action       - action name; 'add' (alias: 'register')
    //   props.namespace    - optional namespace segment inserted in the key
    //   props.processor    - optional map function applied to each node
    //   props.bindto       - key prefix under which nodes are registered
    //   props.before/after - optional callbacks run around the action
    //
    // Returns `this` for chaining.
    'dom': function (props) {

        'use strict';

        props = props || {};

        var
            actn = props.action,
            nmsp = props.namespace,
            proc = props.processor,
            full = 'data-syringe-' + actn,
            list, $arr;

        nmsp = nmsp && (typeof nmsp === 'string') ? '.' + nmsp : '';

        proc = (typeof proc === 'function') ? proc : function (item) {
            return item;
        };

        list = [].slice.call(document.querySelectorAll('[' + full + ']'));
        $arr = list.map(proc);

        if (typeof props.before === 'function') {
            props.before.call(this);
        }

        switch (actn) {

        // BUG FIX: `case ('add' || 'register')` evaluates to just 'add',
        // so the 'register' alias never matched. List both labels and let
        // the first fall through.
        case 'add':
        case 'register':

            this.add(list.reduce(function (p, c, i) {
                if (c && c.nodeType === 1) {
                    p[props.bindto + nmsp + '.'
                        + c.getAttribute(full)] = $arr[i];
                }
                // Always return the accumulator; the original returned
                // undefined for non-element nodes (unreachable via
                // querySelectorAll, but brittle).
                return p;
            }, {}));

            break;
        }

        if (typeof props.after === 'function') {
            props.after.call(this);
        }

        return this;
    }
});
<filename>Source/GeneratedServices/CloudPrivateCatalogProducer/GTLRCloudPrivateCatalogProducerQuery.h
// NOTE: This file was generated by the ServiceGenerator.
// ----------------------------------------------------------------------------
// API:
// Cloud Private Catalog Producer API (cloudprivatecatalogproducer/v1beta1)
// Description:
//   Enables cloud users to manage and share enterprise catalogs in their
//   organizations.
// Documentation:
// https://cloud.google.com/private-catalog/
#if GTLR_BUILT_AS_FRAMEWORK
#import "GTLR/GTLRQuery.h"
#else
#import "GTLRQuery.h"
#endif
#if GTLR_RUNTIME_VERSION != 3000
#error This file was generated by a different version of ServiceGenerator which is incompatible with this GTLR library source.
#endif
@class GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog;
@class GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1CopyProductRequest;
@class GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1CreateAssociationRequest;
@class GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Product;
@class GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1UndeleteCatalogRequest;
@class GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1UploadIconRequest;
@class GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Version;
@class GTLRCloudPrivateCatalogProducer_GoogleIamV1SetIamPolicyRequest;
@class GTLRCloudPrivateCatalogProducer_GoogleIamV1TestIamPermissionsRequest;
@class GTLRCloudPrivateCatalogProducer_GoogleLongrunningCancelOperationRequest;
// Generated comments include content from the discovery document; avoid them
// causing warnings since clang's checks are some what arbitrary.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation"
NS_ASSUME_NONNULL_BEGIN
/**
* Parent class for other Cloud PrivateCatalog Producer query classes.
*/
@interface GTLRCloudPrivateCatalogProducerQuery : GTLRQuery
/** Selector specifying which fields to include in a partial response. */
@property(nonatomic, copy, nullable) NSString *fields;
@end
/**
* Creates an Association instance under a given Catalog.
*
* Method: cloudprivatecatalogproducer.catalogs.associations.create
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsAssociationsCreate : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsAssociationsCreateWithObject:parent:]
/** The `Catalog` resource's name. */
@property(nonatomic, copy, nullable) NSString *parent;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Association.
*
* Creates an Association instance under a given Catalog.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1CreateAssociationRequest
* to include in the query.
* @param parent The `Catalog` resource's name.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsAssociationsCreate
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1CreateAssociationRequest *)object
parent:(NSString *)parent;
@end
/**
* Deletes the given Association.
*
* Method: cloudprivatecatalogproducer.catalogs.associations.delete
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsAssociationsDelete : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsAssociationsDeleteWithname:]
/** The resource name of the `Association` to delete. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleProtobufEmpty.
*
* Deletes the given Association.
*
* @param name The resource name of the `Association` to delete.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsAssociationsDelete
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Returns the requested Association resource.
*
* Method: cloudprivatecatalogproducer.catalogs.associations.get
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsAssociationsGet : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsAssociationsGetWithname:]
/** The resource name of the `Association` to retrieve. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Association.
*
* Returns the requested Association resource.
*
* @param name The resource name of the `Association` to retrieve.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsAssociationsGet
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Lists all Association resources under a catalog.
*
* Method: cloudprivatecatalogproducer.catalogs.associations.list
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsAssociationsList : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsAssociationsListWithparent:]
/** The maximum number of catalog associations to return. */
@property(nonatomic, assign) NSInteger pageSize;
/**
* A pagination token returned from the previous call to
* `ListAssociations`.
*/
@property(nonatomic, copy, nullable) NSString *pageToken;
/**
* The resource name of the `Catalog` whose `Associations` are
* being retrieved. In the format `catalogs/<catalog>`.
*/
@property(nonatomic, copy, nullable) NSString *parent;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1ListAssociationsResponse.
*
* Lists all Association resources under a catalog.
*
* @param parent The resource name of the `Catalog` whose `Associations` are
* being retrieved. In the format `catalogs/<catalog>`.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsAssociationsList
*
* @note Automatic pagination will be done when @c shouldFetchNextPages is
* enabled. See @c shouldFetchNextPages on @c GTLRService for more
* information.
*/
+ (instancetype)queryWithParent:(NSString *)parent;
@end
/**
* Creates a new Catalog resource.
*
* Method: cloudprivatecatalogproducer.catalogs.create
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsCreate : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsCreateWithObject:]
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleLongrunningOperation.
*
* Creates a new Catalog resource.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog
* to include in the query.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsCreate
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog *)object;
@end
/**
* Soft deletes an existing Catalog and all resources under it.
* The catalog can only be deleted if there is no associations under it or
* DeleteCatalogRequest.force is true. The delete operation
* can be recovered by the PrivateCatalogProducer.UndeleteCatalog
* method.
*
* Method: cloudprivatecatalogproducer.catalogs.delete
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsDelete : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsDeleteWithname:]
/**
* Forces deletion of the `Catalog` and its `Association` resources.
* If the `Catalog` is still associated with other resources and
* force is not set to true, then the operation fails.
*/
@property(nonatomic, assign) BOOL force;
/** The resource name of the catalog. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog.
*
* Soft deletes an existing Catalog and all resources under it.
* The catalog can only be deleted if there is no associations under it or
* DeleteCatalogRequest.force is true. The delete operation
* can be recovered by the PrivateCatalogProducer.UndeleteCatalog
* method.
*
* @param name The resource name of the catalog.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsDelete
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Returns the requested Catalog resource.
*
* Method: cloudprivatecatalogproducer.catalogs.get
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsGet : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsGetWithname:]
/** The resource name of the catalog. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog.
*
* Returns the requested Catalog resource.
*
* @param name The resource name of the catalog.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsGet
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Gets IAM policy for the specified Catalog.
*
* Method: cloudprivatecatalogproducer.catalogs.getIamPolicy
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsGetIamPolicy : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsGetIamPolicyWithresource:]
/**
* REQUIRED: The resource for which the policy is being requested.
* See the operation documentation for the appropriate value for this field.
*/
@property(nonatomic, copy, nullable) NSString *resource;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleIamV1Policy.
*
* Gets IAM policy for the specified Catalog.
*
* @param resource REQUIRED: The resource for which the policy is being
* requested.
* See the operation documentation for the appropriate value for this field.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsGetIamPolicy
*/
+ (instancetype)queryWithResource:(NSString *)resource;
@end
/**
* Lists Catalog resources that the producer has access to, within the
* scope of the parent resource.
*
* Method: cloudprivatecatalogproducer.catalogs.list
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsList : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsList]
/** The maximum number of catalogs to return. */
@property(nonatomic, assign) NSInteger pageSize;
/**
* A pagination token returned from a previous call to ListCatalogs
* that indicates where this listing should continue from.
* This field is optional.
*/
@property(nonatomic, copy, nullable) NSString *pageToken;
/** The resource name of the parent resource. */
@property(nonatomic, copy, nullable) NSString *parent;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1ListCatalogsResponse.
*
* Lists Catalog resources that the producer has access to, within the
* scope of the parent resource.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsList
*
* @note Automatic pagination will be done when @c shouldFetchNextPages is
* enabled. See @c shouldFetchNextPages on @c GTLRService for more
* information.
*/
+ (instancetype)query;
@end
/**
* Updates a specific Catalog resource.
*
* Method: cloudprivatecatalogproducer.catalogs.patch
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsPatch : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsPatchWithObject:name:]
/**
* Output only. The resource name of the catalog, in the format
* `catalogs/{catalog_id}'.
* A unique identifier for the catalog, which is generated
* by catalog service.
*/
@property(nonatomic, copy, nullable) NSString *name;
/**
* Field mask that controls which fields of the catalog should be updated.
*
* String format is a comma-separated list of fields.
*/
@property(nonatomic, copy, nullable) NSString *updateMask;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog.
*
* Updates a specific Catalog resource.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog
* to include in the query.
* @param name Output only. The resource name of the catalog, in the format
* `catalogs/{catalog_id}'.
* A unique identifier for the catalog, which is generated
* by catalog service.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsPatch
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog *)object
name:(NSString *)name;
@end
/**
* Copies a Product under another Catalog.
*
* Method: cloudprivatecatalogproducer.catalogs.products.copy
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsCopy : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsCopyWithObject:name:]
/** The resource name of the current product that is copied from. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleLongrunningOperation.
*
* Copies a Product under another Catalog.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1CopyProductRequest
* to include in the query.
* @param name The resource name of the current product that is copied from.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsCopy
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1CopyProductRequest *)object
name:(NSString *)name;
@end
/**
* Creates a Product instance under a given Catalog.
*
* Method: cloudprivatecatalogproducer.catalogs.products.create
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsCreate : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsCreateWithObject:parent:]
/** The catalog name of the new product's parent. */
@property(nonatomic, copy, nullable) NSString *parent;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Product.
*
* Creates a Product instance under a given Catalog.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Product
* to include in the query.
* @param parent The catalog name of the new product's parent.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsCreate
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Product *)object
parent:(NSString *)parent;
@end
/**
* Hard deletes a Product.
*
* Method: cloudprivatecatalogproducer.catalogs.products.delete
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsDelete : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsDeleteWithname:]
/** The resource name of the product. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleProtobufEmpty.
*
* Hard deletes a Product.
*
* @param name The resource name of the product.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsDelete
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Returns the requested Product resource.
*
* Method: cloudprivatecatalogproducer.catalogs.products.get
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsGet : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsGetWithname:]
/** The resource name of the product. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Product.
*
* Returns the requested Product resource.
*
* @param name The resource name of the product.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsGet
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Creates an Icon instance under a given Product.
* If Product only has a default icon, a new Icon
* instance is created and associated with the given Product.
* If Product already has a non-default icon, the action creates
* a new Icon instance, associates the newly created
* Icon with the given Product and deletes the old icon.
*
* Method: cloudprivatecatalogproducer.catalogs.products.icons.upload
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsIconsUpload : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsIconsUploadWithObject:product:]
/** The resource name of the product. */
@property(nonatomic, copy, nullable) NSString *product;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleProtobufEmpty.
*
* Creates an Icon instance under a given Product.
* If Product only has a default icon, a new Icon
* instance is created and associated with the given Product.
* If Product already has a non-default icon, the action creates
* a new Icon instance, associates the newly created
* Icon with the given Product and deletes the old icon.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1UploadIconRequest
* to include in the query.
* @param product The resource name of the product.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsIconsUpload
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1UploadIconRequest *)object
product:(NSString *)product;
@end
/**
* Lists Product resources that the producer has access to, within the
* scope of the parent catalog.
*
* Method: cloudprivatecatalogproducer.catalogs.products.list
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsList : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsListWithparent:]
/**
* A filter expression used to restrict the returned results based
* upon properties of the product.
*/
@property(nonatomic, copy, nullable) NSString *filter;
/** The maximum number of products to return. */
@property(nonatomic, assign) NSInteger pageSize;
/**
* A pagination token returned from a previous call to ListProducts
* that indicates where this listing should continue from.
* This field is optional.
*/
@property(nonatomic, copy, nullable) NSString *pageToken;
/** The resource name of the parent resource. */
@property(nonatomic, copy, nullable) NSString *parent;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1ListProductsResponse.
*
* Lists Product resources that the producer has access to, within the
* scope of the parent catalog.
*
* @param parent The resource name of the parent resource.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsList
*
* @note Automatic pagination will be done when @c shouldFetchNextPages is
* enabled. See @c shouldFetchNextPages on @c GTLRService for more
* information.
*/
+ (instancetype)queryWithParent:(NSString *)parent;
@end
/**
* Updates a specific Product resource.
*
* Method: cloudprivatecatalogproducer.catalogs.products.patch
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsPatch : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsPatchWithObject:name:]
/**
* Required. The resource name of the product in the format
* `catalogs/{catalog_id}/products/a-z*[a-z0-9]'.
* A unique identifier for the product under a catalog, which cannot
* be changed after the product is created. The final
 * segment of the name must be between 1 and 256 characters in length.
*/
@property(nonatomic, copy, nullable) NSString *name;
/**
* Field mask that controls which fields of the product should be updated.
*
* String format is a comma-separated list of fields.
*/
@property(nonatomic, copy, nullable) NSString *updateMask;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Product.
*
* Updates a specific Product resource.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Product
* to include in the query.
* @param name Required. The resource name of the product in the format
 *  `catalogs/{catalog_id}/products/a-z*[a-z0-9]`.
* A unique identifier for the product under a catalog, which cannot
* be changed after the product is created. The final
* segment of the name must between 1 and 256 characters in length.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsPatch
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Product *)object
name:(NSString *)name;
@end
/**
* Creates a Version instance under a given Product.
*
* Method: cloudprivatecatalogproducer.catalogs.products.versions.create
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsCreate : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsVersionsCreateWithObject:parent:]
/** The product name of the new version's parent. */
@property(nonatomic, copy, nullable) NSString *parent;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleLongrunningOperation.
*
* Creates a Version instance under a given Product.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Version
* to include in the query.
* @param parent The product name of the new version's parent.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsCreate
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Version *)object
parent:(NSString *)parent;
@end
/**
* Hard deletes a Version.
*
* Method: cloudprivatecatalogproducer.catalogs.products.versions.delete
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsDelete : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsVersionsDeleteWithname:]
/** The resource name of the version. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleProtobufEmpty.
*
* Hard deletes a Version.
*
* @param name The resource name of the version.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsDelete
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Returns the requested Version resource.
*
* Method: cloudprivatecatalogproducer.catalogs.products.versions.get
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsGet : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsVersionsGetWithname:]
/** The resource name of the version. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Version.
*
* Returns the requested Version resource.
*
* @param name The resource name of the version.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsGet
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Lists Version resources that the producer has access to, within the
* scope of the parent Product.
*
* Method: cloudprivatecatalogproducer.catalogs.products.versions.list
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsList : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsVersionsListWithparent:]
/** The maximum number of versions to return. */
@property(nonatomic, assign) NSInteger pageSize;
/**
* A pagination token returned from a previous call to ListVersions
* that indicates where this listing should continue from.
* This field is optional.
*/
@property(nonatomic, copy, nullable) NSString *pageToken;
/** The resource name of the parent resource. */
@property(nonatomic, copy, nullable) NSString *parent;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1ListVersionsResponse.
*
* Lists Version resources that the producer has access to, within the
* scope of the parent Product.
*
* @param parent The resource name of the parent resource.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsList
*
* @note Automatic pagination will be done when @c shouldFetchNextPages is
* enabled. See @c shouldFetchNextPages on @c GTLRService for more
* information.
*/
+ (instancetype)queryWithParent:(NSString *)parent;
@end
/**
* Updates a specific Version resource.
*
* Method: cloudprivatecatalogproducer.catalogs.products.versions.patch
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsPatch : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsProductsVersionsPatchWithObject:name:]
/**
* Required. The resource name of the version, in the format
 * `catalogs/{catalog_id}/products/{product_id}/versions/a-z*[a-z0-9]`.
* A unique identifier for the version under a product, which can't
* be changed after the version is created. The final segment of the name must
* between 1 and 63 characters in length.
*/
@property(nonatomic, copy, nullable) NSString *name;
/**
* Field mask that controls which fields of the version should be updated.
*
* String format is a comma-separated list of fields.
*/
@property(nonatomic, copy, nullable) NSString *updateMask;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Version.
*
* Updates a specific Version resource.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Version
* to include in the query.
* @param name Required. The resource name of the version, in the format
 *  `catalogs/{catalog_id}/products/{product_id}/versions/a-z*[a-z0-9]`.
* A unique identifier for the version under a product, which can't
* be changed after the version is created. The final segment of the name
* must
* between 1 and 63 characters in length.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsProductsVersionsPatch
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Version *)object
name:(NSString *)name;
@end
/**
* Sets the IAM policy for the specified Catalog.
*
* Method: cloudprivatecatalogproducer.catalogs.setIamPolicy
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsSetIamPolicy : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsSetIamPolicyWithObject:resource:]
/**
* REQUIRED: The resource for which the policy is being specified.
* See the operation documentation for the appropriate value for this field.
*/
@property(nonatomic, copy, nullable) NSString *resource;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleIamV1Policy.
*
* Sets the IAM policy for the specified Catalog.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleIamV1SetIamPolicyRequest to include
* in the query.
* @param resource REQUIRED: The resource for which the policy is being
* specified.
* See the operation documentation for the appropriate value for this field.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsSetIamPolicy
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleIamV1SetIamPolicyRequest *)object
resource:(NSString *)resource;
@end
/**
* Tests the IAM permissions for the specified Catalog.
*
* Method: cloudprivatecatalogproducer.catalogs.testIamPermissions
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsTestIamPermissions : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsTestIamPermissionsWithObject:resource:]
/**
* REQUIRED: The resource for which the policy detail is being requested.
* See the operation documentation for the appropriate value for this field.
*/
@property(nonatomic, copy, nullable) NSString *resource;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleIamV1TestIamPermissionsResponse.
*
* Tests the IAM permissions for the specified Catalog.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleIamV1TestIamPermissionsRequest to
* include in the query.
* @param resource REQUIRED: The resource for which the policy detail is being
* requested.
* See the operation documentation for the appropriate value for this field.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsTestIamPermissions
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleIamV1TestIamPermissionsRequest *)object
resource:(NSString *)resource;
@end
/**
* Undeletes a deleted Catalog and all resources under it.
*
* Method: cloudprivatecatalogproducer.catalogs.undelete
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_CatalogsUndelete : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForCatalogsUndeleteWithObject:name:]
/** The resource name of the catalog. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1Catalog.
*
* Undeletes a deleted Catalog and all resources under it.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1UndeleteCatalogRequest
* to include in the query.
* @param name The resource name of the catalog.
*
* @return GTLRCloudPrivateCatalogProducerQuery_CatalogsUndelete
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleCloudPrivatecatalogproducerV1beta1UndeleteCatalogRequest *)object
name:(NSString *)name;
@end
/**
* Starts asynchronous cancellation on a long-running operation. The server
* makes a best effort to cancel the operation, but success is not
* guaranteed. If the server doesn't support this method, it returns
* `google.rpc.Code.UNIMPLEMENTED`. Clients can use
* Operations.GetOperation or
* other methods to check whether the cancellation succeeded or whether the
* operation completed despite cancellation. On successful cancellation,
* the operation is not deleted; instead, it becomes an operation with
* an Operation.error value with a google.rpc.Status.code of 1,
* corresponding to `Code.CANCELLED`.
*
* Method: cloudprivatecatalogproducer.operations.cancel
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_OperationsCancel : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForOperationsCancelWithObject:name:]
/** The name of the operation resource to be cancelled. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleProtobufEmpty.
*
* Starts asynchronous cancellation on a long-running operation. The server
* makes a best effort to cancel the operation, but success is not
* guaranteed. If the server doesn't support this method, it returns
* `google.rpc.Code.UNIMPLEMENTED`. Clients can use
* Operations.GetOperation or
* other methods to check whether the cancellation succeeded or whether the
* operation completed despite cancellation. On successful cancellation,
* the operation is not deleted; instead, it becomes an operation with
* an Operation.error value with a google.rpc.Status.code of 1,
* corresponding to `Code.CANCELLED`.
*
* @param object The @c
* GTLRCloudPrivateCatalogProducer_GoogleLongrunningCancelOperationRequest to
* include in the query.
* @param name The name of the operation resource to be cancelled.
*
* @return GTLRCloudPrivateCatalogProducerQuery_OperationsCancel
*/
+ (instancetype)queryWithObject:(GTLRCloudPrivateCatalogProducer_GoogleLongrunningCancelOperationRequest *)object
name:(NSString *)name;
@end
/**
* Deletes a long-running operation. This method indicates that the client is
* no longer interested in the operation result. It does not cancel the
* operation. If the server doesn't support this method, it returns
* `google.rpc.Code.UNIMPLEMENTED`.
*
* Method: cloudprivatecatalogproducer.operations.delete
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_OperationsDelete : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForOperationsDeleteWithname:]
/** The name of the operation resource to be deleted. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleProtobufEmpty.
*
* Deletes a long-running operation. This method indicates that the client is
* no longer interested in the operation result. It does not cancel the
* operation. If the server doesn't support this method, it returns
* `google.rpc.Code.UNIMPLEMENTED`.
*
* @param name The name of the operation resource to be deleted.
*
* @return GTLRCloudPrivateCatalogProducerQuery_OperationsDelete
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Gets the latest state of a long-running operation. Clients can use this
* method to poll the operation result at intervals as recommended by the API
* service.
*
* Method: cloudprivatecatalogproducer.operations.get
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_OperationsGet : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForOperationsGetWithname:]
/** The name of the operation resource. */
@property(nonatomic, copy, nullable) NSString *name;
/**
* Fetches a @c GTLRCloudPrivateCatalogProducer_GoogleLongrunningOperation.
*
* Gets the latest state of a long-running operation. Clients can use this
* method to poll the operation result at intervals as recommended by the API
* service.
*
* @param name The name of the operation resource.
*
* @return GTLRCloudPrivateCatalogProducerQuery_OperationsGet
*/
+ (instancetype)queryWithName:(NSString *)name;
@end
/**
* Lists operations that match the specified filter in the request. If the
* server doesn't support this method, it returns `UNIMPLEMENTED`.
* NOTE: the `name` binding allows API services to override the binding
* to use different resource name schemes, such as `users/ * /operations`. To
* override the binding, API services can add a binding such as
* `"/v1/{name=users/ *}/operations"` to their service configuration.
* For backwards compatibility, the default name includes the operations
* collection id, however overriding users must ensure the name binding
* is the parent resource, without the operations collection id.
*
* Method: cloudprivatecatalogproducer.operations.list
*
* Authorization scope(s):
* @c kGTLRAuthScopeCloudPrivateCatalogProducerCloudPlatform
*/
@interface GTLRCloudPrivateCatalogProducerQuery_OperationsList : GTLRCloudPrivateCatalogProducerQuery
// Previous library name was
// +[GTLQueryCloudPrivateCatalogProducer queryForOperationsList]
/** The standard list filter. */
@property(nonatomic, copy, nullable) NSString *filter;
/** The name of the operation's parent resource. */
@property(nonatomic, copy, nullable) NSString *name;
/** The standard list page size. */
@property(nonatomic, assign) NSInteger pageSize;
/** The standard list page token. */
@property(nonatomic, copy, nullable) NSString *pageToken;
/**
* Fetches a @c
* GTLRCloudPrivateCatalogProducer_GoogleLongrunningListOperationsResponse.
*
* Lists operations that match the specified filter in the request. If the
* server doesn't support this method, it returns `UNIMPLEMENTED`.
* NOTE: the `name` binding allows API services to override the binding
* to use different resource name schemes, such as `users/ * /operations`. To
* override the binding, API services can add a binding such as
* `"/v1/{name=users/ *}/operations"` to their service configuration.
* For backwards compatibility, the default name includes the operations
* collection id, however overriding users must ensure the name binding
* is the parent resource, without the operations collection id.
*
* @return GTLRCloudPrivateCatalogProducerQuery_OperationsList
*
* @note Automatic pagination will be done when @c shouldFetchNextPages is
* enabled. See @c shouldFetchNextPages on @c GTLRService for more
* information.
*/
+ (instancetype)query;
@end
NS_ASSUME_NONNULL_END
#pragma clang diagnostic pop
|
def remove_key_values(dictionary: dict, keys: list) -> dict:
    """Delete the given keys from ``dictionary`` in place.

    Keys that are not present are silently ignored.

    Args:
        dictionary: The dict to mutate.
        keys: Keys to remove from ``dictionary``.

    Returns:
        The same (now mutated) dictionary, returned to allow call chaining.
    """
    for unwanted in keys:
        if unwanted in dictionary:
            del dictionary[unwanted]
    return dictionary
require_relative 'point'
module Geometry
=begin rdoc
An object repesenting a zero {Size} in N-dimensional space
A {SizeZero} object is a {Size} that will always compare equal to zero and unequal to
everything else, regardless of dimensionality. You can think of it as an application of the
{http://en.wikipedia.org/wiki/Null_Object_pattern Null Object Pattern}.
=end
class SizeZero
def eql?(other)
if other.respond_to? :all?
other.all? { |e| e.eql? 0 }
else
other == 0
end
end
alias == eql?
def coerce(other)
if other.is_a? Numeric
[other, 0]
elsif other.is_a? Array
[other, Array.new(other.size, 0)]
elsif other.is_a? Vector
[other, Vector[*Array.new(other.size, 0)]]
else
[Size[other], Size[Array.new(other.size, 0)]]
end
end
# @group Arithmetic
# @group Unary operators
def +@
self
end
def -@
self
end
# @endgroup
def +(other)
other
end
def -(other)
if other.respond_to? :-@
-other
elsif other.respond_to? :map
other.map { |a| -a }
end
end
def *(other)
self
end
def /(other)
raise OperationNotDefined unless other.is_a? Numeric
raise ZeroDivisionError if 0 == other
self
end
# @endgroup
# @group Enumerable
# Return the first, or first n, elements (always 0)
# @param n [Number] the number of elements to return
def first(n = nil)
Array.new(n, 0) rescue 0
end
# @endgroup
end
end
|
<gh_stars>100-1000
# -*- test-case-name: vumi.demos.tests.test_static_reply -*-
from datetime import date
from twisted.internet.defer import succeed, inlineCallbacks
from vumi.application import ApplicationWorker
from vumi.config import ConfigText
class StaticReplyConfig(ApplicationWorker.CONFIG_CLASS):
    """Configuration for :class:`StaticReplyApplication`."""

    # static=False: the value may be resolved per-message rather than once at
    # worker startup. The template supports {user} and {now} placeholders,
    # which are filled in at reply time.
    reply_text = ConfigText(
        "Reply text to send in response to inbound messages.", static=False,
        default="Hello {user} at {now}.")
class StaticReplyApplication(ApplicationWorker):
    """
    Application that replies to incoming messages with a configured response.
    """
    CONFIG_CLASS = StaticReplyConfig

    @inlineCallbacks
    def consume_user_message(self, message):
        """Reply to an inbound message with the configured template, then
        close the session (``continue_session=False``).

        The reply text is formatted with the sender's address (``{user}``)
        and today's date (``{now}``).
        """
        config = yield self.get_config(message)
        yield self.reply_to(
            message, config.reply_text.format(
                user=message.user(), now=date.today()),
            continue_session=False)
|
const express = require('express');
const app = express();
const bodyParser = require('body-parser');

// Parse JSON request bodies.
app.use(bodyParser.json());

// In-memory "database" seeded from disk; mutations are not persisted.
let books = require('./books.json');

// Add a book. Rejects empty or non-object payloads instead of silently
// storing `undefined`/garbage (the previous version pushed whatever arrived).
app.post('/books', (req, res) => {
    const book = req.body;
    if (!book || typeof book !== 'object' || Object.keys(book).length === 0) {
        return res.status(400).send({
            message: 'Request body must be a non-empty book object'
        });
    }
    books.push(book);
    res.send({
        message: 'Book added successfully',
        book
    });
});

// List all books.
app.get('/books', (req, res) => {
    res.send({ books });
});

app.listen(3000, () => console.log('Server started'));
<reponame>Plutoz01/price-depo-ui<gh_stars>0
import { createFeatureSelector, createSelector } from '@ngrx/store';

import { AdminState } from './admin.state';

// Root selector for the admin feature slice.
// NOTE(review): the feature key is 'app' while the typed state is AdminState —
// confirm the slice is actually registered under 'app' in the store config.
export const getAdminAppStateSelector = createFeatureSelector( 'app' );

// Master/detail sub-state selectors, one per administered entity type.
export const getManufacturerMasterDetailsStateSelector = createSelector( getAdminAppStateSelector,
  ( state: AdminState ) => state.manufacturers );

export const getChainStoreMasterDetailsStateSelector = createSelector( getAdminAppStateSelector,
  ( state: AdminState ) => state.chainStores );

export const getShopMasterDetailsStateSelector = createSelector( getAdminAppStateSelector,
  ( state: AdminState ) => state.shops );

export const getProductMasterDetailsStateSelector = createSelector( getAdminAppStateSelector,
  ( state: AdminState ) => state.products );

// Selector for the dynamic form definition used by the admin screens.
export const getFormDefSelector = createSelector( getAdminAppStateSelector,
  ( state: AdminState ) => state.formDef );
|
<gh_stars>0
import { createReducer, on } from '@ngrx/store';
import { ConditionSummaryActions as conditionSummary } from '../actions';
import { ConditionLists } from '../../generated-data-api';
export const conditionsSummaryFeatureKey = 'conditionsSummary';

// State for the condition-summary feature slice.
export interface State {
  conditionsSummary: ConditionLists;
  // True after a failed load; cleared again by the next successful load.
  failure: boolean;
}

export const initialState: State = {
  conditionsSummary: {
    activeConcerns: [],
    activeConditions: [],
    inactiveConcerns: [],
    inactiveConditions: []
  },
  failure: false
};

export const reducer = createReducer(
  initialState,
  on(conditionSummary.loadConditionSummaryForSubjectSuccessAction, (state, { data: payload }) =>
    ({
      ...state,
      conditionsSummary: payload,
      // BUGFIX: reset the failure flag on success; previously a single
      // failure left state.failure stuck at true for the rest of the session.
      failure: false,
    })),
  on(conditionSummary.loadConditionSummaryForSubjectFailureAction, (state) =>
    ({
      ...state,
      failure: true
    })),
);

export const getConditionsSummary = (state: State) => state.conditionsSummary;
|
# Launch an OpenNMT-style keyphrase/response training run for either the
# Weibo or Twitter dataset, optionally with a copy mechanism (CopyRNN).

dataset=Weibo
model=BiAttEncoder # PostEncoder | BiAttEncoder
wb_data_tag=Weibo_src50_conv100_tgt10_v50000
tw_data_tag=Twitter_src35_conv100_tgt10_v30000
is_copyrnn=false
emb_size=200
seed=23
special=''

# Select the preprocessed data tag that matches the chosen dataset.
if [[ $dataset =~ 'Weibo' ]]
then
    data_tag=$wb_data_tag
elif [[ $dataset =~ 'Twitter' ]]
then
    data_tag=$tw_data_tag
else
    echo 'Wrong dataset name'
    # BUGFIX: abort instead of falling through and training with an
    # unset data_tag.
    exit 1
fi

# Toggle the copy mechanism.
# BUGFIX: the else-branch previously assigned copy_tag='' while the train
# command interpolates ${copy_cmd}; both branches now set copy_cmd.
if $is_copyrnn
then
    copy_cmd='-copy_attn -reuse_copy_attn'
    model_tag='copyrnn'
else
    copy_cmd=''
    model_tag='rnn'
fi

model_name=${dataset}_${model}_${model_tag}_${emb_size}emb_seed${seed}${special}

nohup \
python -u ../train.py \
    -max_src_len 50 \
    -max_conv_len 100 \
    -word_vec_size ${emb_size} \
    -share_embeddings \
    -model_type text \
    -encoder_type ${model} \
    -decoder_type rnn \
    -enc_layers 2 \
    -dec_layers 1 \
    -rnn_size 300 \
    -rnn_type GRU \
    -global_attention general ${copy_cmd} \
    -save_model saved_models/${model_name} \
    -seed ${seed} \
    -data ../processed_data/${data_tag} \
    -batch_size 64 \
    -epochs 15 \
    -optim adam \
    -max_grad_norm 1 \
    -dropout 0.1 \
    -learning_rate 0.001 \
    -learning_rate_decay 0.5 \
    -gpuid 0 \
    > log/train_${model_name}.log &
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Chart = void 0;
var tslib_1 = require("tslib");
var chart_1 = tslib_1.__importDefault(require("./chart/chart"));
exports.Chart = chart_1.default;
//# sourceMappingURL=api.js.map |
#!/bin/bash
# Shut down the local JEUS 8 domain: first the node manager, then the two
# managed servers listening on 19736 and 9736.
# NOTE(review): admin credentials (jeus/jeus) are hard-coded — consider
# sourcing them from a protected file instead of the command line.
../jeus8/bin/stopNodeManager -host localhost -port 7730
../jeus8/bin/stopServer -host localhost:19736 -u jeus -p jeus
../jeus8/bin/stopServer -host localhost:9736 -u jeus -p jeus
#!/bin/bash
# Collect the ids of the AWS build-agent containers (the container id
# doubles as the agent name in TeamCity).
agents=$(sudo docker ps -a | grep aws | awk '{ print $1 }')

# Stop and remove each agent container.
# BUGFIX: the loop previously ran `docker start`, contradicting both the
# script's stated purpose ("Delete agent") and the inline comment
# ("Stop and remove container"); it now stops and removes the container.
for agent in $agents
do
    sudo docker stop "$agent"
    sudo docker rm "$agent"
done
/// Multiply `a` and `b` modulo the prime 1_000_000_007.
///
/// Both operands are reduced first, so the intermediate product is at most
/// (P-1)^2 < 2^60 and cannot overflow `u64`.
fn multmodp(a: u64, b: u64) -> u64 {
    const P: u64 = 1_000_000_007;
    ((a % P) * (b % P)) % P
}

/// Compute `base ^ exponent` modulo 1_000_000_007 using binary
/// (square-and-multiply) exponentiation; `0^0` is defined as 1.
///
/// BUGFIX: the previous version ignored `base` entirely and indexed into an
/// empty placeholder table (`[/* Populate ... */]`), which does not even
/// compile. It now implements what the name and signature promise, and
/// handles exponents of any size (no 32-iteration cutoff).
fn modular_exponentiation(base: u64, exponent: u64) -> u64 {
    let mut result: u64 = 1;
    // Reduce the base once up front via multmodp(base, 1) == base % P.
    let mut square = multmodp(base, 1);
    let mut n = exponent;
    while n != 0 {
        if n & 1 != 0 {
            result = multmodp(result, square);
        }
        square = multmodp(square, square);
        n >>= 1;
    }
    result
}
// Asset categories used to classify investments.
// NOTE(review): the string values are presumably persisted/compared verbatim,
// so the mixed casing and the 'investiment' spelling are left untouched for
// compatibility — confirm before normalizing them.
export enum Type {
  STOCK = 'stock',
  ETF = 'ETF',
  REAL_ESTATE_INVESTIMENT_FUND = 'real estate investiment fund',
  STOCK_INVESTIMENT_FUND = 'stock investiment fund',
  CRYPTOCURRENCY = 'cryptocurrency'
}
package spring_data.game_store.domain.enumeration;
/**
 * Authorization roles a user account can hold in the game store.
 */
public enum Role {
    BASIC_USER, ADMINISTRATOR
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.repository.impexp;
import org.apache.atlas.exception.AtlasBaseException;
import org.apache.atlas.model.TypeCategory;
import org.apache.atlas.model.instance.AtlasClassification;
import org.apache.atlas.model.instance.AtlasEntity;
import org.apache.atlas.model.typedef.AtlasStructDef;
import org.apache.atlas.type.AtlasArrayType;
import org.apache.atlas.type.AtlasClassificationType;
import org.apache.atlas.type.AtlasEntityType;
import org.apache.atlas.type.AtlasEnumType;
import org.apache.atlas.type.AtlasMapType;
import org.apache.atlas.type.AtlasStructType;
import org.apache.atlas.type.AtlasType;
import org.apache.atlas.type.AtlasTypeRegistry;
import org.apache.commons.collections.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Collects the type definitions that must accompany entities in an export.
 *
 * <p>For every entity passed to {@link #addTypes}, walks the entity's type —
 * including all super types and the types of all attributes, recursively
 * through array/map element types — plus any attached classification types,
 * and records each discovered type name in the corresponding set on the
 * supplied {@link ExportService.ExportContext}. Primitive types are skipped.
 *
 * <p>Not thread-safe: it mutates the context's type-name sets without
 * synchronization.
 */
class ExportTypeProcessor {
    private static final Logger LOG = LoggerFactory.getLogger(ExportTypeProcessor.class);

    // Made final: both fields are assigned once in the constructor.
    private final AtlasTypeRegistry           typeRegistry;
    private final ExportService.ExportContext context;

    ExportTypeProcessor(AtlasTypeRegistry typeRegistry, ExportService.ExportContext context) {
        this.typeRegistry = typeRegistry;
        this.context      = context;
    }

    /**
     * Registers the entity's type and the types of all its classifications.
     *
     * @param entity  entity being exported
     * @param context export context whose type sets are updated
     */
    public void addTypes(AtlasEntity entity, ExportService.ExportContext context) {
        addEntityType(entity.getTypeName(), context);

        if (CollectionUtils.isNotEmpty(entity.getClassifications())) {
            for (AtlasClassification c : entity.getClassifications()) {
                addClassificationType(c.getTypeName(), context);
            }
        }
    }

    /** Resolves a type name via the registry and records it; unknown names are logged and skipped. */
    private void addType(String typeName, ExportService.ExportContext context) {
        try {
            // Simplified: the old code pre-initialized a local to null for no benefit.
            addType(typeRegistry.getType(typeName), context);
        } catch (AtlasBaseException excp) {
            // Pass the exception so the log preserves the failure cause.
            LOG.error("unknown type {}", typeName, excp);
        }
    }

    private void addEntityType(String typeName, ExportService.ExportContext context) {
        if (!context.entityTypes.contains(typeName)) {
            AtlasEntityType entityType = typeRegistry.getEntityTypeByName(typeName);

            addEntityType(entityType, context);
        }
    }

    private void addClassificationType(String typeName, ExportService.ExportContext context) {
        if (!context.classificationTypes.contains(typeName)) {
            AtlasClassificationType classificationType = typeRegistry.getClassificationTypeByName(typeName);

            addClassificationType(classificationType, context);
        }
    }

    /** Dispatches on the runtime category of the type; primitives need no export. */
    private void addType(AtlasType type, ExportService.ExportContext context) {
        if (type.getTypeCategory() == TypeCategory.PRIMITIVE) {
            return;
        }

        if (type instanceof AtlasArrayType) {
            AtlasArrayType arrayType = (AtlasArrayType) type;

            addType(arrayType.getElementType(), context);
        } else if (type instanceof AtlasMapType) {
            AtlasMapType mapType = (AtlasMapType) type;

            addType(mapType.getKeyType(), context);
            addType(mapType.getValueType(), context);
        } else if (type instanceof AtlasEntityType) {
            addEntityType((AtlasEntityType) type, context);
        } else if (type instanceof AtlasClassificationType) {
            addClassificationType((AtlasClassificationType) type, context);
        } else if (type instanceof AtlasStructType) {
            addStructType((AtlasStructType) type, context);
        } else if (type instanceof AtlasEnumType) {
            addEnumType((AtlasEnumType) type, context);
        }
    }

    /** Records an entity type, its attribute types, and all super types (once each). */
    private void addEntityType(AtlasEntityType entityType, ExportService.ExportContext context) {
        if (!context.entityTypes.contains(entityType.getTypeName())) {
            context.entityTypes.add(entityType.getTypeName());

            addAttributeTypes(entityType, context);

            if (CollectionUtils.isNotEmpty(entityType.getAllSuperTypes())) {
                for (String superType : entityType.getAllSuperTypes()) {
                    addEntityType(superType, context);
                }
            }
        }
    }

    /** Records a classification type, its attribute types, and all super types (once each). */
    private void addClassificationType(AtlasClassificationType classificationType, ExportService.ExportContext context) {
        if (!context.classificationTypes.contains(classificationType.getTypeName())) {
            context.classificationTypes.add(classificationType.getTypeName());

            addAttributeTypes(classificationType, context);

            if (CollectionUtils.isNotEmpty(classificationType.getAllSuperTypes())) {
                for (String superType : classificationType.getAllSuperTypes()) {
                    addClassificationType(superType, context);
                }
            }
        }
    }

    /** Records a struct type and its attribute types (once). */
    private void addStructType(AtlasStructType structType, ExportService.ExportContext context) {
        if (!context.structTypes.contains(structType.getTypeName())) {
            context.structTypes.add(structType.getTypeName());

            addAttributeTypes(structType, context);
        }
    }

    /** Records an enum type (enums have no nested types to chase). */
    private void addEnumType(AtlasEnumType enumType, ExportService.ExportContext context) {
        if (!context.enumTypes.contains(enumType.getTypeName())) {
            context.enumTypes.add(enumType.getTypeName());
        }
    }

    /** Records the type of every attribute declared on the struct-like type. */
    private void addAttributeTypes(AtlasStructType structType, ExportService.ExportContext context) {
        for (AtlasStructDef.AtlasAttributeDef attributeDef : structType.getStructDef().getAttributeDefs()) {
            addType(attributeDef.getTypeName(), context);
        }
    }
}
|
def reverse_array(array):
    """Return a new sequence with ``array``'s elements in reverse order.

    Uses a negative-step slice, so it works on any sliceable sequence
    (list, str, tuple) and leaves the original untouched.
    """
    return array[::-1]
#!/usr/bin/env node
/*****************************************************************************
* Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"). You may *
* not use this file except in compliance with the License. A copy of the *
* License is located at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* or in the 'license' file accompanying this file. This file is distributed *
* on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, *
* express or implied. See the License for the specific language governing *
* permissions and limitations under the License. *
*****************************************************************************/
import * as cdk from '@aws-cdk/core';
import * as lambda from '@aws-cdk/aws-lambda';
import * as events from '@aws-cdk/aws-events';
import * as eventstarget from '@aws-cdk/aws-events-targets';
import {
Effect,
PolicyDocument,
PolicyStatement,
ServicePrincipal,
Policy,
Role,
CfnRole,
ArnPrincipal,
CompositePrincipal,
CfnPolicy
} from '@aws-cdk/aws-iam';
import { Bucket, IBucket } from '@aws-cdk/aws-s3';
import * as e2l from '@aws-solutions-constructs/aws-events-rule-lambda';
import * as sfn from '@aws-cdk/aws-stepfunctions';
import * as iam from '@aws-cdk/aws-iam';
/**
 * Configuration for a single remediation-playbook construct.
 */
export interface IPlaybookConstructProps {
  /** Short playbook name; used in member-role and resource naming below. */
  name: string;
  /** Human-readable description of the playbook. */
  description: string;
  /** Extra environment variables for the playbook's Lambda (optional). */
  lambda_env?: any;
  /** Deployment region, used when composing ARNs. */
  aws_region: string;
  /** Deployment account id, used when composing ARNs. */
  aws_accountid: string;
  /** AWS partition, e.g. 'aws', 'aws-cn', 'aws-us-gov'. */
  aws_partition: string;
  /** Lambda handler override (optional). */
  lambda_handler?: string;
  /** Lambda memory size — presumably MB; TODO confirm against usage. */
  lambda_memsize?: number;
  /** Lambda timeout — presumably seconds; TODO confirm against usage. */
  lambda_maxtime?: number;
  /** Security Hub custom-action name that triggers this playbook. */
  custom_action_name: string;
  /** Security Hub finding filter patterns for the triggering event rule. */
  findings: object;
  /** Solution id (also used as the resource-name prefix). */
  solutionId: string;
  solutionVersion: string;
  solutionName: string;
  /** Distribution artifact name and bucket for packaged Lambda code. */
  distName: string;
  distBucket: string;
}
/**
 * PlaybookConstruct: provisions one remediation playbook — a Lambda function
 * with its execution role, a Security Hub custom action (via a custom
 * resource), an event rule wired to that custom action, and an automatic
 * event rule whose enabled state is driven by a CloudFormation parameter.
 */
export class PlaybookConstruct extends cdk.Construct {
  // Exposed so callers can attach additional permissions to the Lambda's role.
  public readonly lambdaRole: Role;

  constructor(scope: cdk.Construct, id: string, props: IPlaybookConstructProps) {
    super(scope, id);

    // Only findings with workflow status NEW and compliance FAILED/WARNING
    // are matched by the event patterns built below.
    let workflowStatusFilter = {
      "Status": [ "NEW" ]
    }
    let complianceStatusFilter = {
      "Status": [ "FAILED", "WARNING" ]
    }
    let RESOURCE_PREFIX = props.solutionId;

    // --- IAM policy statements for the Lambda execution role ---
    const basePolicy = new PolicyStatement();
    basePolicy.addActions("cloudwatch:PutMetricData")
    basePolicy.addActions("securityhub:BatchUpdateFindings")
    basePolicy.effect = Effect.ALLOW
    basePolicy.addResources("*")
    const logsPolicy = new PolicyStatement();
    logsPolicy.addActions("logs:CreateLogGroup")
    logsPolicy.addActions("logs:CreateLogStream")
    logsPolicy.addActions("logs:PutLogEvents")
    logsPolicy.effect = Effect.ALLOW
    logsPolicy.addResources("*")
    const kmsPolicy = new PolicyStatement();
    kmsPolicy.addActions("kms:Encrypt")
    kmsPolicy.addActions("kms:Decrypt")
    kmsPolicy.addActions("kms:GenerateDataKey")
    kmsPolicy.effect = Effect.ALLOW
    // CMK ARN is resolved at deploy time from SSM Parameter Store.
    kmsPolicy.addResources('{{resolve:ssm:/Solutions/' + props.solutionId + '/CMK_ARN:1}}')
    const ssmPolicy = new PolicyStatement({
      actions: [
        'ssm:GetParameter',
        'ssm:GetParameters',
        'ssm:PutParameter'
      ],
      resources: [`arn:${props.aws_partition}:ssm:*:${props.aws_accountid}:parameter/Solutions/SO0111/*`]
    })
    const snsPolicy = new PolicyStatement();
    snsPolicy.addActions("sns:Publish")
    snsPolicy.effect = Effect.ALLOW
    snsPolicy.addResources('arn:' + props.aws_partition + ':sns:' + props.aws_region + ':' +
      props.aws_accountid + ':' + props.solutionId + '-SHARR_Topic')
    const stsPolicy = new PolicyStatement();
    stsPolicy.addActions("sts:AssumeRole")
    stsPolicy.effect = Effect.ALLOW
    // Allows assuming the per-playbook member role in any account.
    stsPolicy.addResources('arn:' + props.aws_partition + ':iam::*:role/' +
      RESOURCE_PREFIX + '_' + props.name + '_memberRole_' + props.aws_region)
    const lambdaPolicy = new PolicyDocument();
    lambdaPolicy.addStatements(logsPolicy)
    lambdaPolicy.addStatements(basePolicy)
    lambdaPolicy.addStatements(snsPolicy)
    lambdaPolicy.addStatements(kmsPolicy)
    lambdaPolicy.addStatements(stsPolicy)
    lambdaPolicy.addStatements(ssmPolicy)

    // --- Lambda execution role ---
    const principal = new ServicePrincipal('lambda.amazonaws.com');
    const principalPolicyStatement = new PolicyStatement();
    principalPolicyStatement.addActions("sts:AssumeRole");
    principalPolicyStatement.effect = Effect.ALLOW;
    principal.addToPolicy(principalPolicyStatement);
    let roleName: string = RESOURCE_PREFIX + '_' + props.name + '_lambdaRole_' + props.aws_region;
    this.lambdaRole = new Role(this, 'Role', {
      assumedBy: principal,
      inlinePolicies: {
        'default_lambdaPolicy': lambdaPolicy
      },
      roleName: roleName
    });
    // Suppress cfn_nag findings that are accepted by design (wildcards, static names).
    const lambdaRoleResource = this.lambdaRole.node.findChild('Resource') as CfnRole;
    lambdaRoleResource.cfnOptions.metadata = {
      cfn_nag: {
        rules_to_suppress: [{
          id: 'W11',
          reason: 'Resource * is required due to the administrative nature of the solution.'
        },{
          id: 'W28',
          reason: 'Static names chosen intentionally to provide integration in cross-account permissions'
        }]
      }
    };

    // --- Lambda function source location and settings (with defaults) ---
    let lambdaName: string = props.name + '_lambda';
    let s3BucketForLambda: IBucket = Bucket.fromBucketAttributes(this, "s3BucketForLambda", {
      bucketName: props.distBucket + '-' + props.aws_region
    })
    // NOTE(review): source key is hard-coded under playbooks/CIS/ — confirm
    // this construct is only used for CIS playbooks.
    let lambda_src = `${props.distName}/${props.solutionVersion}/playbooks/CIS/${props.name.toLowerCase()}.py.zip`;
    let lambda_handler = undefined
    if (props.lambda_handler) {
      lambda_handler = props.lambda_handler
    } else {
      lambda_handler = props.name.toLowerCase() + '.lambda_handler';
    }
    let lambda_memsize = 256
    if (props.lambda_memsize) {
      lambda_memsize = props.lambda_memsize
    }
    let lambda_maxtime = 60
    if (props.lambda_maxtime) {
      lambda_maxtime = props.lambda_maxtime
    }

    // Event to Lambda
    // ---------------
    let eventRuleName: string = props.name + '_eventRule'
    // Security Hub custom action, created by a pre-existing helper Lambda
    // (SHARR-CustomAction) referenced by ARN.
    const customAction = new cdk.CustomResource(this, 'CustomAction', {
      serviceToken: 'arn:' + props.aws_partition + ':lambda:' + props.aws_region + ':' +
        props.aws_accountid + ':function:' + RESOURCE_PREFIX + '-SHARR-CustomAction',
      resourceType: 'Custom::ActionTarget',
      properties: {
        Name: props.custom_action_name,
        Description: props.description,
        Id: props.name,
      }
    });
    // Rule + Lambda pair: fires when the custom action is invoked on a matching finding.
    const e2lprops = {
      deployLambda: true,
      lambdaFunctionProps: {
        functionName: lambdaName,
        description: 'SHARR-' + props.description,
        runtime: lambda.Runtime.PYTHON_3_8,
        code: lambda.Code.fromBucket(s3BucketForLambda, lambda_src),
        handler: lambda_handler,
        memorySize: lambda_memsize,
        role: this.lambdaRole,
        timeout: cdk.Duration.seconds(lambda_maxtime),
        environment: {
          AWS_PARTITION: props.aws_partition,
          log_level: 'info',
          sendAnonymousMetrics: '{{resolve:ssm:/Solutions/' + props.solutionId + '/sendAnonymousMetrics:1}}'
        }
      },
      eventRuleProps: {
        description: props.description + ' event rule.',
        ruleName: eventRuleName,
        enabled: true,
        eventPattern: {
          source: ["aws.securityhub"],
          detailType: ["Security Hub Findings - Custom Action"],
          resources: [customAction.getAttString('Arn')],
          detail: {
            findings: {
              Title: props.findings,
              Workflow: workflowStatusFilter,
              Compliance: complianceStatusFilter
            }
          }
        }
      }
    }
    let eventToLambda = new e2l.EventsRuleToLambda(this, 'eventToPlaybook', e2lprops)
    // Merge any caller-supplied environment variables into the Lambda.
    if (props.lambda_env) {
      let envs = Object.keys(props.lambda_env);
      for (let env of envs) {
        eventToLambda.lambdaFunction.addEnvironment(env, props.lambda_env[env])
      }
    }
    const eventTarget = new eventstarget.LambdaFunction(eventToLambda.lambdaFunction);
    // Template parameter that toggles the automatic (no-human-action) rule.
    const enable_auto_remediation_param = new cdk.CfnParameter(this, 'AutoEnable', {
      description: `Enable/Disable automatically triggered remediation for ${props.description.slice(11)}. If enabled, findings for this control will be immediately remediated by the solution.`,
      type: "String",
      allowedValues: ["ENABLED", "DISABLED"],
      default: "DISABLED"
    });
    enable_auto_remediation_param.overrideLogicalId(`${props.name}AutoRemediation`)
    // Adding an automated event rule for the playbook
    const eventRule_auto = new events.Rule(this, 'AutoEventRule', {
      description: props.description + ' automatic remediation event rule.',
      ruleName: eventRuleName + '_automated',
      targets: [eventTarget]
    });
    // The rule's State is set directly from the CFN parameter value.
    const cfnEventRule_auto = eventRule_auto.node.defaultChild as events.CfnRule;
    cfnEventRule_auto.addPropertyOverride('State', enable_auto_remediation_param.valueAsString);
    eventRule_auto.addEventPattern({
      source: ["aws.securityhub"],
      detailType: ["Security Hub Findings - Imported"],
      detail: {
        findings: {
          Title: props.findings,
          Workflow: workflowStatusFilter,
          Compliance: complianceStatusFilter
        }
      }
    });
    {
      // Suppress cfn_nag findings on the generated Lambda (no VPC, no reserved concurrency).
      const childToMod = eventToLambda.lambdaFunction.node.findChild('Resource') as lambda.CfnFunction;
      childToMod.cfnOptions.metadata = {
        cfn_nag: {
          rules_to_suppress: [
            {
              id: 'W89',
              reason: 'There is no need to run this lambda in a VPC'
            },
            {
              id: 'W92',
              reason: 'There is no need for Reserved Concurrency'
            }
          ]
        }
      };
    }
  }
}
/*
* @author AWS Solutions Development
* @description SSM-based remediation parameters
* @type {playbookConstruct}
*/
import * as ssm from '@aws-cdk/aws-ssm';
import * as fs from 'fs';
import * as yaml from 'js-yaml';
/** Properties for SsmPlaybook (SSM Automation remediation document installer). */
export interface IssmPlaybookProps {
    securityStandard: string; // ex. AFSBP
    controlId: string;
    ssmDocPath: string;       // concatenated directly with the file name — include the trailing '/'
    ssmDocFileName: string;   // file name including extension (.json or .yaml)
}
/**
 * SsmPlaybook: reads an SSM Automation document from disk (JSON or YAML) and
 * installs it, gated by a template parameter ("Available" / "NOT Available").
 */
export class SsmPlaybook extends cdk.Construct {
  constructor(scope: cdk.Construct, id: string, props: IssmPlaybookProps) {
    super(scope, id);

    // Logical IDs may not contain dots (e.g. "CloudTrail.1" -> "CloudTrail1").
    let illegalChars = /[\.]/g;
    const enableParam = new cdk.CfnParameter(this, 'Enable ' + props.controlId, {
      type: "String",
      description: `Enable/disable availability of remediation for AFSBP Control ${props.controlId} in Security Hub Console Custom Actions. If NOT Available the remediation cannot be triggered from the Security Hub console in the Security Hub Admin account.`,
      default: "Available",
      allowedValues: ["Available", "NOT Available"]
    })
    enableParam.overrideLogicalId(`${props.securityStandard}${props.controlId.replace(illegalChars, '')}Active`)
    const installSsmDoc = new cdk.CfnCondition(this, 'Enable ' + props.controlId + ' Condition', {
      expression: cdk.Fn.conditionEquals(enableParam, "Available")
    })
    let ssmDocName = `SHARR_Remediation_${props.securityStandard}_${props.controlId}`
    let ssmDocFQFileName = `${props.ssmDocPath}${props.ssmDocFileName}`
    // Document format is inferred from the LAST FOUR characters of the filename.
    // NOTE(review): a ".yml" extension yields ".yml" here — neither 'json' nor
    // 'yaml' — leaving ssmDocSource undefined. Confirm all document files use
    // .json/.yaml extensions.
    let ssmDocType = props.ssmDocFileName.substr(props.ssmDocFileName.length - 4).toLowerCase()
    let ssmDocSource = undefined
    if (ssmDocType == 'json') {
      ssmDocSource = JSON.parse(fs.readFileSync(ssmDocFQFileName, 'utf8'))
    } else if (ssmDocType == 'yaml') {
      ssmDocSource = yaml.safeLoad(fs.readFileSync(ssmDocFQFileName, 'utf8'))
    }
    const AutoDoc = new ssm.CfnDocument(this, 'Automation Document', {
      content: ssmDocSource,
      documentType: 'Automation',
      name: ssmDocName
    })
    // The document is only created when the parameter is "Available".
    AutoDoc.cfnOptions.condition = installSsmDoc
  }
}
/** Properties for SsmRemediationRole (member-account remediation role). */
export interface ISsmRemediationRoleProps {
    // adminAccountNumber: string;
    solutionId: string;
    controlId: string;
    adminAccountNumber: string;  // account whose admin role may assume the created role
    remediationPolicy: Policy;   // control-specific permissions, attached inline
    adminRoleName: string;       // name of the admin-account role in the trust policy
    remediationRoleName: string; // name given to the created member role
}
/**
 * SsmRemediationRole: creates the member-account IAM role that the
 * admin-account role and the SSM service assume to execute remediations.
 * Base permissions (SSM automation, parameters, Security Hub updates) are
 * combined with the caller-supplied control-specific policy.
 */
export class SsmRemediationRole extends cdk.Construct {
  constructor(scope: cdk.Construct, id: string, props: ISsmRemediationRoleProps) {
    super(scope, id);
    const stack = cdk.Stack.of(this)
    const ssmDocPrefix = props.solutionId + '_Remediation_AFSBP_'
    const basePolicy = new Policy(this, 'SHARR-AFSBP-Member-Base-Policy')

    // Global Permissions
    // Allow passing the remediation role itself (e.g. to SSM Automation).
    const iamPerms = new PolicyStatement();
    iamPerms.addActions("iam:PassRole")
    iamPerms.effect = Effect.ALLOW
    iamPerms.addResources(
      'arn:' + stack.partition + ':iam::' + stack.account +
      ':role/' + props.remediationRoleName
    );
    basePolicy.addStatements(iamPerms)
    // Allow starting/inspecting only this control's automation document.
    const ssmPerms = new PolicyStatement();
    ssmPerms.addActions("ssm:StartAutomationExecution")
    ssmPerms.addActions("ssm:GetAutomationExecution")
    ssmPerms.effect = Effect.ALLOW
    ssmPerms.addResources(
      'arn:' + stack.partition + ':ssm:' + stack.region + ':' +
      stack.account + ':document/' + ssmDocPrefix + props.controlId
    );
    ssmPerms.addResources(
      'arn:' + stack.partition + ':ssm:' + stack.region + ':*:automation-definition/*'
    );
    ssmPerms.addResources(
      'arn:' + stack.partition + ':ssm:' + stack.region + ':' +
      stack.account + ':automation-execution/*'
    );
    basePolicy.addStatements(ssmPerms)
    // Read/write solution parameters under /Solutions/SO0111/.
    const ssmParmPerms = new PolicyStatement();
    ssmParmPerms.addActions("ssm:GetParameters")
    ssmParmPerms.addActions("ssm:GetParameter")
    ssmParmPerms.addActions("ssm:PutParameter")
    ssmParmPerms.effect = Effect.ALLOW
    ssmParmPerms.addResources(
      `arn:${stack.partition}:ssm:${stack.region}:${stack.account}:parameter/Solutions/SO0111/*`
    );
    basePolicy.addStatements(ssmParmPerms)
    // Metrics and finding-status updates (no resource-level scoping available).
    const sechubPerms = new PolicyStatement();
    sechubPerms.addActions("cloudwatch:PutMetricData")
    sechubPerms.addActions("securityhub:BatchUpdateFindings")
    sechubPerms.effect = Effect.ALLOW
    sechubPerms.addResources("*")
    basePolicy.addStatements(sechubPerms)
    {
      // Suppress cfn_nag W12: wildcard resources are accepted by design here.
      let resourceForException = basePolicy.node.defaultChild as CfnPolicy;
      resourceForException.cfnOptions.metadata = {
        cfn_nag: {
          rules_to_suppress: [{
            id: 'W12',
            reason: 'Resource * is required as the resource names are not predictable (randomly assigned).'
          }]
        }
      };
    }
    // AssumeRole Policy
    // Trust the admin-account role plus the SSM service principal.
    let principalPolicyStatement = new PolicyStatement();
    principalPolicyStatement.addActions("sts:AssumeRole");
    principalPolicyStatement.effect = Effect.ALLOW;
    let roleprincipal = new ArnPrincipal(
      'arn:' + stack.partition + ':iam::' + props.adminAccountNumber +
      ':role/' + props.adminRoleName
    );
    let principals = new CompositePrincipal(roleprincipal);
    principals.addToPolicy(principalPolicyStatement);
    let serviceprincipal = new ServicePrincipal('ssm.amazonaws.com')
    principals.addPrincipals(serviceprincipal);
    let memberRole = new Role(this, 'MemberAccountRole', {
      assumedBy: principals,
      roleName: props.remediationRoleName
    });
    memberRole.attachInlinePolicy(basePolicy)
    memberRole.attachInlinePolicy(props.remediationPolicy)
    // Suppress cfn_nag findings accepted by design (wildcards, static names).
    const memberRoleResource = memberRole.node.findChild('Resource') as CfnRole;
    memberRoleResource.cfnOptions.metadata = {
      cfn_nag: {
        rules_to_suppress: [{
          id: 'W11',
          reason: 'Resource * is required due to the administrative nature of the solution.'
        },{
          id: 'W28',
          reason: 'Static names chosen intentionally to provide integration in cross-account permissions'
        }]
      }
    };
  }
}
/*
* @author AWS Solutions Development
* @description SSM-based remediation trigger
* @type {trigger}
*/
/**
 * Properties for Trigger: wires Security Hub custom-action and automatic
 * event rules to the Step Functions orchestrator for one control.
 */
export interface ITriggerProps {
    description?: string,
    securityStandard: string; // ex. AFSBP
    securityStandardArn: string; // ex. arn:aws:securityhub:::standards/aws-foundational-security-best-practices/v/1.0.0
    controlId: string;
    targetArn: string; // ARN of the Step Functions state machine to start
}
/**
 * Trigger: for one control, creates a Security Hub custom action plus two
 * EventBridge rules (custom-action driven and fully automatic) that start the
 * orchestrator state machine. Installation is gated by template parameters.
 */
export class Trigger extends cdk.Construct {
  constructor(scope: cdk.Construct, id: string, props: ITriggerProps) {
    super(scope, id);
    const stack = cdk.Stack.of(this)
    // Logical IDs may not contain dots (e.g. "EC2.1" -> "EC21").
    let illegalChars = /[\.]/g;

    // Event to Step Function
    // ----------------------
    // Create CWE rule
    // Create custom action
    const enableRemediation = new cdk.CfnParameter(this, 'Enable ' + props.controlId, {
      type: "String",
      description: `Enable/disable remediation for AFSBP Control ${props.controlId}. Note
that remediations can be disabled on a per-account basis in the Member template.`,
      default: "Install",
      allowedValues: ["Install", "DO NOT Install"],
    })
    enableRemediation.overrideLogicalId(`${props.securityStandard}${props.controlId.replace(illegalChars, '')}Installation`)
    const installTrigger = new cdk.CfnCondition(this, `Enable ${props.controlId} Condition`, {
      expression: cdk.Fn.conditionEquals(enableRemediation, "Install")
    })
    let description = 'Remediate AFSBP ' + props.controlId
    if (props.description) {
      description = props.description
    }
    // Only NEW findings with FAILED/WARNING compliance are matched.
    let workflowStatusFilter = {
      "Status": [ "NEW" ]
    }
    let complianceStatusFilter = {
      "Status": [ "FAILED", "WARNING" ]
    }
    let customActionName: string = `${props.securityStandard} ${props.controlId}`
    let stateMachine = sfn.StateMachine.fromStateMachineArn(this, 'orchestrator', props.targetArn);
    // Note: Id is max 20 characters
    const customAction = new cdk.CustomResource(this, 'Custom Action', {
      serviceToken: `arn:${stack.partition}:lambda:${stack.region}:${stack.account}:function:SO0111-SHARR-CustomAction`,
      resourceType: 'Custom::ActionTarget',
      properties: {
        Name: customActionName,
        Description: description,
        Id: props.securityStandard + props.controlId.replace(illegalChars, '')
      }
    });
    {
      // The custom action is only created when remediation is installed.
      const childToMod = customAction.node.defaultChild as cdk.CfnCustomResource;
      childToMod.cfnOptions.condition = installTrigger
    }
    // Create an IAM role for Events to start the State Machine
    const eventsRole = new iam.Role(this, 'EventsRuleRole', {
      assumedBy: new iam.ServicePrincipal('events.amazonaws.com')
    });
    // Grant the start execution permission to the Events service
    stateMachine.grantStartExecution(eventsRole);
    // Create an event rule to trigger the step function
    const stateMachineTarget: events.IRuleTarget = {
      bind: () => ({
        id: '',
        arn: props.targetArn,
        role: eventsRole
      })
    };
    // Pattern for findings routed via the Security Hub console custom action.
    const eventPattern: events.EventPattern = {
      source: ["aws.securityhub"],
      detailType: ["Security Hub Findings - Custom Action"],
      resources: [ customAction.getAttString('Arn') ],
      detail: {
        findings: {
          ProductFields: {
            StandardsArn: [ props.securityStandardArn ],
            ControlId: [ props.controlId ],
          },
          Compliance: complianceStatusFilter
        }
      }
    }
    const custom_action_rule = new events.Rule(this, props.securityStandard + ' ' + props.controlId + ' Custom Action', {
      description: description,
      enabled: true,
      eventPattern: eventPattern,
      ruleName: `${props.securityStandard}_${props.controlId}_CustomAction`,
      targets: [stateMachineTarget]
    })
    {
      let childToMod = custom_action_rule.node.defaultChild as events.CfnRule;
      childToMod.cfnOptions.condition = installTrigger
    }
    // Parameter controlling whether the automatic rule starts ENABLED.
    const enable_auto_remediation_param = new cdk.CfnParameter(this, 'AutoEnable', {
      description: "This will fully enable automated remediation for "+ props.securityStandard + ' ' + props.controlId,
      type: "String",
      allowedValues: ["ENABLED", "DISABLED"],
      default: "DISABLED"
    });
    enable_auto_remediation_param.overrideLogicalId(`${props.securityStandard}${props.controlId.replace(illegalChars, '')}AutoTrigger`)
    // Pattern for findings imported directly (no human action required).
    const triggerPattern: events.EventPattern = {
      source: ["aws.securityhub"],
      detailType: ["Security Hub Findings - Imported"],
      detail: {
        findings: {
          ProductFields: {
            StandardsArn: [ props.securityStandardArn ],
            ControlId: [ props.controlId ]
          },
          Workflow: workflowStatusFilter,
          Compliance: complianceStatusFilter
        }
      }
    }
    // Adding an automated event rule for the playbook
    const eventRule_auto = new events.Rule(this, 'AutoEventRule', {
      description: description + ' automatic remediation trigger event rule.',
      ruleName: `${props.securityStandard}_${props.controlId}_AutoTrigger`,
      targets: [stateMachineTarget],
      eventPattern: triggerPattern
    });
    // State comes straight from the parameter; rule exists only when installed.
    const cfnEventRule_auto = eventRule_auto.node.defaultChild as events.CfnRule;
    cfnEventRule_auto.addPropertyOverride('State', enable_auto_remediation_param.valueAsString);
    cfnEventRule_auto.cfnOptions.condition = installTrigger
  }
}
|
-- Earliest-created customer: sort by creation date ascending, take the first row.
-- NOTE(review): DATE(date_created) truncates the time component, so ties within
-- the same day are broken arbitrarily — confirm this is intended.
SELECT name, company
FROM customers
ORDER BY DATE(date_created) ASC
LIMIT 1;
// Print the integers 1 through 10, each followed by a space, then end the line.
int current = 1;
while (current <= 10)
{
    Console.Write(current);
    Console.Write(" ");
    current++;
}
Console.WriteLine();
<gh_stars>1-10
import builder from 'focus-core/util/url/builder';
import {apiRoot} from './index';
// Base URL for all movie endpoints, e.g. "<apiRoot>movies/".
const movieRoot = `${apiRoot}movies/`;
// URL builders for the movie REST API. The '${id}' / '${skip}' placeholders are
// literal (single-quoted) — they are interpolated later by the builder, not here.
export default {
create: builder(movieRoot, 'POST'),
casting: builder(movieRoot + '${id}/casting', 'GET'),
load: builder(movieRoot + '${id}', 'GET'),
search: builder(movieRoot + 'search?listState.skip=${skip}&listState.sortDesc=${sortDesc}&listState.top=${top}', 'POST'),
update: builder(movieRoot + '${id}', 'PUT')
};
|
-- Schema for a simple movie-review application.

-- Movie catalogue.
CREATE TABLE movies (
id INT AUTO_INCREMENT NOT NULL,
title VARCHAR(255) NOT NULL,
year INT NOT NULL,
description TEXT,
PRIMARY KEY (id)
);
-- Reviews; multiple reviews per movie allowed (no uniqueness constraint).
CREATE TABLE reviews (
id INT AUTO_INCREMENT NOT NULL,
movie_id INT NOT NULL,
reviewer_name VARCHAR(255) NOT NULL,
rating INT NOT NULL,
comment TEXT,
PRIMARY KEY (id)
);
-- Deleting a movie cascades to its reviews.
ALTER TABLE reviews ADD CONSTRAINT fk_reviews_movies FOREIGN KEY (movie_id) REFERENCES movies(id) ON DELETE CASCADE;
/*
*
*/
package net.community.chest.regexp;
import java.util.regex.Pattern;
import net.community.chest.lang.StringUtil;
import net.community.chest.util.compare.AbstractComparator;
/**
* <P>Copyright as per GPLv2</P>
* @author <NAME>.
* @since Feb 14, 2011 9:20:35 AM
*/
/**
 * Compares {@link Pattern} instances by their source pattern strings
 * (null-safe; pattern text comparison is always case-sensitive).
 */
public class PatternComparator extends AbstractComparator<Pattern> {
    private static final long serialVersionUID = 8142223814330028243L;

    public PatternComparator (boolean ascending)
    {
        // NOTE(review): presumably AbstractComparator's 2nd argument means
        // "reverse order", hence the negation — confirm in AbstractComparator.
        super(Pattern.class, !ascending);
    }
    /*
     * @see net.community.chest.util.compare.AbstractComparator#compareValues(java.lang.Object, java.lang.Object)
     */
    @Override
    public int compareValues (Pattern v1, Pattern v2)
    {
        // Extract the textual pattern source, tolerating null inputs.
        final String s1 = (v1 != null) ? v1.pattern() : null;
        final String s2 = (v2 != null) ? v2.pattern() : null;
        return StringUtil.compareDataStrings(s1, s2, true /* patterns are always case-sensitive */);
    }

    /** Ready-made ascending/descending comparator instances. */
    public static final PatternComparator ASCENDING=new PatternComparator(true),
        DESCENDING=new PatternComparator(false);
}
|
#!/usr/bin/env bash
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Print the supported sub-commands of this script (used by main's fallback).
function print_usage() {
echo -e "\nUsage:
[OPTION]
\nOptions:
test: run all unit tests
check_style: run code style check
"
}
# Run the pre-commit hooks (incl. clang-format) over the whole tree.
# On failure, show the resulting diff and exit non-zero.
# NOTE(review): the EXIT trap invokes 'abort', which is defined elsewhere in
# this script — confirm it exists; the trap is cleared (': 0') on success.
function check_style() {
trap 'abort' 0
set -e
export PATH=/usr/bin:$PATH
pre-commit install
clang-format --version
if ! pre-commit run -a ; then
git diff
exit 1
fi
trap : 0
}
# Run the full ALF unit-test suite. Enters the alf/ package directory so
# module paths resolve, then returns to the parent directory.
# (This function shadows the shell builtin 'test'; function lookup wins,
# so main()'s dispatcher calls this function.)
function test() {
    cd alf
    python3 -m unittest -v \
        alf.algorithms.actor_critic_algorithm_test \
        alf.algorithms.actor_critic_loss_test \
        alf.algorithms.agent_test \
        alf.algorithms.algorithm_test \
        alf.algorithms.containers_test \
        alf.algorithms.data_transformer_test \
        alf.algorithms.ddpg_algorithm_test \
        alf.algorithms.diayn_algorithm_test \
        alf.algorithms.entropy_target_algorithm_test \
        alf.algorithms.functional_particle_vi_algorithm_test \
        alf.algorithms.hypernetwork_algorithm_test \
        alf.algorithms.icm_algorithm_test \
        alf.algorithms.generator_test \
        alf.algorithms.inverse_mvp_algorithm_test \
        alf.algorithms.lagrangian_reward_weight_algorithm_test \
        alf.algorithms.mcts_algorithm_test \
        alf.algorithms.merlin_algorithm_test \
        alf.algorithms.mi_estimator_test \
        alf.algorithms.muzero_algorithm_test \
        alf.algorithms.particle_vi_algorithm_test \
        alf.algorithms.ppo_algorithm_test \
        alf.algorithms.predictive_representation_learner_test \
        alf.algorithms.prior_actor_test \
        alf.algorithms.rl_algorithm_test \
        alf.algorithms.sarsa_algorithm_test \
        alf.algorithms.sac_algorithm_test \
        alf.algorithms.oac_algorithm_test \
        alf.algorithms.trac_algorithm_test \
        alf.algorithms.vae_test \
        alf.bin.train_play_test \
        alf.data_structures_test \
        alf.device_ctx_test \
        alf.environments.gym_wrappers_test \
        alf.environments.parallel_environment_test \
        alf.environments.process_environment_test \
        alf.environments.random_alf_environment_test \
        alf.environments.simple.noisy_array_test \
        alf.environments.suite_go_test \
        alf.environments.suite_gym_test \
        alf.environments.suite_mario_test \
        alf.environments.suite_socialbot_test \
        alf.environments.suite_tic_tac_toe_test \
        alf.environments.suite_unittest_test \
        alf.environments.alf_environment_test \
        alf.environments.alf_gym_wrapper_test \
        alf.environments.alf_wrappers_test \
        alf.experience_replayers.replay_buffer_test \
        alf.experience_replayers.segment_tree_test \
        alf.layers_test \
        alf.metrics.metrics_test \
        alf.nest.nest_test \
        alf.networks.action_encoder_test \
        alf.networks.actor_distribution_networks_test \
        alf.networks.actor_networks_test \
        alf.networks.containers_test \
        alf.networks.critic_networks_test \
        alf.networks.encoding_networks_test \
        alf.networks.memory_test \
        alf.networks.network_test \
        alf.networks.networks_test \
        alf.networks.param_networks_test \
        alf.networks.preprocessors_test \
        alf.networks.projection_networks_test \
        alf.networks.q_networks_test \
        alf.networks.relu_mlp_test \
        alf.networks.value_networks_test \
        alf.optimizers.optimizers_test \
        alf.optimizers.trusted_updater_test \
        alf.summary.summary_ops_test \
        alf.tensor_specs_test \
        alf.trainers.policy_trainer_test \
        alf.utils.checkpoint_utils_test \
        alf.utils.common_test \
        alf.utils.data_buffer_test \
        alf.utils.dist_utils_test \
        alf.utils.lean_function_test \
        alf.utils.math_ops_test \
        alf.utils.normalizers_test \
        alf.utils.tensor_utils_test \
        alf.utils.value_ops_test
    # BUGFIX: the last module line previously ended with a trailing '\',
    # which turned 'cd ..' into two extra arguments to unittest (so the
    # directory was never restored). The backslash has been removed.
    cd ..
}
# Entry point: dispatch on the first CLI argument (test | check_style).
# Any other (or missing) argument prints usage and exits successfully.
function main() {
    set -e
    local CMD=$1
    case $CMD in
        check_style)
            check_style
            ;;
        test)
            test
            ;;
        *)
            print_usage
            exit 0
            ;;
    esac
}
# BUGFIX: quote "$@" so arguments containing whitespace are passed intact
# (unquoted $@ undergoes word splitting and globbing).
main "$@"
|
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs';
import { HttpRequestService } from './http-request-services';
import { OcApiPaths } from '../oc-ng-common-service.module';
import { Page } from '../model/api/page.model';
import { Transaction } from '../model/api/transaction.model';
import { HttpHeaders } from '@angular/common/http';
import { OcHttpParams } from '../model/api/http-params-encoder-model';
/**
 * Service wrapping the OpenChannel Transactions API.
 *
 * [OpenChannel Documentation]{@link https://support.openchannel.io/documentation/api/#426-transactions}
 *
 * Covered endpoints:
 *  - GET    'v2/transactions'
 *  - GET    'v2/transactions/{transactionId}'
 *  - POST   'v2/transactions/{transactionId}'
 *  - DELETE 'v2/transactions/{transactionId}'
 */
@Injectable({
    providedIn: 'root',
})
export class TransactionsService {
    constructor(public httpRequest: HttpRequestService, private apiPaths: OcApiPaths) {}

    /**
     * Fetches one page of the current user's transactions.
     *
     * @param pageNumber - (optional) 1-based page index; defaults to 1.
     * @param limit - (optional) page size; defaults to 100.
     * @param sort - (optional) sort document, serialized to JSON.
     *  [OpenChannel Documentation]{@link https://support.openchannel.io/documentation/api/#381-sort-document}
     * @param query - (optional) search query document, serialized to JSON.
     *  [OpenChannel Documentation]{@link https://support.openchannel.io/documentation/api/#380-query-document}
     * @returns {Observable<Page<Transaction>>} `Observable<Page<Transaction>>`
     *
     * ### Example
     *
     * `getTransactionsList(1, 10, { date: 1 }, { type: 'payment' });`
     */
    getTransactionsList(pageNumber: number = 1, limit: number = 100, sort: any = {}, query: any = {}): Observable<Page<Transaction>> {
        let requestParams = new OcHttpParams();
        requestParams = requestParams.append('pageNumber', `${pageNumber}`);
        requestParams = requestParams.append('limit', `${limit}`);
        requestParams = requestParams.append('sort', JSON.stringify(sort));
        requestParams = requestParams.append('query', JSON.stringify(query));
        return this.httpRequest.get(`${this.apiPaths.transactions}`, { params: requestParams });
    }

    /**
     * Fetches a single transaction by its id.
     *
     * @param {string} transactionId - The id of the transaction to return.
     * @param {HttpHeaders} headers - (optional) HTTP headers for the request.
     * @returns {Observable<Transaction>} `Observable<Transaction>`
     *
     * ### Example
     *
     * `getTransactionById('transaction-id');`
     */
    getTransactionById(transactionId: string, headers: HttpHeaders = new HttpHeaders()): Observable<Transaction> {
        const url = `${this.apiPaths.transactions}/${transactionId}`;
        return this.httpRequest.get(url, { headers });
    }

    /**
     * Attaches/updates a custom data object on a transaction.
     *
     * @param {string} transactionId - The id of the transaction to update.
     * @param {string} customData - A custom JSON object to attach to this transaction.
     * @param {HttpHeaders} headers - (optional) HTTP headers for the request.
     * @returns {Observable<Transaction>} `Observable<Transaction>`
     *
     * ### Example
     *
     * `updateTransactionById('transaction-id', { department: 'billing' });`
     */
    updateTransactionById(transactionId: string, customData: any, headers: HttpHeaders = new HttpHeaders()): Observable<Transaction> {
        const url = `${this.apiPaths.transactions}/${transactionId}`;
        return this.httpRequest.post(url, { customData }, { headers });
    }

    /**
     * Deletes a transaction by its id.
     *
     * @param {string} transactionId - The id of the transaction to delete.
     * @param {HttpHeaders} headers - (optional) HTTP headers for the request.
     * @returns {Observable<{}>} `Observable<{}>`
     *
     * ### Example
     *
     * `deleteTransactionById('transaction-id');`
     */
    deleteTransactionById(transactionId: string, headers: HttpHeaders = new HttpHeaders()): Observable<{}> {
        const url = `${this.apiPaths.transactions}/${transactionId}`;
        return this.httpRequest.delete(url, { headers });
    }
}
|
//
// SecondContentViewController.h
// Onus
//
// Created by User on 2016-02-12.
// Copyright © 2016 <NAME>. All rights reserved.
//
#import <UIKit/UIKit.h>
// Declares SecondContentViewController; no public API beyond UIViewController
// is exposed — behavior lives in the corresponding .m file.
@interface SecondContentViewController : UIViewController
@end
|
<!-- User sign-up form: posts name/email/password to /createUser.
     FIX: labels are now associated with their inputs via for/id (accessibility,
     clickable labels) and mandatory fields are marked required so empty
     submissions are rejected client-side. -->
<form action="/createUser" method="POST">
    <label for="name">Name: </label>
    <input type="text" id="name" name="name" required>
    <br>
    <label for="email">Email: </label>
    <input type="email" id="email" name="email" required>
    <br>
    <label for="password">Password: </label>
    <input type="password" id="password" name="password" required>
    <br>
    <input type="submit" value="Create User">
</form>
-- Orders placed after the "average" order date.
-- NOTE(review): AVG() over a date column is nonstandard — MySQL coerces dates
-- to numbers, which can yield a meaningless average; consider
-- FROM_UNIXTIME(AVG(UNIX_TIMESTAMP(order_date))) instead. Confirm intent.
-- NOTE(review): SELECT * combined with GROUP BY o1.order_date selects
-- nondeterministic values for non-grouped columns (rejected under
-- ONLY_FULL_GROUP_BY) — verify whether the GROUP BY is needed at all.
SELECT *
FROM orders o1
INNER JOIN (SELECT AVG(order_date) AS avg_order_date
FROM orders) o2
ON o1.order_date > o2.avg_order_date
GROUP BY o1.order_date;
/*******************************************************************************
* This file is part of the Symfony eclipse plugin.
*
* (c) <NAME> <<EMAIL>>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
******************************************************************************/
package com.dubture.symfony.core.codeassist.strategies;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.eclipse.dltk.core.IScriptProject;
import org.eclipse.dltk.core.ISourceRange;
import org.eclipse.dltk.internal.core.ModelElement;
import org.eclipse.jface.text.BadLocationException;
import org.eclipse.php.core.codeassist.ICompletionContext;
import org.eclipse.php.core.codeassist.ICompletionReporter;
import org.eclipse.php.internal.core.codeassist.strategies.MethodParameterKeywordStrategy;
import com.dubture.symfony.core.codeassist.contexts.ServiceContainerContext;
import com.dubture.symfony.core.model.Service;
import com.dubture.symfony.core.model.SymfonyModelAccess;
/**
* CompletionStrategy to provide service names for DI::get calls like
*
*
* <pre>
*
* // inside a ContainerAware interface
* $this->get('|
* $this->container->get('|
*
* </pre>
*
*
*
* @author "<NAME> <<EMAIL>>"
*
*/
@SuppressWarnings({ "restriction" })
public class ServiceContainerCompletionStrategy extends
        MethodParameterKeywordStrategy {

    public ServiceContainerCompletionStrategy(ICompletionContext context) {
        super(context);
    }

    /**
     * Reports every known service whose id starts with the prefix currently
     * typed inside the get('...') call (case-insensitive match).
     */
    @Override
    public void apply(ICompletionReporter reporter) throws BadLocationException {
        final ServiceContainerContext containerContext = (ServiceContainerContext) getContext();
        final IScriptProject scriptProject = getCompanion().getSourceModule().getScriptProject();
        final List<Service> services =
                SymfonyModelAccess.getDefault().findServices(scriptProject.getPath());
        final ISourceRange replacementRange = getReplacementRange(containerContext);
        final String prefix = containerContext.getPrefix();

        // No service index available for this project — nothing to report.
        if (services == null) {
            return;
        }
        for (final Service candidate : services) {
            if (!StringUtils.startsWithIgnoreCase(candidate.getId(), prefix)) {
                continue;
            }
            final ModelElement sourceModule = (ModelElement) getCompanion().getSourceModule();
            final Service reported = new Service(sourceModule, candidate.getElementName());
            reported.setId(candidate.getId());
            reporter.reportType(reported, "", replacementRange);
        }
    }
}
|
#!/usr/bin/env bash
# VPSTOOLBOX
# 一键安装Trojan-GFW代理,Hexo博客,Nextcloud等應用程式.
# One click install Trojan-gfw Hexo Nextcloud and so on.
# MIT License
#
# Copyright (c) 2019-2021 JohnRosen
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#如果你在使用VPSToolBox时遇到任何问题,请仔细阅读README.md/code或者**通过 [Telegram](https://t.me/vpstoolbox_chat)请求支援** !
clear
set +e
# System requirements: root, x86_64, >100MB RAM, >3GB free disk.
if [[ $(id -u) != 0 ]]; then
echo -e "请使用root或者sudo用户运行,Please run this script as root or sudoer."
exit 1
fi
if [[ $(uname -m 2> /dev/null) != x86_64 ]]; then
echo -e "本程式仅适用于x86_64机器,Please run this script on x86_64 machine."
exit 1
fi
if [[ $(free -m | grep Mem | awk '{print $2}' 2> /dev/null) -le "100" ]]; then
echo -e "本程式需要至少100MB记忆体才能正常运作,Please run this script on machine with more than 100MB total ram."
exit 1
fi
if [[ $(df $PWD | awk '/[0-9]%/{print $(NF-2)}' 2> /dev/null) -le "3000000" ]]; then
echo -e "本程式需要至少3GB磁碟空间才能正常运作,Please run this script on machine with more than 3G free disk space."
exit 1
fi
## Predefined env: non-interactive apt and composer-as-root.
export DEBIAN_FRONTEND=noninteractive
export COMPOSER_ALLOW_SUPERUSER=1
# ----------------------------------
# Colors
# ----------------------------------
NOCOLOR='\033[0m'
RED='\033[0;31m'
GREEN='\033[0;32m'
ORANGE='\033[0;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
LIGHTGRAY='\033[0;37m'
DARKGRAY='\033[1;30m'
LIGHTRED='\033[1;31m'
LIGHTGREEN='\033[1;32m'
YELLOW='\033[1;33m'
LIGHTBLUE='\033[1;34m'
LIGHTPURPLE='\033[1;35m'
LIGHTCYAN='\033[1;36m'
WHITE='\033[1;37m'
### Legacy defined colors (bare codes for colorEcho)
ERROR="31m" # Error message
SUCCESS="32m" # Success message
WARNING="33m" # Warning message
INFO="36m" # Info message
LINK="92m" # Share Link Message
# Predefined install flags — consumed later in the script; do not change!
install_bbr=1
install_nodejs=1
install_trojan=1
trojanport="443"
tcp_fastopen="false"
## Remove the Tencent Cloud security/monitoring agents (yunjing), if present.
if [[ -d /usr/local/qcloud ]]; then
# disable tencent cloud process
rm -rf /usr/local/sa
rm -rf /usr/local/agenttools
rm -rf /usr/local/qcloud
# disable huawei cloud process
rm -rf /usr/local/telescope
fi
## Remove the Alibaba Cloud security agent (aegis), if present.
if [[ -d /usr/local/aegis ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/uninstall-aegis.sh
source uninstall-aegis.sh
uninstall_aegis
fi
# Disable cloud-init (removes its systemd units).
rm -rf /lib/systemd/system/cloud*
# colorEcho COLOR TEXT...
# Prints TEXT wrapped in the ANSI color code given as the first argument
# (e.g. "32m") and resets the terminal color afterwards. COLOR is left set
# as a global, matching the original behaviour.
colorEcho(){
set +e
COLOR="$1"
shift
echo -e "\033[${COLOR}$*\033[0m"
}
# Configure the system language (Chinese or English). The choice is persisted
# in /root/.trojan/language.json so the prompt only appears on first run.
setlanguage(){
set +e
# First run: create the state and certificate directories.
if [[ ! -d /root/.trojan/ ]]; then
mkdir /root/.trojan/
mkdir /etc/certs/
fi
# Reuse a previously saved choice if one exists.
if [[ -f /root/.trojan/language.json ]]; then
language="$( jq -r '.language' "/root/.trojan/language.json" )"
fi
# Loop until a language is chosen; a temporary C.UTF-8 locale keeps whiptail
# rendering sane before the real locale is generated.
while [[ -z $language ]]; do
export LANGUAGE="C.UTF-8"
export LANG="C.UTF-8"
export LC_ALL="C.UTF-8"
if (whiptail --title "System Language Setting" --yes-button "中文" --no-button "English" --yesno "系统语言使用中文或英文(Use Chinese or English)?" 8 68); then
# chattr -i clears any immutable flag so the file can be rewritten.
chattr -i /etc/locale.gen
cat > '/etc/locale.gen' << EOF
zh_TW.UTF-8 UTF-8
en_US.UTF-8 UTF-8
EOF
language="cn"
locale-gen
update-locale
chattr -i /etc/default/locale
cat > '/etc/default/locale' << EOF
LANGUAGE="zh_TW.UTF-8"
LANG="zh_TW.UTF-8"
LC_ALL="zh_TW.UTF-8"
EOF
#apt-get install manpages-zh -y
# Persist the choice for future runs.
cat > '/root/.trojan/language.json' << EOF
{
"language": "$language"
}
EOF
else
chattr -i /etc/locale.gen
cat > '/etc/locale.gen' << EOF
zh_TW.UTF-8 UTF-8
en_US.UTF-8 UTF-8
EOF
language="en"
locale-gen
update-locale
chattr -i /etc/default/locale
cat > '/etc/default/locale' << EOF
LANGUAGE="en_US.UTF-8"
LANG="en_US.UTF-8"
LC_ALL="en_US.UTF-8"
EOF
cat > '/root/.trojan/language.json' << EOF
{
"language": "$language"
}
EOF
fi
done
# Export the chosen locale into the current shell session.
if [[ $language == "cn" ]]; then
export LANGUAGE="zh_TW.UTF-8"
export LANG="zh_TW.UTF-8"
export LC_ALL="zh_TW.UTF-8"
else
export LANGUAGE="en_US.UTF-8"
export LANG="en_US.UTF-8"
export LC_ALL="en_US.UTF-8"
fi
}
## Write the persistent configuration file consumed by readconfig().
# Values are interpolated unquoted into the JSON heredoc, so they must not
# contain double quotes or backslashes.
prasejson(){
set +e
cat > '/root/.trojan/config.json' << EOF
{
"installed": "1",
"domain": "$domain",
"password1": "$password1",
"password2": "$password2",
"qbtpath": "$qbtpath",
"trackerpath": "$trackerpath",
"trackerstatuspath": "$trackerstatuspath",
"ariapath": "$ariapath",
"ariapasswd": "$ariapasswd",
"filepath": "$filepath",
"netdatapath": "$netdatapath",
"check_trojan": "$check_trojan",
"check_tjp": "$check_tjp",
"check_dns": "$check_dns",
"check_rss": "$check_rss",
"check_qbt": "$check_qbt",
"check_aria": "$check_aria",
"check_file": "$check_file",
"check_speed": "$check_speed",
"check_mariadb": "$check_mariadb",
"check_fail2ban": "$check_fail2ban",
"check_mail": "$check_mail",
"check_qbt_origin": "$check_qbt_origin",
"check_tracker": "$check_tracker",
"check_cloud": "$check_cloud",
"check_tor": "$check_tor",
"check_chat": "$check_chat",
"check_i2p": "$check_i2p",
"check_ss": "$check_ss",
"check_echo": "$check_echo",
"check_rclone": "$check_rclone",
"fastopen": "${fastopen}",
"tor_name": "$tor_name"
}
EOF
}
## Read the persistent configuration written by prasejson() back into globals.
# Missing keys come back from jq as the string "null".
readconfig(){
domain="$( jq -r '.domain' "/root/.trojan/config.json" )"
password2="$( jq -r '.password2' "/root/.trojan/config.json" )"
password1="$( jq -r '.password1' "/root/.trojan/config.json" )"
qbtpath="$( jq -r '.qbtpath' "/root/.trojan/config.json" )"
trackerpath="$( jq -r '.trackerpath' "/root/.trojan/config.json" )"
# Fix: this previously read the nonexistent ".username" key, which always
# left trackerstatuspath as "null"; read the matching key written by prasejson.
trackerstatuspath="$( jq -r '.trackerstatuspath' "/root/.trojan/config.json" )"
ariapath="$( jq -r '.ariapath' "/root/.trojan/config.json" )"
ariapasswd="$( jq -r '.ariapasswd' "/root/.trojan/config.json" )"
filepath="$( jq -r '.filepath' "/root/.trojan/config.json" )"
netdatapath="$( jq -r '.netdatapath' "/root/.trojan/config.json" )"
tor_name="$( jq -r '.tor_name' "/root/.trojan/config.json" )"
check_trojan="$( jq -r '.check_trojan' "/root/.trojan/config.json" )"
check_tjp="$( jq -r '.check_tjp' "/root/.trojan/config.json" )"
check_dns="$( jq -r '.check_dns' "/root/.trojan/config.json" )"
check_rss="$( jq -r '.check_rss' "/root/.trojan/config.json" )"
check_qbt="$( jq -r '.check_qbt' "/root/.trojan/config.json" )"
check_aria="$( jq -r '.check_aria' "/root/.trojan/config.json" )"
check_file="$( jq -r '.check_file' "/root/.trojan/config.json" )"
check_speed="$( jq -r '.check_speed' "/root/.trojan/config.json" )"
check_mariadb="$( jq -r '.check_mariadb' "/root/.trojan/config.json" )"
check_fail2ban="$( jq -r '.check_fail2ban' "/root/.trojan/config.json" )"
check_mail="$( jq -r '.check_mail' "/root/.trojan/config.json" )"
check_qbt_origin="$( jq -r '.check_qbt_origin' "/root/.trojan/config.json" )"
check_tracker="$( jq -r '.check_tracker' "/root/.trojan/config.json" )"
check_cloud="$( jq -r '.check_cloud' "/root/.trojan/config.json" )"
check_tor="$( jq -r '.check_tor' "/root/.trojan/config.json" )"
check_chat="$( jq -r '.check_chat' "/root/.trojan/config.json" )"
check_i2p="$( jq -r '.check_i2p' "/root/.trojan/config.json" )"
check_ss="$( jq -r '.check_ss' "/root/.trojan/config.json" )"
check_echo="$( jq -r '.check_echo' "/root/.trojan/config.json" )"
check_rclone="$( jq -r '.check_rclone' "/root/.trojan/config.json" )"
fastopen="$( jq -r '.fastopen' "/root/.trojan/config.json" )"
}
## Clean up: persist config, purge temporary apt packages, finalize DNS,
## and remove the downloaded module .sh files.
clean_env(){
prasejson
apt-get purge python-pil python3-qrcode -q -y
apt-get autoremove -y
cd /opt/netdata/bin
# NOTE(review): this claims the node to a hardcoded netdata cloud account
# (token/room embedded in the script) — confirm this is intended for all users.
bash netdata-claim.sh -token=llFcKa-42N035f4WxUYZ5VhSnKLBYQR9Se6HIrtXysmjkMBHiLCuiHfb9aEJmXk0hy6V_pZyKMEz_QN30o2s7_OsS7sKEhhUTQGfjW0KAG5ahWhbnCvX8b_PW_U-256otbL5CkM -rooms=38e38830-7b2c-4c34-a4c7-54cacbe6dbb9 -url=https://app.netdata.cloud &>/dev/null
cd
# If dnscrypt-proxy was installed, point the system resolver at it.
if [[ ${install_dnscrypt} == 1 ]]; then
if [[ ${dist} = ubuntu ]]; then
systemctl stop systemd-resolved
systemctl disable systemd-resolved
fi
if [[ $(systemctl is-active dnsmasq) == active ]]; then
systemctl stop dnsmasq
fi
echo "nameserver 127.0.0.1" > /etc/resolv.conf
systemctl restart dnscrypt-proxy
echo "nameserver 127.0.0.1" > /etc/resolvconf/resolv.conf.d/base
resolvconf -u
fi
cd
# Remove all module scripts downloaded into $HOME during installation.
rm -rf *.sh
clear
}
## Detect the distribution and install bootstrap dependencies.
# Sets $dist to "ubuntu" or "debian"; exits with an error on anything else.
initialize(){
set +e
TERM=ansi whiptail --title "初始化中(initializing)" --infobox "初始化中...(initializing)" 7 68
# Undo a previous IPv6-disable drop-in, if present, by renaming it aside.
if [[ -f /etc/sysctl.d/60-disable-ipv6.conf ]]; then
mv /etc/sysctl.d/60-disable-ipv6.conf /etc/sysctl.d/60-disable-ipv6.conf.bak
fi
if cat /etc/*release | grep ^NAME | grep -q Ubuntu; then
dist=ubuntu
# Re-enable any commented-out netplan config and apply it.
if [[ -f /etc/sysctl.d/60-disable-ipv6.conf.bak ]]; then
sed -i 's/#//g' /etc/netplan/01-netcfg.yaml
netplan apply
fi
apt-get update
apt-get install sudo whiptail curl dnsutils locales lsb-release jq -y
elif cat /etc/*release | grep ^NAME | grep -q Debian; then
dist=debian
apt-get update
apt-get install sudo whiptail curl dnsutils locales lsb-release jq -y
else
whiptail --title "OS not supported(操作系统不支援)" --msgbox "Please use Debian or Ubuntu to run this project.(请使用Debian或者Ubuntu运行本项目)" 8 68
echo "OS not supported(操作系统不支援),Please use Debian or Ubuntu to run this project.(请使用Debian或者Ubuntu运行本项目)"
exit 1;
fi
}
## First-run initialization: set resolvers, stop conflicting web servers,
## and record the machine's public/local IP addresses.
install_initial(){
clear
install_status="$( jq -r '.installed' "/root/.trojan/config.json" )"
# Only perform one-time setup if this box has not been installed before.
if [[ $install_status != 1 ]]; then
cp /etc/resolv.conf /etc/resolv.conf.bak1
echo "nameserver 1.1.1.1" >> /etc/resolv.conf
echo "nameserver 1.0.0.1" >> /etc/resolv.conf
echo "nameserver 8.8.8.8" >> /etc/resolv.conf
echo "nameserver 1.1.1.1" >> /etc/resolvconf/resolv.conf.d/base
echo "nameserver 1.0.0.1" >> /etc/resolvconf/resolv.conf.d/base
echo "nameserver 8.8.8.8" >> /etc/resolvconf/resolv.conf.d/base
resolvconf -u
prasejson
# Stop any web server that would conflict with nginx on ports 80/443.
if [[ $(systemctl is-active caddy) == active ]]; then
systemctl stop caddy
systemctl disable caddy
fi
if [[ $(systemctl is-active apache2) == active ]]; then
systemctl stop apache2
systemctl disable apache2
fi
if [[ $(systemctl is-active httpd) == active ]]; then
systemctl stop httpd
systemctl disable httpd
fi
# NOTE(review): public IP lookup uses a hardcoded ipinfo.io token embedded
# in the script — confirm this shared token is intentional.
curl -s https://ipinfo.io?token=56c375418c62c9 --connect-timeout 300 > /root/.trojan/ip.json
myip="$( jq -r '.ip' "/root/.trojan/ip.json" )"
localip=$(ip -4 a | grep inet | grep "scope global" | awk '{print $2}' | cut -d'/' -f1)
myipv6=$(ip -6 a | grep inet6 | grep "scope global" | awk '{print $2}' | cut -d'/' -f1)
if [[ -n ${myipv6} ]]; then
curl -s https://ipinfo.io/${myipv6}?token=56c375418c62c9 --connect-timeout 300 > /root/.trojan/ipv6.json
fi
fi
myip="$( jq -r '.ip' "/root/.trojan/ip.json" )"
localip=$(ip -4 a | grep inet | grep "scope global" | awk '{print $2}' | cut -d'/' -f1)
myipv6=$(ip -6 a | grep inet6 | grep "scope global" | awk '{print $2}' | cut -d'/' -f1)
# Public IP differing from the local IP usually means NAT (Aliyun/GCP/Azure)
# or a dynamic address; warn the user.
if [[ ${myip} != ${localip} ]]; then
whiptail --title "Warning" --msgbox "本机ip与公网ip不一致,可能为阿里云,gcp,azure或者动态ip。" 8 68
fi
}
## Install the base packages every module depends on.
install_base(){
set +e
TERM=ansi whiptail --title "安装中" --infobox "安装基础软件中..." 7 68
apt-get update
colorEcho ${INFO} "Installing all necessary Software"
apt-get install sudo git curl xz-utils wget apt-transport-https gnupg lsb-release python-pil unzip resolvconf ntpdate systemd dbus ca-certificates locales iptables software-properties-common cron e2fsprogs less haveged neofetch -q -y
apt-get install python3-qrcode python-dnspython -q -y
# ntp prompts interactively; feed it "y" answers non-interactively.
sh -c 'echo "y\n\ny\ny\n" | DEBIAN_FRONTEND=noninteractive apt-get install ntp -q -y'
clear
}
## Download and run the per-module installer scripts selected by the user.
# Each install_<name> flag is set by userinput_*; unconditional modules
# (bbr, nodejs, hexo, trojan) are always installed. The function name keeps
# the historical "moudles" spelling because callers depend on it.
install_moudles(){
# Src url : https://github.com/johnrosen1/vpstoolbox/blob/master/install/
## Install Mariadb
if [[ ${install_mariadb} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/mariadb.sh
source mariadb.sh
install_mariadb
fi
## Install bbr
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/bbr.sh
source bbr.sh
install_bbr
## Install Nodejs
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/nodejs.sh
source nodejs.sh
install_nodejs
## Install Hexo
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/hexo.sh
source hexo.sh
install_hexo
if [[ ${install_php} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/php.sh
source php.sh
install_php
fi
if [[ ${install_ss_rust} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/ss-rust.sh
source ss-rust.sh
install_ss_rust
fi
if [[ ${install_aria} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/aria2.sh
source aria2.sh
install_aria2
fi
if [[ ${install_jellyfin} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/jellyfin.sh
source jellyfin.sh
install_jellyfin
fi
if [[ ${install_dnscrypt} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/dnscrypt.sh
source dnscrypt.sh
install_dnscrypt
fi
if [[ ${install_docker} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/docker.sh
source docker.sh
install_docker
fi
if [[ ${install_fail2ban} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/fail2ban.sh
source fail2ban.sh
install_fail2ban
fi
if [[ ${install_filebrowser} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/filebrowser.sh
source filebrowser.sh
install_filebrowser
fi
if [[ ${install_i2pd} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/i2pd.sh
source i2pd.sh
install_i2pd
fi
if [[ ${install_mail} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/mail.sh
source mail.sh
install_mail
fi
if [[ ${install_mongodb} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/mongodb.sh
source mongodb.sh
install_mongodb
fi
if [[ ${install_nextcloud} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/nextcloud.sh
source nextcloud.sh
install_nextcloud
fi
if [[ ${install_qbt_o} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/qbt_origin.sh
source qbt_origin.sh
install_qbt_o
fi
if [[ ${install_qbt_e} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/qbt.sh
source qbt.sh
install_qbt_e
fi
if [[ ${install_redis} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/redis.sh
source redis.sh
install_redis
fi
if [[ ${install_rocketchat} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/rocketchat.sh
source rocketchat.sh
install_rocketchat
fi
if [[ ${install_rss} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/rss.sh
source rss.sh
install_rss
fi
if [[ ${install_speedtest} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/speedtest.sh
source speedtest.sh
install_speedtest
fi
if [[ ${install_stun} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/stun.sh
source stun.sh
install_stun
fi
if [[ ${install_tor} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/tor.sh
source tor.sh
install_tor
fi
if [[ ${install_tracker} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/tracker.sh
source tracker.sh
install_tracker
fi
if [[ ${install_trojan_panel} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/trojan-panel.sh
source trojan-panel.sh
install_tjp
fi
if [[ ${install_rclone} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/rclone.sh
source rclone.sh
install_rclone
fi
if [[ ${install_typecho} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/typecho.sh
source typecho.sh
install_typecho
fi
# OneDrive setup pipes the helper straight to bash instead of sourcing it.
if [[ ${install_onedrive} == 1 ]]; then
curl -Ss https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/rclone_config.sh | sudo bash
fi
if [[ ${install_netdata} == 1 ]]; then
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/netdata.sh
source netdata.sh
install_netdata
fi
## Install Trojan-gfw
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/trojan.sh
source trojan.sh
install_trojan
}
## Main menu: standard install, extended install, benchmark, or exit.
MasterMenu() {
Mainmenu=$(whiptail --clear --ok-button "选择完毕,进入下一步" --backtitle "Hi,欢迎使用VPSTOOLBOX。有關錯誤報告或更多信息,請訪問以下鏈接: https://github.com/johnrosen1/vpstoolbox 或者 https://t.me/vpstoolbox_chat。" --title "VPS ToolBox Menu" --menu --nocancel "Welcome to VPS Toolbox main menu,Please Choose an option 欢迎使用VPSTOOLBOX,请选择一个选项" 14 68 5 \
"Install_standard" "基础安裝(仅基础+代理相关软件)" \
"Install_extend" "扩展安装(完整软件列表)" \
"Benchmark" "效能测试"\
"Exit" "退出" 3>&1 1>&2 2>&3)
case $Mainmenu in
## Standard installation: base system + proxy-related software only.
Install_standard)
## One-time initialization
install_initial
## Collect user input
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/userinput.sh
source userinput.sh
userinput_standard
## Check whether certificates already exist
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/detectcert.sh
source detectcert.sh
detectcert
## Begin installation
TERM=ansi whiptail --title "开始安装" --infobox "安装开始,请不要按任何按键直到安装完成(Please do not press any button until the installation is completed)!" 7 68
colorEcho ${INFO} "安装开始,请不要按任何按键直到安装完成(Please do not press any button until the installation is completed)!"
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/system-upgrade.sh
source system-upgrade.sh
upgrade_system
## Install base packages
install_base
## Open the firewall
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/firewall.sh
source firewall.sh
openfirewall
## Install NGINX
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/nginx.sh
source nginx.sh
install_nginx
## Certificate issuance
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/issuecert.sh
source issuecert.sh
## HTTP-01 issuance
if [[ ${httpissue} == 1 ]]; then
http_issue
fi
## DNS API issuance
if [[ ${dnsissue} == 1 ]]; then
dns_issue
fi
## Install the selected modules
install_moudles
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/nginx-config.sh
source nginx-config.sh
nginx_config
clean_env
## Print the installation results
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/output.sh
source output.sh
prase_output
rm output.sh
exit 0
;;
## Extended installation: the full software list.
Install_extend)
## One-time initialization
install_initial
## Collect user input
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/userinput.sh
source userinput.sh
userinput_full
## Check whether certificates already exist
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/detectcert.sh
source detectcert.sh
detectcert
## Begin installation
TERM=ansi whiptail --title "开始安装" --infobox "安装开始,请不要按任何按键直到安装完成(Please do not press any button until the installation is completed)!" 7 68
colorEcho ${INFO} "安装开始,请不要按任何按键直到安装完成(Please do not press any button until the installation is completed)!"
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/system-upgrade.sh
source system-upgrade.sh
upgrade_system
## Install base packages
install_base
## Open the firewall
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/firewall.sh
source firewall.sh
openfirewall
## Install NGINX
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/nginx.sh
source nginx.sh
install_nginx
## Certificate issuance
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/issuecert.sh
source issuecert.sh
## HTTP-01 issuance
if [[ ${httpissue} == 1 ]]; then
http_issue
fi
## DNS API issuance
if [[ ${dnsissue} == 1 ]]; then
dns_issue
fi
## Install the selected modules
install_moudles
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/nginx-config.sh
source nginx-config.sh
nginx_config
clean_env
## Print the installation results
curl -LO https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/install/output.sh
source output.sh
prase_output
exit 0
;;
Benchmark)
clear
if (whiptail --title "测试模式" --yes-button "快速测试" --no-button "完整测试" --yesno "效能测试方式(fast or full)?" 8 68); then
curl -fsL https://ilemonra.in/LemonBenchIntl | bash -s fast
else
curl -fsL https://ilemonra.in/LemonBenchIntl | bash -s full
fi
exit 0
;;
Exit)
whiptail --title "Bash Exited" --msgbox "Goodbye" 8 68
exit 0
;;
esac
}
# Script entry point: detect the OS, pick the language, then show the menu.
clear
cd
initialize
setlanguage
clear
MasterMenu
|
# Preprocess the raw text corpus into the HDF5 dataset + vocabulary JSON
# that torch-rnn expects.
python /root/torch-rnn/scripts/preprocess.py \
--input_txt /opt/trumpgen/training/speeches.txt \
--output_h5 /opt/trumpgen/training/data/trump.h5 \
--output_json /opt/trumpgen/training/data/trump.json
# Train the torch-rnn model on the preprocessed dataset.
th /root/torch-rnn/train.lua \
-input_h5 /opt/trumpgen/training/data/trump.h5 \
-input_json /opt/trumpgen/training/data/trump.json
|
#!/bin/sh
# Delete files in ../cache (top level only) whose last access time is more
# than 60 minutes ago. Intended to be run as a cron job.
find ../cache -maxdepth 1 -type f -amin +60 -delete
|
#!/bin/bash
# Start streaming the GoPro feed to YouTube; $1 is your Stream name/key.
# ffmpeg runs in the background so the keep-alive steps below can execute.
ffmpeg -thread_queue_size 512 -f:v mpegts -probesize 8192 -i rtp://10.5.5.9:8554 -c:v libx264 -b:v 1M -vf scale=-1:720 -r 25 -c:a aac -ar 44100 -b:a 128k -tune zerolatency -preset ultrafast -flags +global_header -f flv rtmp://a.rtmp.youtube.com/live2/$1 &
# Refresh the GoPro real-time A/V stream.
curl "http://10.5.5.9/gp/gpControl/execute?p1=gpStream&c1=restart"
# Send GoPro Hero4 UDP keep-alive packets (foreground; keeps the stream up).
python keepalive.py
|
package overCatchTest;
import java.io.File;
//// Unchecked-exception fixture: one class, no method declares the exception.
//// A() catches RuntimeException far above the call (A -> B -> C) where one
//// could arise — a deliberate example of "over-catching" for analysis tools.
public class OverCatchTest5 {
// Entry point: invokes B() and swallows any RuntimeException from the chain.
public void A()
{
try{
B();
}catch(RuntimeException e) {
// Intentionally empty: the fixture demonstrates silently over-catching.
}
}
// Delegates to C(); declares no exception.
public void B() {
C();
}
// File operations that could raise unchecked exceptions (e.g. from toPath()).
private void C() {
File file = new File("filepath");
file.toPath();
}
}
|
// Removes consecutive duplicate-valued nodes from a sorted singly linked list,
// keeping the first occurrence of each value and freeing the removed nodes.
// Returns the (unchanged) head pointer.
ListNode* removeDuplicates(ListNode* head) {
    if (head == nullptr || head->next == nullptr) {
        return head;
    }
    ListNode* node = head;
    while (node != nullptr && node->next != nullptr) {
        ListNode* successor = node->next;
        if (node->val == successor->val) {
            // Unlink and free the duplicate; stay on `node` in case more follow.
            node->next = successor->next;
            delete successor;
        } else {
            node = successor;
        }
    }
    return head;
}
<reponame>JimmyRetza/Theano
#section support_code_struct
/* Per-apply cuDNN spatial transformer descriptor, created once per struct. */
cudnnSpatialTransformerDescriptor_t APPLY_SPECIFIC(sptf);
#section init_code_struct
/* Allocate the descriptor at struct init; NULL marks "not yet created" so
   cleanup can run safely even if creation failed. */
APPLY_SPECIFIC(sptf) = NULL;
{
cudnnStatus_t err = CUDNN_STATUS_SUCCESS;
if ((err = cudnnCreateSpatialTransformerDescriptor(&APPLY_SPECIFIC(sptf))) != CUDNN_STATUS_SUCCESS)
{
PyErr_Format(PyExc_MemoryError,
"GpuDnnTransformerGrid: could not allocate spatial transformer descriptor (sptf): %s",
cudnnGetErrorString(err));
FAIL;
}
}
#section cleanup_code_struct
/* Destroy the descriptor only if it was successfully created. */
if (APPLY_SPECIFIC(sptf) != NULL) { cudnnDestroySpatialTransformerDescriptor(APPLY_SPECIFIC(sptf)); }
#section support_code_struct
/*
 * Generate the sampling grid of a cuDNN spatial transformer.
 *
 * theta    : (num_images, 2, 3) affine transform parameters on the GPU.
 * out_dims : 1-D array of 4 int64 values (num_images, num_channels, height,
 *            width) describing the transformed output.
 * grid     : output; allocated here as (num_images, height, width, 2).
 * _handle  : cuDNN handle to run the grid generator on.
 *
 * Returns 0 on success, 1 on failure with a Python exception set.
 */
int
APPLY_SPECIFIC(dnn_sptf_grid)(PyGpuArrayObject * theta,
PyArrayObject * out_dims,
PyGpuArrayObject ** grid,
cudnnHandle_t _handle)
{
PyGpuContextObject * gpu_ctx = theta->context;
size_t grid_dims[4];
int num_images, num_channels, height, width;
int desc_dims[4];
cudnnDataType_t dt;
cudnnStatus_t err = CUDNN_STATUS_SUCCESS;
/* Map the gpuarray typecode of theta onto the matching cuDNN data type. */
switch(theta->ga.typecode)
{
case GA_DOUBLE:
dt = CUDNN_DATA_DOUBLE;
break;
case GA_FLOAT:
dt = CUDNN_DATA_FLOAT;
break;
case GA_HALF:
dt = CUDNN_DATA_HALF;
break;
default:
PyErr_SetString( PyExc_TypeError,
"GpuDnnTransformerGrid: unsupported data type for theta in spatial transformer." );
return 1;
}
/* NOTE(review): MemoryError for a shape-validation failure looks odd; a
   ValueError/RuntimeError may be more appropriate — kept as-is. */
if ( PyArray_NDIM( out_dims ) != 1 || PyArray_SIZE( out_dims ) != 4 )
{
PyErr_SetString( PyExc_MemoryError,
"GpuDnnTransformerGrid: out_dims must have 4 elements." );
return 1;
}
// Obtain output dimensions
num_images = (int) *( (npy_int64 *) PyArray_GETPTR1( out_dims, 0 ) );
num_channels = (int) *( (npy_int64 *) PyArray_GETPTR1( out_dims, 1 ) );
height = (int) *( (npy_int64 *) PyArray_GETPTR1( out_dims, 2 ) );
width = (int) *( (npy_int64 *) PyArray_GETPTR1( out_dims, 3 ) );
/* theta must be one (2, 3) affine matrix per output image. */
if ( PyGpuArray_DIM( theta, 0 ) != num_images ||
PyGpuArray_DIM( theta, 1 ) != 2 || PyGpuArray_DIM( theta, 2 ) != 3 )
{
PyErr_Format( PyExc_RuntimeError,
"GpuDnnTransformerGrid: incorrect dimensions for theta, expected (%d, %d, %d), got (%d, %d, %d)",
num_images, 2, 3, PyGpuArray_DIMS( theta )[0],
PyGpuArray_DIMS( theta )[1], PyGpuArray_DIMS( theta )[2] );
return 1;
}
// Set transformed output dimensions to setup the descriptor
desc_dims[0] = num_images;
desc_dims[1] = num_channels;
desc_dims[2] = height;
desc_dims[3] = width;
// Set sampling grid dimensions
grid_dims[0] = num_images;
grid_dims[1] = height;
grid_dims[2] = width;
grid_dims[3] = 2;
// Currently, only the bilinear sampler is supported by cuDNN,
// so the sampler method is currently not available as a parameter
err = cudnnSetSpatialTransformerNdDescriptor(APPLY_SPECIFIC(sptf), CUDNN_SAMPLER_BILINEAR,
dt, 4, desc_dims );
if ( CUDNN_STATUS_SUCCESS != err )
{
PyErr_Format( PyExc_MemoryError,
"GpuDnnTransformerGrid: could not initialize descriptor (sptf): %s",
cudnnGetErrorString( err ) );
return 1;
}
/* Allocate (or reuse) the C-contiguous output grid with theta's dtype. */
if ( theano_prep_output( grid, 4, grid_dims, theta->ga.typecode,
GA_C_ORDER, gpu_ctx ) != 0 )
{
PyErr_SetString( PyExc_RuntimeError,
"GpuDnnTransformerGrid: could not allocate memory for grid of coordinates" );
return 1;
}
/* Synchronize with pending work on theta (read) and grid (write) before
   launching the cuDNN kernel, and record our accesses afterwards. */
cuda_enter( gpu_ctx->ctx );
cuda_wait( theta->ga.data, GPUARRAY_CUDA_WAIT_READ );
cuda_wait( (*grid)->ga.data, GPUARRAY_CUDA_WAIT_WRITE );
err = cudnnSpatialTfGridGeneratorForward( _handle, APPLY_SPECIFIC(sptf),
PyGpuArray_DEV_DATA( theta ), PyGpuArray_DEV_DATA( *grid ) );
cuda_record( theta->ga.data, GPUARRAY_CUDA_WAIT_READ );
cuda_record( (*grid)->ga.data, GPUARRAY_CUDA_WAIT_WRITE );
cuda_exit( gpu_ctx->ctx );
if ( CUDNN_STATUS_SUCCESS != err )
{
PyErr_Format( PyExc_RuntimeError,
"GpuDnnTransformerGrid: could not create grid of coordinates: %s",
cudnnGetErrorString( err ) );
return 1;
}
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.