text stringlengths 1 1.05M |
|---|
// Copyright (c) 2015-2016, ETH Zurich, <NAME>, Zurich Eye
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the ETH Zurich, Wyss Zurich, Zurich Eye nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL ETH Zurich, Wyss Zurich, Zurich Eye BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <numeric>
#include <vector>
#include <ze/common/types.hpp>
#include <ze/common/logging.hpp>
//! @file stl_utils.hpp
//! Various utilities to work with the standard template library.
namespace ze {
// -----------------------------------------------------------------------------
//! Transform Eigen::Vector to std::vector.
//! Copy the coefficients of an Eigen row/column vector into a std::vector.
//! @param v Any Eigen vector expression (statically asserted to be a vector).
//! @return std::vector holding the coefficients in order.
//! @todo The storage of a plain Eigen vector is contiguous; a bulk copy could
//!       avoid the per-element loop for non-expression inputs.
template <typename DerivedVec>
std::vector<typename DerivedVec::Scalar> eigenVectorToStlVector(
    const Eigen::MatrixBase<DerivedVec>& v)
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(DerivedVec);
  std::vector<typename DerivedVec::Scalar> out;
  out.reserve(static_cast<size_t>(v.size()));
  for (int idx = 0; idx < v.size(); ++idx)
  {
    out.push_back(v(idx));
  }
  return out;
}
// -----------------------------------------------------------------------------
//! @return Returns a vector of indices from start to stop.
//! Build the index sequence [start, stop).
//! @param start First index (inclusive).
//! @param stop  One past the last index (exclusive); must be >= start.
inline std::vector<uint32_t> range(uint32_t start, uint32_t stop)
{
  DEBUG_CHECK_GE(stop, start);
  std::vector<uint32_t> indices(stop - start);
  uint32_t value = start;
  for (auto& elem : indices)
  {
    elem = value++;
  }
  return indices;
}
// -----------------------------------------------------------------------------
//! @return Returns a vector of indices from 0 to stop.
//! Convenience overload: builds the index sequence [0, stop).
inline std::vector<uint32_t> range(uint32_t stop)
{
// Delegate to the two-argument overload with an explicit unsigned zero.
return range(0u, stop);
}
} // namespace ze
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sshd.client.session;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.rmi.RemoteException;
import java.rmi.ServerException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.sshd.client.SshClient;
import org.apache.sshd.client.future.AuthFuture;
import org.apache.sshd.common.AttributeRepository;
import org.apache.sshd.common.AttributeRepository.AttributeKey;
import org.apache.sshd.common.session.Session;
import org.apache.sshd.common.session.SessionListener;
import org.apache.sshd.core.CoreModuleProperties;
import org.apache.sshd.server.SshServer;
import org.apache.sshd.server.auth.keyboard.KeyboardInteractiveAuthenticator;
import org.apache.sshd.server.auth.pubkey.AcceptAllPublickeyAuthenticator;
import org.apache.sshd.util.test.BaseTestSupport;
import org.apache.sshd.util.test.BogusPasswordAuthenticator;
import org.apache.sshd.util.test.CommandExecutionHelper;
import org.apache.sshd.util.test.CoreTestSupportUtils;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runners.MethodSorters;
/**
* @author <a href="mailto:<EMAIL>">Apache MINA SSHD Project</a>
*/
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
public class ClientSessionTest extends BaseTestSupport {
private static SshServer sshd;
private static SshClient client;
private static int port;
public ClientSessionTest() {
super();
}
@BeforeClass
public static void setupClientAndServer() throws Exception {
sshd = CoreTestSupportUtils.setupTestServer(ClientSessionTest.class);
sshd.start();
port = sshd.getPort();
client = CoreTestSupportUtils.setupTestClient(ClientSessionTest.class);
client.start();
}
@AfterClass
public static void tearDownClientAndServer() throws Exception {
if (sshd != null) {
try {
sshd.stop(true);
} finally {
sshd = null;
}
}
if (client != null) {
try {
client.stop();
} finally {
client = null;
}
}
}
@Before
public void setUp() {
sshd.setPasswordAuthenticator(BogusPasswordAuthenticator.INSTANCE);
sshd.setPublickeyAuthenticator(AcceptAllPublickeyAuthenticator.INSTANCE);
sshd.setKeyboardInteractiveAuthenticator(KeyboardInteractiveAuthenticator.NONE);
}
@Test
public void testDefaultExecuteCommandMethod() throws Exception {
String expectedCommand = getCurrentTestName() + "-CMD";
String expectedResponse = getCurrentTestName() + "-RSP";
sshd.setCommandFactory((session, command) -> new CommandExecutionHelper(command) {
private boolean cmdProcessed;
@Override
protected boolean handleCommandLine(String command) throws Exception {
assertEquals("Mismatched incoming command", expectedCommand, command);
assertFalse("Duplicated command call", cmdProcessed);
OutputStream stdout = getOutputStream();
stdout.write(expectedResponse.getBytes(StandardCharsets.US_ASCII));
stdout.flush();
cmdProcessed = true;
return false;
}
});
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(getCurrentTestName());
session.auth().verify(AUTH_TIMEOUT);
// NOTE !!! The LF is only because we are using a buffered reader on the server end to read the command
String actualResponse = session.executeRemoteCommand(expectedCommand + "\n");
assertEquals("Mismatched command response", expectedResponse, actualResponse);
}
}
@Test
public void testExceptionThrownIfRemoteStderrWrittenTo() throws Exception {
String expectedCommand = getCurrentTestName() + "-CMD";
String expectedErrorMessage = getCurrentTestName() + "-ERR";
sshd.setCommandFactory((session, command) -> new CommandExecutionHelper(command) {
private boolean cmdProcessed;
@Override
protected boolean handleCommandLine(String command) throws Exception {
assertEquals("Mismatched incoming command", expectedCommand, command);
assertFalse("Duplicated command call", cmdProcessed);
OutputStream stderr = getErrorStream();
stderr.write(expectedErrorMessage.getBytes(StandardCharsets.US_ASCII));
stderr.flush();
cmdProcessed = true;
return false;
}
});
String actualErrorMessage = null;
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(getCurrentTestName());
session.auth().verify(AUTH_TIMEOUT);
// NOTE !!! The LF is only because we are using a buffered reader on the server end to read the command
String response = session.executeRemoteCommand(expectedCommand + "\n");
fail("Unexpected successful response: " + response);
} catch (Exception e) {
if (!(e instanceof RemoteException)) {
throw e;
}
Throwable cause = e.getCause();
if (!(cause instanceof ServerException)) {
throw e;
}
actualErrorMessage = cause.getMessage();
}
assertEquals("Mismatched captured error message", expectedErrorMessage, actualErrorMessage);
}
@Test
public void testExceptionThrownIfNonZeroExitStatus() throws Exception {
String expectedCommand = getCurrentTestName() + "-CMD";
int expectedErrorCode = 7365;
sshd.setCommandFactory((session, command) -> new CommandExecutionHelper(command) {
private boolean cmdProcessed;
@Override
protected void onExit(int exitValue, String exitMessage) {
super.onExit((exitValue == 0) ? expectedErrorCode : exitValue, exitMessage);
}
@Override
protected boolean handleCommandLine(String command) throws Exception {
assertEquals("Mismatched incoming command", expectedCommand, command);
assertFalse("Duplicated command call", cmdProcessed);
OutputStream stdout = getOutputStream();
stdout.write(command.getBytes(StandardCharsets.US_ASCII));
stdout.flush();
cmdProcessed = true;
return false;
}
});
String actualErrorMessage = null;
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(getCurrentTestName());
session.auth().verify(AUTH_TIMEOUT);
// NOTE !!! The LF is only because we are using a buffered reader on the server end to read the command
String response = session.executeRemoteCommand(expectedCommand + "\n");
fail("Unexpected successful response: " + response);
} catch (Exception e) {
if (!(e instanceof RemoteException)) {
throw e;
}
Throwable cause = e.getCause();
if (!(cause instanceof ServerException)) {
throw e;
}
actualErrorMessage = cause.getMessage();
}
assertEquals("Mismatched captured error code", Integer.toString(expectedErrorCode), actualErrorMessage);
}
@Test // see SSHD-859
public void testConnectionContextPropagation() throws Exception {
AttributeRepository expected = AttributeRepository.ofKeyValuePair(
new AttributeKey<String>(), getCurrentTestName());
AtomicInteger creationCount = new AtomicInteger(0);
SessionListener listener = new SessionListener() {
@Override
public void sessionCreated(Session session) {
AttributeRepository actual = ((ClientSession) session).getConnectionContext();
assertSame("Mismatched connection context", expected, actual);
creationCount.incrementAndGet();
}
};
try {
client.addSessionListener(listener);
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port, expected)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(<PASSWORD>());
session.auth().verify(AUTH_TIMEOUT);
assertEquals("Session listener invocation count mismatch", 1, creationCount.getAndSet(0));
}
} finally {
client.removeSessionListener(listener);
}
}
@Test // SSHD-1050
public void testAuthGetsNotifiedIfErrorBeforeFirstAuth() throws Exception {
testEarlyErrorAuthAttempts(1);
}
@Test // SSHD-1050
public void testSecondAuthNotifiedAfterEarlyError() throws Exception {
testEarlyErrorAuthAttempts(3);
}
private void testEarlyErrorAuthAttempts(int maxAttempts) throws Exception {
int limit = CoreModuleProperties.MAX_IDENTIFICATION_SIZE.getRequired(sshd);
String line = getClass().getCanonicalName() + "#" + getCurrentTestName();
StringBuilder sb = new StringBuilder(limit + line.length());
while (sb.length() <= limit) {
if (sb.length() > 0) {
sb.append(CoreModuleProperties.SERVER_EXTRA_IDENT_LINES_SEPARATOR);
}
sb.append(line);
}
CoreModuleProperties.SERVER_EXTRA_IDENTIFICATION_LINES.set(sshd, sb.toString());
try (ClientSession session = client.connect(getCurrentTestName(), TEST_LOCALHOST, port)
.verify(CONNECT_TIMEOUT)
.getSession()) {
session.addPasswordIdentity(getCurrentTestName());
// Give time to the client to signal the overflow in server identification
Thread.sleep(AUTH_TIMEOUT.toMillis() / 2L);
for (int index = 1; index <= maxAttempts; index++) {
String authId = "Auth " + index + "/" + maxAttempts;
outputDebugMessage("%s(%s)", getCurrentTestName(), authId);
AuthFuture future = session.auth();
assertTrue(authId + " not completed on time", future.await(AUTH_TIMEOUT));
assertTrue(authId + " has no result", future.isDone());
assertFalse(authId + " unexpected success", future.isSuccess());
assertTrue(authId + " not marked as failed", future.isFailure());
Throwable exception = future.getException();
String message = exception.getMessage();
assertTrue(authId + " invalid exception message: " + message, message.contains("too many header lines"));
}
} finally {
CoreModuleProperties.SERVER_EXTRA_IDENTIFICATION_LINES.set(sshd, null);
}
}
}
|
/**
* Zen
*/
// Environment name; defaults to 'development' when NODE_ENV is unset.
// logerror() below suppresses output when this is 'test'.
var env = process.env.NODE_ENV || 'development';
/**
* Log error using console.error.
*/
/**
 * Log an error to stderr via console.error, preferring the stack trace.
 * Silenced entirely when running under the 'test' environment.
 */
function logerror(err) {
  if (env === 'test') return;
  console.error("Error:", err.stack || err.toString());
}
/**
* Default error handler
*/
/**
 * Default error handler: treats the last argument as the error.
 * Logs a fixed message when no (truthy) error was supplied.
 */
var errorHandler = function error(/*args,*/ /*err*/) {
  var lastArg = arguments.length > 0 ? arguments[arguments.length - 1] : null;
  if (!lastArg) {
    // Covers both "no arguments" and an undefined/falsy error value.
    return logerror("This is the end, with no result given");
  }
  logerror(lastArg);
};
/**
* Default result handler
*/
/**
 * Default result handler: prints the last argument (the result) when truthy.
 */
var resultHandler = function result(/*args,*/ /*res*/) {
  var lastArg = arguments.length > 0 ? arguments[arguments.length - 1] : null;
  if (lastArg) {
    console.log(lastArg);
  }
};
/**
* Zen uses a 'setup' pattern, returning a callable engine function
*/
/**
 * Zen uses a 'setup' pattern, returning a callable engine function.
 * The returned engine threads its call arguments through the supplied
 * handlers; each handler receives (args..., next) and calls next(err, res)
 * to either continue the chain (no err/res) or terminate via the
 * error/result handlers.
 */
module.exports= function (/*handlers*/) {
/**
 * Default handler delegates to the error handler
 */
var defaultHandler = function(/*args*/) {
var args= Array.prototype.slice.call(arguments);
try {
// Replace the trailing `next` slot with `undefined` as the error value.
args[args.length-1]=undefined;//err
return engine.errorHandler.apply(this,args);
} catch (ex) {
// Fall back to the module-level errorHandler if the engine's own throws.
args[args.length-1]=ex;//err
return errorHandler.apply(this,args);
}
};
// The caller's handlers, terminated by the defaultHandler sentinel.
var handlers=Array.prototype.slice.call(arguments);
handlers.push(defaultHandler);
// Requests queued while paused, and the pause/stop flags (closure state).
var _engineRequests=[];
var _enginePaused=false;
var _engineStopped=false;
var firstM=handlers[0];
// The real Zen Engine
var engine= function (/*handleArgs*/) {
var handleArgs=Array.prototype.slice.call(arguments);
var handleArgsLength=handleArgs.length;
if (_enginePaused || _engineStopped){
// Paused: queue the call for resume(). Stopped: route to defaultHandler.
if (_enginePaused) {_engineRequests.push(handleArgs);return;}
handleArgs.push(undefined); return defaultHandler.apply(this,handleArgs);
}
// Index of the NEXT handler to invoke (firstM is invoked directly below).
var i=1;
var self=this;
try {
//handler optimization
// Specialized `handle` closures per arity avoid apply() on hot paths.
var handle;
var ha0,ha1,ha2,ha3;
switch (handleArgsLength) {
// faster
case 0:
handle= function(_handler){
return _handler.call(self,next);
};
break;
case 1:
ha0=handleArgs[0];
handle= function(_handler){
return _handler.call(self,ha0,next);
};
break;
case 2:
ha0=handleArgs[0];ha1=handleArgs[1];
handle= function(_handler){
return _handler.call(self,ha0,ha1, next);
};
break;
case 3:
ha0=handleArgs[0];ha1=handleArgs[1];ha2=handleArgs[2];
handle= function(_handler){
return _handler.call(self,ha0,ha1,ha2,next);
};
break;
case 4:
ha0=handleArgs[0];ha1=handleArgs[1];ha2=handleArgs[2];ha3=handleArgs[3];
handle= function(_handler){
return _handler.call(self,ha0,ha1,ha2,ha3, next);
};
break;
// slower
default:
// Generic path: handleArgs gains `next` via the push() below, so
// apply() passes (args..., next) just like the specialized cases.
handle= function(_handler){
return _handler.apply(self, handleArgs);
}
break;
}
// Continuation given to every handler: err/res terminate the chain,
// otherwise advance to the next handler in the list.
function next (err,res) {
if(res || err){
if (res) {
handleArgs[handleArgsLength]=res;
return engine.resultHandler.apply(this,handleArgs);
}
handleArgs[handleArgsLength]=err;
return engine.errorHandler.apply(this,handleArgs);
}
return handle(handlers[i++]);
}
handleArgs.push(next);
return handle(firstM);
} catch (err) {
// Synchronous throw from a handler: report through the error handlers.
try {
handleArgs[handleArgsLength]=err;
return engine.errorHandler.apply(this,handleArgs);
} catch (ex) {
handleArgs[handleArgsLength]=ex;
return errorHandler.apply(this,handleArgs);
}
}
}
/*if (handlers.length==1)
engine=defaultHandler; //default */
// Per-engine handler hooks; default to the module-level implementations.
engine.errorHandler = errorHandler;
engine.resultHandler = resultHandler;
/*EXTRA FEATURE*/
// pause() queues incoming calls; stop() diverts them to the defaultHandler.
engine.pause=function (){_enginePaused=true;};
engine.stop =function (){_engineStopped=true;};
// resume() clears both flags and replays every queued request in order.
engine.resume=function(){
_enginePaused=_engineStopped=false;
var requests= Array.prototype.slice.call(_engineRequests);
_engineRequests=[];
var request=requests.shift();
while(typeof request !=='undefined'){
engine.apply(this,request);
request=requests.shift();
}
};
return engine;
};
|
#!/bin/bash
# Bootstrap script: installs kmod, iptables and docker via Habitat (hab)
# and starts the docker daemon. Each install block is idempotent — it is
# skipped when the corresponding binary already exists in /bin.
set -e
# Add the path of habitat to PATH
PATH=$PATH:/opt/sd/bin
# Install kmod
if ! [ -e /bin/kmod ]; then
hab pkg install core/kmod
hab pkg binlink core/kmod kmod
# lsmod/modprobe are kmod multi-call aliases, hence the relative symlinks.
ln -sf kmod /bin/lsmod
ln -sf kmod /bin/modprobe
fi
# Install iptables which is needed for dockerd
if ! [ -e /bin/iptables ]; then
hab pkg install core/iptables
hab pkg binlink core/iptables iptables
# Load ip_tables
modprobe ip_tables
fi
if ! [ -e /bin/docker ]; then
# Install docker and symlink
hab pkg install core/docker
hab pkg binlink core/docker docker
hab pkg binlink core/docker dockerd
hab pkg binlink core/docker docker-containerd
hab pkg binlink core/docker docker-init
hab pkg binlink core/docker docker-runc
hab pkg binlink core/docker docker-containerd-shim
hab pkg binlink core/docker docker-containerd-ctr
hab pkg binlink core/docker docker-proxy
fi
# Mount cgroup
# The devices cgroup controller is required by the container runtime.
mkdir -p /cgroup/devices
mount -t cgroup -o devices devices /cgroup/devices
# Start dockerd
# Run in the background; this script does not wait for the daemon to be ready.
dockerd &
|
<filename>src/main/java/com/modesteam/urutau/model/Epic.java
package com.modesteam.urutau.model;
import javax.persistence.Entity;
/**
 * JPA entity for an "epic" requirement: a Requirement subtype that adds a
 * free-form textual content field.
 */
@Entity
public class Epic extends Requirement {
    // Free-form body text of the epic.
    private String content;

    public String getContent() {
        return content;
    }

    public void setContent(String content) {
        this.content = content;
    }
}
|
// Reads the remote "ServerPageTimeLimit" property from the wrapped class
// instance and converts it to a TimeSpan.
// NOTE(review): ClassInstance/JCObject/JCNativeException appear to be types
// from a native-interop bridge declared elsewhere — confirm against their
// declarations; translateException maps the native exception to a managed one.
public TimeSpan GetServerPageTimeLimit(ClassInstance classInstance)
{
    try {
        JCObject val = (JCObject)classInstance.Get("ServerPageTimeLimit");
        // TimeSpan here takes the bridge object directly (interop constructor).
        return new TimeSpan(val);
    } catch (JCNativeException jcne) {
        // Surface native failures as the translated managed exception type.
        throw translateException(jcne);
    }
}
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-N/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-N/512+512+512-shuffled-N-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_remove_all_but_nouns_first_third_full --eval_function last_element_eval |
/// <summary>
/// Thin dispatcher that invokes caller-supplied delegates with the given
/// arguments. ActionCall / FunctionCall are presumably project-defined
/// delegate types analogous to System.Action / System.Func — confirm
/// against their declarations.
/// </summary>
public class DelegateManager
{
    // --- Action overloads: invoke a void-returning delegate with 0..4 args ---
    public void ExecuteAction(ActionCall action)
    {
        action();
    }
    public void ExecuteAction<T1>(ActionCall<T1> action, T1 arg1)
    {
        action(arg1);
    }
    public void ExecuteAction<T1, T2>(ActionCall<T1, T2> action, T1 arg1, T2 arg2)
    {
        action(arg1, arg2);
    }
    public void ExecuteAction<T1, T2, T3>(ActionCall<T1, T2, T3> action, T1 arg1, T2 arg2, T3 arg3)
    {
        action(arg1, arg2, arg3);
    }
    public void ExecuteAction<T1, T2, T3, T4>(ActionCall<T1, T2, T3, T4> action, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
    {
        action(arg1, arg2, arg3, arg4);
    }
    // --- Function overloads: invoke a value-returning delegate with 0..4 args ---
    public TResult ExecuteFunction<TResult>(FunctionCall<TResult> function)
    {
        return function();
    }
    public TResult ExecuteFunction<T1, TResult>(FunctionCall<T1, TResult> function, T1 arg1)
    {
        return function(arg1);
    }
    public TResult ExecuteFunction<T1, T2, TResult>(FunctionCall<T1, T2, TResult> function, T1 arg1, T2 arg2)
    {
        return function(arg1, arg2);
    }
    public TResult ExecuteFunction<T1, T2, T3, TResult>(FunctionCall<T1, T2, T3, TResult> function, T1 arg1, T2 arg2, T3 arg3)
    {
        return function(arg1, arg2, arg3);
    }
    public TResult ExecuteFunction<T1, T2, T3, T4, TResult>(FunctionCall<T1, T2, T3, T4, TResult> function, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
    {
        return function(arg1, arg2, arg3, arg4);
    }
}
#!/bin/ksh
# Remove generated result files (*.nwk, *.r8s, *.cfg) under work/<id>/*/*
# for every dataset id listed in the second column of values.txt.
# FIXED: dropped the useless use of cat — awk reads values.txt directly.
awk '{print $2}' values.txt | xargs -t -I{} sh -c 'rm -fr work/{}/*/*/*.nwk'
awk '{print $2}' values.txt | xargs -t -I{} sh -c 'rm -fr work/{}/*/*/*.r8s'
awk '{print $2}' values.txt | xargs -t -I{} sh -c 'rm -fr work/{}/*/*/*.cfg'
|
import React from 'react'
class DateComponent extends React.Component {
render() {
const today = new Date();
const month = today.toLocaleString('default', { month: 'long' });
const day = today.getDate();
const year = today.getFullYear();
return <span>{`${month} ${day}, ${year}`}</span>
}
} |
<filename>app/src/main/java/test/singleton/TestOfSingleton3.java
package test.singleton;
/**
 * @Class: TestOfSingleton3
 * @Description: demonstrates a synchronized lazy-initialization singleton
 * @Author: hubohua
 * @CreateDate: 2018/8/28
 */
/**
 * Singleton pattern — lazy initialization, variant 2.
 * The getInstance method is guarded with the synchronized keyword.
 * The drawback is low efficiency: access is fully serialized, so a thread
 * that wants the instance must wait for the previous thread to release
 * the lock before it can proceed.
 */
public class TestOfSingleton3 {
    // The single lazily-created instance; creation is guarded by the class lock.
    private static TestOfSingleton3 singleton3;

    private TestOfSingleton3() {
        System.out.println("TestOfSingleton3 constructor on thread: " +
                Thread.currentThread().getName());
    }

    /**
     * Lazy, synchronized accessor: thread-safe, but every call takes the
     * class lock, so concurrent callers are fully serialized.
     */
    private static synchronized TestOfSingleton3 getInstance() {
        if (singleton3 == null) {
            System.out.println("TestOfSingleton3 getInstance on thread: " +
                    Thread.currentThread().getName());
            try {
                // Artificial delay to widen the race window for the demo.
                // FIXED: uppercase 'L' literal suffix — lowercase 'l' (2000l)
                // is easily misread as the digit 1.
                Thread.sleep(2000L);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            singleton3 = new TestOfSingleton3();
        }
        return singleton3;
    }

    public static void main(String[] args) {
        // Start several threads that all request the singleton. Per the
        // original (translated) notes, the recorded sample output shows the
        // constructor running exactly once (on the first thread to enter
        // getInstance) and every thread then printing the same hashCode —
        // i.e. the synchronized static method makes creation thread-safe.
        for (int i = 0; i < 10; i++) {
            new PrintThread().start();
        }
    }

    // Worker thread that fetches the singleton and prints its identity hash.
    static class PrintThread extends Thread {
        @Override
        public void run() {
            System.out.println("singleton obj: " +
                    TestOfSingleton3.getInstance().hashCode() + ", on thread: " +
                    Thread.currentThread().getName());
        }
    }
}
|
package com.dongql.mybatis.tenant.enums;
import com.dongql.mybatis.tenant.enums.base.BaseEntityEnum;
/**
 * VIP tiers with a stable numeric code and a display label.
 * The labels are runtime strings and stay in Chinese:
 * 普通 = normal, 黄金 = gold, 钻石 = diamond.
 */
public enum VipLevel implements BaseEntityEnum {
    NORMAL(1, "普通"),
    GOLD(2, "黄金"),
    DIAMOND(3, "钻石");

    // Stable numeric code (exposed via getIntValue for BaseEntityEnum).
    private int code;
    // Human-readable label for display.
    private String description;

    VipLevel(int code, String description) {
        this.code = code;
        this.description = description;
    }

    public int getCode() {
        return code;
    }

    public String getDescription() {
        return description;
    }

    /** BaseEntityEnum contract: the persisted int value is the numeric code. */
    @Override
    public int getIntValue() {
        return this.code;
    }
}
|
# Turn on "strict mode." See http://redsymbol.net/articles/unofficial-bash-strict-mode/.
# -e: exit if any command unexpectedly fails.
# -u: exit if we have a variable typo.
# -o pipefail: don't ignore errors in the non-last command in a pipeline
# NOTE(review): this file appears to be a helper library sourced by other
# scripts (everything below is function definitions) — confirm with callers.
set -euo pipefail
function hide_output {
	# Usage: hide_output CMD [ARGS...]
	# Runs CMD, hiding its stdout/stderr unless it fails, in which case the
	# captured output is shown and this function exits with CMD's status.
	# FIXED: quote every $OUTPUT expansion so a temp path containing spaces
	# or glob characters cannot be word-split.

	# Get a temporary file.
	OUTPUT=$(mktemp)

	# Execute command, redirecting stderr/stdout to the temporary file. Since we
	# check the return code ourselves, disable 'set -e' temporarily.
	set +e
	"$@" &> "$OUTPUT"
	E=$?
	set -e

	# If the command failed, show the output that was captured in the temporary file.
	if [ $E != 0 ]; then
		# Something failed.
		echo
		echo FAILED: "$@"
		echo -----------------------------------------
		cat "$OUTPUT"
		echo -----------------------------------------
		exit $E
	fi

	# Remove temporary file.
	rm -f "$OUTPUT"
}
function apt_get_quiet {
# Usage: apt_get_quiet SUBCOMMAND [ARGS...] — e.g. apt_get_quiet install pkg
# Run apt-get in a totally non-interactive mode.
#
# Somehow all of these options are needed to get it to not ask the user
# questions about a) whether to proceed (-y), b) package options (noninteractive),
# and c) what to do about files changed locally (we don't cause that to happen but
# some VM providers muck with their images; -o).
#
# Although we could pass -qq to apt-get to make output quieter, many packages write to stdout
# and stderr things that aren't really important. Use our hide_output function to capture
# all of that and only show it if there is a problem (i.e. if apt_get returns a failure exit status).
DEBIAN_FRONTEND=noninteractive hide_output apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confnew" "$@"
}
function apt_install {
# Usage: apt_install PKG [PKG...]
# Install a bunch of packages. We used to report which packages were already
# installed and which needed installing, before just running an 'apt-get
# install' for all of the packages. Calling `dpkg` on each package is slow,
# and doesn't affect what we actually do, except in the messages, so let's
# not do that anymore.
apt_get_quiet install "$@"
}
function get_default_hostname {
# Guess the machine's hostname. It should be a fully qualified
# domain name suitable for DNS. None of these calls may provide
# the right value, but it's the best guess we can make.
# Falls through --fqdn -> --all-fqdns -> plain hostname; prints the
# first word of whichever succeeds.
set -- $(hostname --fqdn 2>/dev/null ||
hostname --all-fqdns 2>/dev/null ||
hostname 2>/dev/null)
printf '%s\n' "$1" # return this value
}
function get_publicip_from_web_service {
# This seems to be the most reliable way to determine the
# machine's public IP address: asking a very nice web API
# for how they see us. Thanks go out to icanhazip.com.
# See: https://major.io/icanhazip-com-faq/
#
# Pass '4' or '6' as an argument to this function to specify
# what type of address to get (IPv4, IPv6).
# On failure prints nothing and still exits 0 (the || /bin/true
# keeps 'set -e' callers alive).
curl -$1 --fail --silent --max-time 15 icanhazip.com 2>/dev/null || /bin/true
}
function get_default_privateip {
# Usage: get_default_privateip 4|6 — prints the preferred source address.
# Return the IP address of the network interface connected
# to the Internet.
#
# Pass '4' or '6' as an argument to this function to specify
# what type of address to get (IPv4, IPv6).
#
# We used to use `hostname -I` and then filter for either
# IPv4 or IPv6 addresses. However if there are multiple
# network interfaces on the machine, not all may be for
# reaching the Internet.
#
# Instead use `ip route get` which asks the kernel to use
# the system's routes to select which interface would be
# used to reach a public address. We'll use 8.8.8.8 as
# the destination. It happens to be Google Public DNS, but
# no connection is made. We're just seeing how the box
# would connect to it. There many be multiple IP addresses
# assigned to an interface. `ip route get` reports the
# preferred. That's good enough for us. See issue #121.
#
# With IPv6, the best route may be via an interface that
# only has a link-local address (fe80::*). These addresses
# are only unique to an interface and so need an explicit
# interface specification in order to use them with bind().
# In these cases, we append "%interface" to the address.
# See the Notes section in the man page for getaddrinfo and
# https://discourse.mailinabox.email/t/update-broke-mailinabox/34/9.
#
# Also see ae67409603c49b7fa73c227449264ddd10aae6a9 and
# issue #3 for why/how we originally added IPv6.
target=8.8.8.8
# For the IPv6 route, use the corresponding IPv6 address
# of Google Public DNS. Again, it doesn't matter so long
# as it's an address on the public Internet.
if [ "$1" == "6" ]; then target=2001:4860:4860::8888; fi
# Get the route information.
route=$(ip -$1 -o route get $target 2>/dev/null | grep -v unreachable)
# Parse the address out of the route information.
address=$(echo $route | sed "s/.* src \([^ ]*\).*/\1/")
if [[ "$1" == "6" && $address == fe80:* ]]; then
# For IPv6 link-local addresses, parse the interface out
# of the route information and append it with a '%'.
interface=$(echo $route | sed "s/.* dev \([^ ]*\).*/\1/")
address=$address%$interface
fi
echo $address
}
function ufw_allow {
# Usage: ufw_allow RULE — opens the given port/service unless the
# DISABLE_FIREWALL environment variable is set (non-empty).
if [ -z "${DISABLE_FIREWALL:-}" ]; then
# ufw has completely unhelpful output
ufw allow "$1" > /dev/null;
fi
}
function ufw_limit {
# Usage: ufw_limit RULE — like ufw_allow but rate-limits connections,
# and is likewise a no-op when DISABLE_FIREWALL is set.
if [ -z "${DISABLE_FIREWALL:-}" ]; then
# ufw has completely unhelpful output
ufw limit "$1" > /dev/null;
fi
}
function restart_service {
	# Usage: restart_service NAME — restarts the given system service,
	# hiding output unless the restart fails.
	# FIXED: quote "$1" so a service name is passed as a single argument
	# even if it contains spaces or glob characters.
	hide_output service "$1" restart
}
## Dialog Functions ##
function message_box {
# Usage: message_box "title" "message" — shows a blocking dialog msgbox.
dialog --title "$1" --msgbox "$2" 0 0
}
function input_box {
# input_box "title" "prompt" "defaultvalue" VARIABLE
# The user's input will be stored in the variable VARIABLE.
# The exit code from dialog will be stored in VARIABLE_EXITCODE.
# (Implemented with bash namerefs: result/result_code alias the caller's vars.)
# Temporarily turn off 'set -e' because we need the dialog return code.
declare -n result=$4
declare -n result_code=$4_EXITCODE
set +e
result=$(dialog --stdout --title "$1" --inputbox "$2" 0 0 "$3")
result_code=$?
set -e
}
function input_menu {
# input_menu "title" "prompt" "tag item tag item" VARIABLE
# The user's input will be stored in the variable VARIABLE.
# The exit code from dialog will be stored in VARIABLE_EXITCODE.
declare -n result=$4
declare -n result_code=$4_EXITCODE
# Split $3 into menu entries on '^' or newline (local IFS change only).
local IFS=^$'\n'
set +e
result=$(dialog --stdout --title "$1" --menu "$2" 0 0 0 $3)
result_code=$?
set -e
}
function wget_verify {
	# Usage: wget_verify URL SHA1 DESTPATH
	# Downloads a file from the web and checks that it matches
	# a provided hash. If the comparison fails, exit immediately.
	# FIXED: quote $URL/$DEST expansions so paths or URLs containing
	# spaces/&/? are not word-split or glob-expanded by the shell.
	URL=$1
	HASH=$2
	DEST=$3
	CHECKSUM="$HASH $DEST"
	rm -f "$DEST"
	hide_output wget -O "$DEST" "$URL"
	if ! echo "$CHECKSUM" | sha1sum --check --strict > /dev/null; then
		echo "------------------------------------------------------------"
		echo "Download of $URL did not match expected checksum."
		echo "Found:"
		sha1sum "$DEST"
		echo
		echo "Expected:"
		echo "$CHECKSUM"
		rm -f "$DEST"
		exit 1
	fi
}
function git_clone {
	# Usage: git_clone REPO TREEISH SUBDIR TARGETPATH
	# Clones a git repository, checks out a particular commit or tag,
	# and moves the repository (or a subdirectory in it) to some path.
	# We use separate clone and checkout because -b only supports tags
	# and branches, but we sometimes want to reference a commit hash
	# directly when the repo doesn't provide a tag.
	# FIXED: quote all parameter expansions so repo URLs and target paths
	# containing spaces are handled correctly.
	REPO=$1
	TREEISH=$2
	SUBDIR=$3
	TARGETPATH=$4
	TMPPATH=/tmp/git-clone-$$
	rm -rf "$TMPPATH" "$TARGETPATH"
	git clone -q "$REPO" "$TMPPATH" || exit 1
	(cd "$TMPPATH"; git checkout -q "$TREEISH";) || exit 1
	mv "$TMPPATH/$SUBDIR" "$TARGETPATH"
	rm -rf "$TMPPATH"
}
function php_version {
# Prints the installed PHP major.minor, e.g. "7.4".
# NOTE(review): `cut -c 1-3` assumes a single-digit minor version — a
# hypothetical "x.10" would be truncated to "x.1"; confirm acceptable.
php --version | head -n 1 | cut -d " " -f 2 | cut -c 1-3
}
function python_version {
	# Prints the installed Python 3 major.minor, e.g. "3.9" or "3.10".
	# FIXED: the old `cut -c 1-3` kept only the first three characters,
	# which truncates two-digit minor versions ("3.10.4" -> "3.1").
	# Splitting on '.' and keeping the first two fields is version-safe
	# and produces identical output for single-digit minors.
	python3 --version | cut -d " " -f 2 | cut -d "." -f 1,2
}
# OS identification codes returned (echoed) by get_os_code below.
export OS_UNSUPPORTED=0
export OS_DEBIAN_10=1
export OS_UBUNTU_2004=2
function get_os_code {
# Detects the distribution via lsb_release and echoes one of the
# OS_* codes defined above; unknown distro/version -> OS_UNSUPPORTED.
# A lot of if-statements here - dirty code looking tasting today
ID=$(lsb_release -is)
VER=$(lsb_release -rs)
if [[ $ID == "Debian" ]]; then
if [[ $VER == "10" ]]; then
echo $OS_DEBIAN_10
return 0
fi
elif [[ $ID == "Ubuntu" ]]; then
if [[ $VER == "20.04" ]]; then
echo $OS_UBUNTU_2004
return 0
fi
fi
echo $OS_UNSUPPORTED
}
function valid_ip_v4()
{
    # Usage: valid_ip_v4 ADDRESS
    # Returns 0 (success) when ADDRESS is a dotted-quad IPv4 address whose
    # four octets are each numeric and <= 255; returns non-zero otherwise.
    local candidate=$1
    local rc=1
    if [[ $candidate =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        # Split on '.' into an array, restoring IFS afterwards.
        local saved_ifs=$IFS
        IFS='.'
        candidate=($candidate)
        IFS=$saved_ifs
        # Range-check every octet; the test's own status becomes the result.
        [[ ${candidate[0]} -le 255 && ${candidate[1]} -le 255 \
            && ${candidate[2]} -le 255 && ${candidate[3]} -le 255 ]]
        rc=$?
    fi
    return $rc
}
def levenshtein_distance(str1, str2):
    """Return the Levenshtein (edit) distance between str1 and str2.

    Classic dynamic-programming formulation: table[r][c] is the distance
    between the first r characters of str1 and the first c of str2.
    """
    rows, cols = len(str1), len(str2)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    # Base cases: transforming to/from the empty prefix costs its length.
    for r in range(rows + 1):
        table[r][0] = r
    for c in range(cols + 1):
        table[0][c] = c
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            if str1[r - 1] == str2[c - 1]:
                # Matching characters: no additional cost.
                table[r][c] = table[r - 1][c - 1]
            else:
                # Min over insertion, deletion, substitution.
                table[r][c] = 1 + min(table[r][c - 1],
                                      table[r - 1][c],
                                      table[r - 1][c - 1])
    return table[rows][cols]
<filename>test/index.test.ts
import request from 'supertest'
import mongoose from 'mongoose'
import app from '../src/app'
// Shared supertest agent wrapping the Express app under test.
const api = request(app)
const initialUsers = [
{
username: 'MrKrrot',
password: <PASSWORD>',
name: '<NAME>',
},
{
username: 'Oddy',
password: <PASSWORD>',
name: '<NAME>',
},
]
// POST /register with a full, valid payload should answer 201 Created.
describe('register', () => {
    test('with valid credentials', async () => {
        await api
            .post('/register')
            .send({ username: 'MrKrrot', name: 'Rafael Olguin', password: '<PASSWORD>' })
            .expect(201)
    })
})
// POST /login: missing credentials -> 400 JSON error; valid credentials ->
// a body carrying username, name and a token.
describe('login', () => {
    test('with empty credentials', async () => {
        await api
            .post('/login')
            .expect(400)
            .expect('Content-Type', /application\/json/)
    })
    test('with valid credentials', async () => {
        const response = await api.post('/login').send({ username: 'MrKrrot', password: '<PASSWORD>' })
        expect(response.body.username).toBeDefined()
        expect(response.body.name).toBeDefined()
        expect(response.body.token).toBeDefined()
        // FIXED: the original line was a bare `expect(200)` — a jest
        // expectation with no matcher, which asserts nothing. Check the
        // actual HTTP status instead.
        expect(response.status).toBe(200)
    })
})
// Per-test teardown is intentionally empty: the DB connection is shared
// across tests and only closed once, in afterAll below.
afterEach(() => {
    //mongoose.connection.close()
})
// Close the Mongo connection and the HTTP server so jest can exit cleanly.
afterAll(() => {
    mongoose.connection.close()
    app.listen().close()
})
|
<filename>client/components/Default.js
import React from 'react';
import styled from 'styled-components';
// Placeholder page component: renders a Bootstrap-style three-column row
// inside a styled wrapper.
const Default = () => {
  return (
    <>
      <DefaultDiv>
        <div className="container">
          <div className="row">
            <div className="col">One of three columns</div>
            <div className="col">One of three columns</div>
            <div className="col">One of three columns</div>
          </div>
        </div>
      </DefaultDiv>
    </>
  );
};
export default Default;
// Styled wrapper providing base typography. Declared after use, which is
// fine: it is only dereferenced at render time, after module evaluation.
const DefaultDiv = styled.div`
  font-size: 16px;
  color: #000000;
`;
|
import React from 'react';
import Container from '@material-ui/core/Container';
// Site-wide footer bar: grey strip with a centered copyright line.
// NOTE(review): the trailing "`R" in the copyright string looks like a
// truncated author name (template artifact) — confirm the intended text.
function TemplateFooterBar(){
  return(
    <div style={{backgroundColor: "#eee", padding: 10, textAlign: 'center'}}>
      <Container>
        <small style={{fontFamily: 'arial'}}>Fans - Copyright @ 2019 - `R</small>
      </Container>
    </div>
  );
}
export default TemplateFooterBar;
#!/bin/bash
# Upgrade an htdf `hsd` node to v2.0.1:
# stop the daemons, back up ~/.hsd, recreate the data dir keeping config and
# validator signing state, install the v2.0.1 genesis file and binary, then
# optionally run `hsd unsafe-reset-all`.
cd ~
echo "stopping hsd and hscli"
pkill hsd
pkill hscli
sleep 3
GENESIS_FILE_URL='https://github.91chifun.workers.dev/https://github.com//orientwalt/htdf/releases/download/v2.0.1/genesis.json.tar.gz'
HSD_RELEASE='https://github.91chifun.workers.dev/https://github.com//orientwalt/htdf/releases/download/v2.0.1/hsd_v2.0.1_linux_amd64.tar.gz'
# Refuse to run on a machine that has never hosted a node.
if [ ! -d ".hsd" ]; then
    echo ".hsd NOT EXISTS"
    exit 1
fi
HSDHOME='.hsd'
HSDPATH=$(cd "$HSDHOME"; pwd -P)
echo "$HSDPATH"
cd ~
CURTIME=$(date "+%Y_%m_%d_%H_%M_%S")
# Back up the existing data directory under a timestamped name.
HSDBAK="${HSDPATH}_bak_${CURTIME}"
echo "$HSDBAK"
read -r -p "now, execute 'mv $HSDPATH $HSDBAK' , Are you sure ? [y/n] " input
case $input in
    [yY][eE][sS]|[yY])
        mv "$HSDPATH" "$HSDBAK"
        echo 'backup old .hsd OK'
        ;;
    [nN][oO]|[nN])
        exit 1
        ;;
    *)
        echo "Invalid input..."
        exit 1
        ;;
esac
# Fresh data dir; keep config plus the validator's double-sign protection state.
mkdir "$HSDPATH"
mkdir -p "$HSDPATH/data"
cp -r "$HSDBAK/config" "$HSDPATH"
cp -f "$HSDBAK/data/priv_validator_state.json" "$HSDPATH/data/priv_validator_state.json"
echo 'mkdir new .hsd OK'
# Switch db_backend in ~/.hsd/config/config.toml from leveldb to goleveldb.
sed -i 's/db_backend = "leveldb"/db_backend = "goleveldb"/g' "$HSDPATH/config/config.toml"
echo 'modify config.toml OK'
# (renamed from misspelled DOWLOADDIR)
DOWNLOAD_DIR=tmpdownload
mkdir "$DOWNLOAD_DIR"
echo 'downloading genesis.json'
wget "$GENESIS_FILE_URL" -O "$DOWNLOAD_DIR/genesis.json.tar.gz" && tar xzf "$DOWNLOAD_DIR/genesis.json.tar.gz" -C "$HSDPATH/config/"
echo 'download genesis.json OK'
# Keep a copy of the old binary next to the new one before overwriting it.
cp "$(which hsd)" "$(which hsd)_bak_v1.3.1"
wget "$HSD_RELEASE" -O "$DOWNLOAD_DIR/hsd_v2.0.1_linux_amd64.tar.gz" && tar xzf "$DOWNLOAD_DIR/hsd_v2.0.1_linux_amd64.tar.gz" -C "$(dirname "$(which hsd)")"
hsd version
read -r -p "now, execute 'hsd unsafe-reset-all' , Are you sure ? [y/n] " input
case $input in
    [yY][eE][sS]|[yY])
        hsd unsafe-reset-all
        ;;
    [nN][oO]|[nN])
        exit 1
        ;;
    *)
        echo "Invalid input..."
        exit 1
        ;;
esac
|
# A dictionary entry: a word with an auto-assigned id and a list of
# definitions, stored in a class-level registry.
class Word
  attr_accessor :word, :id, :definitions

  @@dictionary = []

  # attributes must contain :word; the id is derived from the registry size.
  def initialize(attributes)
    @word = attributes.fetch(:word)
    @id = @@dictionary.length + 1
    @definitions = []
  end

  # All saved words.
  def self.all
    @@dictionary
  end

  # Add this word to the registry.
  def save
    @@dictionary.push(self)
  end

  # Empty the registry (used between tests).
  def self.clear
    @@dictionary = []
  end

  # Linear search by id; nil when not found.
  def self.find(id)
    word_found = nil
    @@dictionary.each do |entry|
      word_found = entry if entry.id == id
    end
    word_found
  end

  # Attach a definition, numbering it within this word.
  def add_definition(definition)
    definition.id = @definitions.length + 1
    @definitions.push(definition)
  end
end
|
import {expect} from 'chai';
import {spec} from '../../../modules/projectLimeLightBidAdapter.js';
describe('ProjectLimeLightAdapter', function () {
// Banner bid targeting host ads.project-limelight.com.
const bid1 = {
  bidId: '2dd581a2b6281d',
  bidder: 'project-limelight',
  bidderRequestId: '145e1d6a7837c9',
  params: {
    host: 'ads.project-limelight.com',
    adUnitId: 123,
    adUnitType: 'banner'
  },
  placementCode: 'placement_0',
  auctionId: '74f78609-a92d-4cf1-869f-1b244bbfb5d2',
  sizes: [[300, 250]],
  transactionId: '3bb2f6da-87a6-4029-aeb0-bfe951372e62'
}
// Banner bid on a different host (cpm.*) — forces a second ServerRequest.
const bid2 = {
  bidId: '58ee9870c3164a',
  bidder: 'project-limelight',
  bidderRequestId: '209fdaf1c81649',
  params: {
    host: 'cpm.project-limelight.com',
    adUnitId: 456,
    adUnitType: 'banner'
  },
  placementCode: 'placement_1',
  auctionId: '482f88de-29ab-45c8-981a-d25e39454a34',
  sizes: [[350, 200]],
  transactionId: '068867d1-46ec-40bb-9fa0-e24611786fb4'
}
// Video bid on the same host as bid1 — grouped into the first request.
const bid3 = {
  bidId: '019645c7d69460',
  bidder: 'project-limelight',
  bidderRequestId: 'f2b15f89e77ba6',
  params: {
    host: 'ads.project-limelight.com',
    adUnitId: 789,
    adUnitType: 'video'
  },
  placementCode: 'placement_2',
  auctionId: 'e4771143-6aa7-41ec-8824-ced4342c96c8',
  sizes: [[800, 600]],
  transactionId: '738d5915-6651-43b9-9b6b-d50517350917'
}
describe('buildRequests', function () {
  // Three bids on two distinct hosts: the adapter groups per host,
  // so exactly two ServerRequests are expected.
  const serverRequests = spec.buildRequests([bid1, bid2, bid3])
  it('Creates two ServerRequests', function() {
    expect(serverRequests).to.exist
    expect(serverRequests).to.have.lengthOf(2)
  })
  serverRequests.forEach(serverRequest => {
    it('Creates a ServerRequest object with method, URL and data', function () {
      expect(serverRequest).to.exist
      expect(serverRequest.method).to.exist
      expect(serverRequest.url).to.exist
      expect(serverRequest.data).to.exist
    })
    it('Returns POST method', function () {
      expect(serverRequest.method).to.equal('POST')
    })
    it('Returns valid data if array of bids is valid', function () {
      let data = serverRequest.data
      expect(data).to.be.an('object')
      expect(data).to.have.all.keys('deviceWidth', 'deviceHeight', 'secure', 'adUnits')
      expect(data.deviceWidth).to.be.a('number')
      expect(data.deviceHeight).to.be.a('number')
      expect(data.secure).to.be.a('boolean')
      data.adUnits.forEach(adUnit => {
        expect(adUnit).to.have.all.keys('id', 'bidId', 'type', 'sizes', 'transactionId')
        expect(adUnit.id).to.be.a('number')
        expect(adUnit.bidId).to.be.a('string')
        expect(adUnit.type).to.be.a('string')
        expect(adUnit.transactionId).to.be.a('string')
        expect(adUnit.sizes).to.be.an('array')
      })
    })
  })
  it('Returns valid URL', function () {
    expect(serverRequests[0].url).to.equal('https://ads.project-limelight.com/hb')
    expect(serverRequests[1].url).to.equal('https://cpm.project-limelight.com/hb')
  })
  it('Returns valid adUnits', function () {
    // bid1 and bid3 share a host, so bid3 is the second adUnit of request 0.
    validateAdUnit(serverRequests[0].data.adUnits[0], bid1)
    validateAdUnit(serverRequests[1].data.adUnits[0], bid2)
    validateAdUnit(serverRequests[0].data.adUnits[1], bid3)
  })
  it('Returns empty data if no valid requests are passed', function () {
    const serverRequests = spec.buildRequests([])
    expect(serverRequests).to.be.an('array').that.is.empty
  })
})
describe('interpretBannerResponse', function () {
  // A well-formed banner bid response body.
  let resObject = {
    body: [ {
      requestId: '123',
      mediaType: 'banner',
      cpm: 0.3,
      width: 320,
      height: 50,
      ad: '<h1>Hello ad</h1>',
      ttl: 1000,
      creativeId: '123asd',
      netRevenue: true,
      currency: 'USD'
    } ]
  };
  let serverResponses = spec.interpretResponse(resObject);
  it('Returns an array of valid server responses if response object is valid', function () {
    expect(serverResponses).to.be.an('array').that.is.not.empty;
    for (let i = 0; i < serverResponses.length; i++) {
      let dataItem = serverResponses[i];
      expect(dataItem).to.have.all.keys('requestId', 'cpm', 'width', 'height', 'ad', 'ttl', 'creativeId',
        'netRevenue', 'currency', 'mediaType');
      expect(dataItem.requestId).to.be.a('string');
      expect(dataItem.cpm).to.be.a('number');
      expect(dataItem.width).to.be.a('number');
      expect(dataItem.height).to.be.a('number');
      expect(dataItem.ad).to.be.a('string');
      expect(dataItem.ttl).to.be.a('number');
      expect(dataItem.creativeId).to.be.a('string');
      expect(dataItem.netRevenue).to.be.a('boolean');
      expect(dataItem.currency).to.be.a('string');
      expect(dataItem.mediaType).to.be.a('string');
    }
  });
  // Fix: this it() was accidentally nested inside the previous it() callback,
  // so mocha registered it mid-run instead of as a sibling test.
  it('Returns an empty array if invalid response is passed', function () {
    serverResponses = spec.interpretResponse('invalid_response');
    expect(serverResponses).to.be.an('array').that.is.empty;
  });
});
describe('interpretVideoResponse', function () {
  // A well-formed video bid response body (vastXml instead of ad markup).
  let resObject = {
    body: [ {
      requestId: '123',
      mediaType: 'video',
      cpm: 0.3,
      width: 320,
      height: 50,
      vastXml: '<VAST></VAST>',
      ttl: 1000,
      creativeId: '123asd',
      netRevenue: true,
      currency: 'USD'
    } ]
  };
  let serverResponses = spec.interpretResponse(resObject);
  it('Returns an array of valid server responses if response object is valid', function () {
    expect(serverResponses).to.be.an('array').that.is.not.empty;
    for (let i = 0; i < serverResponses.length; i++) {
      let dataItem = serverResponses[i];
      expect(dataItem).to.have.all.keys('requestId', 'cpm', 'width', 'height', 'vastXml', 'ttl', 'creativeId',
        'netRevenue', 'currency', 'mediaType');
      expect(dataItem.requestId).to.be.a('string');
      expect(dataItem.cpm).to.be.a('number');
      expect(dataItem.width).to.be.a('number');
      expect(dataItem.height).to.be.a('number');
      expect(dataItem.vastXml).to.be.a('string');
      expect(dataItem.ttl).to.be.a('number');
      expect(dataItem.creativeId).to.be.a('string');
      expect(dataItem.netRevenue).to.be.a('boolean');
      expect(dataItem.currency).to.be.a('string');
      expect(dataItem.mediaType).to.be.a('string');
    }
  });
  // Fix: same nested-it defect as the banner suite — this test was registered
  // from inside the previous it() callback; it is now a proper sibling.
  it('Returns an empty array if invalid response is passed', function () {
    serverResponses = spec.interpretResponse('invalid_response');
    expect(serverResponses).to.be.an('array').that.is.empty;
  });
});
describe('isBidRequestValid', function() {
  // Valid bid: has bidId plus params.adUnitId / params.adUnitType.
  let bid = {
    bidId: '2dd581a2b6281d',
    bidder: 'project-limelight',
    bidderRequestId: '145e1d6a7837c9',
    params: {
      adUnitId: 123,
      adUnitType: 'banner'
    },
    placementCode: 'placement_0',
    auctionId: '74f78609-a92d-4cf1-869f-1b244bbfb5d2',
    sizes: [[300, 250]],
    transactionId: '3bb2f6da-87a6-4029-aeb0-bfe951372e62'
  };
  it('should return true when required params found', function() {
    expect(spec.isBidRequestValid(bid)).to.equal(true);
  });
  it('should return false when required params are not passed', function() {
    // Identical to the valid bid except bidId is missing.
    let bidFailed = {
      bidder: 'project-limelight',
      bidderRequestId: '145e1d6a7837c9',
      params: {
        adUnitId: 123,
        adUnitType: 'banner'
      },
      placementCode: 'placement_0',
      auctionId: '74f78609-a92d-4cf1-869f-1b244bbfb5d2',
      sizes: [[300, 250]],
      transactionId: '3bb2f6da-87a6-4029-aeb0-bfe951372e62'
    };
    expect(spec.isBidRequestValid(bidFailed)).to.equal(false);
  });
});
describe('interpretResponse', function() {
  // A fully-populated banner response item used as the "good" entry.
  let resObject = {
    requestId: '123',
    mediaType: 'banner',
    cpm: 0.3,
    width: 320,
    height: 50,
    ad: '<h1>Hello ad</h1>',
    ttl: 1000,
    creativeId: '123asd',
    netRevenue: true,
    currency: 'USD'
  };
  it('should skip responses which do not contain required params', function() {
    // First entry lacks requestId/size/ad fields and must be filtered out.
    let bidResponses = {
      body: [ {
        mediaType: 'banner',
        cpm: 0.3,
        ttl: 1000,
        currency: 'USD'
      }, resObject ]
    }
    expect(spec.interpretResponse(bidResponses)).to.deep.equal([ resObject ]);
  });
  it('should skip responses which do not contain expected mediaType', function() {
    // 'native' is not supported by this adapter and must be filtered out.
    let bidResponses = {
      body: [ {
        requestId: '123',
        mediaType: 'native',
        cpm: 0.3,
        creativeId: '123asd',
        ttl: 1000,
        currency: 'USD'
      }, resObject ]
    }
    expect(spec.interpretResponse(bidResponses)).to.deep.equal([ resObject ]);
  });
});
});
// Assert that an adUnit built by buildRequests mirrors its source bid:
// same unit id, bid id, upper-cased type, transaction id, and sizes
// converted from [w, h] pairs to {width, height} objects.
function validateAdUnit(adUnit, bid) {
  const expectedSizes = bid.sizes.map(([width, height]) => ({ width, height }))
  expect(adUnit.id).to.equal(bid.params.adUnitId)
  expect(adUnit.bidId).to.equal(bid.bidId)
  expect(adUnit.type).to.equal(bid.params.adUnitType.toUpperCase())
  expect(adUnit.transactionId).to.equal(bid.transactionId)
  expect(adUnit.sizes).to.deep.equal(expectedSizes)
}
|
#! @runtimeShell@
# shellcheck shell=bash
# nixos-rebuild: build and activate a NixOS system configuration.
# @...@ tokens are substituted when the script is installed from nixpkgs.
if [ -x "@runtimeShell@" ]; then export SHELL="@runtimeShell@"; fi;
set -e
set -o pipefail
shopt -s inherit_errexit
export PATH=@path@:$PATH

# Show the manual page and stop; the exit only runs if exec itself fails.
showSyntax() {
    exec man nixos-rebuild
    exit 1
}

# Parse the command line.
origArgs=("$@")
copyClosureFlags=()
extraBuildFlags=()
lockFlags=()
flakeFlags=()
action=
buildNix=1
fast=
rollback=
upgrade=
upgrade_all=
profile=/nix/var/nix/profiles/system
buildHost=localhost
targetHost=
remoteSudo=
# comma separated list of vars to preserve when using sudo
preservedSudoVars=NIXOS_INSTALL_BOOTLOADER
# Consume all CLI arguments, sorting them into the flag arrays above.
while [ "$#" -gt 0 ]; do
    i="$1"; shift 1
    case "$i" in
      --help)
        showSyntax
        ;;
      switch|boot|test|build|edit|dry-build|dry-run|dry-activate|build-vm|build-vm-with-bootloader)
        # dry-run is a legacy alias for dry-build.
        if [ "$i" = dry-run ]; then i=dry-build; fi
        action="$i"
        ;;
      --install-grub)
        echo "$0: --install-grub deprecated, use --install-bootloader instead" >&2
        export NIXOS_INSTALL_BOOTLOADER=1
        ;;
      --install-bootloader)
        export NIXOS_INSTALL_BOOTLOADER=1
        ;;
      --no-build-nix)
        buildNix=
        ;;
      --rollback)
        rollback=1
        ;;
      --upgrade)
        upgrade=1
        ;;
      --upgrade-all)
        upgrade=1
        upgrade_all=1
        ;;
      -s|--use-substitutes)
        copyClosureFlags+=("$i")
        ;;
      # Flags that take one argument, passed through to nix.
      --max-jobs|-j|--cores|-I|--builders)
        j="$1"; shift 1
        extraBuildFlags+=("$i" "$j")
        ;;
      # Zero-argument flags passed through to nix.
      --show-trace|--keep-failed|-K|--keep-going|-k|--verbose|-v|-vv|-vvv|-vvvv|-vvvvv|--fallback|--repair|--no-build-output|-Q|-j*|-L|--refresh|--no-net|--offline|--impure)
        extraBuildFlags+=("$i")
        ;;
      --option)
        j="$1"; shift 1
        k="$1"; shift 1
        extraBuildFlags+=("$i" "$j" "$k")
        ;;
      --fast)
        buildNix=
        fast=1
        ;;
      --profile-name|-p)
        if [ -z "$1" ]; then
            echo "$0: ‘--profile-name’ requires an argument"
            exit 1
        fi
        if [ "$1" != system ]; then
            profile="/nix/var/nix/profiles/system-profiles/$1"
            mkdir -p -m 0755 "$(dirname "$profile")"
        fi
        shift 1
        ;;
      # NOTE(review): the bare 'h'/'t' alternatives below also match the
      # literal words "h"/"t" as arguments — confirm this is intended.
      --build-host|h)
        buildHost="$1"
        shift 1
        ;;
      --target-host|t)
        targetHost="$1"
        shift 1
        ;;
      --use-remote-sudo)
        remoteSudo=1
        ;;
      --flake)
        flake="$1"
        flakeFlags=(--extra-experimental-features 'nix-command flakes')
        shift 1
        ;;
      # Flake lock-file handling, forwarded to nix.
      --recreate-lock-file|--no-update-lock-file|--no-write-lock-file|--no-registries|--commit-lock-file)
        lockFlags+=("$i")
        ;;
      --update-input)
        j="$1"; shift 1
        lockFlags+=("$i" "$j")
        ;;
      --override-input)
        j="$1"; shift 1
        k="$1"; shift 1
        lockFlags+=("$i" "$j" "$k")
        ;;
      *)
        echo "$0: unknown option \`$i'"
        exit 1
        ;;
    esac
done
# Wrap remote/privileged commands in sudo when the script itself was run
# under sudo or --use-remote-sudo was given.
if [[ -n "$SUDO_USER" || -n $remoteSudo ]]; then
    maybeSudo=(sudo --preserve-env="$preservedSudoVars" --)
fi

# Default the build host to the target host, and normalize "localhost"
# to the empty string (empty means "run locally").
if [[ -z "$buildHost" && -n "$targetHost" ]]; then
    buildHost="$targetHost"
fi
if [ "$targetHost" = localhost ]; then
    targetHost=
fi
if [ "$buildHost" = localhost ]; then
    buildHost=
fi

# Run a command on the build host (locally when buildHost is empty);
# when a remote Nix was staged, prefix it onto the remote PATH.
buildHostCmd() {
    if [ -z "$buildHost" ]; then
        "$@"
    elif [ -n "$remoteNix" ]; then
        ssh $SSHOPTS "$buildHost" "${maybeSudo[@]}" env PATH="$remoteNix":'$PATH' "$@"
    else
        ssh $SSHOPTS "$buildHost" "${maybeSudo[@]}" "$@"
    fi
}

# Run a command on the target host (locally when targetHost is empty).
targetHostCmd() {
    if [ -z "$targetHost" ]; then
        "${maybeSudo[@]}" "$@"
    else
        ssh $SSHOPTS "$targetHost" "${maybeSudo[@]}" "$@"
    fi
}

# Copy a store path from wherever it was built to the target host,
# choosing the direction/relay based on which hosts are remote.
copyToTarget() {
    if ! [ "$targetHost" = "$buildHost" ]; then
        if [ -z "$targetHost" ]; then
            NIX_SSHOPTS=$SSHOPTS nix-copy-closure "${copyClosureFlags[@]}" --from "$buildHost" "$1"
        elif [ -z "$buildHost" ]; then
            NIX_SSHOPTS=$SSHOPTS nix-copy-closure "${copyClosureFlags[@]}" --to "$targetHost" "$1"
        else
            buildHostCmd nix-copy-closure "${copyClosureFlags[@]}" --to "$targetHost" "$1"
        fi
    fi
}
# nix-build wrapper: local builds call nix-build directly; remote builds
# split the arguments into instantiate vs realise phases, instantiate
# locally, copy the .drv to the build host, and realise it there.
nixBuild() {
    if [ -z "$buildHost" ]; then
        nix-build "$@"
    else
        local instArgs=()
        local buildArgs=()
        local drv=
        while [ "$#" -gt 0 ]; do
            local i="$1"; shift 1
            case "$i" in
              -o)
                local out="$1"; shift 1
                buildArgs+=("--add-root" "$out" "--indirect")
                ;;
              -A)
                local j="$1"; shift 1
                instArgs+=("$i" "$j")
                ;;
              -I) # We don't want this in buildArgs
                shift 1
                ;;
              --no-out-link) # We don't want this in buildArgs
                ;;
              "<"*) # nix paths
                instArgs+=("$i")
                ;;
              *)
                buildArgs+=("$i")
                ;;
            esac
        done
        drv="$(nix-instantiate "${instArgs[@]}" "${extraBuildFlags[@]}")"
        if [ -a "$drv" ]; then
            NIX_SSHOPTS=$SSHOPTS nix-copy-closure --to "$buildHost" "$drv"
            buildHostCmd nix-store -r "$drv" "${buildArgs[@]}"
        else
            echo "nix-instantiate failed"
            exit 1
        fi
    fi
}
# Flake analogue of nixBuild. Pure local builds use `nix build`; remote
# builds evaluate the attribute's drvPath locally, copy the derivation to
# the build host, and realise it there. Prints the resulting store path.
nixFlakeBuild() {
    if [[ -z "$buildHost" && -z "$targetHost" && "$action" != switch && "$action" != boot ]]
    then
        nix "${flakeFlags[@]}" build "$@"
        readlink -f ./result
    elif [ -z "$buildHost" ]; then
        nix "${flakeFlags[@]}" build "$@" --out-link "${tmpDir}/result"
        readlink -f "${tmpDir}/result"
    else
        local attr="$1"
        shift 1
        # Split remaining args: lock-file flags go to eval, the rest to build.
        local evalArgs=()
        local buildArgs=()
        local drv=
        while [ "$#" -gt 0 ]; do
            local i="$1"; shift 1
            case "$i" in
              --recreate-lock-file|--no-update-lock-file|--no-write-lock-file|--no-registries|--commit-lock-file)
                evalArgs+=("$i")
                ;;
              --update-input)
                local j="$1"; shift 1
                evalArgs+=("$i" "$j")
                ;;
              --override-input)
                local j="$1"; shift 1
                local k="$1"; shift 1
                evalArgs+=("$i" "$j" "$k")
                ;;
              *)
                buildArgs+=("$i")
                ;;
            esac
        done
        drv="$(nix "${flakeFlags[@]}" eval --raw "${attr}.drvPath" "${evalArgs[@]}" "${extraBuildFlags[@]}")"
        if [ -a "$drv" ]; then
            NIX_SSHOPTS=$SSHOPTS nix "${flakeFlags[@]}" copy --derivation --to "ssh://$buildHost" "$drv"
            buildHostCmd nix-store -r "$drv" "${buildArgs[@]}"
        else
            echo "nix eval failed"
            exit 1
        fi
    fi
}
if [ -z "$action" ]; then showSyntax; fi

# Only run shell scripts from the Nixpkgs tree if the action is
# "switch", "boot", or "test". With other actions (such as "build"),
# the user may reasonably expect that no code from the Nixpkgs tree is
# executed, so it's safe to run nixos-rebuild against a potentially
# untrusted tree.
canRun=
if [[ "$action" = switch || "$action" = boot || "$action" = test ]]; then
    canRun=1
fi

# If ‘--upgrade’ or `--upgrade-all` is given,
# run ‘nix-channel --update nixos’.
if [[ -n $upgrade && -z $_NIXOS_REBUILD_REEXEC && -z $flake ]]; then
    # If --upgrade-all is passed, or there are other channels that
    # contain a file called ".update-on-nixos-rebuild", update them as
    # well. Also upgrade the nixos channel.
    for channelpath in /nix/var/nix/profiles/per-user/root/channels/*; do
        channel_name=$(basename "$channelpath")
        if [[ "$channel_name" == "nixos" ]]; then
            nix-channel --update "$channel_name"
        elif [ -e "$channelpath/.update-on-nixos-rebuild" ]; then
            nix-channel --update "$channel_name"
        elif [[ -n $upgrade_all ]] ; then
            nix-channel --update "$channel_name"
        fi
    done
fi

# Make sure that we use the Nix package we depend on, not something
# else from the PATH for nix-{env,instantiate,build}. This is
# important, because NixOS defaults the architecture of the rebuilt
# system to the architecture of the nix-* binaries used. So if on an
# amd64 system the user has an i686 Nix package in her PATH, then we
# would silently downgrade the whole system to be i686 NixOS on the
# next reboot.
if [ -z "$_NIXOS_REBUILD_REEXEC" ]; then
    export PATH=@nix@/bin:$PATH
fi

# Use /etc/nixos/flake.nix if it exists. It can be a symlink to the
# actual flake.
if [[ -z $flake && -e /etc/nixos/flake.nix ]]; then
    flake="$(dirname "$(readlink -f /etc/nixos/flake.nix)")"
fi

# Re-execute nixos-rebuild from the Nixpkgs tree.
# FIXME: get nixos-rebuild from $flake.
if [[ -z $_NIXOS_REBUILD_REEXEC && -n $canRun && -z $fast && -z $flake ]]; then
    if p=$(nix-build --no-out-link --expr 'with import <nixpkgs/nixos> {}; config.system.build.nixos-rebuild' "${extraBuildFlags[@]}"); then
        export _NIXOS_REBUILD_REEXEC=1
        # exec replaces this process; exit 1 runs only if exec fails.
        exec "$p/bin/nixos-rebuild" "${origArgs[@]}"
        exit 1
    fi
fi

# For convenience, use the hostname as the default configuration to
# build from the flake.
if [[ -n $flake ]]; then
    # Split "path#attr" into the flake ref and the attribute name.
    if [[ $flake =~ ^(.*)\#([^\#\"]*)$ ]]; then
       flake="${BASH_REMATCH[1]}"
       flakeAttr="${BASH_REMATCH[2]}"
    fi
    if [[ -z $flakeAttr ]]; then
        read -r hostname < /proc/sys/kernel/hostname
        if [[ -z $hostname ]]; then
            hostname=default
        fi
        flakeAttr="nixosConfigurations.\"$hostname\""
    else
        flakeAttr="nixosConfigurations.\"$flakeAttr\""
    fi
fi

# Find configuration.nix and open editor instead of building.
if [ "$action" = edit ]; then
    if [[ -z $flake ]]; then
        NIXOS_CONFIG=${NIXOS_CONFIG:-$(nix-instantiate --find-file nixos-config)}
        if [[ -d $NIXOS_CONFIG ]]; then
            NIXOS_CONFIG=$NIXOS_CONFIG/default.nix
        fi
        exec ${EDITOR:-nano} "$NIXOS_CONFIG"
    else
        exec nix "${flakeFlags[@]}" edit "${lockFlags[@]}" -- "$flake#$flakeAttr"
    fi
    exit 1
fi

# Scratch dir; also holds the SSH control sockets for connection reuse.
tmpDir=$(mktemp -t -d nixos-rebuild.XXXXXX)
SSHOPTS="$NIX_SSHOPTS -o ControlMaster=auto -o ControlPath=$tmpDir/ssh-%n -o ControlPersist=60"

# Tear down SSH master connections before removing the temp dir.
cleanup() {
    for ctrl in "$tmpDir"/ssh-*; do
        ssh -o ControlPath="$ctrl" -O exit dummyhost 2>/dev/null || true
    done
    rm -rf "$tmpDir"
}
trap cleanup EXIT
# First build Nix, since NixOS may require a newer version than the
# current one.
if [[ -n "$rollback" || "$action" = dry-build ]]; then
    buildNix=
fi

# Print the local system double (e.g. x86_64-linux), folding i?86 to i686.
nixSystem() {
    machine="$(uname -m)"
    if [[ "$machine" =~ i.86 ]]; then
        machine=i686
    fi
    echo $machine-linux
}

# Print the substituted prebuilt-Nix store path for the given machine type.
prebuiltNix() {
    machine="$1"
    if [ "$machine" = x86_64 ]; then
        echo @nix_x86_64_linux@
    elif [[ "$machine" =~ i.86 ]]; then
        echo @nix_i686_linux@
    elif [[ "$machine" = aarch64 ]]; then
        echo @nix_aarch64_linux@
    else
        echo "$0: unsupported platform"
        exit 1
    fi
}

# Try, in order: the configuration's nix package, plain nixpkgs nix,
# the fallback-paths expression, and finally the baked-in prebuilt path.
if [[ -n $buildNix && -z $flake ]]; then
    echo "building Nix..." >&2
    nixDrv=
    if ! nixDrv="$(nix-instantiate '<nixpkgs/nixos>' --add-root "$tmpDir/nix.drv" --indirect -A config.nix.package.out "${extraBuildFlags[@]}")"; then
        if ! nixDrv="$(nix-instantiate '<nixpkgs>' --add-root "$tmpDir/nix.drv" --indirect -A nix "${extraBuildFlags[@]}")"; then
            if ! nixStorePath="$(nix-instantiate --eval '<nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix>' -A "$(nixSystem)" | sed -e 's/^"//' -e 's/"$//')"; then
                nixStorePath="$(prebuiltNix "$(uname -m)")"
            fi
            if ! nix-store -r "$nixStorePath" --add-root "${tmpDir}/nix" --indirect \
                --option extra-binary-caches https://cache.nixos.org/; then
                echo "warning: don't know how to get latest Nix" >&2
            fi
            # Older version of nix-store -r don't support --add-root.
            [ -e "$tmpDir/nix" ] || ln -sf "$nixStorePath" "$tmpDir/nix"
            if [ -n "$buildHost" ]; then
                remoteNixStorePath="$(prebuiltNix "$(buildHostCmd uname -m)")"
                remoteNix="$remoteNixStorePath/bin"
                if ! buildHostCmd nix-store -r "$remoteNixStorePath" \
                  --option extra-binary-caches https://cache.nixos.org/ >/dev/null; then
                    remoteNix=
                    echo "warning: don't know how to get latest Nix" >&2
                fi
            fi
        fi
    fi
    if [ -a "$nixDrv" ]; then
        nix-store -r "$nixDrv"'!'"out" --add-root "$tmpDir/nix" --indirect >/dev/null
        if [ -n "$buildHost" ]; then
            nix-copy-closure "${copyClosureFlags[@]}" --to "$buildHost" "$nixDrv"
            # The nix build produces multiple outputs, we add them all to the remote path
            # NOTE(review): buildArgs is not set in this scope — likely empty here; confirm.
            for p in $(buildHostCmd nix-store -r "$(readlink "$nixDrv")" "${buildArgs[@]}"); do
                remoteNix="$remoteNix${remoteNix:+:}$p/bin"
            done
        fi
    fi
    PATH="$tmpDir/nix/bin:$PATH"
fi
# Update the version suffix if we're building from Git (so that
# nixos-version shows something useful).
if [[ -n $canRun && -z $flake ]]; then
    if nixpkgs=$(nix-instantiate --find-file nixpkgs "${extraBuildFlags[@]}"); then
        suffix=$($SHELL "$nixpkgs/nixos/modules/installer/tools/get-version-suffix" "${extraBuildFlags[@]}" || true)
        if [ -n "$suffix" ]; then
            echo -n "$suffix" > "$nixpkgs/.version-suffix" || true
        fi
    fi
fi

if [ "$action" = dry-build ]; then
    extraBuildFlags+=(--dry-run)
fi

# Either upgrade the configuration in the system profile (for "switch"
# or "boot"), or just build it and create a symlink "result" in the
# current directory (for "build" and "test").
if [ -z "$rollback" ]; then
    echo "building the system configuration..." >&2
    if [[ "$action" = switch || "$action" = boot ]]; then
        if [[ -z $flake ]]; then
            pathToConfig="$(nixBuild '<nixpkgs/nixos>' --no-out-link -A system "${extraBuildFlags[@]}")"
        else
            pathToConfig="$(nixFlakeBuild "$flake#$flakeAttr.config.system.build.toplevel" "${extraBuildFlags[@]}" "${lockFlags[@]}")"
        fi
        copyToTarget "$pathToConfig"
        # Point the system profile at the new configuration.
        targetHostCmd nix-env -p "$profile" --set "$pathToConfig"
    elif [[ "$action" = test || "$action" = build || "$action" = dry-build || "$action" = dry-activate ]]; then
        if [[ -z $flake ]]; then
            pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A system -k "${extraBuildFlags[@]}")"
        else
            pathToConfig="$(nixFlakeBuild "$flake#$flakeAttr.config.system.build.toplevel" "${extraBuildFlags[@]}" "${lockFlags[@]}")"
        fi
    elif [ "$action" = build-vm ]; then
        if [[ -z $flake ]]; then
            pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A vm -k "${extraBuildFlags[@]}")"
        else
            pathToConfig="$(nixFlakeBuild "$flake#$flakeAttr.config.system.build.vm" "${extraBuildFlags[@]}" "${lockFlags[@]}")"
        fi
    elif [ "$action" = build-vm-with-bootloader ]; then
        if [[ -z $flake ]]; then
            pathToConfig="$(nixBuild '<nixpkgs/nixos>' -A vmWithBootLoader -k "${extraBuildFlags[@]}")"
        else
            pathToConfig="$(nixFlakeBuild "$flake#$flakeAttr.config.system.build.vmWithBootLoader" "${extraBuildFlags[@]}" "${lockFlags[@]}")"
        fi
    else
        showSyntax
    fi
    # Copy build to target host if we haven't already done it
    if ! [[ "$action" = switch || "$action" = boot ]]; then
        copyToTarget "$pathToConfig"
    fi
else # [ -n "$rollback" ]
    if [[ "$action" = switch || "$action" = boot ]]; then
        targetHostCmd nix-env --rollback -p "$profile"
        pathToConfig="$profile"
    elif [[ "$action" = test || "$action" = build ]]; then
        # Find the generation preceding the current one.
        systemNumber=$(
            targetHostCmd nix-env -p "$profile" --list-generations |
            sed -n '/current/ {g; p;}; s/ *\([0-9]*\).*/\1/; h'
        )
        pathToConfig="$profile"-${systemNumber}-link
        if [ -z "$targetHost" ]; then
            ln -sT "$pathToConfig" ./result
        fi
    else
        showSyntax
    fi
fi

# If we're not just building, then make the new configuration the boot
# default and/or activate it now.
if [[ "$action" = switch || "$action" = boot || "$action" = test || "$action" = dry-activate ]]; then
    if ! targetHostCmd "$pathToConfig/bin/switch-to-configuration" "$action"; then
        echo "warning: error(s) occurred while switching to the new configuration" >&2
        exit 1
    fi
fi

if [[ "$action" = build-vm || "$action" = build-vm-with-bootloader ]]; then
    cat >&2 <<EOF
Done. The virtual machine can be started by running $(echo "${pathToConfig}/bin/"run-*-vm)
EOF
fi
|
# --------------------------------------------------------------------------------------
# Certificate Authority (CA)
# https://superuser.com/questions/738612/openssl-ca-keyusage-extension
# Creating our root PRIVATE Key
openssl genrsa -out saevon.root.key 4096
# Add this if you want to encrypt the key as well (with a passphrase)
-des3
# Create our ROOT Certificate
openssl req -x509 -new -key saevon.root.key -sha256 -days 365 -out saevon.root.crt
# You need to explicitly choose to encrypt (or not)
-nodes
# OR
-des3
# Sign people's Certificates (send back the *.crt)
openssl req -x509 -in mydomain.com.csr -CA saevon.root.crt -CAkey saevon.root.key
-out mydomain.com.crt -config ssl.conf.ini
# Every certificate you sign needs a unique serial number... this is stored on a file
# You will need one of the following:
# The first time you create a serial (indicating how many things you've signed)
-CAcreateserial
# it creates a file with the same name as the *.crt except *.srl
# Every time afterwards, just supply this file
-CAserial saevon.root.srl
# Verify that something was signed by you
openssl verify -CAfile saevon.root.crt mydomain.com.crt
# MacOSX Splitting from Keychain.app (after exporting)
openssl pkcs12 -in path.p12 -out newfile.crt.pem -clcerts -nokeys
openssl pkcs12 -in path.p12 -out newfile.key.pem -nocerts -nodes
# --------------------------------------------------------------------------------------
# Server SSL
# TODO:
# 1) Create this system once
# 2) Verify that extensions (e.g. the SAN[second name] get copied over when you sign the crt)
# 3) There's also EV (Extended Validation) in case you want the green lock symbol
# https://gist.github.com/Soarez/9688998
# --------------------------------------------------------------------------------------
# Server SSL
# Create our Server Certificate PRIVATE KEY
openssl genrsa -out mydomain.com.key 2048
# Certificate Signing Request (csr)
# Ask a Root CA to sign your SSL Certificate
openssl req -new -key mydomain.com.key -out mydomain.com.csr -config mydomain.com.conf
# You can also add in other things... such as a SAN: (extra domain names)
# Now Get the CA to actually sign it
# csr >> crt
# The CSR is useless afterwards
rm mydomain.com.csr
# OR self-sign it
# Note you need the INI again to get extensions working properly...???
openssl x509 -req -days 365 -in mydomain.com.csr -signkey mydomain.com.key -out mydomain.com.crt -extfile mydomain.com.ini
# Getting information out of a .crt
openssl x509 -text -noout -in mydomain.com.crt
# Checking a servers certificate
# You might need
openssl s_client -connect server.com:443
# Optionally you can specify a root CA to use, a bundle
# OR you can use an intermediate cert (especially if the server doesn't provide it)
-CAfile certificate.bundle.pem
# --------------------------------------------------------------------------------------
# Double check permissions (and location)
sudo chmod u=rw,g=r,o= /etc/ssl/private/*.key
chown root:ssl-cert
sudo chmod u=rw,g=r,o=r /etc/ssl/cert/*.crt
chown root:ssl-cert
# Notes:
*.pem # A Jumble of things: CSR, CRT, KEY, etc
*.p12 # A Jumble of Certificate + Private Key (CRT + KEY) (MacOSX)
*.crt # A certificate PEM file (but apps know its a certificate)
*.csr # Basically your public key for the CA to issue a certificate
*.srl # A Serial identifier for a CA
# Don't forget about wildcard certificates (locking down all subdomains)
*.saevon.ca
# Which won't actually match the base(naked) domain... so include them both
|
#pragma GCC optimize("O3")
#pragma GCC target("sse,sse2,sse3,ssse3,sse4,popcnt,abm,mmx")
#define __USE_MINGW_ANSI_STDIO 0
#include <bits/stdc++.h>
#define TASK "zak"
#define pb push_back
#define fi first
#define se second
#define sz(a) (int)(a.size())
using namespace std;
#ifdef LOCAL
#define eprintf(...) fprintf(stderr, __VA_ARGS__)
#else
#define eprintf(...) (void)42
#endif
// Shorthand aliases used throughout the solution.
typedef long long ll;
typedef pair < int, int > pii;
typedef pair < ll, ll > pll;
typedef long double ld;
typedef vector < vector < int > > vvi;
typedef vector <int> vi;
const int MAXN = 1e5 + 9;   // upper bound on 2*n (vertex count)
vvi gg;                     // adjacency list over 2*n vertices
int n, m;                   // n: half the vertex count; m: edge count
int col[MAXN];              // 2-coloring result: 0 = uncolored, 1 or 2 otherwise
bool used[MAXN];            // visited flags for the DFS
int deg[MAXN];              // vertex degrees (isolated vertices are skipped)
// 2-color the component containing v, writing colors into the global col[].
// Returns false iff an odd cycle is found (two adjacent vertices end up
// with the same color), i.e. the component is not bipartite.
bool dfs(int v, int color) {
    col[v] = color;
    used[v] = 1;
    bool h = 1; // stays true while the component remains bipartite
    for(auto to : gg[v]) {
        if(used[to]) {
            if(col[to] == col[v]) return 0;
        }
        else h &= dfs(to, 3 - color); // alternate colors: 1 <-> 2
    }
    return h;
}
// Read n and m, then m undirected edges (1-based in input, stored 0-based)
// into the global adjacency list over 2*n vertices, tracking degrees.
void input() {
    cin >> n >> m;
    gg.resize(2 * n);
    for(int i = 0; i < m; i++) {
        int a, b;
        cin >> a >> b;
        a--, b--;
        deg[a]++;
        deg[b]++;
        gg[a].pb(b);
        gg[b].pb(a);
    }
}
// a: color-1 vertices, b: color-2 vertices, c: isolated (uncolored) vertices.
vi a, b, c;

// Split the 2*n vertices into two halves of size n with no edge inside a
// half. Prints the two halves (1-based) or "IMPOSSIBLE" when no bipartition
// works.
void solve() {
    // 2-color every component that has at least one edge; a non-bipartite
    // component makes the split impossible.
    // (Fix: the original had an empty `if` body with the failure handling
    // in the `else`; folded into a single negated condition.)
    for(int i = 0; i < 2 * n; i++) {
        if(!used[i] && deg[i] && !dfs(i, 1)) {
            cout << "IMPOSSIBLE";
            return;
        }
    }
    for(int i = 0; i < 2 * n; i++) {
        if(col[i] == 1) a.pb(i + 1);
        else if(col[i] == 2) b.pb(i + 1);
        else c.pb(i + 1);
    }
    if(max(sz(a), sz(b)) > n) {
        // Fix: the original printed "IMPOSSIBLE;;" here — a stray ";;"
        // suffix inconsistent with the other failure branch.
        cout << "IMPOSSIBLE";
        return;
    }
    // Distribute the free (isolated) vertices to pad both halves to n.
    for(int i = 0; i < sz(c); i++) {
        if(sz(a) < n) a.pb(c[i]);
        else b.pb(c[i]);
    }
    assert(sz(a) == sz(b) && sz(a) == n);
    for(auto x : a) cout << x << ' ';
    cout << endl;
    for(auto x : b) cout << x << ' ';
}
// Entry point: fast iostream setup, optional file redirection for local
// debugging, then read input and solve.
int main() {
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    #ifdef LOCAL
        assert(freopen("xxx.in", "r", stdin));
        assert(freopen("xxx.out", "w", stdout));
    #else
        //assert(freopen("input.txt", "r", stdin));
        //assert(freopen("output.txt", "w", stdout));
    #endif
    input();
    solve();
    return 0;
}
import React from 'react';
import {connect} from 'react-redux'
import {addTodo, fetchingCurrentUser} from '../BucketListActions'
// Controlled form for adding a new bucket-list item for the current user.
class BucketListForm extends React.Component{
  state = {
    description: '',   // controlled input value
    completed: false,  // new items start incomplete
  }
  // Load the current user so addToList can attach user_id on submit.
  componentDidMount(){
    this.props.fetchingCurrentUser();
  }
  // Generic controlled-input handler: mirrors the field into state by name.
  handleChange = e => {
    e.preventDefault();
    this.setState({[e.target.name]: e.target.value})
  }
  // Submit handler: dispatches addTodo with the form values.
  // NOTE(review): assumes this.props.user has loaded before submit —
  // confirm; a fast submit before fetch completes would crash on user.id.
  addToList = e => {
    e.preventDefault();
    const {description, completed} = this.state;
    const newBucketlist = {
      description, completed, user_id : this.props.user.id
    }
    this.props.addTodo(newBucketlist)
  }
  render() {
    return (
      <div className="form-group row">
        <div className="col-sm-10">
          <h1>Bucket List</h1>
          <div>
            {}
          </div>
          <form onSubmit = {this.addToList}>
            <input className = 'form-control'
              type = 'text'
              name = 'description'
              placeholder = 'Description'
              value = {this.state.description}
              onChange = {this.handleChange} />
            <button className = 'btn btn-primary btn-md'
              type = 'submit'>Add Bucket List</button>
          </form>
        </div>
      </div>
    )
  }
}
// Map the bucketlist slice of the store onto this component's props.
// Fix: removed a leftover `console.log` that fired on every store update.
const mapStateToProps = state => {
  return {
    completed: state.bucketlistReducer.completed,
    description: state.bucketlistReducer.description,
    user_id: state.bucketlistReducer.user_id,
    addingItem: state.bucketlistReducer.addingItem,
    error: state.bucketlistReducer.error,
    user: state.bucketlistReducer.user
  }
}
export default connect(mapStateToProps, {addTodo,fetchingCurrentUser })(BucketListForm); |
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def sentimentClassifcation(tweet):
    """Label a tweet as 'positive', 'negative' or 'neutral'.

    Uses VADER's compound polarity score with the conventional
    +/-0.05 thresholds.
    """
    analyzer = SentimentIntensityAnalyzer()
    compound_score = analyzer.polarity_scores(tweet)['compound']
    if compound_score >= 0.05:
        return 'positive'
    if compound_score <= -0.05:
        return 'negative'
    return 'neutral'
# Quick smoke test of the classifier on a sample tweet.
tweet = 'I love this new phone!'
print(sentimentClassifcation(tweet))
<gh_stars>0
/**
* @athenna/ioc
*
* (c) <NAME> <<EMAIL>>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/**
 * Placeholder string-utility helper.
 *
 * The explicit empty constructor added nothing over the implicit default
 * constructor (a common lint finding, `no-useless-constructor`), so it was
 * removed. Callers constructing `new StringHelper()` are unaffected.
 */
export class StringHelper {}
|
# Simple CLI: look up a song on Genius by a user-supplied query and print
# its lyrics.
token = '<KEY>'
import lyricsgenius

genius = lyricsgenius.Genius(token)
muzik = input("Şarkı sözü ara:")
song = genius.search_song(muzik)
# Fix: search_song returns None when no match is found; the original code
# then crashed with AttributeError on `song.lyrics`.
if song is not None:
    print(song.lyrics)
import tweepy
import config
import csv
class IDPrinter(tweepy.StreamingClient):
    """Streaming client that appends selected fields of each matching tweet
    to guns.csv, skipping tweets that reference another tweet."""

    def on_tweet(self, tweet):
        """Print and persist one incoming tweet.

        Only tweets with no referenced_tweets (i.e. not retweets, quotes or
        replies-with-reference) are recorded.
        """
        # Strip commas so the text cannot break the CSV row layout.
        tweet_text = tweet.text.replace(",", " ")
        # Fix: compare against None with `is`, not `==` (PEP 8; `==` can be
        # overridden and is the wrong identity test here).
        if tweet.referenced_tweets is None:
            # result = str(tweet.referenced_tweets).find("retweeted")
            # if result == -1:
            data = [
                tweet.created_at,
                tweet.id,
                tweet.author_id,
                tweet_text,
                tweet.in_reply_to_user_id,
                tweet.conversation_id,
            ]
            print(*data)
            # Append one row per tweet; newline='' keeps csv from doubling
            # line endings on Windows.
            with open("guns.csv", "a", encoding="UTF8", newline='') as f:
                csv.writer(f).writerow(data)
# NOTE(review): this client is created but never used below — all work goes
# through `printer`. Presumably leftover from earlier experiments; verify.
streaming_client = tweepy.StreamingClient(config.bearer_token)
# streaming_client.add_rules(tweepy.StreamRule("biden"))
# streaming_client.filter()

# The custom client whose on_tweet() appends rows to guns.csv.
printer = IDPrinter(config.bearer_token)
# printer.add_rules(tweepy.StreamRule("NRA OR gun violence OR gun control OR mass shootings"))
# print(printer.delete_rules(1533181818362646528))
# print(printer.get_rules())

# Header row matching the column order written by IDPrinter.on_tweet().
headers = [
    "tweet.created_at",
    "tweet.id",
    "tweet.author_id",
    "tweet.text",
    "tweet.in_reply_to_user_id",
    "tweet.conversation_id",
]

# Recreate guns.csv with just the header row before streaming starts.
# NOTE(review): opened without newline='' unlike on_tweet(); harmless for a
# single row but inconsistent — confirm intended.
f = open("guns.csv", "w")
writer = csv.writer(f)
writer.writerow(headers)
f.close()

# Start streaming with the previously registered rules, requesting a broad
# set of tweet/user/media/place/poll fields and expansions.
# NOTE(review): several entries bundle two field names in one string
# (e.g. "entities,id", "type,url"). Tweepy joins list items with commas when
# building the request, so this likely still works, but the lists read as if
# items were accidentally merged — verify.
printer.filter(
    tweet_fields=[
        "attachments",
        "author_id",
        "context_annotations",
        "conversation_id",
        "created_at",
        "entities",
        "geo",
        "id",
        "in_reply_to_user_id",
        "lang",
        "possibly_sensitive",
        "public_metrics",
        "referenced_tweets",
        "reply_settings",
        "source",
        "text",
        "withheld",
    ],
    user_fields=[
        "created_at",
        "description",
        "entities,id",
        "location",
        "name",
        "pinned_tweet_id",
        "profile_image_url",
        "protected,public_metrics",
        "url",
        "username",
        "verified",
        "withheld",
    ],
    expansions=[
        "attachments.poll_ids",
        "attachments.media_keys",
        "author_id",
        "geo.place_id",
        "in_reply_to_user_id",
        "referenced_tweets.id",
        "entities.mentions.username",
        "referenced_tweets.id.author_id",
    ],
    media_fields=[
        "duration_ms",
        "height",
        "media_key",
        "preview_image_url",
        "promoted_metrics",
        "public_metrics",
        "type,url",
    ],
    place_fields=[
        "contained_within,country",
        "country_code",
        "full_name",
        "geo,id",
        "name",
        "place_type",
    ],
    poll_fields=["duration_minutes", "end_datetime", "id", "options", "voting_status"],
)
|
#!/bin/sh
## input:
# $1 - harness file path
## Regex fragments (sed basic-regex syntax) used to parse harness files and
## CHiLL test scripts. Note \( \) are BRE *group* delimiters, so patterns
## built from r_lparen/r_rparen capture, they do not match literal parens.
r_lparen='\('
r_rparen='\)'
r_ws_opt='\s*'          # optional whitespace
r_c_comment='\/\/'      # start of a C/C++ line comment
r_py_comment='#'        # start of a Python comment
r_sq="\\'"              # single quote
r_dq="\\\""             # double quote
r_q="[${r_sq}${r_dq}]"  # either quote character
# A quoted, captured name: optional ws, quote, capture group, quote, ws.
r_qname="${r_ws_opt}${r_q}${r_lparen}.*${r_rparen}${r_q}${r_ws_opt}"
# Patterns for the destination()/source()/procedure() calls in test scripts,
# and for the read_IR(source, procedure) call in cuda-chill python scripts.
r_destination_pattern="^${r_ws_opt}destination(${r_qname})"
r_source_pattern="^${r_ws_opt}source(${r_qname})"
r_procedure_pattern="^${r_ws_opt}procedure(${r_qname})"
r_cuda_source_procedure_pattern="^${r_ws_opt}read_IR(${r_qname},${r_qname})"
get_opt_from_c() {
    ## input
    # $1 - file path
    # $2 - option name
    ## output: the value of a "//>name : value" annotation in a C/C++ file
    local pattern="^${r_c_comment}>$2${r_ws_opt}:${r_ws_opt}${r_lparen}.*${r_rparen}"
    # Fix: run sed directly instead of building a command string and eval-ing
    # it (the eval re-split $1 and was a quoting hazard). The echo keeps the
    # original behavior of collapsing the output onto one line.
    echo $(sed -n "s/$pattern/\1/p" "$1")
}
get_opt_from_py() {
    ## input
    # $1 - file path
    # $2 - option name
    ## output: the value of a "#>name : value" annotation in a Python file
    local pattern="^${r_py_comment}>$2${r_ws_opt}:${r_ws_opt}${r_lparen}.*${r_rparen}"
    # Fix: direct sed call instead of eval on a built command string
    # (quoting hazard); output is still collapsed onto one line by echo.
    echo $(sed -n "s/$pattern/\1/p" "$1")
}
get_destination_from_test_script() {
    ## input
    # $1 - file path
    ## output: the quoted argument of the script's destination() call
    # Fix: direct sed call instead of eval on a built command string.
    echo $(sed -n "s/$r_destination_pattern/\1/p" "$1")
}
get_source_from_test_script() {
    ## input
    # $1 - file path
    ## output: the source() argument, or the first read_IR() argument for
    ##         cuda-chill python testcases
    # The suffix-strip test is empty only when $1 matches the cuda-chill path.
    if [ -z "${1##*/cuda-chill/testcases/*.py}" ]; then
        # Fix: direct sed call instead of eval on a built command string.
        echo $(sed -n "s/$r_cuda_source_procedure_pattern/\1/p" "$1")
    else
        echo $(sed -n "s/$r_source_pattern/\1/p" "$1")
    fi
}
get_procedure_from_test_script() {
    ## input
    # $1 - file path
    ## output: the procedure() argument, or the second read_IR() argument for
    ##         cuda-chill python testcases
    if [ -z "${1##*/cuda-chill/testcases/*.py}" ]; then
        # Fix: direct sed call instead of eval on a built command string.
        echo $(sed -n "s/$r_cuda_source_procedure_pattern/\2/p" "$1")
    else
        echo $(sed -n "s/$r_procedure_pattern/\1/p" "$1")
    fi
}
# Resolve key locations relative to this script and the harness file ($1).
chill_dir=`readlink -f $(dirname $0)/../../`
harness_dir=`readlink -f $(dirname $1)`
harness_name=`basename $1`
test_dir=`readlink -f $(dirname $0)`
harness_source=$harness_dir/$harness_name

# Options embedded as "//>name : (value)" annotations in the harness file.
procedure_compiler=`get_opt_from_c $1 procedure_compiler`
procedure_linker=`get_opt_from_c $1 procedure_linker`
script_file=$chill_dir/`get_opt_from_c $1 script`
original_header=$chill_dir/`get_opt_from_c $1 original_header`

# Values extracted from the CHiLL test script itself.
original_source=`dirname $script_file`/`get_source_from_test_script $script_file`
procedure_name=`get_procedure_from_test_script $script_file`
generated_source=`dirname $script_file`/`get_destination_from_test_script $script_file`

# Artifacts the generated runner will create and clean up.
harness_object="$test_dir/harness_object.o"
procedure_source="$test_dir/procedure_generated.${generated_source##*.}"
procedure_object="$test_dir/procedure_object.o"
test_exec="$test_dir/test_exec"

# Everything below emits the runner script on stdout; redirect to capture it.
echo "#!/bin/sh"
echo "## generated by $0"
echo ""
echo "#harness_source = \"$harness_source\""
echo "#procedure_name = \"$procedure_name\""
echo "#procedure_compiler = \"$procedure_compiler\""
echo "#procedure_linker = \"$procedure_linker\""
echo "#original_header = \"$original_header\""
echo "#original_source = \"$original_source\""
echo "#generated_source = \"$generated_source\""
echo "#combinded_procedure_source = \"$procedure_source\""
echo ""
echo ""
#echo "## create and step into the test directory"
#echo "mkdir $test_dir"
#echo "pushd $test_dir > /dev/null"
echo ""
echo "## compile harness"
echo "g++ -std=c++11 -c $harness_source -o $harness_object"
echo ""
echo "## create & compile procedure source"
echo "m4 \\"
echo " -Dproc_name=$procedure_name\\"
echo " -Doriginal_header=$original_header\\"
echo " -Doriginal_source=$original_source\\"
echo " -Dgenerated_source=$generated_source\\"
echo " $harness_dir/c-wrapper.c.m4 > $procedure_source"
echo ""
echo "$procedure_compiler -c $procedure_source -o $procedure_object"
echo ""
echo "## link"
echo "$procedure_linker $harness_object $procedure_object -o $test_exec"
echo ""
echo "## run test"
echo "$test_exec"
echo "err_code=\$?"
echo ""
echo "## remove temparary objects"
echo "rm $harness_object"
echo "rm $procedure_source"
echo "rm $procedure_object"
echo "rm $test_exec"
echo ""
#echo "## leave the test directory and remove it"
#echo "popd > /dev/null"
#echo "#rm -r $test_dir"
echo "## exit with error code"
echo "exit \$err_code"
echo ""
|
from unittest.mock import MagicMock
def simulate_auth_flow():
    """Interactively collect auth-flow choices from the user.

    Returns:
        dict with keys "continue" (bool), "method" (str), "bundled" (bool)
        and "redirection_url" (str), parsed from stdin prompts.
    """
    # Fix: removed `mock_prompt_toolkit = MagicMock()` — it was created and
    # never used (dead code left over from a mocking experiment).
    responses = {}
    # Simulate user interaction; True/False answers are case-insensitive.
    responses["continue"] = input("Continue? (True/False): ").lower() == "true"
    responses["method"] = input("Method? (oauth/api_key): ")
    responses["bundled"] = input("Bundled? (True/False): ").lower() == "true"
    responses["redirection_url"] = input("Redirection URL? (string): ")
    return responses
# Test the function
# Manual smoke test: prompts on stdin and echoes the collected responses.
auth_flow_result = simulate_auth_flow()
print(auth_flow_result)
// TriangleArea returns the area of a triangle given its base and height
// (base * height / 2).
func TriangleArea(base float64, height float64) float64 {
	return base * height / 2
}
func main() {
base := 4.5
height := 3.0
area := TriangleArea(base, height)
fmt.Println("Area of the triangle is", area)
} |
#include <memory>
#include <vector>
#include "DocumentRetriever.h"
/**
 * DocumentRetriever decorator that forwards every call to a wrapped
 * retriever which it owns.
 *
 * NOTE(review): despite the name, nothing visible here commits or waits
 * beyond delegating waitComplete(); presumably that coupling lives inside
 * the wrapped retriever or in callers — confirm against the full source.
 */
class CommitAndWaitDocumentRetriever : public DocumentRetriever {
private:
    // Owned inner retriever all calls are delegated to.
    std::unique_ptr<DocumentRetriever> _retriever;

public:
    // Takes ownership of the wrapped retriever.
    CommitAndWaitDocumentRetriever(std::unique_ptr<DocumentRetriever> retriever)
        : _retriever(std::move(retriever)) {}

    // Delegate selection parsing to the wrapped retriever.
    CachedSelect::SP parseSelect(const vespalib::string &selection) const override {
        return _retriever->parseSelect(selection);
    }

    // Delegate read-guard acquisition.
    IDocumentRetriever::ReadGuard getReadGuard() const override {
        return _retriever->getReadGuard();
    }

    // Delegate visiting of the given local document ids.
    void visitDocuments(const std::vector<int> &lids, DocumentVisitor &visitor, ReadConsistency readConsistency) override {
        _retriever->visitDocuments(lids, visitor, readConsistency);
    }

    // Delegate waiting for completion of the given local document ids.
    void waitComplete(const std::vector<int> &lids) override {
        _retriever->waitComplete(lids);
    }
};
echo '================================'
echo 'Installing XP'
echo ''

# Variables
profile_file=~/.profile
# Fix: the original used $(PWD), which tries to *execute* a command named
# "PWD" (command not found) and left xp_directory empty, breaking every
# path below. Use pwd to capture the current directory.
xp_directory=$(pwd)
xp_script=$xp_directory/src/xp.py
xp_autocomplete_script=$xp_directory/src/autocompletion_xp.sh

# Append the XP alias and autocompletion hooks to the shell profile.
echo ' ' >> "$profile_file"
echo '# XP: Script for Component Management ' >> "$profile_file"
echo 'Creating alias at '$profile_file
echo 'export XP_DIR='$xp_directory >> "$profile_file"
echo 'alias xp="python '$xp_script'"' >> "$profile_file"
echo 'Adding autocomplete feature'
echo 'source "'$xp_autocomplete_script'"' >> "$profile_file"
echo ''
echo 'Update '$profile_file
# Load the new definitions into the current shell session.
source "$profile_file"
echo ' '
echo 'Installation completed!'
echo '================================'
const { Book } = require('../models');

// Static seed payload for the Book model; publish_date is intentionally
// left disabled.
const bookData = [
  {
    title: "Flowers for Algernon",
    description: "Oscar-winning film <NAME>arring <NAME> and <NAME>-a mentally challenged man receives an operation that turns him into a genius...and introduces him to heartache.",
    image_link: "http://books.google.com/books/content?id=_oG_iTxP1pIC&printsec=frontcover&img=1&zoom=1&edge=curl&source=gbs_api",
    author: "<NAME>",
    // publish_date: "2007-12-01",
    reader_id: 1,
    read: true,
  },
]

// Bulk-insert the seed rows; returns the promise from bulkCreate.
const bookSeed = () => Book.bulkCreate(bookData);

module.exports = bookSeed;
<reponame>zhunrong/myServer<gh_stars>0
import { getRepository } from 'typeorm';
import UserPicture from '../entity/entity.userPicture';
export function getPictures() {
const repository = getRepository(UserPicture);
return repository.find();
}
// Parameters required to persist one user-picture record.
interface ISave {
  uid: string;       // owning user's id
  directory: string; // storage directory of the uploaded file
  filename: string;  // stored file name
}
export function save(params: ISave) {
const repository = getRepository(UserPicture);
const { uid, directory, filename } = params;
const userPicture = new UserPicture();
userPicture.uid = uid;
userPicture.directory = directory;
userPicture.filename = filename;
return repository.save(userPicture);
}
// Fetch all pictures belonging to the given user.
// NOTE(review): passes the conditions object directly to find(); this is the
// TypeORM 0.2-style API — newer versions expect { where: { uid } }. Verify
// against the project's TypeORM version.
export function getPicturesByUserId(uid: string) {
  const repository = getRepository(UserPicture);
  return repository.find({
    uid,
  });
}
|
/*
Jameleon - An automation testing tool..
Copyright (C) 2003-2007 <NAME> (<EMAIL>)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package net.sf.jameleon.result;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import net.sf.jameleon.bean.Attribute;
import net.sf.jameleon.bean.FunctionalPoint;
import net.sf.jameleon.util.XMLHelper;
/** JUnit 3 tests for SessionResult: construction, child results, and XML output. */
public class SessionResultTest extends TestCase {
    private SessionResult sr;
    private static final FunctionalPoint tag = new FunctionalPoint();

    /** Command-line entry point: run the suite with the text UI runner. */
    public static void main(String args[]) {
        junit.textui.TestRunner.run( suite() );
    }

    public static Test suite() {
        return new TestSuite( SessionResultTest.class );
    }

    public SessionResultTest( String name ) {
        super( name );
    }

    /** Create a fresh SessionResult around the shared tag before each test. */
    public void setUp(){
        sr = new SessionResult(tag);
    }

    /** The single-argument constructor must store the supplied tag. */
    public void testConstructor1(){
        assertNotNull("tag", sr.getTag());
        assertEquals("tag", tag, sr.getTag());
    }

    /** The two-argument constructor must record the parent result. */
    public void testConstructor2(){
        SessionResult sr2 = new SessionResult(tag, sr);
        assertTrue(sr == sr2.parentResults);
    }

    /** addChildResult must append to the (initially empty) child list. */
    public void testAddChildResult(){
        assertEquals("# of function results", 0, sr.getChildrenResults().size());
        FunctionResult fr = new FunctionResult();
        sr.addChildResult(fr);
        assertEquals("# of function results", 1, sr.getChildrenResults().size());
        assertTrue("function result", fr == sr.getChildrenResults().get(0));
    }

    /**
     * toXML must expose the execution time, surface the tag's attributes as
     * elements, and invoke the functional point's own toXML exactly once.
     */
    public void testToXML(){
        String xml = sr.toXML();
        XMLHelper xh = new XMLHelper(xml);
        assertEquals("application", "0", xh.getValueFromXPath("/session-result/execution-time-millis"));
        Attribute attr = new Attribute();
        attr.setName("application");
        attr.setValue("app");
        MockFunctionalPoint mfp = new MockFunctionalPoint();
        mfp.addAttribute(attr);
        sr.setTag(mfp);
        xml = sr.toXML();
        xh = new XMLHelper(xml);
        assertEquals("application", "app", xh.getValueFromXPath("/session-result/application"));
        assertEquals("functional point toXML got called", 1, mfp.toXMLNumOfCalls);
    }

    /** FunctionalPoint stub that counts toXML calls and returns empty XML. */
    private class MockFunctionalPoint extends FunctionalPoint{
        public int toXMLNumOfCalls;
        public String toXML() {
            toXMLNumOfCalls ++;
            return "";
        }
    }
}
/** Utility for reversing strings. */
public class ReverseString {
    /**
     * Returns {@code str} reversed.
     *
     * Uses {@link StringBuilder#reverse()} instead of a manual char loop;
     * besides being the standard idiom, it keeps surrogate pairs intact, so
     * supplementary (non-BMP) characters are not corrupted the way a
     * code-unit-by-code-unit reversal corrupts them.
     *
     * @param str the string to reverse (must be non-null)
     * @return the reversed string
     */
    public static String reverseString(String str) {
        return new StringBuilder(str).reverse().toString();
    }
}
// ESLint configuration: TypeScript-aware base config, with a detailed style
// ruleset applied only to plain JS and Svelte files (TS files get defaults).
module.exports = {
  env: {
    browser: true,
    es2021: true,
    node: true,
  },
  extends: [
    'eslint:recommended',
    'plugin:@typescript-eslint/recommended',
  ],
  parser: '@typescript-eslint/parser',
  parserOptions: {
    ecmaVersion: 'latest',
    sourceType: 'module',
  },
  plugins: [
    '@typescript-eslint',
    'svelte3',
  ],
  ignorePatterns: [
    // personal testing files
    '/fiddle.js',
    // globbed (aka auto-generated) files
    'globbed-*.js',
  ],
  overrides: [
    // Route .svelte files through the svelte3 processor.
    {
      files: [ '*.svelte' ],
      processor: 'svelte3/svelte3',
    },
    // At the moment I'm still very new to TS, so I just don't have any styles
    // applied to TS files, and I let WebStorm handle the auto-formatting for me.
    {
      files: [
        '*.js',
        '*.svelte',
      ],
      rules: {
        'array-bracket-spacing': [
          'error',
          'always',
          {
            'objectsInArrays': true,
            'arraysInArrays': true,
          },
        ],
        'block-spacing': [
          'error',
          'always',
        ],
        'brace-style': [
          'error',
          '1tbs',
          {
            'allowSingleLine': true,
          },
        ],
        'comma-dangle': [
          'error',
          'always-multiline',
        ],
        'comma-spacing': [
          'error',
          {
            'before': false,
            'after': true,
          },
        ],
        'eol-last': [
          'error',
          'always',
        ],
        'eqeqeq': [
          'error',
          'always',
        ],
        'indent': [
          'error',
          'tab',
        ],
        'keyword-spacing': [
          'error',
          {
            'before': true,
          },
        ],
        'linebreak-style': [
          'error',
          'unix',
        ],
        'no-eval': 'error',
        'no-implied-eval': 'error',
        'no-irregular-whitespace': [
          'error',
          {
            'skipStrings': false,
          },
        ],
        'no-new': 'error',
        'no-return-await': 'error',
        'no-unexpected-multiline': 'error',
        'no-useless-rename': 'error',
        'no-var': [
          'error',
        ],
        'object-curly-spacing': [
          'error',
          'always',
        ],
        'quotes': [
          'error',
          'single',
          {
            'avoidEscape': true,
          },
        ],
        'semi': [
          'error',
          'never',
        ],
        'space-before-blocks': [
          'error',
          'always',
        ],
        'space-before-function-paren': [
          'error',
          {
            'anonymous': 'always',
            'asyncArrow': 'always',
            'named': 'never',
          },
        ],
        'space-in-parens': [
          'error',
          'never',
        ],
        'valid-jsdoc': [
          'error',
          {
            'requireReturn': false,
          },
        ],
      },
    },
  ],
}
|
<gh_stars>0
package org.apache.tapestry5.ioc.internal;

/**
 * Service interface whose only method is {@link Object#toString()}.
 *
 * NOTE(review): redeclaring toString() on the interface presumably forces
 * generated service proxies to delegate toString() to the implementation
 * instead of using the proxy's own default — confirm against the proxy
 * generation code.
 */
public interface ToStringService
{
    @Override
    String toString();
}
|
#!/bin/bash
# Generate AAPG (random RISC-V program) tests and convert the resulting ELF
# files into coe/dat/mif/hex memory images under $BASEDIR/build/aapg.
set -e

## positional arguments
export RISCV=$1    # RISC-V toolchain location
export MARCH=$2    # target -march
export MABI=$3     # target -mabi
export ITER=$4     # number of iterations
export PYTHON=$5   # python interpreter to run the converters with
export OFFSET=$6   # load offset passed to the converters
export BASEDIR=$7  # project root
export AAPG=$8     # aapg executable
export CONFIG=$9   # name of the aapg yaml config (without extension)

# Converter scripts.
ELF2COE=$BASEDIR/soft/py/elf2coe.py
ELF2DAT=$BASEDIR/soft/py/elf2dat.py
ELF2MIF=$BASEDIR/soft/py/elf2mif.py
ELF2HEX=$BASEDIR/soft/py/elf2hex.py

# Recreate the per-run build tree (mkdir -p replaces the manual existence
# checks of the original).
mkdir -p "${BASEDIR}/build"
rm -rf "${BASEDIR}/build/aapg"
mkdir -p "${BASEDIR}/build/aapg/elf" \
         "${BASEDIR}/build/aapg/dump" \
         "${BASEDIR}/build/aapg/coe" \
         "${BASEDIR}/build/aapg/dat" \
         "${BASEDIR}/build/aapg/mif" \
         "${BASEDIR}/build/aapg/hex"

# Set up aapg with the requested config and generate the programs.
mkdir -p "${BASEDIR}/soft/src/aapg/setup"
cd "${BASEDIR}/soft/src/aapg/setup"
${AAPG} setup
cp "${BASEDIR}/soft/src/aapg/${CONFIG}.yaml" "${BASEDIR}/soft/src/aapg/setup/config.yaml"
${AAPG} gen --arch rv32
make -f "${BASEDIR}/soft/src/aapg/Makefile" || exit

# Convert every generated ELF into the memory-image formats.
# Fix: the loop bodies invoked the converters on "$(unknown)" placeholders
# instead of the loop variable; they now receive "$filename".
shopt -s nullglob
for filename in "${BASEDIR}"/build/aapg/elf/*.elf; do
    echo "$filename"
    ${PYTHON} ${ELF2COE} "$filename" 0x0 ${OFFSET} "${BASEDIR}/build/aapg"
    ${PYTHON} ${ELF2DAT} "$filename" 0x0 ${OFFSET} "${BASEDIR}/build/aapg"
    ${PYTHON} ${ELF2MIF} "$filename" 0x0 ${OFFSET} "${BASEDIR}/build/aapg"
    ${PYTHON} ${ELF2HEX} "$filename" 0x0 ${OFFSET} "${BASEDIR}/build/aapg"
done

# Move objdump listings next to the other artifacts (same $(unknown) fix).
shopt -s nullglob
for filename in "${BASEDIR}"/build/aapg/elf/*.dump; do
    mv "$filename" "${BASEDIR}/build/aapg/dump/"
done
|
# Copyright (c) 2020 Qualcomm Innovation Center, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-3-Clause-Clear

# weston.sh: script to start weston display server
# Stage the weston startup script onto the device's /data partition.
cp weston.sh /data

# TFLite posenet model
# Stage the quantized posenet model where the camera stack expects it.
cp posenet_mobilenet_v1_075_481_641_quant.tflite /data/misc/camera
|
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe

#NOTE: Pull images and lint chart
make pull-images libvirt

#NOTE: Deploy command
OPENSTACK_VERSION=${OPENSTACK_VERSION:-"ocata"}
# Ocata needs the loci image overrides plus the opencontrail backend values.
if [ "$OPENSTACK_VERSION" == "ocata" ]; then
  values="--values=./tools/overrides/releases/ocata/loci.yaml "
  values+="--values=./tools/overrides/backends/opencontrail/libvirt-ocata.yaml "
fi

# Mount the host hugepages directory into the libvirt container.
# (YAML indentation below reconstructed — original whitespace was lost.)
HUGE_PAGES_DIR=${HUGE_PAGES_DIR:-"/dev/hugepages"}
tee /tmp/libvirt_mount.yaml << EOF
pod:
  mounts:
    libvirt:
      libvirt:
        volumeMounts:
          - name: hugepages-dir
            mountPath: $HUGE_PAGES_DIR
        volumes:
          - name: hugepages-dir
            hostPath:
              path: $HUGE_PAGES_DIR
EOF
values+="--values=/tmp/libvirt_mount.yaml "

# Insert $values to OSH_EXTRA_HELM_ARGS_LIBVIRT
OSH_EXTRA_HELM_ARGS_LIBVIRT="$values "$OSH_EXTRA_HELM_ARGS_LIBVIRT

helm upgrade --install libvirt ./libvirt \
  --namespace=openstack \
  --values=./tools/overrides/backends/opencontrail/libvirt.yaml \
  ${OSH_EXTRA_HELM_ARGS} \
  ${OSH_EXTRA_HELM_ARGS_LIBVIRT}

#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack

#NOTE: Validate Deployment info
helm status libvirt
|
#! /bin/sh -e
# Directory where mirrored tarballs are published.
distDir=/data/webserver/dist/tarballs

# Scan every Nix expression under $1 for "url = ...;" attributes, mirror the
# referenced files into $distDir (only when $doCopy is set), and verify
# already-present copies against the md5/sha1/sha256 recorded next to each url.
find "$1" -name "*.nix" | while read fn; do
    grep -E '^ *url = ' "$fn" | while read line; do
        if url=$(echo "$line" | sed 's^url = \(.*\);^\1^'); then
            # Skip URLs that must not (or need not) be mirrored.
            if ! echo "$url" | grep -q -E "www.cs.uu.nl|nixos.org|.stratego-language.org|java.sun.com|ut2004|linuxq3a|RealPlayer|Adbe|belastingdienst|microsoft|armijn/.nix|sun.com|archive.eclipse.org"; then
                base="$(basename "$url")"
                newPath="$distDir/$base"
                if test -e "$newPath"; then
                    #echo "$fn: checking hash of existing $newPath"
                    # Pull the expected hash from the line following the url:
                    # try md5 first, then sha256 (converting 64-char hex to
                    # base32), then sha1.
                    hash=$(fgrep -A 1 "$url" "$fn" | grep md5 | sed 's^.*md5 = \"\(.*\)\";.*^\1^')
                    hashType=md5
                    if test -z "$hash"; then
                        hash=$(fgrep -A 1 "$url" "$fn" | grep sha256 | sed 's^.*sha256 = \"\(.*\)\";.*^\1^')
                        hashType="sha256 --base32"
                        if test -n "$hash"; then
                            if test "${#hash}" = 64; then
                                hash=$(nix-hash --to-base32 --type sha256 $hash)
                            fi
                        else
                            hash=$(fgrep -A 1 "$url" "$fn" | grep sha1 | sed 's^.*sha1 = \"\(.*\)\";.*^\1^')
                            hashType="sha1"
                            if test -z "$hash"; then
                                echo "WARNING: $fn: cannot figure out the hash for $url"
                                continue
                            fi
                        fi
                    fi
                    #echo "HASH = $hash"
                    # Compare the on-disk copy with the recorded hash.
                    if ! test "$(nix-hash --type $hashType --flat "$newPath")" = "$hash"; then
                        echo "WARNING: $fn: $newPath exists and differs, hash should be $hash!"
                        continue
                    fi
                else
                    # mirror:// URLs are handled elsewhere; skip them.
                    if echo $url | grep -q 'mirror://'; then
                        #echo "$fn: skipping mirrored $url"
                        continue
                    fi
                    echo "$fn: $url -> $newPath"
                    # Download only when explicitly enabled via $doCopy;
                    # fetch to a .tmp file and rename so failures leave
                    # nothing half-written.
                    if test -n "$doCopy"; then
                        if ! curl --disable-epsv --fail --location --max-redirs 20 --remote-time \
                            "$url" --output "$newPath".tmp; then
                            continue
                        fi
                        mv -f "$newPath".tmp "$newPath"
                    fi
                fi
                # Maintain by-hash symlinks for content-addressed lookups.
                if test -n "$doCopy" -a -e "$newPath"; then
                    md5=$(nix-hash --flat --type md5 "$newPath")
                    ln -sfn "../$base" $distDir/md5/$md5
                    sha1=$(nix-hash --flat --type sha1 "$newPath")
                    ln -sfn "../$base" $distDir/sha1/$sha1
                    sha256=$(nix-hash --flat --type sha256 "$newPath")
                    ln -sfn "../$base" $distDir/sha256/$sha256
                    ln -sfn "../$base" $distDir/sha256/$(nix-hash --type sha256 --to-base32 "$sha256")
                fi
            fi
        fi
    done
done
echo DONE
|
# Convert each whitespace-separated results table to CSV
# (results_<name>.txt -> <name>.txt).
for name in abs_atk_def_sensor abs_atk_no_def_sensor atk_def_sensor no_atk_sensor no_def_sensor; do
    sed -r 's/ +/,/g' "results_${name}.txt" > "${name}.txt"
done
|
import { NgModule } from '@angular/core';
import { RouterModule, Routes } from '@angular/router';
// Route table is currently empty and never registered.
// NOTE(review): RouterModule is imported above but unused — presumably
// RouterModule.forRoot(routes) belongs in the imports array below; confirm
// the intended routing setup.
const routes: Routes = [];

import { QuoteDetailComponent } from './quote-detail/quote-detail.component';

// Root module wiring the quote components together.
// NOTE(review): AppComponent, QuoteComponent, BrowserModule and
// AppRoutingModule are referenced without visible imports in this chunk —
// verify they are imported in the full source file.
@NgModule({
  declarations: [
    AppComponent,
    QuoteComponent,
    QuoteDetailComponent
  ],
  imports: [
    BrowserModule,
    AppRoutingModule
  ],
  providers: [],
  bootstrap: [AppComponent]
})
export class AppModule { }
# Jbuilder partial: render one vertex as JSON (id + timestamps) plus its
# canonical JSON URL.
json.extract! vertex, :id, :created_at, :updated_at
json.url vertex_url(vertex, format: :json)
|
#!/bin/sh
# Run the prio-preempt C test files under the given profile
# (defaults to "default").
profile=${1:-default}

cd "$(dirname "$0")" # Move to test directory

# Fix: the original used `[ ! $SCRIPTS_DIR ]`, which relies on word splitting
# of an unquoted expansion; test emptiness/unset explicitly instead.
if [ -z "${SCRIPTS_DIR:-}" ]; then
    # assume we're running standalone
    export SCRIPTS_DIR=../../scripts/
fi

. "$SCRIPTS_DIR/setenv.sh"

# Warning: tests args are now set in profiles
"$SCRIPTS_DIR/run_c_files.sh" "$profile" prio-preempt
|
package ru.job4j.analysis;
import org.junit.Test;
import static org.junit.Assert.assertThat;
import static org.hamcrest.Matchers.is;
import java.util.ArrayList;
import java.util.List;
/**
* AnalysisTest.
* @author <NAME> (<EMAIL>)
* @version $Id$
* @since 0.1
*/
public class AnalysisTest {
    // Previous and current user snapshots compared by Analysis.diff().
    List<Analysis.User> previous = new ArrayList<>();
    List<Analysis.User> current = new ArrayList<>();
    Analysis analysis = new Analysis();

    /** Two empty lists: nothing added, changed or deleted. */
    @Test
    public void whenEmptyLists() {
        Analysis.Info expected = new Analysis.Info(0, 0, 0);
        assertThat(analysis.diff(previous, current).equals(expected), is(true));
    }

    /** Empty previous list: every current user counts as added. */
    @Test
    public void whenPreviousEmpty() {
        current.add(new Analysis.User(4, "Jim"));
        Analysis.Info expected = new Analysis.Info(1, 0, 0);
        assertThat(analysis.diff(previous, current).equals(expected), is(true));
    }

    /** Empty current list: every previous user counts as deleted. */
    @Test
    public void whenCurrentEmpty() {
        previous.add(new Analysis.User(4, "Jim"));
        Analysis.Info expected = new Analysis.Info(0, 0, 1);
        assertThat(analysis.diff(previous, current).equals(expected), is(true));
    }

    /** Same ids, one renamed user: exactly one "changed". */
    @Test
    public void whenOnlyChanged() {
        previous.add(new Analysis.User(1, "Alex"));
        previous.add(new Analysis.User(2, "Ugin"));
        previous.add(new Analysis.User(3, "John"));
        current.addAll(previous);
        current.set(1, new Analysis.User(2, "Evgen"));
        Analysis.Info expected = new Analysis.Info(0, 1, 0);
        assertThat(analysis.diff(previous, current).equals(expected), is(true));
    }

    /** One added, one renamed, one removed in the same diff. */
    @Test
    public void whenAllPositionsChanged() {
        previous.add(new Analysis.User(1, "Alex"));
        previous.add(new Analysis.User(2, "Ugin"));
        previous.add(new Analysis.User(3, "John"));
        current.addAll(previous);
        current.add(new Analysis.User(4, "Jim"));
        current.remove(0);
        current.set(0, new Analysis.User(2, "Evgen"));
        Analysis.Info expected = new Analysis.Info(1, 1, 1);
        assertThat(analysis.diff(previous, current).equals(expected), is(true));
    }

    /** Current shrinks: one deleted plus one changed, none added. */
    @Test
    public void whenCurrentListLessThanPrevious() {
        previous.add(new Analysis.User(1, "Alex"));
        previous.add(new Analysis.User(2, "Ugin"));
        previous.add(new Analysis.User(3, "John"));
        current.addAll(previous);
        current.remove(0);
        current.set(0, new Analysis.User(2, "Evgen"));
        Analysis.Info expected = new Analysis.Info(0, 1, 1);
        assertThat(analysis.diff(previous, current).equals(expected), is(true));
    }

    /** Current grows: two added, one changed, one deleted. */
    @Test
    public void whenCurrentListGreaterThanPrevious() {
        previous.add(new Analysis.User(1, "Alex"));
        previous.add(new Analysis.User(2, "Ugin"));
        previous.add(new Analysis.User(3, "John"));
        current.addAll(previous);
        current.add(new Analysis.User(4, "Jim"));
        current.add(new Analysis.User(5, "Lisa"));
        current.remove(0);
        current.set(0, new Analysis.User(2, "Evgen"));
        Analysis.Info expected = new Analysis.Info(2, 1, 1);
        assertThat(analysis.diff(previous, current).equals(expected), is(true));
    }
}
<filename>src/network_flow/Boj1298.java<gh_stars>1-10
package network_flow;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.StringTokenizer;
/**
 *
 * @author minchoba
 * BOJ 1298: Find the owner of each notebook (maximum bipartite matching)
 *
 * @see https://www.acmicpc.net/problem/1298/
 *
 */
public class Boj1298 {
    // connected[i] = notebooks claimed by person i (0-based indices).
    private static ArrayList<Integer>[] connected;
    // note[j] = person currently matched to notebook j, or -1 if unmatched.
    private static int[] note;

    /** Reads N people / M claims from stdin and prints the maximum matching size. */
    public static void main(String[] args) throws Exception{
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringTokenizer st = new StringTokenizer(br.readLine());

        int N = Integer.parseInt(st.nextToken());
        int M = Integer.parseInt(st.nextToken());

        connected = new ArrayList[N];
        for(int i = 0; i < N; i++) {
            connected[i] = new ArrayList<>();
        }

        // Each claim line is "person notebook" (1-based), stored 0-based.
        while(M-- > 0) {
            st = new StringTokenizer(br.readLine());
            connected[Integer.parseInt(st.nextToken()) - 1].add(Integer.parseInt(st.nextToken()) - 1);
        }

        System.out.println(bipartiteMatch(N));
    }

    /** Kuhn's algorithm: attempt an augmenting path from every person. */
    private static int bipartiteMatch(int n) {
        int count = 0;
        note = new int[n];
        Arrays.fill(note, -1);

        for(int start = 0; start < n; start++) {
            boolean[] visit = new boolean[n];
            if(dfs(visit, start)) count++;
        }
        return count;
    }

    /** DFS step: claim an unowned notebook, or displace its owner recursively. */
    private static boolean dfs(boolean[] visit, int current) {
        if(visit[current]) return false;
        visit[current] = true;

        for(int next: connected[current]) {
            if(note[next] == -1 || dfs(visit, note[next])) { // give the notebook to `current`, re-homing any previous owner
                note[next] = current;
                return true;
            }
        }
        return false;
    }
}
|
#!/bin/bash
set -euo pipefail

# For the conformance/serial suite: overwrite the shared exclusion list with
# the serial tests that must be skipped, then fall through to the guard below
# (which exits because TEST_TYPE is not "conformance-parallel").
if [ "${TEST_TYPE}" = "conformance-serial" ]; then
  cat > "${SHARED_DIR}/excluded_tests" << EOF
"[sig-imageregistry][Serial][Suite:openshift/registry/serial] Image signature workflow can push a signed image to openshift registry and verify it [Suite:openshift/conformance/serial]"
EOF
fi

# Everything past this point applies only to the conformance/parallel suite.
if [ "${TEST_TYPE}" != "conformance-parallel" ]; then
  exit 0
fi
# List of exclude tests from conformance/parallel suite for 4.7 & 4.6
if [ "${BRANCH}" == "4.7" ] && [ "${ARCH}" == "ppc64le" ]; then
# 4.7 on ppc64le: write the exclusion list consumed by the test runner.
# NOTE(review): the heredoc body is taken verbatim as quoted test names —
# do not indent, wrap, or comment inside it.
cat > "${SHARED_DIR}/excluded_tests" << EOF
"[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-apps][Feature:DeploymentConfig] deploymentconfigs when tagging images should successfully tag the deployed image [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:HTPasswdAuth] HTPasswd IDP should successfully configure htpasswd and be responsive [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:LDAP] LDAP IDP should authenticate against an ldap server [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the authorize URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the grant URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the allow all IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the bootstrap IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for when there is only one IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the logout URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the root URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token request URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build have source revision metadata started build should contain source revision information [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build with empty source started build should build even with an empty source in build config [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build without output image building from templates should create an image from a S2i template without an output image reference defined [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] oc new-app should fail with a --name longer than 58 characters [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] oc new-app should succeed with a --name of 58 characters [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] oc new-app should succeed with an imagestream [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] result image should have proper labels set S2I build from a template should create a image from \"test-s2i-build.json\" template with proper Docker labels [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] s2i build with a quota Building from a template should create an s2i build with a quota and run it [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and pass with a privileged SCC [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for s2i [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in s2i build environment variables [Suite:openshift/conformance/parallel]"
"[sig-cli] oc debug ensure it works with image streams [Suite:openshift/conformance/parallel]"
"[sig-cli] oc observe works as expected [Suite:openshift/conformance/parallel]"
"[sig-devex][Feature:Templates] templateinstance readiness test should report ready soon after all annotated objects are ready [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageAppend] Image append should create images by appending them [Suite:openshift/conformance/parallel]"
"[sig-instrumentation] Prometheus when installed on the cluster shouldn't report any alerts in firing state apart from Watchdog and AlertmanagerReceiversNotConfigured [Early] [Suite:openshift/conformance/parallel]"
"[sig-network] Internal connectivity for TCP and UDP on ports 9000-9999 is allowed [Suite:openshift/conformance/parallel]"
"[sig-storage] CSI mock volume CSI FSGroupPolicy [LinuxOnly] should not modify fsGroup if fsGroupPolicy=None [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] Services should be able to create a functioning NodePort service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]"
"[sig-storage] Dynamic Provisioning [k8s.io] GlusterDynamicProvisioner should create and delete persistent volumes [fast] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]"
"[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should be able to connect to a service that is idled because a GET on the route will unidle it [Suite:openshift/conformance/parallel/minimal]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should work with Ingress,Egress specified together [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]"
EOF
elif [ "${BRANCH}" == "4.7" ] && [ "${ARCH}" == "s390x" ]; then
# 4.7 on s390x: same mechanism, arch-specific exclusion list.
# NOTE(review): heredoc body is consumed verbatim — keep it unmodified.
cat > "${SHARED_DIR}/excluded_tests" << EOF
"[sig-auth][Feature:HTPasswdAuth] HTPasswd IDP should successfully configure htpasswd and be responsive [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:LDAP] LDAP IDP should authenticate against an ldap server [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the authorize URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the grant URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the allow all IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the bootstrap IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for when there is only one IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the logout URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the root URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token request URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build have source revision metadata started build should contain source revision information [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build with empty source started build should build even with an empty source in build config [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build without output image building from templates should create an image from a S2i template without an output image reference defined [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] clone repository using git:// protocol should clone using git:// if no proxy is configured [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] result image should have proper labels set S2I build from a template should create a image from \"test-s2i-build.json\" template with proper Docker labels [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] s2i build with a quota Building from a template should create an s2i build with a quota and run it [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and pass with a privileged SCC [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for s2i [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in s2i build environment variables [Suite:openshift/conformance/parallel]"
"[sig-devex][Feature:Templates] templateinstance readiness test should report ready soon after all annotated objects are ready [Suite:openshift/conformance/parallel]"
"[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] CSI mock volume CSI Volume expansion should expand volume by restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] Dynamic Provisioning [k8s.io] GlusterDynamicProvisioner should create and delete persistent volumes [fast] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should be able to connect to a service that is idled because a GET on the route will unidle it [Suite:openshift/conformance/parallel/minimal]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should work with Ingress,Egress specified together [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]"
EOF
elif [ "${BRANCH}" == "4.6" ] && [ "${ARCH}" == "ppc64le" ]; then
# 4.6 on ppc64le: arch-specific exclusion list.
# Fix: the "[Headers] expected headers returned from the grant URL" entry was
# listed twice in this heredoc; the redundant second occurrence is removed.
# NOTE(review): heredoc body is consumed verbatim — keep it unmodified.
cat > "${SHARED_DIR}/excluded_tests" << EOF
"[sig-apps][Feature:DeploymentConfig] deploymentconfigs when tagging images should successfully tag the deployed image [Suite:openshift/conformance/parallel]"
"[sig-arch] Managed cluster should have no crashlooping pods in core namespaces over four minutes [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:HTPasswdAuth] HTPasswd IDP should successfully configure htpasswd and be responsive [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:LDAP] LDAP IDP should authenticate against an ldap server [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:LDAP] LDAP should start an OpenLDAP test server [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the authorize URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the grant URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the allow all IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the bootstrap IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for when there is only one IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the logout URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the root URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token request URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] well-known endpoint should be reachable [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build can reference a cluster service with a build being created from new-build should be able to run a build that references a cluster service [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build have source revision metadata started build should contain source revision information [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build with empty source started build should build even with an empty source in build config [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build without output image building from templates should create an image from a S2i template without an output image reference defined [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] custom build with buildah being created from new-build should complete build with custom builder image [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] result image should have proper labels set S2I build from a template should create a image from \"test-s2i-build.json\" template with proper Docker labels [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] s2i build with a quota Building from a template should create an s2i build with a quota and run it [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and pass with a privileged SCC [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for s2i [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in s2i build environment variables [Suite:openshift/conformance/parallel]"
"[sig-cluster-lifecycle] Pods cannot access the /config/master API endpoint [Suite:openshift/conformance/parallel]"
"[sig-devex][Feature:Templates] templateinstance readiness test should report ready soon after all annotated objects are ready [Suite:openshift/conformance/parallel]"
"[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should be able to connect to a service that is idled because a GET on the route will unidle it [Suite:openshift/conformance/parallel/minimal]"
"[sig-network][Feature:Router] The HAProxy router should expose prometheus metrics for a route [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should override the route host for overridden domains with a custom value [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should override the route host with a custom value [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should run even if it has no access to update status [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should serve a route that points to two services and respect weights [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should serve routes that were created from an ingress [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should serve the correct routes when scoped to a single namespace and label set [Suite:openshift/conformance/parallel]"
"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with minimum ready seconds set should not transition the deployment to Complete before satisfied [Suite:openshift/conformance/parallel]"
"[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] Dynamic Provisioning [k8s.io] GlusterDynamicProvisioner should create and delete persistent volumes [fast] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] multicast when using one of the plugins 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should allow multicast traffic in namespaces where it is enabled [Suite:openshift/conformance/parallel]"
"[sig-network] multicast when using one of the plugins 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should block multicast traffic in namespaces where it is disabled [Suite:openshift/conformance/parallel]"
"[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-apps][Feature:Jobs] Users should be able to create and run a job in a user project [Suite:openshift/conformance/parallel]"
"[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-cli] oc rsh rsh specific flags should work well when access to a remote shell [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageAppend] Image append should create images by appending them [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageExtract] Image extract should extract content from an image [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageInfo] Image info should display information about images [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageLayers] Image layer subresource should return layers from tagged images [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the Deployment gets the resolve-names annotation later [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the object has the resolve-names annotation [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageLookup] Image policy should update standard Kube object image fields when local names are on [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageTriggers] Annotation trigger reconciles after the image is overwritten [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:Image] oc tag should change image reference for internal images [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:Image] oc tag should preserve image reference for external images [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:Image] oc tag should work when only imagestreams api is available [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should support reencrypt to services backed by a serving certificate automatically [Suite:openshift/conformance/parallel]"
EOF
elif [ "${BRANCH}" == "4.6" ] && [ "${ARCH}" == "s390x" ]; then
cat > "${SHARED_DIR}/excluded_tests" << EOF
"[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the Deployment gets the resolve-names annotation later [Suite:openshift/conformance/parallel]"
"[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-cluster-lifecycle] Pods cannot access the /config/master API endpoint [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should serve the correct routes when running with the haproxy config manager [Suite:openshift/conformance/parallel]"
"[sig-apps][Feature:DeploymentConfig] deploymentconfigs when tagging images should successfully tag the deployed image [Suite:openshift/conformance/parallel]"
"[sig-apps][Feature:Jobs] Users should be able to create and run a job in a user project [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:HTPasswdAuth] HTPasswd IDP should successfully configure htpasswd and be responsive [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:LDAP] LDAP IDP should authenticate against an ldap server [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:LDAP] LDAP should start an OpenLDAP test server [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the authorize URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the grant URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the allow all IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the bootstrap IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for when there is only one IDP [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the logout URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the root URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token request URL [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]"
"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build can reference a cluster service with a build being created from new-build should be able to run a build that references a cluster service [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build have source revision metadata started build should contain source revision information [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build with empty source started build should build even with an empty source in build config [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] build without output image building from templates should create an image from a S2i template without an output image reference defined [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] result image should have proper labels set S2I build from a template should create a image from \"test-s2i-build.json\" template with proper Docker labels [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] s2i build with a quota Building from a template should create an s2i build with a quota and run it [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and pass with a privileged SCC [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for s2i [Suite:openshift/conformance/parallel]"
"[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in s2i build environment variables [Suite:openshift/conformance/parallel]"
"[sig-cli] oc rsh rsh specific flags should work well when access to a remote shell [Suite:openshift/conformance/parallel]"
"[sig-devex][Feature:Templates] templateinstance readiness test should report ready soon after all annotated objects are ready [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageAppend] Image append should create images by appending them [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageExtract] Image extract should extract content from an image [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageInfo] Image info should display information about images [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageLayers] Image layer subresource should return layers from tagged images [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the object has the resolve-names annotation [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageLookup] Image policy should update standard Kube object image fields when local names are on [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:ImageTriggers] Annotation trigger reconciles after the image is overwritten [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:Image] oc tag should change image reference for internal images [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:Image] oc tag should preserve image reference for external images [Suite:openshift/conformance/parallel]"
"[sig-imageregistry][Feature:Image] oc tag should work when only imagestreams api is available [Suite:openshift/conformance/parallel]"
"[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-network] multicast when using one of the plugins 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should allow multicast traffic in namespaces where it is enabled [Suite:openshift/conformance/parallel]"
"[sig-network] network isolation when using a plugin that isolates namespaces by default should allow communication from non-default to default namespace on a different node [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should expose prometheus metrics for a route [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should override the route host for overridden domains with a custom value [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should override the route host with a custom value [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should run even if it has no access to update status [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should serve a route that points to two services and respect weights [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should serve routes that were created from an ingress [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should serve the correct routes when scoped to a single namespace and label set [Suite:openshift/conformance/parallel]"
"[sig-network][Feature:Router] The HAProxy router should support reencrypt to services backed by a serving certificate automatically [Suite:openshift/conformance/parallel]"
"[sig-storage] Dynamic Provisioning [k8s.io] GlusterDynamicProvisioner should create and delete persistent volumes [fast] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]"
"[sig-storage] PersistentVolumes GCEPD should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach [Suite:openshift/conformance/parallel] [Suite:k8s]"
EOF
else
echo "Executing all tests"
fi
# If an excluded-tests file was generated above, echo it into the build log
# so it is obvious which conformance/parallel tests will be skipped.
if [ -f "${SHARED_DIR}/excluded_tests" ]; then
  echo "Skipping following tests from conformance/parallel suite..."
  # Fix: quote the expansion so a SHARED_DIR containing spaces or glob
  # characters is not word-split by the shell before reaching cat.
  cat "${SHARED_DIR}/excluded_tests"
fi
|
#!/bin/bash -xe
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is executed inside post_test_hook function in devstack gate.
# Export the most recent subunit stream (if any test run happened) into the
# Jenkins workspace, tagged with the given suite name.
export_subunit_data() {
    target="$1"
    # .testrepository/0 only exists once at least one testr run completed.
    if [[ -f .testrepository/0 ]]; then
        sudo testr last --subunit > "${WORKSPACE}/testrepository.subunit.${target}"
    fi
}
# Merge all exported subunit streams into the log directory, render an HTML
# report from them, then gzip both artifacts and make them readable for the
# jenkins log-collection user.
generate_testr_results() {
    merged="$BASE/logs/testrepository.subunit"
    report="$BASE/logs/testr_results.html"
    cat "$WORKSPACE"/testrepository.subunit.* | sudo tee "$merged"
    sudo /usr/os-testr-env/bin/subunit2html "$merged" "$report"
    sudo gzip -9 "$merged"
    sudo gzip -9 "$report"
    sudo chown jenkins:jenkins "$merged.gz" "$report.gz"
    sudo chmod a+r "$merged.gz" "$report.gz"
}
# If we're running in the gate find our keystone endpoint to give to
# gabbi tests and do a chown. Otherwise the existing environment
# should provide URL and TOKEN.
if [ -d $BASE/new/devstack ]; then
export CEILOMETER_DIR="$BASE/new/ceilometer"
STACK_USER=stack
sudo chown -R $STACK_USER:stack $CEILOMETER_DIR
source $BASE/new/devstack/openrc admin admin
# Go to the ceilometer dir
cd $CEILOMETER_DIR
fi
# Resolve the public service endpoints from the keystone catalog; the gabbi
# integration tests consume these via the environment.
openstack catalog list
export AODH_SERVICE_URL=$(openstack catalog show alarming -c endpoints -f value | awk '/public/{print $2}')
export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}')
export HEAT_SERVICE_URL=$(openstack catalog show orchestration -c endpoints -f value | awk '/public/{print $2}')
export NOVA_SERVICE_URL=$(openstack catalog show compute -c endpoints -f value | awk '/public/{print $2}')
export GLANCE_IMAGE_NAME=$(openstack image list | awk '/ cirros.*uec /{print $4}')
export ADMIN_TOKEN=$(openstack token issue -c id -f value)
if [ -d $BASE/new/devstack ]; then
# NOTE(sileht): on swift job permissions are wrong, I don't known why
sudo chown -R tempest:stack $BASE/new/tempest
sudo chown -R tempest:stack $BASE/data/tempest
# Run tests with tempest
cd $BASE/new/tempest
# Disable errexit around the test run so a failing suite still gets its
# subunit results exported before the script decides to exit.
set +e
sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- ceilometer.tests.tempest.scenario.test_autoscaling --concurrency=$TEMPEST_CONCURRENCY
TEMPEST_EXIT_CODE=$?
set -e
export_subunit_data "all-plugin"
if [[ $TEMPEST_EXIT_CODE != 0 ]]; then
# Collect and parse result
generate_testr_results
exit $TEMPEST_EXIT_CODE
fi
cd $CEILOMETER_DIR
fi
# Run tests with gabbi
echo "Running telemetry integration test suite"
set +e
sudo -E -H -u ${STACK_USER:-${USER}} tox -eintegration
EXIT_CODE=$?
echo "* Message queue status:"
sudo rabbitmqctl list_queues | grep -e \\.sample -e \\.info
# On failure dump as much diagnostic state as possible. errexit is still
# disabled here, so individual failing diagnostic commands don't abort the dump.
if [ $EXIT_CODE -ne 0 ] ; then
set +x
echo "* Heat stack:"
openstack stack show integration_test
echo "* Alarm list:"
ceilometer alarm-list
echo "* Nova instance list:"
openstack server list
echo "* Gnocchi instance list:"
gnocchi resource list -t instance
for instance_id in $(openstack server list -f value -c ID); do
echo "* Nova instance detail:"
openstack server show $instance_id
echo "* Gnocchi instance detail:"
gnocchi resource show -t instance $instance_id
echo "* Gnocchi measures for instance ${instance_id}:"
gnocchi measures show -r $instance_id cpu_util
done
gnocchi status
# Be sure to source Gnocchi settings before
source $BASE/new/gnocchi/devstack/settings
echo "* Unprocessed measures:"
sudo find $GNOCCHI_DATA_DIR/measure
set -x
fi
set -e
# Collect and parse result
if [ -n "$CEILOMETER_DIR" ]; then
export_subunit_data "integration"
generate_testr_results
fi
exit $EXIT_CODE
|
/* © 2017 Goalify
* @author Thanh
*/
import { Meteor } from 'meteor/meteor';
import { ReduceStore } from 'flux/utils';
import { Songs, AppStates, Rooms, Messages } from '../collections';
import AppDispatcher from './AppDispatcher';
import * as AppActions from './AppActions';
// Client-side bootstrap: subscribe to the public publications and expose the
// collections on window so they can be inspected from the browser console.
if (Meteor.isClient) {
Meteor.subscribe('Songs.public');
Meteor.subscribe('AppStates.public');
Meteor.subscribe('Rooms.public');
Meteor.subscribe('Messages.public');
window.Songs = Songs;
window.AppStates = AppStates;
window.Rooms = Rooms;
window.Messages = Messages;
}
/**
 * Flux ReduceStore holding the entire application state tree.
 *
 * @example
 * // whole state object — treat it as read-only, never mutate it
 * AppStore.getState().stateName
 * // a single root slice of the state tree
 * AppStore.getRootState('stateName')
 */
class AppStore extends ReduceStore {
  /**
   * Read one top-level slice of the state tree.
   * @param {String} stateName name of root state
   * @return {any} State object
   */
  getRootState(stateName) {
    const tree = this.getState();
    return tree[stateName];
  }

  // Built-in ReduceStore hook: the initial shape of the state tree.
  getInitialState() {
    return {
      tabIndex: 0,
      activeBtnPlay: false,
      focusSearchBox: false,
      toggleBtnNav: false,
      selectedSong: null,
      openPopup: false,
      songName: '',
      songLyric: '',
      revealedSongs: [],
      currentRoom: null,
      toasterOpen: false,
      toasterText: '',
      toasterType: 'success',
      toggleSearchInput: false,
      isChatboxOpen: false,
    };
  }

  /**
   * Resolve a song document by its id.
   * @param {String} id song _id
   * @return {?Object} the song document, or null when no id was supplied
   */
  selectSong(id) {
    return id ? Songs.findOne({ _id: id }) : null;
  }

  /**
   * Fetch the name/lyric fields used by the lyric popup.
   * @param {String} id song _id
   * @return {Object} { songName, songLyric }, empty strings when unavailable
   */
  getSongNameAndLyric(id) {
    const song = id ? Songs.findOne({ _id: id }) : null;
    if (song) {
      return {
        songName: song.name,
        songLyric: song.lyric,
      };
    }
    return {
      songName: '',
      songLyric: '',
    };
  }

  /**
   * Pure function, avoid mutate inputs.
   * @param {Object} state Current state object
   * @param {Object} action Action payload object
   * @return {Object} new state
   */
  reduce(state, action) {
    // Each known action type maps to a producer of the partial update;
    // unknown actions fall through to an unchanged copy of the state.
    const updates = {
      [AppActions.CHANGE_TAB]: () => ({ tabIndex: action.tabIndex }),
      [AppActions.ACTIVE_BTN_PLAY]: () => ({ activeBtnPlay: true }),
      [AppActions.DEACTIVE_BTN_PLAY]: () => ({ activeBtnPlay: false }),
      [AppActions.FOCUS_SEARCH_BOX]: () => ({ focusSearchBox: action.isFocus }),
      [AppActions.SELECT_SONG]: () => ({
        selectedSong: this.selectSong(action.id),
        activeBtnPlay: true,
      }),
      [AppActions.OPEN_POP_UP]: () => ({ openPopup: true }),
      [AppActions.CLOSE_POP_UP]: () => ({ openPopup: false }),
      [AppActions.UPDATE_LYRIC_POPUP]: () => this.getSongNameAndLyric(action.id),
      [AppActions.SET_ROOM]: () => ({ currentRoom: action.room }),
      [AppActions.SET_TOASTER]: () => ({
        toasterOpen: action.open,
        toasterText: action.text ? action.text : state.toasterText,
        toasterType: action.toasterType ? action.toasterType : state.toasterType,
      }),
      [AppActions.TOGGLE_SEARCH]: () => ({
        toggleSearchInput: !state.toggleSearchInput,
      }),
      [AppActions.TOGGLE_CHATBOX]: () => ({
        isChatboxOpen: !state.isChatboxOpen,
      }),
    };
    const makeUpdate = updates[action.type];
    // Always return a fresh object to imitate a pure function; an unhandled
    // action yields Object.assign({}, state, undefined) — a plain copy.
    return Object.assign({}, state, makeUpdate ? makeUpdate() : undefined);
  }
}
// Singleton AppStore: constructing it registers the store with AppDispatcher,
// so every dispatched action flows through reduce() above.
export default new AppStore(AppDispatcher);
|
/* Copyright (c) 2015-2016 Skyward Experimental Rocketry
* Authors: <NAME>, <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef CANMANAGER_H
#define CANMANAGER_H
#include <Common.h>
#include "CanBus.h"
#include "CanUtils.h"
// Forward declaration; the full definition comes from the CanBus.h include above.
class CanBus;
// Sentinel for canbus_init_t::af meaning "no alternate function to configure".
static const int8_t AF_NONE = -1;
// Global registry of the (at most 2) active buses. Filled by
// CanManager::addBus and read by the interrupt handlers — see the
// "Used by CanInterrupt.cpp" note in addBus below.
extern CanBus *global_bus_ptr[2];
extern uint32_t global_bus_ctr;
/** CanBus Init structure: everything needed to bring up one CAN peripheral. */
struct canbus_init_t
{
/** CAN1, CAN2, ... (peripheral register block) */
CAN_TypeDef *can;
/** Pin Mode, applied to both the RX and TX GPIOs by CanManager::addBus */
const miosix::Mode::Mode_ mode;
/** Alternate function id or AF_NONE */
const int8_t af;
/** Array of interrupts; addBus enables each of them with priority 15 */
const std::vector<IRQn_Type> interrupts;
};
/**
 * Owner of up to two CAN bus instances: configures GPIOs, peripheral clocks
 * and interrupts, creates the CanBus objects and manages their HW filters.
 *
 * Non-copyable and non-movable: it owns hardware state and heap-allocated
 * CanBus objects that are torn down in the destructor.
 */
class CanManager
{
    // friend class Singleton<CanManager>;
public:
    /**
     * @brief Adds a filter to receive Canbus messages.
     *
     * @param id filter ID
     * @param can_id on which canbus
     * @return true ok
     * @return false invalid filters
     */
    bool addHWFilter(uint16_t id, uint32_t can_id);

    /** @brief Removes a filter previously added with addHWFilter(). */
    bool delHWFilter(uint16_t id, uint32_t can_id);

    /** @brief Number of filters currently enabled on the given bus. */
    unsigned getNumFilters(unsigned can_id) const;

    /**
     * Add a new bus to the canmanager. Can add AT MOST 2 different buses.
     *
     * @tparam gpio GPIO port of the CAN pins
     * @tparam rx   RX pin number on that port
     * @tparam tx   TX pin number on that port
     * @param i          peripheral description (registers, pin mode, AF, IRQs)
     * @param dispatcher callback the new CanBus will use for received frames
     */
    template <uint32_t gpio, uint8_t rx, uint8_t tx>
    void addBus(const canbus_init_t &i, CanDispatcher dispatcher)
    {
        // Fix: refuse a third bus instead of writing past the end of the
        // 2-element global_bus_ptr table (which would be undefined behavior).
        if (global_bus_ctr >= 2)
            return;

        typedef miosix::Gpio<gpio, rx> rport;
        typedef miosix::Gpio<gpio, tx> tport;

        rport::mode(i.mode);
        tport::mode(i.mode);
        if (i.af >= 0)
        {
#ifndef _ARCH_CORTEXM3_STM32 // Only stm32f2 and stm32f4 have it
            rport::alternateFunction(i.af);
            tport::alternateFunction(i.af);
#endif //_ARCH_CORTEXM3_STM32
        }

        // Enable the CAN peripheral clock(s) with interrupts disabled, since
        // RCC registers are shared state.
        // TODO de-hardcode this part
        {
            miosix::FastInterruptDisableLock dLock;
#ifdef RCC_APB1ENR_CAN2EN
            RCC->APB1ENR |= RCC_APB1ENR_CAN1EN | RCC_APB1ENR_CAN2EN;
#else
            RCC->APB1ENR |= RCC_APB1ENR_CAN1EN;
#endif
            RCC_SYNC();
        }

        for (const auto &j : i.interrupts)
        {
            NVIC_SetPriority(j, 15);
            NVIC_EnableIRQ(j);
        }

        // The bus id handed to CanBus is its index in the `bus` vector,
        // matching what getBus() expects.
        CanBus *canbus = new CanBus(i.can, this, bus.size(), dispatcher);
        bus.push_back(canbus);
        canbus->start();

        // Used by CanInterrupt.cpp
        global_bus_ptr[global_bus_ctr++] = canbus;
    }

    /** @brief Returns the bus registered at position id (see addBus). */
    CanBus *getBus(uint32_t id);

    /** Rule of 5: non-copyable and non-movable (owns HW and heap state). */
    CanManager(const CanManager &) = delete;
    CanManager(CanManager &&) = delete; // fix: was `const CanManager &&`
    CanManager &operator=(const CanManager &) = delete;
    CanManager &operator=(CanManager &&) = delete; // completes the rule of 5

    ~CanManager()
    {
        // Disable interrupts
        // TODO: Only disable the interrupts that we enabled
#ifdef CAN1_RX0_IRQn
        NVIC_DisableIRQ(CAN1_RX0_IRQn);
#endif
#ifdef CAN1_RX1_IRQn
        NVIC_DisableIRQ(CAN1_RX1_IRQn);
#endif
#ifdef CAN2_RX0_IRQn
        NVIC_DisableIRQ(CAN2_RX0_IRQn);
#endif
#ifdef CAN2_RX1_IRQn
        NVIC_DisableIRQ(CAN2_RX1_IRQn);
#endif
        // Clear the global table used by the interrupt handlers before the
        // buses are destroyed.
        global_bus_ptr[0] = nullptr;
        global_bus_ptr[1] = nullptr;
        global_bus_ctr = 0;

        // TODO Maybe unconfigure ports?
        while (bus.size() > 0)
        {
            bus[bus.size() - 1]->stop(); // Stop canbus thread
            delete bus[bus.size() - 1];
            bus.pop_back();
        }

        // Gate the peripheral clock(s) back off, mirroring addBus.
        {
            miosix::FastInterruptDisableLock dLock;
#ifdef RCC_APB1ENR_CAN2EN
            RCC->APB1ENR &= ~(RCC_APB1ENR_CAN1EN | RCC_APB1ENR_CAN2EN);
#else
            RCC->APB1ENR &= ~RCC_APB1ENR_CAN1EN;
#endif
            RCC_SYNC();
        }
    }

    explicit CanManager(volatile CAN_TypeDef *Config) : Config(Config)
    {
        memset(enabled_filters, 0, sizeof(enabled_filters));
    }

    // sizeof(id) = 11 bit
    static constexpr int filter_max_id_log2 = 11;
    static constexpr int filter_max_id = (1 << filter_max_id_log2);
    // 32 bit = 2 filters * 16 bit
    static constexpr int filterbank_size_bit = 32;
    static constexpr int filters_per_bank = 2;
    static constexpr int filters_per_row = 4;
    static constexpr int filter_size_bit = // 16
        filterbank_size_bit / filters_per_bank;
    // registers per bank: 2, FR1, FR2
    static constexpr int registers_per_bank = 2;
    // TODO check this formula --v
    static constexpr int separation_bit = // 2
        filters_per_row / registers_per_bank - 1;
    // 16 bit - 11 bit = 5 bit
    static constexpr uint32_t filter_id_shift =
        filter_size_bit - filter_max_id_log2;
    static constexpr uint32_t filter_null = 0xffff;
    static constexpr int max_chan_filters = 14 * filters_per_row;
    // TODO 2 == number of can buses
    static constexpr int max_glob_filters = 2 * max_chan_filters;

private:
    // Per-bus bookkeeping for addHWFilter/delHWFilter (defined out of line).
    std::map<uint16_t, uint8_t> filters[2];
    // Owned CanBus instances; vector index == bus id used by getBus().
    std::vector<CanBus *> bus;
    // TODO change "2" with number of available CAN buses
    uint16_t enabled_filters[2];
    volatile CAN_TypeDef *const Config;
};
//#define sCanManager CanManager::getInstance()
#endif /* CANMANAGER_H */
|
import sys
# Mode flag: 0 when the first CLI argument is the literal "new", else 1.
option = 0 if len(sys.argv) > 1 and sys.argv[1] == "new" else 1
# Output file: a single CLI argument doubles as the output file name.
# NOTE(review): when argv[1] == "new", out_file also becomes "new" —
# presumably unintended; confirm against how this script is invoked.
out_file = sys.argv[1] if len(sys.argv) == 2 else "performance.txt"
TEST_SIZE = 5 |
#!/bin/bash
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Bail out early when the 'jq' JSON processor (needed to parse the REST
# responses below) is not installed.
if ! jq --version > /dev/null 2>&1; then
    echo "Please Install 'jq' https://stedolan.github.io/jq/ to execute this script"
    echo
    exit 1
fi
# Remember when we started so total runtime can be reported at the end.
starttime=$(date +%s)
# Print the usage message
printHelp() {
    printf '%s\n' \
        'Usage: ' \
        '  ./testAPIs.sh -l golang|node' \
        '  -l <language> - chaincode language (defaults to "golang")'
}
# Language defaults to "golang"
LANGUAGE="golang"
# Parse commandline args
# -h / -? print the usage message and exit; -l selects the chaincode language.
while getopts "h?l:" opt; do
case "$opt" in
h|\?)
printHelp
exit 0
;;
l) LANGUAGE=$OPTARG
;;
esac
done
##set chaincode path
# Normalise the -l flag to lower case and map it onto the chaincode source
# path expected by the REST server; unsupported languages abort the script.
function setChaincodePath(){
    LANGUAGE=$(echo "$LANGUAGE" | tr '[:upper:]' '[:lower:]')
    case "$LANGUAGE" in
    "golang")
        CC_SRC_PATH="github.com/example_cc/go"
        ;;
    "node")
        CC_SRC_PATH="$PWD/artifacts/src/github.com/example_cc/node"
        ;;
    *)
        # Fix: the original printf line ended with a stray '$' outside the
        # closing quote, printing a literal '$'; also pass LANGUAGE through
        # %s so printf format characters in the value are not interpreted.
        printf "\n ------ Language %s is not supported yet ------\n" "$LANGUAGE"
        exit 1
    esac
}
setChaincodePath
# Enroll one user per org; the REST server answers with a JWT used as the
# bearer token for every following request.
echo "POST request Enroll on Org1 ..."
echo
ORG1_TOKEN=$(curl -s -X POST \
http://localhost:4000/users \
-H "content-type: application/x-www-form-urlencoded" \
-d 'username=Jim&orgName=Org1')
echo $ORG1_TOKEN
# Extract the bare token string from the JSON response.
ORG1_TOKEN=$(echo $ORG1_TOKEN | jq ".token" | sed "s/\"//g")
echo
echo "ORG1 token is $ORG1_TOKEN"
echo
echo "POST request Enroll on Org2 ..."
echo
ORG2_TOKEN=$(curl -s -X POST \
http://localhost:4000/users \
-H "content-type: application/x-www-form-urlencoded" \
-d 'username=Barry&orgName=Org2')
echo $ORG2_TOKEN
ORG2_TOKEN=$(echo $ORG2_TOKEN | jq ".token" | sed "s/\"//g")
echo
echo "ORG2 token is $ORG2_TOKEN"
echo
echo
# Create the channel from the pre-generated channel transaction, then join
# both peers of each org to it.
echo "POST request Create channel ..."
echo
curl -s -X POST \
http://localhost:4000/channels \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json" \
-d '{
"channelName":"mychannel",
"channelConfigPath":"../artifacts/channel/mychannel.tx"
}'
echo
echo
# Give the channel creation time to settle before joining peers.
sleep 5
echo "POST request Join channel on Org1"
echo
curl -s -X POST \
http://localhost:4000/channels/mychannel/peers \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json" \
-d '{
"peers": ["peer0.org1.example.com","peer1.org1.example.com"]
}'
echo
echo
echo "POST request Join channel on Org2"
echo
curl -s -X POST \
http://localhost:4000/channels/mychannel/peers \
-H "authorization: Bearer $ORG2_TOKEN" \
-H "content-type: application/json" \
-d '{
"peers": ["peer0.org2.example.com","peer1.org2.example.com"]
}'
echo
echo
# Install the chaincode on both orgs' peers, instantiate it with an initial
# ledger state, then exercise it with one invoke and one query.
echo "POST Install chaincode on Org1"
echo
curl -s -X POST \
http://localhost:4000/chaincodes \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json" \
-d "{
\"peers\": [\"peer0.org1.example.com\",\"peer1.org1.example.com\"],
\"chaincodeName\":\"mycc\",
\"chaincodePath\":\"$CC_SRC_PATH\",
\"chaincodeType\": \"$LANGUAGE\",
\"chaincodeVersion\":\"v0\"
}"
echo
echo
echo "POST Install chaincode on Org2"
echo
curl -s -X POST \
http://localhost:4000/chaincodes \
-H "authorization: Bearer $ORG2_TOKEN" \
-H "content-type: application/json" \
-d "{
\"peers\": [\"peer0.org2.example.com\",\"peer1.org2.example.com\"],
\"chaincodeName\":\"mycc\",
\"chaincodePath\":\"$CC_SRC_PATH\",
\"chaincodeType\": \"$LANGUAGE\",
\"chaincodeVersion\":\"v0\"
}"
echo
echo
echo "POST instantiate chaincode on peer1 of Org1"
echo
curl -s -X POST \
http://localhost:4000/channels/mychannel/chaincodes \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json" \
-d "{
\"chaincodeName\":\"mycc\",
\"chaincodeVersion\":\"v0\",
\"chaincodeType\": \"$LANGUAGE\",
\"args\":[\"a\",\"100\",\"b\",\"200\"]
}"
echo
echo
echo "POST invoke chaincode on peers of Org1"
echo
TRX_ID=$(curl -s -X POST \
http://localhost:4000/channels/mychannel/chaincodes/mycc \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json" \
-d '{
"peers": ["peer0.org1.example.com","peer1.org1.example.com"],
"fcn":"put",
"args":["a"]
}')
echo "Transacton ID is $TRX_ID"
echo
echo
echo "GET query chaincode on peer1 of Org1"
echo
curl -s -X GET \
"http://localhost:4000/channels/mychannel/chaincodes/mycc?peer=peer0.org1.example.com&fcn=get&args=%5B%22a%22%5D" \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json"
echo
echo
# NOTE(review): this bare 'exit' stops the script here, so every query below
# it never runs — presumably a debugging leftover; confirm before removing.
exit
# NOTE(review): everything from here down is currently unreachable because of
# the bare 'exit' above; kept as-is pending confirmation.
echo "GET query Block by blockNumber"
echo
curl -s -X GET \
"http://localhost:4000/channels/mychannel/blocks/1?peer=peer0.org1.example.com" \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json"
echo
echo
echo "GET query Transaction by TransactionID"
echo
curl -s -X GET http://localhost:4000/channels/mychannel/transactions/$TRX_ID?peer=peer0.org1.example.com \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json"
echo
echo
############################################################################
### TODO: What to pass to fetch the Block information
############################################################################
#echo "GET query Block by Hash"
#echo
#hash=????
#curl -s -X GET \
# "http://localhost:4000/channels/mychannel/blocks?hash=$hash&peer=peer1" \
# -H "authorization: Bearer $ORG1_TOKEN" \
# -H "cache-control: no-cache" \
# -H "content-type: application/json" \
# -H "x-access-token: $ORG1_TOKEN"
#echo
#echo
echo "GET query ChainInfo"
echo
curl -s -X GET \
"http://localhost:4000/channels/mychannel?peer=peer0.org1.example.com" \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json"
echo
echo
echo "GET query Installed chaincodes"
echo
curl -s -X GET \
"http://localhost:4000/chaincodes?peer=peer0.org1.example.com" \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json"
echo
echo
echo "GET query Instantiated chaincodes"
echo
curl -s -X GET \
"http://localhost:4000/channels/mychannel/chaincodes?peer=peer0.org1.example.com" \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json"
echo
echo
echo "GET query Channels"
echo
curl -s -X GET \
"http://localhost:4000/channels?peer=peer0.org1.example.com" \
-H "authorization: Bearer $ORG1_TOKEN" \
-H "content-type: application/json"
echo
echo
echo "Total execution time : $(($(date +%s)-starttime)) secs ..."
|
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh"
# Reconcile any partially-written junit output no matter how we exit.
trap os::test::junit::reconcile_output EXIT
# Cleanup cluster resources created by this test
# (best effort: run in a subshell with errexit off, discard all output).
(
set +e
oc delete all,templates,secrets,pods,jobs --all
oc delete image v1-image
oc delete group patch-group
oc delete project test-project-admin
exit 0
) &>/dev/null
# Backslash-escape every ERE metacharacter in "$*" so the result can be
# embedded verbatim inside a larger regular expression.
escape_regex() {
    local raw="$*"
    sed 's/[]\.|$(){}?+*^]/\\&/g' <<< "${raw}"
}
project="$( oc project -q )"
os::test::junit::declare_suite_start "cmd/basicresources"
# This test validates basic resource retrieval and command interaction
os::test::junit::declare_suite_start "cmd/basicresources/versionreporting"
# Test to make sure that we're reporting the correct version information from endpoints and the correct
# User-Agent information from our clients regardless of which resources they're trying to access
os::build::version::get_vars
os_git_regex="$( escape_regex "${OS_GIT_VERSION%%-*}" )"
kube_git_regex="$( escape_regex "${KUBE_GIT_VERSION%%-*}" )"
etcd_version="$(echo "${ETCD_GIT_VERSION}" | sed -E "s/\-.*//g" | sed -E "s/v//")"
etcd_git_regex="$( escape_regex "${etcd_version%%-*}" )"
os::cmd::expect_success_and_text 'oc version' "oc ${os_git_regex}"
os::cmd::expect_success_and_text 'oc version' "kubernetes ${kube_git_regex}"
os::cmd::expect_success_and_text 'oc version' "features: Basic-Auth"
os::cmd::expect_success_and_text 'openshift version' "openshift ${os_git_regex}"
os::cmd::expect_success_and_text "curl -k '${API_SCHEME}://${API_HOST}:${API_PORT}/version'" "${kube_git_regex}"
os::cmd::expect_success_and_not_text "curl -k '${API_SCHEME}://${API_HOST}:${API_PORT}/version'" "${OS_GIT_COMMIT}"
# variants I know I have to worry about
# 1. oc (kube and openshift resources)
# 2. oc adm (kube and openshift resources)
# example User-Agent: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d
os::cmd::expect_success_and_text 'oc get pods --loglevel=7 2>&1 | grep -A4 "pods" | grep User-Agent' "oc/${kube_git_regex} .* kubernetes/"
# example User-Agent: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d
os::cmd::expect_success_and_text 'oc get dc --loglevel=7 2>&1 | grep -A4 "deploymentconfig" | grep User-Agent' "oc/${kube_git_regex} .* kubernetes/"
# example User-Agent: oc/v1.1.3 (linux/amd64) openshift/b348c2f
os::cmd::expect_success_and_text 'oc adm policy who-can get pods --loglevel=7 2>&1 | grep -A4 "localresourceaccessreviews" | grep User-Agent' "oc/${kube_git_regex} .* kubernetes/"
echo "version reporting: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/basicresources/status"
os::cmd::expect_success_and_text 'oc status -h' 'oc describe buildConfig'
os::cmd::expect_success_and_text 'oc status' 'oc new-app'
echo "status help output: ok"
os::test::junit::declare_suite_end
# explain: schema documentation must be reachable for kube (and eventually
# openshift) resource types
os::test::junit::declare_suite_start "cmd/basicresources/explain"
os::cmd::expect_failure_and_text 'oc get' 'oc api-resources'
os::cmd::expect_success_and_text 'oc get all --loglevel=6' 'buildconfigs'
os::cmd::expect_success_and_text 'oc explain pods' 'Pod is a collection of containers that can run on a host'
os::cmd::expect_success_and_text 'oc explain pods.spec' 'SecurityContext holds pod-level security attributes'
# TODO unbreak explain
#os::cmd::expect_success_and_text 'oc explain deploymentconfig' 'a desired deployment state'
#os::cmd::expect_success_and_text 'oc explain deploymentconfig.spec' 'ensures that this deployment config will have zero replicas'
echo "explain: ok"
os::test::junit::declare_suite_end

os::test::junit::declare_suite_start "cmd/basicresources/resource-builder"
# Test resource builder filtering of files with expected extensions inside directories, and individual files without expected extensions
os::cmd::expect_success 'oc create -f test/testdata/resource-builder/directory -f test/testdata/resource-builder/json-no-extension -f test/testdata/resource-builder/yml-no-extension'
# Explicitly specified extensionless files
os::cmd::expect_success 'oc get secret json-no-extension yml-no-extension'
# Scanned files with extensions inside directories
os::cmd::expect_success 'oc get secret json-with-extension yml-with-extension'
# Ensure extensionless files inside directories are not processed by resource-builder
os::cmd::expect_failure_and_text 'oc get secret json-no-extension-in-directory' 'not found'
echo "resource-builder: ok"
os::test::junit::declare_suite_end
# pods: basic create/describe/delete lifecycle
os::test::junit::declare_suite_start "cmd/basicresources/pods"
os::cmd::expect_success 'oc get pods --match-server-version'
os::cmd::expect_success_and_text 'oc create -f examples/hello-openshift/hello-pod.json' 'pod/hello-openshift created'
os::cmd::expect_success 'oc describe pod hello-openshift'
os::cmd::expect_success 'oc delete pods hello-openshift --grace-period=0 --force'
echo "pods: ok"
os::test::junit::declare_suite_end

# label/annotate: mutation, --resource-version restrictions, label selectors
os::test::junit::declare_suite_start "cmd/basicresources/label"
os::cmd::expect_success_and_text 'oc create -f examples/hello-openshift/hello-pod.json -o name' 'pod/hello-openshift'
os::cmd::try_until_success 'oc label pod/hello-openshift acustom=label' # can race against scheduling and status updates
os::cmd::expect_success_and_text 'oc describe pod/hello-openshift' 'acustom=label'
os::cmd::try_until_success 'oc annotate pod/hello-openshift foo=bar' # can race against scheduling and status updates
os::cmd::expect_success_and_text 'oc get -o yaml pod/hello-openshift' 'foo: bar'
# --resource-version with a single resource: fails, but not with the multi-resource error
os::cmd::expect_failure_and_not_text 'oc annotate pod hello-openshift description="test" --resource-version=123' 'may only be used with a single resource'
# --resource-version with two resources: must emit the multi-resource error
os::cmd::expect_failure_and_text 'oc annotate pod hello-openshift hello-openshift description="test" --resource-version=123' 'may only be used with a single resource'
os::cmd::expect_success 'oc delete pods -l acustom=label --grace-period=0 --force'
os::cmd::expect_failure 'oc get pod/hello-openshift'
# show-labels should work for projects
os::cmd::expect_success "oc label namespace '${project}' foo=bar"
os::cmd::expect_success_and_text "oc get project '${project}' --show-labels" "foo=bar"
echo "label: ok"
os::test::junit::declare_suite_end
# services: create/delete round trip
os::test::junit::declare_suite_start "cmd/basicresources/services"
os::cmd::expect_success 'oc get services'
os::cmd::expect_success 'oc create -f test/integration/testdata/test-service.json'
os::cmd::expect_success 'oc delete services frontend'
# TODO: reenable with a permission check
# os::cmd::expect_failure_and_text 'oc create -f test/integration/testdata/test-service-with-finalizer.json' "finalizers are disabled"
echo "services: ok"
os::test::junit::declare_suite_end

# list-version-conversion: a single file mixing API versions must be
# creatable/labelable/annotatable as one unit
# TODO rewrite the yaml for this test to actually work
os::test::junit::declare_suite_start "cmd/basicresources/list-version-conversion"
os::cmd::expect_success 'oc create -f test/testdata/mixed-api-versions.yaml'
os::cmd::expect_success 'oc get -f test/testdata/mixed-api-versions.yaml -o yaml'
os::cmd::expect_success 'oc label -f test/testdata/mixed-api-versions.yaml mylabel=a'
os::cmd::expect_success 'oc annotate -f test/testdata/mixed-api-versions.yaml myannotation=b'
# Make sure all six resources, with different API versions, got labeled and annotated
os::cmd::expect_success_and_text 'oc get -f test/testdata/mixed-api-versions.yaml --output=jsonpath="{..metadata.labels.mylabel}"' '^a a a a$'
os::cmd::expect_success_and_text 'oc get -f test/testdata/mixed-api-versions.yaml --output=jsonpath="{..metadata.annotations.myannotation}"' '^b b b b$'
os::cmd::expect_success 'oc delete -f test/testdata/mixed-api-versions.yaml'
echo "list version conversion: ok"
os::test::junit::declare_suite_end
# nodes: both oc and plain kubectl can list nodes with the same kubeconfig
os::test::junit::declare_suite_start "cmd/basicresources/nodes"
os::cmd::expect_success 'oc get nodes'
(
  # subshell so we can unset kubeconfig
  cfg="${KUBECONFIG}"
  unset KUBECONFIG
  os::cmd::expect_success "kubectl get nodes --kubeconfig='${cfg}'"
)
echo "nodes: ok"
os::test::junit::declare_suite_end

# create subcommands: typed `oc create <kind>` helpers
os::test::junit::declare_suite_start "cmd/basicresources/create"
os::cmd::expect_success 'oc create dc my-nginx --image=nginx'
os::cmd::expect_success 'oc delete dc my-nginx'
os::cmd::expect_success 'oc create clusterquota limit-bob --project-label-selector=openshift.io/requester=user-bob --hard=pods=10'
os::cmd::expect_success 'oc delete clusterquota/limit-bob'
echo "create subcommands: ok"
os::test::junit::declare_suite_end

# statefulsets: create/describe/delete, waiting for the first pod to appear
os::test::junit::declare_suite_start "cmd/basicresources/statefulsets"
os::cmd::expect_success 'oc create -f test/testdata/statefulset.yaml'
os::cmd::try_until_success 'oc get pods testapp-0'
os::cmd::expect_success_and_text 'oc describe statefulset testapp' 'app=testapp'
os::cmd::expect_success 'oc delete -f test/testdata/statefulset.yaml'
echo "statefulsets: ok"
os::test::junit::declare_suite_end
# set probe: argument validation (--local dry runs) followed by live updates
# against a deploymentconfig
os::test::junit::declare_suite_start "cmd/basicresources/setprobe"
# Validate the probe command
arg="-f examples/hello-openshift/hello-pod.json"
os::cmd::expect_failure_and_text "oc set probe" "error: one or more resources"
os::cmd::expect_failure_and_text "oc set probe ${arg}" "error: you must specify one of --readiness, --liveness or both"
os::cmd::expect_failure_and_text "oc set probe ${arg} --local -o yaml --liveness --get-url=https://127.0.0.1/path" "port must be specified as part of a url"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness" 'livenessProbe: \{\}'
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --initial-delay-seconds=10" "livenessProbe:"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --initial-delay-seconds=10" "initialDelaySeconds: 10"
# `-- cmd args...` after the flags becomes an exec probe
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness -- echo test" "livenessProbe:"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --readiness -- echo test" "readinessProbe:"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness -- echo test" "exec:"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness -- echo test" "\- echo"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness -- echo test" "\- test"
# tcp and http(s) probes; ports may be numeric or named
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --open-tcp=3306" "tcpSocket:"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --open-tcp=3306" "port: 3306"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --open-tcp=port" "port: port"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --get-url=https://127.0.0.1:port/path" "port: port"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --get-url=https://127.0.0.1:8080/path" "port: 8080"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --get-url=https://127.0.0.1:port/path" "path: /path"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --get-url=https://127.0.0.1:port/path" "scheme: HTTPS"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --get-url=http://127.0.0.1:port/path" "scheme: HTTP"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --get-url=https://127.0.0.1:port/path" "host: 127.0.0.1"
os::cmd::expect_success_and_text "oc set probe ${arg} --local -o yaml --liveness --get-url=https://127.0.0.1:port/path" "port: port"
# live updates against a real dc: each change reports "updated", a repeated
# identical change reports "was not changed"
os::cmd::expect_success "oc create -f test/integration/testdata/test-deployment-config.yaml"
os::cmd::expect_failure_and_text "oc set probe dc/test-deployment-config --liveness" "Required value: must specify a handler type"
os::cmd::expect_success_and_text "oc set probe dc test-deployment-config --liveness --open-tcp=8080" "updated"
os::cmd::expect_success_and_text "oc set probe dc/test-deployment-config --liveness --open-tcp=8080 --v=1" "was not changed"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "livenessProbe:"
os::cmd::expect_success_and_text "oc set probe dc/test-deployment-config --liveness --initial-delay-seconds=10" "updated"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "initialDelaySeconds: 10"
os::cmd::expect_success_and_text "oc set probe dc/test-deployment-config --liveness --initial-delay-seconds=20" "updated"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "initialDelaySeconds: 20"
os::cmd::expect_success_and_text "oc set probe dc/test-deployment-config --liveness --failure-threshold=2" "updated"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "initialDelaySeconds: 20"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "failureThreshold: 2"
os::cmd::expect_success_and_text "oc set probe dc/test-deployment-config --readiness --success-threshold=4 -- echo test" "updated"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "initialDelaySeconds: 20"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "successThreshold: 4"
os::cmd::expect_success_and_text "oc set probe dc test-deployment-config --liveness --period-seconds=5" "updated"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "periodSeconds: 5"
os::cmd::expect_success_and_text "oc set probe dc/test-deployment-config --liveness --timeout-seconds=6" "updated"
os::cmd::expect_success_and_text "oc get dc/test-deployment-config -o yaml" "timeoutSeconds: 6"
os::cmd::expect_success_and_text "oc set probe dc --all --liveness --timeout-seconds=7" "updated"
os::cmd::expect_success_and_text "oc get dc -o yaml" "timeoutSeconds: 7"
os::cmd::expect_success_and_text "oc set probe dc/test-deployment-config --liveness --remove" "updated"
os::cmd::expect_success_and_not_text "oc get dc/test-deployment-config -o yaml" "livenessProbe"
os::cmd::expect_success "oc delete dc/test-deployment-config"
echo "set probe: ok"
os::test::junit::declare_suite_end
# set env: add/overwrite/list/remove env vars on dc and bc, plus --from=secret
# references and their resolution under restricted permissions
os::test::junit::declare_suite_start "cmd/basicresources/setenv"
os::cmd::expect_success "oc create -f test/integration/testdata/test-deployment-config.yaml"
os::cmd::expect_success "oc create -f test/integration/testdata/test-buildcli.json"
os::cmd::expect_success_and_text "oc set env dc/test-deployment-config FOO=1st" "updated"
os::cmd::expect_success_and_text "oc set env dc/test-deployment-config FOO=2nd" "updated"
os::cmd::expect_success_and_text "oc set env dc/test-deployment-config FOO=bar --overwrite" "updated"
os::cmd::expect_failure_and_text "oc set env dc/test-deployment-config FOO=zee --overwrite=false" "already has a value"
os::cmd::expect_success_and_text "oc set env dc/test-deployment-config --list" "FOO=bar"
# trailing "-" removes the variable
os::cmd::expect_success_and_text "oc set env dc/test-deployment-config FOO-" "updated"
os::cmd::expect_success_and_text "oc set env bc --all FOO=bar" "updated"
os::cmd::expect_success_and_text "oc set env bc --all --list" "FOO=bar"
os::cmd::expect_success_and_text "oc set env bc --all FOO-" "updated"
# secret-backed env vars: --list shows the reference, --resolve the value
os::cmd::expect_success "oc create secret generic mysecret --from-literal='foo.bar=secret'"
os::cmd::expect_success_and_text "oc set env --from=secret/mysecret --prefix=PREFIX_ dc/test-deployment-config" "updated"
os::cmd::expect_success_and_text "oc set env dc/test-deployment-config --list" "PREFIX_FOO_BAR from secret mysecret, key foo.bar"
os::cmd::expect_success_and_text "oc set env dc/test-deployment-config --list --resolve" "PREFIX_FOO_BAR=secret"
os::cmd::expect_success "oc delete secret mysecret"
os::cmd::expect_failure_and_text "oc set env dc/test-deployment-config --list --resolve" "error retrieving reference for PREFIX_FOO_BAR"
# switch to view user to ensure view-only users can't get secrets through env var resolution
new="$(mktemp -d)/tempconfig"
os::cmd::expect_success "oc config view --raw > $new"
export KUBECONFIG=$new
project=$(oc project -q)
os::cmd::expect_success 'oc policy add-role-to-user view view-user'
os::cmd::expect_success 'oc login -u view-user -p anything'
# single quotes are intentional: os::cmd evals the string, so ${project}
# expands at eval time in this shell
os::cmd::try_until_success 'oc project ${project}'
os::cmd::expect_failure_and_text "oc set env dc/test-deployment-config --list --resolve" "cannot get secrets in the namespace"
oc login -u system:admin
# clean up
os::cmd::expect_success "oc delete dc/test-deployment-config"
os::cmd::expect_success "oc delete bc/ruby-sample-build-validtag"
echo "set env: ok"
os::test::junit::declare_suite_end
# expose: routes created from services, including external and multiport services
os::test::junit::declare_suite_start "cmd/basicresources/expose"
# Expose service as a route
os::cmd::expect_success 'oc create -f test/integration/testdata/test-service.json'
os::cmd::expect_failure 'oc expose service frontend --create-external-load-balancer'
os::cmd::expect_failure 'oc expose service frontend --port=40 --type=NodePort'
os::cmd::expect_success 'oc expose service frontend --path=/test'
os::cmd::expect_success_and_text "oc get route.v1.route.openshift.io frontend --template='{{.spec.path}}'" "/test"
os::cmd::expect_success_and_text "oc get route.v1.route.openshift.io frontend --template='{{.spec.to.name}}'" "frontend" # routes to correct service
os::cmd::expect_success_and_text "oc get route.v1.route.openshift.io frontend --template='{{.spec.port.targetPort}}'" ""
os::cmd::expect_success 'oc delete svc,route -l name=frontend'
# Test that external services are exposable
os::cmd::expect_success 'oc create -f test/testdata/external-service.yaml'
os::cmd::expect_success 'oc expose svc/external'
os::cmd::expect_success_and_text 'oc get route external' 'external'
os::cmd::expect_success 'oc delete route external'
os::cmd::expect_success 'oc delete svc external'
# Expose multiport service and verify we set a port in the route
os::cmd::expect_success 'oc create -f test/testdata/multiport-service.yaml'
os::cmd::expect_success 'oc expose svc/frontend --name route-with-set-port'
os::cmd::expect_success_and_text "oc get route route-with-set-port --template='{{.spec.port.targetPort}}'" "web"
echo "expose: ok"
os::test::junit::declare_suite_end

# Test OAuth access token describer
os::cmd::expect_success 'oc create -f test/testdata/oauthaccesstoken.yaml'
os::cmd::expect_success_and_text "oc describe oauthaccesstoken DYGZDLucARCPIfUeKPhsgPfn0WBLR_9KdeREH0c9iod" "DYGZDLucARCPIfUeKPhsgPfn0WBLR_9KdeREH0c9iod"
echo "OAuth descriptor: ok"
os::cmd::expect_success 'oc delete all --all'
# projectadmin: verify the default project-admin role can run the standard
# workflow (run/new-app/process/patch) in a fresh project
os::test::junit::declare_suite_start "cmd/basicresources/projectadmin"
# switch to test user to be sure that default project admin policy works properly
temp_config="$(mktemp -d)/tempconfig"
os::cmd::expect_success "oc config view --raw > '${temp_config}'"
export KUBECONFIG="${temp_config}"
os::cmd::expect_success 'oc policy add-role-to-user admin project-admin'
os::cmd::expect_success 'oc login -u project-admin -p anything'
os::cmd::expect_success 'oc new-project test-project-admin'
os::cmd::try_until_success "oc project test-project-admin"
# oc run with each generator; --dry-run checks which resource kind each
# generator/restart-policy combination produces
os::cmd::expect_success 'oc run --image=openshift/hello-openshift test'
os::cmd::expect_success 'oc run --image=openshift/hello-openshift --generator=run-controller/v1 test2'
os::cmd::expect_success 'oc run --image=openshift/hello-openshift --restart=Never test3'
os::cmd::expect_success 'oc run --image=openshift/hello-openshift --generator=job/v1 --restart=Never test4'
os::cmd::expect_success 'oc delete dc/test rc/test2 pod/test3 job/test4'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name' 'deploymentconfig.apps.openshift.io/foo'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name --restart=Always' 'deploymentconfig.apps.openshift.io/foo'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name --restart=Never' 'pod/foo'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name --generator=job/v1' 'job.batch/foo'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name --generator=deploymentconfig/v1' 'deploymentconfig.apps.openshift.io/foo'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name --generator=run-controller/v1' 'replicationcontroller/foo'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name --generator=run/v1' 'replicationcontroller/foo'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name --generator=run-pod/v1' 'pod/foo'
os::cmd::expect_success_and_text 'oc run --dry-run foo --image=bar -o name --generator=deployment/v1beta1' 'deployment.extensions/foo'
# template processing and label-selector bulk delete
os::cmd::expect_success 'oc process -f examples/sample-app/application-template-stibuild.json -l name=mytemplate | oc create -f -'
os::cmd::expect_success 'oc delete all -l name=mytemplate'
os::cmd::expect_success 'oc new-app https://github.com/openshift/ruby-hello-world'
os::cmd::expect_success 'oc get dc/ruby-hello-world'
os::cmd::expect_success_and_text "oc get dc/ruby-hello-world --template='{{ .spec.replicas }}'" '1'
patch='{"spec": {"replicas": 2}}'
os::cmd::expect_success "oc patch dc/ruby-hello-world -p '${patch}'"
os::cmd::expect_success_and_text "oc get dc/ruby-hello-world --template='{{ .spec.replicas }}'" '2'
os::cmd::expect_success 'oc delete all -l app=ruby-hello-world'
os::cmd::expect_failure 'oc get dc/ruby-hello-world'
echo "delete all: ok"
os::test::junit::declare_suite_end

# service accounts should not be allowed to request new projects
os::cmd::expect_failure_and_text "oc new-project --token='$( oc sa get-token builder )' will-fail" 'Error from server \(Forbidden\): You may not request a new project via this API.'

# patch: strategic/JSON merge patch success, no-op, and invalid-patch paths
os::test::junit::declare_suite_start "cmd/basicresources/patch"
# Validate patching works correctly
os::cmd::expect_success 'oc login -u system:admin'
group_json='{"kind":"Group","apiVersion":"v1","metadata":{"name":"patch-group"}}'
os::cmd::expect_success "echo '${group_json}' | oc create -f -"
os::cmd::expect_success "oc patch group patch-group -p 'users: [\"myuser\"]' --loglevel=8"
os::cmd::expect_success_and_text 'oc get group patch-group -o yaml' 'myuser'
os::cmd::expect_success "oc patch group patch-group -p 'users: []' --loglevel=8"
# applying the same patch twice results in exit code 0, and "not patched" text
os::cmd::expect_success_and_text "oc patch group patch-group -p 'users: []'" "not patched"
# applying an invalid patch results in exit code 1 and an error
os::cmd::expect_failure_and_text "oc patch group patch-group -p 'users: \"\"'" "cannot restore slice from string"
os::cmd::expect_success_and_text 'oc get group patch-group -o yaml' 'users: \[\]'
echo "patch: ok"
os::test::junit::declare_suite_end
# closes the enclosing cmd/basicresources suite opened before this section
os::test::junit::declare_suite_end
|
#!/usr/bin/env bash
# Package-manager command strings. Defaults assume yum (CentOS);
# checkTools() below overrides them per-distribution.
installType='yum -y install'
removeType='yum -y remove'
upgrade="yum -y update"
# echoColor() prints through this so escape sequences are interpreted
echoType='echo -e'
# NOTE(review): captured but the functions below invoke the `cp` command
# directly, so this assignment appears unused — confirm before removing
cp=`which cp`
# Print $2 in the ANSI color named by $1 and reset afterwards.
# Supported names: red, skyBlue, green, white, magenta, yellow; any other
# name prints nothing. Fixes: removed an unreachable duplicate "skyBlue"
# case, and corrected "magenta" to SGR code 35 (it previously emitted 31,
# i.e. red).
echoColor(){
    case $1 in
        # red
        "red")
            ${echoType} "\033[31m$2 \033[0m"
        ;;
        # sky blue
        "skyBlue")
            ${echoType} "\033[36m$2 \033[0m"
        ;;
        # green
        "green")
            ${echoType} "\033[32m$2 \033[0m"
        ;;
        # white
        "white")
            ${echoType} "\033[37m$2 \033[0m"
        ;;
        # magenta (35 is the ANSI magenta code; 31 was a copy-paste of red)
        "magenta")
            ${echoType} "\033[35m$2 \033[0m"
        ;;
        # yellow
        "yellow")
            ${echoType} "\033[33m$2 \033[0m"
        ;;
    esac
}
# Detect the distribution (centos / debian / ubuntu) from /etc and
# /proc/version and set release plus the matching package-manager command
# strings (installType / removeType / upgrade). Exits if unsupported.
checkTools(){
    if [[ ! -z `find /etc -name "redhat-release"` ]] || [[ ! -z `cat /proc/version | grep -i "centos" | grep -v grep ` ]] || [[ ! -z `cat /proc/version | grep -i "red hat" | grep -v grep ` ]] || [[ ! -z `cat /proc/version | grep -i "redhat" | grep -v grep ` ]]
    then
        release="centos"
        installType='yum -y install'
        removeType='yum -y remove'
        upgrade="yum update -y"
    elif [[ ! -z `cat /etc/issue | grep -i "debian" | grep -v grep` ]] || [[ ! -z `cat /proc/version | grep -i "debian" | grep -v grep` ]]
    then
        release="debian"
        installType='apt -y install'
        upgrade="apt update -y"
        removeType='apt -y autoremove'
    elif [[ ! -z `cat /etc/issue | grep -i "ubuntu" | grep -v grep` ]] || [[ ! -z `cat /proc/version | grep -i "ubuntu" | grep -v grep` ]]
    then
        release="ubuntu"
        installType='apt -y install'
        upgrade="apt update -y"
        removeType='apt --purge remove'
    fi
    if [[ -z ${release} ]]
    then
        # Fix: the original called `echoContent`, which is not defined in
        # this script; the printer here is named echoColor.
        echoColor red "本脚本不支持此系统,请将下方日志反馈给开发者"
        cat /etc/issue
        cat /proc/version
        exit 0;
    fi
}
# Install dependencies: runs the distro's update command, then installs
# socat, nginx, and acme.sh if they are not already present.
installDepends(){
    echoColor yellow "更新"
    ${upgrade}
    # socat is required by acme.sh standalone mode
    if [[ -z `find /usr/bin/ -executable -name "socat"` ]]
    then
        echoColor yellow "\nsocat未安装,安装中\n"
        ${installType} socat >/dev/null
        echoColor green "socat安装完毕"
    fi
    echoColor yellow "\n检测是否安装Nginx"
    if [[ -z `find /sbin/ -executable -name 'nginx'` ]]
    then
        echoColor yellow "nginx未安装,安装中\n"
        ${installType} nginx >/dev/null
        echoColor green "nginx安装完毕"
    else
        echoColor green "nginx已安装\n"
    fi
    echoColor yellow "检测是否安装acme.sh"
    # acme.sh installs itself under ~/.acme.sh via its upstream installer
    if [[ -z `find ~/.acme.sh/ -name "acme.sh"` ]]
    then
        echoColor yellow "\nacme.sh未安装,安装中\n"
        curl -s https://get.acme.sh | sh >/dev/null
        echoColor green "acme.sh安装完毕\n"
    else
        echoColor green "acme.sh已安装\n"
    fi
}
# Restore the backed-up nginx config created by bakConfig() and remove the
# temporary ACME probe vhost.
resetNginxConfig(){
    # Plain command instead of the original backtick-wrapped form: the
    # backticks ran cp and then "executed" its (empty) output — a no-op
    # wrapper with no effect beyond obscuring the call.
    cp -Rrf /tmp/cooper-q/nginx/nginx.conf /etc/nginx/nginx.conf
    rm -rf /etc/nginx/conf.d/6GFV1ES52V2.conf
    echoColor green "\n恢复配置完毕"
}
# Back up the live nginx config to /tmp/cooper-q/nginx before modifying it.
bakConfig(){
    mkdir -p /tmp/cooper-q/nginx
    # Plain command instead of the original backtick-wrapped cp (the
    # backticks executed cp's empty output as a no-op).
    cp -Rrf /etc/nginx/nginx.conf /tmp/cooper-q/nginx/nginx.conf
}
# Issue and install a TLS certificate via acme.sh standalone mode.
# Flow: prompt for domain -> back up nginx config -> serve a probe vhost on
# :80 -> verify the domain reaches this host -> issue/install the cert into
# /tmp/cooper-q/nginx -> restore the original config.
installTLS(){
    echoColor yellow "请输入域名【例:blog.mengxc.info】:"
    read domain
    if [[ -z ${domain} ]]
    then
        echoColor red "域名未填写\n"
        # Re-prompt recursively. `domain` is a global, so when the nested
        # call finishes the outer invocation continues with the value read
        # there — NOTE(review): the remaining outer steps then run a second
        # time; confirm this double-run is acceptable.
        installTLS
    fi
    # Back up the current nginx config
    bakConfig
    # If the domain already appears in nginx.conf, mask it with a marker so
    # the probe vhost below is the only server answering for this domain
    if [[ ! -z `cat /etc/nginx/nginx.conf|grep -v grep|grep "${domain}"` ]]
    then
        sed -i "s/${domain}/@@@6GFV1ES52V2@@@/g" `grep "${domain}" -rl /etc/nginx/nginx.conf`
    fi
    touch /etc/nginx/conf.d/6GFV1ES52V2.conf
    # Probe vhost: /test returns the marker string so we can verify DNS
    echo "server {listen 80;server_name ${domain};root /usr/share/nginx/html;location ~ /.well-known {allow all;}location /test {return 200 '6GFV1ES52V2';}}" > /etc/nginx/conf.d/6GFV1ES52V2.conf
    # nginxStatus: 1 = nginx was not running beforehand, 2 = it was
    # (restarted again at the end)
    nginxStatus=1;
    if [[ ! -z `ps -ef|grep -v grep|grep nginx` ]]
    then
        nginxStatus=2;
        ps -ef|grep -v grep|grep nginx|awk '{print $2}'|xargs kill -9
        sleep 0.5
        nginx
    else
        nginx
    fi
    echoColor yellow "\n验证域名以及服务器是否可用"
    # The probe must come back with the marker, proving DNS points here
    if [[ ! -z `curl -s ${domain}/test|grep 6GFV1ES52V2` ]]
    then
        # Stop nginx so acme.sh --standalone can bind port 80 itself
        ps -ef|grep -v grep|grep nginx|awk '{print $2}'|xargs kill -9
        sleep 0.5
        echoColor green "服务可用,生成TLS中,请等待\n"
    else
        echoColor red "服务不可用请检测dns配置是否正确"
        # Restore the backup before bailing out
        resetNginxConfig
        exit 0;
    fi
    sudo ~/.acme.sh/acme.sh --issue -d ${domain} --standalone -k ec-256 >/dev/null
    ~/.acme.sh/acme.sh --installcert -d ${domain} --fullchainpath /tmp/cooper-q/nginx/${domain}.crt --keypath /tmp/cooper-q/nginx/${domain}.key --ecc >/dev/null
    # Empty output files mean issuance failed
    if [[ -z `cat /tmp/cooper-q/nginx/${domain}.key` ]]
    then
        echoColor red "证书key生成失败,请重新运行"
        resetNginxConfig
        exit
    elif [[ -z `cat /tmp/cooper-q/nginx/${domain}.crt` ]]
    then
        echoColor red "证书crt生成失败,请重新运行"
        resetNginxConfig
        exit
    fi
    echoColor green "证书生成成功"
    echoColor green "证书目录/tmp/cooper-q/nginx"
    ls /tmp/cooper-q/nginx
    resetNginxConfig
    # Bring nginx back up if it was running before we started
    if [[ ${nginxStatus} = 2 ]]
    then
        nginx
    fi
}
# Show the notice banner, then run the installer only after an explicit "y".
# Any other answer prints a goodbye message and exits.
init(){
    echoColor red "\n=============================="
    echoColor yellow "此脚本注意事项"
    echoColor green " 1.会安装依赖所需依赖"
    echoColor green " 2.会把Nginx配置文件备份"
    echoColor green " 3.会安装Nginx、acme.sh,如果已安装则使用已经存在的"
    echoColor green " 4.恢复备份"
    echoColor green " 5.执行期间请不要重启机器"
    echoColor green " 6.备份文件和证书文件都在/tmp下面,请注意留存"
    echoColor green " 7.如果多次执行则将上次生成备份和生成的证书强制覆盖"
    echoColor green " 8.证书默认ec-256"
    echoColor green " 9.下个版本会加入通配符证书生成[todo]"
    echoColor green " 10.可以生成多个不同域名的证书[包含子域名],具体速率请查看[https://letsencrypt.org/zh-cn/docs/rate-limits/]"
    echoColor green " 11.兼容Centos、Ubuntu、Debian"
    echoColor green " 12.Github[https://github.com/cooper-q]"
    echoColor red "=============================="
    echoColor yellow "请输入[y]执行脚本,[任意]结束:"
    read isExecStatus
    case ${isExecStatus} in
        y)
            installDepends
            installTLS
        ;;
        *)
            echoColor green "欢迎下次使用"
            exit
        ;;
    esac
}
# Entry point: detect the package manager, then show the menu and run.
checkTools
init
|
// repository: vvydier/misk-web
import * as React from "react"
import { Row } from "../Table"
export const Rows = (props: { data: any; range: number[] }) => {
const { data, range } = props
return (
<tbody>
{data.slice(...range).map((row: any, index: number) => (
<Row key={`row${index}`} data={row} index={range[0] + index} />
))}
</tbody>
)
}
|
package container_runtime
import (
"github.com/docker/docker/api/types"
"github.com/flant/werf/pkg/image"
)
// BuildOptions controls whether a failed build drops the user into an
// interactive introspection session, before and/or after the failing step.
type BuildOptions struct {
	IntrospectBeforeError bool
	IntrospectAfterError  bool
}
// ImageInterface abstracts a buildable container image: naming, pulling,
// building (stapel-style via containers or via a Dockerfile builder),
// tagging/exporting the built result, and inspection/stage metadata.
type ImageInterface interface {
	Name() string
	SetName(name string)

	Pull() error
	Untag() error

	// TODO: build specifics for stapel builder and dockerfile builder
	// TODO: should be under a single separate interface
	Container() Container
	BuilderContainer() BuilderContainer
	DockerfileImageBuilder() *DockerfileImageBuilder

	Build(BuildOptions) error
	GetBuiltId() string
	TagBuiltImage(name string) error
	Export(name string) error

	Introspect() error

	SetInspect(inspect *types.ImageInspect)
	IsExistsLocally() bool

	SetStageDescription(stage *image.StageDescription)
	GetStageDescription() *image.StageDescription
}
// Container accumulates the run commands and commit-time changes applied to
// a build container, separating user-specified entries from service ones.
type Container interface {
	Name() string

	UserRunCommands() []string
	UserCommitChanges() []string

	AddServiceRunCommands(commands ...string)
	AddRunCommands(commands ...string)

	RunOptions() ContainerOptions
	CommitChangeOptions() ContainerOptions
	ServiceCommitChangeOptions() ContainerOptions
}
// BuilderContainer collects run commands and docker run configuration
// (volumes, exposed ports, env, labels) for a builder container.
type BuilderContainer interface {
	AddServiceRunCommands(commands ...string)
	AddRunCommands(commands ...string)

	AddVolume(volumes ...string)
	AddVolumeFrom(volumesFrom ...string)
	AddExpose(exposes ...string)
	AddEnv(envs map[string]string)
	AddLabel(labels map[string]string)
}
// ContainerOptions collects docker run/commit-change options: mounts,
// exposed ports, env, labels, and image config fields (cmd, workdir, user,
// entrypoint, healthcheck).
type ContainerOptions interface {
	AddVolume(volumes ...string)
	AddVolumeFrom(volumesFrom ...string)
	AddExpose(exposes ...string)
	AddEnv(envs map[string]string)
	AddLabel(labels map[string]string)
	AddCmd(cmd string)
	AddWorkdir(workdir string)
	AddUser(user string)
	AddEntrypoint(entrypoint string)
	AddHealthCheck(check string)
}
|
package simulation
import (
"math/rand"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
"github.com/cosmos/cosmos-sdk/x/simulation"
"github.com/tendermint/farming/app/params"
"github.com/tendermint/farming/x/farming/keeper"
"github.com/tendermint/farming/x/farming/types"
)
// Simulation operation weights constants: appParams keys under which an
// operation's weight may be overridden for a simulation run.
const (
	OpWeightMsgCreateFixedAmountPlan = "op_weight_msg_create_fixed_amount_plan"
	OpWeightMsgCreateRatioPlan       = "op_weight_msg_create_ratio_plan"
	OpWeightMsgStake                 = "op_weight_msg_stake"
	OpWeightMsgUnstake               = "op_weight_msg_unstake"
	OpWeightMsgClaim                 = "op_weight_msg_claim"
)
// WeightedOperations returns all the operations from the module with their
// respective weights. Each weight is read from appParams under its
// op_weight_* key, falling back to the module default when absent.
func WeightedOperations(
	appParams simtypes.AppParams, cdc codec.JSONCodec, ak types.AccountKeeper,
	bk types.BankKeeper, k keeper.Keeper,
) simulation.WeightedOperations {
	// weightFor resolves one operation weight, generating the default when
	// the parameter is not present in appParams.
	weightFor := func(opWeightKey string, defaultWeight int) int {
		var weight int
		appParams.GetOrGenerate(cdc, opWeightKey, &weight, nil,
			func(_ *rand.Rand) {
				weight = defaultWeight
			},
		)
		return weight
	}

	return simulation.WeightedOperations{
		simulation.NewWeightedOperation(
			weightFor(OpWeightMsgCreateFixedAmountPlan, params.DefaultWeightMsgCreateFixedAmountPlan),
			SimulateMsgCreateFixedAmountPlan(ak, bk, k),
		),
		simulation.NewWeightedOperation(
			weightFor(OpWeightMsgCreateRatioPlan, params.DefaultWeightMsgCreateRatioPlan),
			SimulateMsgCreateRatioPlan(ak, bk, k),
		),
		simulation.NewWeightedOperation(
			weightFor(OpWeightMsgStake, params.DefaultWeightMsgStake),
			SimulateMsgStake(ak, bk, k),
		),
		simulation.NewWeightedOperation(
			weightFor(OpWeightMsgUnstake, params.DefaultWeightMsgUnstake),
			SimulateMsgUnstake(ak, bk, k),
		),
		simulation.NewWeightedOperation(
			weightFor(OpWeightMsgClaim, params.DefaultWeightMsgHarvest),
			SimulateMsgClaim(ak, bk, k),
		),
	}
}
// SimulateMsgCreateFixedAmountPlan generates a MsgCreateFixedAmountPlan with random values
// nolint: interfacer
func SimulateMsgCreateFixedAmountPlan(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Keeper) simtypes.Operation {
	return func(
		r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string,
	) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {
		// TODO: not implemented yet — currently a no-op stub returning an
		// empty OperationMsg and no future operations
		return simtypes.OperationMsg{}, nil, nil
	}
}
// SimulateMsgCreateRatioPlan generates a MsgCreateRatioPlan with random values
// nolint: interfacer
func SimulateMsgCreateRatioPlan(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Keeper) simtypes.Operation {
	return func(
		r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string,
	) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {
		// TODO: not implemented yet — no-op stub
		return simtypes.OperationMsg{}, nil, nil
	}
}
// SimulateMsgStake generates a MsgStake with random values
// (doc fix: previously said MsgCreateFixedAmountPlan, a copy-paste error)
// nolint: interfacer
func SimulateMsgStake(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Keeper) simtypes.Operation {
	return func(
		r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string,
	) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {
		// TODO: not implemented yet — no-op stub
		return simtypes.OperationMsg{}, nil, nil
	}
}
// SimulateMsgUnstake generates a MsgUnstake with random values
// nolint: interfacer
func SimulateMsgUnstake(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Keeper) simtypes.Operation {
	return func(
		r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string,
	) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {
		// TODO: not implemented yet — no-op stub
		return simtypes.OperationMsg{}, nil, nil
	}
}
// SimulateMsgClaim generates a MsgClaim with random values
// nolint: interfacer
func SimulateMsgClaim(ak types.AccountKeeper, bk types.BankKeeper, k keeper.Keeper) simtypes.Operation {
	return func(
		r *rand.Rand, app *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string,
	) (simtypes.OperationMsg, []simtypes.FutureOperation, error) {
		// TODO: not implemented yet — no-op stub
		return simtypes.OperationMsg{}, nil, nil
	}
}
|
/**
 * Demonstrates a basic {@code while} loop: prints the integers 0 through 10
 * (inclusive), each followed by a space, to standard output.
 */
public class WhileLoopExample {
    public static void main(String[] args) {
        int i = 0;
        // loop condition is inclusive, so 10 itself is printed
        while (i <= 10) {
            System.out.print(i + " ");
            i++;
        }
    }
}
#!/bin/sh
# Regression-test driver for svcasc2Abcd_test.m: copies the Octave sources
# into a scratch directory, runs the test, and diffs its output against an
# expected (here empty) transcript.
prog=svcasc2Abcd_test.m
depends="svcasc2Abcd_test.m test_common.m butter2pq.m pq2svcasc.m \
pq2blockKWopt.m KW.m optKW2.m optKW.m svcasc2Abcd.m Abcd2tf.m"
# per-process scratch directory
tmp=/tmp/$$
here=`pwd`
if [ $? -ne 0 ]; then echo "Failed pwd"; exit 1; fi
# Report failure (to stderr), restore the working directory, remove the
# scratch directory, and exit 1.
fail()
{
        echo FAILED ${0#$here"/"} $prog 1>&2
        cd $here
        rm -rf $tmp
        exit 1
}
# Report success, restore the working directory, remove the scratch
# directory, and exit 0.
pass()
{
        echo PASSED ${0#$here"/"} $prog
        cd $here
        rm -rf $tmp
        exit 0
}
# Clean up on HUP/INT/QUIT/TERM
trap "fail" 1 2 3 15

mkdir $tmp
if [ $? -ne 0 ]; then echo "Failed mkdir"; exit 1; fi
# Stage every dependency into the scratch directory
for file in $depends;do \
  cp -R src/$file $tmp; \
  if [ $? -ne 0 ]; then echo "Failed cp "$file; fail; fi \
done
cd $tmp
if [ $? -ne 0 ]; then echo "Failed cd"; fail; fi

#
# the output should look like this
#
cat > test.ok << 'EOF'
EOF
if [ $? -ne 0 ]; then echo "Failed output cat"; fail; fi

#
# run and see if the results match
#
echo "Running $prog"
octave --no-gui -q $prog >test.out 2>&1
if [ $? -ne 0 ]; then echo "Failed running $prog"; fail; fi
# -Bb: ignore blank lines and whitespace differences
diff -Bb test.ok test.out
if [ $? -ne 0 ]; then echo "Failed diff -Bb"; fail; fi

#
# this much worked
#
pass
|
#!/bin/bash
# Webmin install/restart/uninstall menu. ANSI color prefixes for the status
# labels shown in the menu header.
Green_font_prefix="\033[32m" && Red_font_prefix="\033[31m" && Green_background_prefix="\033[42;37m" && Red_background_prefix="\033[41;37m" && Font_color_suffix="\033[0m"
Info="${Green_font_prefix}[Installed]${Font_color_suffix}"
Error="${Red_font_prefix}[Not Installed]${Font_color_suffix}"
# Process name of whatever is listening on port 10000; webmin's miniserv
# shows up as "perl" here
cek=$(netstat -ntlp | grep 10000 | awk '{print $7}' | cut -d'/' -f2)
function install () {
IP=$(wget -qO- ifconfig.co);
echo " Adding Repositori Webmin"
sh -c 'echo "deb http://download.webmin.com/download/repository sarge contrib" > /etc/apt/sources.list.d/webmin.list'
apt install gnupg gnupg1 gnupg2 -y
wget http://www.webmin.com/jcameron-key.asc
apt-key add jcameron-key.asc
echo " Start Install Webmin"
clear
sleep 0.5
apt update > /dev/null 2>&1
apt install webmin -y
sed -i 's/ssl=1/ssl=0/g' /etc/webmin/miniserv.conf
/etc/init.d/webmin restart
rm -f /root/jcameron-key.asc
clear
echo ""
echo " Done Install Webmin"
echo " $IP:10000"
}
function restart () {
echo " Restarting Webmin"
sleep 0.5
service webmin restart > /dev/null 2>&1
echo " Start Uninstall Webmin"
clear
echo ""
echo " Done Restart Webmin"
}
function uninstall () {
echo " Removing Repositori Webmin"
rm -f /etc/apt/sources.list.d/webmin.list
apt update > /dev/null 2>&1
echo " Start Uninstall Webmin"
clear
sleep 0.5
apt autoremove --purge webmin -y > /dev/null 2>&1
clear
echo ""
echo " Done Uninstall Webmin"
}
if [[ "$cek" = "perl" ]]; then
sts="${Info}"
else
sts="${Error}"
fi
clear
echo -e " =============================="
echo -e " Webmin Menu "
echo -e " =============================="
echo -e " Status $sts"
echo -e " 1. Install Webmin"
echo -e " 2. Restart Webmin"
echo -e " 3. Uninstall Webmin"
echo -e " Press CTRL+C to return"
read -rp " Please Enter The Correct Number : " -e num
if [[ "$num" = "1" ]]; then
install
elif [[ "$num" = "2" ]]; then
restart
elif [[ "$num" = "3" ]]; then
uninstall
else
clear
echo " You Entered The Wrong Number"
menu
fi
|
//==============================================================================
// WIT
//
// Based On:
//==============================================================================
// Constrained Materials Management and Production Planning Tool
//
// (C) Copyright IBM Corp. 1993, 2020 All Rights Reserved
//==============================================================================
#ifndef MsgArgH
#define MsgArgH
//------------------------------------------------------------------------------
// Header file: "MsgArg.h"
//
// Contains the declaration of class MsgArg.
//------------------------------------------------------------------------------
#include <Util.h>
//------------------------------------------------------------------------------
class MclArg;
class MclFacility;
class MclLevel;
class MclMsgFrag;
//------------------------------------------------------------------------------
// Class MsgArg
//
// Wrapper for class MclArg.
//
// Implemented in MsgFac.C.
//------------------------------------------------------------------------------
class WitMsgArg
{
public:
//------------------------------------------------------------------------
// Constructor functions.
// One converting constructor per raw argument type that a message may
// carry: scalars, strings, message fragments, C arrays and WitVectors.
//------------------------------------------------------------------------
WitMsgArg ();
WitMsgArg (bool rawArg);
WitMsgArg (int rawArg);
WitMsgArg (size_t rawArg);
WitMsgArg (long rawArg);
WitMsgArg (double rawArg);
WitMsgArg (const WitISRealArg & rawArg);
WitMsgArg (const char * rawArg);
WitMsgArg (const WitString & rawArg);
WitMsgArg (const WitMsgFrag & rawArg);
WitMsgArg (const int * rawArg, int length = 1);
WitMsgArg (const float * rawArg, int length = 1);
WitMsgArg (const double * rawArg, int length = 1);
WitMsgArg (const WitVector <bool> & rawArg);
WitMsgArg (const WitVector <int> & rawArg);
WitMsgArg (const WitVector <double> & rawArg);
//------------------------------------------------------------------------
// Destructor function.
//------------------------------------------------------------------------
~WitMsgArg ();
//------------------------------------------------------------------------
// Other public member functions.
//------------------------------------------------------------------------
void operator = (const WitMsgArg &);
// True iff this argument actually wraps an MclArg (default-constructed
// instances wrap nothing).
inline bool hasArg () const
{
return (myMclArgPtr_ != NULL);
}
noCopyCtor (WitMsgArg);
//
// Public, but not implemented.
// Making the copy ctor public avoids a syntax error from the
// MS Visual C++ compiler with the -Za flag.
//------------------------------------------------------------------------
// Data access functions.
//------------------------------------------------------------------------
// Precondition: hasArg() — asserted before dereferencing the pointer.
inline const MclArg & myMclArg () const
{
witAssert (hasArg ());
return * myMclArgPtr_;
}
private:
//------------------------------------------------------------------------
// Private member data.
//------------------------------------------------------------------------
MclArg * myMclArgPtr_;
//
// Pointer to the MclArg wrapped by this MsgArg, if any;
// otherwise NULL.
};
#endif
|
package org.insightcentre.nlp.saffron.topic.topicsim;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
 * Spring Boot launcher for the Saffron term-similarity service.
 */
@SpringBootApplication
public class TermSimilarityApplication {
/**
* The entry point of application.
*
* @param args the input arguments
*/
public static void main(String[] args) {
SpringApplication.run(TermSimilarityApplication.class, args);
}
}
|
package com.school.domain.entities;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
/**
 * Plain data holder describing a student.
 * Lombok generates the getters and both the no-arg and all-args
 * constructors at compile time.
 */
@Getter
@NoArgsConstructor
@AllArgsConstructor
public class Student {
private String name;    // display name
private Integer id;     // student identifier
private String address; // postal address
}
|
#!/bin/bash
#
# Takes a list of tweet-IDs
# - Extracts the tweets using https://github.com/docnow/twarc
# - Extract image-URLs from the tweets
# - Downloads the images
# - Generates a collage using the images with links back to the tweets
#
# The format of the tweet-ID-file is a list of tweetIDs (numbers), one per line
#
# Requirements:
# - An installed twarc and a Twitter API key (see the twarc GitHub readme)
# - jq (sudo apt install jq)
#
# TODO: Consider adding user.screen_name as metadata
###############################################################################
# CONFIG
###############################################################################
# Resolve defaults relative to the script's own directory. Every setting is
# overridable from the calling environment via the `: ${VAR:=default}` idiom.
pushd ${BASH_SOURCE%/*} > /dev/null
: ${TWARC:="/usr/local/bin/twarc"} # Also tries default path
: ${IMAGE_BUCKET_SIZE:=20000}
: ${MAX_IMAGES:=99999999999}
: ${THREADS:=3}
: ${TIMEOUT:=60}
: ${TEMPLATE:="$(pwd)/demo_twitter.template.html"}
: ${ALREADY_HYDRATED:=false}
: ${AGGRESSIVE_TWITTER_SKIP:=false} # true = skip when there are existing structures
: ${DOWNLOAD_CACHE:=""} # Will default to collagename_downloads
: ${BACKGROUND:="000000"}
: ${RAW_W:=2}
: ${RAW_H:=2}
: ${ALLOW_UPSCALE:=true}
: ${JUXTA_HOME:="$(pwd)"}
popd > /dev/null
# Exported so the sourced juxta.sh (bottom of the script) sees it.
export JUXTA_HOME
################################################################################
# FUNCTIONS
################################################################################
# Print usage and exit with the status passed as $1.
usage() {
echo "./demo_twitter.sh tweet-ID-list [collage_name]"
exit $1
}
# Validate command-line arguments and derive globals.
# $1: tweet-ID list file (required, must exist and be non-empty)
# $2: collage name (optional; derived from $1's basename when omitted)
# Sets: TWEETIDS, DEST, DOWNLOAD. Exits if jq is missing.
parse_arguments() {
TWEETIDS="$1"
if [[ ! -s "$TWEETIDS" ]]; then
>&2 echo "Error: No tweet-ID-list at '$TWEETIDS'"
usage 1
fi
DEST="$2"
if [[ "." == ".$DEST" ]]; then
DEST=$(basename "$TWEETIDS") # foo.json.gz
DEST="${DEST%.*}" # foo.json
DEST="twitter_${DEST%.*}" # foo
echo "No collage name specified, using $DEST"
fi
if [[ "." == .$(which jq) ]]; then
>&2 echo "Error: jq not available. Install with 'sudo apt-get install jq'"
exit 9
fi
# DOWNLOAD: explicit cache dir if given, else <collage>_downloads.
: ${DOWNLOAD:="$DOWNLOAD_CACHE"}
: ${DOWNLOAD:="${DEST}_downloads"}
}
# Output: HYDRATED
# Hydrate the tweet-ID list into full tweet JSON (gzipped) using twarc.
# Skips the step when hydrated output already exists, or copies/gzips the
# input directly when it already contains hydrated JSON.
# Output variable: HYDRATED (path to hydrated JSON, plain or gzipped).
hydrate() {
export HYDRATED="$DOWNLOAD/hydrated.json.gz"
# Heuristic: a '{' anywhere means the file holds JSON objects, not bare IDs.
if [[ "." != .$( grep -m 1 '{' "$TWEETIDS" ) ]]; then
echo "Input file $TWEETIDS contains a '{', so it is probably already hydrated"
ALREADY_HYDRATED=true
fi
if [[ -s "$DOWNLOAD/hydrated.json" ]]; then
echo " - Skipping hydration of '$TWEETIDS' as $DOWNLOAD/hydrated.json already exists"
export HYDRATED="$DOWNLOAD/hydrated.json"
return
elif [[ -s "$DOWNLOAD/hydrated.json.gz" ]]; then
echo " - Skipping hydration of '$TWEETIDS' as $DOWNLOAD/hydrated.json.gz already exists"
return
fi
if [ "true" == "$ALREADY_HYDRATED" ]; then
if [[ "$TWEETIDS" == *.gz ]]; then
echo "Input file $TWEETIDS is already hydrated. Copying to $DOWNLOAD/hydrated.json.gz"
cp $TWEETIDS $DOWNLOAD/hydrated.json.gz
else
echo "Input file $TWEETIDS is already hydrated. GZIPping to $DOWNLOAD/hydrated.json.gz"
gzip -c $TWEETIDS > $DOWNLOAD/hydrated.json.gz
fi
return
fi
if [ ! -x "$TWARC" ]; then
TWARC=$(which twarc)
if [ ! -x "$TWARC" ]; then
>&2 echo "Unable to locate twarc executable (tried $TWARC)"
>&2 echo "Please state the folder using environment variables, such as"
>&2 echo "TWARC=/home/myself/bin/twarc ./demo_twitter.sh mytweetIDs.dat mytweets"
exit 3
fi
fi
echo " - Hydration of '$TWEETIDS' to $DOWNLOAD/hydrated.json.gz"
# BUGFIX: the gzipped stream was written to hydrated.json (no .gz), so
# HYDRATED pointed at a missing file and re-runs never detected the cache.
$TWARC hydrate "$TWEETIDS" | gzip > "$DOWNLOAD/hydrated.json.gz"
}
# Extract (id, created_at, photo URL) triples from the hydrated tweets into
# $DOWNLOAD/date-id-imageURL.dat, one compact JSON array per line.
# Only tweets whose media type is "photo" are kept.
extract_image_data() {
if [ -s "$DOWNLOAD/date-id-imageURL.dat" ]; then
echo " - Skipping extraction of date, ID and imageURL as $DOWNLOAD/date-id-imageURL.dat already exists"
return
fi
echo " - Extracting date, ID and imageURL to $DOWNLOAD/date-id-imageURL.dat"
# TODO: Better handling of errors than throwing them away
zcat "$HYDRATED" | jq --indent 0 -r 'if (.entities .media[] .type) == "photo" then [.id_str,.created_at,.entities .media[] .media_url_https // .entities .media[] .media_url] else empty end' > "$DOWNLOAD/date-id-imageURL.dat" 2>/dev/null
# TODO: $DOWNLOAD/hydrated.json -> $DOWNLOAD/date-id-imageURL.dat
}
# 1 [786532479343599600,"Thu Oct 13 11:42:10 +0000 2016","https://pbs.twimg.com/media/CupTGBlWcAA-yzz.jpg"]
# Download a single image. Input is one job line of the form
#   <counter> [<id>,"<created_at>","<image URL>"]
# (see the sample above). Parsed with deliberate IFS word-splitting, so the
# exact statement order below matters. Emits "<count>/<max> <date> <id> <path>"
# on success; complains on stderr otherwise. Runs inside xargs sub-shells.
download_image() {
local LINE="$@"
local IFS=$' '
local TOKENS=($LINE)
# First space-separated token is the job counter prefixed by the caller.
local COUNT=${TOKENS[0]}
unset IFS
# Strip everything up to and including the '[' of the JSON array.
LINE=${LINE#*\[}
# 786532479343599600,"Thu Oct 13 11:42:10 +0000 2016","https://pbs.twimg.com/media/CupTGBlWcAA-yzz.jpg"]
IFS=,
local TOKENS=($LINE)
local ID=${TOKENS[0]}
local ID=$( echo $ID | tr -d '"' )
local DATE_STR=${TOKENS[1]}
# Normalise Twitter's date format to ISO-8601 for sortable output.
local TDATE=$( date -d $DATE_STR +"%Y-%m-%dT%H:%M:%S" )
unset IFS
local LINE=${LINE#*,}
local LINE=${LINE#*,}
# Remainder minus the trailing ']' is the URL.
local IMAGE_URL=${LINE%?}
local IMAGE_NAME=$(echo "$IMAGE_URL" | sed -e 's/^[a-zA-Z]*:\/\///' -e 's/[^-A-Za-z0-9_.]/_/g')
# Bucket images into folders of IMAGE_BUCKET_SIZE to keep dirs manageable.
local BUCKET=$((COUNT / IMAGE_BUCKET_SIZE * IMAGE_BUCKET_SIZE ))
mkdir -p "$DOWNLOAD/images/$BUCKET"
local IDEST="$DOWNLOAD/images/$BUCKET/$IMAGE_NAME"
if [ ! -s "$IDEST" ]; then
curl -s -m $TIMEOUT "$IMAGE_URL" > "$IDEST"
fi
if [ -s "$IDEST" ]; then
echo "$COUNT/$MAX $TDATE $ID $IDEST"
else
>&2 echo "Unable to download $IMAGE_URL"
fi
}
export -f download_image
# Download up to MAX_IMAGES of the images listed in date-id-imageURL.dat,
# THREADS at a time via xargs, appending results to
# counter-max-date-id-imagePath.dat.
download_images() {
if [ -s "$DOWNLOAD/counter-max-date-id-imagePath.dat" ]; then
if [[ "true" == "$AGGRESSIVE_TWITTER_SKIP" ]]; then
# BUGFIX: message said "==treu".
echo " - $DOWNLOAD/counter-max-date-id-imagePath.dat already exists and AGGRESSIVE_TWITTER_SKIP==true. Skipping image download"
return
else
echo " - $DOWNLOAD/counter-max-date-id-imagePath.dat already exists, but all images might not be there"
fi
fi
echo " - Downloading images defined in $DOWNLOAD/date-id-imageURL.dat"
# Create job list
local MAX=`cat "$DOWNLOAD/date-id-imageURL.dat" | wc -l`
if [ "$MAX_IMAGES" -lt "$MAX" ]; then
MAX=$MAX_IMAGES
fi
local ITMP=`mktemp /tmp/juxta_demo_twitter_XXXXXXXX`
local COUNTER=1
IFS=$'\n'
# Prefix every line with its job counter so download_image can bucket it.
while read LINE; do
if [ $COUNTER -gt $MAX ]; then
break
fi
echo "$COUNTER $LINE" >> $ITMP
COUNTER=$(( COUNTER + 1 ))
done < "$DOWNLOAD/date-id-imageURL.dat"
# Run download jobs threaded; exports make the vars visible to xargs sub-shells.
export MAX
export IMAGE_BUCKET_SIZE
export DOWNLOAD
export TIMEOUT
#cat $ITMP | tr '\n' '\0' | xargs -0 -P $THREADS -n 1 -I {} bash -c 'echo "{}"'
cat $ITMP | tr '\n' '\0' | xargs -0 -P $THREADS -n 1 -I {} bash -c 'download_image "{}"' | tee "$DOWNLOAD/counter-max-date-id-imagePath.dat"
rm $ITMP
}
# Reshape the download log into juxta's "path|metadata" input format,
# writing $DOWNLOAD/twitter_images.dat.
prepare_juxta_input() {
if [[ "true" == "$AGGRESSIVE_TWITTER_SKIP" && -s "$DOWNLOAD/twitter_images.dat" ]]; then
echo " - Skipping sorting and preparing juxta image list $DOWNLOAD/twitter_images.dat as it already exists AGGRESSIVE_TWITTER_SKIP=true"
return
fi
echo " - Sorting and preparing juxta image list $DOWNLOAD/twitter_images.dat"
# Drop the "count/max " prefix, then rewrite "<date> <id> <path>" as "<path>|<id> <date>".
cat "$DOWNLOAD/counter-max-date-id-imagePath.dat" | sed -e 's/^[0-9\/]* //' -e 's/^\([^ ][^ ]*\) \([0-9][0-9]*\) \([^ ][^ ]*\)$/\3|\2 \1/' > "$DOWNLOAD/twitter_images.dat"
}
###############################################################################
# CODE
###############################################################################
parse_arguments "$@"
mkdir -p "$DOWNLOAD"
hydrate
extract_image_data
download_images
prepare_juxta_input
export TEMPLATE
export RAW_W
export RAW_H
export THREADS
# juxta.sh is sourced (not exec'd) so it inherits the exported settings above.
AGGRESSIVE_META_SKIP=$AGGRESSIVE_TWITTER_SKIP SKIP_IMAGE_VERIFICATION=$AGGRESSIVE_TWITTER_SKIP AGGRESSIVE_IMAGE_SKIP=$AGGRESSIVE_TWITTER_SKIP INCLUDE_ORIGIN=false . ${JUXTA_HOME}/juxta.sh "$DOWNLOAD/twitter_images.dat" "$DEST"
|
#!/bin/bash
# Version 3
# Written by José Arizaga
# Formatting and indentation by José Arizaga
# This script displays the logs of login attempts (successful or failed).
# Menu loop: opc starts at a dummy non-zero value so the loop runs at least once.
let opc=10
while [ $opc != 0 ]
do
clear
echo "Menu de Logs"
echo "1- Ver historico de usuarios logeados"
echo "2- Ver intentos de log fallidos"
echo "3- Ver usuarios logeados actualemente"
echo "0- Salir"
read opc
case $opc in
1)
# Full history of successful logins, dropping open sessions and reboots.
# NOTE(review): this reads /root/logs.correctos while the filters below read
# /root/logs.correctos-F — confirm which file is the intended source.
cat /root/logs.correctos| grep -v "still logged in" | grep -v "reboot" | more
echo "Filtros:"
echo "1- Por nombre de usuario"
echo "2- Por fechas"
read opc2
case $opc2 in
1)
echo -n "Ingrese nombre de usuario: "
read usr
cat /root/logs.correctos-F | head -n -1 | grep -v "still logged in" | grep -v "reboot" | grep -w $usr | more
;;
2)
# Prepare the `last` output in a scratch file for easier handling:
# collapse runs of spaces/tabs into single ";" separators, and drop
# still-open sessions and system reboots so only the history remains.
# Only login dates are considered, not logout dates.
# The scratch file (aux) only exists while the script is running.
cat /root/logs.correctos-F| head -n -1 | grep -v "still logged in" | grep -v "reboot" | tr ' \t' ";" | tr -s ";" > aux
echo -n "Ingrese fecha inicial (formato MM-DD-AAAA): "
read fechaIni
echo -n "Ingrese fecha final (formato MM-DD-AAAA): "
read fechaFin
# UTS = Unix Timestamp
UTSIni=$(date -d$fechaIni +%s)
UTSFin=$(date -d$fechaFin +%s)
# Keep only the lines whose login date falls inside [UTSIni, UTSFin].
while IFS= read -r linea
do
echo "$linea" > aux2
dia=$(cut -d";" -f6 < aux2)
mes=$(cut -d";" -f5 < aux2)
anio=$(cut -d";" -f8 < aux2)
rm aux2
fechaCompleta=($mes-$dia-$anio)
fechaLineaUTS=$(date -d$fechaCompleta +%s)
if [ $fechaLineaUTS -ge $UTSIni ] && [ $fechaLineaUTS -le $UTSFin ]
then
echo "$linea" >> logsAux
fi
done < aux
# Convert back to a human-readable, space-separated format.
cat logsAux | tr ";" ' \t' | more
rm logsAux
rm aux
;;
esac
;;
2)
lastb
echo "Filtros:"
echo "1- Por nombre de usuario"
echo "2- Por fechas"
read opc3
case $opc3 in
1)
echo -n "Ingrese nombre de usuario: "
read usr
cat /root/logs.fallidos| head -n -1 | grep -w "$usr" | more
;;
2)
# Almost the same code as above, but for lastb (failed attempts).
cat /root/logs.fallidos| head -n -1 | tr ' \t' ";" | tr -s ";" > aux
echo -n "Ingrese fecha inicial (formato MM-DD-AAAA): "
read fechaIni
echo -n "Ingrese fecha final (formato MM-DD-AAAA): "
read fechaFin
# UTS = Unix Timestamp
UTSIni=$(date -d$fechaIni +%s)
UTSFin=$(date -d$fechaFin +%s)
while IFS= read -r linea
do
echo "$linea" > aux2
dia=$(cut -d";" -f6 < aux2)
mes=$(cut -d";" -f5 < aux2)
anio=$(cut -d";" -f8 < aux2)
rm aux2
fechaCompleta=($mes-$dia-$anio)
fechaLineaUTS=$(date -d$fechaCompleta +%s)
if [ $fechaLineaUTS -ge $UTSIni ] && [ $fechaLineaUTS -le $UTSFin ]
then
echo "$linea" >> logsAux
fi
done < aux
# Back to a more "human-readable" format.
cat logsAux | tr ";" ' \t' | more
rm logsAux
rm aux
;;
esac
;;
3)
# Currently logged-in users.
who
echo ""
echo "Presione cualquier tecla para salir"
read -n1
;;
0)
let opc=0
clear
;;
*)
echo "Esa opción no es valida"
;;
esac
done
#!/bin/bash
# Roll a moximo Kubernetes node back to its pre-setup ("virgin") state:
# stop/disable all kube services, restore the first-boot network config and
# hostname, and reset the node/master marker files.
echo "Restoring node..."
# NOTE(review): relative source path — only correct when run from /; this
# presumably should read /etc/moximo/scripts/... Confirm before relying on it.
cp etc/moximo/scripts/moximo-setup.sh /etc/moximo/scripts/.
systemctl stop kube-apiserver.service kube-controller-manager.service kube-proxy.service kube-scheduler.service kubelet.service
systemctl disable kube-apiserver.service kube-controller-manager.service kube-proxy.service kube-scheduler.service kubelet.service
systemctl stop moximo-master
systemctl disable moximo-master
cd /etc/sysconfig/network-scripts/
# Restore the pristine first-boot alias config for eth0:0.
cp ifcfg-eth0:0.FIRSTBOOT ifcfg-eth0\:0
rm /etc/moximo/.node
rm /etc/moximo/.master
touch /etc/moximo/.firstboot
ifup eth0:0
hostnamectl set-hostname --static moximo-virgin
echo "Done.."
|
// Doxygen-generated navigation index for ClLayerSupportTests.cpp.
// Each entry is [ element name, target xhtml anchor, child list ].
// Do not edit by hand — regenerated by the documentation build.
var _cl_layer_support_tests_8cpp =
[
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#ac71500cd7f2194b59ae1173c90d292d8", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#aaa616ce0e224c6321469548c54561030", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#a30825c7d0a3c280ef4e3800e71a36f6a", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#aee9113eb19d3e48fc73e9eb0b3ce061a", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#aa021aef2e8677449b53dfa2b76d740bb", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#a1958e012978fd4c40cb90f0781eafa7a", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#a98703852b189f0391713194bb5e3be8e", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#a3811203ce021d04386a4db36b50fa014", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#a1488e25b30fb7538946047af3452ddfc", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#aa5bc535b3261f905caf521328174a0fa", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#a38b9f4cacc3ff6bdfe820cef9a2dc5d2", null ],
    [ "BOOST_FIXTURE_TEST_CASE", "_cl_layer_support_tests_8cpp.xhtml#ac13048f7a82db6e53fe4602238f8c65a", null ]
];
from setuptools import Extension
from setuptools.command.build_ext import build_ext as build_ext_orig
from distutils.file_util import copy_file
class CMakeExtension(Extension):
    """setuptools Extension subclass describing a CMake-driven build target.

    Instances carry no compilable sources of their own; the custom
    ``build_ext`` command recognises them and delegates to CMake instead.
    """

    def __init__(self, name, sources=None, build_directory='', build_options=None):
        """
        :param name: name of the extension
        :param sources: optional list of source files (defaults to empty)
        :param build_directory: directory holding the CMakeLists.txt to configure
        :param build_options: extra command-line options forwarded to cmake
        """
        super().__init__(name, sources=sources or [])
        self.build_directory = build_directory
        self.build_options = build_options or []
class build_ext(build_ext_orig):
    """build_ext command that knows how to build CMakeExtension targets."""

    def run(self):
        """Build CMake extensions ourselves; delegate the rest to setuptools.

        The original called ``super().run()`` once per non-CMake extension,
        re-running the whole stock build repeatedly; now it runs at most once.
        """
        cmake_extensions = [e for e in self.extensions if isinstance(e, CMakeExtension)]
        for extension in cmake_extensions:
            self.build_cmake_extension(extension)
        if len(cmake_extensions) != len(self.extensions):
            super().run()

    def build_cmake_extension(self, extension):
        """
        Configure and build a CMakeExtension with CMake.

        :param extension: CMakeExtension instance to be built
        :raises DistutilsPlatformError: if the configure or build step fails
        """
        import os
        import subprocess
        # BUGFIX: DistutilsPlatformError was raised below but never imported.
        from distutils.errors import DistutilsPlatformError

        build_temp = os.path.abspath(self.build_temp)
        ext_fullpath = self.get_ext_fullpath(extension.name)
        extdir = os.path.dirname(ext_fullpath)
        if not os.path.exists(build_temp):
            os.makedirs(build_temp)
        cmake_args = [
            'cmake',
            extension.build_directory,
            '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
        ]
        cmake_args.extend(extension.build_options)
        # BUGFIX: the original set env['CXX']/env['CC'] from
        # self.compiler_cxx / self.compiler_so — attributes that do not exist
        # on build_ext (AttributeError at runtime). The inherited environment
        # is used instead; set CC/CXX externally to choose a compiler.
        env = os.environ.copy()
        try:
            # BUGFIX: distutils' self.spawn() accepts no env/cwd keyword
            # arguments; subprocess.check_call provides both and raises
            # CalledProcessError on a non-zero exit code.
            subprocess.check_call(cmake_args, env=env, cwd=build_temp)
            subprocess.check_call(
                ['cmake', '--build', '.', '--target', extension.name],
                env=env, cwd=build_temp)
        except subprocess.CalledProcessError as e:
            raise DistutilsPlatformError("Error building C extension: " + str(e))
<reponame>payhawk/travelperk-integration<filename>src/store/PgStore.ts
import * as fs from 'fs';
import * as moment from 'moment';
import { Pool } from 'pg';
import { ILogger } from '@utils';
import { SCHEMA } from './Config';
import { IInvoicesSyncHistoryItemRecord, INewUserTokenSetRecord, ISchemaStore, IUserTokenSetRecord, PayhawkApiKeyRecordKeys, SyncHistoryItemRecordKeys, UserTokenSetRecordKeys } from './contracts';
export class PgStore implements ISchemaStore {
    constructor(private readonly pgClient: Pool, private readonly logger: ILogger) {
    }

    /** Returns every stored token set, one row per connected account. */
    async getAllTokenSets(): Promise<IUserTokenSetRecord[]> {
        const result = await this.pgClient.query<IUserTokenSetRecord>({
            text: `SELECT * FROM "${SCHEMA.TABLE_NAMES.ACCESS_TOKENS}"`,
        });
        return result.rows;
    }

    /** Upserts the token set for an account, touching updated_at on conflict. */
    async saveAccessToken({ account_id, token_set }: INewUserTokenSetRecord): Promise<void> {
        await this.pgClient.query({
            text: `
                INSERT INTO "${SCHEMA.TABLE_NAMES.ACCESS_TOKENS}" (
                    "${UserTokenSetRecordKeys.account_id}",
                    "${UserTokenSetRecordKeys.token_set}"
                )
                VALUES ($1, $2)
                ON CONFLICT ("${UserTokenSetRecordKeys.account_id}")
                DO
                    UPDATE SET
                        "${UserTokenSetRecordKeys.token_set}" = $2,
                        "${UserTokenSetRecordKeys.updated_at}" = now();
            `,
            values: [
                account_id,
                token_set,
            ],
        });
    }

    /** Returns the token set record for an account, or undefined if none exists. */
    async getAccessToken(accountId: string): Promise<IUserTokenSetRecord | undefined> {
        const query = await this.pgClient.query<IUserTokenSetRecord>({
            text: `
                SELECT * FROM "${SCHEMA.TABLE_NAMES.ACCESS_TOKENS}"
                WHERE "${UserTokenSetRecordKeys.account_id}"=$1
            `,
            values: [
                accountId,
            ],
        });
        const record = query.rows[0];
        return record;
    }

    /** Deletes the token set for an account; a no-op if none exists. */
    async deleteAccessToken(accountId: string): Promise<void> {
        await this.pgClient.query<IUserTokenSetRecord>({
            text: `
                DELETE FROM "${SCHEMA.TABLE_NAMES.ACCESS_TOKENS}"
                WHERE "${UserTokenSetRecordKeys.account_id}"=$1
            `,
            values: [
                accountId,
            ],
        });
    }

    /** Returns the Payhawk API key for an account, or undefined if not set. */
    async getApiKey(accountId: string): Promise<string | undefined> {
        const query = await this.pgClient.query<{ key: string }>({
            text: `
                SELECT "${PayhawkApiKeyRecordKeys.key}" FROM "${SCHEMA.TABLE_NAMES.PAYHAWK_API_KEYS}"
                WHERE "${PayhawkApiKeyRecordKeys.account_id}" = $1
            `,
            values: [accountId],
        });
        if (query.rows.length > 0) {
            return query.rows[0].key;
        } else {
            return undefined;
        }
    }

    /** Upserts the Payhawk API key for an account, touching updated_at on conflict. */
    async setApiKey(accountId: string, key: string): Promise<void> {
        await this.pgClient.query<{ payhawk_api_key: string }>({
            text: `
                INSERT INTO "${SCHEMA.TABLE_NAMES.PAYHAWK_API_KEYS}" ("${PayhawkApiKeyRecordKeys.account_id}", "${PayhawkApiKeyRecordKeys.key}")
                VALUES ($1, $2)
                ON CONFLICT ("${PayhawkApiKeyRecordKeys.account_id}")
                DO
                    UPDATE SET "${PayhawkApiKeyRecordKeys.key}" = $2, "${PayhawkApiKeyRecordKeys.updated_at}" = NOW()
            `,
            values: [accountId, key],
        });
    }

    /** Returns the last successful invoice-sync time (UTC) or undefined if never synced. */
    async getLastSyncDate(accountId: string): Promise<Date | undefined> {
        const query = await this.pgClient.query<Pick<IInvoicesSyncHistoryItemRecord, 'last_sync_at'>>({
            text: `
                SELECT "${SyncHistoryItemRecordKeys.last_sync_at}" FROM "${SCHEMA.TABLE_NAMES.INVOICES_SYNC_HISTORY}"
                WHERE "${SyncHistoryItemRecordKeys.account_id}" = $1
            `,
            values: [accountId],
        });
        // BUGFIX: the WHERE clause used PayhawkApiKeyRecordKeys.account_id for a
        // sync-history query; it now uses SyncHistoryItemRecordKeys.account_id,
        // consistent with updateLastSyncDate below.
        if (query.rows.length > 0) {
            return moment.utc(query.rows[0].last_sync_at).toDate();
        }
        return undefined;
    }

    /** Records the time of the latest successful invoice sync for an account. */
    async updateLastSyncDate(accountId: string, lastSyncAt: Date): Promise<void> {
        await this.pgClient.query({
            text: `
                INSERT INTO "${SCHEMA.TABLE_NAMES.INVOICES_SYNC_HISTORY}"
                    ("${SyncHistoryItemRecordKeys.account_id}", "${SyncHistoryItemRecordKeys.last_sync_at}")
                VALUES ($1, $2)
                ON CONFLICT ("${SyncHistoryItemRecordKeys.account_id}")
                DO
                    UPDATE SET "${SyncHistoryItemRecordKeys.last_sync_at}" = $2
            `,
            values: [accountId, lastSyncAt],
        });
    }

    /** Creates the database schema from assets/init.schema.sql. */
    async initSchema(): Promise<void> {
        await this.pgClient.query(await readSchemaInitScript());
    }

    /** Applies any pending schema migration. */
    async ensureSchemaVersion(): Promise<void> {
        await this.applyMigration();
    }

    private async applyMigration(): Promise<void> {
        await this.applyDatabaseMigration();
    }

    // Runs assets/migration.sql if present; errors are logged, not rethrown,
    // so a failed migration does not prevent service startup.
    private async applyDatabaseMigration(): Promise<void> {
        const fileName = `migration.sql`;
        if (!scriptExists(fileName)) {
            this.logger.info('Database migration skipped. No script');
            return;
        }
        try {
            this.logger.info('Database migration started');
            const scriptText = await readScript(fileName);
            await this.pgClient.query(scriptText);
            this.logger.info('Database migration finished');
        } catch (err) {
            const error = Error(`Database migration script failed: ${err instanceof Error ? err.toString() : JSON.stringify(err)}`);
            this.logger.error(error);
        }
    }
}
// Loads the schema bootstrap script shipped with the service.
const readSchemaInitScript = (): Promise<string> => readScript('init.schema.sql');

// Reads a SQL script from the assets folder as UTF-8 text,
// rejecting the promise on any read error.
const readScript = (name: string): Promise<string> =>
    fs.promises.readFile(getPathFullName(name), 'utf8');

// True when the named script is present in the assets folder.
const scriptExists = (name: string): boolean =>
    fs.existsSync(getPathFullName(name));

// Resolves a script name against <cwd>/assets.
const getPathFullName = (fileName: string): string =>
    [process.cwd(), 'assets', fileName].join('/');
|
#!/bin/bash
# Export the Godot project as an HTML5 build into dist/web-$BUILD_ENV.
# BUGFIX: the shebang was written "!#/bin/bash" (characters inverted),
# so the kernel never recognised it and the script ran under the
# caller's default shell instead of bash.
mkdir -v -p "dist/web-${BUILD_ENV}"
godot -v --export "HTML5" "dist/web-${BUILD_ENV}/index.html"
package de.ids_mannheim.korap.user;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import lombok.Getter;
import lombok.Setter;
@Getter
@Setter
public class KorAPUser extends User {
    private static Logger jlog = LogManager.getLogger(KorAPUser.class);
    private static final long serialVersionUID = -7108308497625884584L;
    // fixme: accountlink to shibboleth account
    private String accountLink;
    private String password;
    private String URIFragment;   // confirmation-URI fragment, never null after ctor
    private Long URIExpiration;   // fragment expiry as epoch millis, never null after ctor

    public KorAPUser (String username) {
        super(username, 0);
        this.URIFragment = "";
        this.URIExpiration = 0L;
    }

    public KorAPUser (Integer id, String username) {
        this(username);
        this.setId(id);
    }

    public KorAPUser () {
        super();
    }

    @Override
    protected User clone () {
        // NOTE(review): only username and accountCreation are copied;
        // password/URI fields are deliberately (?) left at defaults — confirm.
        KorAPUser user = new KorAPUser(this.getUsername());
        user.setUsername(this.getUsername());
        user.setAccountCreation(this.getAccountCreation());
        return user;
    }

    @Override
    public int hashCode () {
        // BUGFIX: only fields that equals() inspects participate, keeping the
        // equals/hashCode contract. The original also mixed in the static
        // logger and the password, neither of which equals() compares.
        int result = super.hashCode();
        result = 31 * result
                + (URIFragment != null ? URIFragment.hashCode() : 0);
        result = 31 * result
                + (URIExpiration != null ? URIExpiration.hashCode() : 0);
        return result;
    }

    @Override
    public boolean equals (Object o) {
        if (this == o)
            return true;
        if (!(o instanceof KorAPUser))
            return false;
        if (!super.equals(o))
            return false;
        KorAPUser korAPUser = (KorAPUser) o;
        // BUGFIX: boxed Longs must be compared by value; the original used
        // '!=', which compares references and reports unequal for equal
        // values outside the Long integer cache (-128..127).
        if (URIExpiration != null ? !URIExpiration.equals(korAPUser.URIExpiration)
                : korAPUser.URIExpiration != null)
            return false;
        if (URIFragment != null ? !URIFragment.equals(korAPUser.URIFragment)
                : korAPUser.URIFragment != null)
            return false;
        return true;
    }
}
|
// Factory returning a chainable accumulator: `value` is the running total,
// `steps` records every operand (the seed included) in application order.
const adder = (initial = 0) => ({
  value: initial,
  steps: [initial],
  add(amount) {
    // Record the operand first, then fold it into the total; return `this`
    // so calls can be chained.
    this.value += amount;
    this.steps.push(amount);
    return this;
  },
});
// Class-based counterpart of `adder`: identical contract, constructed with
// `new`. `value` is the running total, `steps` the list of operands.
const Adder = class {
  constructor(initial = 0) {
    this.steps = [initial];
    this.value = initial;
  }

  // Fold `amount` into the total, log it, and return `this` for chaining.
  add(amount) {
    this.steps.push(amount);
    this.value += amount;
    return this;
  }
};
//Usage
// Factory version: destructure the final state straight off the chain.
{
const { value, steps } = adder(5).add(8).add(-3);
console.log('Total sum is ' + value);
const [a, b, c] = steps;
console.log('Steps of operations ' + a, b, c);
}
// Class version: same chaining API via `new`.
{
const { value, steps } = new Adder(5).add(8).add(-3);
console.log('Value ' + value);
console.log('Steps ' + steps);
const obj = new Adder(3).add(5);
console.log('Object obj.value ' + obj.value);
console.log('Object obj.steps ' + obj.steps);
console.log('Object obj.add ' + obj.add);
}
|
import os
import subprocess
import time
from itertools import chain, repeat
from mock import patch
import pytest
from pytest import raises
from pytest_server_fixtures.xvfb import XvfbServer
def test_construct(xvfb_server):
    # The plugin-provided fixture must expose a DISPLAY string once started.
    assert xvfb_server.display
def test_connect_client():
    # xdpyinfo must be able to connect using the server's display and authfile,
    # and its report must mention the display it connected to.
    with XvfbServer() as server:
        p = subprocess.Popen(['xdpyinfo', '-display', server.display],
                             env=dict(os.environ, XAUTHORITY=server.authfile), stdout=subprocess.PIPE)
        dpyinfo, _ = p.communicate()
        assert p.returncode == 0
        assert server.display in str(dpyinfo)
def test_terminates_on_last_client_exit():
    # Xvfb is started with -terminate semantics here: once the only client
    # (xdpyinfo) disconnects, the server process should exit cleanly.
    with XvfbServer() as server:
        subprocess.check_call(['xdpyinfo', '-display', server.display],
                              env=dict(os.environ, XAUTHORITY=server.authfile), stdout=open('/dev/null'))
        for _ in range(5):
            if server.process.poll() is not None:
                break
            time.sleep(0.1)  # wait up to 0.5 seconds for the server to terminate
        assert server.process.poll() == 0
def test_tries_to_find_free_server_num():
    # Two concurrent servers must pick distinct display numbers.
    with XvfbServer() as server1:
        with XvfbServer() as server2:
            assert server1.display != server2.display
def test_raises_if_fails_to_find_free_server_num():
    # Pretend every X lock file exists so no display number appears free;
    # construction must fail with the dedicated RuntimeError.
    _exists = os.path.exists
    with patch('os.path.exists', new=lambda f: "-lock" in f or _exists(f)):
        with raises(RuntimeError) as ex:
            XvfbServer()
        assert 'Unable to find a free server number to start Xvfb' in str(ex)
def test_handles_unexpected_server_num_collision():
    # Make the very first os.path.exists call report "free" even though
    # server1 holds that display (a race-condition collision); the second
    # server must recover and still end up on a different display.
    with XvfbServer() as server1:
        from os.path import exists as real_exists
        with patch('os.path.exists') as mock_exists:
            side_effect_chain = chain([lambda _: False], repeat(real_exists))
            mock_exists.side_effect = lambda path: next(side_effect_chain)(path)
            with XvfbServer() as server2:
                assert server1.display != server2.display
def test_handles_unexpected_failure_to_start():
    # Point the server at /bin/false so the Xvfb process dies immediately;
    # construction must surface that as a RuntimeError.
    with patch('pytest_server_fixtures.xvfb.XvfbServer.xvfb_command', 'false'):
        with raises(RuntimeError) as ex:
            XvfbServer()
        assert 'Failed to start Xvfb' in str(ex)
|
<gh_stars>1-10
import {Overlay, OverlayRef} from '@angular/cdk/overlay';
import {ComponentPortal} from '@angular/cdk/portal';
import {Injectable} from '@angular/core';
import {SpinnerOverlayComponent} from '../spinner-overlay/spinner-overlay.component';
@Injectable({
    providedIn: 'root',
})
export class SpinnerOverlayService {
    // Reference to the currently attached overlay, if any.
    private overlayRef: OverlayRef = undefined;
    // Requested visibility; the overlay itself is created asynchronously.
    private isOnShow = false;

    constructor(private overlay: Overlay) {
    }

    /** Shows a centered, full-screen spinner overlay. Safe to call repeatedly. */
    public show(): void {
        if (this.isOnShow) {
            // BUGFIX: repeated show() calls used to stack a new overlay each
            // time while hide() could only detach the last one.
            return;
        }
        this.isOnShow = true;
        // Hack avoiding `ExpressionChangedAfterItHasBeenCheckedError` error
        Promise.resolve(null).then(() => {
            if (!this.isOnShow) {
                // hide() arrived before this microtask ran; nothing to attach.
                return;
            }
            this.overlayRef = this.overlay.create({
                positionStrategy: this.overlay
                    .position()
                    .global()
                    .centerHorizontally()
                    .centerVertically(),
                hasBackdrop: true,
            });
            this.overlayRef.attach(new ComponentPortal(SpinnerOverlayComponent));
        });
    }

    /** Hides the spinner if shown, or cancels a still-pending show(). */
    public hide(): void {
        if (this.isOnShow) {
            if (this.overlayRef) {
                // BUGFIX: hide() before show()'s microtask completed called
                // detach() on undefined; guard, and dispose to release the
                // overlay's DOM instead of merely detaching it.
                this.overlayRef.detach();
                this.overlayRef.dispose();
            }
            this.overlayRef = undefined;
            this.isOnShow = false;
        }
    }
}
|
/*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under,
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package handler
import (
"context"
actions "github.com/Tencent/bk-bcs/bcs-services/bcs-argocd-manager/bcs-argocd-server/internal/action/plugin"
"github.com/Tencent/bk-bcs/bcs-services/bcs-argocd-manager/pkg/sdk/plugin"
)
// PluginHandler handler that implements the micro handler interface.
// Every RPC below is a thin shim: it constructs the matching action from the
// internal actions package and delegates the request/response pair to it.
type PluginHandler struct{}

// NewPluginHandler return a new PluginHandler plugin
func NewPluginHandler() *PluginHandler {
	return &PluginHandler{}
}

// CreateArgocdPlugin create a plugin
func (handler *PluginHandler) CreateArgocdPlugin(ctx context.Context,
	request *plugin.CreateArgocdPluginRequest, response *plugin.CreateArgocdPluginResponse) error {
	action := actions.CreateArgocdPluginAction{}
	return action.Handle(ctx, request, response)
}

// UpdateArgocdPlugin update a plugin
func (handler *PluginHandler) UpdateArgocdPlugin(ctx context.Context,
	request *plugin.UpdateArgocdPluginRequest, response *plugin.UpdateArgocdPluginResponse) error {
	action := actions.UpdateArgocdPluginAction{}
	return action.Handle(ctx, request, response)
}

// DeleteArgocdPlugin delete a plugin by name
func (handler *PluginHandler) DeleteArgocdPlugin(ctx context.Context,
	request *plugin.DeleteArgocdPluginRequest, response *plugin.DeleteArgocdPluginResponse) error {
	action := actions.DeleteArgocdPluginAction{}
	return action.Handle(ctx, request, response)
}

// GetArgocdPlugin get plugin by name
func (handler *PluginHandler) GetArgocdPlugin(ctx context.Context,
	request *plugin.GetArgocdPluginRequest, response *plugin.GetArgocdPluginResponse) error {
	action := actions.GetArgocdPluginAction{}
	return action.Handle(ctx, request, response)
}

// ListArgocdPlugins list plugins
func (handler *PluginHandler) ListArgocdPlugins(ctx context.Context,
	request *plugin.ListArgocdPluginsRequest, response *plugin.ListArgocdPluginsResponse) error {
	action := actions.ListArgocdPluginsAction{}
	return action.Handle(ctx, request, response)
}
|
<filename>src/db/get_images_by_tag.sql
-- Fetch every image that carries the given tag (named parameter :tag).
-- NOTE(review): the selected columns are unqualified; presumably Id/Origin/Filter
-- come from Image and Image_Tag.Image is the foreign key to Image.Id — verify
-- against the schema. The LEFT JOIN keeps tag rows even when the image row is
-- missing (those would return NULL columns).
SELECT Id, Origin, Filter FROM Image_Tag
LEFT JOIN Image ON Image = Id
WHERE Tag = :tag;
|
def parseStr(input_str):
    """Count occurrences of each non-space character, case-insensitively.

    Parameters
    ----------
    input_str : str
        Arbitrary text; letters are folded to lowercase before counting.

    Returns
    -------
    dict
        Mapping of lowercase character -> occurrence count. Space characters
        (' ') are excluded; all other characters, including punctuation and
        other whitespace, are counted.
    """
    counts = {}
    for ch in input_str.lower():
        # Skip plain spaces only; tabs/newlines are still counted,
        # matching the original behavior.
        if ch == ' ':
            continue
        counts[ch] = counts.get(ch, 0) + 1
    return counts
// Return the current local date formatted as YYYY-MM-DD,
// with month and day zero-padded to two digits.
const formatDate = () => {
    const now = new Date();
    const pad = (n) => String(n).padStart(2, '0');
    return `${now.getFullYear()}-${pad(now.getMonth() + 1)}-${pad(now.getDate())}`;
};
// Example:
formatDate(); // 2020-09-09
<gh_stars>10-100
package chylex.hee.world.end.gen;
import java.util.EnumSet;
import java.util.Random;
import net.minecraft.init.Blocks;
import chylex.hee.system.util.MathUtil;
import chylex.hee.world.end.EndTerritory;
import chylex.hee.world.end.TerritoryGenerator;
import chylex.hee.world.feature.noise.GenerateIslandNoise;
import chylex.hee.world.feature.noise.GenerateIslandNoiseTame;
import chylex.hee.world.structure.StructureWorld;
/**
 * Generates the "Forgotten Tombs" End territory: three end-stone islands
 * placed roughly 120 degrees apart around the territory origin.
 */
public class TerritoryForgottenTombs extends TerritoryGenerator{
	public TerritoryForgottenTombs(EndTerritory territory, EnumSet variations, StructureWorld world, Random rand){
		super(territory, variations, world, rand);
	}

	@Override
	public void generate(){
		for(int island = 0; island < 3; island++){
			// Angle jittered by up to +/-10 degrees; distance 50-60 blocks from origin.
			final double angle = MathUtil.toRad(120D*island+(rand.nextDouble()-0.5D)*20D);
			final double dist = 50D+rand.nextDouble()*10D;
			int offX = MathUtil.floor(Math.cos(angle)*dist);
			int offZ = MathUtil.floor(Math.sin(angle)*dist);

			// Retry with progressively looser size constraints until an island generates.
			for(int attempt = 0; attempt < 25; attempt++){
				GenerateIslandNoise noise = new GenerateIslandNoise(Blocks.end_stone, rand);
				noise.terrainSize = 36;
				noise.noiseHeight = 21;
				noise.sideSmoothness = 180D;
				noise.densityPeakMultiplier = 0.5D;
				noise.surfaceHillScale = 48F;

				// FIX: the generator was previously constructed twice in a row on
				// consecutive lines; the first instance was discarded unused.
				GenerateIslandNoiseTame generator = new GenerateIslandNoiseTame(noise);
				generator.setWorldArea(100, 60, 100);
				generator.setCenterXZ();

				if (attempt < 18){
					generator.setMinBlocks(65_000);
					generator.setMinSize(72, 72);
					generator.setMaxSize(100, 100);
				}
				else if (attempt < 24){
					generator.setMinBlocks(30_000);
					generator.setMinSize(50, 50);
					generator.setMaxSize(120, 120);
				}
				// attempt == 24 (the last one) runs with the generator's default constraints.

				if (generator.generate(world, offX, rand.nextInt(10), offZ))break;
			}
		}
	}
}
|
// Biconnected components (a partition of the EDGES) via Tarjan's
// articulation-point DFS. Relies on the surrounding contest-template
// aliases/macros being defined: vpi, vi, pb, sz.
template<int SZ> struct BCC {
	int N;                     // number of vertices; caller must set before use
	vpi adj[SZ], ed;           // adj[u] = {neighbour, edge id}; ed = edge list as {u, v}
	vi disc, low, par, art;    // discovery time, low-link, DFS parent, articulation flag
	vector<vector<int>> bcc;   // result: each component is a list of edge ids
	stack<int> stk;            // edge ids on the current DFS path, not yet assigned

	// Add an undirected edge {u, v}; both directions share the same edge id.
	void addEdge(int u, int v) {
		adj[u].pb({v,sz(ed)}),
		adj[v].pb({u,sz(ed)});
		ed.pb({u,v});
	}

	// DFS from u with parent p. Pushes tree and back edges on stk; whenever the
	// articulation condition fires at u, pops one complete component.
	void dfs(int u, int p, int &time) {
		disc[u] = low[u] = ++time;
		int children = 0;
		for (auto [v, id] : adj[u]) {
			if (!disc[v]) {
				// Tree edge: recurse, then test the articulation condition.
				children++;
				stk.push(id);
				par[v] = u;
				dfs(v, u, time);
				low[u] = min(low[u], low[v]);
				// u is an articulation point if it is the root with >1 DFS children,
				// or a non-root whose child subtree cannot reach above u.
				if ((p == -1 && children > 1) || (p != -1 && low[v] >= disc[u])) {
					art[u] = 1;
					// Pop edges up to and including 'id': they form one component.
					vector<int> component;
					while (true) {
						int e = stk.top(); stk.pop();
						component.push_back(e);
						if (e == id) break;
					}
					bcc.push_back(component);
				}
			} else if (v != p && disc[v] < disc[u]) {
				// Back edge to a proper ancestor: lowers low[u], belongs to the
				// component closed at that ancestor.
				// NOTE(review): the parent test compares the VERTEX (v != p), not the
				// edge id, so a parallel edge back to the parent is silently skipped —
				// confirm the input graph has no multi-edges before relying on this.
				low[u] = min(low[u], disc[v]);
				stk.push(id);
			}
		}
	}

	// Run the DFS from every unvisited vertex and return the edge-id components.
	// Re-initializes all per-vertex state, so it is safe to call again after
	// adding more edges (bcc is rebuilt from scratch).
	vector<vector<int>> findBiconnectedComponents() {
		disc.assign(N, 0);
		low.assign(N, 0);
		par.assign(N, -1);
		art.assign(N, 0);
		bcc.clear();
		int time = 0;
		for (int i = 0; i < N; i++) {
			if (!disc[i]) {
				dfs(i, -1, time);
				// Whatever remains on the stack after the root's DFS is the last
				// component of this tree (no articulation point closed it).
				if (!stk.empty()) {
					vector<int> component;
					while (!stk.empty()) {
						component.push_back(stk.top());
						stk.pop();
					}
					bcc.push_back(component);
				}
			}
		}
		return bcc;
	}
};
#!/bin/bash
# Integration-test driver for hashit: regenerate, format, unit-test and build
# the binary, then exercise the CLI end-to-end below.
echo "Running go generate..."
go generate
echo "Running go fmt..."
# FIX: was "gofmt -s -w ./.." which formats the PARENT directory, not this
# module; format the current directory tree instead.
gofmt -s -w .
echo "Running unit tests..."
go test ./... || exit
echo "Building application..."
go build -ldflags="-s -w" || exit
# ANSI colors used by the PASSED/FAILED banners below.
GREEN='\033[1;32m'
RED='\033[0;31m'
NC='\033[0m'
# --- Basic CLI behavior tests ---
# FIX throughout this section: the failure branches ended with a bare "exit",
# whose status is that of the preceding (successful) echo — i.e. the script
# exited 0 on test failure. Use "exit 1" so CI actually fails.
if ./hashit --not-a-real-option > /dev/null ; then
    echo -e "${RED}================================================="
    echo -e "FAILED Invalid option should produce error code "
    echo -e "======================================================="
    exit 1
else
    echo -e "${GREEN}PASSED invalid option test"
fi

if ./hashit > /dev/null ; then
    echo -e "${GREEN}PASSED no directory specified test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should run correctly with no directory specified"
    echo -e "======================================================="
    exit 1
fi

if ./hashit * > /dev/null ; then
    echo -e "${GREEN}PASSED all files test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should run correctly with all files specified"
    echo -e "======================================================="
    exit 1
fi

if ./hashit --debug --trace --verbose -f text --hash md5 --no-stream --stream-size 10 -r main.go > /dev/null ; then
    echo -e "${GREEN}PASSED multiple options test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should run correctly with multiple options"
    echo -e "======================================================="
    exit 1
fi

if ./hashit processor > /dev/null ; then
    echo -e "${GREEN}PASSED directory specified test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should run correctly with directory specified"
    echo -e "======================================================="
    exit 1
fi

if ./hashit main.go -c md5 | grep -q -i 'md5'; then
    echo -e "${GREEN}PASSED short hash test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to work with short hash"
    echo -e "======================================================="
    exit 1
fi
# --- Hash-algorithm tests ---
# FIX throughout: failure branches now "exit 1" instead of a bare "exit"
# (which inherited the echo's 0 status and masked failures).
for i in 'md4' 'md5' 'sha1' 'sha256' 'sha512'
do
    if ./hashit main.go --hash $i | grep -q -i $i; then
        echo -e "${GREEN}PASSED hash test $i"
    else
        echo -e "${RED}======================================================="
        echo -e "FAILED Should be able to work with hash $i"
        echo -e "======================================================="
        exit 1
    fi
done

# The remaining algorithms print a display name that differs from the CLI
# flag (e.g. blake2b256 -> "blake2b-256"), so each is checked individually.
if ./hashit main.go --hash blake2b256 | grep -q -i 'blake2b-256'; then
    echo -e "${GREEN}PASSED hash test blake2b256"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to work with hash blake2b256"
    echo -e "======================================================="
    exit 1
fi

if ./hashit main.go --hash blake2b512 | grep -q -i 'blake2b-512'; then
    echo -e "${GREEN}PASSED hash test blake2b512"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to work with hash blake2b512"
    echo -e "======================================================="
    exit 1
fi

if ./hashit main.go --hash sha3224 | grep -q -i 'sha3-224'; then
    echo -e "${GREEN}PASSED hash test sha3224"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to work with hash sha3224"
    echo -e "======================================================="
    exit 1
fi

if ./hashit main.go --hash sha3256 | grep -q -i 'sha3-256'; then
    echo -e "${GREEN}PASSED hash test sha3256"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to work with hash sha3256"
    echo -e "======================================================="
    exit 1
fi

if ./hashit main.go --hash sha3384 | grep -q -i 'sha3-384'; then
    echo -e "${GREEN}PASSED hash test sha3384"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to work with hash sha3384"
    echo -e "======================================================="
    exit 1
fi

if ./hashit main.go --hash sha3512 | grep -q -i 'sha3-512'; then
    echo -e "${GREEN}PASSED hash test sha3512"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to work with hash sha3512"
    echo -e "======================================================="
    exit 1
fi

if ./hashit main.go --hashes | grep -q -i 'md5'; then
    echo -e "${GREEN}PASSED hashes test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to display hashes"
    echo -e "======================================================="
    exit 1
fi

# Stdin handling: pipe a known string and check the well-known digest appears.
if echo "hello" | ./hashit main.go --hash md5 | grep -q -i 'b1946ac92492d2347c6235b4d2611184'; then
    echo -e "${GREEN}PASSED stdin md5 test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to process md5 stdin"
    echo -e "======================================================="
    exit 1
fi

if echo "hello" | ./hashit main.go --hash sha1 | grep -q -i 'f572d396fae9206628714fb2ce00f72e94f2258f'; then
    echo -e "${GREEN}PASSED stdin sha1 test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to process sha1 stdin"
    echo -e "======================================================="
    exit 1
fi

if echo "hello" | ./hashit main.go --hash sha256 | grep -q -i '5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03'; then
    echo -e "${GREEN}PASSED stdin sha256 test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to process sha256 stdin"
    echo -e "======================================================="
    exit 1
fi
# --- Output-format parity tests ---
# Each test compares hashit's output against a reference (itself in another
# mode, or the system *sum/hashdeep tools).
# FIX throughout: failure branches now "exit 1" instead of a bare "exit".
a=$(./hashit --no-stream * | sort | md5sum)
b=$(./hashit * | sort | md5sum)
if [ "$a" == "$b" ]; then
    echo -e "${GREEN}PASSED stream output test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED stream output test"
    echo -e "================================================="
    exit 1
fi

a=$(./hashit --format hashdeep main.go | grep ',main.go')
b=$(hashdeep -l main.go | grep ',main.go')
if [ "$a" == "$b" ]; then
    echo -e "${GREEN}PASSED hashdeep hash test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED hashdeep hash test"
    echo -e "================================================="
    exit 1
fi

a=$(./hashit --format sum --hash md5 main.go)
b=$(md5sum main.go)
if [ "$a" == "$b" ]; then
    echo -e "${GREEN}PASSED sum md5 format test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED sum md5 format test"
    echo -e "================================================="
    exit 1
fi

a=$(./hashit --format sum --hash sha1 main.go)
b=$(sha1sum main.go)
if [ "$a" == "$b" ]; then
    echo -e "${GREEN}PASSED sum sha1 format test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED sum sha1 format test"
    echo -e "================================================="
    exit 1
fi

a=$(./hashit --format sum --hash sha256 main.go)
b=$(sha256sum main.go)
if [ "$a" == "$b" ]; then
    echo -e "${GREEN}PASSED sum sha256 format test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED sum sha256 format test"
    echo -e "================================================="
    exit 1
fi

a=$(./hashit --format sum --hash sha512 main.go)
b=$(sha512sum main.go)
if [ "$a" == "$b" ]; then
    echo -e "${GREEN}PASSED sum sha512 format test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED sum sha512 format test"
    echo -e "================================================="
    exit 1
fi
# --- Streaming and hashdeep-audit tests ---
# FIX throughout: failure branches now "exit 1" instead of a bare "exit".
for i in '' '--stream-size 0'
do
    if ./hashit $i LICENSE | grep -q -i '227f999ca03b135a1b4d69bde84afb16'; then
        echo -e "${GREEN}PASSED stream $i hash test"
    else
        echo -e "${RED}======================================================="
        echo -e "FAILED $i test"
        echo -e "======================================================="
        exit 1
    fi
done

# A 1-byte stream size forces the scanner path; output must match the default.
a=$(./hashit --format sum --hash all ./LICENSE)
b=$(./hashit --format sum --hash all --stream-size 1 ./LICENSE)
if [ "$a" == "$b" ]; then
    echo -e "${GREEN}PASSED small scanner test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED small scanner test"
    echo -e "================================================="
    exit 1
fi

if ./hashit --format hashdeep processor > audit.txt && hashdeep -l -r -a -k audit.txt processor | grep -q -i 'Audit passed'; then
    echo -e "${GREEN}PASSED relative hashdeep audit test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to create relative hashdeep audit"
    echo -e "======================================================="
    exit 1
fi

if ./hashit --format hashdeep vendor > audit.txt && hashdeep -l -r -a -k audit.txt vendor | grep -q -i 'Audit passed'; then
    echo -e "${GREEN}PASSED large relative hashdeep audit test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to create large relative hashdeep audit"
    echo -e "======================================================="
    exit 1
fi

# Audit with absolute paths against a scratch directory.
mkdir -p /tmp/hashit/
echo "hello" > /tmp/hashit/file
if ./hashit --format hashdeep /tmp/hashit/ > audit.txt && hashdeep -r -a -k audit.txt /tmp/hashit/ | grep -q -i 'Audit passed'; then
    echo -e "${GREEN}PASSED full hashdeep audit test"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should be able to create full hashdeep audit"
    echo -e "======================================================="
    exit 1
fi
# --- File-identification (-x) tests and cleanup ---
# FIX throughout: failure branches now "exit 1" instead of a bare "exit".
if ./hashit -x ./examples/xubuntu-18.04-desktop-amd64.iso > /dev/null ; then
    echo -e "${RED}================================================="
    echo -e "FAILED Invalid file match should return error "
    echo -e "======================================================="
    exit 1
else
    echo -e "${GREEN}PASSED Invalid file match"
fi

if ./hashit -x ./examples/xubuntu-18.04-desktop-amd64.iso | grep -q -i 'identified by filename'; then
    echo -e "${GREEN}PASSED identified by filename"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should identify by filename"
    echo -e "======================================================="
    exit 1
fi

if ./hashit -x ./main.go | grep -q -i 'unknown file cannot audit'; then
    echo -e "${GREEN}PASSED unknown file"
else
    echo -e "${RED}======================================================="
    echo -e "FAILED Should report unknown file"
    echo -e "======================================================="
    exit 1
fi

# Remove the built binary and all test artifacts.
echo -e "${NC}Cleaning up..."
rm ./hashit
rm ./audit.txt
rm /tmp/hashit/file
rmdir /tmp/hashit/

echo -e "${GREEN}================================================="
echo -e "ALL TESTS PASSED"
echo -e "================================================="
|
#!/bin/bash
# Bring-up script for the Roboy demo rig: discovers known devices on the LAN,
# then (re)creates a tmux session with one pane per service.

# Ping-sweep the subnet so the ARP cache consulted below is populated.
nmap -sn 192.168.1.0/24

# Resolve device IPs from their known MAC addresses via the ARP cache.
# NOTE(review): $projector is looked up but never used below — presumably kept
# for the commented-out panes; verify before removing.
raspberrypi=$(arp -n | grep -w -i 'b8:27:eb:ab:aa:26' | awk 'NR == 1' | awk '{print $1}') #arp -n | grep -w -i 'b8:27:eb:ab:aa:26' | awk '{print $1}')
fpga=$(arp -n | grep -w -i '00:00:F3:BE:EF:02' | awk '{print $1}')
projector=$(arp -n | grep -w -i 'cc:4b:73:b5:4b:da' | awk '{print $1}')

# Kill ALL existing tmux sessions before creating a fresh one.
tmux list-sessions | awk 'BEGIN{FS=":"}{print $1}' | xargs -n 1 tmux kill-session -t

# Build the detached "roboy" session: split into panes first, then start one
# service per pane. Pane numbers in the send-keys calls depend on the exact
# split order, so do not reorder the split-window lines.
tmux new-session -s roboy -d \; \
send-keys 'echo' C-m \; \
split-window -h \; \
split-window -h \; \
split-window -v \; \
split-window -v \; \
split-window -v \; \
select-pane -t 0 \; \
split-window -v \; \
split-window -v \; \
send-keys -t 3 'animus' C-m\; \
send-keys -t 4 'websocket --wait' C-m\; \
send-keys -t 5 'face' C-m\; \
send-keys -t 6 "ssh root@$fpga -t \"bash -lic 'plexus' \"" C-m\; \
send-keys -t 0 'rosrun rosserial_python serial_node.py tcp' C-m\; \
send-keys -t 1 'node-red' C-m \; \
send-keys -t 2 /home/roboy/restart_face.sh C-m \;
#send-keys -t 7 'wheel-commander' C-m\; \
#select-pane -t 5\; \
#split-window -v \; \
#send-keys "ssh pi@$raspberrypi -t \"bash -lic 'leds' \"" C-m\;

# Attach the current terminal to the freshly created session.
tmux attach
|
#!/bin/bash
# Start Apache with mod_wsgi for the mod_wsgi test suite, smoke-test the URL
# once, then run the parallel and serial test clients.
# Requires PYTHON_BINARY, PROJECT_DIRECTORY and MOD_WSGI_VERSION in the environment.
set -o xtrace
set -o errexit

# Pick the Apache binary and matching config: Ubuntu layout first,
# Amazon/RHEL-style httpd as fallback.
APACHE=$(command -v apache2 || command -v /usr/lib/apache2/mpm-prefork/apache2) || true
if [ -n "$APACHE" ]; then
    APACHE_CONFIG=apache24ubuntu161404.conf
else
    APACHE=$(command -v httpd) || true
    if [ -z "$APACHE" ]; then
        echo "Could not find apache2 binary"
        exit 1
    else
        APACHE_CONFIG=apache22amazon.conf
    fi
fi

# "major.minor" (e.g. "3.8"); used to locate the matching mod_wsgi build
# and the Python home exported to Apache.
PYTHON_VERSION=$(${PYTHON_BINARY} -c "import sys; sys.stdout.write('.'.join(str(val) for val in sys.version_info[:2]))")
export MOD_WSGI_SO=/opt/python/mod_wsgi/python_version/$PYTHON_VERSION/mod_wsgi_version/$MOD_WSGI_VERSION/mod_wsgi.so
export PYTHONHOME=/opt/python/$PYTHON_VERSION

cd ..
$APACHE -k start -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}
# Always stop Apache on exit or hangup so repeated runs do not collide.
trap "$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}" EXIT HUP

# Smoke-test the URL. errexit is suspended so the status can be captured and
# the error log dumped before bailing out.
set +e
wget -t 1 -T 10 -O - "http://localhost:8080${PROJECT_DIRECTORY}"
STATUS=$?
set -e

# Debug
cat error_log

if [ $STATUS != 0 ]; then
    exit $STATUS
fi

# Exercise the server: 25000 requests across 100 threads, then 25000 serially.
${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 -t 100 parallel http://localhost:8080${PROJECT_DIRECTORY}
${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 serial http://localhost:8080${PROJECT_DIRECTORY}
|
#!/bin/bash
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

# CI test dispatcher: the GROUP environment variable selects which suite runs
# (python, js, docs, integrity, usage, nonode). Fails fast on any error.
set -ex
set -o pipefail

# All groups except "nonode" need the JS assets built.
if [[ $GROUP != nonode ]]; then
    python -c "from jupyterlab.commands import build_check; build_check()"
fi

if [[ $GROUP == python ]]; then
    # Run the python tests
    py.test -v --junitxml=junit.xml
fi

if [[ $GROUP == js ]]; then
    jlpm build:packages
    jlpm build:test
    FORCE_COLOR=1 jlpm coverage --loglevel success
    jlpm run clean
fi

if [[ $GROUP == docs ]]; then
    # Run the link check - allow for a link to fail once
    py.test --check-links -k .md . || py.test --check-links -k .md --lf .

    # Build the docs
    jlpm build:packages
    jlpm docs

    # Verify tutorial docs build
    pushd docs
    pip install sphinx sphinx-copybutton sphinx_rtd_theme recommonmark jsx-lexer
    make linkcheck
    make html
    popd
fi

if [[ $GROUP == integrity ]]; then
    # Run the integrity script first
    jlpm run integrity --force

    # Check yarn.lock file
    jlpm check --integrity

    # Lint our files.
    jlpm run lint:check || (echo 'Please run `jlpm run lint` locally and push changes' && exit 1)

    # Build the packages individually.
    jlpm run build:src

    # Make sure we have CSS that can be converted with postcss
    jlpm global add postcss-cli
    jlpm config set prefix ~/.yarn
    ~/.yarn/bin/postcss packages/**/style/*.css --dir /tmp

    # run twine check on the python build assets.
    # this must be done before altering any versions below.
    python -m pip install -U twine wheel
    python setup.py sdist
    python setup.py bdist_wheel
    twine check dist/*

    # Make sure we can bump the version
    # This must be done at the end so as not to interfere
    # with the other checks
    git config --global user.email "you@example.com"
    git config --global user.name "CI"
    git stash
    git checkout -b commit_${BUILD_SOURCEVERSION}
    git clean -df
    jlpm bumpversion minor --force
    jlpm bumpversion major --force
    jlpm bumpversion release --force # switch to beta
    jlpm bumpversion release --force # switch to rc
    jlpm bumpversion build --force
    VERSION=$(python setup.py --version)
    if [[ $VERSION != *rc1 ]]; then exit 1; fi

    # make sure we can patch release
    jlpm bumpversion release --force  # switch to final
    jlpm patch:release --force

    # make sure we can bump major JS releases
    jlpm bumpversion minor --force
    jlpm bump:js:major console --force
    jlpm bump:js:major console notebook --force

    # Make sure that a prepublish would include the proper files.
    jlpm run prepublish:check
fi

if [[ $GROUP == usage ]]; then
    # Build the examples.
    jlpm run build:packages
    jlpm run build:examples

    # Test the examples
    jlpm run test:examples

    # Test the cli apps.
    jupyter lab clean --debug
    jupyter lab build --debug
    jupyter lab path --debug
    # Exercise extension link/install/enable/disable/uninstall round trips.
    pushd jupyterlab/tests/mock_packages
    jupyter labextension link extension --no-build --debug
    jupyter labextension unlink extension --no-build --debug
    jupyter labextension link extension --no-build --debug
    jupyter labextension unlink @jupyterlab/mock-extension --no-build --debug
    jupyter labextension install extension  --no-build --debug
    jupyter labextension list --debug
    jupyter labextension disable @jupyterlab/mock-extension --debug
    jupyter labextension enable @jupyterlab/mock-extension --debug
    jupyter labextension disable @jupyterlab/notebook-extension --debug
    jupyter labextension uninstall @jupyterlab/mock-extension --no-build --debug
    jupyter labextension uninstall @jupyterlab/notebook-extension --no-build --debug
    popd

    # Workspace export/import round trip.
    jupyter lab workspaces export > workspace.json --debug
    jupyter lab workspaces import --name newspace workspace.json --debug
    jupyter lab workspaces export newspace > newspace.json --debug
    rm workspace.json newspace.json

    # Make sure we can call help on all the cli apps.
    jupyter lab -h
    jupyter lab build -h
    jupyter lab clean -h
    jupyter lab path -h
    jupyter labextension link -h
    jupyter labextension unlink -h
    jupyter labextension install -h
    jupyter labextension uninstall -h
    jupyter labextension list -h
    jupyter labextension enable -h
    jupyter labextension disable -h

    # Make sure we can add and remove a sibling package.
    # jlpm run add:sibling jupyterlab/tests/mock_packages/extension
    # jlpm run build
    # jlpm run remove:package extension
    # jlpm run build
    # jlpm run integrity --force  # Should have a clean tree now

    # Test cli tools
    jlpm run get:dependency mocha
    jlpm run update:dependency mocha
    jlpm run remove:dependency mocha
    jlpm run get:dependency @jupyterlab/buildutils
    jlpm run get:dependency typescript
    jlpm run get:dependency react-native

    # Test theme creation - make sure we can add it as a package, build,
    # and run browser
    pip install -q pexpect
    python scripts/create_theme.py
    mv foo packages
    jlpm run integrity
    jlpm run build:packages
    jlpm run build:dev
    python -m jupyterlab.browser_check --dev-mode
    jlpm run remove:package foo
    jlpm run integrity

    ## Test app directory support being a symlink
    mkdir tmp
    pushd tmp
    mkdir real_app_dir
    ln -s real_app_dir link_app_dir
    # verify that app directory is not resolved
    env JUPYTERLAB_DIR=./link_app_dir jupyter lab path | grep link_app_dir
    popd

    # Make sure we can successfully load the dev app.
    python -m jupyterlab.browser_check --dev-mode

    # Make sure core mode works
    jlpm run build:core
    # Make sure we have a final released version of JupyterLab server
    python -m jupyterlab.browser_check --core-mode

    # Make sure we can run the built app.
    jupyter labextension install ./jupyterlab/tests/mock_packages/extension --debug
    python -m jupyterlab.browser_check
    jupyter labextension list --debug

    # Make sure we can non-dev install.
    virtualenv -p $(which python3) test_install
    ./test_install/bin/pip install -q ".[test]"  # this populates <sys_prefix>/share/jupyter/lab
    ./test_install/bin/python -m jupyterlab.browser_check
    # Make sure we can run the build
    ./test_install/bin/jupyter lab build

    # Make sure we can start and kill the lab server
    ./test_install/bin/jupyter lab --no-browser &
    TASK_PID=$!
    # Make sure the task is running
    ps -p $TASK_PID || exit 1
    sleep 5
    kill $TASK_PID
    wait $TASK_PID

    # Make sure we can clean various bits of the app dir
    jupyter lab clean
    jupyter lab clean --extensions
    jupyter lab clean --settings
    jupyter lab clean --static
    jupyter lab clean --all
fi

if [[ $GROUP == nonode ]]; then
    # Make sure we can install the wheel
    virtualenv -p $(which python3) test_install
    ./test_install/bin/pip install -v --pre --no-cache-dir --no-deps jupyterlab --no-index --find-links=dist  # Install latest jupyterlab
    ./test_install/bin/pip install jupyterlab  # Install jupyterlab dependencies
    ./test_install/bin/python -m jupyterlab.browser_check --no-chrome-test

    # Make sure we can start and kill the lab server
    ./test_install/bin/jupyter lab --no-browser &
    TASK_PID=$!
    # Make sure the task is running
    ps -p $TASK_PID || exit 1
    sleep 5
    kill $TASK_PID
    wait $TASK_PID
fi
|
#!/bin/bash
# Container entrypoint for a LoRa Basics Station: optionally resets the
# concentrator via RESET_PIN, then starts the station binary bound to the
# configured SPI device (SPI_DEV) at the configured speed (SPI_SPEED).

if [[ -z "$RESET_PIN" ]]; then
    echo "No RESET_PIN environment variable set, skipping the pin reset. If you experience problem with starting the concentrator please set this variable to your manufacturer reset pin"
else
    echo "Resetting the pin"
    ./reset_lgw.sh stop $RESET_PIN
    ./reset_lgw.sh start $RESET_PIN
    echo "Finished resetting the pin"
fi

# Fall back to SPI device 0 when unset or left as the unexpanded template value.
if [[ -z "$SPI_DEV" ]] || [[ $SPI_DEV == '$LBS_SPI_DEV' ]]; then
    echo "No custom SPI dev set up, defaulting to spi dev 0"
    # FIX: was "SPI_DEV = 0" — the spaces around '=' make the shell run a
    # command named SPI_DEV instead of assigning the variable.
    SPI_DEV=0
fi

#Generate tc.uri file
if [[ -z "$TC_URI" ]]; then
    echo "No TC_URI detected in environment variables."
else
    echo "TC_URI is set to: $TC_URI"
    touch tc.uri && echo "$TC_URI" > tc.uri
    #start basestation
    echo "Starting base station..."
    # SPI_SPEED selects which station build to run: 8 mbps (default, station.std)
    # or 2 mbps (station.spispeed2); anything else is rejected.
    if [[ -z "$SPI_SPEED" ]] || [[ "$SPI_SPEED" == '$LBS_SPI_SPEED' ]]; then
        echo "No SPI Speed found defaulting to 8mbps"
        RADIODEV=/dev/spidev$SPI_DEV.0 /bin/station.std -f
    else
        if [ "$SPI_SPEED" == "2" ]; then
            echo "Spi speed set to 2 mbps"
            RADIODEV=/dev/spidev$SPI_DEV.0 /bin/station.spispeed2 -f
        else
            if [ "$SPI_SPEED" == "8" ]; then
                echo "Spi speed set to 8 mbps"
                RADIODEV=/dev/spidev$SPI_DEV.0 /bin/station.std -f
            else
                echo "The value $SPI_SPEED is not supported as custom value. Supported values are 2 or 8"
                exit 1;
            fi
        fi
    fi
fi
|
// Type definitions for lib/Detector/Detector.js
// Project: [LIBRARY_URL_HERE]
// Definitions by: [YOUR_NAME_HERE] <[YOUR_URL_HERE]>
// Definitions: https://github.com/borisyankov/DefinitelyTyped

declare namespace Detector{
	// Detector.getWebGLErrorMessage.!ret
	/**
	 * Shape of the value returned by getWebGLErrorMessage — presumably a
	 * DOM-element-like object carrying the error text; verify against Detector.js.
	 */
	interface GetWebGLErrorMessageRet {
		/**
		 * The id attribute of the message element.
		 */
		id : string;
		/**
		 * The HTML body of the error message.
		 */
		innerHTML : string;
	}
}

/**
 * @author alteredq / http://alteredqualia.com/
 * @author mr.doob / http://mrdoob.com/
 */
declare namespace Detector{
	/**
	 * Feature flag — presumably whether the canvas element is supported; confirm in Detector.js.
	 */
	export var canvas : boolean;
	/**
	 * Feature flag — presumably whether WebGL is available; confirm in Detector.js.
	 */
	export var webgl : boolean;
	/**
	 * Feature flag — presumably whether Web Workers are supported; confirm in Detector.js.
	 */
	export var workers : boolean;
	/**
	 * Build the "WebGL unavailable" message object.
	 * @return object with the message element's id and innerHTML
	 */
	function getWebGLErrorMessage(): Detector.GetWebGLErrorMessageRet;
	/**
	 * Create and attach the WebGL error message to the page.
	 * @param parameters options object — shape not visible here; see Detector.js
	 */
	function addGetWebGLMessage(parameters : any): void;
}
|
package malte0811.controlengineering.blockentity.bus;
import blusunrize.immersiveengineering.api.TargetingInfo;
import blusunrize.immersiveengineering.api.wires.ConnectionPoint;
import blusunrize.immersiveengineering.api.wires.LocalWireNetwork;
import blusunrize.immersiveengineering.api.wires.WireType;
import blusunrize.immersiveengineering.api.wires.redstone.IRedstoneConnector;
import blusunrize.immersiveengineering.api.wires.redstone.RedstoneNetworkHandler;
import com.google.common.collect.ImmutableList;
import malte0811.controlengineering.ControlEngineering;
import malte0811.controlengineering.bus.BusLine;
import malte0811.controlengineering.bus.LocalBusHandler;
import net.minecraft.core.BlockPos;
import net.minecraft.core.Vec3i;
import net.minecraft.nbt.CompoundTag;
import net.minecraft.network.chat.Component;
import net.minecraft.network.chat.TranslatableComponent;
import net.minecraft.resources.ResourceLocation;
import net.minecraft.world.level.block.entity.BlockEntityType;
import net.minecraft.world.level.block.state.BlockState;
import net.minecraft.world.phys.BlockHitResult;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Objects;
import static malte0811.controlengineering.gui.remapper.AbstractRemapperMenu.NOT_MAPPED;
public class RSRemapperBlockEntity extends DualConnectorBlockEntity implements IRedstoneConnector {
private static final int COLOR_ID = MIN_ID;
public static final String COLORED_KEY = ControlEngineering.MODID + ".gui.remapper.colored";
public static final String GRAY_KEY = ControlEngineering.MODID + ".gui.remapper.gray";
private int[] colorToGray = makeInitialMapping();
private int[] grayToColor = makeInverseMapping(colorToGray);
private final byte[][] lastInputByPoint = new byte[2][BusLine.LINE_SIZE];
private final boolean[] needsUpdate = {false, false};
public RSRemapperBlockEntity(BlockEntityType<?> type, BlockPos pos, BlockState state) {
super(type, pos, state);
}
@Override
public void load(@Nonnull CompoundTag nbt) {
super.load(nbt);
var newColorToGray = nbt.getIntArray("colorToGray");
if (newColorToGray.length != BusLine.LINE_SIZE) {
newColorToGray = makeInitialMapping();
}
for (int i = 0; i < newColorToGray.length; ++i) {
var mappedTo = newColorToGray[i];
if (mappedTo < 0 || mappedTo >= BusLine.LINE_SIZE) {
newColorToGray[i] = NOT_MAPPED;
}
}
setColorToGray(newColorToGray);
}
@Override
public void saveAdditional(@Nonnull CompoundTag nbt) {
super.saveAdditional(nbt);
nbt.putIntArray("colorToGray", colorToGray);
}
@Override
public LocalWireNetwork getLocalNet(int cpIndex) {
return super.getLocalNet(cpIndex);
}
/*GENERAL IIC*/
@Override
public boolean canConnectCable(WireType wireType, ConnectionPoint connectionPoint, Vec3i offset) {
return countRealWiresAt(connectionPoint) == 0 && wireType.getCategory().equals(WireType.REDSTONE_CATEGORY);
}
/*REDSTONE*/
@Override
public void onChange(ConnectionPoint cp, RedstoneNetworkHandler handler) {
var netHere = getNet(cp);
if (netHere == null) {
return;
}
var netOther = getOtherNet(cp);
if (netOther == null || (netOther == netHere && cp.index() != COLOR_ID)) {
return;
}
var inputSignals = netHere.getValuesExcluding(cp);
if (Arrays.equals(inputSignals, lastInputByPoint[cp.index()])) {
return;
}
System.arraycopy(inputSignals, 0, lastInputByPoint[cp.index()], 0, BusLine.LINE_SIZE);
needsUpdate[1 - cp.index()] = true;
Objects.requireNonNull(level).scheduleTick(worldPosition, getBlockState().getBlock(), 1);
}
@Override
public void updateInput(byte[] signals, ConnectionPoint cp) {
var otherNet = getOtherNet(cp);
var thisNet = getNet(cp);
if (otherNet == null || thisNet == null) {
return;
} else if (otherNet == thisNet) {
updateInputsShorted(signals, cp);
return;
}
var otherToThis = cp.index() == COLOR_ID ? grayToColor : colorToGray;
var inputs = lastInputByPoint[1 - cp.index()];
for (int otherIndex = 0; otherIndex < inputs.length; ++otherIndex) {
var thisIndex = otherToThis[otherIndex];
if (thisIndex != NOT_MAPPED) {
signals[thisIndex] = (byte) Math.max(inputs[otherIndex], signals[thisIndex]);
}
}
}
public void onBlockTick() {
for (int i = 0; i < 2; ++i) {
if (!needsUpdate[i]) {
continue;
}
var net = getNet(new ConnectionPoint(worldPosition, i));
if (net != null) {
net.updateValues();
}
needsUpdate[i] = false;
}
}
private void updateInputsShorted(byte[] out, ConnectionPoint cp) {
if (cp.index() != COLOR_ID) {
return;
}
byte[] totalSignal = Arrays.copyOf(lastInputByPoint[COLOR_ID], BusLine.LINE_SIZE);
boolean changed;
do {
changed = false;
for (var mapping : List.of(colorToGray, grayToColor)) {
for (int i = 0; i < BusLine.LINE_SIZE; ++i) {
var outIndex = mapping[i];
if (outIndex == NOT_MAPPED) {
continue;
}
if (totalSignal[i] > out[outIndex]) {
totalSignal[outIndex] = out[outIndex] = totalSignal[i];
changed = true;
}
}
}
} while (changed);
}
@Nullable
private RedstoneNetworkHandler getOtherNet(ConnectionPoint cp) {
return getNet(getOtherPoint(cp));
}
@Nullable
private RedstoneNetworkHandler getNet(ConnectionPoint cp) {
return getLocalNet(cp)
.getHandler(RedstoneNetworkHandler.ID, RedstoneNetworkHandler.class);
}
@Override
public Collection<ResourceLocation> getRequestedHandlers() {
return ImmutableList.of(LocalBusHandler.NAME, RedstoneNetworkHandler.ID);
}
public int[] getColorToGray() {
return colorToGray;
}
public void setColorToGray(int[] newColorToGray) {
this.colorToGray = newColorToGray;
this.grayToColor = makeInverseMapping(colorToGray);
}
private static int[] makeInitialMapping() {
int[] result = new int[BusLine.LINE_SIZE];
for (int i = 0; i < BusLine.LINE_SIZE; ++i) {
result[i] = i;
}
return result;
}
private static int[] makeInverseMapping(int[] mapping) {
int[] result = new int[mapping.length];
Arrays.fill(result, NOT_MAPPED);
for (int i = 0; i < mapping.length; ++i) {
if (mapping[i] >= 0 && mapping[i] < result.length) {
result[mapping[i]] = i;
}
}
return result;
}
/**
 * Appends the HUD overlay line shown when the player looks at this
 * connector: whether the targeted connection point is the colored or the
 * gray side.
 *
 * @param lines     overlay lines, appended to in place
 * @param hitResult the ray-trace hit on this block
 */
public void addOverlay(List<Component> lines, BlockHitResult hitResult) {
    // Hit location relative to this block's origin.
    var hitLoc = hitResult.getLocation().subtract(worldPosition.getX(), worldPosition.getY(), worldPosition.getZ());
    // NOTE(review): getTargetedPoint might return null for a hit that maps
    // to no connection point — confirm before the dereference below.
    var target = getTargetedPoint(
            new TargetingInfo(hitResult.getDirection(), (float) hitLoc.x, (float) hitLoc.y, (float) hitLoc.z),
            Vec3i.ZERO
    );
    lines.add(new TranslatableComponent(target.index() == COLOR_ID ? COLORED_KEY : GRAY_KEY));
}
}
|
#!/bin/sh
# SLURM header: one exclusive Haswell node, 24 CPUs, 4 h limit, output
# and error merged into static_average.out.
#SBATCH --time=4:00:00
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=24
#SBATCH --exclusive
#SBATCH --partition=haswell
#SBATCH --mem-per-cpu=2500M
#SBATCH --comment="cpufreqchown"
#SBATCH -J "lulesh_sacct"
#SBATCH -A p_readex
#SBATCH --reservation=READEX
#SBATCH --output=static_average.out
#SBATCH --error=static_average.out
# Run from the project root (this job script lives one level down).
cd ..
# Number of measurement repetitions used for averaging below.
REPEAT_COUNT=5
# Plain (uninstrumented) environment for the baseline measurement.
module purge
source ./readex_env/set_env_plain.source
# Cores on one Haswell node; consumed by change_frequency.
NUM_CPUS=24
# Pin every CPU core to the given frequency (argument in GHz, e.g. "2.5").
# Fix: the original used a C-style `for (( ... ))` loop, which is a
# bashism and fails under a strict /bin/sh (see the #!/bin/sh shebang);
# a POSIX while loop behaves identically everywhere.
change_frequency() {
    i=0
    while [ "$i" -lt "$NUM_CPUS" ]; do
        cpufreq-set -c "$i" -f "${1}GHz"
        i=$((i + 1))
    done
}
# Log the uncore frequency limits and current ratio (x86_adapt knobs)
# so the job output records the machine state at measurement time.
check_uncore_frequency() {
x86a_read -n -i Intel_UNCORE_MIN_RATIO
x86a_read -n -i Intel_UNCORE_MAX_RATIO
x86a_read -n -i Intel_UNCORE_CURRENT_RATIO
}
#change_frequency 2.5
#x86a_write -n -c 0 -i Intel_UNCORE_MAX_RATIO -V 30
#x86a_write -n -c 1 -i Intel_UNCORE_MAX_RATIO -V 30
#x86a_write -n -c 0 -i Intel_UNCORE_MIN_RATIO -V 30
#x86a_write -n -c 1 -i Intel_UNCORE_MIN_RATIO -V 30
#cpufreq-info
#export SCOREP_ENABLE_TRACING=true
#export SCOREP_ENABLE_PROFILING=false
#export SCOREP_TOTAL_MEMORY=3G
#export SCOREP_MPI_ENABLE_GROUPS=EXT
# Run the plain binary REPEAT_COUNT times; each run's RAPL measurements
# go into a fresh PLAIN_<run> directory.
i=1
rm -rf PLAIN_*
while [ "$i" -le "$REPEAT_COUNT" ]; do
    mkdir "PLAIN_$i"
    export MEASURE_RAPL_TARGET="PLAIN_$i"
    srun measure-rapl ./lulesh2.0_plain -i 100 -s 150
    i=$((i + 1))
done
#srun ./lulesh2.0_saf -i 100 -s 150
#export SCOREP_ENABLE_TRACING=true
#export SCOREP_ENABLE_PROFILING=false
#export SCOREP_TOTAL_MEMORY=3G
#export SCOREP_MPI_ENABLE_GROUPS=EXT
#cpu_freq_list=(1.2 2.0 2.4 2.5)
#uncore_freq_list=(14 22 26 30)
#for i in "${cpu_freq_list[@]}"
#do
# change_frequency $i
# for j in "${uncore_freq_list[@]}"
# do
#export MEASURE_RAPL_TARGET="TUNED_$sum"
# cpufreq-info
# x86a_write -n -c 0 -i Intel_UNCORE_MAX_RATIO -V $j
# x86a_write -n -c 1 -i Intel_UNCORE_MAX_RATIO -V $j
# x86a_write -n -c 0 -i Intel_UNCORE_MIN_RATIO -V $j
# x86a_write -n -c 1 -i Intel_UNCORE_MIN_RATIO -V $j
# check_uncore_frequency
#srun measure-rapl ./lulesh2.0_plain -i 250 -s 75
#srun -n 1 -c 24 --exclusive --mem-per-cpu 2500M -p haswell --reservation=READEX ./lulesh2.0_plain -i 50 -s 75
#sum=$sum + 1
# mpiexec --np 1 --npernode 1 --cpus-per-proc 24 ./lulesh2.0_saf -i 350 -s 75
#((sum++))
#echo $sum
# done
#done
# Aggregate run time and energy over the measurement runs.
# sacct step $SLURM_JOBID.$i corresponds to the i-th srun above, and the
# matching RAPL output lives in PLAIN_$i.
# Fix: the original advanced the counter BETWEEN reading the sacct step
# and scanning the PLAIN directory, pairing step i with directory i+1
# (PLAIN_1 was never read and the final directory was skipped). The
# increment now happens once, at the end of the loop body.
# NOTE(review): the `-lt` bound accounts only REPEAT_COUNT-1 of the
# REPEAT_COUNT runs — confirm the last run is excluded on purpose (the
# average divisors below assume it is).
i=1
total_time_plain=0
total_energy_plain=0
total_cpu_energy_plain=0
while [ "$i" -lt "$REPEAT_COUNT" ]; do
    times_energys=$(sacct -j $SLURM_JOBID.$i --format="JobID,CPUTimeRAW,ConsumedEnergyRaw")
    times_energys_array=(${times_energys[@]})
    time_step=${times_energys_array[7]}
    energy_step=${times_energys_array[8]}
    echo "Job Time : $time_step"
    echo "Job Energy: $energy_step"
    total_time_plain=$(echo "${total_time_plain} + ${time_step}" | bc)
    total_energy_plain=$(echo "${total_energy_plain} + ${energy_step}" | bc)
    # Sum both RAPL counters from the last line of every file of this run.
    for file in PLAIN_$i/*
    do
        values=$( tail -1 $file | awk -F'[ ,]' '{print int($1)" "int($2)}' )
        values=(${values[@]})
        # $(( )) replaces the deprecated $[ ] arithmetic form.
        total_cpu_energy_plain=$(( total_cpu_energy_plain + values[0] + values[1] ))
    done
    i=$((i + 1))
done
echo "Total Plain Time = $total_time_plain, Total Plain Energy = $total_energy_plain"
# Fix: this total was computed but never reported.
echo "Total Plain CPU Energy = $total_cpu_energy_plain"
avg_time_plain=$(echo "$total_time_plain/$((REPEAT_COUNT-1))" | bc)
avg_energy_plain=$(echo "$total_energy_plain/$((REPEAT_COUNT-1))" | bc)
echo "Average Plain Time=$avg_time_plain"
echo "Average Plain Energy=$avg_energy_plain"
rm -rf PLAIN_*
|
<filename>ui/src/app/lib/atlasmap-data-mapper/components/line-machine.component.spec.ts<gh_stars>1-10
/* tslint:disable:no-unused-variable */
import { ChangeDetectorRef } from '@angular/core';
import { TestBed, async, inject } from '@angular/core/testing';

import { LineMachineComponent } from './line-machine.component';

// Smoke test: the component can be constructed through Angular DI.
describe('LineMachineComponent', () => {
  beforeEach(() => {
    TestBed.configureTestingModule({
      providers: [LineMachineComponent, ChangeDetectorRef],
    });
  });

  it('should ...', inject(
    [LineMachineComponent],
    (service: LineMachineComponent) => {
      expect(service).toBeTruthy();
    },
  ));
});
|
// Daily check-in page: the user may sign in once per day and is awarded
// one candy ("Tang") per check-in. State is persisted through WeChat
// cloud functions and cloud database collections.
var util = require('../../utils/utils.js');
const db = wx.cloud.database()
const _ = db.command;

Page({
  data: {
    qian: '签到',   // check-in button label: '签到' = not yet done, '已签到' = done today
    userTang: 0,    // the user's current candy count
  },

  // Tap handler for the check-in button; no-op once already checked in.
  qiandao: function(){
    if(this.data.qian == '签到'){
      var newTang = this.data.userTang + 1
      // Call the cloud function that updates the candy count, passing the new value.
      wx.cloud.callFunction({
        name: 'updateTang',
        data: {
          openId: wx.getStorageSync('openId'),
          userTang: newTang
        },
        success: res => {
          // Reflect the successful check-in in the UI.
          this.setData({
            qian: '已签到',
            userTang: newTang
          })
          db.collection('qiandao').where({
            _openid: wx.getStorageSync('openId')
          }).get({
            success: res => {
              if (res.data.length == 0) {
                // First-ever check-in: store today's date.
                db.collection('qiandao').add({
                  data: {
                    time: util.formaDate(new Date()),
                  },
                  success: res => {
                    console.log('qiandao存入成功')
                  }
                })
              }else{
                // Existing record: update the stored date via cloud function.
                wx.cloud.callFunction({
                  name: 'updateQiandao',
                  data: {
                    openId: wx.getStorageSync('openId'),
                    time: util.formaDate(new Date())
                  },
                  success: res => {
                    console.log('更新成功')
                  }
                })
              }
            }
          })
        }
      })
    }
  },

  /**
   * Lifecycle hook — page load: fetch the user's candy count and decide
   * whether today's check-in has already happened.
   */
  onLoad: function (options) {
    db.collection('users').where({
      _openid: wx.getStorageSync('openId')
    }).get({
      success: res => {
        // assumes a matching user record exists — TODO confirm res.data[0]
        this.setData({
          userTang: res.data[0].userTang
        })
      },
      fail: console.error
    })
    db.collection('qiandao').where({
      _openid: wx.getStorageSync('openId')
    }).get({
      success: res => {
        // NOTE(review): relies on util.formaDate producing strings whose
        // lexicographic order matches date order — confirm its format.
        if (res.data[0].time < util.formaDate(new Date())){
          this.setData({
            qian: '签到'
          })
        }else{
          this.setData({
            qian: '已签到'
          })
        }
      },
      fail: console.error
    })
  },

  /** Lifecycle hook — first render complete. */
  onReady: function () {
  },

  /** Lifecycle hook — page shown. */
  onShow: function () {
  },

  /** Lifecycle hook — page hidden. */
  onHide: function () {
  },

  /** Lifecycle hook — page unloaded. */
  onUnload: function () {
  },

  /** Event handler — user pulled down to refresh. */
  onPullDownRefresh: function () {
  },

  /** Event handler — page scrolled to the bottom. */
  onReachBottom: function () {
  },

  /** User tapped the share action (top-right corner). */
  onShareAppMessage: function () {
  }
})
// Accessor for the flash-message object stored on the global `window`
// (set elsewhere, e.g. by the server-rendered page).
export function getFlash() {
  return window.flash;
}
<reponame>DjangoCrypto/django-crypto-extensions
import datetime
from django.test import TestCase
from django_crypto_extensions.django_fields import (
CryptoTextField,
)
from django_crypto_extensions.tests.models import (
CryptoTextModel,
CryptoTextModelPassword,
CryptoAllFieldModel,
CryptoTextModelPasswordFromField
)
from django.conf import settings
DEFAULT_PASSWORD = "<PASSWORD>"
class CryptoFieldTest(TestCase):
    """Round-trip and field-API tests for the encrypted model fields."""

    def test_char_field_create(self):
        # An encrypted text field must read back as its original plaintext.
        t = CryptoTextModel.objects.create(text_field="RandomTextField123!")
        self.assertEqual(t.text_field, "RandomTextField123!")

    def test_all_field_create(self):
        # Every supported field type must round-trip unchanged, including
        # boundary values (big_int_field uses the int64 minimum).
        text_field = "RandomTextField123!"
        char_field = "RandomCharField123!"
        email_field = "<EMAIL>"
        int_field = -123
        date_field = datetime.date(2001, 1, 1)
        date_time_field = datetime.datetime(2001, 1, 1, 13, 00)
        big_int_field = -9223372036854775808
        positive_int_field = 123
        positive_small_int_field = 1
        small_int_field = -1
        t = CryptoAllFieldModel.objects.create(
            text_field=text_field,
            char_field=char_field,
            email_field=email_field,
            int_field=int_field,
            date_field=date_field,
            date_time_field=date_time_field,
            big_int_field=big_int_field,
            positive_int_field=positive_int_field,
            positive_small_int_field=positive_small_int_field,
            small_int_field=small_int_field,
        )
        self.assertEqual(t.text_field, text_field)
        self.assertEqual(t.char_field, char_field)
        self.assertEqual(t.email_field, email_field)
        self.assertEqual(t.int_field, int_field)
        self.assertEqual(t.date_field, date_field)
        self.assertEqual(t.date_time_field, date_time_field)
        self.assertEqual(t.big_int_field, big_int_field)
        self.assertEqual(t.positive_int_field, positive_int_field)
        self.assertEqual(t.positive_small_int_field, positive_small_int_field)
        self.assertEqual(t.small_int_field, small_int_field)

    def test_char_field_create_password(self):
        # Same round-trip, but on the model configured with an explicit password.
        t = CryptoTextModelPassword.objects.create(text_field="RandomTextField123!")
        self.assertEqual(t.text_field, "RandomTextField123!")

    def test_mutable(self):
        # NOTE(review): assertIs checks object IDENTITY; both attributes were
        # assigned the same string literal, so this may pass only due to
        # CPython interning — confirm identity (not equality) is the intent.
        t1 = CryptoTextModel.objects.create(text_field="RandomTextField123!")
        t2 = CryptoTextModel.objects.create(text_field="RandomTextField123!")
        self.assertIs(t1.text_field, t2.text_field)

    def test_get_prep_value(self):
        # get_prep_value must pass the plaintext through unchanged.
        c_text = CryptoTextField()
        self.assertEqual(
            "RandomTextField123!",
            c_text.get_prep_value(value="RandomTextField123!"),
        )

    def test_get_db_prep_save(self):
        # The value actually written to the database is ciphertext bytes.
        c_text = CryptoTextField()
        self.assertIs(
            bytes,
            type(c_text.get_db_prep_save(value="RandomTextField123!", connection=None)),
        )

    def test_to_python(self):
        # to_python is the identity on already-decrypted strings.
        c_text = CryptoTextField()
        self.assertEqual(str(""), c_text.to_python(""))
        self.assertEqual(str("a"), c_text.to_python("a"))

    def test_password_salt(self):
        # Default field falls back to DEFAULT_PASSWORD and salts with
        # settings.SECRET_KEY; an explicit password overrides the default.
        c_text = CryptoTextField()
        c2_text = CryptoTextField(password="<PASSWORD>")
        self.assertEqual(DEFAULT_PASSWORD, c_text.password)
        self.assertEqual(settings.SECRET_KEY, c_text.salt)
        self.assertEqual("password_to_be_used_as_key", c2_text.password)

    def test_password_field(self):
        # Password can also be sourced from another field on the model.
        k = CryptoTextModelPasswordFromField(text_field="Random<PASSWORD>!")
        self.assertEqual("password_field_to_be_used_as_key", k.password)
|
<filename>src/components/blog-preview/index.js<gh_stars>1-10
import React from 'react';
import AniLink from 'gatsby-plugin-transition-link/AniLink';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { faClock, faShare } from '@fortawesome/free-solid-svg-icons';
import { toast } from 'react-toastify';
import { css } from 'glamor';
import { copyTextToClipboard } from '../../services/clipboard';
import style from './blog-preview.module.scss';
/**
 * Card-style preview of a blog post: cover image, title, description,
 * publish date, and a share button that copies the post's link.
 *
 * @param post post data; fields used here: path, cover, alt, title,
 *             description, date, slug
 * @param postTitleClassName          extra class for the title element
 * @param postDescriptionClassName    extra class for the description
 * @param postOptionsWrapperClassName extra class for the options row
 * @param postOptionsItemClassName    extra class for each option button
 */
function BlogPreview({
  post,
  postTitleClassName = '',
  postDescriptionClassName = '',
  postOptionsWrapperClassName = '',
  postOptionsItemClassName = ''
}) {
  return (
    <AniLink
      to={`/${post.path}`}
      className={style.blogPreview}
      swipe
      direction="left"
    >
      <img
        src={post.cover}
        alt={post.alt || 'Capa do post'}
        className={style.blogPreviewCover}
      />
      <div className={style.blogPreviewInfo}>
        <h2 className={`${style.blogPreviewInfoTitle} ${postTitleClassName}`}>
          { post.title }
        </h2>
        <p className={`${style.blogPreviewInfoDescription} ${postDescriptionClassName}`}>
          { post.description }
        </p>
        <div className={`${style.blogPreviewInfoOptions} ${postOptionsWrapperClassName}`}>
          <button className={`${style.blogPreviewInfoOptionsItem} ${postOptionsItemClassName}`}>
            <FontAwesomeIcon
              className={style.blogPreviewInfoOptionsItemIcon}
              icon={faClock}
            />
            <span className={style.blogPreviewInfoOptionsItemText}>
              { post.date }
            </span>
          </button>
          <button
            onClick={copyPostLinkToClipboard}
            className={`${style.blogPreviewInfoOptionsItem} ${postOptionsItemClassName}`}
          >
            <FontAwesomeIcon
              className={style.blogPreviewInfoOptionsItemIcon}
              icon={faShare}
            />
            <span className={style.blogPreviewInfoOptionsItemText}>
              Compartilhar
            </span>
          </button>
        </div>
      </div>
    </AniLink>
  );

  // Copies the post URL to the clipboard and confirms with a toast.
  // NOTE(review): the copied link is built from `post.slug`, while the card
  // itself navigates to `post.path` — confirm these hold the same value,
  // otherwise the shared URL won't match the card's destination.
  function copyPostLinkToClipboard(event) {
    // Keep the surrounding AniLink from navigating on this click.
    event.preventDefault();
    copyTextToClipboard(`${window.location.origin}/${post.slug}`);
    toast('Link copiado :)', {
      className: css({
        background: 'black'
      }),
      bodyClassName: css({
        fontSize: '18px',
        color: 'white'
      }),
      progressClassName: css({
        background: "radial-gradient(#a55eea 25%, #fed330 25%, #fc5c65 25%, #2bcbba 25%)",
        fontFamily: 'Lato'
      })
    });
  }
}

export default BlogPreview;
<reponame>belo355/omnistack11
const request = require('supertest');
const app = require('../../src/app');
const connection = require('../../src/database/connection');
describe('ONG', () => {
    // Reset the database to a clean schema before every test.
    beforeEach(async () => {
        await connection.migrate.rollback();
        await connection.migrate.latest();
    });

    // Release the connection pool so the test runner can exit cleanly.
    afterAll(async () => {
        await connection.destroy();
    });

    it('should be able to create a new ONG', async () => {
        const response = await request(app).post('/ongs')
            .send({
                name: "APAD2",
                email: "<EMAIL>",
                whatsapp: "1100000000",
                city: "sao paulo",
                uf: "SP"
            });

        // The API answers with the generated 8-character id.
        expect(response.body).toHaveProperty('id');
        expect(response.body.id).toHaveLength(8);
    });

    it('should be able to list existents ONG', async () => {
        // Create one ONG, then list and check the stored fields.
        // Fix: the original compared `response.body.name` (the list body is
        // an ARRAY) against fields on the supertest Response object itself
        // (they live under `.body`), so every expectation compared
        // undefined === undefined and passed vacuously.
        const payload = {
            name: "APAD2",
            email: "<EMAIL>",
            whatsapp: "1100000000",
            city: "sao paulo",
            uf: "SP"
        };
        await request(app).post('/ongs').send(payload);

        const response = await request(app).get('/ongs');

        expect(response.body).toHaveLength(1);
        const [ong] = response.body;
        expect(ong.name).toEqual(payload.name);
        expect(ong.email).toEqual(payload.email);
        expect(ong.whatsapp).toEqual(payload.whatsapp);
        expect(ong.city).toEqual(payload.city);
        expect(ong.uf).toEqual(payload.uf);
    });
});
// TODO: pending — add a test for the DELETE route
<gh_stars>10-100
#include <bits/stdc++.h>
using namespace std;
// Factorial of n, computed iteratively (n! = 1 for n < 2, including
// negative inputs, matching the recursive original).
// Note: int overflows for n >= 13.
int fact(int n)
{
    int result = 1;
    for (int k = 2; k <= n; ++k)
        result *= k;
    return result;
}
// Binomial coefficient C(n, r) — one Pascal's-triangle entry.
// Fix/generalization: the original computed fact(n) / (fact(r) * fact(n-r)),
// whose int factorials overflow for n >= 13. The multiplicative formula
// keeps every intermediate value a (smaller) binomial coefficient, so rows
// well beyond n = 12 are now exact; a 64-bit accumulator adds headroom.
// Also returns 0 for out-of-range r instead of dividing garbage.
int pascalTriangle(int n, int r)
{
    if (r < 0 || r > n)
        return 0;
    if (r > n - r)
        r = n - r; // C(n, r) == C(n, n - r); iterate the shorter side
    long long result = 1;
    for (int k = 0; k < r; ++k)
    {
        // Exact at every step: result is C(n, k+1) after the division.
        result = result * (n - k) / (k + 1);
    }
    return static_cast<int>(result);
}
// Reads the number of rows from stdin and prints a centered Pascal's
// triangle, one row per line.
int main()
{
    int rowCount;
    std::cin >> rowCount;
    for (int row = 0; row < rowCount; ++row)
    {
        // Leading padding so the triangle appears centered.
        for (int pad = rowCount - row; pad >= 1; --pad)
            std::cout << " ";
        for (int col = 0; col <= row; ++col)
            std::cout << pascalTriangle(row, col) << " ";
        std::cout << std::endl;
    }
    return 0;
}
def add_from_n_to_m(n, m):
    """Return the sum of all integers from `n` to `m`, inclusive.

    Uses the built-in ``sum`` over a ``range`` instead of a manual
    accumulator loop. For an empty range (``n > m``) the result is 0,
    matching the original loop's behavior.
    """
    return sum(range(n, m + 1))
# Example run: the integers 2 + 3 + 4 + 5 sum to 14.
n, m = 2, 5
print(add_from_n_to_m(n, m))
#!/bin/bash
# This script needs to be run after cmake on BG/Q systems to filter out
# unwanted X11 dependencies that CMake places in the link line.
#
# CMake adds -lX11/-lXext even though we don't want them, and inserts
# -Wl,-Bstatic/-Wl,-Bdynamic keywords that prevent the linker from
# making a good static executable.  Strip all of them from each
# affected target's link.txt and relink.txt.
#
# Fix: the original repeated the same 13-line sed pipeline eight times
# (once per target x link file); the logic now lives in one helper, so
# adding a target is a one-line change and the sed expressions cannot
# drift apart.

# Strip the unwanted flags from one CMake link file, in place.
# $1: path to the link file (link.txt or relink.txt)
filter_link_file()
{
    linkfile="$1"
    if test -e "$linkfile" ; then
        sed -e "s/-lX11//g" \
            -e "s/-lXext//g" \
            -e "s/-Wl,-Bstatic//g" \
            -e "s/-Wl,-Bdynamic//g" "$linkfile" > "$linkfile.filtered"
        mv "$linkfile.filtered" "$linkfile"
    else
        echo "***** DID NOT SEE: $linkfile pwd=`pwd`"
    fi
}

# All targets whose link lines must be filtered: the engines, the
# converters, the osmesa diagnostics, and the database test_cache.
for edir in \
    engine/main/CMakeFiles/engine_ser_exe.dir \
    engine/main/CMakeFiles/engine_par_exe.dir \
    tools/convert/CMakeFiles/visitconvert_ser.dir \
    tools/convert/CMakeFiles/visitconvert_par.dir \
    tools/diagnostics/osmesatest/CMakeFiles/osmesavtktest_ser.dir \
    tools/diagnostics/osmesatest/CMakeFiles/osmesavtktest_par.dir \
    avt/Database/CMakeFiles/test_cache.dir
do
    filter_link_file "$edir/link.txt"
    filter_link_file "$edir/relink.txt"
done
exit 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.