text stringlengths 1 1.05M |
|---|
<filename>src/app/Services/common-function-my-profile.ts
import { Injectable } from '@angular/core';
import { LocationResponseData } from '../models/locations/location-response-data';
import { Railcard } from '../models/journey/railcards/railcard';
import { ResponseData } from '../models/common/response';
import { HttpHeaders, HttpClient } from '@angular/common/http';
import { environment } from 'src/environments/environment';
import { HttpClientService } from '../utility/http-client.service';
import { ApireferenceService } from '../utility/apireference.service';
import { RailCardModel } from '../models/journey/railcard.model';
import { CommonServiceService } from 'src/app/Services/common-service.service';
import { StorageDataService } from 'src/app/Services/SharedCache.service';
import { SharedService } from 'src/app/Services/shared.service';
import { ApplicationConstants } from 'src/app/models/Constants/constants.model';
import { CustomerServiceService } from 'src/app/services/customer-service.service';
import { AddNewAddressRequest } from 'src/app/models/customer/add-new-address-request';
import { AddressResponse } from 'src/app/models/customer/address-response';
@Injectable({
  providedIn: 'root'
})
export class CommonFunctionMyProfile {
  constructor(public sharedService: SharedService, private sharedStorage: StorageDataService,
    private customerService: CustomerServiceService, private commonServiceService: CommonServiceService,
    private httpClientService: HttpClientService, private apiPath: ApireferenceService) { }

  public locations: LocationResponseData[];
  public railCards: RailCardModel[];
  responseData: ResponseData;
  public locationData: LocationResponseData[];
  addressResponse: AddressResponse;
  sharedServiceData: SharedService;

  /**
   * Fetches the location list, caches it on the shared service and in
   * persistent storage, and returns the cached location data.
   *
   * NOTE(review): the HTTP call is asynchronous, so the `return` below runs
   * before the subscribe callback does — callers get the PREVIOUSLY cached
   * `locationData` (undefined on first call). Returning the Observable would
   * fix this but changes the public interface, so it is only flagged here.
   */
  getLocations() {
    this.commonServiceService.getLocations().subscribe(
      res => {
        if (res != null) {
          this.responseData = res as ResponseData;
          if (this.responseData.responseCode == '200') {
            this.locationData = this.responseData.data;
            // The original assigned sharedService.locationResponseData twice
            // with the same value; once is enough.
            this.sharedService.locationResponseData = this.locationData;
            this.sharedStorage.clearStorageData(ApplicationConstants.StoredAllServices);
            this.sharedStorage.setStorageData(ApplicationConstants.StoredAllServices, this.sharedService, true);
          }
        }
      });
    return this.locationData;
  }
}
|
<filename>WinSort/WinSort/FileName.cpp
#include "stdafx.h"
#include "FileName.h"
#include <algorithm>
#include "utils.h"
#include <iostream>
#include <stdlib.h>
#include <Shlwapi.h>
// Wrap an externally owned wide-character file name; isFolder flags directories.
FileName::FileName(const wchar_t *file_name, bool isFolder)
    : fileName(file_name), isFolder(isFolder) {
}
FileName::~FileName() {
    // The wrapped strings are not owned by this class (the constructor just
    // stores the caller's pointer), so nothing is freed here. The leftover
    // debug print to stdout on every destruction has been removed.
    //delete[] this->fileName;
    //delete[] this->suffix;
}
// Order file names the way Windows Explorer does: StrCmpLogicalW compares
// embedded digit runs numerically ("file2" < "file10").
bool FileName::operator<(const FileName &rhs) {
    return StrCmpLogicalW(fileName, rhs.fileName) < 0;
}
// Print the narrow-character conversion of the file name to stdout.
// NOTE(review): the "_New" suffix suggests WcharToChar_New allocates a fresh
// buffer (declared in utils.h); its result is never freed here — possible
// memory leak, confirm against the helper's implementation.
void FileName::printFileName() {
std::cout << WcharToChar_New(this->fileName) << std::endl;
}
|
package uk.org.ulcompsoc.ld32.systems;
import uk.org.ulcompsoc.ld32.components.Position;
import uk.org.ulcompsoc.ld32.components.Renderable;
import uk.org.ulcompsoc.ld32.util.Mappers;
import com.badlogic.ashley.core.Entity;
import com.badlogic.ashley.core.Family;
import com.badlogic.ashley.systems.IteratingSystem;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.graphics.glutils.ShapeRenderer;
import com.badlogic.gdx.graphics.glutils.ShapeRenderer.ShapeType;
/**
 * Debug renderer: draws a point plus a small outlined square at the position
 * of every entity carrying both a Position and a Renderable component.
 */
public class PositionDebugSystem extends IteratingSystem {
    private final ShapeRenderer renderer;

    @SuppressWarnings("unchecked")
    public PositionDebugSystem(int priority, final ShapeRenderer renderer) {
        super(Family.all(Position.class, Renderable.class).get(), priority);
        this.renderer = renderer;
        System.out.println("PositionDebugSystem created: are you sure you meant to do this?");
    }

    @Override
    public void update(float deltaTime) {
        // One line-drawing batch wraps the per-entity processing below.
        renderer.begin(ShapeType.Line);
        renderer.setColor(Color.BLACK);
        super.update(deltaTime);
        renderer.end();
    }

    @Override
    protected void processEntity(Entity entity, float deltaTime) {
        final Position pos = Mappers.positionMapper.get(entity);
        final float markerSize = 6.0f;
        final float half = markerSize / 2;
        // A dot at the exact position and a square centred on it.
        renderer.point(pos.getX(), pos.getY(), 0.0f);
        renderer.rect(pos.getX() - half, pos.getY() - half, markerSize, markerSize);
    }
}
|
#!/bin/sh
# ML pipeline driver: format sources, run the data pipeline, profile the
# training data, then train and score the model.
# Abort as soon as any stage fails instead of running every later stage
# against stale or missing artifacts.
set -e

echo 'Running yapf with google style'
yapf -ir .. --style google -vv
echo 'Reformatting completed'

echo 'Retrieving, preprocessing and segregating data...'
mlflow run ../executor/. -P config_name=eda_config.yaml
echo 'Data pipeline executed'

echo 'Generating pandas-profiling report for train dataset...'
python data_profiler.py --input_file=../data/segregated/student_maths_train.csv --output_file=./train_data_profile.html
echo 'pandas-profiling report generated.'

echo 'Training Model...'
mlflow run ../inference/.
echo 'Model training completed.'

echo 'Evaluating the model...'
mlflow run ../scoring/.
echo 'Model scoring completed'
import numpy as np

def f(x):
    """Objective: f(x) = x^2 - 2x + 2, minimised at x = 1."""
    return np.power(x, 2) - 2*x + 2

# Hyper-parameters.
x_start = 4        # initial parameter
alpha = 0.01       # learning rate
iterations = 100   # number of gradient steps

# Trace of the parameter after every update.
parameters = []

# Gradient descent: x <- x - alpha * f'(x), with f'(x) = 2x - 2.
x = x_start
for i in range(iterations):
    grad = 2*x - 2
    x -= alpha*grad
    parameters.append(x)

# The best parameter is the one with the LOWEST OBJECTIVE VALUE, not the
# numerically smallest x. The original used min(parameters), which is only
# coincidentally correct here because descent approaches x=1 from above.
optimal_parameter = min(parameters, key=f)
print("Optimal parameter is {}".format(optimal_parameter))
package libs.trustconnector.scdp.testsuite;
import libs.trustconnector.scdp.*;
import libs.trustconnector.scdp.SCDP;
import libs.trustconnector.scdp.TestSuite;
import libs.trustconnector.scdp.TestSuiteEntry;
public class Repeater extends TestSuite
{
@Override
public boolean ExcTest(final String[] param) {
if (param.length < 2) {
SCDP.reportError("param error,usage:counter testclass[param1,param2,...]");
return false;
}
final int counter = Integer.valueOf(param[0]);
String[] p = null;
if (param.length > 2) {
p = new String[param.length - 2];
System.arraycopy(param, 2, p, 0, param.length - 2);
}
for (int i = 1; i <= counter; ++i) {
final int cookie = SCDP.beginGroup("Counter " + i);
try {
final Class<?> cls = Class.forName(param[1]);
final TestSuiteEntry ts = (TestSuiteEntry)cls.newInstance();
if (!ts.ExcTest(p)) {
SCDP.reportError("Test result is false");
return false;
}
}
catch (ClassNotFoundException e) {
SCDP.reportError("test case error:" + param[1] + " does not found!");
e.printStackTrace();
return false;
}
catch (ExceptionInInitializerError e2) {
SCDP.reportError("test case init error");
e2.printStackTrace();
return false;
}
catch (Exception e3) {
SCDP.reportError("test case name error:" + param[1] + " does not implements TestsuitEntry");
e3.printStackTrace();
return false;
}
SCDP.endGroup(cookie);
}
return true;
}
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.hadoop.has.webserver;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.has.kdc.HASKdcServer;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
import org.apache.kerby.kerberos.kdc.impl.NettyKdcServerImpl;
import org.apache.kerby.kerberos.kerb.server.KdcContext;
import org.apache.kerby.kerberos.kerb.server.KdcSetting;
import javax.servlet.ServletContext;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
/**
 * A HTTP and Netty based KDC server implementation.
 *
 * Wraps the Netty KDC with an embedded Jetty (HttpServer2) endpoint that
 * serves the HAS REST API under /has/v1/*.
 */
public class HttpKdcServerImpl extends NettyKdcServerImpl {
    public static final Log LOG = LogFactory.getLog(HttpKdcServerImpl.class.getName());

    private HttpServer2 httpServer;
    private final Configuration conf;
    private InetSocketAddress httpAddress;
    private InetSocketAddress httpsAddress;
    private final InetSocketAddress bindAddress;
    // Assigned once in the constructor and never reassigned, so marked final.
    private final HASKdcServer kdcServer;

    protected static final String KDCSERVER_ATTRIBUTE_KEY = "kdcserver";

    public HttpKdcServerImpl(Configuration conf, InetSocketAddress bindAddress,
                             KdcSetting kdcSetting, HASKdcServer kdcServer) {
        super(kdcSetting);
        this.conf = conf;
        this.bindAddress = bindAddress;
        this.kdcServer = kdcServer;
    }

    /**
     * Registers the HAS Jersey resources on the already-built http server.
     * Must be called after {@code httpServer} is built (see startJettyServer).
     */
    private void init(Configuration conf) throws IOException {
        final String pathSpec = "/has/v1/*";
        // add has packages
        httpServer.addJerseyResourcePackage(HASWebMethods.class
            .getPackage().getName(),
            pathSpec);
    }

    @Override
    protected void doStart() throws Exception {
        super.doStart();
        startJettyServer();
        LOG.info("Http kdc server started.");
    }

    @Override
    protected void doStop() throws Exception {
        super.doStop();
        jettyStop();
        LOG.info("Http kdc server stopped.");
    }

    /**
     * Get http policy. Side effect: normalizes the configured value back
     * into the Configuration under HAS_HTTP_POLICY_KEY.
     */
    public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
        String policyStr = conf.get(HASConfigKeys.HAS_HTTP_POLICY_KEY,
            HASConfigKeys.HAS_HTTP_POLICY_DEFAULT);
        HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
        if (policy == null) {
            // Typo fix: message previously read "Unregonized".
            throw new HadoopIllegalArgumentException("Unrecognized value '"
                + policyStr + "' for " + HASConfigKeys.HAS_HTTP_POLICY_KEY);
        }
        conf.set(HASConfigKeys.HAS_HTTP_POLICY_KEY, policy.name());
        return policy;
    }

    /**
     * Return a HttpServer.Builder that the ssm can use to
     * initialize their HTTP / HTTPS server.
     */
    public static HttpServer2.Builder httpServerTemplateForHAS(
        Configuration conf, final InetSocketAddress httpAddr,
        final InetSocketAddress httpsAddr, String name) throws IOException {
        HttpConfig.Policy policy = getHttpPolicy(conf);

        HttpServer2.Builder builder = new HttpServer2.Builder().setName(name);

        if (policy.isHttpEnabled()) {
            // Port 0 means "pick any free port"; let the builder search.
            if (httpAddr.getPort() == 0) {
                builder.setFindPort(true);
            }
            URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
            builder.addEndpoint(uri);
            LOG.info("Starting Web-server for " + name + " at: " + uri);
        }

        if (policy.isHttpsEnabled() && httpsAddr != null) {
            Configuration sslConf = loadSslConfiguration(conf);
            loadSslConfToHttpServerBuilder(builder, sslConf);
            if (httpsAddr.getPort() == 0) {
                builder.setFindPort(true);
            }
            URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr));
            builder.addEndpoint(uri);
            LOG.info("Starting Web-server for " + name + " at: " + uri);
        }
        return builder;
    }

    /**
     * Load HTTPS-related configuration from the configured keystore resource.
     */
    public static Configuration loadSslConfiguration(Configuration conf) {
        Configuration sslConf = new Configuration(false);

        sslConf.addResource(conf.get(
            HASConfigKeys.HAS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
            HASConfigKeys.HAS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));

        final String[] reqSslProps = {
            HASConfigKeys.HAS_SERVER_HTTPS_TRUSTSTORE_LOCATION_KEY,
            HASConfigKeys.HAS_SERVER_HTTPS_KEYSTORE_LOCATION_KEY,
            HASConfigKeys.HAS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY,
            HASConfigKeys.HAS_SERVER_HTTPS_KEYPASSWORD_KEY
        };

        // Check if the required properties are included; warn (not fail) when
        // any is missing, matching the lenient behavior of HDFS.
        for (String sslProp : reqSslProps) {
            if (sslConf.get(sslProp) == null) {
                LOG.warn("SSL config " + sslProp + " is missing. If " +
                    HASConfigKeys.HAS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY +
                    " is specified, make sure it is a relative path");
            }
        }

        boolean requireClientAuth = conf.getBoolean(HASConfigKeys.HAS_CLIENT_HTTPS_NEED_AUTH_KEY,
            HASConfigKeys.HAS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
        sslConf.setBoolean(HASConfigKeys.HAS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
        return sslConf;
    }

    public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
                                                                     Configuration sslConf) {
        return builder
            .needsClientAuth(
                sslConf.getBoolean(HASConfigKeys.HAS_CLIENT_HTTPS_NEED_AUTH_KEY,
                    HASConfigKeys.HAS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
            .keyPassword(getPassword(sslConf, HASConfigKeys.HAS_SERVER_HTTPS_KEYPASSWORD_KEY))
            .keyStore(sslConf.get("ssl.server.keystore.location"),
                getPassword(sslConf, HASConfigKeys.HAS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY),
                sslConf.get("ssl.server.keystore.type", "jks"))
            .trustStore(sslConf.get("ssl.server.truststore.location"),
                getPassword(sslConf, HASConfigKeys.HAS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY),
                sslConf.get("ssl.server.truststore.type", "jks"))
            .excludeCiphers(
                sslConf.get("ssl.server.exclude.cipher.list"));
    }

    /**
     * Leverages the Configuration.getPassword method to attempt to get
     * passwords from the CredentialProvider API before falling back to
     * clear text in config - if falling back is allowed.
     *
     * @param conf Configuration instance
     * @param alias name of the credential to retrieve
     * @return String credential value or null
     */
    static String getPassword(Configuration conf, String alias) {
        String password = null;
        try {
            char[] passchars = conf.getPassword(alias);
            if (passchars != null) {
                password = new String(passchars);
            }
        } catch (IOException ioe) {
            LOG.warn("Setting password to null since IOException is caught"
                + " when getting password", ioe);
            password = null;
        }
        return password;
    }

    /**
     * Builds and starts the embedded Jetty server according to the configured
     * Http Policy, records the actual bound addresses back into the
     * Configuration, and exposes the KDC server via a servlet attribute.
     */
    private void startJettyServer() throws IOException {
        HttpConfig.Policy policy = getHttpPolicy(conf);

        final String infoHost = bindAddress.getHostName();
        final InetSocketAddress httpAddr = bindAddress;

        final String httpsAddrString = conf.getTrimmed(
            HASConfigKeys.HAS_HTTPS_ADDRESS_KEY,
            HASConfigKeys.HAS_HTTPS_ADDRESS_DEFAULT);
        InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

        if (httpsAddr != null) {
            // If DFS_NAMENODE_HTTPS_BIND_HOST_KEY exists then it overrides the
            // host name portion of DFS_NAMENODE_HTTPS_ADDRESS_KEY.
            final String bindHost =
                conf.getTrimmed(HASConfigKeys.HAS_HTTPS_BIND_HOST_KEY);
            if (bindHost != null && !bindHost.isEmpty()) {
                httpsAddr = new InetSocketAddress(bindHost, httpsAddr.getPort());
            }
        }

        HttpServer2.Builder builder = httpServerTemplateForHAS(conf, httpAddr, httpsAddr, "has");
        httpServer = builder.build();
        init(conf);
        httpServer.start();

        // Record the actual ports chosen by the server (relevant when a
        // configured port of 0 triggers setFindPort above).
        int connIdx = 0;
        if (policy.isHttpEnabled()) {
            httpAddress = httpServer.getConnectorAddress(connIdx++);
            conf.set(HASConfigKeys.HAS_HTTP_ADDRESS_KEY,
                NetUtils.getHostPortString(httpAddress));
        }

        if (policy.isHttpsEnabled()) {
            httpsAddress = httpServer.getConnectorAddress(connIdx);
            conf.set(HASConfigKeys.HAS_HTTPS_ADDRESS_KEY,
                NetUtils.getHostPortString(httpsAddress));
        }

        httpServer.setAttribute(KDCSERVER_ATTRIBUTE_KEY, kdcServer);
    }

    public static HASKdcServer getKdcServerFromContext(ServletContext context) {
        return (HASKdcServer) context.getAttribute(KDCSERVER_ATTRIBUTE_KEY);
    }

    /**
     * Joins the httpserver.
     */
    public void join() throws InterruptedException {
        if (httpServer != null) {
            httpServer.join();
        }
    }

    private void jettyStop() throws Exception {
        if (httpServer != null) {
            httpServer.stop();
        }
    }

    InetSocketAddress getHttpAddress() {
        return httpAddress;
    }

    InetSocketAddress getHttpsAddress() {
        return httpsAddress;
    }

    /**
     * Returns the httpServer.
     * @return HttpServer2
     */
    @VisibleForTesting
    public HttpServer2 getHttpServer() {
        return httpServer;
    }
}
|
#!/bin/bash
# run_raspir_SLURM.sh
# last updated: 28 July 2021

# set partition
#SBATCH -p normal
# set run on x MB node only
#SBATCH --mem 30000
# set run x cpus
#SBATCH --cpus-per-task 8
# set name of job
#SBATCH --job-name=raspir_run

# Add miniconda3 to PATH. TODO - autodetection
. /mnt/ngsnfs/tools/miniconda3/etc/profile.d/conda.sh

# Activate env on cluster node
conda activate raspir_env >> /dev/null

# Iterate with a glob instead of parsing `ls` output, and quote every
# expansion so file names containing spaces survive word splitting.
for i in *raspir.csv
do
    echo "$i"
    python raspir.py "$i" "${i%.csv}"
done
|
/**
* @file 二元表达式操作函数
* @author errorrik(<EMAIL>)
*/
/**
* 二元表达式操作函数
*
* @type {Object}
*/
var BinaryOp = {
/* eslint-disable */
43: function (a, b) {
return a + b;
},
45: function (a, b) {
return a - b;
},
42: function (a, b) {
return a * b;
},
47: function (a, b) {
return a / b;
},
60: function (a, b) {
return a < b;
},
62: function (a, b) {
return a > b;
},
76: function (a, b) {
return a && b;
},
94: function (a, b) {
return a != b;
},
121: function (a, b) {
return a <= b;
},
122: function (a, b) {
return a == b;
},
123: function (a, b) {
return a >= b;
},
155: function (a, b) {
return a !== b;
},
183: function (a, b) {
return a === b;
},
248: function (a, b) {
return a || b;
}
/* eslint-enable */
};
exports = module.exports = BinaryOp;
|
def mean(nums):
    """Return the arithmetic mean of a non-empty sequence of numbers.

    Raises:
        ValueError: if nums is empty (instead of the opaque
            ZeroDivisionError the original raised).
    """
    if not nums:
        raise ValueError("mean() of an empty sequence")
    return sum(nums) / len(nums)

print(mean([2,3,5,7]))
// GpuTimer measures GPU time spent in named phases of each frame using the
// EXT_disjoint_timer_query WebGL extension. Phases are opened with
// begin()/mark() and their results resolved asynchronously a few frames
// later, once the GL reports the queries as available.
// NOTE(review): assumes app.graphicsDevice.extDisjointTimerQuery is present;
// if the device lacks the extension this is undefined — confirm callers
// guard for that before enabling the timer.
function GpuTimer(app) {
this._gl = app.graphicsDevice.gl;
this._ext = app.graphicsDevice.extDisjointTimerQuery;
this._freeQueries = []; // pool of free queries
this._frameQueries = []; // current frame's queries
this._frames = []; // list of previous frame queries
this._timings = []; // most recently resolved [name, ms] pairs
this._prevTimings = []; // scratch list swapped with _timings on resolve
this.enabled = true;
// Hook the app's frame lifecycle: update -> render -> frame end.
app.on('frameupdate', this.begin.bind(this, 'update'));
app.on('framerender', this.mark.bind(this, 'render'));
app.on('frameend', this.end.bind(this));
}
Object.assign(GpuTimer.prototype, {
// mark the beginning of the frame
begin: function (name) {
if (!this.enabled) {
return;
}
// store previous frame's queries
if (this._frameQueries.length > 0) {
this.end();
}
// check if all in-flight queries have been invalidated
this._checkDisjoint();
// resolve previous frame timings (oldest pending frame only; newer
// frames stay queued until their queries become available)
if (this._frames.length > 0) {
if (this._resolveFrameTimings(this._frames[0], this._prevTimings)) {
// swap
var tmp = this._prevTimings;
this._prevTimings = this._timings;
this._timings = tmp;
// free
this._freeQueries = this._freeQueries.concat(this._frames.splice(0, 1)[0]);
}
}
this.mark(name);
},
// mark the start of a named phase; closes the previous phase's query
mark: function (name) {
if (!this.enabled) {
return;
}
// end previous query
if (this._frameQueries.length > 0) {
this._gl.endQuery(this._ext.TIME_ELAPSED_EXT);
}
// allocate new query and begin
var query = this._allocateQuery();
query[0] = name;
this._gl.beginQuery(this._ext.TIME_ELAPSED_EXT, query[1]);
this._frameQueries.push(query);
},
// end of frame: close the last open query and queue this frame's
// queries for later resolution
end: function () {
if (!this.enabled) {
return;
}
this._gl.endQuery(this._ext.TIME_ELAPSED_EXT);
this._frames.push(this._frameQueries);
this._frameQueries = [];
},
// check if the gpu has been interrupted thereby invalidating all
// in-flight queries
_checkDisjoint: function () {
var disjoint = this._gl.getParameter(this._ext.GPU_DISJOINT_EXT);
if (disjoint) {
// return all queries to the free list
this._freeQueries = [this._frames, [this._frameQueries], [this._freeQueries]].flat(2);
this._frameQueries = [];
this._frames = [];
}
},
// either returns a previously free'd query or if there aren't any allocates a new one
// (a query is a [name, WebGLQuery] pair)
_allocateQuery: function () {
return (this._freeQueries.length > 0) ?
this._freeQueries.splice(-1, 1)[0] : ["", this._gl.createQuery()];
},
// attempt to resolve one frame's worth of timings; returns false if the
// results are not yet available
_resolveFrameTimings: function (frame, timings) {
// wait for the last query in the frame to be available
// (queries complete in order, so checking the last suffices)
if (!this._gl.getQueryParameter(frame[frame.length - 1][1], this._gl.QUERY_RESULT_AVAILABLE)) {
return false;
}
for (var i = 0; i < frame.length; ++i) {
// QUERY_RESULT is in nanoseconds; * 0.000001 converts to milliseconds
timings[i] = [frame[i][0], this._gl.getQueryParameter(frame[i][1], this._gl.QUERY_RESULT) * 0.000001];
}
return true;
}
});
// Expose just the millisecond values of the last resolved frame.
Object.defineProperty(GpuTimer.prototype, 'timings', {
get: function () {
return this._timings.map(function (v) {
return v[1];
});
}
});
export { GpuTimer };
|
import subprocess
def heroku_deploy(app_name: str) -> None:
    """Push and release a Heroku container for the given app.

    Uses argument lists instead of shell=True so app_name is never
    interpreted by a shell (command-injection hardening). Note: if the
    heroku binary is missing this now raises FileNotFoundError rather
    than failing inside the shell.
    """
    push_command = ["heroku", "container:push", "web", "--app", app_name]
    release_command = ["heroku", "container:release", "web", "--app", app_name]
    try:
        subprocess.run(push_command, check=True)
        subprocess.run(release_command, check=True)
        print(f"Deployment to Heroku for app '{app_name}' successful.")
    except subprocess.CalledProcessError as e:
        print(f"Error occurred during deployment: {e}")
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2015:1634
#
# Security announcement date: 2015-08-17 15:33:32 UTC
# Script generation date: 2017-01-01 21:11:33 UTC
#
# Operating System: CentOS 6
# Architecture: i386
#
# Vulnerable packages fix on version:
# - lemon.i686:3.6.20-1.el6_7.2
# - sqlite.i686:3.6.20-1.el6_7.2
# - sqlite-devel.i686:3.6.20-1.el6_7.2
# - sqlite-doc.i686:3.6.20-1.el6_7.2
# - sqlite-tcl.i686:3.6.20-1.el6_7.2
#
# Last versions recommended by security team:
# - lemon.i686:3.6.20-1.el6_7.2
# - sqlite.i686:3.6.20-1.el6_7.2
# - sqlite-devel.i686:3.6.20-1.el6_7.2
# - sqlite-doc.i686:3.6.20-1.el6_7.2
# - sqlite-tcl.i686:3.6.20-1.el6_7.2
#
# CVE List:
# - CVE-2015-3416
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

# Install the full name-version-release.arch spec so yum resolves exactly
# the fixed builds listed above (the previous "pkg.i686-3.6.20" form is
# not a valid yum package specification).
sudo yum install lemon-3.6.20-1.el6_7.2.i686 -y
sudo yum install sqlite-3.6.20-1.el6_7.2.i686 -y
sudo yum install sqlite-devel-3.6.20-1.el6_7.2.i686 -y
sudo yum install sqlite-doc-3.6.20-1.el6_7.2.i686 -y
sudo yum install sqlite-tcl-3.6.20-1.el6_7.2.i686 -y
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script executes all hive metastore upgrade scripts on an specific
# database server in order to verify that upgrade scripts are working
# properly.
# This script is run on jenkins only, and it creates some LXC containers
# in order to execute the metastore-upgrade-tests for different
# server configurations.
# Echo every command to the Jenkins console for debugging.
set -x
. jenkins-common.sh

# Required Jenkins job parameters; fail fast when any is missing.
test -n "$BRANCH" || fail "BRANCH must be specified"
test -n "$ISSUE_NUM" || fail "ISSUE_NUM must be specified"
test -n "$SSH_HOST" || fail "SSH_HOST must be specified"
test -n "$SSH_KEY" || fail "SSH_KEY must be specified"
test -n "$BUILD_TAG" || fail "BUILD_TAG must be specified"
test -n "$WORKSPACE" || WORKSPACE="$HOME"

export JIRA_NAME="HIVE-${ISSUE_NUM}"
export ROOT=$PWD
export JIRA_ROOT_URL="https://issues.apache.org"
# Strip the leading "jenkins-" prefix from the tag.
export BUILD_TAG="${BUILD_TAG##jenkins-}"

process_jira

# Jenkins may call this script with BRANCH=trunk to refer to SVN.
# We use git here, so we change to master.
[[ "$BRANCH" = "trunk" ]] && BRANCH="master"

# "SHELL_VAR=jsonKey" pairs consumed by create_publish_file below.
PUBLISH_VARS=(
BUILD_STATUS=buildStatus
BUILD_TAG=buildTag
LOGS_URL=logsURL
JENKINS_URL=jenkinsURL
PATCH_URL=patchUrl
JIRA_NAME=jiraName
JIRA_URL=jiraUrl
BRANCH=branch
REPO=repository
REPO_NAME=repositoryName
REPO_TYPE=repositoryType
TESTS_EXECUTED=numTestsExecuted
FAILED_TESTS=failedTests
MESSAGES=messages
)

# Defaults; updated as the remote test run progresses.
BUILD_STATUS=0
PATCH_URL="${JIRA_ROOT_URL}${PATCH_URL}"
TESTS_EXECUTED=0
FAILED_TESTS=()
MESSAGES=()

PROFILE_PROPERTIES_FILE="/usr/local/hiveptest/etc/public/${BUILD_PROFILE}.properties"
[[ -f $PROFILE_PROPERTIES_FILE ]] || fail "$PROFILE_PROPERTIES_FILE file does not exist."
# Look up key "$1" in the profile properties file ("key = value" lines) and
# print the value — the third whitespace-separated field of matching lines.
# Note "$1" is used as a grep regex anchored at line start, so restyling
# this pipeline would risk changing matching semantics.
profile_properties_get() {
grep "^$1 = " "$PROFILE_PROPERTIES_FILE" | awk '{print $3}'
}
REPO_TYPE=$(profile_properties_get "repositoryType")
# Fixed: these two lookups were swapped. PUBLISH_VARS maps REPO to the
# "repository" JSON key and REPO_NAME to "repositoryName".
REPO=$(profile_properties_get "repository")
REPO_NAME=$(profile_properties_get "repositoryName")
JIRA_URL=$(profile_properties_get "jiraUrl")
# Avoid showing this information on Jira
set +x
JIRA_USER=$(profile_properties_get "jiraUser")
JIRA_PASS=$(profile_properties_get "jiraPassword")
set -x
JENKINS_URL=$(profile_properties_get "jenkinsURL")
LOGS_URL=$(profile_properties_get "logsURL")
# Clone the requested branch and build the ptest2 tool used to publish
# results to JIRA. Returns non-zero when any step fails.
build_ptest2() {
    local path="$1"
    local curpath="$PWD"

    # Ensure parent directories exist, then start from a clean clone.
    # All expansions are quoted so paths with spaces cannot word-split
    # (especially important ahead of rm -rf).
    test -d "$path" || mkdir -p "$path"
    rm -rf "$path"
    git clone --depth 1 -b $BRANCH https://github.com/apache/hive.git "$path/" || return 1
    # Guard the cd: without it a failed clone layout would run mvn in the
    # wrong directory.
    cd "$path/testutils/ptest2" || return 1
    mvn clean package -B -DskipTests -Drat.numUnapprovedLicenses=1000 -Dmaven.repo.local="$WORKSPACE/.m2" || return 1
    cd "$curpath"
}
# Publish the JSON results file to JIRA via the ptest2 JIRAService tool.
# $1 - path of the JSON file produced by create_publish_file.
publish_results() {
local file="$1"
build_ptest2 "hive/build" || return 1
# Avoid showing this information on Jira
# (set +x / set -x bracket the credential-bearing command)
set +x
java -cp "hive/build/testutils/ptest2/target/*:hive/build/testutils/ptest2/target/lib/*" org.apache.hive.ptest.execution.JIRAService \
--user "$JIRA_USER" \
--password "$JIRA_PASS" \
--file "$file"
set -x
}
# Append a `"key" : "value"` JSON member (no trailing newline) to file $1.
add_json_object() {
    printf '"%s" : "%s"' "$2" "$3" >> "$1"
}
# Append a `"key" : [ values ]` JSON member (no trailing newline) to file $1.
add_json_array() {
    printf '"%s" : [ %s ]' "$2" "$3" >> "$1"
}
# Build a temporary JSON file from the PUBLISH_VARS ("VAR=jsonKey") mapping
# and print its path. FAILED_TESTS and MESSAGES are emitted as JSON arrays,
# everything else as string members.
create_publish_file() {
  local json_file=$(mktemp)
  local arr_length=${#PUBLISH_VARS[@]}
  vars_added=0

  echo "{" > "$json_file"
  for i in "${PUBLISH_VARS[@]}"
  do
    var=${i%=*}
    key=${i#*=}
    val=

    # Treat this as an array
    if [[ $var = "FAILED_TESTS" ]]; then
      if [[ ${#FAILED_TESTS[@]} -gt 0 ]]; then
        val=$(printf "\"%s\"," "${FAILED_TESTS[@]}")
        val=${val%?}   # drop the trailing comma
      fi
      add_json_array "$json_file" "$key" "$val"
    elif [[ $var = "MESSAGES" ]]; then
      if [[ ${#MESSAGES[@]} -gt 0 ]]; then
        val=$(printf "\"%s\"," "${MESSAGES[@]}")
        val=${val%?}
      fi
      add_json_array "$json_file" "$key" "$val"
    else
      val=${!var}
      # Fixed: the expansions below were unquoted, so multi-word values
      # word-split into extra arguments and empty values vanished entirely.
      add_json_object "$json_file" "$key" "$val"
    fi

    vars_added=$((vars_added+1))
    if [[ $vars_added -lt $arr_length ]]; then
      echo "," >> "$json_file"
    else
      echo >> "$json_file"
    fi
  done
  echo "}" >> "$json_file"

  echo "$json_file"
}
# Main flow: only runs when the patch touches the metastore upgrade scripts.
# Clones hive on the remote host, applies the patch, runs the LXC-based
# upgrade tests, collects the log, and publishes the summary to JIRA.
if patch_contains_hms_upgrade "$PATCH_URL"; then
  ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $SSH_KEY $SSH_HOST "
    rm -rf hive/ &&
    git clone --depth 1 -b $BRANCH https://github.com/apache/hive.git &&
    cd hive/ &&
    curl ${PATCH_URL} | bash -x testutils/ptest2/src/main/resources/smart-apply-patch.sh - &&
    sudo bash -x testutils/metastore/execute-test-on-lxc.sh --patch \"${PATCH_URL}\" --branch $BRANCH
  "
  BUILD_STATUS=$?

  if [[ $BUILD_STATUS = 0 ]]; then
    tmp_test_log="/tmp/execute-test-on-lxc.sh.log"
    scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $SSH_KEY ${SSH_HOST}:"$tmp_test_log" "$tmp_test_log" || exit 1

    # grep -c replaces the useless 'cat | grep | wc -l' pipeline.
    TESTS_EXECUTED=$(grep -c "Executing sql test" "$tmp_test_log")

    while read line
    do
      if echo "$line" | grep 'Test failed' > /dev/null; then
        FAILED_TESTS+=("$line")
      elif echo "$line" | grep 'Executing sql test' >/dev/null; then
        # Remove 'Executing sql test' line from MESSAGES log to avoid a verbose
        # comment on JIRA
        continue
      fi
      MESSAGES+=("$line")
    done < "$tmp_test_log"

    rm "$tmp_test_log"
  fi

  json_file=$(create_publish_file)
  publish_results "$json_file"
  ret=$?
  rm "$json_file"
  exit $ret
fi
|
'use strict';

/**
 * Module dependencies.
 */
var mongoose = require('mongoose'),
Schema = mongoose.Schema;

/**
 * Nap Schema
 *
 * One non-aggression pact ('NAP') or alliance ('Assi') record for a clan.
 */
var NapSchema = new Schema({
// Clan display name (mandatory).
clanname: {
type: String,
default: '',
required: 'Please fill clanname'
},
// Optional short clan tag.
clantag: {
type: String,
default: ''
},
// Pact type, restricted to the two known kinds; defaults to NAP.
typ: {
type: [{
type: String,
enum: ['NAP', 'Assi']
}],
default: ['NAP'],
required: 'Please provide NAP-Type'
},
// Free-form 'ts' field — NOTE(review): meaning (timestamp? TeamSpeak?) is
// not derivable from this file; confirm against the UI that populates it.
ts: {
type: String,
default: '',
},
// Clan leader name (mandatory).
leader: {
type: String,
default: '',
required: 'Please fill leader'
},
// Optional free-form reason/notes.
reason: {
type: String,
default: '',
},
// Creation timestamp, set automatically at insert time.
created: {
type: Date,
default: Date.now
},
// Reference to the creating user.
user: {
type: Schema.ObjectId,
ref: 'User'
}
});

mongoose.model('Nap', NapSchema);
|
<reponame>mcanlas/doodle<gh_stars>0
package doodle
package core
package transform
import org.scalatest._
import org.scalatest.prop.Checkers
// Property-based checks that Transform's primitive transforms agree with
// their analytic definitions on arbitrary points. ~= is approximate
// equality (from approximatelyEqual syntax), needed for floating point.
class TransformSpec extends FlatSpec with Matchers with Checkers {
import doodle.arbitrary._
import doodle.syntax.approximatelyEqual._

"scale" should "scale the x and y coordinates appropriately" in {
check { (scale: Scale, point: Point) =>
val expected = Point.cartesian(point.x * scale.x, point.y * scale.y)
Transform.scale(scale.x, scale.y)(point) ~= expected
}
}

"rotate" should "rotate the point" in {
check{ (rotate: Rotate, point: Point) =>
val expected = point.rotate(rotate.angle)
Transform.rotate(rotate.angle)(point) ~= expected
}
}

// Composition order matters: translate is applied first, then rotate.
"andThen" should "compose the transformations" in {
check{ (translate: Translate, rotate: Rotate, point: Point) =>
val expected =
Point.cartesian(point.x + translate.x, point.y + translate.y).rotate(rotate.angle)
val tx =
Transform.translate(translate.x, translate.y).andThen(Transform.rotate(rotate.angle))
tx(point) ~= expected
}
}
}
|
def lorentzian(x, amplitude, x0, sigma, background):
    """Lorentzian (Cauchy) peak on a constant background.

    amplitude:  peak height at x == x0
    x0:         peak centre
    sigma:      half-width parameter
    background: constant offset added everywhere
    """
    u = (x - x0) / sigma
    return background + amplitude / (1 + u * u)
<html>
<head>
<title>My Table</title>
</head>
<body>
<!-- Header-only table: data rows (<tr> with <td> cells) are absent here —
     presumably appended by a script or server-side template; confirm. -->
<table>
<tr>
<th>Name</th>
<th>Age</th>
<th>City</th>
</tr>
</table>
</body>
</html>
/**
 * Demo: bubble-sorts an int array, prints it, then indexes the sorted values
 * in a hash map and prints the map's keys.
 */
public class ArrayOptimizer {
    public static void main(String[] args) {
        int[] array = {1, 7, 3, 2, 0, 4, 8, 5};
        // Sort the array (simple exchange sort; fine for a tiny demo array).
        int temp = 0;
        for (int i = 0; i < array.length; i++) {
            for (int j = i + 1; j < array.length; j++) {
                if (array[i] > array[j]) {
                    temp = array[i];
                    array[i] = array[j];
                    array[j] = temp;
                }
            }
        }
        System.out.println("Sorted array:");
        for (int i = 0; i < array.length; i++) {
            System.out.print(array[i] + ", ");
        }
        System.out.println();
        System.out.println("Optimized array: ");
        // Optimization using hash map mapping value -> index. Fully qualified
        // names are used because this file has no import statements, so the
        // original did not compile.
        java.util.Map<Integer, Integer> map = new java.util.HashMap<>();
        for (int i = 0; i < array.length; i++) {
            map.put(array[i], i);
        }
        // Printing out the optimized array.
        // NOTE(review): HashMap iteration order is unspecified, so this print
        // is not guaranteed to come out sorted — confirm whether sorted output
        // was intended (LinkedHashMap would preserve insertion order).
        java.util.Iterator<Integer> arrayIter = map.keySet().iterator();
        while (arrayIter.hasNext()) {
            System.out.print(arrayIter.next() + ", ");
        }
    }
}
package com.nortal.spring.cw.core.web.exception;
import com.nortal.spring.cw.core.exception.AppBaseRuntimeException;
/**
* @author <NAME>
* @since 25.02.2013
*/
@SuppressWarnings("serial")
public class WrongQueryException extends AppBaseRuntimeException {
    /**
     * @param errorMessageCode code passed to the base exception — presumably
     *        resolved elsewhere to the user-facing message text; confirm
     *        against AppBaseRuntimeException
     * @param messageArgs positional arguments forwarded with the code
     */
    public WrongQueryException(String errorMessageCode, Object... messageArgs) {
        super(errorMessageCode, messageArgs);
    }
}
|
/**
* Copyright 2019 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {Services} from '../../../src/services';
/**
 * Used for tracking whether or not an item is near the viewport. This is set
 * from the IntersectionObserver and consumed by a flush.
 */
const NEAR_VIEWPORT_FLAG = '__AMP_CAROUSEL_NEAR_VIEWPORT';
/**
 * Used for tracking whether or not an item is in the viewport. This is set
 * from the IntersectionObserver and consumed by a flush.
 */
const IN_VIEWPORT_FLAG = '__AMP_CAROUSEL_IN_VIEWPORT';
/**
 * The value for having no margin for intersection. That is, the item must
 * intersect the intersection root itself.
 */
const NO_INTERSECTION_MARGIN = '0%';
/**
 * The default margin around the scrolling element. This is a percentage of the
 * width of the element.
 */
const DEFAULT_NEARBY_MARGIN = 100;
/**
 * Additional margin before something is unlaidout. This is a percentage of the
 * width of the element. This is used to avoid a rapid back and forth between
 * layout and unlayout at the threshold of the nearby margin.
 */
const UNLAYOUT_MARGIN = 10;
/**
 * What percentage of an Element must intersect before being considered as
 * visible. This defaults to 1%, as the value for zero is not intuitive. For
 * example, a horizontal scrolling container that has full width items will
 * intersect on the second element if you have an intersection margin/threshold
 * of 0, since the left edge of the second element "intersects" with the
 * scrolling container, even though none of the element is visible (not even 1
 * pixel).
 *
 * While the spec says a value of "0" means "any non-zero number of pixels",
 * there are no pixels actually visible at this threshold.
 */
const DEFAULT_INTERSECTION_THRESHOLD = 0.01;
/**
 * States reported as an item enters or leaves the viewport.
 * @enum {number}
 */
const ViewportChangeState = {
ENTER: 0,
LEAVE: 1,
};
/**
 * Manages scheduling layout/unlayout for children of an AMP component as they
 * intersect with the AMP component. The parent AMP component should notify the
 * manager as its own layout state changes so that the children can be updated
 * accordingly.
 *
 * Note: For Safari 12, this does not schedule layout for slides until they
 * enter the viewport, since `rootMargin` on `IntersectionObserver` is not
 * properly handled.
 *
 * Usage:
 *
 * Parent element:
 * ```
 * constructor() {
 *   this.childLayoutManager = new ChildLayoutManager({
 *     ampElement: this,
 *   });
 * }
 *
 * buildCallback() {
 *   // Call this each time the effective children you want to manage change.
 *   this.childLayoutManager.updateChildren(children);
 * }
 *
 * layoutCallback() {
 *   this.childLayoutManager.wasLaidOut();
 * }
 *
 * unlayoutCallback() {
 *   this.childLayoutManager.wasUnlaidOut();
 * }
 * ```
 */
export class ChildLayoutManager {
  /**
   * Creates a ChildLayoutManager for a given intersection container.
   *
   * Note: If the children live within a scrolling container, the
   * intersectionElement must be the scrolling container, and not an
   * ancestor in order for `nearbyMarginInPercent` to work.
   * @param {{
   *   ampElement: !AMP.BaseElement,
   *   intersectionElement: !Element,
   *   intersectionThreshold: (number|undefined),
   *   nearbyMarginInPercent: (number|undefined),
   *   queueChanges: (boolean|undefined),
   *   viewportIntersectionThreshold: (number|undefined),
   *   viewportIntersectionCallback: (function(!Element, boolean)|undefined)
   * }} config
   */
  constructor({
    ampElement,
    intersectionElement,
    intersectionThreshold = DEFAULT_INTERSECTION_THRESHOLD,
    nearbyMarginInPercent = DEFAULT_NEARBY_MARGIN,
    queueChanges = false,
    viewportIntersectionThreshold = intersectionThreshold,
    viewportIntersectionCallback = () => {},
  }) {
    /** @private @const */
    this.ampElement_ = ampElement;

    /** @private @const */
    this.owners_ = Services.ownersForDoc(ampElement.element);

    /** @private @const */
    this.intersectionElement_ = intersectionElement;

    /** @private @const */
    this.intersectionThreshold_ = intersectionThreshold;

    /** @private @const */
    this.nearbyMarginInPercent_ = nearbyMarginInPercent;

    /** @private @const Whether intersection changes are held until a flush. */
    this.queueChanges_ = queueChanges;

    /** @private @const */
    this.viewportIntersectionThreshold_ = viewportIntersectionThreshold;

    /** @private @const */
    this.viewportIntersectionCallback_ = viewportIntersectionCallback;

    /** @private {!IArrayLike<!Element>} */
    this.children_ = [];

    /** @private {?IntersectionObserver} Lazily created in setup_(). */
    this.nearingViewportObserver_ = null;

    /** @private {?IntersectionObserver} Lazily created in setup_(). */
    this.backingAwayViewportObserver_ = null;

    /** @private {?IntersectionObserver} Lazily created in setup_(). */
    this.inViewportObserver_ = null;

    /** @private {boolean} */
    this.laidOut_ = false;

    /** @private {boolean} */
    this.flushNextNearingViewportChanges_ = false;

    /** @private {boolean} */
    this.flushNextBackingAwayViewportChanges_ = false;

    /** @private {boolean} */
    this.flushNextInViewportChanges_ = false;
  }

  /**
   * Schedules layout (when intersecting) or unlayout (when not) for a child.
   * @param {!Element} target
   * @param {boolean} isIntersecting
   */
  triggerLayout_(target, isIntersecting) {
    if (isIntersecting) {
      // TODO(sparhami) do we want to delay the layout for the farther
      // away elements? Do we want schedule preload farther away elements?
      this.owners_.scheduleLayout(this.ampElement_.element, target);
    } else {
      this.owners_./*OK */ scheduleUnlayout(this.ampElement_.element, target);
    }
  }

  /**
   * Updates a child's in-viewport state and notifies the configured callback.
   * @param {!Element} target
   * @param {boolean} isIntersecting
   */
  triggerVisibility_(target, isIntersecting) {
    this.owners_.updateInViewport(
      this.ampElement_.element,
      target,
      isIntersecting
    );
    this.viewportIntersectionCallback_(target, isIntersecting);
  }

  /**
   * Sets up for intersection monitoring, creating IntersectionObserver
   * instances for doing layout as well as those that are actually visible.
   *
   * We set up separate observers for layout and unlayout. When the element is
   * near to the viewport, we trigger layout. However, we have some extra
   * buffer space before triggering unlayout, to prevent cycling between the
   * two on the threshold, which can cause problems in Safari.
   */
  setup_() {
    // Already created; setup_ is idempotent.
    if (
      this.nearingViewportObserver_ &&
      this.backingAwayViewportObserver_ &&
      this.inViewportObserver_
    ) {
      return;
    }

    const {win} = this.ampElement_;

    this.nearingViewportObserver_ = new win.IntersectionObserver(
      entries => this.processNearingChanges_(entries),
      {
        root: this.intersectionElement_,
        rootMargin: `${this.nearbyMarginInPercent_}%`,
        threshold: this.intersectionThreshold_,
      }
    );

    // Wider margin than the nearing observer: see the class comment about
    // avoiding layout/unlayout thrash at the boundary.
    this.backingAwayViewportObserver_ = new win.IntersectionObserver(
      entries => this.processBackingAwayChanges_(entries),
      {
        root: this.intersectionElement_,
        rootMargin: `${this.nearbyMarginInPercent_ + UNLAYOUT_MARGIN}%`,
        threshold: this.intersectionThreshold_,
      }
    );

    this.inViewportObserver_ = new win.IntersectionObserver(
      entries => this.processInViewportChanges_(entries),
      {
        root: this.intersectionElement_,
        rootMargin: NO_INTERSECTION_MARGIN,
        threshold: this.viewportIntersectionThreshold_,
      }
    );
  }

  /**
   * Processes the intersection entries for things nearing the viewport,
   * marking them and applying the changes if needed.
   * @param {!Array<!IntersectionObserverEntry>} entries
   */
  processNearingChanges_(entries) {
    entries
      .filter(({isIntersecting}) => isIntersecting)
      .forEach(({target}) => {
        target[NEAR_VIEWPORT_FLAG] = ViewportChangeState.ENTER;
      });

    if (!this.queueChanges_ || this.flushNextNearingViewportChanges_) {
      this.flushNearingViewportChanges_();
      this.flushNextNearingViewportChanges_ = false;
    }
  }

  /**
   * Processes the intersection entries for things backing away from viewport,
   * marking them and applying the changes if needed.
   * @param {!Array<!IntersectionObserverEntry>} entries
   */
  processBackingAwayChanges_(entries) {
    entries
      .filter(({isIntersecting}) => !isIntersecting)
      .forEach(({target}) => {
        target[NEAR_VIEWPORT_FLAG] = ViewportChangeState.LEAVE;
      });

    if (!this.queueChanges_ || this.flushNextBackingAwayViewportChanges_) {
      this.flushBackingAwayViewportChanges_();
      this.flushNextBackingAwayViewportChanges_ = false;
    }
  }

  /**
   * Processes the intersection entries for things in the viewport,
   * marking them and applying the changes if needed.
   * @param {!Array<!IntersectionObserverEntry>} entries
   */
  processInViewportChanges_(entries) {
    entries.forEach(({target, isIntersecting}) => {
      target[IN_VIEWPORT_FLAG] = isIntersecting
        ? ViewportChangeState.ENTER
        : ViewportChangeState.LEAVE;
    });

    if (!this.queueChanges_ || this.flushNextInViewportChanges_) {
      this.flushInViewportChanges_();
      this.flushNextInViewportChanges_ = false;
    }
  }

  /**
   * Flush all intersection changes previously picked up.
   */
  flushChanges() {
    this.flushNearingViewportChanges_();
    this.flushBackingAwayViewportChanges_();
    this.flushInViewportChanges_();
  }

  /**
   * Flush changes for things nearing the viewport.
   */
  flushNearingViewportChanges_() {
    for (let i = 0; i < this.children_.length; i++) {
      const child = this.children_[i];
      if (child[NEAR_VIEWPORT_FLAG] == ViewportChangeState.ENTER) {
        this.triggerLayout_(child, true);
        // Clear the flag so the change is applied only once.
        child[NEAR_VIEWPORT_FLAG] = null;
      }
    }
  }

  /**
   * Flush changes for things backing away from the viewport.
   */
  flushBackingAwayViewportChanges_() {
    for (let i = 0; i < this.children_.length; i++) {
      const child = this.children_[i];
      if (child[NEAR_VIEWPORT_FLAG] == ViewportChangeState.LEAVE) {
        this.triggerLayout_(child, false);
        // Clear the flag so the change is applied only once.
        child[NEAR_VIEWPORT_FLAG] = null;
      }
    }
  }

  /**
   * Flush changes for things in the viewport.
   */
  flushInViewportChanges_() {
    for (let i = 0; i < this.children_.length; i++) {
      const child = this.children_[i];
      if (child[IN_VIEWPORT_FLAG] == ViewportChangeState.ENTER) {
        this.triggerVisibility_(child, true);
      } else if (child[IN_VIEWPORT_FLAG] == ViewportChangeState.LEAVE) {
        this.triggerVisibility_(child, false);
      }
      child[IN_VIEWPORT_FLAG] = null;
    }
  }

  /**
   * Starts or stops observing the current children with all three observers.
   * @param {boolean} observe Whether or not the parent element is laid out.
   */
  monitorChildren_(observe) {
    // TODO(sparhami) Load a polyfill for browsers that do not support it? We
    // currently just rely on Resource's periodic scan if we do not have
    // IntersectionObserver. This means slides may get loaded later than might
    // be ideal.
    if (!('IntersectionObserver' in this.ampElement_.win)) {
      return;
    }

    this.setup_();

    // Simply disconnect, in case the children have changed, we can make sure
    // everything is detached.
    if (!observe) {
      this.nearingViewportObserver_.disconnect();
      this.backingAwayViewportObserver_.disconnect();
      this.inViewportObserver_.disconnect();
      return;
    }

    for (let i = 0; i < this.children_.length; i++) {
      this.nearingViewportObserver_.observe(this.children_[i]);
      this.backingAwayViewportObserver_.observe(this.children_[i]);
      this.inViewportObserver_.observe(this.children_[i]);
    }
  }

  /**
   * Updates the children that should have their layout managed. Should be
   * called whenever the children change.
   * @param {!IArrayLike<!Element>} children
   */
  updateChildren(children) {
    this.children_ = children;

    if (!('IntersectionObserver' in this.ampElement_.win)) {
      return;
    }

    for (let i = 0; i < this.children_.length; i++) {
      this.owners_.setOwner(this.children_[i], this.ampElement_.element);
    }

    // Update the layout state to false so that we stop observing (and holding
    // on to a reference for) any children that stopped existing.
    this.monitorChildren_(false);
    this.monitorChildren_(this.laidOut_);
  }

  /**
   * This should be called from the `layoutCallback` of the AMP element that
   * constructed this manager.
   */
  wasLaidOut() {
    this.laidOut_ = true;
    this.monitorChildren_(this.laidOut_);

    // Make sure we flush the next changes from the IntersectionObservers,
    // regardless if queuing was requested, since things were just laid out.
    this.flushNextNearingViewportChanges_ = true;
    this.flushNextBackingAwayViewportChanges_ = true;
    this.flushNextInViewportChanges_ = true;
  }

  /**
   * This should be called from the `unlayoutCallback` of the AMP element that
   * constructed this manager.
   */
  wasUnlaidOut() {
    this.laidOut_ = false;
    this.monitorChildren_(this.laidOut_);

    // Explicitly unlayout/hide every child since the observers were just
    // disconnected and will not deliver LEAVE entries themselves.
    for (let i = 0; i < this.children_.length; i++) {
      this.triggerLayout_(this.children_[i], false);
      this.triggerVisibility_(this.children_[i], false);
    }
  }
}
|
public class TopThree {

    /**
     * Returns the three largest values of {@code arr} in descending order in
     * a single pass. Duplicate values may appear more than once in the result.
     * If the array has fewer than three elements, the remaining slots stay
     * {@code Integer.MIN_VALUE}.
     *
     * BUG FIX: the original seeded all three maxima with {@code arr[0]}, which
     * produced wrong results whenever {@code arr[0]} was among the largest
     * values (e.g. {9, 1, 2} reported "9, 9, 9" instead of "9, 2, 1").
     *
     * @param arr input values (may be empty)
     * @return array of {max1, max2, max3}
     */
    public static int[] topThree(int[] arr) {
        int max1 = Integer.MIN_VALUE, max2 = Integer.MIN_VALUE, max3 = Integer.MIN_VALUE;
        for (int value : arr) {
            if (value > max1) {
                // New overall maximum: shift the previous two down.
                max3 = max2;
                max2 = max1;
                max1 = value;
            } else if (value > max2) {
                max3 = max2;
                max2 = value;
            } else if (value > max3) {
                max3 = value;
            }
        }
        return new int[]{max1, max2, max3};
    }

    public static void main(String[] args) {
        int[] arr = {4, 5, 8, 1, 2, 9, 6, 3};
        int[] top = topThree(arr);
        System.out.println("Top three elements: " + top[0] + ", " + top[1] + ", " + top[2]);
    }
}
# NOTE: This script expects environment variables to be passed for the Salesforce
# credentials to the orgs: feature, master, packaging, beta, prod such as...
#   SF_USERNAME_FEATURE
#   SF_PASSWORD_MASTER
#   SF_SERVERURL_PACKAGING
# NOTE(review): CI_BRANCH, CI_BUILD_NUMBER and CI_COMMIT_ID are presumably
# injected by the CI system -- confirm against the pipeline configuration.

# Setup variables for branch naming conventions using env overrides if set
if [ "$MASTER_BRANCH" == "" ]; then
    MASTER_BRANCH='master'
fi
if [ "$PREFIX_FEATURE" == "" ]; then
    PREFIX_FEATURE='feature/'
fi
if [ "$PREFIX_BETA" == "" ]; then
    PREFIX_BETA='beta/'
fi
if [ "$PREFIX_PROD" == "" ]; then
    PREFIX_PROD='prod/'
fi

# Determine build type and setup Salesforce credentials.
# The unquoted right-hand side of [[ == ]] makes the PREFIX_* checks
# prefix (glob) matches, not literal comparisons.
if [[ $CI_BRANCH == $MASTER_BRANCH ]]; then
    BUILD_TYPE='master'
elif [[ $CI_BRANCH == $PREFIX_FEATURE* ]]; then
    BUILD_TYPE='feature'
elif [[ $CI_BRANCH == $PREFIX_BETA* ]]; then
    BUILD_TYPE='beta'
elif [[ $CI_BRANCH == $PREFIX_PROD* ]]; then
    BUILD_TYPE='prod'
fi

if [ "$BUILD_TYPE" == "" ]; then
    echo "BUILD FAILED: Could not determine BUILD_TYPE for $CI_BRANCH"
    exit 1
fi

echo "Building $CI_BRANCH as a $BUILD_TYPE build"

# Run the build for the build type

# Master branch commit, build and test a beta managed package
if [ $BUILD_TYPE == "master" ]; then
    # Get org credentials from env
    export SF_USERNAME=$SF_USERNAME_PACKAGING
    export SF_PASSWORD=$SF_PASSWORD_PACKAGING
    export SF_SERVERURL=$SF_SERVERURL_PACKAGING
    echo "Got org credentials for packaging org from env"

    # Deploy to packaging org
    echo "Running ant deployCIPackageOrg"
    ant deployCIPackageOrg

    # Upload beta package: extract the managed package name from
    # cumulusci.properties, then hand off to the Python upload script.
    export PACKAGE=`grep cumulusci.package.name.managed cumulusci.properties | sed -e 's/cumulusci.package.name.managed=//g'`
    export BUILD_NAME="$PACKAGE Build $CI_BUILD_NUMBER"
    export BUILD_WORKSPACE=`pwd`
    export BUILD_COMMIT="$CI_COMMIT_ID"
    pip install --upgrade selenium
    pip install --upgrade requests
    python lib/package_upload.py

    # Test beta

    # Retry if package is unavailable

    # Create GitHub Release

    # Add release notes

    # Merge master commit to all open feature branches

# Feature branch commit, build and test in local unmanaged package
elif [ $BUILD_TYPE == "feature" ]; then
    # Get org credentials from env
    export SF_USERNAME=$SF_USERNAME_FEATURE
    export SF_PASSWORD=$SF_PASSWORD_FEATURE
    export SF_SERVERURL=$SF_SERVERURL_FEATURE
    echo "Got org credentials for feature org from env"

    # Deploy to feature org
    echo "Running ant deployCI"
    ant deployCI

# Beta tag build, do nothing
elif [ $BUILD_TYPE == "beta" ]; then
    echo "Nothing to do for a beta tag"

# Prod tag build, deploy and test in packaging org
elif [ $BUILD_TYPE == "prod" ]; then
    # Get org credentials from env
    export SF_USERNAME=$SF_USERNAME_PACKAGING
    export SF_PASSWORD=$SF_PASSWORD_PACKAGING
    export SF_SERVERURL=$SF_SERVERURL_PACKAGING
    echo "Got org credentials for packaging org from env"

    # Deploy to packaging org
    echo "Running ant deployCIPackageOrg"
    ant deployCIPackageOrg
fi
|
<reponame>Alexlogvin2019/find2
/*
* (c) Copyright 2015 Micro Focus or one of its affiliates.
*
* Licensed under the MIT License (the "License"); you may not use this file
* except in compliance with the License.
*
* The only warranties for products and services of Micro Focus and its affiliates
* and licensors ("Micro Focus") are as may be set forth in the express warranty
* statements accompanying such products and services. Nothing herein should be
* construed as constituting an additional warranty. Micro Focus shall not be
* liable for technical or editorial errors or omissions contained herein. The
* information contained herein is subject to change without notice.
*/
package com.hp.autonomy.frontend.find.hod.view;
import com.hp.autonomy.frontend.find.core.view.ViewController;
import com.hp.autonomy.frontend.find.core.web.ControllerUtils;
import com.hp.autonomy.frontend.find.core.web.ErrorModelAndViewInfo;
import com.hp.autonomy.hod.client.api.authentication.HodAuthenticationFailedException;
import com.hp.autonomy.hod.client.api.resource.ResourceName;
import com.hp.autonomy.hod.client.error.HodErrorException;
import com.hp.autonomy.searchcomponents.hod.view.HodViewRequest;
import com.hp.autonomy.searchcomponents.hod.view.HodViewRequestBuilder;
import com.hp.autonomy.searchcomponents.hod.view.HodViewServerService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.ObjectFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.NoSuchMessageException;
import org.springframework.http.HttpStatus;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.servlet.ModelAndView;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * Serves document-view requests against Haven OnDemand (HOD) and translates
 * HOD failures into user-facing error pages. ("iod" in the message keys is
 * the legacy name for HOD.)
 */
@Controller
@RequestMapping(ViewController.VIEW_PATH)
@Slf4j
class HodViewController extends ViewController<HodViewRequest, ResourceName, HodErrorException> {
    // Message-bundle keys for the various error pages.
    private static final String HOD_ERROR_MESSAGE_CODE_PREFIX = "error.iodErrorCode.";
    private static final String HOD_ERROR_MESSAGE_CODE_MAIN = "error.iodErrorMain";
    private static final String HOD_ERROR_MESSAGE_CODE_SUB = "error.iodErrorSub";
    private static final String HOD_ERROR_MESSAGE_CODE_SUB_NULL = "error.iodErrorSubNull";
    private static final String HOD_ERROR_MESSAGE_CODE_TOKEN_EXPIRED = "error.iodTokenExpired";
    private static final String HOD_ERROR_MESSAGE_CODE_INTERNAL_MAIN = "error.internalServerErrorMain";
    private static final String HOD_ERROR_MESSAGE_CODE_INTERNAL_SUB = "error.internalServerErrorSub";
    private static final String HOD_ERROR_MESSAGE_CODE_UNKNOWN = "error.unknownError";

    private final ControllerUtils controllerUtils;

    @SuppressWarnings("TypeMayBeWeakened")
    @Autowired
    public HodViewController(final HodViewServerService viewServerService,
                             final ObjectFactory<HodViewRequestBuilder> viewRequestBuilderFactory,
                             final ControllerUtils controllerUtils) {
        super(viewServerService, viewRequestBuilderFactory);
        this.controllerUtils = controllerUtils;
    }

    /**
     * Maps a {@link HodErrorException} to an error page, localizing the HOD
     * error code when a message exists for it and falling back to a generic
     * message otherwise.
     */
    @SuppressWarnings("TypeMayBeWeakened")
    @ExceptionHandler
    public ModelAndView handleHodErrorException(
            final HodErrorException e,
            final HttpServletRequest request,
            final HttpServletResponse response
    ) {
        // Discard anything already written to the response before rendering
        // the error view.
        response.reset();

        log.error("HodErrorException thrown while viewing document", e);

        final String errorKey = HOD_ERROR_MESSAGE_CODE_PREFIX + e.getErrorCode();
        String hodErrorMessage;

        try {
            hodErrorMessage = controllerUtils.getMessage(errorKey, null);
        } catch (final NoSuchMessageException ignored) {
            // we don't have a key in the bundle for this error code
            hodErrorMessage = controllerUtils.getMessage(HOD_ERROR_MESSAGE_CODE_UNKNOWN, null);
        }

        // Server-side HOD failures become 500s; everything else is treated as
        // a bad request.
        final int errorCode = e.isServerError() ? HttpServletResponse.SC_INTERNAL_SERVER_ERROR : HttpServletResponse.SC_BAD_REQUEST;

        final String subMessageCode;
        final Object[] subMessageArgs;

        if (hodErrorMessage != null) {
            subMessageCode = HOD_ERROR_MESSAGE_CODE_SUB;
            subMessageArgs = new String[]{hodErrorMessage};
        } else {
            subMessageCode = HOD_ERROR_MESSAGE_CODE_SUB_NULL;
            subMessageArgs = null;
        }

        response.setStatus(errorCode);

        return controllerUtils.buildErrorModelAndView(new ErrorModelAndViewInfo.Builder()
                .setRequest(request)
                .setMainMessageCode(HOD_ERROR_MESSAGE_CODE_MAIN)
                .setSubMessageCode(subMessageCode)
                .setSubMessageArguments(subMessageArgs)
                .setStatusCode(errorCode)
                .setContactSupport(true)
                .setException(e)
                .build());
    }

    /**
     * Maps an authentication failure (e.g. an expired token) to a 403 error
     * page.
     */
    @SuppressWarnings("TypeMayBeWeakened")
    @ExceptionHandler
    public ModelAndView hodAuthenticationFailedException(
            final HodAuthenticationFailedException e,
            final HttpServletRequest request,
            final HttpServletResponse response
    ) {
        response.reset();
        response.setStatus(HttpServletResponse.SC_FORBIDDEN);

        log.error("HodAuthenticationFailedException thrown while viewing document", e);

        return controllerUtils.buildErrorModelAndView(new ErrorModelAndViewInfo.Builder()
                .setRequest(request)
                .setMainMessageCode(HOD_ERROR_MESSAGE_CODE_MAIN)
                .setSubMessageCode(HOD_ERROR_MESSAGE_CODE_TOKEN_EXPIRED)
                .setStatusCode(HttpServletResponse.SC_FORBIDDEN)
                .setAuthError(true)
                .build());
    }

    /**
     * Catch-all handler: any other exception becomes a generic 500 error
     * page.
     */
    @SuppressWarnings("TypeMayBeWeakened")
    @ExceptionHandler
    @ResponseStatus(HttpStatus.INTERNAL_SERVER_ERROR)
    public ModelAndView handleGeneralException(
            final Exception e,
            final HttpServletRequest request,
            final ServletResponse response
    ) {
        response.reset();

        return controllerUtils.buildErrorModelAndView(new ErrorModelAndViewInfo.Builder()
                .setRequest(request)
                .setMainMessageCode(HOD_ERROR_MESSAGE_CODE_INTERNAL_MAIN)
                .setSubMessageCode(HOD_ERROR_MESSAGE_CODE_INTERNAL_SUB)
                .setStatusCode(HttpServletResponse.SC_INTERNAL_SERVER_ERROR)
                .setContactSupport(true)
                .setException(e)
                .build());
    }
}
|
#!/bin/bash
# Downloads, builds and installs cd-hit into $TARGET/bin (default /kb/runtime/bin).

dest=${TARGET-/kb/runtime}
echo "using $dest as installation directory";
# BUG FIX: create the bin/ subdirectory too -- the cp below targets
# $dest/bin, which the original never created.
mkdir -p "$dest/bin"

# download version
# NOTE(review): googlecode.com downloads have been retired -- this URL
# presumably no longer resolves; confirm and switch to the GitHub mirror.
VERSION='v4.6.1-2012-08-27'
rm -rf cd-hit-${VERSION}*
wget "https://cdhit.googlecode.com/files/cd-hit-${VERSION}.tgz"
tar -zxvf cd-hit-${VERSION}.tgz

# compile and copy binaries
pushd cd-hit-${VERSION}
make
cp cd-hit cd-hit-est cd-hit-2d cd-hit-est-2d cd-hit-div cd-hit-454 "$dest/bin"
popd
rm -rf cd-hit-${VERSION}*
|
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -o errexit
set -o nounset
set -o pipefail

# Resolve paths relative to this script's location.
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")
PROJECT_ROOT="$SCRIPT_ROOT/.."
GENERATED_ROOT="$PROJECT_ROOT/.generated"
PKG_NAME="github.com/apache/apisix-ingress-controller"

# Make sure no pollution
rm -rf "$GENERATED_ROOT"

# Generate deepcopy/clientset/informer/lister code for the config CRD groups.
bash "${SCRIPT_ROOT}"/generate-groups.sh "deepcopy,client,informer,lister" \
    ${PKG_NAME}/pkg/kube/apisix/client ${PKG_NAME}/pkg/kube/apisix/apis \
    config:v2beta3,v2beta2,v2beta1 ${PKG_NAME} \
    --output-base "$GENERATED_ROOT" \
    --go-header-file "${SCRIPT_ROOT}"/boilerplate.go.txt \
    "$@"

# Generate deepcopy code only for the shared types package.
bash "${SCRIPT_ROOT}"/generate-groups.sh "deepcopy" \
    ${PKG_NAME}/pkg/types ${PKG_NAME}/pkg/types \
    apisix:v1 ${PKG_NAME} \
    --output-base "$GENERATED_ROOT" \
    --go-header-file "${SCRIPT_ROOT}"/boilerplate.go.txt \
    "$@"

# Generate scheme registration code for all config API versions.
bash "${SCRIPT_ROOT}"/generate-groups.sh "register" \
    ${PKG_NAME}/pkg/kube/apisix/apis ${PKG_NAME}/pkg/kube/apisix/apis \
    config:v2beta3,v2beta2,v2beta1,v1 ${PKG_NAME} \
    --output-base "$GENERATED_ROOT" \
    --go-header-file "${SCRIPT_ROOT}"/boilerplate.go.txt \
    "$@"

# Copy the generated sources back into the tree, then clean up the staging dir.
cp -r "$GENERATED_ROOT/${PKG_NAME}/"** "$PROJECT_ROOT"
rm -rf "$GENERATED_ROOT"
|
<gh_stars>1-10
T = int(input())  # number of test cases to process
def dfs(graph, start):
    """Iterative depth-first search from ``start``.

    Args:
        graph: adjacency structure where ``graph[node]`` yields the
            neighbours of ``node``; nodes are assumed to be integers in
            ``1..len(graph)`` (index 0 of the result is unused).
        start: node to begin the traversal from.

    Returns:
        A list of booleans of length ``len(graph) + 1`` where
        ``visited[n]`` is True iff ``n`` is reachable from ``start``.
    """
    candidates = [start]
    visited = [False] * (len(graph) + 1)
    # BUG FIX: the original never marked the start node, so visited[start]
    # stayed False unless a cycle happened to lead back to it.
    visited[start] = True
    while candidates:
        curr = candidates.pop()
        for node in graph[curr]:
            if not visited[node]:
                # Mark on push (not on pop) so a node is queued at most once.
                visited[node] = True
                candidates.append(node)
    return visited
for _ in range(T):
    G = int(input())
    # BUG FIX: the original called dfs() with no arguments, which raises
    # TypeError, and never built a graph from the input at all. The input
    # format is not recoverable from this snippet; assuming G is the node
    # count and building an edgeless graph over nodes 1..G until the
    # format is confirmed (TODO: parse the edge list per test case).
    graph = {node: [] for node in range(1, G + 1)}
    dfs(graph, 1)
|
<reponame>h4ckh0use/webapp<filename>src/components/SignupPage.js<gh_stars>0
import React, { useState } from 'react'
import { useHistory } from 'react-router-dom'
import styled from 'styled-components'
import Button from './Button'
import { CirclePicker } from 'react-color'
// Page heading for the signup form.
const SignupTitle = styled.h1`
font-family: Roboto Mono;
margin: 0.6em 0 0.3em 0;
`
// Centered column layout that slides in from the right on mount.
const Wrapper = styled.div`
@keyframes slidein {
from {
margin-right: 100px;
opacity: 0;
}
to {
margin-right: 0;
opacity: 1;
}
}
animation-duration: 1s;
animation-timing-function: ease;
animation-name: slidein;
display: flex;
flex-direction: column;
align-items: center;
`
const StyledButton = styled(Button)`
width: 90px;
`
// Rounded, transparent text input used for the name field.
const StyledInput = styled.input`
width: 200px;
display: block;
margin: 10px !important;
background: none !important;
color: white;
font-weight: bold;
border: 3px solid white !important;
border-radius: 100px !important;
&&:focus {
outline: none;
box-shadow: 0px 0px 4px #ffffff;
}
padding: 10px !important;
`
// Swatches offered by the CirclePicker.
const colorsArray = [
'#c01701',
'#ea7f00',
'#ffd84e',
'#2fea00',
'#237f18',
'#1125da',
'#f5d8ff',
'#53fdd7',
'#e0d9ff',
'#e752be',
'#7a7a7a',
'#ffffff',
]
// Maps each picker swatch to a [primary, secondary] colour pair used for the
// character (the secondary is a darker shade of the primary).
const colorsMap = {
'#c01701': ['#c01701', '#770A39'],
'#ea7f00': ['#ea7f00', '#B14100'],
'#ffd84e': ['#ffd84e', '#c38d37'],
'#2fea00': ['#2fea00', '#2DA82A'],
'#237f18': ['#237f18', '#134D29'],
'#1125da': ['#1125da', '#070B93'],
'#f5d8ff': ['#f5d8ff', '#EBB6FE'],
'#53fdd7': ['#53fdd7', '#35A7C0'],
'#e0d9ff': ['#e0d9ff', '#D1C6FF'],
'#e752be': ['#e752be', '#A728B3'],
'#7a7a7a': ['#7a7a7a', '#363232'],
'#ffffff': ['#D4DFF0', '#8394C2'],
}
/**
 * Two-step signup flow: step one collects a display name, step two picks a
 * character colour, then the user is sent to the room.
 *
 * NOTE(review): relies on globals `window.user` and `window.ws` being set up
 * elsewhere in the app -- confirm the websocket is connected before this
 * component can submit.
 */
export default function SignupPage(props) {
  // [primary, secondary] colour pair; defaults to the red swatch.
  const [color, setColor] = useState(['#C01701', '#760B39'])
  const [name, setName] = useState('')
  // false = name entry step, true = colour picker step.
  const [stepTwo, setStepTwo] = useState(false)
  let history = useHistory()

  // Translate the picker's hex into the pair and notify the parent.
  const handleColorChange = ({ hex }) => {
    setColor(colorsMap[hex])
    props.colourCallback(colorsMap[hex])
  }

  const handleChange = (e) => {
    setName(e.target.value)
  }

  // Only advance to step two once a name has been entered.
  const onNext = () => {
    name && setStepTwo(true)
  }

  const handleKeyDown = (event) => {
    if (event.key === 'Enter') {
      setStepTwo(true)
    }
  }

  // Persist the name, navigate to the room and announce the new user.
  const onSubmit = () => {
    if (name) {
      window.user = name
      window.localStorage.setItem('user', name)
      history.push('/room')
      window.ws.send(JSON.stringify({ newUser: true, username: name, color }))
      window.ws.send(JSON.stringify({ broadcast: true, message: `${name} joined the room!` }))
    }
  }

  return (
    <Wrapper>
      <SignupTitle>Join</SignupTitle>
      {stepTwo ? (
        <>
          <p>Chose your character colour</p>
          <CirclePicker colors={colorsArray} onChangeComplete={handleColorChange} />
          <StyledButton onClick={() => onSubmit()}>Join</StyledButton>
        </>
      ) : (
        <>
          <StyledInput
            onKeyDown={handleKeyDown}
            placeholder="enter your name"
            onChange={(e) => handleChange(e)}
            type="text"
            value={name}
          />
          <StyledButton onClick={() => onNext()}>Next</StyledButton>
        </>
      )}
    </Wrapper>
  )
}
|
# Out-of-tree Release build: configure and build from ../cmake.linux,
# then return to the BuildScript directory.
# BUG FIX: the original ended with a stray trailing "|" after the final cd,
# which left the shell waiting for a pipeline command (syntax error).
cd ..
cd ./cmake.linux/
cmake .. -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release
make
cd ..
cd BuildScript
/** @ignore */ /** */
// Barrel module: re-exports the individual string utility types so consumers
// can import them all from this single entry point.
export { At } from './At';
export { Join } from './Join';
export { Length } from './Length';
export { Replace } from './Replace';
export { Split } from './Split';
|
# Application Builder Configuration File. Customized for: unpackerr
# Each line must have an export clause.
# This file is parsed and sourced by the Makefile, Docker and Homebrew builds.
# Powered by Application Builder: https://golift.io/application-builder

# Bring in dynamic repo/pull/source info (defines VERSION, used below).
source $(dirname "${BASH_SOURCE[0]}")/init/buildinfo.sh

# Must match the repo name to make things easy. Otherwise, fix some other paths.
BINARY="unpackerr"
REPO="unpackerr"
# github username
GHUSER="davidnewhall"
# Github repo containing homebrew formula repo.
HBREPO="golift/homebrew-mugs"
# Package metadata used by the linux/homebrew packaging targets.
MAINT="David Newhall II <david at sleepers dot pro>"
VENDOR="Go Lift"
DESC="Extracts downloads so Radarr, Sonarr, Lidarr or Readarr may import them."
# Lint flags: all linters enabled except dupl and exhaustivestruct.
GOLANGCI_LINT_ARGS="--enable-all -D dupl -D exhaustivestruct"
# Example must exist at examples/$CONFIG_FILE.example
CONFIG_FILE="unpackerr.conf"
LICENSE="MIT"
# FORMULA is either 'service' or 'tool'. Services run as a daemon, tools do not.
# This affects the homebrew formula (launchd) and linux packages (systemd).
FORMULA="service"

# Used for source links and wiki links.
SOURCE_URL="https://github.com/${GHUSER}/${REPO}/"
# Used for documentation links.
URL="${SOURCE_URL}"

# This parameter is passed in as -X to go build. Used to override the Version variable in a package.
# Name the Version-containing library the same as the github repo, without dashes.
# The Makefile appends .Version and a few other variables for build-time data. See golift.io/version.
VERSION_PATH="golift.io/version"

# Used by homebrew downloads, references VERSION which comes from buildinfo.sh.
SOURCE_PATH=https://golift.io/${REPO}/archive/v${VERSION}.tar.gz

# Use upx to compress binaries. Must install upx. apt/yum/brew install upx
COMPRESS=true

# Export everything so the Makefile/Docker/Homebrew consumers can read it.
export BINARY GHUSER HBREPO MAINT VENDOR DESC GOLANGCI_LINT_ARGS CONFIG_FILE
export LICENSE FORMULA SOURCE_URL URL VERSION_PATH SOURCE_PATH COMPRESS
|
#!/bin/bash -ex
APP_PATH="/app"
LOGS_PATH="/logs"
ENVVARS="${APP_PATH}/.env"
DOCKERCOMPOSE="${APP_PATH}/docker-compose.yml"
CERTS="${APP_PATH}/certs"
PFX="${CERTS}/tls.pfx"
CERT="${CERTS}/tls.crt"
PKEY="${CERTS}/tls.key"
UNSAFE="false"
ADMIN=$USER
REGISTRY_PREFIX=
# ========================================================================
HOST_NAME="localhost"
PCS_LOG_LEVEL="Info"
PCS_WEBUI_AUTH_TYPE="aad"
PCS_APPLICATION_SECRET=$(cat /dev/urandom | LC_CTYPE=C tr -dc 'a-zA-Z0-9-,./;:[]\(\)_=^!~' | fold -w 64 | head -n 1)
while [ "$#" -gt 0 ]; do
case "$1" in
--hostname) HOST_NAME="$2" ;;
--registry-prefix) REGISTRY_PREFIX="$2" ;;
--admin) ADMIN="$2" ;;
--log-level) PCS_LOG_LEVEL="$2" ;;
--unsafe) UNSAFE="$2" ;;
--iothub-name) PCS_IOTHUBREACT_HUB_NAME="$2" ;;
--iothub-endpoint) PCS_IOTHUBREACT_HUB_ENDPOINT="$2" ;;
--iothub-consumer-group) PCS_IOTHUBREACT_HUB_CONSUMERGROUP="$2" ;;
--iothub-connstring) PCS_IOTHUB_CONNSTRING="$2" ;;
--azureblob-account) PCS_IOTHUBREACT_AZUREBLOB_ACCOUNT="$2" ;;
--azureblob-key) PCS_IOTHUBREACT_AZUREBLOB_KEY="$2" ;;
--azureblob-endpoint-suffix) PCS_IOTHUBREACT_AZUREBLOB_ENDPOINT_SUFFIX="$2" ;;
--docdb-connstring) PCS_STORAGEADAPTER_DOCUMENTDB_CONNSTRING="$2" ;;
--ssl-certificate) PCS_CERTIFICATE="$2" ;;
--ssl-certificate-key) PCS_CERTIFICATE_KEY="$2" ;;
--auth-audience) PCS_AUTH_AUDIENCE="$2" ;;
--auth-type) PCS_WEBUI_AUTH_TYPE="$2" ;;
--aad-appid) PCS_WEBUI_AUTH_AAD_APPID="$2" ;;
--aad-tenant) PCS_WEBUI_AUTH_AAD_TENANT="$2" ;;
--aad-instance) PCS_WEBUI_AUTH_AAD_INSTANCE="$2" ;;
--aad-appsecret) PCS_APPLICATION_SECRET="$2" ;;
--release-version) PCS_RELEASE_VERSION="$2" ;;
--evenhub-connstring) PCS_EVENTHUB_CONNSTRING="$2" ;;
--eventhub-name) PCS_EVENTHUB_NAME="$2" ;;
esac
shift
done
# ========================================================================
apt-get update
apt-get remove -y docker docker-engine docker.io
apt-get autoremove -y
apt-get install -y --no-install-recommends apt-transport-https ca-certificates curl software-properties-common openssl
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
apt-key fingerprint 0EBFCD88
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt-get update
apt-get install -y --no-install-recommends docker-ce
usermod -aG docker $USER
usermod -aG docker $ADMIN
curl -L "https://github.com/docker/compose/releases/download/1.22.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# ========================================================================
PCS_AUTH_ISSUER="https://sts.windows.net/${PCS_WEBUI_AUTH_AAD_TENANT}/"
# Configure Docker registry based on host name
# ToDo: we may need to add similar parameter to AzureGermanCloud and AzureUSGovernment
config_for_azure_china() {
set +e
local host_name=$1
if (echo $host_name | grep -c "\.cn$") ; then
# If the host name has .cn suffix, dockerhub in China will be used to avoid slow network traffic failure.
local config_file='/etc/docker/daemon.json'
echo "{\"registry-mirrors\": [\"https://registry.docker-cn.com\"]}" > ${config_file}
service docker restart
# Rewrite the AAD issuer in Azure China environment
PCS_AUTH_ISSUER="https://sts.chinacloudapi.cn/${PCS_WEBUI_AUTH_AAD_TENANT}/"
fi
set -e
}
config_for_azure_china $HOST_NAME $5
# ========================================================================
# Configure SSH to not use weak HostKeys, algorithms, ciphers and MAC algorithms.
# Comment out the option if exists or ignore it.
switch_off() {
local key=$1
local value=$2
local config_path=$3
sed -i "s~#*$key\s*$value~#$key $value~g" $config_path
}
# Change existing option if found or append specified key value pair.
switch_on() {
local key=$1
local value=$2
local config_path=$3
grep -q "$key" $config_path && sed -i -e "s/$key.*/$key $value/g" $config_path || sed -i -e "\$a$key $value" $config_path
}
# Harden sshd: comment out weak (DSA/ECDSA) host keys, pin modern key
# exchange, cipher and MAC suites, then restart sshd to apply.
# $1 (optional): path to sshd_config; defaults to the system file.
config_ssh() {
local config_path="${1:-/etc/ssh/sshd_config}"
# Disable DSA and ECDSA host keys (commented out if present).
switch_off 'HostKey' '/etc/ssh/ssh_host_dsa_key' $config_path
switch_off 'HostKey' '/etc/ssh/ssh_host_ecdsa_key' $config_path
# Force strong KEX, ciphers and MACs (rewrite existing lines or append).
switch_on 'KexAlgorithms' 'curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256' $config_path
switch_on 'Ciphers' 'chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr' $config_path
switch_on 'MACs' 'hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-ripemd160,umac-128@openssh.com' $config_path
service ssh restart
}
config_ssh
# ========================================================================
# Install application files, extract TLS material from the supplied PFX
# bundle, and write the env file consumed by docker-compose.
mkdir -p ${LOGS_PATH}
chmod ugo+rX ${LOGS_PATH}
mkdir -p ${APP_PATH}
chmod ugo+rX ${APP_PATH}
cp -f docker-compose.yml ${APP_PATH}/docker-compose.yml
cp -f nginx.conf ${APP_PATH}/nginx.conf
cp -f setup.sh ${APP_PATH}/setup.sh
cp -f ctrl.sh ${APP_PATH}/ctrl.sh
cd ${APP_PATH}
touch docker-compose.yml && chmod 644 docker-compose.yml
touch nginx.conf && chmod 644 nginx.conf
chmod 755 ctrl.sh
chmod 755 setup.sh
mkdir -p ${CERTS}
# Always have quotes around the pfx to preserve the formatting
echo "${PCS_CERTIFICATE}" | base64 --decode > ${PFX}
# Split the PFX into a public cert and an unencrypted private key.
# NOTE(review): the passphrase is passed on the openssl command line and is
# briefly visible in the process table -- consider '-passin env:VAR'.
openssl pkcs12 -in ${PFX} -clcerts -nokeys -out ${CERT} -passin pass:${PCS_CERTIFICATE_KEY}
openssl pkcs12 -in ${PFX} -nocerts -nodes -out ${PKEY} -passin pass:${PCS_CERTIFICATE_KEY}
touch ${CERT} && chmod 444 ${CERT}
touch ${PKEY} && chmod 444 ${PKEY}
# The PFX is no longer needed once cert and key are extracted.
rm -f ${PFX}
# ========================================================================
# Environment variables
# Recreate the env file from scratch on every run.
rm -f ${ENVVARS}
touch ${ENVVARS} && chmod 644 ${ENVVARS}
echo "HOST_NAME=${HOST_NAME}" >> ${ENVVARS}
echo "PCS_AUTH_HTTPSREDIRECTPORT=0" >> ${ENVVARS}
echo "PCS_AUTH_ISSUER=${PCS_AUTH_ISSUER}" >> ${ENVVARS}
echo "PCS_AUTH_AUDIENCE=${PCS_AUTH_AUDIENCE}" >> ${ENVVARS}
echo "PCS_WEBUI_AUTH_AAD_TENANT=${PCS_WEBUI_AUTH_AAD_TENANT}" >> ${ENVVARS}
echo "PCS_WEBUI_AUTH_AAD_APPID=${PCS_WEBUI_AUTH_AAD_APPID}" >> ${ENVVARS}
echo "PCS_WEBUI_AUTH_AAD_INSTANCE=${PCS_WEBUI_AUTH_AAD_INSTANCE}" >> ${ENVVARS}
# REACT_APP_* entries duplicate the same values for the web UI build.
echo "REACT_APP_PCS_AUTH_ISSUER=${PCS_AUTH_ISSUER}" >> ${ENVVARS}
echo "REACT_APP_PCS_AUTH_AUDIENCE=${PCS_AUTH_AUDIENCE}" >> ${ENVVARS}
echo "REACT_APP_PCS_WEBUI_AUTH_AAD_TENANT=${PCS_WEBUI_AUTH_AAD_TENANT}" >> ${ENVVARS}
echo "REACT_APP_PCS_WEBUI_AUTH_AAD_APPID=${PCS_WEBUI_AUTH_AAD_APPID}" >> ${ENVVARS}
echo "REACT_APP_PCS_WEBUI_AUTH_AAD_INSTANCE=${PCS_WEBUI_AUTH_AAD_INSTANCE}" >> ${ENVVARS}
echo "PCS_IOTHUB_CONNSTRING=${PCS_IOTHUB_CONNSTRING}" >> ${ENVVARS}
echo "PCS_STORAGEADAPTER_DOCUMENTDB_CONNSTRING=${PCS_STORAGEADAPTER_DOCUMENTDB_CONNSTRING}" >> ${ENVVARS}
# NOTE(review): telemetry and telemetry-agent reuse the storage-adapter
# DocumentDB connection string -- presumably one shared account; confirm.
echo "PCS_TELEMETRY_DOCUMENTDB_CONNSTRING=${PCS_STORAGEADAPTER_DOCUMENTDB_CONNSTRING}" >> ${ENVVARS}
echo "PCS_TELEMETRYAGENT_DOCUMENTDB_CONNSTRING=${PCS_STORAGEADAPTER_DOCUMENTDB_CONNSTRING}" >> ${ENVVARS}
echo "PCS_IOTHUBREACT_ACCESS_CONNSTRING=${PCS_IOTHUB_CONNSTRING}" >> ${ENVVARS}
echo "PCS_IOTHUBREACT_HUB_NAME=${PCS_IOTHUBREACT_HUB_NAME}" >> ${ENVVARS}
echo "PCS_IOTHUBREACT_HUB_ENDPOINT=${PCS_IOTHUBREACT_HUB_ENDPOINT}" >> ${ENVVARS}
echo "PCS_IOTHUBREACT_HUB_CONSUMERGROUP=${PCS_IOTHUBREACT_HUB_CONSUMERGROUP}" >> ${ENVVARS}
echo "PCS_IOTHUBREACT_AZUREBLOB_ACCOUNT=${PCS_IOTHUBREACT_AZUREBLOB_ACCOUNT}" >> ${ENVVARS}
echo "PCS_IOTHUBREACT_AZUREBLOB_KEY=${PCS_IOTHUBREACT_AZUREBLOB_KEY}" >> ${ENVVARS}
echo "PCS_IOTHUBREACT_AZUREBLOB_ENDPOINT_SUFFIX=${PCS_IOTHUBREACT_AZUREBLOB_ENDPOINT_SUFFIX}" >> ${ENVVARS}
# ASA (Stream Analytics) blob settings mirror the IoTHub-React blob account.
echo "PCS_ASA_DATA_AZUREBLOB_ACCOUNT=${PCS_IOTHUBREACT_AZUREBLOB_ACCOUNT}" >> ${ENVVARS}
echo "PCS_ASA_DATA_AZUREBLOB_KEY=${PCS_IOTHUBREACT_AZUREBLOB_KEY}" >> ${ENVVARS}
echo "PCS_ASA_DATA_AZUREBLOB_ENDPOINT_SUFFIX=${PCS_IOTHUBREACT_AZUREBLOB_ENDPOINT_SUFFIX}" >> ${ENVVARS}
echo "PCS_EVENTHUB_CONNSTRING=${PCS_EVENTHUB_CONNSTRING}" >> ${ENVVARS}
echo "PCS_EVENTHUB_NAME=${PCS_EVENTHUB_NAME}" >> ${ENVVARS}
echo "PCS_APPLICATION_SECRET=${PCS_APPLICATION_SECRET}" >> ${ENVVARS}
echo "PCS_LOG_LEVEL=${PCS_LOG_LEVEL}" >> ${ENVVARS}
echo "PCS_RELEASE_VERSION=${PCS_RELEASE_VERSION}" >> ${ENVVARS}
echo "_HUB_CS=${PCS_IOTHUB_CONNSTRING}" >> ${ENVVARS}
# Optional private registry override for service/module images.
if [ -z "$REGISTRY_PREFIX" ]; then
echo -e "Deploying from default registry."
else
echo -e "Using registry prefix ${REGISTRY_PREFIX}."
echo "SERVICES_REPOSITORY=${REGISTRY_PREFIX}" >> ${ENVVARS}
echo "MODULES_REPOSITORY=${REGISTRY_PREFIX}" >> ${ENVVARS}
fi
# ========================================================================
# UNSAFE mode disables auth and opens CORS to everyone -- dev/test only.
if [[ "$UNSAFE" == "true" ]]; then
echo -e "${COL_ERR}WARNING! Starting services in UNSAFE mode!${COL_NO}"
# Disable Auth
# Allow cross-origin requests from anywhere
echo "PCS_AUTH_REQUIRED=false" >> ${ENVVARS}
echo "REACT_APP_PCS_AUTH_REQUIRED=false" >> ${ENVVARS}
echo "PCS_CORS_WHITELIST={ 'origins': ['*'], 'methods': ['*'], 'headers': ['*'] }" >> ${ENVVARS}
echo "REACT_APP_PCS_CORS_WHITELIST={ 'origins': ['*'], 'methods': ['*'], 'headers': ['*'] }" >> ${ENVVARS}
else
echo "PCS_AUTH_REQUIRED=true" >> ${ENVVARS}
echo "REACT_APP_PCS_AUTH_REQUIRED=true" >> ${ENVVARS}
echo "PCS_CORS_WHITELIST=" >> ${ENVVARS}
echo "REACT_APP_PCS_CORS_WHITELIST=" >> ${ENVVARS}
fi
# Hand the app tree to the admin user and launch the stack.
chown -R $ADMIN ${APP_PATH}
cd ${APP_PATH}
./ctrl.sh --start
|
<gh_stars>0
// Generated file. DO NOT MODIFY IT BY HAND.
// NOTE(review): the comment fixes below (e.g. the "lenght" typo) should also
// be applied to the generator's source schema, since this file is regenerated.
/**
 * Request payload for an Amplitude group event; group_type and group_value
 * are the only required fields.
 */
export interface Payload {
/**
 * A UUID (unique user ID) specified by you. **Note:** If you send a request with a user ID that is not in the Amplitude system yet, then the user tied to that ID will not be marked new until their first event. Required unless device ID is present.
 */
user_id?: string | null
/**
 * A device specific identifier, such as the Identifier for Vendor (IDFV) on iOS. Required unless user ID is present.
 */
device_id?: string
/**
 * Amplitude will deduplicate subsequent events sent with this ID if it has already seen the ID within the past 7 days. Amplitude recommends generating a UUID or using some combination of device ID, user ID, event type, event ID, and time.
 */
insert_id?: string
/**
 * The timestamp of the event. If time is not sent with the event, it will be set to the request upload time.
 * NOTE(review): typed as string here, although Amplitude's HTTP API documents
 * event time as epoch milliseconds -- confirm the generator intends a string.
 */
time?: string
/**
 * Additional data tied to the group in Amplitude.
 */
group_properties?: {
[k: string]: unknown
}
/**
 * Type of the group
 */
group_type: string
/**
 * Value of the group
 */
group_value: string
/**
 * Amplitude has a default minimum id length of 5 characters for the user_id and device_id fields. This field allows the minimum to be overridden to allow shorter id lengths.
 */
min_id_length?: number | null
}
|
<gh_stars>0
// Vehicle "color" property: maps the register's single-character colour codes
// (column "vari"; presumably Finnish "väri" = colour -- TODO confirm) to
// localized labels in Finnish (fi), Swedish (sv) and English (en).
// NOTE(review): CarProperty is not imported in this file -- presumably an
// ambient/global type declaration; verify.
const property: CarProperty = {
name: 'color',
columnName: 'vari',
// Keys are the raw register codes: digits 0-9 plus X, Y, Z.
valueLabels: {
0: { fi: 'Musta', sv: 'Svart', en: 'Black' },
1: { fi: 'Ruskea (beige)', sv: 'Brun (beige)', en: 'Brown (beige)' },
2: { fi: 'Punainen', sv: 'Röd', en: 'Red' },
3: { fi: 'Oranssi', sv: 'Orange', en: 'Orange' },
4: { fi: 'Keltainen', sv: 'Gul', en: 'Yellow' },
5: { fi: 'Vihreä', sv: 'Grön', en: 'Green' },
6: { fi: 'Sininen', sv: 'Blå', en: 'Blue' },
7: { fi: 'Violetti', sv: 'Violett', en: 'Violet' },
8: { fi: 'Harmaa', sv: 'Grå', en: 'Grey' },
9: { fi: 'Valkoinen', sv: 'Vit', en: 'White' },
X: { fi: 'Monivär.', sv: 'Flerfärg', en: 'Multi-coloured' },
Y: { fi: 'Hopea', sv: 'Silver', en: 'Silver' },
Z: { fi: 'Turkoosi', sv: 'Turkos', en: 'Turquoise' },
},
// 'Proportions' presumably selects the visualization/aggregation style --
// confirm against the CarProperty type definition.
type: 'Proportions',
};
// CommonJS-style single export (consumed via `import x = require(...)`).
export = property;
|
/**
 * Certificate Transparency Utilities
 * Test SignedCertificateTimestamp
 *
 * By <NAME> <<EMAIL>>
 */
require('babel-polyfill');
const assert = require('assert');
const fs = require('fs');
const pvutils = require('pvutils');
const CTUtils = require('..');
const WebCrypto = require('node-webcrypto-ossl');
const webcrypto = new WebCrypto();
CTUtils.setWebCrypto(webcrypto);
// CT log ID fixture (base64-decoded into an ArrayBuffer).
const logId = pvutils.stringToArrayBuffer(pvutils.fromBase64(
  'pFASaQVaFVReYhGrN7wQP2KuVXakXksXFEU+GyIQaiU='));
// Log public key fixture (placeholder redacted in this dataset copy).
const pubKey = pvutils.stringToArrayBuffer(pvutils.fromBase64(
  '<KEY>' +
  '<KEY>'));
// Known-good encoded SCT; used as the expected binary form in the tests.
const sctBin = new Uint8Array(pvutils.stringToArrayBuffer(pvutils.fromBase64(
  'AKRQEmkFWhVUXmIRqze8ED9irlV2pF5LFxRFPhsiEGolAA<KEY>AAAQDAEcwRQIgIFztN' +
  'JNfxaRdUWEwPRWwD5S2GfvijTgvPbVC2wBHOLkCIQCYr3u/5yXhKCOkj4Jm79DK3YouXxzpOg' +
  'S45xjWB6SJOg==')));
// Raw SCT signature bytes.
const sig = pvutils.stringToArrayBuffer(pvutils.fromBase64(
  '<KEY>' +
  'mbv0Mrdii5fHOk6BLjnGNYHpIk6'));
// FIX: copy the certificate via the Uint8Array constructor instead of the
// manual element-by-element loop. A Node Buffer is a Uint8Array, so this
// copies the same bytes; it also guarantees cert.buffer (used below) is an
// exact-size ArrayBuffer rather than a slice of Node's pooled slab.
const certBuffer = fs.readFileSync('test/cert.der');
const cert = new Uint8Array(certBuffer);
// Mocha suite: round-trip encoding, decoding and signature verification of
// SignedCertificateTimestamp against the fixtures defined above.
// NOTE(review): the toBinary test passes cert.buffer (ArrayBuffer) while the
// verify tests pass cert (Uint8Array) -- presumably the constructor accepts
// both; confirm.
describe('SignedCertificateTimestamp', () => {
describe('#toBinary()', () => {
it('should encode correctly', () => {
// Build an SCT from fixture fields and compare the encoding
// byte-for-byte against the known-good sctBin blob.
const sct = new CTUtils.SignedCertificateTimestamp(CTUtils.Version.v1,
logId, 1518094243621, new ArrayBuffer(0), sig,
CTUtils.LogEntryType.x509_entry, cert.buffer);
const sctVerify = new Uint8Array(sct.toBinary());
assert.equal(sctVerify.length, sctBin.length, 'Incorrect encoded length');
for(let i = 0; i < sctBin.length; i++)
assert.equal(sctVerify[i], sctBin[i], `Failed at offset ${i}`);
});
});
describe('#fromBinary()', () => {
it('should decode correctly', () => {
// Decode the known-good blob and check every field round-trips.
const sct = CTUtils.SignedCertificateTimestamp.fromBinary(sctBin.buffer);
assert.equal(sct.version, CTUtils.Version.v1, 'Incorrect version');
const logIdVerifyView = new Uint8Array(sct.logId);
const logIdView = new Uint8Array(logId);
assert.equal(logIdVerifyView.length, logIdView.length,
'Incorrect logId length');
for(let i = 0; i < logIdVerifyView.length; i++)
assert.equal(logIdVerifyView[i], logIdView[i],
`Failed logId at offset ${i}`);
assert.equal(sct.timestamp, 1518094243621, 'Incorrect timestamp');
// Extensions are expected to be empty for this fixture.
const extensionsVerifyView = new Uint8Array(sct.extensions);
const extensionsView = new Uint8Array([]);
assert.equal(extensionsVerifyView.length, extensionsView.length,
'Incorrect extensions length');
for(let i = 0; i < extensionsVerifyView.length; i++)
assert.equal(extensionsVerifyView[i], extensionsView[i],
`Failed extensions at offset ${i}`);
const signatureVerifyView = new Uint8Array(sct.signature);
const signatureView = new Uint8Array(sig);
assert.equal(signatureVerifyView.length, signatureView.length,
'Incorrect signature length');
for(let i = 0; i < signatureVerifyView.length; i++)
assert.equal(signatureVerifyView[i], signatureView[i],
`Failed signature at offset ${i}`);
});
});
describe('#verify()', () => {
it('should verify correct SignedCertificateTimestamp with public key',
() => {
const sct = new CTUtils.SignedCertificateTimestamp(CTUtils.Version.v1,
logId, 1518094243621, new ArrayBuffer(0), sig,
CTUtils.LogEntryType.x509_entry, cert);
// verify() is async; returning the promise lets mocha await it.
return sct.verify(pubKey).then((res) => {
assert.equal(res, true, 'Cannot verify');
});
});
it('should verify correct SignedCertificateTimestamp with CTLog', () => {
const sct = new CTUtils.SignedCertificateTimestamp(CTUtils.Version.v1,
logId, 1518094243621, new ArrayBuffer(0), sig,
CTUtils.LogEntryType.x509_entry, cert);
const log = new CTUtils.CTLog('ct.googleapis.com/pilot/', pubKey);
return sct.verify(log).then((res) => {
assert.equal(res, true, 'Cannot verify');
});
});
it('should detect incorrect SignedCertificateTimestamp', () => {
// Corrupt the first byte, decode, then restore the fixture so later
// runs/tests see the original bytes.
sctBin[0]++;
const sct = CTUtils.SignedCertificateTimestamp.fromBinary(sctBin.buffer);
sctBin[0]--;
return sct.verify(pubKey).then((res) => {
assert.equal(res, false, 'Cannot detect');
});
});
});
});
|
#!/bin/bash
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
# ABOUT:
#
# Run this script to create and install TileCache and MapServer
# config files and to generate cached tiles for specific Cyclopath
# installations and branches.
#
# This script is somewhat multi-processor aware/friendly.
#
# This script should be run by the apache/www-data user.
# SETUP_NOTES:
#
# Schedule cron to run the wrapper script, check_cache_now.sh
#
# Pass this script a list of instances and branches to process.
# See further down for an example.
# ===========================================================================
# *** Debug options
# Debug switches: flip any SKIP_* flag to true to bypass that stage.
DEBUG_TRACE=false
# DEVS: Uncomment this if you want a cron email.
#DEBUG_TRACE=true
DTRACE_PIDS=false
#DTRACE_PIDS=true
# 2013.05.17: Start being verbose whenever we load from scratch.
ALWAYS_TRACE_WHEN_FRESH=true
#ALWAYS_TRACE_WHEN_FRESH=false
SKIP_CONSUME_FRESH_DB=false
SKIP_REVID_CHECK=false
SKIP_CONFIG_FILES=false
SKIP_REPORT_SIZES=false
# FIXME, or maybe just NOTICE: The cluster cache takes a day to build
# from scratch: which doesn't seem like something we want to
# do very often... and this file is called from cron...
SKIP_TILECACHE_CACHE=false
SKIP_TILECACHE_TILES=false
# DEVS: Uncomment these if you want
#SKIP_CONSUME_FRESH_DB=true
#SKIP_REVID_CHECK=true
#SKIP_CONFIG_FILES=true
#SKIP_REPORT_SIZES=true
#SKIP_TILECACHE_CACHE=true
#SKIP_TILECACHE_TILES=true
# ***
# Warn loudly (visible in the cron email) when any stage is being skipped.
# NOTE(review): SKIP_CONSUME_FRESH_DB is not part of this check -- confirm
# that is intentional (it gets its own handling further down).
if $SKIP_REVID_CHECK \
|| $SKIP_CONFIG_FILES \
|| $SKIP_REPORT_SIZES \
|| $SKIP_TILECACHE_CACHE \
|| $SKIP_TILECACHE_TILES \
; then
echo ""
echo "*****************************************"
echo "* *"
echo "* WARNING: Debug switches enabled *"
echo "* *"
echo "*****************************************"
echo ""
fi
# ===========================================================================
# *** Test sandbox
# The single-quoted assignment below is never executed; it is a dev
# scratchpad of copy-pasteable commands for exercising this script by hand.
# NOTE(review): inside the examples, '2&>1' looks like a typo for '2>&1'
# (left as-is: this is string content, not executed code).
__usage_examples__='
cd $cp/mapserver
rmdir apache_check_cache_now.sh-lock 2&>1 /dev/null; \
sudo -u $httpd_user \
INSTANCE=minnesota \
PYTHONPATH=$PYTHONPATH \
PYSERVER_HOME=$PYSERVER_HOME \
./apache_check_cache_now.sh
# DEVS: To kill this script, and tilecache_update, and tilecache_seed, call:
./kill_cache_check.sh
# DEVS: Test from apache account.
sudo su - $httpd_user
cd /ccp/dev/cp/mapserver
INSTANCE=minnesota \
PYTHONPATH=/ccp/opt/usr/lib/python:/ccp/opt/usr/lib/python2.6/site-packages:/ccp/opt/gdal/lib/python2.6/site-packages \
PYSERVER_HOME=/ccp/dev/cp/pyserver \
./apache_check_cache_now.sh
Maybe:
--force
cd /ccp/dev/cp/mapserver
INSTANCE=minnesota \
PYTHONPATH=/ccp/opt/usr/lib/python:/ccp/opt/usr/lib/python2.6/site-packages:/ccp/opt/gdal/lib/python2.6/site-packages \
PYSERVER_HOME=/ccp/dev/cp/pyserver \
./tilecache_update.py \
--branch Metc Bikeways 2012 \
--changed --cyclopath-cache
sudo su - $httpd_user
cd /ccp/dev/cp/mapserver
./check_cache_now.sh
cd /ccp/var/tilecache-cache
nohup tar -cvzf cycloplan_live.tar.gz cycloplan_live | tee tar-cycloplan_live.log 2>&1 &
sudo su - $httpd_user
/bin/bash
cd /ccp/dev/cp_v1v2-tcc/mapserver
./check_cache_now.sh
export EDITOR=/usr/bin/vim.basic
crontab -e
cd /ccp/var/tilecache-cache
tar -xzf cp_v1v2-tcc.tar.gz
mv cp_v1v2-tcc cycloplan_live
cd cycloplan_live
addr_and_port=ccpv3
cat tilecache.cfg \
| /bin/sed -r \
"s/url=http:\/\/[-_.:a-zA-Z0-9]+/url=http:\/\/${addr_and_port}/" \
> tilecache.new
/bin/mv -f tilecache.new tilecache.cfg
fixperms --public ../cycloplan_live/
re
'
# ===========================================================================
# Utility fcns. and vars.
# NOTE: Setting PYSERVER_HOME relative to ccp_base.sh.
#PYSERVER_HOME=../../pyserver
# On second thought, make ccp_base.sh find PYSERVER_HOME, so it's set absolute.
# Cleared here so the sourced ccp_base.sh computes an absolute path itself.
PYSERVER_HOME=
source ../scripts/util/ccp_base.sh
# ccp_base.sh must have populated these four; bail out early otherwise.
if [[ -z "${CCP_WORKING}"
|| -z "${PYSERVER_HOME}"
|| -z "${CCP_INSTANCE}"
|| -z "${CCP_DB_NAME}"
]]; then
echo "ERROR: Missing CCP_WORKING (${CCP_WORKING})
and/or PYSERVER_HOME (${PYSERVER_HOME})
and/or CCP_INSTANCE (${CCP_INSTANCE})
and/or CCP_DB_NAME (${CCP_DB_NAME}).
"
exit 1
fi
# ===========================================================================
# *** Make input back into array, being smart about whitespace.
# Set the shell's Internal Field Separator to null. Do this or
# whitespace will delimit things on the command line you put in
# quotes.
OLD_IFS=$IFS
IFS=''
CCP_INSTANCE_BRANCHES=()
# Slurp every positional arg verbatim (IFS='' keeps embedded spaces intact).
while [[ "$1" != "" ]]; do
CCP_INSTANCE_BRANCHES+=($1)
shift
done
# Reset IFS to default.
IFS=$OLD_IFS
#echo "CCP_INSTANCE_BRANCHES[3] = " ${CCP_INSTANCE_BRANCHES[3]}
# Bash indexed arrays are always delimited by spaces so we put the
# branch last so we can coalesce it back together later with $*.
#
# Def'n: INSTANCE, INSTANCE___DEVPATH NICKNAME Full Branch Name in Database
# Empty-array check: [[ $arr = '' ]] compares the first element only.
if [[ $CCP_INSTANCE_BRANCHES = '' ]]; then
echo "WARNING: Deprecated: You should specify CCP_INSTANCE_BRANCHES" \
"in check_cache_now.sh"
CCP_INSTANCE_BRANCHES=(
"minnesota" "Minnesota"
"minnesota" "Metc Bikeways 2012"
);
fi
# The flat array holds (instance, branch) pairs: two columns per row.
ccpsb_cols_per_row=2
ccp_number_servers=$((${#CCP_INSTANCE_BRANCHES[@]} / $ccpsb_cols_per_row))
#echo "ccpsb_cols_per_row = $ccpsb_cols_per_row"
#echo "CCP_INSTANCE_BRANCHES[3] = ${CCP_INSTANCE_BRANCHES[3]}"
#echo "ccp_number_servers = $ccp_number_servers"
#exit 0
if [[ ${#CCP_INSTANCE_BRANCHES[*]} -eq 0 ]]; then
echo ""
echo "==============================================="
echo "WARNING: Nothing to do: No databases specified."
echo "==============================================="
echo ""
fi
# ===========================================================================
# *** Local machine configuration
# Per-day log and archive dirs for this cache run (group-writable, setgid).
CCP_LOG_CKCACHE=${CCP_LOG_DAILY}/cache
/bin/mkdir -p ${CCP_LOG_CKCACHE}
/bin/chmod 2775 ${CCP_LOG_CKCACHE}
CCP_ZIP_CKCACHE=${CCP_LOG_DAILY}/cache_
/bin/mkdir -p ${CCP_ZIP_CKCACHE}
/bin/chmod 2775 ${CCP_ZIP_CKCACHE}
# 2013.05.19: Not sure about this... I think dont_exit_on_error should be set.
dont_exit_on_error=1
# ===========================================================================
# Lock management helpers
# Don't unlock what we haven't locked.
# Flag semantics: 0 = this process holds the lock, 1 = nothing to release.
SKIP_UNLOCK_CHECK_CACHE_NOW_LOCKDIR=1
SKIP_UNLOCK_UPGRADE_DUMPDIR=1
SKIP_UNLOCK_CHECK_CACHE_NOW_DUMPDIR=1
# Release the two dump-dir locks, if held (rmdir failures are ignored).
do_unlock_dump_locks () {
# NOTE: Order here is important. Do this in the reverse order that you got
# the locks.
if [[ ${SKIP_UNLOCK_CHECK_CACHE_NOW_DUMPDIR} -eq 0 ]]; then
/bin/rmdir "${CHECK_CACHE_NOW_DUMPDIR}-${script_name}" &> /dev/null
/bin/rmdir "${CHECK_CACHE_NOW_DUMPDIR}" &> /dev/null
SKIP_UNLOCK_CHECK_CACHE_NOW_DUMPDIR=1
fi
if [[ ${SKIP_UNLOCK_UPGRADE_DUMPDIR} -eq 0 ]]; then
/bin/rmdir "${CCPDEV_UPGRADE_DUMPDIR}-${script_name}" &> /dev/null
/bin/rmdir "${CCPDEV_UPGRADE_DUMPDIR}" &> /dev/null
SKIP_UNLOCK_UPGRADE_DUMPDIR=1
fi
}
# Release the main script lock, but only if this process acquired it.
# (Flag semantics: 0 = we hold the lock, 1 = nothing to release.)
do_unlock_check_cache_now_lock () {
    # Guard clause: bail out when we never got the lock.
    if [[ ${SKIP_UNLOCK_CHECK_CACHE_NOW_LOCKDIR} -ne 0 ]]; then
        return 0
    fi
    # Remove the per-script marker first, then the lock dir itself;
    # failures are ignored (the dirs may already be gone).
    /bin/rmdir "${CHECK_CACHE_NOW_LOCKDIR}-${script_name}" > /dev/null 2>&1
    /bin/rmdir "${CHECK_CACHE_NOW_LOCKDIR}" > /dev/null 2>&1
    SKIP_UNLOCK_CHECK_CACHE_NOW_LOCKDIR=1
}
# Release every lock this process may hold (dump locks before script lock).
unlock_all_locks () {
# Order here is important: first the dump locks, then the publish lock.
do_unlock_dump_locks
do_unlock_check_cache_now_lock
}
# ===========================================================================
# Engage lockdown
# We can't just check if we're running because, well, this instance of the
# script is running.
# No Good: if [[ "" != "`ps aux | grep $0`" ]]; do ...
# So we have to use a mutex, like a file lock. Using flocks is easy.
# In CcpV1, the tilecache_update script was only scheduled to run every 15
# minutes. But that's only because it didn't check to see if it was already
# running. In CcpV2, we see if it's running, otherwise we don't run (and just
# wait for the next cron iteration) -- this lets us schedule the cron job
# every minute, if we want.
# MAYBE: Have pyserver make a work_item and then Mr. Do! can run us.
# We'd still have to check if we were already running, though.
# Use a mutually exclusive lock, or mutex, to make sure this script doesn't
# run when it's already running. Use a file lock is one approach, but it's
# easier to use mkdir, which is an atomic operation.
#
# MAYBE: Can we just row lock in tilecache_update? That way, we could start a
# lot of tilecache_update instances to run lots of tilecache_seeds in parallel.
# The lock is a directory named, e.g., apache_check_cache_now.sh-lock
# DEVS: Send --force to ignore lock dir.
# NOTE(review): script_name is presumably exported by ccp_base.sh -- confirm.
CHECK_CACHE_NOW_SCRIPT="${CCP_WORKING}/mapserver/${script_name}"
CHECK_CACHE_NOW_LOCKDIR="${CHECK_CACHE_NOW_SCRIPT}-lock"
CHECK_CACHE_NOW_DUMPDIR="${CHECK_CACHE_NOW_SCRIPT}-dump"
# BUG nnnn: After we're doing with the V1->V2 scripts, we can maybe do away
# with this reloading mechanism... or maybe we need it for when
# we're preparing Cyclopath for other cities...
CCPDEV_UPGRADE_DUMPDIR="${CCPDEV_ROOT}/daily/upgrade_ccpv1-v2.sh-dump"
# Get the script lock or die trying.
DONT_FLOCKING_CARE=0
FLOCKING_REQUIRED=1
NUM_FLOCKING_TRIES=1
FLOCKING_TIMELIMIT=30
if [[ ${CCP_SKIP_LOCKDIR} -eq 1 ]] ; then
DONT_FLOCKING_CARE=1
FLOCKING_REQUIRED=0
fi
# flock_dir comes from the sourced ccp_base.sh: an mkdir-based mutex.
flock_dir \
"${CHECK_CACHE_NOW_LOCKDIR}" \
${DONT_FLOCKING_CARE} \
${FLOCKING_REQUIRED} \
${NUM_FLOCKING_TRIES} \
${FLOCKING_TIMELIMIT}
# Remember that we own the lock so the unlock helpers will release it.
if [[ $? -eq 0 ]]; then
SKIP_UNLOCK_CHECK_CACHE_NOW_LOCKDIR=0
fi
# ===========================================================================
# Reload the dump file, if it's new and if that's what we do.
# By default, we only update the cache based on what's changed since the last
# time we ran.
TC_ALL_OR_CHANGED="--changed"
# But sometimes we redo the whole cache.
# NOTE: Using exclamation on true/false only works outside [[ ]].
if [[ -n ${CCP_CONSUME_FRESH_DB} ]]; then
# 2013.12.07: This was just for testing, so deliberately edit
# this script if you want to start from scratch.
# That is, this script is run from cron, and on
# production, this if-block should never run; it's
# safer to make DEV uncomment this exit if they
# want to reload a database dump.
echo "ERROR: Not on production!"
exit 1
# NOTE: Everything from here to the end of this if-block is deliberately
# unreachable until a DEV removes the hard 'exit 1' guard above.
reload_db=0
# Try to get the dump lock, but if it's taken, we'll move on.
DONT_FLOCKING_CARE=0
FLOCKING_REQUIRED=0
NUM_FLOCKING_TRIES=3
FLOCKING_TIMELIMIT=30
flock_dir \
"${CCPDEV_UPGRADE_DUMPDIR}" \
${DONT_FLOCKING_CARE} \
${FLOCKING_REQUIRED} \
${NUM_FLOCKING_TRIES} \
${FLOCKING_TIMELIMIT}
if [[ $? -ne 0 ]]; then
if $ALWAYS_TRACE_WHEN_FRESH; then
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Could not get lock: ${CCPDEV_UPGRADE_DUMPDIR}."
fi
else # if [[ $? -eq 0 ]]; then
# Got it!
SKIP_UNLOCK_UPGRADE_DUMPDIR=0
CCP_DUMP_FILE=${CCP_DBDUMPS}/${CCP_CONSUME_FRESH_DB}.dump
CCP_MAPSERVER=${CCP_WORKING}/mapserver
CCP_LAST_LOAD=${CCP_MAPSERVER}/${script_name}-${CCP_CONSUME_FRESH_DB}
$DEBUG_TRACE && echo "Looking for db dump: ${CCP_DUMP_FILE}"
$DEBUG_TRACE && echo " ... last load file: ${CCP_LAST_LOAD}"
$DEBUG_TRACE && echo ""
# Reload when the dump exists AND (no last-load marker OR dump is newer).
if [[ ! -e ${CCP_DUMP_FILE} ]]; then
echo "WARNING: Dump file not found: ${CCP_DUMP_FILE}"
# Skipping: exit 1
elif [[ ! -e ${CCP_LAST_LOAD} ]]; then
$DEBUG_TRACE && echo "No last load file; loading dump: ${CCP_DUMP_FILE}"
reload_db=1
# NOTE: -nt means "newer than", meaning the file timestamp.
elif [[ ${CCP_DUMP_FILE} -nt ${CCP_LAST_LOAD} ]]; then
$DEBUG_TRACE && echo "Dump file is newer; loading dump: ${CCP_DUMP_FILE}"
reload_db=1
else
$DEBUG_TRACE && echo "Dump file is not newer; skipping: ${CCP_DUMP_FILE}"
fi
$DEBUG_TRACE && echo ""
if [[ ${reload_db} -eq 1 ]]; then
# Remember to use --all rather than --changed for the update.
TC_ALL_OR_CHANGED="--all"
# If we're marked for reload, we general get verbose, since this is a
# long, important process.
if $ALWAYS_TRACE_WHEN_FRESH; then
DEBUG_TRACE=true
DTRACE_PIDS=true
fi
# Skip lengthy db_load, maybe.
if [[ ${SKIP_CONSUME_FRESH_DB} == true ]]; then
$DEBUG_TRACE && echo "SKIP_CONSUME_FRESH_DB is true; not reloading db"
reload_db=0
fi
fi
# Load the database.
#set -e # Exit on error
if [[ ${reload_db} -eq 1 ]]; then
$DEBUG_TRACE && echo "Will look for dump: ${CCP_DUMP_FILE}"
$DEBUG_TRACE && echo " .. last load file: ${CCP_LAST_LOAD}"
$DEBUG_TRACE && echo ""
# We need write access on the directory for the .ascii and .list files.
if [[ ! -d ${CCP_DBDUMPS} ]]; then
echo "FATAL ERROR: Is Cyclopath even installed on this machine?"
exit 1
fi
# Probe writability by touching the directory itself.
touch ${CCP_DBDUMPS} &> /dev/null
if [[ $? -ne 0 ]]; then
# E.g., "touch: setting times of `/ccp/var/dbdumps': Permission denied"
echo ""
echo "=============================================="
echo "ERROR: The dbdumps directory is not writeable."
echo "Hey, you, DEV: This is certainly _your_ fault."
echo "Try: chmod 2777 ${CCP_DBDUMPS}"
echo "=============================================="
echo ""
exit 1
fi
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo -n "Loading newer database to ${CCP_DB_NAME}..."
LOG_FILE="${CCP_LOG_CKCACHE}/db_load-${CCP_DB_NAME}.log"
${CCP_WORKING}/scripts/db_load.sh \
${CCP_DUMP_FILE} ${CCP_DB_NAME} \
> ${LOG_FILE} 2>&1
# Check for errors.
if [[ $? -ne 0 ]]; then
echo "failed!"
echo "ERROR: db_load.sh failed: Please see: ${LOG_FILE}"
echo ""
# Dump the log to our log.
#cat ${LOG_FILE}
exit 1
fi
echo "ok"
$DEBUG_TRACE && echo `date`
# Remember when we did this.
touch ${CCP_LAST_LOAD}
/bin/chmod 664 ${CCP_LAST_LOAD}
fi
#set +e # Stay on error
# Since we just reloaded the database, whack the tilecache cache directory.
if [[ ${reload_db} -eq 1 ]]; then
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Purging cache dir: ${TILECACHE_CACHE}"
/bin/rm -rf "${TILECACHE_CACHE}" &> /dev/null
# Also whack any old touch files, which are meaningless now.
# See: RID_COMPARATOR.
/bin/rm -f last_rev-*.touch
fi
# Free the lock now that the database is loaded.
# /bin/rmdir "${CCPDEV_UPGRADE_DUMPDIR}" &> /dev/null
do_unlock_dump_locks
fi # else, didn't get the lock, and we'll try again next cron.
# FIXME: Return now if there was no database to load -- that is,
# CCP_CONSUME_FRESH_DB doesn't otherwise make tiles or do any work.
#
# FIXME: Enable this once we know the --changed problem in tilecache_update is
# fixed.
# if [[ ${reload_db} -eq 0 ]]; then
if false; then
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Didn't get lock or didn't reload database: done."
exit 0
fi
fi # end: if CCP_CONSUME_FRESH_DB and not SKIP_CONSUME_FRESH_DB.
# ===========================================================================
# Verify the database exists.
# [lb]'s PAGER is set to less, but we want more, i.e., less is interactive, and
# more just dumps to stdout.
# Note that -A strips whitespace and dashes, and -t strips headers, so we're
# just left with a number-string.
# Oh, but wait, www-data can't run as postgres, because pg_ident.conf says so.
# Oh, wait, www-data needs postgres access to be able to reload the database!
# Anyway, either of these approaches works:
#
# # Option 1:
# ccp_db_exists=`
# PAGER=more \
# psql -U postgres postgres \
# -c "SELECT COUNT(*) FROM pg_database WHERE datname = '${CCP_DB_NAME}';"\
# -A -t`
# # Option 2:
# Probe with an empty command; psql's exit status tells us if the db exists.
psql -U cycling ${CCP_DB_NAME} -c "" --no-psqlrc &> /dev/null
errno=$?
if [[ $errno -eq 2 ]]; then
# E.g., 'psql: FATAL: database "ccpv3_blah" does not exist'
echo "ERROR: Database does not exist: ${CCP_DB_NAME}"
exit 1
elif [[ $errno -ne 0 ]]; then
echo "ERROR: Database does not exist (unknown why): ${CCP_DB_NAME}"
exit 1
fi
# ===========================================================================
# Fcns.
# ***
# Decide whether (db_instance, branch_name) has new revisions since the last
# run by comparing a "last processed" touch file's mtime against the branch's
# latest revision timestamp; appends the pair to the global
# CCP_WORKTODO_BRANCHES array when there is work to do.
# Args: $1 = db instance name, $2 = full branch name.
instance_branch_check_rid() {
db_instance=$1
branch_name=$2
# Branch name with spaces flattened to underscores (used in file names).
branch__ed=`echo $branch_name | tr ' ' '_'`
if [[ -z "${branch_name}" ]]; then
echo "Please specify the db_instance and branch_name."
exit 1
fi
$DEBUG_TRACE && echo "Looking for work for: ${db_instance}-${branch_name}"
$DEBUG_TRACE && echo ""
# FIXME: Should we move all the lock dirs and touch files
# to a different location? Things are getting cluttered.
# REMEMBER: To continue a set-var cmd in bash you can't have any whitespace.
RID_COMPARATOR=\
"${CCP_WORKING}/mapserver/last_rev-${db_instance}-${branch__ed}.touch"
RID_CURRENTLY=\
"${CCP_WORKING}/mapserver/last_rev-${db_instance}-${branch__ed}.curr"
export INSTANCE=${db_instance}
# ccp_latest_rev_ts comes from the sourced ccp_base.sh.
latest_rev_ts=$(ccp_latest_rev_ts ${branch_name})
if [[ -z ${latest_rev_ts} ]]; then
echo "ERROR: Problem getting last revision timestamp for ${branch_name}"
exit 1
fi
# Materialize the latest revision time as a file mtime so -nt can compare.
touch -d "${latest_rev_ts}" "${RID_CURRENTLY}"
process_branch=true
if [[ ! -e "${RID_COMPARATOR}" ]]; then
# NOTE: Skipping DEBUG_TRACE.
if [[ -n ${CCP_CONSUME_FRESH_DB} ]]; then
echo "WARNING: No rid file: ${RID_COMPARATOR}"
fi
# First run for this branch: create the marker (and process the branch).
touch -d "${latest_rev_ts}" "${RID_COMPARATOR}"
elif [[ "${RID_CURRENTLY}" -nt "${RID_COMPARATOR}" ]]; then
$DEBUG_TRACE && echo "Latest branch revision is more recent; doing work."
else
$DEBUG_TRACE && echo "Not working on branch with no recent changes."
$DEBUG_TRACE && echo "Comparator: ${RID_COMPARATOR}"
$DEBUG_TRACE && echo " Currently: ${RID_CURRENTLY}"
process_branch=false
fi
/bin/rm -f "${RID_CURRENTLY}"
# SKIP_REVID_CHECK (debug switch) forces processing regardless.
if ${process_branch} || $SKIP_REVID_CHECK; then
$DEBUG_TRACE && echo "Adding: ${db_instance}-${branch_name}"
CCP_WORKTODO_BRANCHES+=("$db_instance")
CCP_WORKTODO_BRANCHES+=("$branch_name")
fi
}
# ***
# Create the tilecache cache dir (if needed) and install the per-instance
# MapServer mapfile and TileCache config into it.
# Args: $1 = db instance name, $2 = full branch name.
instance_config_write() {
db_instance=$1
branch_name=$2
# NOTE(review): branch__ed is computed here but not used in this function.
branch__ed=`echo $branch_name | tr ' ' '_'`
if [[ -z "${branch_name}" ]]; then
echo "Please specify the db_instance and branch_name."
exit 1
fi
# Composite INSTANCE name, e.g., "minnesota___cycloplan_live".
INSTANCE="${db_instance}___${CCP_INSTANCE}"
#TILECACHE_CACHE=${TILECACHE_BASE}/${CCP_INSTANCE}
# NOTE(review): TILECACHE_CACHE is used here but not assigned anywhere
# visible in this file -- presumably set by ccp_base.sh; confirm.
/bin/mkdir -p "${TILECACHE_CACHE}" &> /dev/null
/bin/chmod 2775 ${TILECACHE_CACHE}
# ***
# Install config to /ccp/var/tilecache-cache/[installation]...
# ... wms_instance.map and fonts.
mapserver_mapfile_install
# ... tilecache.cfg.
tilecache_config_install
# WANTED: We could also auto-generate this script's wrapper?
# Okay, maybe not from here, but from somewhere else.
# (Since this script is justed called by www-data.
# We need to setup the wrapper from upgrade_ccpv1-v2.sh.)
# Not here: ... check_cache_now.sh
}
# ***
# Regenerate wms_instance.map (make_mapfile.py + m4) and install it into the
# tilecache cache dir only when it differs from the installed copy, along
# with fonts.list and the fonts/ directory.
mapserver_mapfile_install() {
# MAYBE: Is this costly to do if we run every minute from cron?
# Would it be better to 'svn info $cp' and see if that changed... but
# then we'd have to worry about dev folders that are not svnified.
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Recreating wms_instance.map."
# SYNC_ME: This commands match some CxPx commands atop tilecache_update.py.
# Start by making the map file for MapServer.
# It's rather large, so we store it outside of the Cyclopath source tree.
# SYNC: httpd.conf's MS_MAPFILE matches ${TILECACHE_CACHE}/wms_instance.map:
cd ${CCP_WORKING}/mapserver
# NOTE: make_mapfile doesn't say anything, so the log file is zero, zilch,
# empty.
LOG_FILE="${CCP_LOG_CKCACHE}/make_mapfile-${CCP_INSTANCE}.log"
LOCAL_TARGET=${CCP_WORKING}/mapserver/wms_instance.map
FINAL_TARGET=${TILECACHE_CACHE}/wms_instance.map
INSTANCE=${INSTANCE} \
PYTHONPATH=${ccp_python_path} \
PYSERVER_HOME=${CCP_WORKING}/pyserver \
${CCP_WORKING}/mapserver/make_mapfile.py \
> ${LOG_FILE} 2>&1
check_prev_cmd_for_error $? ${LOG_FILE} ${dont_exit_on_error}
# Post-process with m4 to build one helluva mapfile.
# NOTE(review): relies on ${db_instance} set by the caller, not a parameter.
m4 ${CCP_WORKING}/mapserver/wms-${db_instance}.m4 > ${LOCAL_TARGET}
# We can diff against locations that don't exist -- $? will be 2 (it's 1 for
# existing files that differ or 0 for two files that match).
install_mapfile=false
if [[ -e "${FINAL_TARGET}" ]]; then
$DEBUG_TRACE && echo " .. diffing against existing."
mapfiles_diff=$(diff ${LOCAL_TARGET} ${FINAL_TARGET})
if [[ "" != "${mapfiles_diff}" ]]; then
#$DEBUG_TRACE && echo "Mapfiles are different."
echo "WARNING: Mapfiles are different. Recreating, but not retiling."
# MAYBE: Call tilecache_update.py --all so we rebuild all tiles?
install_mapfile=true
else
$DEBUG_TRACE && echo " .. mapfile unchanged; leaving be."
/bin/rm -f ${LOCAL_TARGET}
fi
else
$DEBUG_TRACE && echo "NOTICE: Mapfile does not exist; creating."
install_mapfile=true
fi
if ${install_mapfile}; then
$DEBUG_TRACE && echo ""
# NOTE(review): "mapile" in the trace below is a typo for "mapfile"
# (left as-is: it is runtime log text).
$DEBUG_TRACE && echo "Installing new mapile: from: ${LOCAL_TARGET}"
$DEBUG_TRACE && echo " .. to: ${FINAL_TARGET}"
/bin/mv -f ${LOCAL_TARGET} ${FINAL_TARGET}
/bin/chmod 664 ${FINAL_TARGET}
# We also want to copy the fonts.list and fonts/ directory.
/bin/cp -f ${CCP_WORKING}/mapserver/fonts.list ${TILECACHE_CACHE}
/bin/rm -rf ${TILECACHE_CACHE}/fonts
/bin/cp -rf ${CCP_WORKING}/mapserver/fonts/ ${TILECACHE_CACHE}
# Fix perms.
/bin/chmod 664 ${TILECACHE_CACHE}/fonts.list
/bin/chmod 2775 ${TILECACHE_CACHE}/fonts
/bin/chmod 664 ${TILECACHE_CACHE}/fonts/*
/bin/chmod 2775 ${TILECACHE_CACHE}
else
/bin/rm -f ${LOCAL_TARGET}
fi
# Clean up an intermediate file that make_mapfile created that
# wms-instance.map needed.
/bin/rm -f ${CCP_WORKING}/mapserver/byways_and_labels.map
}
# ***
# Regenerate tilecache.cfg and install it when it differs from (or is
# missing at) the installed location in the tilecache cache dir.
tilecache_config_install() {
# C.f. similar mapserver_mapfile_install, above.
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Recreating tilecache.cfg."
# Generate the tilecache.cfg and install or check for changes.
cd ${CCP_WORKING}/mapserver
LOG_FILE="${CCP_LOG_CKCACHE}/gen_tilecache_cfg-${CCP_INSTANCE}.log"
LOCAL_TARGET=${CCP_WORKING}/mapserver/tilecache.cfg
FINAL_TARGET=${TILECACHE_CACHE}/tilecache.cfg
INSTANCE=${INSTANCE} \
PYTHONPATH=${ccp_python_path} \
PYSERVER_HOME=${CCP_WORKING}/pyserver \
${CCP_WORKING}/mapserver/gen_tilecache_cfg.py \
> ${LOG_FILE} 2>&1
check_prev_cmd_for_error $? ${LOG_FILE} ${dont_exit_on_error}
# diff exits non-zero when the files differ OR the target doesn't exist yet.
diff ${LOCAL_TARGET} ${FINAL_TARGET} &> /dev/null
if [[ $? -ne 0 ]] ; then
echo "WARNING: Tilecache cfgs are different (or target d/n/e)."
# MAYBE: Should we always overwrite existing cfg? Hrm...
/bin/mv -f ${LOCAL_TARGET} ${FINAL_TARGET}
/bin/chmod 664 ${FINAL_TARGET}
else
# Nothing changed, so just delete the new file.
$DEBUG_TRACE && echo " .. tilecache cfg unchanged; leaving be."
/bin/rm -f ${LOCAL_TARGET}
fi
}
# ***
# Kick off a background tilecache_update.py run that rebuilds the Cyclopath
# database cache for one (db_instance, branch) pair. The child's PID and
# log file are appended to WAITPIDS/WAITLOGS so tilecache_updates_wait can
# reap it and scan its log later.
instance_update_cache() {
db_instance=$1
branch_name=$2
# Spaces in branch names become underscores for use in file names.
branch__ed=`echo $branch_name | tr ' ' '_'`
if [[ -z "${branch_name}" ]]; then
echo "Please specify the db_instance and branch_name."
exit 1
fi
INSTANCE="${db_instance}___${CCP_INSTANCE}"
#TILECACHE_CACHE=${TILECACHE_BASE}/${CCP_INSTANCE}
BRANCH_QUALIFIER="${CCP_INSTANCE}_${db_instance}_${branch__ed}"
LOG_FILE="${CCP_LOG_CKCACHE}/tc_cache-${BRANCH_QUALIFIER}.log"
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && printf " %33s:%12s:%22s: starting... " \
"${CCP_INSTANCE}" "${db_instance}" "${branch__ed}"
# NOTE: The following assignment is a historical scratch note kept as an
# unused single-quoted string literal (a Bash block-comment idiom); it is
# never read anywhere.
__2013_05_29__='
export INSTANCE=minnesota
export PYSERVER_HOME=/ccp/dev/cp_v1v2-tcc/pyserver
export PYTHONPATH=/ccp/opt/usr/lib/python:/ccp/opt/usr/lib/python2.6/site-packages:/ccp/opt/gdal/lib/python2.6/site-packages
./tilecache_update.py \
--branch "Minnesota" \
--all --cyclopath-cache
sudo su - $httpd_user
/bin/bash
cd /ccp/dev/cycloplan_live/mapserver
#export INSTANCE=minnesota
export INSTANCE=minnesota___cycloplan_live
export PYSERVER_HOME=/ccp/dev/cycloplan_live/pyserver
export PYTHONPATH=/ccp/opt/usr/lib/python:/ccp/opt/usr/lib/python2.6/site-packages:/ccp/opt/gdal/lib/python2.6/site-packages
LOG_FILE=/ccp/var/log/daily/cache/tc_cache-cycloplan_live_minnesota_Mpls-St._Paul.log
./tilecache_update.py \
--branch "Minnesota" \
--all --cyclopath-cache \
> ${LOG_FILE} 2>&1 &
DO THIS STILL:
LOG_FILE=/ccp/var/log/daily/cache/tc_tiles-cycloplan_live_minnesota_Mpls-St._Paul-zooms_09_09.log
./tilecache_update.py \
--branch "Minnesota" \
--all --tilecache-tiles \
--zoom 09 09 \
> ${LOG_FILE} 2>&1 &
LOG_FILE=/ccp/var/log/daily/cache/tc_tiles-cycloplan_live_minnesota_Mpls-St._Paul-zooms_10_13.log
./tilecache_update.py \
--branch "Minnesota" \
--all --tilecache-tiles \
--zoom 10 13 \
> ${LOG_FILE} 2>&1 &
LOG_FILE=/ccp/var/log/daily/cache/tc_tiles-cycloplan_live_minnesota_Mpls-St._Paul-zooms_14_14.log
./tilecache_update.py \
--branch "Minnesota" \
--all --tilecache-tiles \
--zoom 14 14 \
> ${LOG_FILE} 2>&1 &
LOG_FILE=/ccp/var/log/daily/cache/tc_tiles-cycloplan_live_minnesota_Mpls-St._Paul-zooms_15_15.log
./tilecache_update.py \
--branch "Minnesota" \
--all --tilecache-tiles \
--zoom 15 15 \
> ${LOG_FILE} 2>&1 &
also metc
LOG_FILE=/ccp/var/log/daily/cache/tc_cache-cycloplan_live_minnesota_Metc_Bikeways_2012.log
./tilecache_update.py \
--branch "Metc Bikeways 2012" \
--all --cyclopath-cache \
> ${LOG_FILE} 2>&1 &
and the zooms...
The cluster cache takes 24 hours to build... hrmmm
'
# Launch the cache update in the background; stdout/stderr go to LOG_FILE.
INSTANCE=${INSTANCE} \
PYTHONPATH=${ccp_python_path} \
PYSERVER_HOME=${CCP_WORKING}/pyserver \
./tilecache_update.py \
--branch "${branch_name}" \
${TC_ALL_OR_CHANGED} \
--cyclopath-cache \
> ${LOG_FILE} \
2>&1 \
&
# ${!} is the PID of the job just backgrounded; remember it and its log.
WAITPIDS+=("${!}")
WAITLOGS+=("${LOG_FILE}")
$DEBUG_TRACE && echo "ok!"
$DTRACE_PIDS && echo "Added to WAITPIDS: ${!}"
$DTRACE_PIDS && echo WAITPIDS: ${WAITPIDS[*]}
}
# ***
# Kick off background tilecache_update.py runs that rebuild the rendered
# map tiles for one (db_instance, branch) pair -- one child process per
# zoom group, so zoom ranges build in parallel. PIDs and log files are
# collected in WAITPIDS/WAITLOGS for tilecache_updates_wait.
branch_update_tiles() {
db_instance=$1
branch_name=$2
branch__ed=`echo $branch_name | tr ' ' '_'`
if [[ -z "${branch_name}" ]]; then
echo "Please specify the db_instance and branch_name."
exit 1
fi
INSTANCE="${db_instance}___${CCP_INSTANCE}"
#TILECACHE_CACHE=${TILECACHE_BASE}/${CCP_INSTANCE}
# ***
# FIXME: Use work items to schedule tilecache_update.py to run on specific
# installations and branches within installations only when required? For now
# we're run periodically by cron and call tilecache_update.py on all servers,
# branches, and zooms. This might not be a big deal, but [lb] hasn't profiled
# the strain on the server that this script causes.
# Early versions of this script went through the zooms one-by-one (and the
# branches one-by-one above that, and the servers above those). But that
# means we quickly build the higher zooms for some branch in whatever server
# we pick first, and then we waste hours going through the lower zooms, and
# then we move on to the next branch starting with the higher zooms to the
# lower, iterating through branches until we're done with the first server
# before moving on to the next server.
#
# But we can improves this in two ways.
#
# 1. We have a Big, Meaty server, so we should use more than just a single
# processor.
# 2. It'd be nice if servers and branches didn't block one another, so that,
# e.g., we can quickly build all the higher zoom levels for all branches
# and all servers and not have to wait for any lower zoom levels to build.
# SYNC_ME: Search conf.ccp_min_zoom and conf.ccp_max_zoom.
#
# DEVS: Here's the extra zooms, if you want:
# for zoom in "9" "10" "11" "12" "13" "14" "15" "16" "17" "18" "19"; do
#
# DEVS: Here's to quicker testing.
# Though you might not want to use --branch -1, either.
# for zoom in "9"; do
#
# SYNC_ME: See below; the min/max are 9/15.
# 2013.04.23: The zooms 9 through 13 take a couple of hours to complete (a
# few minutes on zooms 9 and 10, then doubling on each successive zoom).
# The zoom 14 takes 2 hours, and the zoom 15 takes 4 hours. So if we use
# three zoom groupings, each group should take a few to a four hours.
BRANCH_QUALIFIER="${CCP_INSTANCE}_${db_instance}_${branch__ed}"
LOGBASE="${CCP_LOG_CKCACHE}/tc_tiles-${BRANCH_QUALIFIER}"
#zoom_groups=("09 13" "14 14" "15 15")
#zoom_groups=("09 13")
#zoom_groups=("07 09" "10 13" "14 14" "15 15")
# MAYBE: Break into counties... the State of MN is 7x larger than MetC bbox.
#zoom_groups=("06 09" "10 11" "12 12" "13 13" "14 14" "15 15")
# 2014.09.08: [lb] doesn't want the public to be able to influence the
# way-zoomed-out tiles, which are particularly hard to make beautiful,
# especially since they use stack IDs from the database!! =)
# In the skins file, see: l_restrict_stack_ids_major_trail_05 through
# l_restrict_stack_ids_major_trail_08
# Full rebuilds include the low (zoomed-out) zooms 05-08; incremental
# ("--changed") rebuilds start at zoom 09.
if [[ ${TC_ALL_OR_CHANGED} == "--all" ]]; then
zoom_groups=("05 09" "10 11" "12 12" "13 13" "14 14" "15 15")
elif [[ ${TC_ALL_OR_CHANGED} == "--changed" ]]; then
zoom_groups=("09 11" "12 12" "13 13" "14 14" "15 15")
else
echo "Error: What's TC_ALL_OR_CHANGED?: ${TC_ALL_OR_CHANGED}"
exit 1
fi
# BUG nnnn/2014.08.25: Zoom 14 is now taking a day (Statewide MN);
# can we do a parallel build by bbox (either
# on different cores or different machines
# altogether).
# Add: --bbox
$DEBUG_TRACE && echo ""
# One background tilecache_update.py per zoom group, each with its own log.
for arr_index in ${!zoom_groups[*]}; do
# MAYBE: Do we want to keep a running logfile? Do it for now, until
# script is more mature.
# NOTE: You're going to end up with lots of logfiles...
zooms=${zoom_groups[$arr_index]}
zooms_=`echo $zooms | tr ' ' '_'`
LOG_FILE="${LOGBASE}-zooms_${zooms_}.log"
$DEBUG_TRACE && printf " %23s:%12s:%22s:%7s: starting... " \
"${CCP_INSTANCE}" "${db_instance}" "${branch__ed}" "${zooms_}"
# NOTE: We don't have to use --all: the --changed option will repopulate
# everything if nothing exists for the zoom level. (But we still
# have the check_cache_now.sh-init folder to tell us to use --all
# anyway...).
# EXPLAIN: We archive the logs later... do/should we keep them forever?
INSTANCE=${INSTANCE} \
PYTHONPATH=${ccp_python_path} \
PYSERVER_HOME=${CCP_WORKING}/pyserver \
./tilecache_update.py \
--branch "${branch_name}" \
${TC_ALL_OR_CHANGED} \
--zoom ${zooms} \
--tilecache-tiles \
> ${LOG_FILE} \
2>&1 \
&
# ${!} is the PID of the job just backgrounded.
WAITPIDS+=("${!}")
WAITLOGS+=("${LOG_FILE}")
$DEBUG_TRACE && echo "ok!"
$DTRACE_PIDS && echo "Added to WAITPIDS: ${!}"
$DTRACE_PIDS && echo WAITPIDS: ${WAITPIDS[*]}
done
}
# ***
# In bash, you fork a command simply by running it with the ampersand
# appended to the command line. (You can't really fork in the middle
# of a script; you can exec, but that's not quite the same).
#
# There are a number of ways to detect when your new, background
# processes, as they're called, complete.
#
# See
# http://stackoverflow.com/questions/1455695/forking-multi-threaded-processes-bash
#
# One method is to start all your jobs and then bring each one to the
# foreground. The fg command blocks until the job finishes, or returns
# nonzero if there are no more jobs for this session.
#
# # Wait for all parallel jobs to finish.
# while [ 1 ]; do fg 2> /dev/null; [ $? == 1 ] && break; done
#
# Another method is to use WAITPID. E.g.,
#
# sleep 3 & WAITPID=$!; wait $WAITPID
#
# Note that you can concatenate process IDs with spaces to wait on multiple.
#
# But the obvious solution is to use Bash's jobs command, which returns the
# process IDs of the jobs as a list. It's empty when the jobs are all done.
# UPDATE: That stackoverflow answer is wrong: jobs is an interactive-only
# command. From within a script it doesn't behave well, because of how child
# processes are spawned.
# http://stackoverflow.com/questions/690266/why-cant-i-use-job-control-in-a-bash-script
# Wait for every background tilecache_update.py job (PIDs in WAITPIDS) to
# finish, then scan each job's log file (paths in WAITLOGS) for errors.
# We poll `ps` once per second instead of a blanket `wait` so each job's
# completion time can be reported as it happens.
tilecache_updates_wait() {
  time_1=$(date +%s.%N)
  $DEBUG_TRACE && echo ""
  $DEBUG_TRACE && printf "Waiting for tilecache_updates after %.2F mins.\n" \
    $(echo "(${time_1} - ${script_time_0}) / 60.0" | bc -l)
  # Not using: wait $WAITPIDS
  # since that waits on all PIDs, so it's all or nothing.
  # Instead we loop through the PIDs ourselves and check
  # with ps to see if each process is running.
  $DEBUG_TRACE && echo ""
  $DEBUG_TRACE && echo "Waiting for ${#WAITPIDS[*]} processes to complete."
  # BUGFIX: use the arithmetic test -gt; inside [[ ]] the `>` operator is a
  # *string* comparison, which only worked by accident for small counts.
  while [[ ${#WAITPIDS[*]} -gt 0 ]]; do
    NEXTPIDS=()
    for cur_pid in ${WAITPIDS[*]}; do
      # `ps h --pid` prints nothing once the process has exited.
      PROCESS_DETAILS=`ps h --pid ${cur_pid}`
      if [[ -n ${PROCESS_DETAILS} ]]; then
        NEXTPIDS+=(${cur_pid})
      else
        $DTRACE_PIDS && echo "No longer running: process ID: ${cur_pid}."
        time_2=$(date +%s.%N)
        $DEBUG_TRACE && printf "Since started waiting: %.2F mins.\n" \
          $(echo "(${time_2} - ${time_1}) / 60.0" | bc -l)
      fi
    done
    # This is how you copy an array (a plain scalar assignment is not it):
    WAITPIDS=("${NEXTPIDS[@]}")
    if [[ ${#WAITPIDS[*]} -gt 0 ]]; then
      # MAYBE: Is this loop too tight?
      sleep 1
    else
      $DTRACE_PIDS && echo "No longer running: any child process."
    fi
  done
  # The subprocesses might still be spewing to the terminal so hold off a
  # sec, otherwise the terminal prompt might get scrolled away after the
  # script exits while child output is still being flushed.
  sleep 1
  $DEBUG_TRACE && echo ""
  $DEBUG_TRACE && echo "All tilecache_update.pys complete!"
  time_2=$(date +%s.%N)
  $DEBUG_TRACE && echo ""
  $DEBUG_TRACE && printf "Waited for background tasks for %.2F mins.\n" \
    $(echo "(${time_2} - ${time_1}) / 60.0" | bc -l)
  # We kept a list of log files the background processes wrote, so we can
  # analyze them now for failures.
  dont_exit_on_error=1
  #dont_exit_on_error=0
  for logfile in ${WAITLOGS[*]}; do
    # BUGFIX: the old code passed $?, which at this point held the exit
    # status of the preceding printf/loop -- not of the background job.
    # Pass 0 so the helper decides based on scanning the log file itself.
    check_prev_cmd_for_error 0 ${logfile} ${dont_exit_on_error}
  done
}
# ***
# Record the current on-disk size (in MB) of ${TILECACHE_CACHE} for this
# (db_instance, branch) pair, keyed into SERVER_BRANCH_ZOOM_SIZES so that
# report_delta_sizes can later report the growth.
report_setup_sizes() {
  db_instance=$1
  branch_name=$2
  branch__ed=`echo $branch_name | tr ' ' '_'`
  if [[ -z "${branch_name}" ]]; then
    echo "Please specify the db_instance and branch_name."
    exit 1
  fi
  INSTANCE="${db_instance}___${CCP_INSTANCE}"
  #TILECACHE_CACHE=${TILECACHE_BASE}/${CCP_INSTANCE}
  # du -s -m prints "<size-MB><tab><path>"; word-splitting the response into
  # an array leaves the size in element 0.
  # BUGFIX: the old `| tr ' ' ' '` translated space to space -- a no-op.
  du_resp=`du -m -s $TILECACHE_CACHE`
  cache_size_init=(${du_resp})
  arr_key="${CCP_INSTANCE}:${db_instance}:${branch__ed}"
  SERVER_BRANCH_ZOOM_SIZES[${arr_key}]=${cache_size_init[0]}
  $DEBUG_TRACE && echo ""
  $DEBUG_TRACE && printf " %33s:%12s:%22s: %5d Mb.\n" \
    "${CCP_INSTANCE}" "${db_instance}" "${branch__ed}" "${cache_size_init[0]}"
}
# Print how much ${TILECACHE_CACHE} grew (in MB) since report_setup_sizes
# recorded its initial size for this (db_instance, branch) pair.
report_delta_sizes() {
  db_instance=$1
  branch_name=$2
  branch__ed=`echo $branch_name | tr ' ' '_'`
  if [[ -z "${branch_name}" ]]; then
    echo "Please specify the db_instance and branch_name."
    exit 1
  fi
  INSTANCE="${db_instance}___${CCP_INSTANCE}"
  #TILECACHE_CACHE=${TILECACHE_BASE}/${CCP_INSTANCE}
  # du -s -m prints "<size-MB><tab><path>"; word-splitting into an array
  # leaves the size in element 0.
  # BUGFIX: the old `| tr ' ' ' '` translated space to space -- a no-op.
  du_resp=`du -m -s $TILECACHE_CACHE`
  cache_size_last=(${du_resp})
  arr_key="${CCP_INSTANCE}:${db_instance}:${branch__ed}"
  cache_size_early=${SERVER_BRANCH_ZOOM_SIZES[${arr_key}]}
  cache_size_delta=$((${cache_size_last[0]} - $cache_size_early))
  $DEBUG_TRACE && echo ""
  # BUGFIX: cache_size_delta is a scalar, not an array; the old
  # ${cache_size_delta[0]} happened to resolve but was misleading.
  $DEBUG_TRACE && printf " %33s:%12s:%22s: %5d Mb. +\n" \
    "${CCP_INSTANCE}" "${db_instance}" "${branch__ed}" "${cache_size_delta}"
}
# ===========================================================================
# Application "main"
# *** Start of application...
$DEBUG_TRACE && echo "Welcome to check_cache_now!"
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "No. of server-branches: ${ccp_number_servers}"
$DEBUG_TRACE && echo ""
# Make an associate array to hold the directory sizes before.
declare -A SERVER_BRANCH_ZOOM_SIZES
# Pre-process each branch: check the latest revision ID and skip
# doing any work if the branch's latest rid is unchanged.
$DEBUG_TRACE && echo "Checking branches' last revision IDs..."
$DEBUG_TRACE && echo ""
# Branches that actually need work are accumulated into this array by
# instance_branch_check_rid; all later phases iterate over it.
CCP_WORKTODO_BRANCHES=()
arr2_fcn_iter 'instance_branch_check_rid' \
${ccpsb_cols_per_row} CCP_INSTANCE_BRANCHES[@]
# MAYBE: Do we skip the bbox computation if the revision hasn't changed?
# Or do we waste a few seconds on each branch computing the bbox?
# Phase 1: regenerate and install config files (mapfiles, tilecache.cfg).
if ! $SKIP_CONFIG_FILES; then
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Creating and installing config files..."
# This is a sneaky way in Bash to pass an array as an argument.
# Use the array's name! In the fcn we're calling, it'll use the
# bang operator to resolve the string we're sending to the array.
arr2_fcn_iter 'instance_config_write' \
${ccpsb_cols_per_row} CCP_WORKTODO_BRANCHES[@]
fi
# Phase 2: record the starting cache sizes (for the delta report below).
if ! $SKIP_REPORT_SIZES; then
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Initial tilecache directory sizes:"
arr2_fcn_iter 'report_setup_sizes' \
${ccpsb_cols_per_row} CCP_WORKTODO_BRANCHES[@]
fi
# Phase 3: rebuild database caches in parallel background jobs.
if ! $SKIP_TILECACHE_CACHE; then
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Updating database caches w/ bg processes..."
WAITPIDS=()
WAITLOGS=()
arr2_fcn_iter 'instance_update_cache' \
${ccpsb_cols_per_row} CCP_WORKTODO_BRANCHES[@]
# Wait for child processes.
tilecache_updates_wait
fi
# FIXME:
# Phase 4: rebuild the rendered tiles in parallel background jobs.
if ! $SKIP_TILECACHE_TILES; then
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Recreating tilecache tiles w/ bg processes..."
WAITPIDS=()
WAITLOGS=()
arr2_fcn_iter 'branch_update_tiles' \
${ccpsb_cols_per_row} CCP_WORKTODO_BRANCHES[@]
# Wait for child processes.
tilecache_updates_wait
fi
# Phase 5: report how much each cache directory grew.
if ! $SKIP_REPORT_SIZES; then
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo "Delta tilecache directory sizes:"
arr2_fcn_iter 'report_delta_sizes' \
${ccpsb_cols_per_row} CCP_WORKTODO_BRANCHES[@]
fi
# FIXME: Split branches from zooms, i.e., right now, sequence is:
# basemap 9 to 15, metc 9 to 15, statewide 9 to 15
# maybe in cron:
# run script for each installation for each branch,
# maybe for 14 and 15 and 16 and 17 separately...
# ===========================================================================
# Dump the database and whack the lock.
# Don't continue if we can't get our dump lock -- a developer will have to
# clear this up if we can't.
# HACK: This gives ${TC_ALL_OR_CHANGED} a second meaning:
if [[ ${TC_ALL_OR_CHANGED} == "--all" ]]; then
DONT_FLOCKING_CARE=0
FLOCKING_REQUIRED=1
NUM_FLOCKING_TRIES=-1
FLOCKING_TIMELIMIT=3600
flock_dir \
"${CHECK_CACHE_NOW_DUMPDIR}" \
${DONT_FLOCKING_CARE} \
${FLOCKING_REQUIRED} \
${NUM_FLOCKING_TRIES} \
${FLOCKING_TIMELIMIT}
if [[ $? -eq 0 ]]; then
SKIP_UNLOCK_CHECK_CACHE_NOW_DUMPDIR=0
fi
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && echo `date`
$DEBUG_TRACE && echo "Dumping tilecache'd database: ${CCP_DB_NAME}"
#
# Verify the dump file is writeable before spending time on pg_dump.
TCC_DUMP_FILE=${CCP_DBDUMPS}/${CCP_DB_NAME}.dump
touch ${TCC_DUMP_FILE} &> /dev/null
if [[ $? -ne 0 ]]; then
echo ""
echo "=============================================="
echo "ERROR: The dump file is not writeable."
echo "Hey, you, DEV: This is certainly _your_ fault."
echo "Try: chmod 2777 ${TCC_DUMP_FILE}"
echo "=============================================="
echo ""
exit 1
else
pg_dump ${HOSTNAME_SWITCH} -U cycling ${CCP_DB_NAME} -Fc -E UTF8 \
> ${TCC_DUMP_FILE}
# 2013.05.22: Weird: "Why is the dump owned by me, landonb?"
# /bin/chmod: changing permissions of `/ccp/var/dbdumps/ccpv2_tcc.dump':
# Operation not permitted
/bin/chmod 664 ${TCC_DUMP_FILE}
fi
# Release the dump lock.
# /bin/rmdir "${CHECK_CACHE_NOW_DUMPDIR}" &> /dev/null
do_unlock_dump_locks
fi
# ===========================================================================
# Archive all our logs
# Store to, i.e., /ccp/var/log/daily/cache_
ARCHIVE_NAME=${CCP_ZIP_CKCACHE}/ckcache-`date +%Y_%m_%d_%Hh%Mm`
$DEBUG_TRACE && echo ""
$DEBUG_TRACE && \
echo "Archiving cache for ${script_name}: ${ARCHIVE_NAME}.tar.gz"
# Collect files from, i.e., /ccp/var/log/daily/cache
# NOTE: If you use the full path than the tarball includes the dir. ancestry.
# tar -czf ${ARCHIVE_NAME}.tar.gz ${CCP_LOG_CKCACHE}/* \
# > ${ARCHIVE_NAME}.log 2>&1
cd ${CCP_LOG_DAILY}
tar -czf ${ARCHIVE_NAME}.tar.gz cache/* \
> ${ARCHIVE_NAME}.log 2>&1
# ===========================================================================
# Release lockdown
unlock_all_locks
# ===========================================================================
# Print elapsed time
script_finished_print_time
# ===========================================================================
# All done.
exit 0
# ***
|
// Navigation-index data for the armnn::ElementwiseBaseLayer documentation
// page. Each entry is [ display name, target .xhtml anchor, children ].
// NOTE(review): appears to be Doxygen-generated output -- edit the C++
// sources / Doxygen config rather than this file.
var classarmnn_1_1_elementwise_base_layer =
[
    [ "ElementwiseBaseLayer", "classarmnn_1_1_elementwise_base_layer.xhtml#a6aa67447c0f7d3aa62124c535e5b550b", null ],
    [ "~ElementwiseBaseLayer", "classarmnn_1_1_elementwise_base_layer.xhtml#a70745c21e09e36c3a83aff0c9848514c", null ],
    [ "InferOutputShapes", "classarmnn_1_1_elementwise_base_layer.xhtml#a65ca562c882ad619684445a1402f415a", null ],
    [ "ValidateTensorShapesFromInputs", "classarmnn_1_1_elementwise_base_layer.xhtml#a8c8f543d7e9729362c266d12ec169966", null ]
];
<filename>src/main/java/com/alipay/api/domain/AlipayEcoRebateBalanceSendModel.java<gh_stars>0
package com.alipay.api.domain;
import java.util.List;
import com.alipay.api.AlipayObject;
import com.alipay.api.internal.mapping.ApiField;
import com.alipay.api.internal.mapping.ApiListField;
/**
 * Request model for updating a user's redeemable Jifenbao (Alipay reward
 * points) balance.
 * (Translated from the original Chinese Javadoc: 更新可领取的集分宝余额.)
 *
 * @author auto create
 * @since 1.0, 2021-11-01 11:21:18
 */
public class AlipayEcoRebateBalanceSendModel extends AlipayObject {

	private static final long serialVersionUID = 1759293464331426846L;

	/**
	 * The user's order line items (original: 用户订单明细).
	 */
	@ApiListField("items")
	@ApiField("rebate_good")
	private List<RebateGood> items;

	/**
	 * The user's order id on the merchant side (original: 商户侧的用户订单id).
	 */
	@ApiField("order_id")
	private String orderId;

	/**
	 * The user's unique Alipay identifier, which starts with 2088
	 * (original: 用户支付宝唯一标识,2088开头。).
	 */
	@ApiField("user_id")
	private String userId;

	public List<RebateGood> getItems() {
		return this.items;
	}
	public void setItems(List<RebateGood> items) {
		this.items = items;
	}
	public String getOrderId() {
		return this.orderId;
	}
	public void setOrderId(String orderId) {
		this.orderId = orderId;
	}
	public String getUserId() {
		return this.userId;
	}
	public void setUserId(String userId) {
		this.userId = userId;
	}
}
|
/** Returns the num-th Fibonacci number (0, 1, 1, 2, 3, ...).
 *
 *  For num <= 1 the input itself is returned, matching the original
 *  naive-recursive behavior (so negative inputs echo back unchanged).
 *
 *  Rewritten iteratively: the original double recursion was O(2^num);
 *  this version is O(num) with identical results.
 */
def fibonacci(num: Int): Int = {
  if (num <= 1)
    num
  else {
    var prev = 0
    var curr = 1
    var i = 2
    while (i <= num) {
      val next = prev + curr
      prev = curr
      curr = next
      i += 1
    }
    curr
  }
}
/** Prints a header followed by the first `num` Fibonacci numbers,
 *  one per line.
 */
def printFibo(num: Int) = {
  println(s"$num numbers of Fibonacci series: ");
  (0 until num).foreach(idx => println(fibonacci(idx)))
}
printFibo(20)
#!/bin/bash
# Sets up the environment for the tf-dev-env build sandbox: workspace
# paths, registry/RPM-repo endpoints, container names, and version info.
# Every variable honors a pre-set value and falls back to a default.
[ -n "$DEBUG" ] && set -x
set -o errexit
# working environment
export WORKSPACE=${WORKSPACE:-$(pwd)}
export TF_CONFIG_DIR="${WORKSPACE}/.tf"
export TF_DEVENV_PROFILE="${TF_CONFIG_DIR}/dev.env"
# A saved profile, if present, overrides the defaults computed below.
[ -e "$TF_DEVENV_PROFILE" ] && source "$TF_DEVENV_PROFILE"
# determined variables
# Distro id from /etc/*release, e.g. "centos" or "ubuntu" (quotes stripped).
export DISTRO=$(cat /etc/*release | egrep '^ID=' | awk -F= '{print $2}' | tr -d \")
# working build directories
export SRC_ROOT=${SRC_ROOT:-}
export CONTRAIL_DIR="${SRC_ROOT:-${WORKSPACE}/contrail}"
# build environment preparation options
export REGISTRY_PORT=${REGISTRY_PORT:-5000}
export REGISTRY_IP=${REGISTRY_IP:-'localhost'}
export RPM_REPO_IP=${RPM_REPO_IP:-}
export RPM_REPO_PORT=${RPM_REPO_PORT:-'6667'}
export REGISTRY_CONTAINER_NAME=${REGISTRY_CONTAINER_NAME:-"tf-dev-env-registry"}
export RPM_CONTAINER_NAME=${RPM_CONTAINER_NAME:-"tf-dev-env-rpm-repo"}
export TF_DEVENV_CONTAINER_NAME=${TF_DEVENV_CONTAINER_NAME:-"tf-developer-sandbox"}
export CONTRAIL_PARALLEL_BUILD=${CONTRAIL_PARALLEL_BUILD:-true}
# tf-dev-env sandbox parameters
export IMAGE=${IMAGE:-"tungstenfabric/developer-sandbox"}
export DEVENVTAG=${DEVENVTAG:-"latest"}
export DEVENV_IMAGE=${IMAGE}:${DEVENVTAG}
# RHEL specific build options
export ENABLE_RHSM_REPOS=${ENABLE_RHSM_REPOS:-1}
# versions info
export CONTRAIL_CONTAINER_TAG=${CONTRAIL_CONTAINER_TAG:-'dev'}
export VENDOR_NAME="Tungsten Fabric"
export VENDOR_DOMAIN="tungsten.io"
|
package io.hnfmr.chapter4
import cats.Id
// Minimal demonstration of monad operations for cats.Id, the identity
// monad: since Id[A] is just A, each operation reduces to plain function
// application.
object MonadEx extends App {
  // Lift a value into Id -- the identity function.
  def pure[A](value: A): Id[A] = value
  // map applies f directly, as Id adds no wrapper.
  def map[A, B](ia: Id[A])(f: A => B): Id[B] = f(ia)
  // flatMap is likewise direct application, because Id[B] = B.
  def flatMap[A, B](ia: Id[A])(f: A => Id[B]): Id[B] = f(ia)
}
|
<filename>user_tools/tiled_csv_to_tilemap.py
# Turns a .csv file exported from Tiled into a .tilemap text file for use with our Tile-RAM.
# This program is very strict about the format of the input file.
# The input file must be:
# * A .csv file.
# * Each comma separated entry has a single 32-bit integer.
# * The first 3 MSBs of the 32-bit integers are reserved, and the top of those two indicate the
# * horizontal mirror and vertical mirror. The 3rd MSB should be 0, as rotations are incompatible
# * with FP-GAme.
# Tiled can be found here:
# https://www.mapeditor.org/
import sys
import csv
def main(input_path, output_path, palette_ID):
    """Convert a Tiled-exported .csv into a .tilemap text file.

    Each CSV cell holds a 32-bit integer: bit 31 = horizontal mirror,
    bit 30 = vertical mirror, bit 29 = rotation (incompatible with
    FP-GAme), and the low 29 bits are the tile pattern address.

    Args:
        input_path: Path to the source .csv file.
        output_path: Destination file name WITHOUT extension; ".tilemap"
            is appended.
        palette_ID: Palette index written into every entry (int or a
            numeric string).
    """
    # Context managers close the files even on error (the original leaked
    # both handles).
    with open(input_path, 'r') as fr:
        # read file into list:
        # [ [tile-row 0 tiles], [tile-row 1 tiles], ..., [tile-row 63] ]
        data_2d = list(csv.reader(fr, delimiter=','))
    if len(data_2d) == 0 or len(data_2d[0]) == 0:
        print("Malformed .csv file!")
        quit()
    with open(output_path + ".tilemap", 'w') as fw:
        # Iterate through each tile entry and extract the pattern address
        # plus the two mirror bits, warning about incompatible rotations.
        for row in range(len(data_2d)):
            for col in range(len(data_2d[0])):
                entry = int(data_2d[row][col])
                pattern_addr = entry & 0x1FFFFFFF
                h = (entry & 0x80000000) > 0
                v = (entry & 0x40000000) > 0
                r = (entry & 0x20000000) > 0
                mirror = (v << 1) | h
                if r:
                    # BUGFIX: report the column number -- the original
                    # printed the raw entry value where the column belongs.
                    print("ERROR: Rotated Tile at row %d column %d!" % (row, col))
                    print("Fix this by ensuring there are no rotated tiles!")
                # Entries within a row are space-separated; rows are
                # newline-separated with no trailing newline.
                prepend_space = " " if (col != 0) else ""
                append_newline = "\n" if (col == len(data_2d[0]) - 1 and row != len(data_2d) - 1) else ""
                fw.write("%s(%03X,%X,%X)%s" % (prepend_space, pattern_addr, int(palette_ID), mirror,
                                               append_newline))
# CLI entry point: expects source csv, destination base name, palette id.
if __name__ == "__main__":
    if len(sys.argv) != 4:
        print("Expecting 3 arguments: <src .csv file>, <dest filename no extension>, and "
              "<palette_ID>")
    else:
        main(sys.argv[1], sys.argv[2], sys.argv[3])
|
#!/bin/bash
# Interactively creates a small text file: prompts for a name and content,
# validates the name, and writes the content to the file.
echo "📄 Creating upfile..."
# Prompt the user to enter the file name
read -p "Enter the file name: " fileName
# Validate the file name to ensure it does not contain any special characters or spaces
# NOTE(review): the pattern also rejects dots, so names with extensions
# (e.g. notes.txt) are refused -- confirm that is intended.
if [[ $fileName =~ ^[a-zA-Z0-9_]+$ ]]; then
# Prompt the user to enter the content of the file
read -p "Enter the content of the file: " fileContent
# Create a file with the provided name and content
echo "$fileContent" > "$fileName"
echo "File '$fileName' created successfully."
else
echo "Invalid file name. File name should only contain alphanumeric characters and underscores."
fi
<reponame>geoand/quarkus-config-extensions<gh_stars>1000+
package io.quarkus.consul.config.runtime;
import java.util.Collections;
import org.eclipse.microprofile.config.spi.ConfigSource;
import org.eclipse.microprofile.config.spi.ConfigSourceProvider;
import org.jboss.logging.Logger;
import io.quarkus.runtime.RuntimeValue;
import io.quarkus.runtime.annotations.Recorder;
@Recorder
public class ConsulConfigRecorder {

    private static final Logger log = Logger.getLogger(ConsulConfigRecorder.class);

    // Configuration controlling whether/how Consul is consulted.
    final ConsulConfig consulConfig;

    public ConsulConfigRecorder(ConsulConfig consulConfig) {
        this.consulConfig = consulConfig;
    }

    /**
     * Returns the {@link ConfigSourceProvider} to register: the
     * Consul-backed provider when the feature is enabled, otherwise a
     * provider that contributes no config sources at all.
     */
    public RuntimeValue<ConfigSourceProvider> configSources() {
        if (!consulConfig.enabled) {
            log.debug(
                    "No attempt will be made to obtain configuration from Consul because the functionality has been disabled via configuration");
            return emptyRuntimeValue();
        }
        return new RuntimeValue<>(
                new ConsulConfigSourceProvider(consulConfig));
    }

    // Wraps an always-empty provider so callers still receive a RuntimeValue.
    private RuntimeValue<ConfigSourceProvider> emptyRuntimeValue() {
        return new RuntimeValue<>(new EmptyConfigSourceProvider());
    }

    // Fallback provider used when Consul config is disabled: no sources.
    private static class EmptyConfigSourceProvider implements ConfigSourceProvider {

        @Override
        public Iterable<ConfigSource> getConfigSources(ClassLoader forClassLoader) {
            return Collections.emptyList();
        }
    }
}
|
def validate_password(password):
    """Check a password against the policy.

    The password must be at least 8 characters long and contain at least
    one uppercase letter, one lowercase letter, one digit, and one special
    character from '!#$%&@^*'.

    Returns True when every rule is satisfied, otherwise False.
    """
    if len(password) < 8:
        return False
    specials = '!#$%&@^*'
    has_upper = has_lower = has_digit = has_special = False
    # Single pass over the password, tracking each requirement.
    for ch in password:
        if ch.isupper():
            has_upper = True
        elif ch.islower():
            has_lower = True
        elif ch.isdigit():
            has_digit = True
        if ch in specials:
            has_special = True
    return has_upper and has_lower and has_digit and has_special
<html>
<head>
<title>Best Programming Practices</title>
</head>
<body>
<h1>Best Programming Practices</h1>
<h2>By John Smith</h2>
<h3>May 8, 2021</h3>
<p>Learning good programming practices is essential for writing efficient, maintainable code. This post will provide some tips for good programming practices.</p>
</body>
</html> |
#!/bin/bash
# Installs the already-built NiftyMatch, then fetches a fresh checkout of
# the NiftyMatch-Test repo (dev branch) and builds it with tests enabled.
# Expects NiftyMatch_BUILD_DIR, NiftyMatch_DIR, Test_REPO_DIR,
# Test_BUILD_DIR and Test_SOURCE_DIR to be set by the caller.
cd "$NiftyMatch_BUILD_DIR"
make install
# Start from a clean clone of the test repository.
rm -rf "$Test_REPO_DIR"
git clone git@cmiclab.cs.ucl.ac.uk:GIFT-Surg/NiftyMatch-Test.git "$Test_REPO_DIR" --branch dev
# Out-of-source CMake build of the tests.
rm -rf "$Test_BUILD_DIR"
mkdir -p "$Test_BUILD_DIR"
cd "$Test_BUILD_DIR"
cmake -D NiftyMatch_DIR="$NiftyMatch_DIR" -D BUILD_TESTS=ON "$Test_SOURCE_DIR"
make -j
|
import React, {useState} from 'react';
import axios from 'axios';
const App = () => {
const [inputA, setInputA] = useState('');
const [inputB, setInputB] = useState('');
const submitInputs = async () => {
const data = {
inputA: inputA,
inputB: inputB
};
try {
const response = await axios.post('https://example.com/api/endpoint', data);
alert(response.data.message);
} catch (err) {
console.log(err);
alert('Something went wrong!');
}
};
return (
<div>
<div>
<input value={inputA} onChange={e => setInputA(e.target.value)} />
</div>
<div>
<input value={inputB} onChange={e => setInputB(e.target.value)} />
</div>
<div>
<button onClick={submitInputs}>Submit</button>
</div>
</div>
);
};
export default App; |
// WeChat cloud function managing user records in the "jingzhi-user"
// collection; documents are keyed by md5(OPENID).
const cloud = require('wx-server-sdk')
cloud.init({
traceUser: true,
})
const md5 = require('md5-node')
const db = cloud.database()
const usersTable = db.collection("jingzhi-user")
const _ = db.command
// Cloud function entry point (translated from: 云函数入口函数).
// Dispatches on boolean flags in `event`: update / getSelf / setSelf /
// getOthers. Errors are logged and result in an undefined return value.
exports.main = async (event, context) => {
console.log(event)
const wxContext = cloud.getWXContext()
// Update the current user's stored profile (translated from: 更新当前信息).
if (event.update == true) {
try {
return await usersTable.doc(md5(wxContext.OPENID)).update({
data: {
userData: _.set(event.userData)
},
})
} catch (e) {
console.error(e)
}
} else if (event.getSelf == true) {
// Fetch the current user's record, excluding the openid field
// (translated from: 获取当前用户信息).
try {
return await usersTable.doc(md5(wxContext.OPENID)).field({
openid: false
}).get()
} catch (e) {
console.error(e)
}
} else if (event.setSelf == true) {
// Create the current user's record with empty lists
// (translated from: 添加当前用户信息).
try {
return await usersTable.add({
data: {
_id: md5(wxContext.OPENID),
openid: wxContext.OPENID,
userData: event.userData,
boughtList: [],
messageList: [],
ontransList: []
}
})
} catch (e) {
console.error(e)
}
} else if (event.getOthers == true) {
// Fetch only the userData of the user given by event.userId
// (translated from: 获取指定用户信息).
try {
return await usersTable.doc(event.userId).field({
userData: true
}).get()
} catch (e) {
console.error(e)
}
}
}
|
<filename>src/4-Database-Git/Exercises/3-API-with-Mongoose/src/Routes/StudentRoute.js
import { Router } from 'express'
import { docs } from './docs.js'
import { Student } from '../Models/Student.js'
import { generateID } from '../utils/generateID.js'
// CRUD router for Student documents. All handlers forward failures to the
// trailing error middleware, which replies 400 with the error message.
export const studentRouter = Router()
// GET / -- serve the API docs.
.get('/', docs)
// POST /create -- create a student with a freshly generated id.
.post('/create', async (req, res, next) => {
try {
const id = await generateID()
const student = new Student({
_id: id,
...req.body,
})
await student.save()
res.send(student)
} catch (err) {
next(err)
}
})
// GET /retrieve/all -- list every student.
.get('/retrieve/all', async (_, res, next) => {
try {
const result = await Student.find({})
res.send({ results: result })
} catch (err) {
next(err)
}
})
// GET /retrieve/:id -- fetch one student (result is an array of 0 or 1).
.get('/retrieve/:id', async (req, res, next) => {
try {
const { id } = req.params
const result = await Student.find({ _id: id }).limit(1)
res.send({ result: result })
} catch (err) {
next(err)
}
})
// PATCH /update/:id -- apply req.body and stamp lastModified; 400 on
// unknown id.
.patch('/update/:id', async (req, res, next) => {
const { id } = req.params
try {
const result = await Student.findByIdAndUpdate(
id,
{ lastModified: new Date(), ...req.body },
{
new: true,
}
)
if (result == null) throw new Error('Invalid ID')
res.send({ newObject: result })
} catch (err) {
next(err)
}
})
// DELETE /delete/:id -- remove a student; 400 on unknown id.
.delete('/delete/:id', async (req, res, next) => {
const { id } = req.params
try {
const result = await Student.findByIdAndDelete(id)
if (!result) throw new Error('Invalid ID!')
res.send({ ok: true })
} catch (err) {
next(err)
}
})
// Error middleware: every thrown/forwarded error becomes a 400 response.
.use((err, req, res, next) => {
res.status(400).send({ error: err.message })
console.error(err.message)
})
|
<gh_stars>10-100
package protobuf
import (
"bufio"
"context"
"encoding/binary"
"fmt"
"io"
"github.com/christianalexander/kvdb/stores"
"github.com/gogo/protobuf/proto"
)
// protoReader adapts an io.Reader of length-prefixed protobuf records to
// the stores.Reader interface.
type protoReader struct {
	reader io.Reader
}

// NewReader wraps reader in a stores.Reader that decodes length-prefixed
// protobuf records.
func NewReader(reader io.Reader) stores.Reader {
	return protoReader{reader}
}
// Read decodes length-prefixed protobuf Records from the underlying reader
// and sends them on the records channel until the context is cancelled or
// the stream ends.
//
// Wire format: a uvarint byte length followed by that many bytes of a
// marshalled Record.
//
// NOTE(review): a clean end-of-stream (io.EOF) is also surfaced as an
// error here -- confirm callers expect that.
func (r protoReader) Read(ctx context.Context, records chan<- stores.Record) error {
	br := bufio.NewReader(r.reader)
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			l, err := binary.ReadUvarint(br)
			if err != nil {
				return fmt.Errorf("failed to read from record file: %v", err)
			}
			buf := make([]byte, l)
			// BUGFIX: the original used br.Read(buf), which may return
			// fewer than l bytes and ignored the error entirely, yielding
			// truncated/corrupt records. io.ReadFull guarantees a complete
			// read or reports an explicit error.
			if _, err := io.ReadFull(br, buf); err != nil {
				return fmt.Errorf("failed to read from record file: %v", err)
			}
			var record Record
			err = proto.Unmarshal(buf, &record)
			if err != nil {
				return fmt.Errorf("failed to unmarshal record: %v", err)
			}
			records <- *record.ToRecord()
		}
	}
}
|
<filename>src/cts-utils.h
/*
* Contacts Service
*
* Copyright (c) 2010 - 2012 Samsung Electronics Co., Ltd. All rights reserved.
*
* Contact: <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef __CTS_UTILS_H__
#define __CTS_UTILS_H__
#include <stdbool.h>
#define CTS_IMG_PATH_SIZE_MAX 1024
#define CTS_IMAGE_LOCATION "/opt/data/contacts-svc/img"
#define CTS_VCARD_IMAGE_LOCATION "/opt/data/contacts-svc/img/vcard"
#define CTS_GROUP_IMAGE_LOCATION "/opt/data/contacts-svc/img/group"
#define CTS_MY_IMAGE_LOCATION "/opt/data/contacts-svc/img/my"
#define CTS_NOTI_CONTACT_CHANGED_DEF "/opt/data/contacts-svc/.CONTACTS_SVC_DB_CHANGED"
void cts_deregister_noti(void);
void cts_register_noti(void);
int cts_get_default_language(void);
void cts_set_contact_noti(void);
void cts_set_plog_noti(void);
void cts_set_missed_call_noti(void);
void cts_set_favor_noti(void);
void cts_set_speed_noti(void);
void cts_set_addrbook_noti(void);
void cts_set_group_noti(void);
void cts_set_group_rel_noti(void);
void cts_set_link_noti(void);
int cts_exist_file(char *path);
int cts_convert_nicknames2textlist(GSList *src, char *dest, int dest_size);
GSList* cts_convert_textlist2nicknames(char *text_list);
int cts_increase_outgoing_count(int contact_id);
int cts_get_next_ver(void);
int cts_update_contact_changed_time(int contact_id);
int cts_contact_delete_image_file(int img_type, int index);
int cts_contact_add_image_file(int img_type, int index, char *src_img, char *dest_name, int dest_size);
int cts_contact_update_image_file(int img_type, int index, char *src_img, char *dest_name, int dest_size);
char* cts_get_img(const char *dir, int index, char *dest, int dest_size);
int cts_set_img(const char *dir, int index, const char *path);
#ifndef __CONTACTS_SVC_H__
//<!--
/**
* This function starts database transaction
* If you want to handle a transaction, use it.
*
* @par Multiple inserting case
* case1 has only one DB commit. Therefore it is faster than case 2.
* And if 5th inserted contact is failed,
* case 1 insert nothing but case 2 insert 1,2,3 and 4th contacts.
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
* @code
* //case 1
* contacts_svc_begin_trans();
* for(i = 0; i< 20; i++) {
* if(CTS_SUCCESS != "insert api") {
* contacts_svc_end_trans(false);
* return -1;
* }
* }
* ret = contacts_svc_end_trans(true);
* if(ret < CTS_SUCCESS){
* printf("all work were rollbacked");
* return;
* }
*
* //case 2
* for(i = 0; i< 20; i++) {
* if(CTS_SUCCESS != "insert api") {
* return -1;
* }
* }
* @endcode
*/
int contacts_svc_begin_trans(void);
/**
* This function finishes database transaction of contacts service
* If you want to handle a transaction, use it.
* If returned value is error, the transaction was rollbacked.
* When transaction is success, it returns the last contacts version.
*
* @param[in] is_success true : commit, false : rollback
* @return #CTS_SUCCESS or the last contact version(when success) on success,
* Negative value(#cts_error) on error
*/
int contacts_svc_end_trans(bool is_success);
/**
* A kind of order in contacts service of contacts service
* @see contacts_svc_get_order()
*/
typedef enum{
CTS_ORDER_NAME_FIRSTLAST = 0, /**<First Name first */
CTS_ORDER_NAME_LASTFIRST = 1 /**<Last Name first */
}cts_order_type;
/**
* Use for contacts_svc_get_order().
*/
typedef enum{
CTS_ORDER_OF_SORTING, /**< Sorting Order */
CTS_ORDER_OF_DISPLAY /**< Display Order */
}cts_order_op;
/**
* This function gets the display or sorting order(Firstname first or LastName first)
*
* @param[in] op_code #cts_order_op
* @return #CTS_ORDER_NAME_FIRSTLAST or #CTS_ORDER_NAME_LASTFIRST on success,
* \n Negative value(#cts_error) on error
*/
cts_order_type contacts_svc_get_order(cts_order_op op_code);
/**
* This function sets the display or sorting order(Firstname first or LastName first)
*
* @param[in] op_code #cts_order_op
* @param[in] order order type(#cts_order_type)
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
*/
int contacts_svc_set_order(cts_order_op op_code, cts_order_type order);
/**
* Use for contacts_svc_subscribe_change(), contacts_svc_unsubscribe_change()
*/
typedef enum{
CTS_SUBSCRIBE_CONTACT_CHANGE,
CTS_SUBSCRIBE_PLOG_CHANGE,
CTS_SUBSCRIBE_FAVORITE_CHANGE,
CTS_SUBSCRIBE_GROUP_CHANGE,
CTS_SUBSCRIBE_SPEEDDIAL_CHANGE,
CTS_SUBSCRIBE_ADDRESSBOOK_CHANGE,
CTS_SUBSCRIBE_MISSED_CALL_CHANGE,
CTS_SUBSCRIBE_LINK_CHANGE,
CTS_SUBSCRIBE_GROUP_RELATION_CHANGE /**< This is only for OSP. We cannot guarantee action for your use */
}cts_subscribe_type;
/**
* This function watchs contacts service changes.
* The notification is sent once per a transaction.
* This is handled by default context of g_main_loop.
*
* @param[in] noti_type A kind of Notification
* @param[in] cb callback function pointer
* @param[in] user_data data which is passed to callback function
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
* @par example
* @code
#include <stdio.h>
#include <glib.h>
#include <contacts-svc.h>
void test_callback(void *data)
{
printf("Contact data of contacts service is changed\n");
}
int main()
{
GMainLoop *loop;
contacts_svc_subscribe_change(CTS_SUBSCRIBE_CONTACT_CHANGE, test_callback, NULL);
loop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(loop);
contacts_svc_unsubscribe_change(CTS_SUBSCRIBE_CONTACT_CHANGE, test_callback);
g_main_loop_unref(loop);
return 0;
}
* @endcode
*/
int contacts_svc_subscribe_change(cts_subscribe_type noti_type,
void (*cb)(void *), void *user_data);
/**
* This function stops to watch contacts service changes.
* @param[in] noti_type A kind of Notification(#cts_subscribe_type)
* @param[in] cb callback function which is added by contacts_svc_subscribe_change()
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
*/
int contacts_svc_unsubscribe_change(cts_subscribe_type noti_type,
void (*cb)(void *));
/**
* This function delete a callback function which is specified with user_data.
* @param[in] noti_type A kind of Notification(#cts_subscribe_type)
* @param[in] cb The callback function which is added by contacts_svc_subscribe_change()
* @param[in] user_data The user_data which is added by contacts_svc_subscribe_change()
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
*/
int contacts_svc_unsubscribe_change_with_data(cts_subscribe_type noti_type,
void (*cb)(void *), void *user_data);
/**
* Use for contacts_svc_count()
*/
typedef enum
{
CTS_GET_ALL_CONTACT, /**< The count of contacts in the all addressbook */
CTS_GET_COUNT_SDN, /**< The count of SDN(Service Dialing Number) in SIM */
CTS_GET_ALL_PHONELOG, /**< The count of all phonelog */
CTS_GET_UNSEEN_MISSED_CALL, /**< The count of unseen missed call */
CTS_GET_INCOMING_CALL, /**< The count of incomming call */
CTS_GET_OUTGOING_CALL, /**< The count of outgoing call */
CTS_GET_MISSED_CALL, /**< The count of missed call */
CTS_GET_COUNT_ALL_GROUP, /**< The count of groups */
}cts_count_op;
/**
* This function gets count related with op_code.
*
* @param[in] op_code #cts_count_op
* @return The count number on success, Negative value(#cts_error) on error
*/
int contacts_svc_count(cts_count_op op_code);
/**
* Use for contacts_svc_count_with_int()
*/
typedef enum
{
CTS_GET_COUNT_CONTACTS_IN_ADDRESSBOOK, /**< The count of contacts in the addressbook related to index(search_value) */
CTS_GET_COUNT_CONTACTS_IN_GROUP, /**< The count of contacts in the group related to index(search_value) */
CTS_GET_COUNT_NO_GROUP_CONTACTS_IN_ADDRESSBOOK, /**< The count of not assigned contacts in the addressbook related to index(search_value) */
CTS_GET_COUNT_GROUPS_IN_ADDRESSBOOK /**< The count of groups in the addressbook related to index(search_value) */
}cts_count_int_op;
/**
* This function gets count related with op_code and search_value.
* \n #search_value is related with op_code. The Word after preposition is a property of search_value.
*
* @param[in] op_code #cts_count_int_op
* @param[in] search_value interger value(almost a related index) for searching
* @return The count number on success, Negative value(#cts_error) on error
*/
int contacts_svc_count_with_int(cts_count_int_op op_code, int search_value);
/**
* Use for contacts_svc_save_image()
*/
typedef enum
{
CTS_IMG_NORMAL, /**< . */
CTS_IMG_FULL, /**< . */
} cts_img_t;
/**
* This function saves image to contacts service domain.
*
* @param[in] img_type #cts_img_t
* @param[in] index index of contact
* @param[in] src_img The image path to copy(Should include extension at path)
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
*/
int contacts_svc_save_image(cts_img_t img_type, int index, char *src_img);
/**
* This function gets image from contacts service domain.
* Usually, You can get the #CTS_IMG_NORMAL in Contacts Struct(#CTSstruct).
*
* @param[in] img_type #cts_img_t
* @param[in] index index of contact
* @param[in] img_path The pointer of getting image path(should be freed by using free())
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
*/
int contacts_svc_get_image(cts_img_t img_type, int index, char **img_path);
/**
* This function imports sim phonebook.
*
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
*/
int contacts_svc_import_sim(void);
/**
* This function exports sim phonebook.
* @param[in] index index of contact
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
*/
int contacts_svc_export_sim(int index);
/**
* This function sets the outgoing count of the contact to zero.
*
* @param[in] person_id The index of person
* @return #CTS_SUCCESS on success, Negative value(#cts_error) on error
* @see contacts_svc_get_list(), #CTS_LIST_OFTEN_USED_CONTACT
*/
int contacts_svc_reset_outgoing_count(int person_id);
//-->
#endif //#ifndef __CONTACTS_SVC_H__
#endif //__CTS_UTILS_H__
|
package study.business.domain.model.person;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import java.io.Serializable;
import java.util.UUID;
@Getter
@Setter
@ToString
@NoArgsConstructor
public class PersonDeletedEvent implements Serializable {
private String id = UUID.randomUUID().toString();
private Person person;
public PersonDeletedEvent(Person person) {
this.person = person;
}
}
|
package string_handle;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
/**
*
* @author minchoba
* 백준 11575번: Affine Cipher
*
* @see https://www.acmicpc.net/problem/11575/
*
*/
public class Boj11575 {
private static final String NEW_LINE = "\n";
private static final char I_TO_C = 'A';
private static final int MOD = 26;
public static void main(String[] args) throws Exception{
// 버퍼를 통한 값 입력
BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
StringBuilder sb = new StringBuilder();
int T = Integer.parseInt(br.readLine());
while(T-- > 0) {
StringTokenizer st = new StringTokenizer(br.readLine());
int a = Integer.parseInt(st.nextToken());
int b = Integer.parseInt(st.nextToken());
String s = br.readLine();
for(char word : s.toCharArray()) {
char tmp = (char) (((a * (word - I_TO_C) + b) % MOD) + I_TO_C); // (a * X + B) % MOD 를 계산 후 문자형으로 변형
sb.append(tmp); // 변형된 문자를 버퍼에 담음
}
sb.append(NEW_LINE);
}
System.out.println(sb.toString()); // 결과 값 한번에 출력
}
}
|
<filename>dataset.py<gh_stars>1-10
#!/usr/bin/env python
import os
import numpy as np
import PIL.Image
import torch
from torch.utils import data
class MyData(data.Dataset):
"""
对训练集数据进行获取/处理操作
"""
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def __init__(self, root, transform=True):
super(MyData, self).__init__()
self.root = root
self._transform = transform
img_root = os.path.join(self.root, 'Image_after')
gt_root = os.path.join(self.root, 'Mask')
file_imgnames = os.listdir(img_root)
self.img_names = []
self.gt_names = []
self.names = []
# 只选择jpg的图像, 以及对应的真实标注
for i, name in enumerate(file_imgnames):
if not name.endswith('.jpg'):
continue
self.img_names.append(
os.path.join(img_root, name[:-4] + '.jpg')
)
self.gt_names.append(
os.path.join(gt_root, name[:-4] + '.png')
)
# 汇总最后保存的名字
self.names.append(name[:-4])
def __len__(self):
# 定义len(MyData)的效果
return len(self.img_names)
def __getitem__(self, index):
# 定义索引效果, 依次载入图像, 调整大小, 对真值图像进行二值化
img_file = self.img_names[index]
img = PIL.Image.open(img_file)
img = img.resize((224, 224))
img = np.array(img, dtype=np.uint8)
gt_file = self.gt_names[index]
gt = PIL.Image.open(gt_file)
gt = gt.resize((224, 224))
gt = np.array(gt, dtype=np.int32)
gt[gt != 0] = 1
# 为了防止输入的数据不是三个通道, 或者真实标注是大于一个通道.
if len(img.shape) < 3:
img = np.stack((img, img, img), axis=2)
if img.shape[2] > 3:
img = img[:, :, :3]
if len(gt.shape) > 2:
gt = gt[:, :, 0]
if self._transform:
img, gt = self.transform(img, gt)
return img, gt
else:
return img, gt
def transform(self, img, gt):
"""
对于图像数据进行处理.
图片数据归一化, 调整维度, 转化为tensor.
真实数据直接转化为tensor.
:param img: 训练集图片
:param gt: 真实标注
:return: 调整后的图片与真实标注
"""
img = img.astype(np.float64) / 255
img -= self.mean
img /= self.std
img = img.transpose(2, 0, 1)
# Creates a Tensor from a numpy.ndarray.
img = torch.from_numpy(img).float()
gt = torch.from_numpy(gt).float()
return img, gt
class MyTestData(data.Dataset):
"""
对测试集数据进行读取
root: director/to/images/
structure:
- root
- images
- images (images here)
- masks (ground truth)
"""
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def __init__(self, root, transform=True):
super(MyTestData, self).__init__()
self.root = root
self._transform = transform
# 仅获取验证集图像
img_root = os.path.join(self.root, 'Image')
file_names = os.listdir(img_root)
self.img_names = []
self.names = []
for i, name in enumerate(file_names):
if not name.endswith('.jpg'):
continue
self.img_names.append(
os.path.join(img_root, name[:-4] + '.jpg')
)
self.names.append(name[:-4])
def __len__(self):
return len(self.img_names)
def __getitem__(self, index):
# load image
img_file = self.img_names[index]
img = PIL.Image.open(img_file)
img_size = img.size
img = img.resize((224, 224))
img = np.array(img, dtype=np.uint8)
# 为了防止输入的数据不是三个通道, 或者真实标注是大于一个通道.
if len(img.shape) < 3:
img = np.stack((img, img, img), axis=2)
if img.shape[2] > 3:
img = img[:, :, :3]
if self._transform:
img = self.transform(img)
return img, self.names[index], img_size
else:
return img, self.names[index], img_size
def transform(self, img):
img = img.astype(np.float64) / 255
img -= self.mean
img /= self.std
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).float()
return img
|
<gh_stars>10-100
import asyncio
import os
import re
import time
from bleak import discover, BleakClient
from .exceptions import MirobotError, MirobotAlarm, MirobotReset, InvalidBluetoothAddressError
os_is_posix = os.name == 'posix'
def chunks(lst, n):
"""Yield successive n-sized chunks from lst.
Parameters
----------
lst : Collection
An iterable of items.
n : int
The size of the chunks to split the list into.
Returns
-------
result : Generator[List]
A generator that yields each chunk of the list.
"""
for i in range(0, len(lst), n):
yield lst[i:i + n]
class BluetoothLowEnergyInterface:
"""
An interface for talking to the low-energy Bluetooth extender module for the Mirobot.
NOTE: This mode is inherently instable at the moment (@rirze, Thu 14 May 2020). Sometimes commands may not be parsed correctly, causing execution to fail on a misparsing error. While this happens rarely, users should be made aware of the potential exceptions that may arise. It is recommended to only use this connection when serial communication is unavailable.
"""
def __init__(self, mirobot, address=None, debug=False, logger=None, autofindaddress=True):
"""
Parameters
----------
mirobot : `mirobot.base_mirobot.BaseMirobot`
Mirobot object that this instance is attached to.
address : str
(Default value = None) Bluetooth address of the Mirobot's bluetooth extender module to connect to. If unknown, leave as `None` and this class will automatically scan and try to find the box on its own. If provided, it should be of the form `50:33:8B:L4:95:6X` (except on Apple products which use a format like `123JKDSF-F0E3-F96A-F0A3-64A68508A53C`)
debug : bool
(Default value = False) Whether to show debug statements in logger.
logger : Logger
(Default value = None) Logger instance to use for this class. Usually `mirobot.base_mirobot.BaseMirobot.logger`.
autofindaddress : bool
(Default value = True) Whether to automatically search for Mirobot's bluetooth module if `address` parameter is `None`.
Returns
-------
class : `mirobot.bluetooth_low_energy_interface.BluetoothLowEnergyInterface`
"""
self.mirobot = mirobot
if logger is not None:
self.logger = logger
self._debug = debug
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self._run_and_get(self._ainit())
async def _ainit(self, address=None, autofindaddress=True):
# if address was not passed in and autofindaddress is set to true,
# then autosearch for a bluetooth device
if not address:
if autofindaddress:
self.address = await self._find_address()
""" The default address to use when making connections. To override this on a individual basis, provide portname to each invocation of `BaseMirobot.connect`. """
self.logger.info(f"Using Bluetooth Address \"{self.address}\"")
else:
self.logger.exception(InvalidBluetoothAddressError('Must either provide a Bluetooth address or turn on autodiscovery!'))
else:
self.address = address
self.client = BleakClient(self.address, loop=self.loop)
def _run_and_get(self, coro):
return self.loop.run_until_complete(coro)
@property
def debug(self):
""" Whether to show debug statements in the logger. """
return self._debug
@debug.setter
def debug(self, value):
"""Set the new value for the `debug` property of `mirobot.bluetooth_low_energy_interface.BluetoothLowEnergyInterface`. Use as in `BluetoothLowEnergyInterface.setDebug(value)`.
Parameters
----------
value : bool
New value for `debug`
"""
self._debug = bool(value)
async def _find_address(self):
""" Try to find the Bluetooth Address automagically """
devices = await discover()
mirobot_bt = next((d for d in devices if d.name == 'QN-Mini6Axis'), None)
if mirobot_bt is None:
raise Exception('Could not find mirobot bt')
return mirobot_bt.address
def connect(self):
""" Connect to the Bluetooth Extender Box """
async def start_connection():
connection = await self.client.connect()
services = await self.client.get_services()
service = services.get_service("0000ffe0-0000-1000-8000-00805f9b34fb")
self.characteristics = [c.uuid for c in service.characteristics]
return connection
self.connection = self._run_and_get(start_connection())
def disconnect(self):
""" Disconnect from the Bluetooth Extender Box """
async def async_disconnect():
try:
await self.client.disconnect()
except AttributeError:
'''
File "/home/chronos/.local/lib/python3.7/site-packages/bleak/backends/bluezdbus/client.py", line 235, in is_connected
return await self._bus.callRemote(
AttributeError: 'NoneType' object has no attribute 'callRemote'
'''
# don\t know why it happens, it shouldn't and doesn't in normal async flow
# but if it complains that client._bus is None, then we're good, right...?
pass
self._run_and_get(async_disconnect())
@property
def is_connected(self):
""" Whether this class is connected to the Bluetooth Extender Box """
return self.connection
def send(self, msg, disable_debug=False, terminator=None, wait=True, wait_idle=True):
"""
Send a message to the Bluetooth Extender Box. Shouldn't be used by the end user.
Parameters
----------
msg : str
The message/instruction to send. A `\\r\\n` will be appended to this message.
disable_debug : bool
(Default value = False) Whether to disable debug statements on `idle`-state polling.
terminator : str
(Default value = `None`) Dummy variable for this method. This implementation will always use `\\r\\n` as the line terminator.
wait : bool
(Default value = True) Whether to wait for the command to return a `ok` response.
wait_idle :
(Default value = True) Whether to wait for the Mirobot to be in an `Idle` state before returning.
Returns
-------
msg : List[str] or bool
If `wait` is `True`, then return a list of strings which contains message output.
If `wait` is `False`, then return whether sending the message succeeded.
"""
self.feedback = []
self.ok_counter = 0
self.disable_debug = disable_debug
reset_strings = ['Using reset pos!']
def matches_eol_strings(terms, s):
for eol in terms:
if s.endswith(eol):
return True
return False
def notification_handler(sender, data):
data = data.decode()
data_lines = re.findall(r".*[\r\n]{0,1}", data)
for line in data_lines[:-1]:
if self._debug and not self.disable_debug:
self.logger.debug(f"[RECV] {repr(line)}")
if self.feedback and not self.feedback[-1].endswith('\r\n'):
self.feedback[-1] += line
else:
if self.feedback:
self.feedback[-1] = self.feedback[-1].strip('\r\n')
if 'error' in line:
self.logger.error(MirobotError(line.replace('error: ', '')))
if 'ALARM' in line:
self.logger.error(MirobotAlarm(line.split('ALARM: ', 1)[1]))
if matches_eol_strings(reset_strings, line):
self.logger.error(MirobotReset('Mirobot was unexpectedly reset!'))
self.feedback.append(line)
if self.feedback[-1] == 'ok\r\n':
self.ok_counter += 1
async def async_send(msg):
async def write(msg):
for c in self.characteristics:
await self.client.write_gatt_char(c, msg)
if wait:
for c in self.characteristics:
await self.client.start_notify(c, notification_handler)
for s in chunks(bytes(msg + '\r\n', 'utf-8'), 20):
await write(s)
if self._debug and not disable_debug:
self.logger.debug(f"[SENT] {msg}")
if wait:
while self.ok_counter < 2:
# print('waiting...', msg, self.ok_counter)
await asyncio.sleep(0.1)
if wait_idle:
# TODO: really wish I could recursively call `send(msg)` here instead of
# replicating logic. Alas...
orig_feedback = self.feedback
async def check_idle():
self.disable_debug = True
self.feedback = []
self.ok_counter = 0
await write(b'?\r\n')
while self.ok_counter < 2:
# print('waiting for idle...', msg, self.ok_counter)
await asyncio.sleep(0.1)
self.mirobot._set_status(self.mirobot._parse_status(self.feedback[0]))
await check_idle()
while self.mirobot.status.state != 'Idle':
# print(self.mirobot.status.state)
await check_idle()
# print('finished idle')
self.feedback = orig_feedback
for c in self.characteristics:
await self.client.stop_notify(c)
self._run_and_get(async_send(msg))
if self.feedback:
self.feedback[-1] = self.feedback[-1].strip('\r\n')
# BUG:
# the following bugs me so much, but I can't figure out why this is happening and needed:
# Instant subsequent calls to `send_msg` hang, for some reason.
# Like the second invocation doesn't start, it's gets stuck as `selector._poll` in asyncio
# Putting a small delay fixes this but why...???
if os_is_posix:
time.sleep(0.1)
return self.feedback
|
<gh_stars>0
import { combineEpics } from 'redux-observable'
import { todosEpic } from './ToDosEpic/index'
export default combineEpics(todosEpic)
|
import engine from 'store/src/store-engine';
const localStorage = require('store/storages/localStorage');
const localStorages = [localStorage];
export const localStore: StoreJsAPI = engine.createStore(localStorages);
// ########localStore###########
/**
* 保存数据
* @param key
* @param value
*/
export const localSave = (key: string, value: any) => {
localStore.set(key, value);
};
/**
* 获取值
* @param key
*/
export const localGet = (key: string) => localStore.get(key);
/**
* 删除数据
*/
export const localRemove = (key: string) => {
localStore.remove(key);
};
/**
* 删除所有
*/
export const localClearAll = () => {
localStore.clearAll();
};
|
#!/bin/bash
# extract-chromosomes-3 0.0.1
# Generated by dx-app-wizard.
#
# Basic execution pattern: Your app will run on a single machine from
# beginning to end.
#
# Your job's input variables (if any) will be loaded as environment
# variables before this script runs. Any array inputs will be loaded
# as bash arrays.
#
# Any code outside of main() (or any entry point you may add) is
# ALWAYS executed, followed by running the entry point itself.
#
# See https://wiki.dnanexus.com/Developer-Portal for tutorials on how
# to modify this file.
main() {
echo "Start to Download docker image in background..."
dx-docker pull artifacts/variationanalysis-app:${Image_Version} &>/dev/null &
mkdir -p /input/Sorted_Bam
mkdir -p /out/Filtered_BAM
echo "Value of Chromosome_List: '${Chromosome_List}'"
#download the sorted bam
echo "Downloading sorted BAM file '${Sorted_Bam_name}'"
dx download "${Sorted_Bam}" -o /input/Sorted_Bam/${Sorted_Bam_name}
echo "Downloading sorted BAM file '${Sorted_Bam_Index_name}'"
dx download "${Sorted_Bam_Index}" -o /input/Sorted_Bam/${Sorted_Bam_Index_name}
echo "Make sure Downloading the docker image has finished..."
dx-docker pull artifacts/variationanalysis-app:${Image_Version}
cpus=`grep physical /proc/cpuinfo |grep id|wc -l`
dx-docker run \
-v /input/:/input \
-v /out/:/out \
artifacts/variationanalysis-app:${Image_Version} \
bash -c "source ~/.bashrc; cd /out/; extract-chromosomes.sh ${cpus} /input/Sorted_Bam/${Sorted_Bam_name} \"${Chromosome_List}\""
mkdir -p $HOME/out
mkdir -p $HOME/out/Filtered_BAM
mkdir -p $HOME/out/Filtered_BAM_Index
mv /out/*-subset.bam $HOME/out/Filtered_BAM
mv /out/*-subset.bam.bai $HOME/out/Filtered_BAM_Index
echo "Files to publish"
ls -lrt $HOME/out
dx-upload-all-outputs --parallel
}
|
<reponame>julioxavierr/use-input-mask<filename>docs/Date.js
import React, { useRef } from 'react'
import useInputMask from '../src'
import { createAutoCorrectedDatePipe } from 'text-mask-addons'
const DateInput = props => {
const input = useRef(null)
const autoCorrectedDatePipe = createAutoCorrectedDatePipe('dd/mm/yyyy HH:MM')
const onChange = useInputMask({
input,
onChange: props.onChange,
mask: [/\d/, /\d/, '/', /\d/, /\d/, '/', /\d/, /\d/, /\d/, /\d/],
pipe: autoCorrectedDatePipe,
placeholder: 'Please enter a date',
keepCharPositions: true,
})
return <input {...props} ref={input} onChange={onChange} />
}
export default DateInput
|
<reponame>CarsonChu1/Team-Earth-App-Project<filename>BCFoodie/app/src/main/java/com/example/produceapp/MainActivity.java
package com.example.produceapp;
import androidx.appcompat.app.AppCompatActivity;
import androidx.recyclerview.widget.GridLayoutManager;
import androidx.recyclerview.widget.RecyclerView;
import android.os.Bundle;
import android.util.Log;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.IOException;
import java.io.InputStream;
import java.time.*;
public class MainActivity extends AppCompatActivity {
public static ProduceInfo lastProduce;//Instance of last selected produce
RecyclerView recyclerView;
RecyclerView.LayoutManager layoutManager;
RecyclerViewAdapter recyclerViewAdapter;
ProduceInfo[] produces;//All the produces
ProduceInfo[] filteredProduce;
int currentMonth;
char monthChar;
public void loadJSONFromAsset(String fileName) {
String json;
try {
InputStream is = this.getAssets().open(fileName);
int size = is.available();
byte[] buffer = new byte[size];
is.read(buffer);
is.close();
json = new String(buffer, "UTF-8");
JSONObject jObj = new JSONObject(json);//Creates JSON Object from the text
JSONArray jArr = jObj.getJSONArray("produce");//Gets the array from the JSON object
produces = new ProduceInfo[jArr.length()];
for(int i = 0; i < jArr.length(); i++){//Loop that assigns the values from the names
JSONObject produceObj = jArr.getJSONObject(i);
String name = produceObj.getString("name");
String descShort = produceObj.getString("descriptionShort");
String descLong = produceObj.getString("descriptionLong");
String image = produceObj.getString("imgURL");
String season = produceObj.getString("seasonRange");
String price = produceObj.getString("price");
ProduceInfo produce = new ProduceInfo( //Filling in the ProduceInfo class's constructor
name,
descShort,
descLong,
getResources().getIdentifier(image , "drawable", getPackageName()),//Gets the normal images
season,
price
);
produces[i] = produce;//produces array at position i
}
} catch (JSONException | IOException ex) {
Log.e("ERROR","BIG OOPS", ex);
}
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
loadJSONFromAsset("produce.json");//Loads and assigns the values into the variables
Instant instant = Instant.now();
currentMonth = Integer.parseInt(instant.toString().substring(5,7));
Log.i("dateSet", currentMonth+"");
if(currentMonth == 12 || currentMonth == 1 || currentMonth == 2)
monthChar = 48; //char value 0
if(currentMonth == 3 || currentMonth == 4 || currentMonth == 5)
monthChar = 49; // value 1
if(currentMonth == 6 || currentMonth == 7 || currentMonth == 8)
monthChar = 50; //value 2
if(currentMonth == 9 || currentMonth == 10 || currentMonth == 11)
monthChar = 51; //value 3
Log.i("monthVarSet", monthChar +"");
int filterLength = 0;
for(int i = 0; i<produces.length; i++){
if(produces[i].getSeason().contains(monthChar+"")){
filterLength++;
Log.i("monthFilter","success " + filterLength);}
}
filteredProduce = new ProduceInfo[filterLength];
int successfulHits = 0;
for(int i = 0; i<produces.length; i++){
if(produces[i].getSeason().contains(monthChar +"")){
filteredProduce[successfulHits] = produces[i];
successfulHits++;}
}
setContentView(R.layout.activity_main);
recyclerView = findViewById(R.id.recyclerView);
layoutManager = new GridLayoutManager(this, 1);
recyclerView.setLayoutManager(layoutManager);
recyclerViewAdapter = new RecyclerViewAdapter(filteredProduce, this);
recyclerView.setAdapter(recyclerViewAdapter);
recyclerView.setHasFixedSize(true);
}
}
|
const getUsers = (users) => {
return users.filter(user => user.age > 25);
};
//Example Usage
let users = [
{
"id": 1,
"name": "Bob",
"age": 25
},
{
"id": 2,
"name": "John",
"age": 55
},
{
"id": 3,
"name": "Alice",
"age": 32
}
];
let filteredUsers = getUsers(users);
console.log(filteredUsers);
// [
// {
// "id": 2,
// "name": "John",
// "age": 55
// },
// {
// "id": 3,
// "name": "Alice",
// "age": 32
// }
// ] |
<reponame>dbulaja98/ISA-2020-TEAM19
package com.pharmacySystem.model.medicine;
import com.pharmacySystem.model.pharmacy.Pharmacy;
import com.pharmacySystem.model.user.Patient;
import java.util.Date;
import javax.persistence.*;
@Entity
@Table(name = "medicineReservations")
public class MedicineReservation {
@Id
@SequenceGenerator(name = "medicineReservation_gen", sequenceName = "medicineReservation_seq", initialValue = 50, allocationSize = 1)
@GeneratedValue(strategy = GenerationType.SEQUENCE, generator = "medicineReservation_gen")
private long id;
@Column
private Date dueDate;
@Enumerated(EnumType.STRING)
private MedicineReservationStatus status;
@Column
private Date purchaseDate;
@ManyToOne(fetch = FetchType.EAGER, cascade = CascadeType.ALL)
private Pharmacy pharmacy;
@ManyToOne(fetch = FetchType.EAGER, cascade = CascadeType.ALL)
private MedicineQuantity medicineQuantity;
@ManyToOne(fetch = FetchType.EAGER, cascade = CascadeType.ALL)
private Patient patient;
@Column
private double totalPrice;
@Version
@Column(nullable = false)
private Long version;
public MedicineReservation() {
}
public MedicineReservation(long id, Date dueDate, MedicineReservationStatus status,
Date purchaseDate, Pharmacy pharmacy, MedicineQuantity medicineQuantity,
Patient patient, double totalPrice, Long version) {
super();
this.id = id;
this.dueDate = dueDate;
this.status = status;
this.purchaseDate = purchaseDate;
this.pharmacy = pharmacy;
this.medicineQuantity = medicineQuantity;
this.patient = patient;
this.totalPrice = totalPrice;
this.version = version;
}
public long getId() {
return id;
}
public void setId(long id) {
this.id = id;
}
public Date getDueDate() {
return dueDate;
}
public void setDueDate(Date dueDate) {
this.dueDate = dueDate;
}
public MedicineReservationStatus getStatus() {
return status;
}
public void setStatus(MedicineReservationStatus status) {
this.status = status;
}
public Date getPurchaseDate() {
return purchaseDate;
}
public void setPurchaseDate(Date purchaseDate) {
this.purchaseDate = purchaseDate;
}
public Pharmacy getPharmacy() {
return pharmacy;
}
public void setPharmacy(Pharmacy pharmacy) {
this.pharmacy = pharmacy;
}
public MedicineQuantity getMedicineQuantity() {
return medicineQuantity;
}
public void setMedicineQuantity(MedicineQuantity medicineQuantity) {
this.medicineQuantity = medicineQuantity;
}
public Patient getPatient() {
return patient;
}
public void setPatient(Patient patient) {
this.patient = patient;
}
public double getTotalPrice() {
return totalPrice;
}
public void setTotalPrice(double totalPrice) {
this.totalPrice = totalPrice;
}
public Long getVersion() {
return version;
}
public void setVersion(Long version) {
this.version = version;
}
} |
#!/bin/bash
set -xeuo pipefail
./load_sql.sh single-user-bootstrap.sql account-types.sql accounts.sql action-categories.sql
|
package com.example.basicbeans.proxy_print_obj_ref;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
@RestController
public class ProxyController {

    /*
     * Spring resolves these injections by field name when several MyPOJO
     * beans exist: each variable name is matched against a bean name.
     *
     * The prototype field deliberately carries a non-matching name, so
     * Spring's @Qualifier is used to narrow it down instead.
     */
    @Autowired
    MyPOJO singleton;

    @Autowired
    MyPOJO session;

    @Autowired @Qualifier("prototype")
    MyPOJO prototypeDifferentName;

    /** Singleton scope: every caller sees the same bean reference. */
    @GetMapping("/proxy/singleton")
    String singleton(){ return getResult(singleton); }

    /** Session scope: each user session gets its own bean instance. */
    @GetMapping("/proxy/session")
    String session() { return getResult(session); }

    /** Prototype scope: every invocation yields a brand new instance. */
    @GetMapping("/proxy/prototype")
    String prototype() { return getResult(prototypeDifferentName); }

    /** Renders the given bean reference three times, one per line. */
    private String getResult(Object obj){
        String line = String.valueOf(obj);
        return String.join("\n", line, line, line);
    }
}
|
#!/bin/sh
# Run the FARSIGHT nightly/continuous CTest dashboard scripts, logging each run
# separately. DISPLAY is exported for tests that need an X server — TODO confirm.
export DISPLAY=:0.0
/usr/local/bin/ctest -V -VV -S /Dashboards/farsight-trunk-nightly/DashboardScripts/farsight-ubuntu-1_gcc43_rel_static_nightly.cmake > /Dashboards/Logs/rel_static_nightly.log 2>&1
/usr/local/bin/ctest -V -VV -S /Dashboards/farsight-trunk-nightly/DashboardScripts/farsight-ubuntu-1_gcc43_dbg_static_nightly.cmake > /Dashboards/Logs/dbg_static_nightly.log 2>&1
# Fix: this run previously redirected to dbg_static_nightly.log, clobbering the
# log of the run above; give the vtkgit build its own log file.
/usr/local/bin/ctest -V -VV -S /Dashboards/farsight-trunk-nightly/DashboardScripts/farsight-ubuntu-1_gcc43_dbg_static_nightly_vtkgit.cmake > /Dashboards/Logs/dbg_static_nightly_vtkgit.log 2>&1
/usr/local/bin/ctest -V -VV -S /Dashboards/farsight-trunk-continuous/DashboardScripts/farsight-ubuntu-1_gcc43_dbg_static_continuous.cmake > /Dashboards/Logs/dbg_static_continuous.log 2>&1
|
# Deploy the currently checked-out branch to staging.
# The helper presumably rewrites config/deploy/staging.rb to point at this
# branch (TODO confirm); the checkout below undoes that local edit.
function deploy_current_branch_to_staging() {
    capistrano_make_current_branch_deploy_to_staging || return $?
    local deploy_status=0
    # Fix: previously a failed deploy returned immediately and left the edited
    # staging.rb behind; now the config is restored on both success and failure.
    bundle exec cap staging deploy || deploy_status=$?
    git checkout config/deploy/staging.rb || return $?
    return $deploy_status
}
|
<reponame>acouvreur/skeleton-generator
package org.sklsft.generator.bc.validation.rules.impl;
import org.sklsft.generator.bc.validation.rules.ProjectMetaDataRuleChecker;
import org.sklsft.generator.model.metadata.PackageMetaData;
import org.sklsft.generator.model.metadata.ProjectMetaData;
import org.sklsft.generator.model.metadata.TableMetaData;
import org.sklsft.generator.model.metadata.validation.ProjectValidationReport;
/**
 * Rule checker that validates every table's cardinality: it must not be
 * negative and must not exceed the table's column count.
 */
public class InvalidCardinalityChecker implements ProjectMetaDataRuleChecker {

    /**
     * Walks all packages of the project and appends one error to the report
     * for each cardinality violation found; the report is returned for chaining.
     */
    @Override
    public ProjectValidationReport checkRules(ProjectMetaData project, ProjectValidationReport report) {
        for (PackageMetaData pkg : project.getAllPackages()) {
            if (pkg.getTables() == null) {
                continue; // package declares no tables: nothing to validate
            }
            for (TableMetaData table : pkg.getTables()) {
                if (table.getCardinality() < 0) {
                    report.addError(table, null, "Cardinality must be positive");
                }
                if (table.getCardinality() > table.getColumns().size()) {
                    report.addError(table, null, "Cardinality must be lower or equal to the number of columns");
                }
            }
        }
        return report;
    }
}
|
<gh_stars>0
#include "gfx/legato/generated/le_gen_scheme.h"
/* Generated UI color scheme (red accent): one row of 16 scheme colors per
 * supported framebuffer format, identified by the trailing comments.
 * NOTE(review): all-zero rows appear to be unused formats — confirm against
 * le_gen_scheme.h before editing by hand; this file is generated. */
const leScheme RedScheme =
{
{
{ { 0x36, 0xCF, 0xFF, 0x80, 0x40, 0x36, 0xE1, 0x80, 0xFF, 0xE1, 0xCF, 0x36, 0x12, 0xFF, 0xE1, 0x91 } }, // GS_8
{ { 0xE0, 0xBA, 0xFF, 0x92, 0x49, 0xE0, 0xDB, 0x92, 0xFF, 0xDB, 0xBA, 0xE0, 0x3, 0xFF, 0xDB, 0x92 } }, // RGB_332
{ { 0xF800, 0xC67A, 0xFFFF, 0x8410, 0x4208, 0xF800, 0xD71C, 0x8410, 0xFFFF, 0xD71C, 0xC67A, 0xF800, 0x1F, 0xFFFF, 0xD71C, 0x8C92 } }, // RGB_565
{ { 0xF801, 0xC675, 0xFFFF, 0x8421, 0x4211, 0xF801, 0xD739, 0x8421, 0xFFFF, 0xD739, 0xC675, 0xF801, 0x3F, 0xFFFF, 0xD739, 0x8CA5 } }, // RGBA_5551
{ { 0xFF0000, 0xC8D0D4, 0xFFFFFF, 0x808080, 0x404040, 0xFF0000, 0xD6E3E7, 0x808080, 0xFFFFFF, 0xD6E3E7, 0xC8D0D4, 0xFF0000, 0xFF, 0xFFFFFF, 0xD6E3E7, 0x8C9294 } }, // RGB_888
{ { 0xFF0000FF, 0xC8D0D4FF, 0xFFFFFFFF, 0x808080FF, 0x404040FF, 0xFF0000FF, 0xD6E3E7FF, 0x808080FF, 0xFFFFFFFF, 0xD6E3E7FF, 0xC8D0D4FF, 0xFF0000FF, 0xFFFF, 0xFFFFFFFF, 0xD6E3E7FF, 0x8C9294FF } }, // RGBA_8888
{ { 0xFFFF0000, 0xFFC8D0D4, 0xFFFFFFFF, 0xFF808080, 0xFF404040, 0xFFFF0000, 0xFFD6E3E7, 0xFF808080, 0xFFFFFFFF, 0xFFD6E3E7, 0xFFC8D0D4, 0xFFFF0000, 0xFF0000FF, 0xFFFFFFFF, 0xFFD6E3E7, 0xFF8C9294 } }, // ARGB_8888
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0x9, 0xFC, 0xF, 0x8, 0xED, 0x9, 0xFE, 0x8, 0xF, 0xFE, 0xFC, 0x9, 0xC, 0xF, 0xFE, 0xF6 } }, // INDEX_8
},
};
/* Generated UI color scheme (green accent); same per-format layout as RedScheme. */
const leScheme GreenScheme =
{
{
{ { 0xB6, 0xCF, 0xFF, 0x80, 0x40, 0xB6, 0xE1, 0x80, 0xFF, 0xE1, 0xCF, 0xB6, 0x12, 0xFF, 0xE1, 0x91 } }, // GS_8
{ { 0x1C, 0xBA, 0xFF, 0x92, 0x49, 0x1C, 0xDB, 0x92, 0xFF, 0xDB, 0xBA, 0x1C, 0x3, 0xFF, 0xDB, 0x92 } }, // RGB_332
{ { 0x7E0, 0xC67A, 0xFFFF, 0x8410, 0x4208, 0x7E0, 0xD71C, 0x8410, 0xFFFF, 0xD71C, 0xC67A, 0x7E0, 0x1F, 0xFFFF, 0xD71C, 0x8C92 } }, // RGB_565
{ { 0x7C1, 0xC675, 0xFFFF, 0x8421, 0x4211, 0x7C1, 0xD739, 0x8421, 0xFFFF, 0xD739, 0xC675, 0x7C1, 0x3F, 0xFFFF, 0xD739, 0x8CA5 } }, // RGBA_5551
{ { 0xFF00, 0xC8D0D4, 0xFFFFFF, 0x808080, 0x404040, 0xFF00, 0xD6E3E7, 0x808080, 0xFFFFFF, 0xD6E3E7, 0xC8D0D4, 0xFF00, 0xFF, 0xFFFFFF, 0xD6E3E7, 0x8C9294 } }, // RGB_888
{ { 0xFF00FF, 0xC8D0D4FF, 0xFFFFFFFF, 0x808080FF, 0x404040FF, 0xFF00FF, 0xD6E3E7FF, 0x808080FF, 0xFFFFFFFF, 0xD6E3E7FF, 0xC8D0D4FF, 0xFF00FF, 0xFFFF, 0xFFFFFFFF, 0xD6E3E7FF, 0x8C9294FF } }, // RGBA_8888
{ { 0xFF00FF00, 0xFFC8D0D4, 0xFFFFFFFF, 0xFF808080, 0xFF404040, 0xFF00FF00, 0xFFD6E3E7, 0xFF808080, 0xFFFFFFFF, 0xFFD6E3E7, 0xFFC8D0D4, 0xFF00FF00, 0xFF0000FF, 0xFFFFFFFF, 0xFFD6E3E7, 0xFF8C9294 } }, // ARGB_8888
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0xA, 0xFC, 0xF, 0x8, 0xED, 0xA, 0xFE, 0x8, 0xF, 0xFE, 0xFC, 0xA, 0xC, 0xF, 0xFE, 0xF6 } }, // INDEX_8
},
};
/* Generated UI color scheme (white base); same per-format layout as RedScheme. */
const leScheme WhiteScheme =
{
{
{ { 0xFF, 0xCF, 0xFF, 0x80, 0x40, 0xFF, 0xE1, 0x80, 0xFF, 0xE1, 0xCF, 0x0, 0x12, 0xFF, 0xE1, 0x91 } }, // GS_8
{ { 0xFF, 0xBA, 0xFF, 0x92, 0x49, 0xFF, 0xDB, 0x92, 0xFF, 0xDB, 0xBA, 0x0, 0x3, 0xFF, 0xDB, 0x92 } }, // RGB_332
{ { 0xFFFF, 0xC67A, 0xFFFF, 0x8410, 0x4208, 0xFFFF, 0xD71C, 0x8410, 0xFFFF, 0xD71C, 0xC67A, 0x0, 0x1F, 0xFFFF, 0xD71C, 0x8C92 } }, // RGB_565
{ { 0xFFFF, 0xC675, 0xFFFF, 0x8421, 0x4211, 0xFFFF, 0xD739, 0x8421, 0xFFFF, 0xD739, 0xC675, 0x1, 0x3F, 0xFFFF, 0xD739, 0x8CA5 } }, // RGBA_5551
{ { 0xFFFFFF, 0xC8D0D4, 0xFFFFFF, 0x808080, 0x404040, 0xFFFFFF, 0xD6E3E7, 0x808080, 0xFFFFFF, 0xD6E3E7, 0xC8D0D4, 0x0, 0xFF, 0xFFFFFF, 0xD6E3E7, 0x8C9294 } }, // RGB_888
{ { 0xFFFFFFFF, 0xC8D0D4FF, 0xFFFFFFFF, 0x808080FF, 0x404040FF, 0xFFFFFFFF, 0xD6E3E7FF, 0x808080FF, 0xFFFFFFFF, 0xD6E3E7FF, 0xC8D0D4FF, 0xFF, 0xFFFF, 0xFFFFFFFF, 0xD6E3E7FF, 0x8C9294FF } }, // RGBA_8888
{ { 0xFFFFFFFF, 0xFFC8D0D4, 0xFFFFFFFF, 0xFF808080, 0xFF404040, 0xFFFFFFFF, 0xFFD6E3E7, 0xFF808080, 0xFFFFFFFF, 0xFFD6E3E7, 0xFFC8D0D4, 0xFF000000, 0xFF0000FF, 0xFFFFFFFF, 0xFFD6E3E7, 0xFF8C9294 } }, // ARGB_8888
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0xF, 0xFC, 0xF, 0x8, 0xED, 0xF, 0xFE, 0x8, 0xF, 0xFE, 0xFC, 0x0, 0xC, 0xF, 0xFE, 0xF6 } }, // INDEX_8
},
};
/* Generated UI color scheme (blue accent); same per-format layout as RedScheme. */
const leScheme BlueScheme =
{
{
{ { 0x12, 0xCF, 0xFF, 0x80, 0x40, 0x12, 0xE1, 0x80, 0xFF, 0xE1, 0xCF, 0x12, 0x12, 0xFF, 0xE1, 0x91 } }, // GS_8
{ { 0x3, 0xBA, 0xFF, 0x92, 0x49, 0x3, 0xDB, 0x92, 0xFF, 0xDB, 0xBA, 0x3, 0x3, 0xFF, 0xDB, 0x92 } }, // RGB_332
{ { 0x1F, 0xC67A, 0xFFFF, 0x8410, 0x4208, 0x1F, 0xD71C, 0x8410, 0xFFFF, 0xD71C, 0xC67A, 0x1F, 0x1F, 0xFFFF, 0xD71C, 0x8C92 } }, // RGB_565
{ { 0x3F, 0xC675, 0xFFFF, 0x8421, 0x4211, 0x3F, 0xD739, 0x8421, 0xFFFF, 0xD739, 0xC675, 0x3F, 0x3F, 0xFFFF, 0xD739, 0x8CA5 } }, // RGBA_5551
{ { 0xFF, 0xC8D0D4, 0xFFFFFF, 0x808080, 0x404040, 0xFF, 0xD6E3E7, 0x808080, 0xFFFFFF, 0xD6E3E7, 0xC8D0D4, 0xFF, 0xFF, 0xFFFFFF, 0xD6E3E7, 0x8C9294 } }, // RGB_888
{ { 0xFFFF, 0xC8D0D4FF, 0xFFFFFFFF, 0x808080FF, 0x404040FF, 0xFFFF, 0xD6E3E7FF, 0x808080FF, 0xFFFFFFFF, 0xD6E3E7FF, 0xC8D0D4FF, 0xFFFF, 0xFFFF, 0xFFFFFFFF, 0xD6E3E7FF, 0x8C9294FF } }, // RGBA_8888
{ { 0xFF0000FF, 0xFFC8D0D4, 0xFFFFFFFF, 0xFF808080, 0xFF404040, 0xFF0000FF, 0xFFD6E3E7, 0xFF808080, 0xFFFFFFFF, 0xFFD6E3E7, 0xFFC8D0D4, 0xFF0000FF, 0xFF0000FF, 0xFFFFFFFF, 0xFFD6E3E7, 0xFF8C9294 } }, // ARGB_8888
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } },
{ { 0xC, 0xFC, 0xF, 0x8, 0xED, 0xC, 0xFE, 0x8, 0xF, 0xFE, 0xFC, 0xC, 0xC, 0xF, 0xFE, 0xF6 } }, // INDEX_8
},
};
|
#!/usr/bin/env bash
# Move the API stub modules from tests/ to the repository root for the duration
# of the py.test run, then move them back and propagate py.test's exit code.
echo "Running $0..."

test_files=(
    __init__.py
    sublime.py
    sublime_plugin.py
)

restore_test_files() {
    for file in "${test_files[@]}"; do
        # Only move back files that were actually moved out.
        [ -e "${file}" ] && mv "${file}" ./tests/
    done
}
# Fix: restore the stubs even if the run is interrupted (Ctrl-C, kill), not
# only after py.test returns normally. The trap preserves the exit status.
trap restore_test_files EXIT

for file in "${test_files[@]}"; do
    mv "./tests/${file}" ./
done

py.test
exitcode=$?

exit $exitcode
|
<gh_stars>1-10
// SPDX-License-Identifier: Apache-2.0
// YAPION
// Copyright (C) 2019,2020 yoyosource
package yapion.serializing.serializer.object.other;
import yapion.annotations.deserialize.YAPIONLoadExclude;
import yapion.annotations.serialize.YAPIONSaveExclude;
import yapion.hierarchy.typegroups.YAPIONAnyType;
import yapion.hierarchy.types.YAPIONObject;
import yapion.hierarchy.types.YAPIONValue;
import yapion.serializing.InternalSerializer;
import yapion.serializing.data.DeserializeData;
import yapion.serializing.data.SerializeData;
import yapion.serializing.serializer.SerializerImplementation;
import static yapion.utils.IdentifierUtils.ENUM_IDENTIFIER;
import static yapion.utils.IdentifierUtils.TYPE_IDENTIFIER;
@YAPIONSaveExclude(context = "*")
@YAPIONLoadExclude(context = "*")
@SuppressWarnings({"java:S1192"})
@SerializerImplementation
public class EnumSerializer implements InternalSerializer<Enum<?>> {
@Override
public String type() {
return "java.lang.Enum";
}
@Override
public YAPIONAnyType serialize(SerializeData<Enum<?>> serializeData) {
YAPIONObject yapionObject = new YAPIONObject();
yapionObject.add(TYPE_IDENTIFIER, type());
yapionObject.add(ENUM_IDENTIFIER, serializeData.object.getClass().getTypeName());
yapionObject.add("value", serializeData.object.name());
yapionObject.add("ordinal", serializeData.object.ordinal());
return yapionObject;
}
@Override
public Enum<?> deserialize(DeserializeData<? extends YAPIONAnyType> deserializeData) {
YAPIONObject yapionObject = (YAPIONObject) deserializeData.object;
String type = yapionObject.getValue(ENUM_IDENTIFIER, "").get();
String enumType = yapionObject.getValue("value", "").get();
int ordinal = -1;
if (yapionObject.getValue("ordinal", 0) != null) {
ordinal = yapionObject.getValue("ordinal", 0).get();;
}
try {
if (!Class.forName(type).isEnum()) return null;
Enum<?>[] enums = (Enum<?>[]) Class.forName(type).getEnumConstants();
if (ordinal >= 0 && ordinal < enums.length && enums[ordinal].name().equals(enumType)) {
return enums[ordinal];
}
for (Enum<?> e : enums) {
if (e.name().equals(enumType)) {
yapionObject.add("ordinal", new YAPIONValue<>(e.ordinal()));
return e;
}
}
} catch (ClassNotFoundException e) {
// Ignored
}
return null;
}
} |
#!/bin/bash
# Generate a fresh 2048-bit RSA key pair and write it into uaa_config.yml as
# a YAML fragment with JWT signing and verification keys.
openssl genrsa -out privkey.pem 2048
openssl rsa -pubout -in privkey.pem -out pubkey.pem
SIGNING_KEY=$(cat privkey.pem)
VERIFICATION_KEY=$(cat pubkey.pem)
# awk re-indents each PEM line so it nests under the YAML block scalars.
# NOTE(review): indentation widths below were reconstructed — confirm they
# match the consuming application's expected YAML layout.
JWT_SIGNING_KEYS=$(cat <<EOF
jwt:
  token:
    signing-key: |
$(echo "$SIGNING_KEY" | awk '{printf "      %s\n", $0}')
    verification-key: |
$(echo "$VERIFICATION_KEY" | awk '{printf "      %s\n", $0}')
EOF
)
echo "$JWT_SIGNING_KEYS" > uaa_config.yml
# Remove the loose key files; only uaa_config.yml keeps the material.
rm privkey.pem
rm pubkey.pem
|
# Load the individual zsh configuration modules from $BS_ZSH_BASE.
# NOTE(review): ordering may matter if later files override earlier
# aliases/functions — confirm before reordering.
source $BS_ZSH_BASE/zsh-config/dirmark.sh
source $BS_ZSH_BASE/zsh-config/chinese_characters_adapter.sh
source $BS_ZSH_BASE/zsh-config/find.sh
source $BS_ZSH_BASE/zsh-config/git.sh
source $BS_ZSH_BASE/zsh-config/grep.sh
source $BS_ZSH_BASE/zsh-config/misc.sh
source $BS_ZSH_BASE/zsh-config/tools.sh
source $BS_ZSH_BASE/zsh-config/alias.sh
source $BS_ZSH_BASE/zsh-config/fzf.sh
source $BS_ZSH_BASE/software/qiniu/qiniu.sh
# Load every helper function file.
for f in $BS_ZSH_BASE/zsh-config/functions/*.sh; do source $f; done
# Script only for mac
source $BS_ZSH_BASE/zsh-config/platform.mac.sh
source $BS_ZSH_BASE/zsh-config/personalized.sh
source $BS_ZSH_BASE/zsh-config/colors.sh
|
<filename>NChart3D_OSX.framework/Versions/A/Headers/NChartDataSmoother.h
/**
* This file is the part of NChart3D Framework
* http://www.nchart3d.com
*
* File: NChartDataSmoother.h
* Version: "2.9.1"
*
* Copyright (C) 2017 Nulana LTD. All Rights Reserved.
*/
#import "NChartTypes.h"
/**
 * The NChartDataSmoother class provides the basic data smoother used to create smooth charts from only a few points.
 */
NCHART3D_EXPORT @interface NChartDataSmoother : NSObject

/**
 * Create a new instance of the data smoother.
 */
+ (id)dataSmoother;

/**
 * Resolution of the spline. Resolution is the number of subdivisions by the step dimension for each segment.
 * The default value is 32. The allowed values are [2 .. 1024].
 */
@property (nonatomic, assign) NSInteger resolution;

/**
 * Key of the value that should be interpreted as step dimension. Typically it is NChartValueX (which is the default
 * value), however for bar series it should be NChartValueY.
 */
@property (nonatomic, assign) NChartValue stepDimension;

/**
 * Key of the value that should be interpreted as height dimension. Typically it is NChartValueY (which is the default
 * value), however for bar series it should be NChartValueX.
 */
@property (nonatomic, assign) NChartValue valueDimension;

@end
|
<gh_stars>1-10
package router
import (
"fmt"
"github.com/raythorn/zebra/context"
"github.com/raythorn/zebra/oss"
"regexp"
"strings"
)
// Route binds a URL pattern (and its compiled regexp form) to the handlers
// registered for it, keyed by HTTP method.
type Route struct {
	pattern string         // literal pattern, or regexp source once parameters were found
	regexp  *regexp.Regexp // compiled form of pattern, built by regexpCompile
	actions map[string]Handler // HTTP method (or "ANY") -> handler
	group   *Group
	oss     *oss.Oss
}
// newRoute returns an empty route with an initialized action table; all other
// fields start at their zero values.
func newRoute() *Route {
	route := &Route{}
	route.actions = make(map[string]Handler)
	return route
}
// match reports whether ctx's request is served by this route: the request
// method must have a registered handler (or an "ANY" handler), and the URL
// must equal the pattern or fully match its compiled regexp. Named capture
// groups are copied into the context as parameters.
func (r *Route) match(ctx *context.Context) bool {
	if _, ok := r.actions[ctx.Method()]; !ok {
		if _, ok := r.actions["ANY"]; !ok {
			return false
		}
	}
	if ctx.URL() == r.pattern {
		return true
	}
	// NOTE(review): assumes regexpCompile ran for this route (r.regexp non-nil)
	// — confirm at registration sites, otherwise this panics.
	matches := r.regexp.FindStringSubmatch(ctx.URL())
	if len(matches) > 0 && matches[0] == ctx.URL() {
		for i, name := range r.regexp.SubexpNames() {
			// log.Println(name)
			if len(name) > 0 {
				ctx.Set(name, matches[i])
			}
		}
		return true
	}
	return false
}
// regexpCompile rewrites ":name" path segments into named capture groups
// (":id" -> "(?P<id>[^/#?]+)") and compiles the result with an optional
// trailing slash appended.
func (r *Route) regexpCompile() {
	routeExp := regexp.MustCompile(`:[^/#?()\.\\]+`)
	r.pattern = routeExp.ReplaceAllStringFunc(r.pattern, func(m string) string {
		return fmt.Sprintf(`(?P<%s>[^/#?]+)`, m[1:])
	})
	pattern := r.pattern
	if !strings.HasSuffix(pattern, `\/?`) {
		pattern += `\/?`
	}
	r.regexp = regexp.MustCompile(pattern)
	// Only parameterized patterns keep the regexp source as r.pattern; purely
	// literal routes keep matching via string equality in match().
	if strings.Contains(r.pattern, "(?P") {
		r.pattern = pattern
	}
}
// cleanPath is the URL version of path.Clean, it returns a canonical URL path
// for example, eliminating . and .. elements.
//
// The following rules are applied iteratively until no further processing can
// be done:
//	1. Replace multiple slashes with a single slash.
//	2. Eliminate each . path name element (the current directory).
//	3. Eliminate each inner .. path name element (the parent directory)
//	   along with the non-.. element that precedes it.
//	4. Eliminate .. elements that begin a rooted path:
//	   that is, replace "/.." by "/" at the beginning of a path.
//	5. Omit regexp characters in the path, all regexp will be in a pair of "()"
//	6. Eliminate the trailing slash
//
// If the result of this process is an empty string, "/" is returned
func cleanPath(pattern string) string {
	path := pattern
	if path == "" {
		return "/"
	}

	// read/write implement an in-place compaction over the path bytes; buf is
	// allocated lazily, the first time an edit becomes necessary.
	read := 1
	write := 1
	size := len(path)
	var buf []byte = nil
	if path[0] != '/' {
		buf = make([]byte, size+1)
		// Must start with a single slash
		buf[0] = '/'
		read = 0
	}

	for read < size {
		switch {
		case path[read] == '/':
			//Eliminate trailing slash and multiple slash
			read++
		case path[read] == '.' && (read+1 == size || path[read+1] == '/'):
			//Eliminate trailing '.'
			read++
		case path[read] == '.' && path[read+1] == '.' && (read+2 == size || path[read+2] == '/'):
			// ".." element: back the write cursor up past the previous segment.
			// NOTE(review): the buf == nil branch inspects path[write], which only
			// mirrors the output while no edit has happened yet — confirm.
			read += 2
			if buf == nil {
				for write > 1 && path[write] != '/' {
					write--
				}
			} else {
				for write > 1 && buf[write] != '/' {
					write--
				}
			}
		default:
			if buf == nil && read != write {
				buf = make([]byte, size+1)
				copy(buf, path[:write])
			}
			if write > 1 && buf != nil {
				buf[write] = '/'
				write++
			}
			// "(...)" regexp groups are copied through verbatim, slashes included.
			// NOTE(review): wildcard is never reset after ')', so the rest of the
			// string (including '/') keeps being consumed — confirm this is intended.
			wildcard := path[read] == '('
			for read < size && ((wildcard && path[read] != ')') || path[read] != '/') {
				if buf != nil {
					buf[write] = path[read]
				}
				read++
				write++
			}
		}
	}

	if buf != nil {
		return string(buf[:write])
	} else {
		return string(pattern[:write])
	}
}
|
<reponame>kh1iu/vrl
import logging
import sys
sys.path.append('..')
import os
import argparse
import numpy as np
import tensorflow as tf
import ipdb
from lib.vis_utils import *
from lib.logging_utils import *
from lib.common_utils import *
from lib.data_utils import *
from data.load_data import *
import model.vrl as vrl
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="model name")
    parser.add_argument("-e", "--exp", type=int, default=0,
                        help="which experiment to run")
    args = parser.parse_args()

    # Base training configuration shared by every experiment branch below.
    setup = {
        'max_iter': 3e5,
        'batch_size': 100,
        'lr_init': 4e-4,
        'lr_decay_step': 1e5,
        'use_rpda': True
    }

    # Each experiment selects a dataset, latent size, relation selection and
    # model variant; the commented-out lines are alternative configurations.
    # MNIST 3-rel
    if args.exp == 0:
        setup['data_file'] = 'mnist'
        setup['z_dim'] = 2
        rel_sel = {0:list(np.random.permutation([0,1,2,3,4])[:3])}
        setup['relation_sel'] = {d:rel_sel for d in range(10)}
        setup['vrl_model'] = vrl.vrl2D_mlp_bce
        # setup['vrl_model'] = vrl.vrl2D_gumbel_bce
        # setup['use_rpda'] = False
    # MNIST 5-rel
    elif args.exp == 1:
        setup['data_file'] = 'mnist'
        setup['z_dim'] = 2
        rel_sel = {0:list(np.random.permutation([0,1,2,3,4])[:5])}
        setup['relation_sel'] = {d:rel_sel for d in range(10)}
        setup['vrl_model'] = vrl.vrl2D_mlp_bce
        # setup['vrl_model'] = vrl.vrl2D_gumbel_bce
    # Omniglot 3-rel
    elif args.exp == 2:
        setup['data_file'] = 'omniglot'
        setup['z_dim'] = 2
        rel_sel = {0:list(np.random.permutation([0,1,2,3,4])[:3])}
        setup['relation_sel'] = rel_sel
        setup['vrl_model'] = vrl.vrl2D_mlp_bce
        # setup['vrl_model'] = vrl.vrl2D_gumbel_bce
        # setup['use_rpda'] = False
    # Omniglot 5-rel
    elif args.exp == 3:
        setup['data_file'] = 'omniglot'
        setup['z_dim'] = 2
        rel_sel = {0:list(np.random.permutation([0,1,2,3,4])[:5])}
        setup['relation_sel'] = rel_sel
        setup['vrl_model'] = vrl.vrl2D_mlp_bce
        # setup['vrl_model'] = vrl.vrl2D_gumbel_bce
    # MNIST coupled-rel
    elif args.exp == 4:
        setup['data_file'] = 'mnist'
        setup['z_dim'] = 2
        setup['relation_sel'] = {d:{0:[d//2]} for d in range(10)}
        rel_sel = {0:list(np.random.permutation([0,1,2,3,4])[:5])}
        setup['test_relation_sel'] = {d:rel_sel for d in range(10)}
        setup['vrl_model'] = vrl.vrl2D_mlp_bce
    # Yale facial expression
    elif args.exp == 5:
        setup['data_file'] = 'yale_expression'
        setup['z_dim'] = 2
        setup['relation_sel'] = [1,2,3]
        setup['vrl_model'] = vrl.vrl2D_mlp_mse
    # Yale illumination condition
    elif args.exp == 6:
        setup['data_file'] = 'yale_illumination'
        setup['z_dim'] = 2
        setup['relation_sel'] = [0, 1, 2, 3]
        setup['vrl_model'] = vrl.vrl2D_mlp_mse
    # RAVDESS
    elif args.exp == 7:
        setup['data_file'] = 'ravdess'
        setup['z_dim'] = 2
        setup['relation_sel'] = [0,1,2]
        setup['vrl_model'] = vrl.vrl2D_mlp_mse
    # MNIST 10-rel
    elif args.exp == 8:
        setup['data_file'] = 'mnist'
        setup['z_dim'] = 2
        setup['relation_sel'] = {d:{s:[0,1,2,3,4,5,6,7,8,9] if s in [0,1,2,3,4] else [0,1,2,3,4,10,11,12,13,14] for s in [0,1,2,3,4,10,11,12,13,14]} for d in range(10)}
        setup['vrl_model'] = vrl.vrl2D_mlp_bce
        setup['n_hidden'] = 1024
    # MNIST continuous-rel
    elif args.exp == 9:
        setup['data_file'] = 'mnist'
        setup['z_dim'] = 2 #3
        setup['relation_sel'] = 'continuous'
        setup['vrl_model'] = vrl.vrl2D_cnn_bce
    else:
        raise ValueError('Unrecognized data file!!')
    setup['model_name'] = args.model

    # Output directories for checkpoints and sample images.
    MODEL_PATH = os.path.join('saved_models', args.model)
    if not os.path.isdir(MODEL_PATH):
        os.makedirs(MODEL_PATH)
    SAMPLE_PATH = os.path.join(MODEL_PATH, 'sample')
    if not os.path.isdir(SAMPLE_PATH):
        os.makedirs(SAMPLE_PATH)

    # Setup logging
    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    setup_console_logger(logger=logger, level='info')
    setup_file_logger(fn=os.path.join(MODEL_PATH, 'training.log'),
                      logger=logger,
                      level='info')

    # Load data
    tr_iter, vaX, rpda_aug = load_data(setup['data_file'],
                                       setup['batch_size'],
                                       setup['relation_sel'])
    setup['image_patch_size'] = list(vaX.shape[1:])
    # Save an 8x8 grid of validation samples from each of the two channels.
    # NOTE(review): assumes vaX's last axis holds (at least) two channels —
    # confirm against load_data.
    vis_ny, vis_nx = 8, 8
    grayscale_grid_vis(vaX[:vis_ny*vis_nx,...,1], vis_ny, vis_nx,
                       save_path=os.path.join(SAMPLE_PATH, 'sample_img_c2.png'))
    grayscale_grid_vis(vaX[:vis_ny*vis_nx,...,0], vis_ny, vis_nx,
                       save_path=os.path.join(SAMPLE_PATH, 'sample_img_c1.png'))

    # Start training
    config = tf.compat.v1.ConfigProto()
    with tf.compat.v1.Session(config = config) as sess:
        model = setup['vrl_model'](sess, setup)
        timer = Stopwatch()
        eval_steps = 10000
        tr_loss = []
        timer.start()
        while model.num_updates() < setup['max_iter']:
            imb, idx = next(tr_iter)
            # relation-preserving data augmentation
            imb_aug1 = rpda_aug(imb)
            imb_aug2 = rpda_aug(imb)
            loss_value = model.train(imb = imb_aug1,
                                     c1 = imb_aug2[...,:1],
                                     c2 = imb_aug2[...,1:])
            tr_loss.append(loss_value)
            # evaluation
            if model.num_updates() % eval_steps == 0:
                # Log mean/std of the losses accumulated since the last report.
                tr_loss_avg = np.array(tr_loss).mean()
                tr_loss_std = np.array(tr_loss).std()
                tr_loss = []
                h, m, s = timer('lapse', 'hms')
                msg = (f"[{args.model}]: "
                       f"Time:{int(h):d}:{int(m):02d}:{int(s):02d}, "
                       f"Update:{model.num_updates()}, "
                       f"Tr_loss:{tr_loss_avg:+.2f} +/- {tr_loss_std:.2f}")
                logger.info(msg)
                model.saver.save_checkpoint(model.num_updates())
                # Save validation reconstructions for visual inspection.
                _, rx_vax = model.validate(vaX)
                grayscale_grid_vis(rx_vax[:vis_ny*vis_nx,...,0],
                                   vis_ny, vis_nx,
                                   save_path=os.path.join(SAMPLE_PATH, f'{model.num_updates()}.png'))
|
# Bootstrap script: bring up the backend (docker-compose + run.sh), install
# dependencies for the mobile and frontend apps, and start the frontend.
# (User-facing echo messages are in Portuguese and left untouched.)
echo Entrando na pasta backend
cd backend
# NOTE(review): this message repeats the one above; it probably was meant to
# describe the docker-compose step — confirm before changing the text.
echo Entrando na pasta backend
sudo docker-compose up -d
echo Executando run.sh
sudo chmod +x ./run.sh
sudo ./run.sh
echo Entrando na pasta mobile
cd .. && cd mobile
echo instalando dependências
yarn
echo Entrando na pasta frontend
cd .. && cd frontend
echo instalando dependências
yarn
echo startando o projeto
yarn start
package net.oiyou.day002;
/**
 * LeetCode #2: add two non-negative numbers stored as linked lists of digits
 * in least-significant-first order, returning the sum in the same format.
 */
public class Day002 {

    public ListNode addTwoNumbers(ListNode l1, ListNode l2) {
        // Sentinel head keeps appending uniform; the real list starts at head.next.
        ListNode head = new ListNode(0);
        ListNode tail = head;
        ListNode a = l1;
        ListNode b = l2;
        int carry = 0;
        while (a != null || b != null) {
            int digitSum = carry;
            if (a != null) {
                digitSum += a.val;
                a = a.next;
            }
            if (b != null) {
                digitSum += b.val;
                b = b.next;
            }
            // Emit the low digit, keep the overflow for the next position.
            tail.next = new ListNode(digitSum % 10);
            tail = tail.next;
            carry = digitSum / 10;
        }
        // A leftover carry becomes one final digit.
        if (carry > 0) {
            tail.next = new ListNode(carry);
        }
        return head.next;
    }
}
// Singly linked list node (standard LeetCode definition) used by Day002.
class ListNode {
    int val;       // value held by this node
    ListNode next; // following node, or null at the tail

    ListNode() {
    }

    ListNode(int val) {
        this.val = val;
    }

    ListNode(int val, ListNode next) {
        this.val = val;
        this.next = next;
    }
}
|
#!/bin/bash
# Build a static amd64 release binary of gluster-metrics-exporter inside the
# official Crystal alpine container. VERSION defaults to "master".
VERSION=${VERSION-master}
# Commands run inside the container. Note: ${VERSION} is expanded here on the
# host, before docker run, so the container sees the resolved value.
CMDS="
time -v shards install --production
VERSION=${VERSION} time -v shards build --static --release --stats --time
mv bin/gluster-metrics-exporter bin/gluster-metrics-exporter-amd64
"
docker run --rm -it -v $PWD:/workspace -w /workspace crystallang/crystal:0.35.1-alpine /bin/sh -c "$CMDS"
|
# frozen_string_literal: true
# Specs for Lint/DeprecatedClassMethods. Each context pins one deprecated
# method, its preferred replacement, and (where a safe rewrite exists) the
# autocorrection; receiver-restricted methods are also checked not to fire
# on unrelated receivers.
RSpec.describe RuboCop::Cop::Lint::DeprecatedClassMethods, :config do
  context 'prefer `File.exist?` over `File.exists?`' do
    it 'registers an offense and corrects File.exists?' do
      expect_offense(<<~RUBY)
        File.exists?(o)
             ^^^^^^^ `File.exists?` is deprecated in favor of `File.exist?`.
      RUBY

      expect_correction(<<~RUBY)
        File.exist?(o)
      RUBY
    end

    it 'registers an offense and corrects ::File.exists?' do
      expect_offense(<<~RUBY)
        ::File.exists?(o)
               ^^^^^^^ `File.exists?` is deprecated in favor of `File.exist?`.
      RUBY

      expect_correction(<<~RUBY)
        ::File.exist?(o)
      RUBY
    end

    it 'does not register an offense for File.exist?' do
      expect_no_offenses('File.exist?(o)')
    end
  end

  context 'prefer `Dir.exist?` over `Dir.exists?`' do
    it 'registers an offense and corrects Dir.exists?' do
      expect_offense(<<~RUBY)
        Dir.exists?(o)
            ^^^^^^^ `Dir.exists?` is deprecated in favor of `Dir.exist?`.
      RUBY

      expect_correction(<<~RUBY)
        Dir.exist?(o)
      RUBY
    end

    it 'registers an offense and corrects ::Dir.exists?' do
      expect_offense(<<~RUBY)
        ::Dir.exists?(o)
              ^^^^^^^ `Dir.exists?` is deprecated in favor of `Dir.exist?`.
      RUBY

      expect_correction(<<~RUBY)
        ::Dir.exist?(o)
      RUBY
    end

    it 'does not register an offense for Dir.exist?' do
      expect_no_offenses('Dir.exist?(o)')
    end

    it 'does not register an offense for offensive method `exists?`on other receivers' do
      expect_no_offenses('Foo.exists?(o)')
    end
  end

  context 'prefer `block_given?` over `iterator?`' do
    it 'registers an offense and corrects iterator?' do
      expect_offense(<<~RUBY)
        iterator?
        ^^^^^^^^^ `iterator?` is deprecated in favor of `block_given?`.
      RUBY

      expect_correction(<<~RUBY)
        block_given?
      RUBY
    end

    it 'does not register an offense for block_given?' do
      expect_no_offenses('block_given?')
    end

    it 'does not register an offense for offensive method `iterator?`on other receivers' do
      expect_no_offenses('Foo.iterator?')
    end
  end

  # The Socket replacements have no mechanical rewrite, so no corrections.
  context 'prefer `Addrinfo#getnameinfo` over `Socket.gethostbyaddr`' do
    it 'registers an offense for Socket.gethostbyaddr' do
      expect_offense(<<~RUBY)
        Socket.gethostbyaddr([221,186,184,68].pack("CCCC"))
               ^^^^^^^^^^^^^ `Socket.gethostbyaddr` is deprecated in favor of `Addrinfo#getnameinfo`.
      RUBY

      expect_no_corrections
    end

    it 'registers an offense for ::Socket.gethostbyaddr' do
      expect_offense(<<~RUBY)
        ::Socket.gethostbyaddr([221,186,184,68].pack("CCCC"))
                 ^^^^^^^^^^^^^ `Socket.gethostbyaddr` is deprecated in favor of `Addrinfo#getnameinfo`.
      RUBY

      expect_no_corrections
    end

    it 'does not register an offense for method `gethostbyaddr` on other receivers' do
      expect_no_offenses('Foo.gethostbyaddr')
    end
  end

  context 'prefer `Addrinfo#getaddrinfo` over `Socket.gethostbyname`' do
    it 'registers an offense for Socket.gethostbyname' do
      expect_offense(<<~RUBY)
        Socket.gethostbyname("hal")
               ^^^^^^^^^^^^^ `Socket.gethostbyname` is deprecated in favor of `Addrinfo#getaddrinfo`.
      RUBY

      expect_no_corrections
    end

    it 'registers an offense for ::Socket.gethostbyname' do
      expect_offense(<<~RUBY)
        ::Socket.gethostbyname("hal")
                 ^^^^^^^^^^^^^ `Socket.gethostbyname` is deprecated in favor of `Addrinfo#getaddrinfo`.
      RUBY

      expect_no_corrections
    end

    it 'does not register an offense for method `gethostbyname` on other receivers' do
      expect_no_offenses('Foo.gethostbyname')
    end
  end
end
|
#
# Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 6667089
# @summary Reflexive invocation of newly added methods broken.
# @author Daniel D. Daugherty
#
# @modules java.instrument
# @run shell MakeJAR3.sh RedefineMethodAddInvokeAgent 'Can-Redefine-Classes: true'
# @run build RedefineMethodAddInvokeApp
# @run shell RedefineMethodAddInvoke.sh
#
# Guard: these environment variables are supplied by the jtreg harness.
if [ "${TESTJAVA}" = "" ]
then
    echo "TESTJAVA not set. Test cannot execute. Failed."
    exit 1
fi

if [ "${COMPILEJAVA}" = "" ]
then
    COMPILEJAVA="${TESTJAVA}"
fi
echo "COMPILEJAVA=${COMPILEJAVA}"

if [ "${TESTSRC}" = "" ]
then
    echo "TESTSRC not set. Test cannot execute. Failed."
    exit 1
fi

if [ "${TESTCLASSES}" = "" ]
then
    echo "TESTCLASSES not set. Test cannot execute. Failed."
    exit 1
fi

JAVAC="${COMPILEJAVA}"/bin/javac
JAVA="${TESTJAVA}"/bin/java

# Compile version 1 of the target class under its real name, then stash both
# the source and the class file away under the _1 suffix.
cp "${TESTSRC}"/RedefineMethodAddInvokeTarget_1.java \
    RedefineMethodAddInvokeTarget.java
"${JAVAC}" ${TESTJAVACOPTS} ${TESTTOOLVMOPTS} -d . RedefineMethodAddInvokeTarget.java
mv RedefineMethodAddInvokeTarget.java RedefineMethodAddInvokeTarget_1.java
mv RedefineMethodAddInvokeTarget.class RedefineMethodAddInvokeTarget_1.class

# Same procedure for version 2 of the target class.
cp "${TESTSRC}"/RedefineMethodAddInvokeTarget_2.java \
    RedefineMethodAddInvokeTarget.java
"${JAVAC}" ${TESTJAVACOPTS} ${TESTTOOLVMOPTS} -d . RedefineMethodAddInvokeTarget.java
mv RedefineMethodAddInvokeTarget.java RedefineMethodAddInvokeTarget_2.java
mv RedefineMethodAddInvokeTarget.class RedefineMethodAddInvokeTarget_2.class

# Run the app with the redefinition agent attached, capturing all output.
"${JAVA}" ${TESTVMOPTS} -javaagent:RedefineMethodAddInvokeAgent.jar \
    -classpath "${TESTCLASSES}" RedefineMethodAddInvokeApp > output.log 2>&1

cat output.log

# The test passes iff no "Exception" appears in the captured output.
MESG="Exception"
grep "$MESG" output.log
result=$?
if [ "$result" = 0 ]; then
    echo "FAIL: found '$MESG' in the test output"
    result=1
else
    echo "PASS: did NOT find '$MESG' in the test output"
    result=0
fi

exit $result
|
<filename>src/main/java/org/ringingmaster/util/javafx/dialog/EditMode.java
package org.ringingmaster.util.javafx.dialog;
/**
 * Distinguishes whether a dialog was opened to add a new item or to edit an
 * existing one, and carries the matching display text.
 *
 * @author <NAME>
 */
public enum EditMode {

    ADD("Add"),
    EDIT("Edit");

    // Human-readable label for this mode ("Add" or "Edit").
    private final String editText;

    EditMode(String editText) {
        this.editText = editText;
    }

    /** Returns the display text for this mode. */
    public String getEditText() {
        return editText;
    }
}
|
#! /usr/bin/env zsh
# Hz subcommands. HZ_BANNER holds the one-line summary shown by `hz help`;
# HZ_HELP holds the long-form help text for a command.
HZ_BANNER[version]="Show the current version of Hz."
hz-version() { --hz-version }

HZ_BANNER[commands]="Show all commands known to Hz."
hz-commands()
{
    # NOTE(review): --hz-once! presumably guards against repeated execution —
    # confirm against its definition.
    --hz-once!
    hz-help
}

HZ_BANNER[install]="Install Hz configuration."
hz-install()
{
    --hz-ruby-hz "Hz::Installer.run('${HZ_ROOT}', '${HZ_TARGET}', ${HZ_OPTION_FORCE:-false})"
}

# The bootstrap command is only defined when Hz runs from its own checkout.
if ${HZ_RUN_RELATIVE}; then
    HZ_BANNER[bootstrap]="Bootstrap Hz."
    HZ_HELP[bootstrap]=$'
Performs the initial configuration of Hz. Performs user data setup and installs
the Hz configuration like with `hz install`. This should only need to be run
once.
'
    hz-bootstrap()
    {
        --hz-once!
        builtin print "Bootstrapping Hz..."
        --hz-install-highline
        hz-user-setup
        hz-install
    }
fi

HZ_BANNER[user-setup]="Configure user data."
hz-user-setup()
{
    --hz-ruby-hz "Hz::UserData.run('${HZ_ROOT}', '${HZ_TARGET}')"
}

HZ_BANNER[user-backup]="Create an archive of user data"
HZ_HELP[user-backup]=$'
Backs up the current user data configuration as "user-backup.tar.gz".
'
hz-user-backup()
{
    if [[ -d user ]]; then
        tar cfz user-backup.tar.gz user
    else
        builtin print "No user data to back up."
    fi
}
|
import logging
import rlp
from eth_rpc_client import Client as EthereumRpcClient
from .node import NodeProcess
# Module-level logger for the Ethereum integration.
log = logging.getLogger('golem.ethereum')
class Client(EthereumRpcClient):
    """ RPC interface client for Ethereum node."""

    # Default bootstrap peer(s) the managed node connects to.
    STATIC_NODES = ["enode://f1fbbeff7e9777a3a930f1e55a5486476845f799f7d603f71be7b00898df98f2dc2e81b854d2c774c3d266f1fa105d130d4a43bc58e700155c4565726ae6804e@94.23.17.170:30900"] # noqa

    # Shared NodeProcess: every Client instance talks to one local node.
    node = None

    def __init__(self, datadir, nodes=None):
        # Fall back to the built-in bootstrap nodes when none are given.
        if not nodes:
            nodes = Client.STATIC_NODES
        if not Client.node:
            Client.node = NodeProcess(nodes, datadir)
        else:
            # The singleton node is pinned to the datadir of its first use.
            assert Client.node.datadir == datadir, \
                "Ethereum node's datadir cannot be changed"
        if not Client.node.is_running():
            Client.node.start(rpc=True)
        super(Client, self).__init__(port=Client.node.rpcport)

    @staticmethod
    def _kill_node():
        # FIXME: Keeping the node as a static object might not be the best.
        if Client.node:
            Client.node.stop()
            Client.node = None

    def get_peer_count(self):
        """
        https://github.com/ethereum/wiki/wiki/JSON-RPC#net_peerCount
        """
        response = self.make_request("net_peerCount", [])
        # Result is a hex quantity string, e.g. "0x2".
        return int(response['result'], 16)

    def is_syncing(self):
        """
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_syncing
        """
        response = self.make_request("eth_syncing", [])
        result = response['result']
        # Per the linked spec: False when idle, a status object while syncing.
        return bool(result)

    def get_transaction_count(self, address):
        """
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactioncount
        """
        # "pending" counts queued transactions too, not only mined ones.
        response = self.make_request("eth_getTransactionCount", [address, "pending"])
        return int(response['result'], 16)

    def send_raw_transaction(self, data):
        # 'data' is expected to be a hex-encoded signed transaction.
        response = self.make_request("eth_sendRawTransaction", [data])
        return response['result']

    def send(self, transaction):
        # RLP-encode then hex-encode the signed transaction before submission.
        # NOTE(review): str.encode('hex') is Python 2 only — confirm this
        # module still targets Python 2 before porting.
        return self.send_raw_transaction(rlp.encode(transaction).encode('hex'))
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# Load the dataset and split columns into features / target.
df = pd.read_csv('car_data.csv')
FEATURES = ['cylinders', 'displacement', 'horsepower', 'weight', 'acceleration']
X = df[FEATURES]
y = df['mpg']

# Hold out 20% of the rows for evaluation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Fit an ordinary least-squares linear model on the training split.
model = LinearRegression()
model.fit(X_train, y_train)

# Evaluate on the held-out split: root mean squared error, in mpg units.
preds = model.predict(X_test)
rmse = mean_squared_error(y_test, preds) ** 0.5
print('RMSE:', rmse)
#!/bin/sh
# compat.sh
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Purpose
#
# Test interoperability with OpenSSL and GnuTLS, as well as with itself.
#
# Check each common ciphersuite, with each version, both ways (client/server),
# with and without client authentication.
# Treat references to unset variables as errors (catches typos early).
set -u

# Limit the size of each log to 10 GiB, in case of failures with this script
# where it may output seemingly unlimited length error logs.
# (ulimit -f counts 512-byte blocks: 20971520 blocks = 10 GiB.)
ulimit -f 20971520

# initialise counters
TESTS=0
FAILED=0
SKIPPED=0
SRVMEM=0   # server-side memory errors seen under valgrind (--memcheck)

# default commands, can be overridden by the environment
: ${M_SRV:=../programs/ssl/ssl_server2}
: ${M_CLI:=../programs/ssl/ssl_client2}
: ${OPENSSL_CMD:=openssl} # OPENSSL would conflict with the build system
: ${GNUTLS_CLI:=gnutls-cli}
: ${GNUTLS_SERV:=gnutls-serv}

# do we have a recent enough GnuTLS?
if ( which $GNUTLS_CLI && which $GNUTLS_SERV ) >/dev/null 2>&1; then
    G_VER="$( $GNUTLS_CLI --version | head -n1 )"
    if echo "$G_VER" | grep '@VERSION@' > /dev/null; then # git version
        PEER_GNUTLS=" GnuTLS"
    else
        # split the "x.y.z" version string into MAJOR / MINOR / PATCH
        eval $( echo $G_VER | sed 's/.* \([0-9]*\)\.\([0-9]\)*\.\([0-9]*\)$/MAJOR="\1" MINOR="\2" PATCH="\3"/' )
        # GnuTLS older than 3.2.15 is not usable as a peer
        if [ $MAJOR -lt 3 -o \
            \( $MAJOR -eq 3 -a $MINOR -lt 2 \) -o \
            \( $MAJOR -eq 3 -a $MINOR -eq 2 -a $PATCH -lt 15 \) ]
        then
            PEER_GNUTLS=""
        else
            PEER_GNUTLS=" GnuTLS"
            if [ $MINOR -lt 4 ]; then
                # remembered so setup_arguments() can avoid CCM-8 suites
                GNUTLS_MINOR_LT_FOUR='x'
            fi
        fi
    fi
else
    PEER_GNUTLS=""
fi

# default values for options
# /!\ keep this synchronised with:
# - basic-build-test.sh
# - all.sh (multiple components)
MODES="tls12 dtls12"
VERIFIES="NO YES"
TYPES="ECDSA RSA PSK"
FILTER=""
# By default, exclude:
# - NULL: excluded from our default config + requires OpenSSL legacy
# - ARIA: requires OpenSSL >= 1.1.1
# - ChachaPoly: requires OpenSSL >= 1.1.0
EXCLUDE='NULL\|ARIA\|CHACHA20-POLY1305'
VERBOSE=""
MEMCHECK=0
PEERS="OpenSSL$PEER_GNUTLS mbedTLS"

# hidden option: skip DTLS with OpenSSL
# (travis CI has a version that doesn't work for us)
: ${OSSL_NO_DTLS:=0}
# Print the command-line help text; the shown defaults reflect the current
# values of the corresponding variables.
print_usage() {
    echo "Usage: $0"
    printf " -h|--help\tPrint this help.\n"
    printf " -f|--filter\tOnly matching ciphersuites are tested (Default: '%s')\n" "$FILTER"
    printf " -e|--exclude\tMatching ciphersuites are excluded (Default: '%s')\n" "$EXCLUDE"
    printf " -m|--modes\tWhich modes to perform (Default: '%s')\n" "$MODES"
    printf " -t|--types\tWhich key exchange type to perform (Default: '%s')\n" "$TYPES"
    printf " -V|--verify\tWhich verification modes to perform (Default: '%s')\n" "$VERIFIES"
    printf " -p|--peers\tWhich peers to use (Default: '%s')\n" "$PEERS"
    printf " \tAlso available: GnuTLS (needs v3.2.15 or higher)\n"
    printf " -M|--memcheck\tCheck memory leaks and errors.\n"
    printf " -v|--verbose\tSet verbose output.\n"
}
# Parse the command-line options, overriding the defaults set above.
# Exits with usage on unknown arguments.
get_options() {
    while [ $# -gt 0 ]; do
        case "$1" in
            -f|--filter)
                shift; FILTER=$1
                ;;
            -e|--exclude)
                shift; EXCLUDE=$1
                ;;
            -m|--modes)
                shift; MODES=$1
                ;;
            -t|--types)
                shift; TYPES=$1
                ;;
            -V|--verify)
                shift; VERIFIES=$1
                ;;
            -p|--peers)
                shift; PEERS=$1
                ;;
            -v|--verbose)
                VERBOSE=1
                ;;
            -M|--memcheck)
                MEMCHECK=1
                ;;
            -h|--help)
                print_usage
                exit 0
                ;;
            *)
                echo "Unknown argument: '$1'"
                print_usage
                exit 1
                ;;
        esac
        shift
    done

    # sanitize some options (modes checked later)
    # The tr operands are quoted: unquoted [a-z] is a glob pattern and would
    # expand to a matching single-character file name in the current
    # directory, silently breaking case normalization.
    VERIFIES="$( echo $VERIFIES | tr '[a-z]' '[A-Z]' )"
    TYPES="$( echo $TYPES | tr '[a-z]' '[A-Z]' )"
}
# Print a blank line followed by the arguments, but only in verbose mode.
log() {
    case "$VERBOSE" in
        "") ;;  # quiet mode: print nothing
        *)
            echo ""
            echo "$@"
            ;;
    esac
}
# is_dtls <mode>
# Succeed (status 0) iff <mode> is a DTLS mode.
is_dtls()
{
    case "$1" in
        dtls12) return 0 ;;
        *)      return 1 ;;
    esac
}
# minor_ver <mode>
# Print the SSL minor version number for <mode> (3 = [D]TLS 1.2,
# 4 = TLS 1.3); print -1 (and a message on stderr) for unknown modes.
minor_ver()
{
    case "$1" in
        tls12|dtls12)
            echo 3
            ;;
        tls13)
            echo 4
            ;;
        *)
            # Report the argument actually received, not the global $MODE:
            # the two can differ, and under `set -u` an unset $MODE would
            # abort the script here.
            echo "error: invalid mode: $1" >&2
            # exiting is no good here, typically called in a subshell
            echo -1
    esac
}
# filter <space-separated ciphersuite list>
# Print the elements that match $FILTER and do not match $EXCLUDE,
# separated by single spaces. Note: sh has no locals, so LIST, NEW_LIST
# and EXCLMODE are set as globals (nothing else reads them).
filter()
{
  LIST="$1"
  NEW_LIST=""
  EXCLMODE="$EXCLUDE"

  for i in $LIST;
  do
    NEW_LIST="$NEW_LIST $( echo "$i" | grep "$FILTER" | grep -v "$EXCLMODE" )"
  done

  # normalize whitespace
  echo "$NEW_LIST" | sed -e 's/[[:space:]][[:space:]]*/ /g' -e 's/^ //' -e 's/ $//'
}
# OpenSSL 1.0.1h with -Verify wants a ClientCertificate message even for
# PSK ciphersuites with DTLS, which is incorrect, so disable them for now
check_openssl_server_bug()
{
    case "$1" in
        TLS-PSK*)
            if [ "X$VERIFY" = "XYES" ] && is_dtls "$MODE"; then
                SKIP_NEXT="YES"
            fi
            ;;
    esac
}
# Apply $FILTER / $EXCLUDE to the three per-peer ciphersuite lists
# (M_CIPHERS, O_CIPHERS, G_CIPHERS), modifying them in place.
filter_ciphersuites()
{
    if [ "X" != "X$FILTER" -o "X" != "X$EXCLUDE" ];
    then
        # Ciphersuite for mbed TLS
        M_CIPHERS=$( filter "$M_CIPHERS" )

        # Ciphersuite for OpenSSL
        O_CIPHERS=$( filter "$O_CIPHERS" )

        # Ciphersuite for GnuTLS
        G_CIPHERS=$( filter "$G_CIPHERS" )
    fi

    # For GnuTLS client -> mbed TLS server,
    # we need to force IPv4 by connecting to 127.0.0.1 but then auth fails
    if [ "X$VERIFY" = "XYES" ] && is_dtls "$MODE"; then
        G_CIPHERS=""
    fi
}
# Empty all three per-peer ciphersuite lists.
reset_ciphersuites()
{
    for CS_VAR in M_CIPHERS O_CIPHERS G_CIPHERS; do
        eval "$CS_VAR=''"
    done
}
# check_translation <exit_code> <script_output>
# Abort the whole run if translate_ciphers.py failed, echoing its exit
# code and output to stderr.
check_translation()
{
    [ "$1" -eq 0 ] && return

    echo "translate_ciphers.py failed with exit code $1" >&2
    echo "$2" >&2
    exit 1
}
# Ciphersuites that can be used with all peers.
# Since we currently have three possible peers, each ciphersuite should appear
# three times: in each peer's list (with the name that this peer uses).
# Appends to M_CIPHERS directly (Mbed TLS naming) and, via
# translate_ciphers.py, to G_CIPHERS (GnuTLS naming) and O_CIPHERS
# (OpenSSL naming). Selection depends on the current $TYPE.
add_common_ciphersuites()
{
    CIPHERS=""
    case $TYPE in

        "ECDSA")
            CIPHERS="$CIPHERS                           \
                TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA    \
                TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256 \
                TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256 \
                TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA    \
                TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384 \
                TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384 \
                TLS-ECDHE-ECDSA-WITH-NULL-SHA           \
                "
            ;;

        "RSA")
            CIPHERS="$CIPHERS                           \
                TLS-DHE-RSA-WITH-AES-128-CBC-SHA        \
                TLS-DHE-RSA-WITH-AES-128-CBC-SHA256     \
                TLS-DHE-RSA-WITH-AES-128-GCM-SHA256     \
                TLS-DHE-RSA-WITH-AES-256-CBC-SHA        \
                TLS-DHE-RSA-WITH-AES-256-CBC-SHA256     \
                TLS-DHE-RSA-WITH-AES-256-GCM-SHA384     \
                TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA   \
                TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA   \
                TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA      \
                TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256   \
                TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256   \
                TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA      \
                TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384   \
                TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384   \
                TLS-ECDHE-RSA-WITH-NULL-SHA             \
                TLS-RSA-WITH-AES-128-CBC-SHA            \
                TLS-RSA-WITH-AES-128-CBC-SHA256         \
                TLS-RSA-WITH-AES-128-GCM-SHA256         \
                TLS-RSA-WITH-AES-256-CBC-SHA            \
                TLS-RSA-WITH-AES-256-CBC-SHA256         \
                TLS-RSA-WITH-AES-256-GCM-SHA384         \
                TLS-RSA-WITH-CAMELLIA-128-CBC-SHA       \
                TLS-RSA-WITH-CAMELLIA-256-CBC-SHA       \
                TLS-RSA-WITH-NULL-MD5                   \
                TLS-RSA-WITH-NULL-SHA                   \
                TLS-RSA-WITH-NULL-SHA256                \
                "
            ;;

        "PSK")
            CIPHERS="$CIPHERS                           \
                TLS-PSK-WITH-AES-128-CBC-SHA            \
                TLS-PSK-WITH-AES-256-CBC-SHA            \
                "
            ;;
    esac

    M_CIPHERS="$M_CIPHERS $CIPHERS"

    # translate the Mbed TLS names for the other two peers
    T=$(./scripts/translate_ciphers.py g $CIPHERS)
    check_translation $? "$T"
    G_CIPHERS="$G_CIPHERS $T"

    T=$(./scripts/translate_ciphers.py o $CIPHERS)
    check_translation $? "$T"
    O_CIPHERS="$O_CIPHERS $T"
}
# Ciphersuites usable only with Mbed TLS and OpenSSL
# A list of ciphersuites in the Mbed TLS convention is compiled and
# appended to the list of Mbed TLS ciphersuites $M_CIPHERS. The same list
# is translated to the OpenSSL naming convention and appended to the list of
# OpenSSL ciphersuites $O_CIPHERS.
#
# NOTE: for some reason RSA-PSK doesn't work with OpenSSL,
# so RSA-PSK ciphersuites need to go in other sections, see
# https://github.com/Mbed-TLS/mbedtls/issues/1419
#
# ChachaPoly suites are here rather than in "common", as they were added in
# GnuTLS in 3.5.0 and the CI only has 3.4.x so far.
add_openssl_ciphersuites()
{
    CIPHERS=""
    case $TYPE in

        "ECDSA")
            CIPHERS="$CIPHERS                                   \
                TLS-ECDH-ECDSA-WITH-AES-128-CBC-SHA             \
                TLS-ECDH-ECDSA-WITH-AES-128-CBC-SHA256          \
                TLS-ECDH-ECDSA-WITH-AES-128-GCM-SHA256          \
                TLS-ECDH-ECDSA-WITH-AES-256-CBC-SHA             \
                TLS-ECDH-ECDSA-WITH-AES-256-CBC-SHA384          \
                TLS-ECDH-ECDSA-WITH-AES-256-GCM-SHA384          \
                TLS-ECDH-ECDSA-WITH-NULL-SHA                    \
                TLS-ECDHE-ECDSA-WITH-ARIA-128-GCM-SHA256        \
                TLS-ECDHE-ECDSA-WITH-ARIA-256-GCM-SHA384        \
                TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256   \
                "
            ;;

        "RSA")
            CIPHERS="$CIPHERS                                   \
                TLS-DHE-RSA-WITH-ARIA-128-GCM-SHA256            \
                TLS-DHE-RSA-WITH-ARIA-256-GCM-SHA384            \
                TLS-DHE-RSA-WITH-CHACHA20-POLY1305-SHA256       \
                TLS-ECDHE-RSA-WITH-ARIA-128-GCM-SHA256          \
                TLS-ECDHE-RSA-WITH-ARIA-256-GCM-SHA384          \
                TLS-ECDHE-RSA-WITH-CHACHA20-POLY1305-SHA256     \
                TLS-RSA-WITH-ARIA-128-GCM-SHA256                \
                TLS-RSA-WITH-ARIA-256-GCM-SHA384                \
                "
            ;;

        "PSK")
            CIPHERS="$CIPHERS                                   \
                TLS-DHE-PSK-WITH-ARIA-128-GCM-SHA256            \
                TLS-DHE-PSK-WITH-ARIA-256-GCM-SHA384            \
                TLS-DHE-PSK-WITH-CHACHA20-POLY1305-SHA256       \
                TLS-ECDHE-PSK-WITH-CHACHA20-POLY1305-SHA256     \
                TLS-PSK-WITH-ARIA-128-GCM-SHA256                \
                TLS-PSK-WITH-ARIA-256-GCM-SHA384                \
                TLS-PSK-WITH-CHACHA20-POLY1305-SHA256           \
                "
            ;;
    esac

    M_CIPHERS="$M_CIPHERS $CIPHERS"

    T=$(./scripts/translate_ciphers.py o $CIPHERS)
    check_translation $? "$T"
    O_CIPHERS="$O_CIPHERS $T"
}
# Ciphersuites usable only with Mbed TLS and GnuTLS
# A list of ciphersuites in the Mbed TLS convention is compiled and
# appended to the list of Mbed TLS ciphersuites $M_CIPHERS. The same list
# is translated to the GnuTLS naming convention and appended to the list of
# GnuTLS ciphersuites $G_CIPHERS.
add_gnutls_ciphersuites()
{
    CIPHERS=""
    case $TYPE in

        "ECDSA")
            CIPHERS="$CIPHERS                                       \
                TLS-ECDHE-ECDSA-WITH-AES-128-CCM                    \
                TLS-ECDHE-ECDSA-WITH-AES-128-CCM-8                  \
                TLS-ECDHE-ECDSA-WITH-AES-256-CCM                    \
                TLS-ECDHE-ECDSA-WITH-AES-256-CCM-8                  \
                TLS-ECDHE-ECDSA-WITH-CAMELLIA-128-CBC-SHA256        \
                TLS-ECDHE-ECDSA-WITH-CAMELLIA-128-GCM-SHA256        \
                TLS-ECDHE-ECDSA-WITH-CAMELLIA-256-CBC-SHA384        \
                TLS-ECDHE-ECDSA-WITH-CAMELLIA-256-GCM-SHA384        \
                "
            ;;

        "RSA")
            CIPHERS="$CIPHERS                               \
                TLS-DHE-RSA-WITH-AES-128-CCM                \
                TLS-DHE-RSA-WITH-AES-128-CCM-8              \
                TLS-DHE-RSA-WITH-AES-256-CCM                \
                TLS-DHE-RSA-WITH-AES-256-CCM-8              \
                TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256    \
                TLS-DHE-RSA-WITH-CAMELLIA-128-GCM-SHA256    \
                TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256    \
                TLS-DHE-RSA-WITH-CAMELLIA-256-GCM-SHA384    \
                TLS-ECDHE-RSA-WITH-CAMELLIA-128-CBC-SHA256  \
                TLS-ECDHE-RSA-WITH-CAMELLIA-128-GCM-SHA256  \
                TLS-ECDHE-RSA-WITH-CAMELLIA-256-CBC-SHA384  \
                TLS-ECDHE-RSA-WITH-CAMELLIA-256-GCM-SHA384  \
                TLS-RSA-WITH-AES-128-CCM                    \
                TLS-RSA-WITH-AES-128-CCM-8                  \
                TLS-RSA-WITH-AES-256-CCM                    \
                TLS-RSA-WITH-AES-256-CCM-8                  \
                TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256        \
                TLS-RSA-WITH-CAMELLIA-128-GCM-SHA256        \
                TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256        \
                TLS-RSA-WITH-CAMELLIA-256-GCM-SHA384        \
                "
            ;;

        "PSK")
            CIPHERS="$CIPHERS                               \
                TLS-DHE-PSK-WITH-AES-128-CBC-SHA            \
                TLS-DHE-PSK-WITH-AES-128-CBC-SHA256         \
                TLS-DHE-PSK-WITH-AES-128-CCM                \
                TLS-DHE-PSK-WITH-AES-128-CCM-8              \
                TLS-DHE-PSK-WITH-AES-128-GCM-SHA256         \
                TLS-DHE-PSK-WITH-AES-256-CBC-SHA            \
                TLS-DHE-PSK-WITH-AES-256-CBC-SHA384         \
                TLS-DHE-PSK-WITH-AES-256-CCM                \
                TLS-DHE-PSK-WITH-AES-256-CCM-8              \
                TLS-DHE-PSK-WITH-AES-256-GCM-SHA384         \
                TLS-DHE-PSK-WITH-CAMELLIA-128-CBC-SHA256    \
                TLS-DHE-PSK-WITH-CAMELLIA-128-GCM-SHA256    \
                TLS-DHE-PSK-WITH-CAMELLIA-256-CBC-SHA384    \
                TLS-DHE-PSK-WITH-CAMELLIA-256-GCM-SHA384    \
                TLS-DHE-PSK-WITH-NULL-SHA256                \
                TLS-DHE-PSK-WITH-NULL-SHA384                \
                TLS-ECDHE-PSK-WITH-AES-128-CBC-SHA          \
                TLS-ECDHE-PSK-WITH-AES-128-CBC-SHA256       \
                TLS-ECDHE-PSK-WITH-AES-256-CBC-SHA          \
                TLS-ECDHE-PSK-WITH-AES-256-CBC-SHA384       \
                TLS-ECDHE-PSK-WITH-CAMELLIA-128-CBC-SHA256  \
                TLS-ECDHE-PSK-WITH-CAMELLIA-256-CBC-SHA384  \
                TLS-ECDHE-PSK-WITH-NULL-SHA256              \
                TLS-ECDHE-PSK-WITH-NULL-SHA384              \
                TLS-PSK-WITH-AES-128-CBC-SHA256             \
                TLS-PSK-WITH-AES-128-CCM                    \
                TLS-PSK-WITH-AES-128-CCM-8                  \
                TLS-PSK-WITH-AES-128-GCM-SHA256             \
                TLS-PSK-WITH-AES-256-CBC-SHA384             \
                TLS-PSK-WITH-AES-256-CCM                    \
                TLS-PSK-WITH-AES-256-CCM-8                  \
                TLS-PSK-WITH-AES-256-GCM-SHA384             \
                TLS-PSK-WITH-CAMELLIA-128-CBC-SHA256        \
                TLS-PSK-WITH-CAMELLIA-128-GCM-SHA256        \
                TLS-PSK-WITH-CAMELLIA-256-CBC-SHA384        \
                TLS-PSK-WITH-CAMELLIA-256-GCM-SHA384        \
                TLS-PSK-WITH-NULL-SHA256                    \
                TLS-PSK-WITH-NULL-SHA384                    \
                TLS-RSA-PSK-WITH-AES-128-CBC-SHA            \
                TLS-RSA-PSK-WITH-AES-128-CBC-SHA256         \
                TLS-RSA-PSK-WITH-AES-128-GCM-SHA256         \
                TLS-RSA-PSK-WITH-AES-256-CBC-SHA            \
                TLS-RSA-PSK-WITH-AES-256-CBC-SHA384         \
                TLS-RSA-PSK-WITH-AES-256-GCM-SHA384         \
                TLS-RSA-PSK-WITH-CAMELLIA-128-CBC-SHA256    \
                TLS-RSA-PSK-WITH-CAMELLIA-128-GCM-SHA256    \
                TLS-RSA-PSK-WITH-CAMELLIA-256-CBC-SHA384    \
                TLS-RSA-PSK-WITH-CAMELLIA-256-GCM-SHA384    \
                TLS-RSA-PSK-WITH-NULL-SHA256                \
                TLS-RSA-PSK-WITH-NULL-SHA384                \
                "
            ;;
    esac

    M_CIPHERS="$M_CIPHERS $CIPHERS"

    T=$(./scripts/translate_ciphers.py g $CIPHERS)
    check_translation $? "$T"
    G_CIPHERS="$G_CIPHERS $T"
}
# Ciphersuites usable only with Mbed TLS (not currently supported by another
# peer usable in this script). This provides only very rudimentary testing, as
# it is not interop testing, but it's better than nothing.
# Append Mbed-TLS-only suites to M_CIPHERS for the current $TYPE.
# No translation step: these suites are only ever run mbedTLS<->mbedTLS.
add_mbedtls_ciphersuites()
{
    case $TYPE in

        "ECDSA")
            M_CIPHERS="$M_CIPHERS                               \
                TLS-ECDH-ECDSA-WITH-ARIA-128-CBC-SHA256         \
                TLS-ECDH-ECDSA-WITH-ARIA-128-GCM-SHA256         \
                TLS-ECDH-ECDSA-WITH-ARIA-256-CBC-SHA384         \
                TLS-ECDH-ECDSA-WITH-ARIA-256-GCM-SHA384         \
                TLS-ECDH-ECDSA-WITH-CAMELLIA-128-CBC-SHA256     \
                TLS-ECDH-ECDSA-WITH-CAMELLIA-128-GCM-SHA256     \
                TLS-ECDH-ECDSA-WITH-CAMELLIA-256-CBC-SHA384     \
                TLS-ECDH-ECDSA-WITH-CAMELLIA-256-GCM-SHA384     \
                TLS-ECDHE-ECDSA-WITH-ARIA-128-CBC-SHA256        \
                TLS-ECDHE-ECDSA-WITH-ARIA-256-CBC-SHA384        \
                "
            ;;

        "RSA")
            M_CIPHERS="$M_CIPHERS                               \
                TLS-DHE-RSA-WITH-ARIA-128-CBC-SHA256            \
                TLS-DHE-RSA-WITH-ARIA-256-CBC-SHA384            \
                TLS-ECDHE-RSA-WITH-ARIA-128-CBC-SHA256          \
                TLS-ECDHE-RSA-WITH-ARIA-256-CBC-SHA384          \
                TLS-RSA-WITH-ARIA-128-CBC-SHA256                \
                TLS-RSA-WITH-ARIA-256-CBC-SHA384                \
                "
            ;;

        "PSK")
            # *PSK-NULL-SHA suites supported by GnuTLS 3.3.5 but not 3.2.15
            M_CIPHERS="$M_CIPHERS                               \
                TLS-DHE-PSK-WITH-ARIA-128-CBC-SHA256            \
                TLS-DHE-PSK-WITH-ARIA-256-CBC-SHA384            \
                TLS-DHE-PSK-WITH-NULL-SHA                       \
                TLS-ECDHE-PSK-WITH-ARIA-128-CBC-SHA256          \
                TLS-ECDHE-PSK-WITH-ARIA-256-CBC-SHA384          \
                TLS-ECDHE-PSK-WITH-NULL-SHA                     \
                TLS-PSK-WITH-ARIA-128-CBC-SHA256                \
                TLS-PSK-WITH-ARIA-256-CBC-SHA384                \
                TLS-PSK-WITH-NULL-SHA                           \
                TLS-RSA-PSK-WITH-ARIA-128-CBC-SHA256            \
                TLS-RSA-PSK-WITH-ARIA-128-GCM-SHA256            \
                TLS-RSA-PSK-WITH-ARIA-256-CBC-SHA384            \
                TLS-RSA-PSK-WITH-ARIA-256-GCM-SHA384            \
                TLS-RSA-PSK-WITH-CHACHA20-POLY1305-SHA256       \
                TLS-RSA-PSK-WITH-NULL-SHA                       \
                "
            ;;
    esac
}
# Compute all peer-specific command lines and priority strings for the
# current $MODE / $VERIFY / $TYPE combination. Sets the
# {M,O,G}_{SERVER,CLIENT}_ARGS and G_{SERVER,CLIENT}_PRIO variables
# consumed by start_server() and run_client().
setup_arguments()
{
    O_MODE=""
    G_MODE=""
    case "$MODE" in
        "tls12")
            O_MODE="tls1_2"
            G_PRIO_MODE="+VERS-TLS1.2"
            ;;
        "tls13")
            G_PRIO_MODE="+VERS-TLS1.3"
            O_MODE="tls1_3"
            # NOTE(review): OPENSSL_NEXT / GNUTLS_NEXT_CLI / GNUTLS_NEXT_SERV
            # are not defaulted in this script; with `set -u` they must come
            # from the environment for tls13 — confirm against CI config.
            OPENSSL_CMD=${OPENSSL_NEXT}
            GNUTLS_CLI=${GNUTLS_NEXT_CLI}
            GNUTLS_SERV=${GNUTLS_NEXT_SERV}
            ;;
        "dtls12")
            O_MODE="dtls1_2"
            G_PRIO_MODE="+VERS-DTLS1.2"
            G_MODE="-u"
            ;;
        *)
            echo "error: invalid mode: $MODE" >&2
            exit 1;
    esac

    # GnuTLS < 3.4 will choke if we try to allow CCM-8
    if [ -z "${GNUTLS_MINOR_LT_FOUR-}" ]; then
        G_PRIO_CCM="+AES-256-CCM-8:+AES-128-CCM-8:"
    else
        G_PRIO_CCM=""
    fi

    if [ `minor_ver "$MODE"` -ge 4 ]
    then
        # TLS 1.3: OpenSSL takes a fixed -ciphersuites list
        O_SERVER_ARGS="-accept $PORT -ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256:TLS_AES_128_CCM_8_SHA256 --$O_MODE"
        M_SERVER_ARGS="server_port=$PORT server_addr=0.0.0.0 force_version=$MODE"
        G_SERVER_PRIO="NORMAL:${G_PRIO_CCM}${G_PRIO_MODE}"
    else
        M_SERVER_ARGS="server_port=$PORT server_addr=0.0.0.0 force_version=$MODE"
        O_SERVER_ARGS="-accept $PORT -cipher NULL,ALL -$O_MODE"
        G_SERVER_PRIO="NORMAL:${G_PRIO_CCM}+NULL:+MD5:+PSK:+DHE-PSK:+ECDHE-PSK:+SHA256:+SHA384:+RSA-PSK:-VERS-TLS-ALL:$G_PRIO_MODE"
    fi
    G_SERVER_ARGS="-p $PORT --http $G_MODE"

    # The default prime for `openssl s_server` depends on the version:
    # * OpenSSL <= 1.0.2a: 512-bit
    # * OpenSSL 1.0.2b to 1.1.1b: 1024-bit
    # * OpenSSL >= 1.1.1c: 2048-bit
    # Mbed TLS wants >=1024, so force that for older versions. Don't force
    # it for newer versions, which reject a 1024-bit prime. Indifferently
    # force it or not for intermediate versions.
    case $($OPENSSL_CMD version) in
        "OpenSSL 1.0"*)
            O_SERVER_ARGS="$O_SERVER_ARGS -dhparam data_files/dhparams.pem"
            ;;
    esac

    # with OpenSSL 1.0.1h, -www, -WWW and -HTTP break DTLS handshakes
    if is_dtls "$MODE"; then
        O_SERVER_ARGS="$O_SERVER_ARGS"
    else
        O_SERVER_ARGS="$O_SERVER_ARGS -www"
    fi

    M_CLIENT_ARGS="server_port=$PORT server_addr=127.0.0.1 force_version=$MODE"
    O_CLIENT_ARGS="-connect localhost:$PORT -$O_MODE"
    G_CLIENT_ARGS="-p $PORT --debug 3 $G_MODE"
    G_CLIENT_PRIO="NONE:$G_PRIO_MODE:+COMP-NULL:+CURVE-ALL:+SIGN-ALL"

    if [ "X$VERIFY" = "XYES" ];
    then
        # mutual authentication: both sides present and require certificates
        M_SERVER_ARGS="$M_SERVER_ARGS ca_file=data_files/test-ca_cat12.crt auth_mode=required"
        O_SERVER_ARGS="$O_SERVER_ARGS -CAfile data_files/test-ca_cat12.crt -Verify 10"
        G_SERVER_ARGS="$G_SERVER_ARGS --x509cafile data_files/test-ca_cat12.crt --require-client-cert"

        M_CLIENT_ARGS="$M_CLIENT_ARGS ca_file=data_files/test-ca_cat12.crt auth_mode=required"
        O_CLIENT_ARGS="$O_CLIENT_ARGS -CAfile data_files/test-ca_cat12.crt -verify 10"
        G_CLIENT_ARGS="$G_CLIENT_ARGS --x509cafile data_files/test-ca_cat12.crt"
    else
        # don't request a client cert at all
        M_SERVER_ARGS="$M_SERVER_ARGS ca_file=none auth_mode=none"
        G_SERVER_ARGS="$G_SERVER_ARGS --disable-client-cert"

        M_CLIENT_ARGS="$M_CLIENT_ARGS ca_file=none auth_mode=none"
        O_CLIENT_ARGS="$O_CLIENT_ARGS"
        G_CLIENT_ARGS="$G_CLIENT_ARGS --insecure"
    fi

    case $TYPE in
        "ECDSA")
            M_SERVER_ARGS="$M_SERVER_ARGS crt_file=data_files/server5.crt key_file=data_files/server5.key"
            O_SERVER_ARGS="$O_SERVER_ARGS -cert data_files/server5.crt -key data_files/server5.key"
            G_SERVER_ARGS="$G_SERVER_ARGS --x509certfile data_files/server5.crt --x509keyfile data_files/server5.key"

            if [ "X$VERIFY" = "XYES" ]; then
                M_CLIENT_ARGS="$M_CLIENT_ARGS crt_file=data_files/server6.crt key_file=data_files/server6.key"
                O_CLIENT_ARGS="$O_CLIENT_ARGS -cert data_files/server6.crt -key data_files/server6.key"
                G_CLIENT_ARGS="$G_CLIENT_ARGS --x509certfile data_files/server6.crt --x509keyfile data_files/server6.key"
            else
                M_CLIENT_ARGS="$M_CLIENT_ARGS crt_file=none key_file=none"
            fi
            ;;

        "RSA")
            M_SERVER_ARGS="$M_SERVER_ARGS crt_file=data_files/server2-sha256.crt key_file=data_files/server2.key"
            O_SERVER_ARGS="$O_SERVER_ARGS -cert data_files/server2-sha256.crt -key data_files/server2.key"
            G_SERVER_ARGS="$G_SERVER_ARGS --x509certfile data_files/server2-sha256.crt --x509keyfile data_files/server2.key"

            if [ "X$VERIFY" = "XYES" ]; then
                M_CLIENT_ARGS="$M_CLIENT_ARGS crt_file=data_files/cert_sha256.crt key_file=data_files/server1.key"
                O_CLIENT_ARGS="$O_CLIENT_ARGS -cert data_files/cert_sha256.crt -key data_files/server1.key"
                G_CLIENT_ARGS="$G_CLIENT_ARGS --x509certfile data_files/cert_sha256.crt --x509keyfile data_files/server1.key"
            else
                M_CLIENT_ARGS="$M_CLIENT_ARGS crt_file=none key_file=none"
            fi
            ;;

        "PSK")
            # give RSA-PSK-capable server a RSA cert
            # (should be a separate type, but harder to close with openssl)
            M_SERVER_ARGS="$M_SERVER_ARGS psk=6162636465666768696a6b6c6d6e6f70 ca_file=none crt_file=data_files/server2-sha256.crt key_file=data_files/server2.key"
            O_SERVER_ARGS="$O_SERVER_ARGS -psk 6162636465666768696a6b6c6d6e6f70 -nocert"
            G_SERVER_ARGS="$G_SERVER_ARGS --x509certfile data_files/server2-sha256.crt --x509keyfile data_files/server2.key --pskpasswd data_files/passwd.psk"

            M_CLIENT_ARGS="$M_CLIENT_ARGS psk=6162636465666768696a6b6c6d6e6f70 crt_file=none key_file=none"
            O_CLIENT_ARGS="$O_CLIENT_ARGS -psk 6162636465666768696a6b6c6d6e6f70"
            G_CLIENT_ARGS="$G_CLIENT_ARGS --pskusername Client_identity --pskkey=6162636465666768696a6b6c6d6e6f70"
            ;;
    esac
}
# is_mbedtls <cmd_line>
# Succeed iff the command line runs one of our sample programs.
is_mbedtls() {
    case "$1" in
        *ssl_server2*|*ssl_client2*) return 0 ;;
        *)                           return 1 ;;
    esac
}
# has_mem_err <log_file_name>
# Succeed (return 0) iff the valgrind log in <log_file_name> does NOT
# contain both "all freed" and "0 errors" summary lines, i.e. iff it
# reports leaks or errors.
has_mem_err() {
    if grep -F 'All heap blocks were freed -- no leaks are possible' "$1" >/dev/null &&
       grep -F 'ERROR SUMMARY: 0 errors from 0 contexts' "$1" >/dev/null
    then
        return 1 # clean log: no errors
    fi
    return 0 # errors (or missing summary lines)
}
# Wait for process $2 to be listening on port $1
# Defined in one of two flavours: an lsof-based poll (preferred), or a
# plain 2-second sleep when lsof is unavailable.
if type lsof >/dev/null 2>/dev/null; then
    wait_server_start() {
        START_TIME=$(date +%s)
        # DTLS servers listen on UDP, TLS servers on TCP
        if is_dtls "$MODE"; then
            proto=UDP
        else
            proto=TCP
        fi
        # poll until the server socket appears; give up after $DOG_DELAY s
        while ! lsof -a -n -b -i "$proto:$1" -p "$2" >/dev/null 2>/dev/null; do
              if [ $(( $(date +%s) - $START_TIME )) -gt $DOG_DELAY ]; then
                  echo "SERVERSTART TIMEOUT"
                  echo "SERVERSTART TIMEOUT" >> $SRV_OUT
                  break
              fi
              # Linux and *BSD support decimal arguments to sleep. On other
              # OSes this may be a tight loop.
              sleep 0.1 2>/dev/null || true
        done
    }
else
    echo "Warning: lsof not available, wait_server_start = sleep"
    wait_server_start() {
        sleep 2
    }
fi
# start_server <name>
# also saves name and command
# Launches the requested peer server (OpenSSL / GnuTLS / mbedTLS) in the
# background with its output in $SRV_OUT, then waits for it to listen.
# Sets SERVER_CMD, SERVER_NAME and PROCESS_ID.
start_server() {
    case $1 in
        [Oo]pen*)
            SERVER_CMD="$OPENSSL_CMD s_server $O_SERVER_ARGS"
            ;;
        [Gg]nu*)
            SERVER_CMD="$GNUTLS_SERV $G_SERVER_ARGS --priority $G_SERVER_PRIO"
            ;;
        mbed*)
            SERVER_CMD="$M_SRV $M_SERVER_ARGS"
            if [ "$MEMCHECK" -gt 0 ]; then
                SERVER_CMD="valgrind --leak-check=full $SERVER_CMD"
            fi
            ;;
        *)
            echo "error: invalid server name: $1" >&2
            exit 1
            ;;
    esac

    SERVER_NAME=$1
    log "$SERVER_CMD"
    echo "$SERVER_CMD" > $SRV_OUT
    # for servers without -www or equivalent: keep feeding stdin so the
    # server has something to echo back to the client
    while :; do echo bla; sleep 1; done | $SERVER_CMD >> $SRV_OUT 2>&1 &
    PROCESS_ID=$!

    wait_server_start "$PORT" "$PROCESS_ID"
}
# terminate the running server
# Under --memcheck, first scan the server log for valgrind errors
# (counted in SRVMEM; the log is kept in that case for later inspection).
stop_server() {
    kill $PROCESS_ID 2>/dev/null
    wait $PROCESS_ID 2>/dev/null

    if [ "$MEMCHECK" -gt 0 ]; then
        if is_mbedtls "$SERVER_CMD" && has_mem_err $SRV_OUT; then
            echo " ! Server had memory errors"
            SRVMEM=$(( $SRVMEM + 1 ))
            return
        fi
    fi

    rm -f $SRV_OUT
}
# Signal handler (INT/TERM/HUP): delete intermediate files, kill any
# background processes we started, and exit with failure.
cleanup() {
    rm -f $SRV_OUT $CLI_OUT
    for BG_PID in $PROCESS_ID $WATCHDOG_PID; do
        kill $BG_PID >/dev/null 2>&1
    done
    exit 1
}
# wait for client to terminate and set EXIT
# must be called right after starting the client (reads $!)
# A watchdog subshell kills the client after $DOG_DELAY seconds and
# records "TIMEOUT" in the client log.
wait_client_done() {
    CLI_PID=$!

    ( sleep "$DOG_DELAY"; echo "TIMEOUT" >> $CLI_OUT; kill $CLI_PID ) &
    WATCHDOG_PID=$!

    wait $CLI_PID
    EXIT=$?

    # the client finished in time: dispose of the watchdog
    kill $WATCHDOG_PID
    wait $WATCHDOG_PID

    echo "EXIT: $EXIT" >> $CLI_OUT
}
# run_client <name> <cipher>
# Run one client of the given flavour against the already-running server,
# interpret the outcome and update the counters. RESULT encoding:
# 0 = pass, 1 = ciphersuite unsupported (counted as SKIP), 2 = failure.
run_client() {
    # announce what we're going to do
    TESTS=$(( $TESTS + 1 ))
    VERIF=$(echo $VERIFY | tr '[:upper:]' '[:lower:]')
    # e.g. "O->m tls12,no TLS-..." : client initial -> server initial
    TITLE="`echo $1 | head -c1`->`echo $SERVER_NAME | head -c1`"
    TITLE="$TITLE $MODE,$VERIF $2"
    printf "%s " "$TITLE"
    # pad with dots to column 72 so the PASS/SKIP/FAIL verdicts line up
    LEN=$(( 72 - `echo "$TITLE" | wc -c` ))
    for i in `seq 1 $LEN`; do printf '.'; done; printf ' '

    # should we skip? (one-shot flag set by check_openssl_server_bug)
    if [ "X$SKIP_NEXT" = "XYES" ]; then
        SKIP_NEXT="NO"
        echo "SKIP"
        SKIPPED=$(( $SKIPPED + 1 ))
        return
    fi

    # run the command and interpret result
    case $1 in
        [Oo]pen*)
            # TLS 1.3 suites use -ciphersuites, older ones -cipher
            if [ `minor_ver "$MODE"` -ge 4 ]
            then
                CLIENT_CMD="$OPENSSL_CMD s_client $O_CLIENT_ARGS -ciphersuites $2"
            else
                CLIENT_CMD="$OPENSSL_CMD s_client $O_CLIENT_ARGS -cipher $2"
            fi
            log "$CLIENT_CMD"
            echo "$CLIENT_CMD" > $CLI_OUT
            printf 'GET HTTP/1.0\r\n\r\n' | $CLIENT_CMD >> $CLI_OUT 2>&1 &
            wait_client_done

            if [ $EXIT -eq 0 ]; then
                RESULT=0
            else
                # If the cipher isn't supported...
                if grep 'Cipher is (NONE)' $CLI_OUT >/dev/null; then
                    RESULT=1
                else
                    RESULT=2
                fi
            fi
            ;;

        [Gg]nu*)
            # need to force IPv4 with UDP, but keep localhost for auth
            if is_dtls "$MODE"; then
                G_HOST="127.0.0.1"
            else
                G_HOST="localhost"
            fi
            if [ `minor_ver "$MODE"` -ge 4 ]
            then
                # TLS 1.3: the "cipher" argument is a GnuTLS priority fragment
                G_CLIENT_PRIO="NONE:${2}:+GROUP-SECP256R1:+GROUP-SECP384R1:+CTYPE-ALL:+ECDHE-ECDSA:+CIPHER-ALL:+MAC-ALL:-SHA1:-AES-128-CBC:+SIGN-ECDSA-SECP256R1-SHA256:+SIGN-ECDSA-SECP384R1-SHA384:+ECDHE-ECDSA:${G_PRIO_MODE}"
                CLIENT_CMD="$GNUTLS_CLI $G_CLIENT_ARGS --priority $G_CLIENT_PRIO $G_HOST"
            else
                CLIENT_CMD="$GNUTLS_CLI $G_CLIENT_ARGS --priority $G_PRIO_MODE:$2 $G_HOST"
            fi
            log "$CLIENT_CMD"
            echo "$CLIENT_CMD" > $CLI_OUT
            printf 'GET HTTP/1.0\r\n\r\n' | $CLIENT_CMD >> $CLI_OUT 2>&1 &
            wait_client_done

            if [ $EXIT -eq 0 ]; then
                RESULT=0
            else
                RESULT=2
                # interpret early failure, with a handshake_failure alert
                # before the server hello, as "no ciphersuite in common"
                if grep -F 'Received alert [40]: Handshake failed' $CLI_OUT; then
                    if grep -i 'SERVER HELLO .* was received' $CLI_OUT; then :
                    else
                        RESULT=1
                    fi
                fi >/dev/null
            fi
            ;;

        mbed*)
            CLIENT_CMD="$M_CLI $M_CLIENT_ARGS force_ciphersuite=$2"
            if [ "$MEMCHECK" -gt 0 ]; then
                CLIENT_CMD="valgrind --leak-check=full $CLIENT_CMD"
            fi
            log "$CLIENT_CMD"
            echo "$CLIENT_CMD" > $CLI_OUT
            $CLIENT_CMD >> $CLI_OUT 2>&1 &
            wait_client_done

            case $EXIT in
                # Success
                "0")    RESULT=0    ;;
                # Ciphersuite not supported
                "2")    RESULT=1    ;;
                # Error
                *)      RESULT=2    ;;
            esac

            # under valgrind, memory errors turn a pass into a failure
            if [ "$MEMCHECK" -gt 0 ]; then
                if is_mbedtls "$CLIENT_CMD" && has_mem_err $CLI_OUT; then
                    RESULT=2
                fi
            fi
            ;;

        *)
            echo "error: invalid client name: $1" >&2
            exit 1
            ;;
    esac

    echo "EXIT: $EXIT" >> $CLI_OUT

    # report and count result
    case $RESULT in
        "0")
            echo PASS
            ;;
        "1")
            echo SKIP
            SKIPPED=$(( $SKIPPED + 1 ))
            ;;
        "2")
            echo FAIL
            # preserve both logs for post-mortem before $SRV_OUT/$CLI_OUT
            # are deleted/reused
            cp $SRV_OUT c-srv-${TESTS}.log
            cp $CLI_OUT c-cli-${TESTS}.log
            echo " ! outputs saved to c-srv-${TESTS}.log, c-cli-${TESTS}.log"

            if [ "${LOG_FAILURE_ON_STDOUT:-0}" != 0 ]; then
                echo " ! server output:"
                cat c-srv-${TESTS}.log
                echo " ! ==================================================="
                echo " ! client output:"
                cat c-cli-${TESTS}.log
            fi

            FAILED=$(( $FAILED + 1 ))
            ;;
    esac

    rm -f $CLI_OUT
}
#
# MAIN
#

# Data files and the translation script are addressed relative to this
# script's own directory, so cd there first.
if cd $( dirname $0 ); then :; else
    echo "cd $( dirname $0 ) failed" >&2
    exit 1
fi

get_options "$@"

# sanity checks, avoid an avalanche of errors
if [ ! -x "$M_SRV" ]; then
    echo "Command '$M_SRV' is not an executable file" >&2
    exit 1
fi
if [ ! -x "$M_CLI" ]; then
    echo "Command '$M_CLI' is not an executable file" >&2
    exit 1
fi
# only require the external tools for the peers actually requested
if echo "$PEERS" | grep -i openssl > /dev/null; then
    if which "$OPENSSL_CMD" >/dev/null 2>&1; then :; else
        echo "Command '$OPENSSL_CMD' not found" >&2
        exit 1
    fi
fi
if echo "$PEERS" | grep -i gnutls > /dev/null; then
    for CMD in "$GNUTLS_CLI" "$GNUTLS_SERV"; do
        if which "$CMD" >/dev/null 2>&1; then :; else
            echo "Command '$CMD' not found" >&2
            exit 1
        fi
    done
fi
# Reject any peer name we do not know how to drive, before starting work.
for PEER in $PEERS; do
    case "$PEER" in
        mbed*|[Oo]pen*|[Gg]nu*)
            ;;
        *)
            # message made singular, matching the identical check in the
            # main loop ("Unknown peer: ...")
            echo "Unknown peer: $PEER" >&2
            exit 1
    esac
done
# Pick a "unique" port in the range 10000-19999.
PORT="0000$$"
PORT="1$(echo $PORT | tail -c 5)"   # "1" + last four digits of the PID

# Also pick a unique name for intermediate files
SRV_OUT="srv_out.$$"
CLI_OUT="cli_out.$$"

# client timeout delay: be more patient with valgrind
if [ "$MEMCHECK" -gt 0 ]; then
    DOG_DELAY=30
else
    DOG_DELAY=10
fi

# one-shot skip flag, set by check_openssl_server_bug()
SKIP_NEXT="NO"

# clean up background processes and temp files on interruption
trap cleanup INT TERM HUP
# Main loop: for every combination of verification mode, protocol mode,
# key exchange type and peer, build the ciphersuite lists and run each
# suite in both directions (peer client <-> mbedTLS server and vice versa).
for VERIFY in $VERIFIES; do
    for MODE in $MODES; do
        for TYPE in $TYPES; do
            for PEER in $PEERS; do

                setup_arguments

                case "$PEER" in

                    [Oo]pen*)

                        if test "$OSSL_NO_DTLS" -gt 0 && is_dtls "$MODE"; then
                            continue;
                        fi

                        # OpenSSL <1.0.2 doesn't support DTLS 1.2. Check if OpenSSL
                        # supports $O_MODE from the s_server help. (The s_client
                        # help isn't accurate as of 1.0.2g: it supports DTLS 1.2
                        # but doesn't list it. But the s_server help seems to be
                        # accurate.)
                        if ! $OPENSSL_CMD s_server -help 2>&1 | grep -q "^ *-$O_MODE "; then
                            continue;
                        fi

                        reset_ciphersuites
                        if [ `minor_ver "$MODE"` -ge 4 ]
                        then
                            # TLS 1.3: fixed lists, no translation script
                            M_CIPHERS="$M_CIPHERS                   \
                                TLS1-3-AES-128-GCM-SHA256           \
                                TLS1-3-AES-256-GCM-SHA384           \
                                TLS1-3-AES-128-CCM-SHA256           \
                                TLS1-3-AES-128-CCM-8-SHA256         \
                                TLS1-3-CHACHA20-POLY1305-SHA256     \
                                "
                            O_CIPHERS="$O_CIPHERS                   \
                                TLS_AES_128_GCM_SHA256              \
                                TLS_AES_256_GCM_SHA384              \
                                TLS_AES_128_CCM_SHA256              \
                                TLS_AES_128_CCM_8_SHA256            \
                                TLS_CHACHA20_POLY1305_SHA256        \
                                "
                        else
                            add_common_ciphersuites
                            add_openssl_ciphersuites
                        fi
                        filter_ciphersuites

                        # direction 1: OpenSSL server, mbedTLS client
                        if [ "X" != "X$M_CIPHERS" ]; then
                            start_server "OpenSSL"
                            for i in $M_CIPHERS; do
                                check_openssl_server_bug $i
                                run_client mbedTLS $i
                            done
                            stop_server
                        fi

                        # direction 2: mbedTLS server, OpenSSL client
                        if [ "X" != "X$O_CIPHERS" ]; then
                            start_server "mbedTLS"
                            for i in $O_CIPHERS; do
                                run_client OpenSSL $i
                            done
                            stop_server
                        fi

                        ;;

                    [Gg]nu*)

                        reset_ciphersuites
                        if [ `minor_ver "$MODE"` -ge 4 ]
                        then
                            # TLS 1.3: fixed lists, GnuTLS uses priority fragments
                            M_CIPHERS="$M_CIPHERS                   \
                                TLS1-3-AES-128-GCM-SHA256           \
                                TLS1-3-AES-256-GCM-SHA384           \
                                TLS1-3-AES-128-CCM-SHA256           \
                                TLS1-3-AES-128-CCM-8-SHA256         \
                                TLS1-3-CHACHA20-POLY1305-SHA256     \
                                "
                            G_CIPHERS="$G_CIPHERS                   \
                                +AES-128-GCM:+SHA256                \
                                +AES-256-GCM:+SHA384                \
                                +AES-128-CCM:+SHA256                \
                                +AES-128-CCM-8:+SHA256              \
                                +CHACHA20-POLY1305:+SHA256          \
                                "
                        else
                            add_common_ciphersuites
                            add_gnutls_ciphersuites
                        fi
                        filter_ciphersuites

                        # direction 1: GnuTLS server, mbedTLS client
                        if [ "X" != "X$M_CIPHERS" ]; then
                            start_server "GnuTLS"
                            for i in $M_CIPHERS; do
                                run_client mbedTLS $i
                            done
                            stop_server
                        fi

                        # direction 2: mbedTLS server, GnuTLS client
                        if [ "X" != "X$G_CIPHERS" ]; then
                            start_server "mbedTLS"
                            for i in $G_CIPHERS; do
                                run_client GnuTLS $i
                            done
                            stop_server
                        fi

                        ;;

                    mbed*)

                        reset_ciphersuites
                        if [ `minor_ver "$MODE"` -ge 4 ]
                        then
                            M_CIPHERS="$M_CIPHERS                   \
                                TLS1-3-AES-128-GCM-SHA256           \
                                TLS1-3-AES-256-GCM-SHA384           \
                                TLS1-3-AES-128-CCM-SHA256           \
                                TLS1-3-AES-128-CCM-8-SHA256         \
                                TLS1-3-CHACHA20-POLY1305-SHA256     \
                                "
                            O_CIPHERS="$O_CIPHERS                   \
                                TLS_AES_128_GCM_SHA256              \
                                TLS_AES_256_GCM_SHA384              \
                                TLS_AES_128_CCM_SHA256              \
                                TLS_AES_128_CCM_8_SHA256            \
                                TLS_CHACHA20_POLY1305_SHA256        \
                                "
                        else
                            # mbedTLS vs itself: every suite we know about
                            add_common_ciphersuites
                            add_openssl_ciphersuites
                            add_gnutls_ciphersuites
                            add_mbedtls_ciphersuites
                        fi
                        filter_ciphersuites

                        if [ "X" != "X$M_CIPHERS" ]; then
                            start_server "mbedTLS"
                            for i in $M_CIPHERS; do
                                run_client mbedTLS $i
                            done
                            stop_server
                        fi

                        ;;

                    *)
                        echo "Unknown peer: $PEER" >&2
                        exit 1
                        ;;

                esac

            done
        done
    done
done
echo "------------------------------------------------------------------------"

# Overall verdict: any failed test or any server memory error means FAILED.
if [ $FAILED -ne 0 -o $SRVMEM -ne 0 ];
then
    printf "FAILED"
else
    printf "PASSED"
fi

if [ "$MEMCHECK" -gt 0 ]; then
    MEMREPORT=", $SRVMEM server memory errors"
else
    MEMREPORT=""
fi

PASSED=$(( $TESTS - $FAILED ))
echo " ($PASSED / $TESTS tests ($SKIPPED skipped$MEMREPORT))"

# exit status: number of failures plus server memory errors (0 = success)
FAILED=$(( $FAILED + $SRVMEM ))
exit $FAILED
|
#!/bin/bash
# Block until the Nexus service answers HTTP requests on localhost:19999.
echo "Waiting nexus to launch on 19999..."

until curl -s http://localhost:19999 > /dev/null; do
    sleep 1 # poll once per second
done

echo "Nexus launched"
|
import { createRouter, createWebHistory } from 'vue-router'
import Home from '@/Components/Views/Home.vue'
import Login from '@/Components/Views/Login.vue'
import Register from '@/Components/Views/Register.vue'
import DahboardLayout from '@/Components/Layouts/Layout.vue'
import Dashboard from '@/Components/Views/Dashboard.vue'
import Reset from '@/Components/Views/ResetPassword.vue'
import Forgot from '@/Components/Views/ForgotPassword.vue'
import UsersIndex from '@/Components/Views/Users/Index.vue'
// Application route table.
// `meta.middleware` tags each route as guest-only or auth-required, and
// `meta.title` supplies a page title — both presumably consumed by a
// navigation guard defined elsewhere (confirm against the router setup).
const routes = [
  // {
  //   path: '/',
  //   name: 'home',
  //   component: Home
  // },
  {
    name: 'login',
    path: '/login',
    component: Login,
    meta: { middleware: 'guest', title: 'Login' },
  },
  {
    name: 'register',
    path: '/register',
    component: Register,
    meta: { middleware: 'guest', title: 'Register' },
  },
  {
    name: 'forgot',
    path: '/password/forgot',
    component: Forgot,
    meta: { middleware: 'guest', title: 'Restablecer Contraseña' },
  },
  {
    name: 'reset',
    path: '/password/reset/:token',
    component: Reset,
    meta: { middleware: 'guest', title: 'Restablecer Contraseña' },
  },
  {
    name: 'dashboard',
    path: '/',
    component: Dashboard,
    meta: { middleware: 'auth', title: 'Dashboard' },
  },
  {
    name: 'usersIndex',
    path: '/users',
    component: UsersIndex,
    meta: { middleware: 'auth', title: 'Usuarios' },
  },
];

// HTML5-history router over the route table above.
const router = createRouter({
  history: createWebHistory(),
  routes,
});

export default router;
#!/usr/bin/env bash
# Build the logdog binary, package it with its config file, upload the
# archive, then clean up. Must be run from the project root.

[[ -f main.go ]] || { echo "invalid dir"; exit 1; }

# remove stale artifacts from a previous run
for artifact in logdog logdog.tar.gz; do
    if [[ -f "$artifact" ]]; then
        echo "$artifact exists, remove"
        rm "$artifact"
    fi
done

go build -o logdog ./
tar czf logdog.tar.gz logdog logdog.toml
python3 ./script/upload.py
rm logdog.tar.gz
<gh_stars>0
package com.moecrow.demo.model.dto;
import lombok.Data;
/**
 * Message payload identifying which map a battle starts on.
 * Getters, setters, {@code equals}/{@code hashCode} and {@code toString}
 * are generated by Lombok's {@code @Data}.
 *
 * @author willz
 * @date 2020.11.12
 */
@Data
public class BattleStartMessage {
    /** Identifier of the map the battle takes place on. */
    private String mapId;
}
|
#!/usr/bin/env bash
# Migrate Atlassian Bitbucket Server to Bitbucket Cloud
# Copyright 2018 Shawn Woodford
# https://github.com/swoodford
# Requires curl, git, jq, bc openssl
# It is recommended to run this script as root directly on your Bitbucket Server (Linux only)
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION:
# YOU MUST AGREE TO ALL TERMS IN APACHE 2.0 LICENSE PROVIDED IN LICENSE.md FILE
# THIS WORK IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND
# YOU AGREE TO ACCEPT ALL LIABILITY IN USING THIS WORK AND ASSUME ANY AND ALL RISKS ASSOCIATED WITH RUNNING THIS WORK
# Steps for use:
# Create Bitbucket Cloud Account and Setup Team
# Create OAuth Consumer in Bitbucket Cloud with Full Permisions to Team Account
# Create Admin or System Admin level user for migration on your Bitbucket Server
# Set all required variables below then run ./migrate.sh
# Migration process works in the following way:
# Get list of all Projects and Repos from Bitbucket Server
# Create new Project in Bitbucket Cloud, Create new Repo in Cloud, Backup each Project Repo and all branches locally using git
# Add new git remote cloud, push all branches to cloud, send email to git committers when each repo is migrated
# Migration can be done in one of three ways, see MIGRATION METHOD variables section below
# Setup variables
############################
# BITBUCKET SERVER VARIABLES
############################
# Protocol and Hostname or Protocol and Hostname and Port of your Bitbucket Server Frontend
SERVERHOSTNAME="https://git.example.com:8443"
# Bitbucket Server API URL - Hostname and Rest API path (this script has only been tested with Server API version 1.0)
SERVERAPIURL="$SERVERHOSTNAME/rest/api/1.0"
# Hostname or Hostname and Port of your Bitbucket Server Git Clone URL
SERVERGITCLONEURL="ssh://git@git.example.com:7999"
# Username and password for Bitbucket Server account with Admin or System Admin level permissions on your Bitbucket Server
# This user must be able to read all Projects and all Repos in order to backup & migrate the entire server to cloud
# NOTE(review): credentials are stored in plain text in this script; consider
# sourcing them from the environment or a secrets file instead.
SERVERAPIUSER="admin"
SERVERAPIPASS="password"
# Limit max number of Projects/Repos Bitbucket Server API will return
LIMIT="1000"
############################
# BITBUCKET CLOUD VARIABLES
############################
# Username and password for account with Team Admin level permissions on your Bitbucket Cloud account
# This user must be able to create new Projects and Repos in the cloud team account in order to run the migration
CLOUDAPIUSER="username@example.com"
CLOUDAPIPASS="cloudpassword"
# Your Bitbucket Cloud account Team name
CLOUDAPITEAM="exampleteam"
# Bitbucket Cloud API URL - Protocol and Hostname and Rest API path (this script has only been tested with Cloud API version 2.0)
CLOUDAPIURL="https://api.bitbucket.org/2.0"
CLOUDGITCLONEURL="git@bitbucket.org"
# Bitbucket Cloud OAuth consumer credentials
# You must create an OAuth consumer with full cloud account permissions at this URL:
# https://bitbucket.org/account/user/YOURCLOUDAPITEAM/oauth-consumers/new
# (You must set a Callback URL, set it to https://bitbucket.org/ for example)
# (Check all Permissions boxes to avoid any issues)
OAuthKey="key"
OAuthSecret="secret"
OAuthURL="https://bitbucket.org/site/oauth2/access_token"
############################
# MIGRATION VARIABLES
############################
# Optionally enable Debug Mode which provides more verbose output for troubleshooting
DEBUGMODE=false
# All repos with commits IN THIS YEAR OR LATER will be migrated (applies to migrateALL function only)
CUTOFFYEAR="2000"
# A local directory with plenty of free space to perform a git clone of all repos as a backup prior to migration to cloud
REPOBACKUPDIR="/root/bitbucket-backups"
# Quoted so a path containing whitespace cannot break the test.
if ! [ -d "$REPOBACKUPDIR" ]; then
  mkdir "$REPOBACKUPDIR"
fi
# Optionally skip migrating any Git LFS repos that require manual conversion to Git LFS format
# Any repo that is over 2GB in size cannot be migrated to cloud without converting to Git LFS
# List repo slugs using vertical bar (|) as separator
# LFSREPOS="example_LFS_repo_slug1|example_LFS_repo_slug2"
# Optionally skip migrating any repos that have already been migrated
# List repo slugs using vertical bar (|) as separator
# MIGRATEDREPOS="example_repo_slug_to_skip1|example_repo_slug_to_skip2"
# Determines the directory where this script is running from, don't change this
SCRIPTDIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
############################
# MIGRATION METHOD
############################
# Migration can be done in one of three ways:
# Using Function migrateALL, will migrate ALL Projects and ALL Repos found on Bitbucket Server
migrateALL=true
# OR using Function migratePhases which is a plain text file containing a list of
# Project Keys and Repo Slugs separated by a TAB in the text file set in variable PHASEFILE
# This was designed to use values pasted from a spreadsheet with one or more rows containing projects and repo slugs
migratePhases=false
# PHASEFILE="phase1.txt"
# PHASENUMBER="1"
# OR using migrateMultiplePhases which will migrate multiple phases at a time by iterating over each phase file
migrateMultiplePhases=false
# Number of sequential phases to migrate, requires having PHASEFILES like "phase1.txt, phase2.txt, etc."
# NumberOfPhases="1"
############################
# SEND EMAILS USING AWS SES
############################
# Optionally Send an Email to Git Committers using OpenSSL TLS Client and AWS SES with IAM Credentials
# when each repo has completed migration informing them of the migration and the new repo URL
SENDEMAILS=false
# Email address to use in the FROM field of emails, example: user@domain.com
EMAIL_FROM="user"
EMAIL_FROM_DOMAIN="domain.com"
# AWS SES IAM Credentials
# You must create an IAM SMTP User
# To send email through Amazon SES using SMTP, you must create SMTP credentials at this URL:
# https://console.aws.amazon.com/ses/home?region=us-east-1#smtp-settings:
AWS_SMTP_Username="smtpuser"
AWS_SMTP_Password="smtppass"
AWSSESHostname="email-smtp.us-east-1.amazonaws.com:587"
# A local directory where email templates will be generated and stored
EMAILDIR="/root/bitbucket-emails"
# Quoted so a path containing whitespace cannot break the test.
if ! [ -d "$EMAILDIR" ]; then
  mkdir "$EMAILDIR"
fi
# Verify that every command named in the arguments is installed and on PATH;
# aborts the script via fail() on the first one that is missing.
function check_command(){
  for required in "$@"; do
    if ! type -P "$required" &>/dev/null; then
      fail "Unable to find $required, please install it and run this script again."
    fi
  done
}
# Wait for a single keypress before continuing. The original used a bare
# `read`, which waited for Enter despite the prompt saying "any key" and
# mangled backslashes; -r disables that, -n1 accepts one key, -s hides it.
function pause(){
  read -rsn1 -p "Press any key to continue..."
  echo
}
# Print an error message in red, reset terminal colors, and abort the script.
function fail(){
  tput setaf 1
  echo "Failure: $*"
  tput sgr0
  exit 1
}
# Like fail(), but only reports the error in red; the script keeps running.
function failwithoutexit(){
  tput setaf 1
  echo "Failure: $*"
  tput sgr0
}
# Print a non-fatal warning (same red color as failures, by original design).
function warning(){
  tput setaf 1
  echo "Warning: $*"
  tput sgr0
}
# Horizontal Rule
function HorizontalRule(){
echo "============================================================"
}
# Checks git for updates to the migration repo
function self_update(){
cd "$( dirname "${BASH_SOURCE[0]}" )"
if ! git pull | egrep -iq "Already up-to-date.|Already up to date."; then
echo "Update found, please re-run this script."
exit 0
fi
}
# Needed if running on Bitbucket Server
function bitbucketServer(){
# Verify running as root or with sudo
if [ "$(id -u)" != "0" ]; then
if $DEBUGMODE; then
whoami
fi
fail "Please run this script as root."
fi
# Verify the repo backup directory exists
if ! [ -d $REPOBACKUPDIR ]; then
fail "Repo backup directory does not exist: $REPOBACKUPDIR"
fi
## If issues with SSH/git clone:
## Use "ssh-add" to add the correct SSH key to the authentication agent
## Then run "ssh-add -l", find the SHA256 hash and paste it below
# SHA256hash="hash"
# if ! ssh-add -l | grep -q "$SHA256hash"; then
# eval "$(ssh-agent -s)" > /dev/null
# ssh-add /root/.ssh/id_rsa
# fi
}
# Git Checkout, Git Pull, Git Fetch on every branch in the repo
# Side effect: sets global YEAR to the last-commit year of the final branch
# iterated; callers use it against CUTOFFYEAR to decide whether to migrate.
function backup(){
echo "Begin Local Backup"
# NOTE(review): relies on word-splitting of `git branch -r` output; assumes
# no branch name contains whitespace — confirm, or switch to git for-each-ref.
for branch in `git branch -r | grep -v /HEAD | cut -d / -f2-`; do
CHECKOUT=$(git checkout "$branch")
# $? here is the exit status of the git checkout inside the substitution.
if [ ! $? -eq 0 ]; then
fail "$CHECKOUT"
fi
if echo "$CHECKOUT" | egrep -iq "fatal"; then
fail "$CHECKOUT"
fi
PULL=$(git pull 2>&1)
# "unmerged" output means a conflicted tree — hard stop.
if echo "$PULL" | egrep -iq "unmerged"; then
fail "$PULL"
fi
# "configuration" typically means no upstream tracking info — warn only,
# since the branch may simply have been deleted on the remote.
if echo "$PULL" | egrep -iq "configuration"; then
warning "$PULL"
warning "(Branch $branch may no longer exist in remote.)"
fi
echo "$PULL"
# Commit year (from %cd short date) of the branch just pulled.
YEAR=$(git log -1 --date=short --pretty=format:'%cd' | cut -d \- -f1)
done
git fetch origin
}
# Get the Bitbucket Cloud OAuth Token
# Sets global TOKEN to a bearer token obtained via client_credentials grant;
# retries once with -k (skip TLS verification) if the first attempt returned
# nothing.
function getToken(){
TOKEN=$(curl -sX POST -u "$OAuthKey:$OAuthSecret" "$OAuthURL" -d grant_type=client_credentials)
if [[ -z $TOKEN ]]; then
TOKEN=$(curl -skX POST -u "$OAuthKey:$OAuthSecret" "$OAuthURL" -d grant_type=client_credentials)
fi
# NOTE(review): this $? is the status of whatever ran last above (the retry
# curl, or the [[ -z ]] test when no retry happened) — it does not reliably
# reflect the first curl. Verify intent before relying on this branch.
if [ ! $? -eq 0 ]; then
fail "$TOKEN"
else
# Extract the access_token field from the JSON response.
TOKEN=$(echo "$TOKEN" | jq '.access_token' | cut -d \" -f2)
if $DEBUGMODE; then
echo TOKEN: $TOKEN
fi
fi
}
# Creates Each Project in Bitbucket Cloud
# Reads globals PROJECTKEY (and server/cloud credentials); idempotent — if the
# project already exists in the cloud team it is left untouched.
function cloudProject(){
echo "Begin cloudProject"
# Check if Project already exists in Cloud
CHECKPROJECT=$(curl -u $CLOUDAPIUSER:$CLOUDAPIPASS $CLOUDAPIURL/teams/$CLOUDAPITEAM/projects/$PROJECTKEY -sL -w "%{http_code}" -o /dev/null)
# Test HTTP status code
if [[ "$CHECKPROJECT" == "200" ]]; then
echo "Project exists in Cloud:" $PROJECTKEY
else
# Get Project Details from the server so name/description carry over.
PROJECTDETAILS=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects/$PROJECTKEY)
PROJECTNAME=$(echo $PROJECTDETAILS | jq '.name'| cut -d \" -f2)
PROJECTDESCRIPTION=$(echo $PROJECTDETAILS | jq '.description'| cut -d \" -f2)
# A project with no description comes back as the literal string "null";
# send an empty description instead.
if [[ "$PROJECTDESCRIPTION" == "null" ]]; then
# failwithoutexit PROJECT DESCRIPTION IS NULL!
# pause
PROJECTDESCRIPTION=""
fi
if $DEBUGMODE; then
echo PROJECTKEY $PROJECTKEY
echo PROJECTNAME $PROJECTNAME
echo PROJECTDESCRIPTION $PROJECTDESCRIPTION
pause
fi
getToken
# Create the Project in Cloud (heredoc builds the JSON request body;
# values are interpolated, so quotes in names/descriptions would break it).
body=$(cat << EOF
{
    "name": "$PROJECTNAME",
    "key": "$PROJECTKEY",
    "description": "$PROJECTDESCRIPTION",
    "is_private": true
}
EOF
)
curl -sH "Content-Type: application/json" \
 -H "Authorization: Bearer $TOKEN" \
 -X POST \
 -d "$body" \
 $CLOUDAPIURL/teams/$CLOUDAPITEAM/projects/ | jq .
fi
}
# Creates Each Repo in Bitbucket Cloud
# Reads globals PROJECTKEY and THISSLUG; idempotent — existing cloud repos are
# left untouched. After creating, re-checks the cloud API to confirm.
# Fix: the original error branch ran `fail echo "$CREATEREPO" | jq .`, which
# passed the literal word "echo" to fail and piped fail's own output to jq;
# it now formats the API error response and passes it to fail.
function cloudRepo(){
echo "Begin cloudRepo"
# Check if Repo exists in Cloud and create it if needed
CHECKREPOURL=$CLOUDAPIURL/repositories/$CLOUDAPITEAM/$THISSLUG
CHECKREPO=$(curl -u $CLOUDAPIUSER:$CLOUDAPIPASS $CHECKREPOURL -sL -w "%{http_code}" -o /dev/null)
if $DEBUGMODE; then
curl -su $CLOUDAPIUSER:$CLOUDAPIPASS $CHECKREPOURL | jq .
fi
# Test HTTP status code
if [[ "$CHECKREPO" == "200" ]]; then
echo "Repo exists in Cloud:" $THISSLUG
else
if [[ "$CHECKREPO" == "404" ]]; then
echo "Creating repo:" $THISSLUG
# Get Repo Details from the server so the display name carries over.
REPODETAILS=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects/$PROJECTKEY/repos/$THISSLUG)
REPONAME=$(echo $REPODETAILS | jq '.name' | cut -d \" -f2)
if $DEBUGMODE; then
echo REPODETAILS:
echo $REPODETAILS | jq .
pause
fi
getToken
# Create the Repo in Cloud (JSON body built via heredoc interpolation).
body=$(cat << EOF
{
    "scm": "git",
    "project": {
        "key": "$PROJECTKEY"
    },
    "name": "$REPONAME",
    "is_private": true
}
EOF
)
CREATEREPO=$(curl -sH "Content-Type: application/json" \
 -H "Authorization: Bearer $TOKEN" \
 -X POST \
 -d "$body" \
 $CLOUDAPIURL/repositories/$CLOUDAPITEAM/$THISSLUG)
if echo "$CREATEREPO" | egrep -iq "invalid|error"; then
fail "$(echo "$CREATEREPO" | jq .)"
fi
else
failwithoutexit "Error checking if repo exists in Cloud:"
curl -Ssu $CLOUDAPIUSER:$CLOUDAPIPASS $CHECKREPOURL | jq .
fi
# Verify Repo was created in Cloud
CHECKREPOURL=$CLOUDAPIURL/repositories/$CLOUDAPITEAM/$THISSLUG
CHECKREPO=$(curl -u $CLOUDAPIUSER:$CLOUDAPIPASS $CHECKREPOURL -sL -w "%{http_code}" -o /dev/null)
# Test HTTP status code
if [[ "$CHECKREPO" == "200" ]]; then
echo "Confirmed Repo Created in Cloud:" $THISSLUG
else
failwithoutexit "Error creating repo in Cloud:"
curl -Ssu $CLOUDAPIUSER:$CLOUDAPIPASS $CHECKREPOURL | jq .
fail
fi
fi
}
# Push every branch and every tag of the current repo (the working directory)
# to the Bitbucket Cloud remote, registering the "cloud" remote on first use.
# Aborts if either push is rejected.
function cloudMigrate(){
  echo "Begin cloudMigrate"
  # Add the cloud remote only once per repo.
  if ! git remote | grep -qw cloud; then
    git remote add cloud "$CLOUDGITCLONEURL":"$CLOUDAPITEAM"/"$THISSLUG".git
  fi
  PUSHALL=$(git push --all cloud 2>&1)
  if grep -Eiq "rejected" <<< "$PUSHALL"; then
    fail "$PUSHALL"
  fi
  PUSHTAGS=$(git push --tags cloud 2>&1)
  if grep -Eiq "rejected" <<< "$PUSHTAGS"; then
    fail "$PUSHTAGS"
  fi
  echo "$PUSHALL"
  echo "Completed cloudMigrate"
}
# Migrate the repos listed in one phase file (phase$PHASENUMBER.txt, or
# $PHASEFILE). Each line is PROJECTKEY<TAB>REPO (or comma-separated for .csv).
# For every repo: skip LFS repos, clone if not yet backed up, run backup,
# then (if newer than CUTOFFYEAR) create project/repo in cloud, push, and
# optionally email committers.
function migratePhases(){
echo "Begin migratePhases"
# Load repo list for this phase
if ! [[ -z $PHASENUMBER ]]; then
PHASE=$(cat phase$PHASENUMBER.txt)
else
PHASE=$(cat $PHASEFILE)
fi
if [ ! $? -eq 0 ]; then
fail "$PHASE"
fi
# Count repos to migrate in this phase (tr strips whitespace from wc output).
TOTALINPHASE=$(echo "$PHASE" | wc -l | tr -d '\040\011\012\015')
echo
tput setaf 2; HorizontalRule
echo "Total Repos to Migrate in Phase $PHASENUMBER: $TOTALINPHASE"
HorizontalRule && tput sgr0
echo
START=1
for (( COUNT=$START; COUNT<=$TOTALINPHASE; COUNT++ )); do
# Select each project: nl numbers the lines, grep picks row $COUNT,
# cut extracts key and repo columns (comma for csv, tab otherwise).
if echo "$PHASEFILE" | egrep -iq "csv"; then
if $DEBUGMODE; then
fail "CSV File not supported."
fi
PROJECTKEY=$(echo "$PHASE" | nl | grep -w [^0-9][[:space:]]$COUNT | cut -f2 | cut -d \, -f1)
REPO=$(echo "$PHASE" | nl | grep -w [^0-9][[:space:]]$COUNT | cut -f2 | cut -d \, -f2)
else
PROJECTKEY=$(echo "$PHASE" | nl | grep -w [^0-9][[:space:]]$COUNT | cut -f2)
REPO=$(echo "$PHASE" | nl | grep -w [^0-9][[:space:]]$COUNT | cut -f3)
fi
# Get Project Details
PROJECTDETAILS=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects/$PROJECTKEY)
PROJECTNAME=$(echo $PROJECTDETAILS | jq '.name'| cut -d \" -f2)
echo
HorizontalRule
echo "Key:"; tput setaf 2; echo "$PROJECTKEY"; tput sgr0
echo "Name:"; tput setaf 2; echo "$PROJECTNAME"; tput sgr0
echo "Repo:"; tput setaf 2; echo "$REPO"; tput sgr0
HorizontalRule
echo
SLUG="$REPO"
THISSLUG="$SLUG"
# Do not migrate LFS repos >2GB!!!
if ! echo "$THISSLUG" | egrep -iq "$LFSREPOS"; then
# If the slug path exists then run the backup function
if [ -d $REPOBACKUPDIR/$PROJECTKEY/$SLUG ]; then
if $DEBUGMODE; then
echo "Path exists: $REPOBACKUPDIR/$PROJECTKEY/$SLUG"
pause
fi
cd $REPOBACKUPDIR/$PROJECTKEY/$SLUG
backup
# YEAR is set by backup(); migrate only repos at/after the cutoff year.
if [ "$YEAR" -gt "$CUTOFFYEAR" -o "$YEAR" -eq "$CUTOFFYEAR" ]; then
echo "Repo year: $YEAR"
cloudProject
cloudRepo
cloudMigrate
if $SENDEMAILS; then
generateEmail
TO=$(git log --pretty="%ae")
sendEmail
fi
else
echo "Repo year $YEAR is older than cutoff year $CUTOFFYEAR. Not migrating $THISSLUG to cloud!"
fi
fi
# If the slug path doesn't exist then clone the repo and run the backup function
if ! [ -d $REPOBACKUPDIR/$PROJECTKEY/$SLUG ]; then
if $DEBUGMODE; then
echo "Path doesn't exist: $REPOBACKUPDIR/$PROJECTKEY/$SLUG"
pause
fi
CLONEURL="$SERVERGITCLONEURL/$PROJECTKEY/$SLUG.git"
# # List repo clone URL within project
# CLONEURL=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects/$PROJECTKEY/repos?limit=$LIMIT --tlsv1 | jq '.values | .[] | .links | .clone | .[] | .href' | cut -d \" -f2 | grep ssh)
if $DEBUGMODE; then
echo "Cloning the repo from:"
echo "$CLONEURL"
fi
cd "$REPOBACKUPDIR"
if ! [ -d "$PROJECTKEY" ]; then
mkdir "$PROJECTKEY"
fi
cd "$PROJECTKEY"
CLONE=$(git clone "$CLONEURL" 2>&1)
if [ ! $? -eq 0 ]; then
fail "$CLONE"
fi
if echo "$CLONE" | egrep -iq "fatal|not|denied"; then
fail "$CLONE"
fi
cd "$SLUG"
backup
if [ "$YEAR" -gt "$CUTOFFYEAR" -o "$YEAR" -eq "$CUTOFFYEAR" ]; then
echo "Repo year: $YEAR"
cloudProject
cloudRepo
cloudMigrate
if $SENDEMAILS; then
generateEmail
TO=$(git log --pretty="%ae")
sendEmail
fi
else
echo "Repo year $YEAR is older than cutoff year $CUTOFFYEAR. Not migrating $THISSLUG to cloud!"
fi
fi
else
warning "LFS Repo $THISSLUG will not be migrated!"
fi
done
}
# Migrate every project and every repo found on the Bitbucket Server.
# For each project: list its repo slugs, then for each slug (skipping
# already-migrated and LFS repos) clone/backup locally and, if the repo's
# last commit year is at/after CUTOFFYEAR, create the cloud project/repo
# and push. Prints a running percentage of projects completed.
function migrateALL(){
echo "Begin migrateALL"
# List all projects
PROJECTS=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects?limit=$LIMIT --tlsv1 | jq '.values | .[] | .key' | cut -d \" -f2)
if [[ -z $PROJECTS ]]; then
fail "Unable to list Bitbucket Projects."
fi
if $DEBUGMODE; then
echo "$PROJECTS"
pause
fi
# Count projects (tr strips whitespace from wc output)
TOTALPROJECTS=$(echo "$PROJECTS" | wc -l | tr -d '\040\011\012\015')
if $DEBUGMODE; then
echo "Total Projects: $TOTALPROJECTS"
pause
fi
TOTALSLUGS=0
START=1
for (( COUNT=$START; COUNT<=$TOTALPROJECTS; COUNT++ )); do
# Select each project: nl numbers the list, grep picks row $COUNT.
PROJECTKEY=$(echo "$PROJECTS" | nl | grep -w [^0-9][[:space:]]$COUNT | cut -f2)
if [[ -z $PROJECTKEY ]]; then
fail "Unable to select Bitbucket Project."
fi
HorizontalRule
echo "$COUNT Project: $PROJECTKEY"
# Get slugs (individual repos within project)
SLUG=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects/$PROJECTKEY/repos?limit=$LIMIT --tlsv1 | jq '.values | .[] | .slug' | cut -d \" -f2)
if [[ -z $SLUG ]]; then
echo "Unable to get Bitbucket project slugs for $PROJECTKEY."
fi
# Count number of repos in the project
[[ -z "$SLUG" ]] && NUMSLUGS="0" || NUMSLUGS=$(echo "$SLUG" | wc -l | tr -d '\040\011\012\015')
if $DEBUGMODE; then
echo "NumSlugs:" "$NUMSLUGS"
fi
TOTALSLUGS=$(($TOTALSLUGS + $NUMSLUGS))
# Case: One Repo in the Project
if [ "$NUMSLUGS" -eq "1" ]; then
echo "Repo:" "$SLUG"
THISSLUG="$SLUG"
# Do not migrate repos that are already migrated and using cloud!!!
if ! echo "$THISSLUG" | egrep -iq "$MIGRATEDREPOS"; then
# Do not migrate LFS repos >2GB!!!
if ! echo "$THISSLUG" | egrep -iq "$LFSREPOS"; then
# If the slug path exists then run the backup function
if [ -d $REPOBACKUPDIR/$PROJECTKEY/$SLUG ]; then
if $DEBUGMODE; then
echo "Path exists:" "$REPOBACKUPDIR/$PROJECTKEY/$SLUG"
pause
fi
cd $REPOBACKUPDIR/$PROJECTKEY/$SLUG
backup
# YEAR is set by backup(); migrate only repos at/after the cutoff year.
if [ "$YEAR" -gt "$CUTOFFYEAR" -o "$YEAR" -eq "$CUTOFFYEAR" ]; then
getToken
cloudProject
cloudRepo
cloudMigrate
else
echo "Repo year $YEAR is older than cutoff year $CUTOFFYEAR. Not migrating $THISSLUG to cloud!"
fi
fi
# If the slug path doesn't exist then clone the repo and run the backup function
if ! [ -d $REPOBACKUPDIR/$PROJECTKEY/$SLUG ]; then
if $DEBUGMODE; then
echo "Path doesn't exist:" "$REPOBACKUPDIR/$PROJECTKEY/$SLUG"
pause
fi
# List repo clone URL within project
CLONEURL=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects/$PROJECTKEY/repos?limit=$LIMIT --tlsv1 | jq '.values | .[] | .links | .clone | .[] | .href' | cut -d \" -f2 | grep ssh)
if $DEBUGMODE; then
echo "Cloning the repo from:"
echo "$CLONEURL"
fi
cd $REPOBACKUPDIR
if ! [ -d $PROJECTKEY ]; then
mkdir $PROJECTKEY
fi
cd $PROJECTKEY
CLONE=$(git clone "$CLONEURL" 2>&1)
if [ ! $? -eq 0 ]; then
fail "$CLONE"
fi
if echo "$CLONE" | egrep -iq "fatal|not|denied"; then
fail "$CLONE"
fi
cd $SLUG
backup
if [ "$YEAR" -gt "$CUTOFFYEAR" -o "$YEAR" -eq "$CUTOFFYEAR" ]; then
getToken
cloudProject
cloudRepo
cloudMigrate
else
echo "Repo year $YEAR is older than cutoff year $CUTOFFYEAR. Not migrating $THISSLUG to cloud!"
fi
fi
else
warning "LFS Repo $THISSLUG will not be migrated!"
fi
else
warning "Repo $THISSLUG has already been migrated to cloud!"
fi
fi
# Case: Multiple Repos in the Project
if [ "$NUMSLUGS" -gt "1" ]; then
if $DEBUGMODE; then
echo "Number of slugs greater than 1!"
pause
fi
# Backup one slug at a time
STARTSLUG=1
for (( SLUGCOUNT=$STARTSLUG; SLUGCOUNT<=$NUMSLUGS; SLUGCOUNT++ )); do
THISSLUG=$(echo "$SLUG" | nl | grep -w [^0-9][[:space:]]$SLUGCOUNT | cut -f2)
# Do not migrate repos that are already migrated and using cloud!!!
if ! echo "$THISSLUG" | egrep -iq "$MIGRATEDREPOS"; then
# Do not migrate LFS repos >2GB!!!
if ! echo "$THISSLUG" | egrep -iq "$LFSREPOS"; then
# If the slug path does exist then run the backup function
if [ -d $REPOBACKUPDIR/$PROJECTKEY/$THISSLUG ]; then
if $DEBUGMODE; then
echo "Path exists:" "$REPOBACKUPDIR/$PROJECTKEY/$THISSLUG"
pause
fi
echo "Repo:" "$THISSLUG"
cd $REPOBACKUPDIR/$PROJECTKEY/$THISSLUG
backup
if [ "$YEAR" -gt "$CUTOFFYEAR" -o "$YEAR" -eq "$CUTOFFYEAR" ]; then
getToken
cloudProject
cloudRepo
cloudMigrate
else
echo "Repo year $YEAR is older than cutoff year $CUTOFFYEAR. Not migrating $THISSLUG to cloud!"
fi
fi
# If the slug path doesn't exist then clone the repo and run the backup function
if ! [ -d $REPOBACKUPDIR/$PROJECTKEY/$THISSLUG ]; then
if $DEBUGMODE; then
echo "Path doesn't exist:" "$REPOBACKUPDIR/$PROJECTKEY/$THISSLUG"
pause
fi
cd $REPOBACKUPDIR
if ! [ -d $PROJECTKEY ]; then
mkdir $PROJECTKEY
fi
cd $PROJECTKEY
# This isn't needed since we already know the slug and can generate the clone url
# # List repo clone URLs within project
# CLONEURLS=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects/$PROJECTKEY/repos?limit=$LIMIT --tlsv1 | jq '.values | .[] | .links | .clone | .[] | .href' | cut -d \" -f2 | grep ssh)
# STARTCLONEURL=1
# for (( CLONEURLCOUNT=$STARTCLONEURL; CLONEURLCOUNT<=$NUMSLUGS; CLONEURLCOUNT++ )); do
# THISCLONEURL=$(echo "$CLONEURLS" | nl | grep -w [^0-9][[:space:]]$CLONEURLCOUNT | cut -f2)
# if $DEBUGMODE; then
# echo
# echo "CLONEURLS:"
# echo "$CLONEURLS"
# echo
# echo "CLONEURLCOUNT:"
# echo "$CLONEURLCOUNT"
# echo
# echo "Cloning the repo from:"
# echo "$THISCLONEURL"
# fi
# cd $REPOBACKUPDIR/$PROJECTKEY
THISCLONEURL=$(curl -sku $SERVERAPIUSER:$SERVERAPIPASS $SERVERAPIURL/projects/$PROJECTKEY/repos/$THISSLUG --tlsv1 | jq '.links | .clone | .[] | .href' | cut -d \" -f2 | grep ssh)
CLONE=$(git clone "$THISCLONEURL" 2>&1)
if [ ! $? -eq 0 ]; then
fail "$CLONE"
fi
if echo "$CLONE" | egrep -iq "fatal|not|denied"; then
fail "$CLONE"
fi
cd $THISSLUG
if [ ! $? -eq 0 ]; then
fail "$THISSLUG"
fi
backup
if [ "$YEAR" -gt "$CUTOFFYEAR" -o "$YEAR" -eq "$CUTOFFYEAR" ]; then
getToken
cloudProject
cloudRepo
cloudMigrate
else
echo "Repo year $YEAR is older than cutoff year $CUTOFFYEAR. Not migrating $THISSLUG to cloud!"
fi
# done
fi
else
warning "LFS Repo $THISSLUG will not be migrated!"
fi
else
warning "Repo $THISSLUG has already been migrated to cloud!"
fi
done
fi
# Calculate and display completion percentage (bc for fixed-point math;
# the "+0.5)/1" step rounds to the nearest whole percent).
PERCENT=$(echo "scale=2; 100/$TOTALPROJECTS" | bc)
PERCENTCOMPLETED=$(echo "scale=2; $PERCENT*$COUNT" | bc)
PERCENTCOMPLETED=$(echo "($PERCENTCOMPLETED+0.5)/1" | bc)
echo $PERCENTCOMPLETED% of Projects Completed.
done
echo "Total Repos on Server: $TOTALSLUGS"
echo "Total Projects on Server: $TOTALPROJECTS"
# # Not migrating every project/repo so this check is no longer useful
# VERIFY=$(curl -su $CLOUDAPIUSER:$CLOUDAPIPASS "$CLOUDAPIURL/teams/$CLOUDAPITEAM/projects/" | jq '.size')
# if [[ "$TOTALPROJECTS" == "$VERIFY" ]]; then
# echo
# HorizontalRule
# echo "Bitbucket Migration has completed."
# HorizontalRule
# else
# echo
# HorizontalRule
# echo "Unable to verify all projects were migrated."
# HorizontalRule
# fi
}
# Write the migration-notice email body for the current repo to
# $EMAILDIR/$REPO. Reads globals PROJECTNAME, PROJECTKEY, REPO, THISSLUG,
# SERVERHOSTNAME, CLOUDAPITEAM, CLOUDGITCLONEURL.
# Fix: SERVERHOSTNAME already contains the protocol ("https://host:port"),
# so the old "https://$SERVERHOSTNAME/..." produced a broken
# "https://https://..." URL in the email.
function generateEmail(){
HorizontalRule
echo "Generating Email Template"
HorizontalRule
(
cat << EOP
Hello, this repository is being migrated to Bitbucket Cloud:
Project: $PROJECTNAME
Repository: $REPO
Please do not push any more commits to the current repo as you will no longer have write permission during the migration.
Old URL:
$SERVERHOSTNAME/projects/$PROJECTKEY/repos/$REPO/browse
New URL:
https://bitbucket.org/$CLOUDAPITEAM/$THISSLUG
You will need to update your local git repo using the following command:
git remote set-url origin $CLOUDGITCLONEURL:$CLOUDAPITEAM/$THISSLUG.git
Or if you use Sourcetree, update the repo URL to:
$CLOUDGITCLONEURL:$CLOUDAPITEAM/$THISSLUG.git
EOP
) > $EMAILDIR/$REPO
}
# Send the generated migration notice to each committer address in $TO via
# AWS SES SMTP, driving the SMTP session by piping a scripted transcript
# through openssl s_client with STARTTLS. Prompts before sending.
function sendEmail(){
# Clean up To list - convert to lower case, sort, filter unique, remove former employees, filter out non-company email addresses
TO=$(echo "$TO" | tr '[:upper:]' '[:lower:]' | sort | uniq | egrep -vw 'formeremployee1|formeremployee2' | sed -n '/'"$EMAIL_FROM_DOMAIN"'$/p')
if [[ -z $TO ]]; then
echo "No users to email."
return 1
fi
echo "Pausing before sending email to:"
echo "$TO"
read -r -p "Continue? (y/n) " CONTINUE
HorizontalRule
FROM="$EMAIL_FROM@$EMAIL_FROM_DOMAIN"
SUBJECT="$PROJECTNAME - $REPO Bitbucket Cloud Migration"
MESSAGE=$(cat $EMAILDIR/$REPO)
if [[ $CONTINUE =~ ^([yY][eE][sS]|[yY])$ ]]; then
for to in $TO
do
HorizontalRule
echo "Sending mail to $to"
HorizontalRule
# Scripted SMTP session. NOTE(review): AUTH LOGIN expects the username
# and password as base64-encoded lines — this works only if
# AWS_SMTP_Username/AWS_SMTP_Password are already stored base64-encoded;
# confirm, otherwise encode them here with `openssl base64`.
(
cat << EOP
EHLO $EMAIL_FROM_DOMAIN
AUTH LOGIN
$AWS_SMTP_Username
$AWS_SMTP_Password
MAIL FROM: $FROM
RCPT TO: $to
DATA
From: $FROM
To: $to
Subject: $SUBJECT
$MESSAGE
.
QUIT
EOP
) > $EMAILDIR/email
openssl s_client -crlf -quiet -starttls smtp -connect $AWSSESHostname < $EMAILDIR/email
done
fi
}
# ---------------------------------------------------------------------------
# Main driver: self-update, check prerequisites, log start time, run the
# selected migration method(s), log completion time.
# ---------------------------------------------------------------------------
# Update the backup script
self_update
# Check for required applications
check_command curl git jq bc openssl
# Log the Date/Time
echo
echo
HorizontalRule
HorizontalRule
tput setaf 2; HorizontalRule
echo "Beginning Bitbucket Migration"
date +%m-%d-%Y" "%H:%M:%S
HorizontalRule && tput sgr0
HorizontalRule
HorizontalRule
echo
echo
# Prepare to run on Bitbucket Server
bitbucketServer
# Migrate All
if $migrateALL; then
  migrateALL
fi
# Migrate Phases
if $migratePhases; then
  # PHASEFILE="phase1.txt"
  migratePhases
fi
# Migrate Multiple Phases, counting down from NumberOfPhases to 1.
# Fix: the original used {$NumberOfPhases..1} — bash brace expansion does not
# expand variables, so the loop ran once with a literal string instead of
# iterating phase numbers. A C-style for loop handles the variable correctly.
if $migrateMultiplePhases; then
  for (( PHASENUMBER=NumberOfPhases; PHASENUMBER>=1; PHASENUMBER-- )); do
    cd "$SCRIPTDIR"
    migratePhases
  done
fi
# Log the Date/Time again when completed
echo
echo
HorizontalRule
HorizontalRule
tput setaf 2; HorizontalRule
echo "Completed Bitbucket Migration"
date +%m-%d-%Y" "%H:%M:%S
HorizontalRule && tput sgr0
HorizontalRule
HorizontalRule
echo
echo
|
<filename>lib/legion/data.rb
require 'legion/data/version'
require 'legion/data/settings'
require 'sequel'
require 'legion/data/connection'
require 'legion/data/model'
require 'legion/data/migration'

module Legion
  # Database layer for Legion: wires up the Sequel connection, runs pending
  # migrations, loads the models, and (optionally) configures model caching.
  module Data
    class << self
      # Full bring-up sequence: connect, migrate, load models, init cache.
      def setup
        connection_setup
        migrate
        load_models
        setup_cache
      end

      # Establish the Sequel connection unless settings say we already are.
      def connection_setup
        return if Legion::Settings[:data][:connected]

        Legion::Data::Connection.setup
      end

      # Apply any pending schema migrations.
      def migrate
        Legion::Data::Migration.migrate
      end

      # Require/register all model classes.
      def load_models
        Legion::Data::Models.load
      end

      # The underlying Sequel database handle.
      def connection
        Legion::Data::Connection.sequel
      end

      # Hook up the model caching plugin when Legion::Cache is available.
      # NOTE(review): this guard returns when the cache IS enabled, which
      # looks inverted (`return unless ...enabled` seems intended) — confirm;
      # currently harmless since the plugin wiring below is commented out.
      def setup_cache
        return if Legion::Settings[:data][:cache][:enabled]
        return unless defined?(::Legion::Cache)
        # Legion::Data::Model::Relationship.plugin :caching, Legion::Cache, ttl: 10
        # Legion::Data::Model::Runner.plugin :caching, Legion::Cache, ttl: 60
        # Legion::Data::Model::Chain.plugin :caching, Legion::Cache, ttl: 60
        # Legion::Data::Model::Function.plugin :caching, Legion::Cache, ttl: 120
        # Legion::Data::Model::Extension.plugin :caching, Legion::Cache, ttl: 120
        # Legion::Data::Model::Node.plugin :caching, Legion::Cache, ttl: 10
        # Legion::Data::Model::TaskLog.plugin :caching, Legion::Cache, ttl: 12
        # Legion::Data::Model::Task.plugin :caching, Legion::Cache, ttl: 10
        # Legion::Data::Model::User.plugin :caching, Legion::Cache, ttl: 120
        # Legion::Data::Model::Group.plugin :caching, Legion::Cache, ttl: 120
        # Legion::Logging.info 'Legion::Data connected to Legion::Cache'
      end

      # Tear down the Sequel connection.
      def shutdown
        Legion::Data::Connection.shutdown
      end
    end
  end
end
|
-- Average order quantity across all orders in the 'National' region
-- (NULL quantities are ignored by AVG).
SELECT AVG(quantity)
FROM orders
WHERE region = 'National';
/**
 * Accessor decorator (stage-3 decorators shape): registers an initializer
 * that stores the decoration context on the instance under
 * "<name>_<kind>Context", and wraps the accessor so a decorated getter
 * returns the underlying value plus one, while a decorated setter passes
 * the incoming value plus one to the underlying setter.
 */
function dec(value, context) {
  context.addInitializer(function() {
    this[context.name + '_' + context.kind + 'Context'] = context;
  });
  const wrapsGetter = context.kind === 'getter';
  if (wrapsGetter) {
    return function () {
      return 1 + value.call(this);
    };
  }
  return function (v) {
    return value.call(this, v + 1);
  };
}
// Test fixture: static getter/setter pairs decorated with @dec, including a
// computed-name pair ('b'), all backed by the shared static `value` field.
// The decorator's initializers attach each accessor's context object to Foo.
class Foo {
static value = 1;
@dec
static get a() {
return this.value;
}
@dec
static set a(v) {
this.value = v;
}
@dec
static get ['b']() {
return this.value;
}
@dec
static set ['b'](v) {
this.value = v;
}
}
// Assertions (jest-style `expect` provided by the test harness): verify the
// decorator's +1 wrapping on reads and writes, and that each accessor's
// context object was attached to Foo with the expected shape.
const a_getterContext = Foo['a_getterContext'];
const a_setterContext = Foo['a_setterContext'];
const b_getterContext = Foo['b_getterContext'];
const b_setterContext = Foo['b_setterContext'];
// value=1, each decorated getter adds 1 on read.
expect(Foo.a).toBe(2);
expect(Foo.b).toBe(2);
// Setter adds 1 before storing (124), getter adds 1 on read (125).
Foo.a = 123;
expect(Foo.a).toBe(125);
expect(Foo.b).toBe(125);
Foo.b = 456;
expect(Foo.a).toBe(458);
expect(Foo.b).toBe(458);
expect(a_getterContext.name).toBe('a');
expect(a_getterContext.kind).toBe('getter');
expect(a_getterContext.isStatic).toBe(true);
expect(a_getterContext.isPrivate).toBe(false);
expect(typeof a_getterContext.addInitializer).toBe('function');
expect(typeof a_getterContext.setMetadata).toBe('function');
expect(typeof a_getterContext.getMetadata).toBe('function');
expect(a_setterContext.name).toBe('a');
expect(a_setterContext.kind).toBe('setter');
expect(a_setterContext.isStatic).toBe(true);
expect(a_setterContext.isPrivate).toBe(false);
expect(typeof a_setterContext.addInitializer).toBe('function');
expect(typeof a_setterContext.setMetadata).toBe('function');
expect(typeof a_setterContext.getMetadata).toBe('function');
expect(b_getterContext.name).toBe('b');
expect(b_getterContext.kind).toBe('getter');
expect(b_getterContext.isStatic).toBe(true);
expect(b_getterContext.isPrivate).toBe(false);
expect(typeof b_getterContext.addInitializer).toBe('function');
expect(typeof b_getterContext.setMetadata).toBe('function');
expect(typeof b_getterContext.getMetadata).toBe('function');
expect(b_setterContext.name).toBe('b');
expect(b_setterContext.kind).toBe('setter');
expect(b_setterContext.isStatic).toBe(true);
expect(b_setterContext.isPrivate).toBe(false);
expect(typeof b_setterContext.addInitializer).toBe('function');
expect(typeof b_setterContext.setMetadata).toBe('function');
expect(typeof b_setterContext.getMetadata).toBe('function');
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports["default"] = void 0;
var _react = _interopRequireDefault(require("react"));
var _propTypes = _interopRequireDefault(require("prop-types"));
var _icons = require("@buffetjs/icons");
var _Div = _interopRequireDefault(require("./Div"));
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
function _extends() { _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }
function _objectWithoutProperties(source, excluded) { if (source == null) return {}; var target = _objectWithoutPropertiesLoose(source, excluded); var key, i; if (Object.getOwnPropertySymbols) { var sourceSymbolKeys = Object.getOwnPropertySymbols(source); for (i = 0; i < sourceSymbolKeys.length; i++) { key = sourceSymbolKeys[i]; if (excluded.indexOf(key) >= 0) continue; if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue; target[key] = source[key]; } } return target; }
function _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; }
// Lookup table from an attribute's type name to the icon component that
// represents it (reserved words like "boolean"/"enum"/"float" are quoted).
var types = {
biginteger: _icons.NumberIcon,
"boolean": _icons.Bool,
component: _icons.Component,
contentType: _icons.ContentType,
date: _icons.Calendar,
datetime: _icons.Calendar,
decimal: _icons.NumberIcon,
dynamiczone: _icons.DynamicZone,
email: _icons.Email,
"enum": _icons.Enumeration,
enumeration: _icons.Enumeration,
file: _icons.Media,
files: _icons.Media,
"float": _icons.NumberIcon,
integer: _icons.NumberIcon,
json: _icons.Json,
JSON: _icons.Json,
media: _icons.Media,
number: _icons.NumberIcon,
password: _icons.Password,
relation: _icons.Relation,
richtext: _icons.RichText,
singleType: _icons.SingleType,
string: _icons.Text,
text: _icons.Text,
time: _icons.Calendar,
timestamp: _icons.Calendar,
uid: _icons.Uid
};
// Renders the icon for a given attribute `type` inside a styled Div wrapper;
// unknown types render nothing. All other props are forwarded to the Div.
var AttributeIcon = function AttributeIcon(_ref) {
var type = _ref.type,
rest = _objectWithoutProperties(_ref, ["type"]);
var Compo = types[type];
// Unrecognized type: nothing to render.
if (!types[type]) {
return null;
}
return _react["default"].createElement(_Div["default"], _extends({}, rest, {
type: type
}), _react["default"].createElement(Compo, null));
};
AttributeIcon.propTypes = {
type: _propTypes["default"].string.isRequired
};
var _default = AttributeIcon;
exports["default"] = _default;
/**
*
*/
package jframe.pushy.impl;
import jframe.pushy.Fields;
import jframe.pushy.MultiPushyConf;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.relayrides.pushy.apns.ApnsEnvironment;
import com.relayrides.pushy.apns.PushManager;
import com.relayrides.pushy.apns.PushManagerConfiguration;
import com.relayrides.pushy.apns.util.SSLContextUtil;
import com.relayrides.pushy.apns.util.SimpleApnsPushNotification;
/**
* @author dzh
* @date Aug 29, 2015 2:11:43 PM
* @since 1.0
*/
/**
 * Thin lifecycle wrapper around a pushy {@link PushManager} for one
 * configuration group: {@code init} stores the config, {@code start} builds
 * and starts the manager, {@code stop} shuts it down.
 */
public class PushManagerWrapper implements Fields {

    static Logger LOG = LoggerFactory.getLogger(PushManagerWrapper.class);

    PushManager<SimpleApnsPushNotification> pushManager;

    private MultiPushyConf conf;
    private String group;

    /**
     * Stores the configuration source and group key. No connection is opened
     * until {@link #start()} is called.
     *
     * @param group configuration group name used for all lookups
     * @param conf  multi-group pushy configuration
     * @return this wrapper, for chaining
     */
    public PushManagerWrapper init(String group, MultiPushyConf conf) {
        this.conf = conf;
        this.group = group;
        return this;
    }

    /**
     * Builds the APNs environment and SSL context from the group's
     * configuration and starts the push manager. Failures are logged with
     * their stack trace rather than propagated to the caller.
     */
    public void start() {
        try {
            PushManagerConfiguration pushConf = new PushManagerConfiguration();
            // Concurrent APNs connections; defaults to 100 when unconfigured.
            int connCount = Integer.parseInt(conf.getConf(group,
                    KEY_PUSH_CONN_COUNT, "100"));
            pushConf.setConcurrentConnectionCount(connCount);
            pushManager = new PushManager<SimpleApnsPushNotification>(
                    getEnvironment(conf.getConf(group, KEY_HOST),
                            conf.getConf(group, KEY_HOST_PORT),
                            conf.getConf(group, KEY_FEEDBACK),
                            conf.getConf(group, KEY_FEEDBACK_PORT)),
                    SSLContextUtil.createDefaultSSLContext(
                            conf.getConf(group, KEY_IOS_AUTH),
                            conf.getConf(group, KEY_IOS_PASSWORD)), null, null,
                    null, pushConf, "PushManager-" + group);
            pushManager.start();
        } catch (Exception e) {
            // Log the throwable itself: getMessage() alone drops the stack
            // trace and logs the literal "null" for messageless exceptions.
            LOG.error(e.getMessage(), e);
        }
    }

    public PushManager<SimpleApnsPushNotification> getPushManager() {
        return pushManager;
    }

    /**
     * Shuts the push manager down, waiting up to 30 seconds for in-flight
     * notifications. Safe to call when {@link #start()} never ran or failed.
     */
    public void stop() {
        try {
            if (pushManager != null) {
                // TODO
                pushManager.shutdown(30 * 1000);
            }
        } catch (Exception e) {
            LOG.error(e.getMessage(), e);
        }
    }

    /**
     * Builds an {@link ApnsEnvironment} from string host/port settings.
     *
     * @throws NumberFormatException if either port is not a valid integer
     */
    public static ApnsEnvironment getEnvironment(String host, String port,
            String feedback, String fdPort) {
        return new ApnsEnvironment(host, Integer.parseInt(port), feedback,
                Integer.parseInt(fdPort));
    }
}
|
<reponame>riordanalfredo/MeetupsForPetsIONIC<gh_stars>0
import org.jsoup.Jsoup;
import org.jsoup.nodes.Attribute;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.w3c.dom.Attr;
import java.io.*;
import java.nio.file.*;
/**
 * One-shot migration tool that renames an Ionic v3 project's page files and
 * rewrites v3 template tags (button, ion-navbar, ion-label) to their Ionic v4
 * equivalents using jsoup DOM manipulation.
 *
 * NOTE(review): all paths are built with hard-coded Windows separators
 * ("\\"), so as written this only works on Windows — confirm intent.
 */
public class Parser {
    // Absolute path of the parent of the current working directory.
    private static String projectPath;
    // Project-relative path of the file to migrate (Windows separators).
    private static String filePath;
    // Name of the source (v3) project directory.
    private static String oldDirectory;
    // Name of the target (v4) project directory (currently unused below).
    private static String newDirectory;

    public static void main(String[] args) throws IOException {
        // get current working space directory
        String currentPath = System.getProperty("user.dir");
        Path projectPathString = Paths.get(currentPath).getParent();
        // initialise all paths
        projectPath = projectPathString.toString() ;
        filePath = "src\\pages\\add-pet\\add-pet.ts";
        oldDirectory = "PetsMeetupsV3";
        newDirectory = "PetsMeetupsV4"; // target directory
        System.out.println(projectPathString);
        renameFiles(projectPath, filePath);
        // TODO: make this input variable mutable!
        File input = new File(projectPath + "\\"+ oldDirectory + "\\" + filePath);
        // Only parse if file is html
        // NOTE(review): filePath above is hard-coded to a ".ts" file, so this
        // branch is currently dead with the default path — confirm intent.
        if (input.isFile() && input.getName().endsWith(".html")) {
            //
            // parse button tags
            //
            String buttonsv4 = migrateButtons(projectPath, filePath);
            System.out.println(buttonsv4);
            //
            // parse navbar tags
            //
            String fileV4Navbar = parseNavbar(input);
            System.out.println(fileV4Navbar);
            /*
             * It seems that the `input` variable here is not modified after calling
             * `parseButtons` function.
             * TODO: Need to find a way to handle running multiple various functions
             * to generate a final result of from a single `input` variable.
             */
            /*
             * TODO: Proposed solution to using multiple parse functions
             * Instead of having each function use a separate Input variable,
             * use a common one. Isolate the parts of the code that deal with specific
             * components into functions like migrateButtons, etc. Have the parse function
             * call the other methods in sequence.
             */
        }
    }

    /**
     * Renames every .html/.scss file, and every .ts file except modules, in
     * the directory containing {@code filePath} from "name.ext" to
     * "name.page.ext" (the Ionic v4 naming convention). Renames in place
     * inside the old (v3) directory.
     *
     * NOTE(review): listFiles() returns null if the directory does not exist
     * (NPE here), the renameTo() return value is ignored, and splitting on
     * "." keeps only the first two components of multi-dot names — confirm
     * these cases cannot occur.
     */
    private static void renameFiles(String projectPath, String filePath) {
        // Rebuild the containing directory from all but the last path segment.
        String[] filePathComponents = filePath.split("\\\\");
        String directory = projectPath + "\\" + oldDirectory + "\\";
        for (int i = 0; i < filePathComponents.length - 1; i++) {
            directory += filePathComponents[i] + "\\";
        }
        File pageDirectory = new File(directory);
        File[] allFiles = pageDirectory.listFiles();
        for (File file: allFiles) {
            String filename = file.getName();
            if (filename.endsWith(".html") ||
                    (filename.endsWith(".ts") && !filename.contains(".module")) ||
                    filename.endsWith(".scss")) {
                String[] fileComponents = filename.split("\\.");
                String prefix = fileComponents[0];
                String suffix = fileComponents[1];
                String newFilename = prefix + ".page." + suffix;
                file.renameTo(new File(directory + newFilename));
            }
        }
    }

    /**
     * Rewrites every &lt;ion-label&gt; in the file to the Ionic v4 form:
     * the bare "floating" attribute becomes position="floating"; all other
     * attributes are copied verbatim. Returns the whole rewritten document
     * as a string, or "" on I/O failure.
     *
     * NOTE(review): not called from main(); kept for the planned multi-pass
     * pipeline described in the TODOs above.
     */
    private static String migrateLabel(String projectPath, String filePath) {
        oldDirectory = "PetsMeetupsV3";
        // TODO: make this input variable mutable!
        File input = new File(projectPath + "\\"+ oldDirectory + "\\" + filePath);
        Document doc;
        try {
            doc = Jsoup.parse(input, "UTF-8", "");
            Elements labels = doc.select("ion-label");
            for (Element label: labels) {
                Element v4Label = new Element("ion-label");
                for (Attribute attribute: label.attributes()) {
                    // Skip the marker attribute itself; translate "floating",
                    // copy everything else unchanged.
                    if (!attribute.toString().equals("ion-label")) {
                        if (attribute.toString().equals("floating")) {
                            v4Label.attr("position", "floating");
                        } else {
                            v4Label.attr(attribute.getKey(), attribute.getValue());
                        }
                    }
                }
                label.replaceWith(v4Label);
                // Reading text() after replaceWith works on the detached
                // element, but flattens any child markup to plain text.
                String labelText = label.text();
                v4Label.append(labelText);
            }
            return doc.toString();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return "";
    }

    /**
     * Rewrites every &lt;button&gt; carrying Ionic directives to an Ionic v4
     * &lt;ion-button&gt;: drops the "ion-button" marker attribute, maps the
     * bare "clear" attribute to fill="clear", and copies all other attributes
     * verbatim. Returns the rewritten document, or "" on I/O failure.
     */
    private static String migrateButtons(String projectPath, String filePath) {
        oldDirectory = "PetsMeetupsV3";
        // TODO: make this input variable mutable!
        File input = new File(projectPath + "\\"+ oldDirectory + "\\" + filePath);
        // DOM structure of the file
        Document doc;
        try{
            doc = Jsoup.parse(input, "UTF-8", "");
            // Select all buttons
            Elements buttons = doc.select("button");
            for (Element button : buttons){
                // Ionic 3 button
                System.out.println(button.toString());
                // Ionic 4 button
                Element v4Button = new Element("ion-button");
                // Loop through all attributes of v3 button
                for (Attribute attribute : button.attributes()) {
                    // We only want to change ion-buttons, not regular ones
                    // NOTE(review): this skips only the "ion-button" marker;
                    // plain <button> elements are still renamed below —
                    // confirm that is intended.
                    if (!attribute.toString().equals("ion-button")) {
                        // Change attributes from v3 to v4 according to docs
                        // Any that don't change can just have the old key and value
                        // from the v3 button
                        if (attribute.toString().equals("clear")) {
                            v4Button.attr("fill", "clear");
                        } else {
                            v4Button.attr(attribute.getKey(), attribute.getValue());
                        }
                    }
                }
                System.out.println(v4Button.toString() + "\n");
                // Replace the old button with the new button in the DOM structure
                button.replaceWith(v4Button);
                // Adding children / text (child markup is flattened to text)
                String v3ButtonText = button.text();
                System.out.println("v3 button children: " + v3ButtonText);
                v4Button.append(v3ButtonText);
            }
            // The new DOM structure can be given to the model
            // to be generated using Acceleo
            System.out.println(doc.toString());
            return doc.toString();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return "";
    }

    /**
     * Rewrites every &lt;ion-navbar&gt; to the Ionic v4 &lt;ion-toolbar&gt;,
     * copying all attributes and moving child elements (not bare text nodes)
     * into the new tag. Returns the rewritten document, or an error string
     * on I/O failure.
     */
    private static String parseNavbar(File f){
        // DOM structure of the file
        Document doc;
        try {
            doc = Jsoup.parse(f, "UTF-8", "");
            // Select all navbar
            Elements navbars = doc.select("ion-navbar");
            for (Element v3Navbar : navbars) {
                Element v4Navbar = new Element("ion-toolbar");
                //Loop through old tag attributes to get all attributes
                for (Attribute attribute : v3Navbar.attributes()) {
                    v4Navbar.attr(attribute.getKey(), attribute.getValue());
                }
                // Replace old tags with new tags
                v3Navbar.replaceWith(v4Navbar);
                // Adding children into new tags (moved from the detached v3
                // element; text nodes that are not elements are dropped)
                Elements v3NavbarChildren = v3Navbar.children();
                for( Element v3Child : v3NavbarChildren){
                    v4Navbar.appendChild(v3Child);
                }
            }
            return doc.toString();
        }
        catch (IOException e){
            e.printStackTrace();
        }
        return "error in parsing Navbar";
    }
}
|
<filename>src/hooks/useNetworkListener.js
import { useState, useEffect } from 'react';
import NetInfo from "@react-native-community/netinfo";
import device from 'src/utils/device';
// Hook that reports network connectivity as a boolean, starting optimistic
// (true). On web it tracks the window online/offline events; on mobile it
// subscribes to NetInfo reachability updates.
export default () => {
  const [connected, setConnected] = useState(true);

  useEffect(() => {
    const handleOnline = () => setConnected(true);
    const handleOffline = () => setConnected(false);

    // subscribe - web
    if (device('web')) {
      window.addEventListener('online', handleOnline);
      window.addEventListener('offline', handleOffline);
    }

    // subscribe - mobile (NetInfo returns its own unsubscribe function)
    let mobileUnsubscribe = null;
    if (!device('web')) {
      mobileUnsubscribe = NetInfo.addEventListener(state => {
        setConnected(state.isInternetReachable);
      });
    }

    return () => {
      // unsubscribe - mobile
      if (mobileUnsubscribe) {
        mobileUnsubscribe();
      }
      // unsubscribe - web
      if (device('web')) {
        window.removeEventListener('online', handleOnline);
        window.removeEventListener('offline', handleOffline);
      }
    };
  }, []);

  return connected;
};
#!/usr/bin/env bash
# Print the manual-style help text and terminate the script.
# NOTE(review): always exits 1, including when reached via -h; conventionally
# an explicit help request should exit 0 — confirm before changing callers.
usage() {
  # Quoted delimiter ('HEREDOC') prevents any expansion inside the help text.
  cat <<'HEREDOC'
NAME
delete-linux-account.sh -- remove a Linux account and the home directory of a user
SYNOPSIS
sudo delete-linux-account.sh [-n] username group
delete-linux-account.sh -h: display this help
DESCRIPTION
Remove a Linux account and the home directory. If a user does not exist the list of users in the group is displayed.
Requires root permissions: execute under `sudo`.
username an end user name, conforming to Linux and Postgres requirements
The following options are available:
-n do not fail if the user doesn't exist
EXIT STATUS
The utility exits with one of the following values:
0 successfully removed
1 error, e.g. the username doesn't exist
AUTHOR(S)
Written by Dmitriy "DK" Korobskiy.
HEREDOC
  exit 1
}
# Abort on any error, including failures inside pipelines.
set -e
set -o pipefail

# If a character is followed by a colon, the option is expected to have an argument
while getopts nh OPT; do
  case "$OPT" in
    n)
      # -n: tolerate a non-existent user instead of failing.
      readonly NO_FAIL_MODE=true
      ;;
    *) # -h or `?`: an unknown option
      usage
      ;;
  esac
done
shift $((OPTIND - 1))

# Process positional parameters: both username ($1) and group ($2) are required.
[[ $2 == "" ]] && usage
readonly DELETED_USER="$1"
readonly GROUP="$2"

# Get a script directory, same as by $(dirname $0)
#readonly SCRIPT_DIR=${0%/*}
#readonly ABSOLUTE_SCRIPT_DIR=$(cd "${SCRIPT_DIR}" && pwd)
#readonly WORK_DIR=${1:-${ABSOLUTE_SCRIPT_DIR}/build} # $1 with the default
#if [[ ! -d "${WORK_DIR}" ]]; then
#  mkdir "${WORK_DIR}"
#  chmod g+w "${WORK_DIR}"
#fi
#cd "${WORK_DIR}"
echo -e "\n## Running under ${USER}@${HOSTNAME} in ${PWD} ##\n"

# userdel exits with 6 when the specified user does not exist.
declare -ri NON_EXISTING_USER_EXIT_CODE=6

# Temporarily disable -e so userdel's exit code can be captured and inspected.
set +e
userdel -r "${DELETED_USER}"
declare -ri EXIT_CODE=$?
set -e

if (( EXIT_CODE == 0 )); then
  echo "The Linux user '${DELETED_USER}' has been removed along with their home directory."
  exit 0
fi

# Fail unless -n was given AND the only problem was a non-existent user;
# in that remaining case the script falls through and exits 0 silently.
if [[ "$NO_FAIL_MODE" == "" ]] || (( EXIT_CODE != NON_EXISTING_USER_EXIT_CODE )); then
  echo "Failed!"
  echo "$GROUP users:"
  # Show the group's members to help the operator find the correct username.
  lid --group --onlynames "$GROUP" | sort
  exit 1
fi
#!/bin/bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e

. ./test-lib.sh

setup_initgit
setup_gitgit

(
  set -e
  cd git-git
  git checkout -q --track -b work origin
  echo "some work done on a branch" >> test
  git add test; git commit -q -m "branch work"
  echo "some other work done on a branch" >> test
  git add test; git commit -q -m "branch work"

  test_expect_success "git-cl upload wants a server" \
    "$GIT_CL upload 2>&1 | grep -q 'You must configure'"

  git config rietveld.server localhost:8080

  test_expect_success "git-cl status has no issue" \
    "$GIT_CL status | grep -q 'no issue'"

  # Prevent the editor from coming up when you upload.
  export EDITOR=$(which true)
  test_expect_success "upload succeeds (needs a server running on localhost)" \
    "$GIT_CL upload -m test master | \
     grep -q 'Issue created'"

  test_expect_success "git-cl status now knows the issue" \
    "$GIT_CL status | grep -q 'Issue number'"

  # Check to see if the description contains the local commit messages.
  # Should contain 'branch work' x 2.
  # Fix: the original command strings were missing their closing quote and
  # ran the egrep substitution at definition time with conflicting -q/-c
  # flags; defer the substitution (\$) and count matches with egrep -c.
  test_expect_success "git-cl status has the right description for the log" \
    "[ \$($GIT_CL status --field desc | egrep -c '^branch work$') -eq 2 ]"

  test_expect_success "git-cl status has the right subject from message" \
    "$GIT_CL status --field desc | \
     egrep '^test$' --byte-offset | grep -q '^0:'"

  test_expect_success "git-cl push ok" \
    "$GIT_CL push -f"

  git checkout -q master > /dev/null 2>&1
  git pull -q > /dev/null 2>&1

  test_expect_success "committed code has proper description" \
    "[ \$(git show | egrep -c '^branch work$') -eq 2 ]"

  test_expect_success "issue no longer has a branch" \
    "git cl status | grep -q 'work: None'"

  cd $GITREPO_PATH
  test_expect_success "upstream repo has our commit" \
    "[ \$(git log master 2>/dev/null | egrep -c '^branch work$') -eq 2 ]"
)
SUCCESS=$?

cleanup

if [ $SUCCESS == 0 ]; then
  echo PASS
fi
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.