text stringlengths 1 1.05M |
|---|
#!/bin/bash
#SBATCH --time=24:00:00
#SBATCH --nodes=1 --ntasks-per-node=2 --cpus-per-task=1
#SBATCH --mem=20G

# SLURM batch job: run the Julia GAN experiment.
# Usage: sbatch <this script> MAX_SEED DATASET HP_SAMPLING CONTAMINATION
MAX_SEED=$1
DATASET=$2
HP_SAMPLING=$3
CONTAMINATION=$4

module load Julia/1.5.1-linux-x86_64
module load Python/3.8.2-GCCcore-9.3.0

# Quote every expansion so dataset names containing spaces or glob
# characters are passed through as single arguments (the original mixed
# quoted-style ${VAR} with unquoted $VAR inconsistently).
julia ./gan.jl "${MAX_SEED}" "${DATASET}" "${HP_SAMPLING}" "${CONTAMINATION}"
|
import matplotlib.pyplot as plt
def plot_suspensions(suspensions, ch_from, ch_to, refnumbers, state):
    """Plot each suspension's channel connection as a labeled horizontal line.

    Suspension ``i`` is drawn at height ``y = i`` as a segment from
    ``ch_from[i]`` to ``ch_to[i]``, annotated with its reference number on
    the left end and its state on the right end. The figure is shown
    immediately via ``plt.show()`` (blocking in most backends).

    Args:
        suspensions: Sequence of suspension identifiers; only its length is
            used to drive the loop, so all other sequences must be at least
            as long.
        ch_from: Per-suspension start channel (x coordinate).
        ch_to: Per-suspension end channel (x coordinate).
        refnumbers: Per-suspension reference numbers, displayed at ch_from.
        state: Per-suspension state labels, displayed at ch_to.
    """
    # Create a new figure so repeated calls do not draw over each other.
    plt.figure()
    # Plot the suspensions and their connections.
    for i in range(len(suspensions)):
        # Connection line with round blue markers at both endpoints.
        plt.plot([ch_from[i], ch_to[i]], [i, i], 'bo-')
        plt.text(ch_from[i], i, f"Ref: {refnumbers[i]}", ha='right', va='center')  # reference number
        plt.text(ch_to[i], i, f"State: {state[i]}", ha='left', va='center')  # state label
    # Set plot labels and title.
    plt.xlabel('Channel')
    plt.ylabel('Suspension')
    plt.title('Suspension Connections and States')
    # Show the plot.
    plt.show()
# Example usage: four suspensions connecting consecutive channels.
# Guarded so importing this module does not open a plot window; the original
# ran unconditionally at import time.
if __name__ == "__main__":
    suspensions = ['A', 'B', 'C', 'D']
    ch_from = [1, 2, 3, 4]
    ch_to = [2, 3, 4, 5]
    refnumbers = [101, 102, 103, 104]
    state = ['OK', 'Faulty', 'OK', 'Faulty']
    plot_suspensions(suspensions, ch_from, ch_to, refnumbers, state)
#!/usr/bin/env bash
# Overwrite the system message-of-the-day with a "Webhippie"-style ASCII
# banner. Intended to be run as root (writes /etc/motd), e.g. from a
# container/image build step.
set -x
# Remove any distro-provided MOTD first; the redirection below would
# overwrite it anyway, but this also clears a symlinked /etc/motd.
rm -f /etc/motd
# NOTE: the banner is a double-quoted string; the doubled backslashes
# (\\) collapse to single backslashes in the output — keep them as-is.
echo "
 __ __ ___. .__ .__ .__
/ \ / \ ____\_ |__ | |__ |__|_____ ______ |__| ____
\ \/\/ // __ \| __ \| | \| \____ \\____ \| |/ __ \
\ /\ ___/| \_\ \ Y \ | |_> > |_> > \ ___/
\__/\ / \___ >___ /___| /__| __/| __/|__|\___ >
\/ \/ \/ \/ |__| |__| \/
" > /etc/motd
exit 0
|
#include <stdio.h>
/*
 * Lomuto partition: takes arr[high] as the pivot, moves every smaller
 * element to the left of it, and places the pivot at its final sorted
 * position. Returns the pivot's final index. Bounds are inclusive.
 */
int partition (int arr[], int low, int high)
{
    int pivot = arr[high];
    /* First index not yet known to hold a value smaller than the pivot. */
    int boundary = low;

    for (int scan = low; scan < high; scan++) {
        if (arr[scan] < pivot) {
            /* Grow the "smaller than pivot" prefix by one element. */
            int tmp = arr[boundary];
            arr[boundary] = arr[scan];
            arr[scan] = tmp;
            boundary++;
        }
    }

    /* Drop the pivot just past the prefix of smaller elements. */
    int tmp = arr[boundary];
    arr[boundary] = arr[high];
    arr[high] = tmp;
    return boundary;
}
/* The main function that implements QuickSort.
   arr[] --> Array to be sorted,
   low   --> Starting index,
   high  --> Ending index (inclusive) */
void quickSort(int arr[], int low, int high)
{
    /* Zero or one element: nothing to sort. */
    if (low >= high)
        return;

    /* Place one pivot at its final position, then sort both halves. */
    int pivotIndex = partition(arr, low, high);
    quickSort(arr, low, pivotIndex - 1);
    quickSort(arr, pivotIndex + 1, high);
}
/**
 * Display the specified resource.
 *
 * Looks the resource up by primary key and returns it as JSON, or a
 * 404 payload when no row matches.
 *
 * @param int $id
 * @return \Illuminate\Http\Response
 */
public function show($id)
{
    $resource = Resource::find($id);

    // Guard clause: bail out early when the lookup found nothing.
    if (! $resource) {
        return response()->json(['message' => 'Resource not found'], 404);
    }

    return response()->json(['data' => $resource], 200);
}
#!/bin/bash
# Install signing/credential files for CI builds: copies the APK keystore
# into place and appends keystore/AWS properties to ~/.gradle/gradle.properties.
# Requires LYRASIS_AWS_ACCESS_ID and LYRASIS_AWS_SECRET_KEY in the environment.

#------------------------------------------------------------------------
# Utility methods
#

# Print a fatal error to stderr and abort the script.
fatal()
{
  echo "credentials-local.sh: fatal: $1" 1>&2
  exit 1
}

# Print an informational message to stderr.
info()
{
  echo "credentials-local.sh: info: $1" 1>&2
}

# Both AWS credential variables must be present before doing any work.
if [ -z "${LYRASIS_AWS_ACCESS_ID}" ]
then
  fatal "LYRASIS_AWS_ACCESS_ID is not defined"
fi
if [ -z "${LYRASIS_AWS_SECRET_KEY}" ]
then
  fatal "LYRASIS_AWS_SECRET_KEY is not defined"
fi

#------------------------------------------------------------------------
# Copy credentials into place.
#
info "installing keystore"
cp -v ".ci/credentials/APK Signing/keystore.jks" \
  "release.jks" || exit 1

#------------------------------------------------------------------------
# Add the NYPL nexus properties to the project properties.
#
mkdir -p "${HOME}/.gradle" ||
  fatal "could not create ${HOME}/.gradle"
cat ".ci/credentials/APK Signing/keystore.properties" >> "${HOME}/.gradle/gradle.properties" ||
  fatal "could not read keystore properties"
CREDENTIALS_PATH=$(realpath ".ci/credentials") ||
  fatal "could not resolve credentials path"
SIMPLYE_CREDENTIALS="${CREDENTIALS_PATH}/Certificates/SimplyE/Android"
if [ ! -d "${SIMPLYE_CREDENTIALS}" ]
then
  fatal "${SIMPLYE_CREDENTIALS} does not exist, or is not a directory"
fi

# Append DRM and AWS settings; heredoc expands the credential variables
# validated above.
cat >> "${HOME}/.gradle/gradle.properties" <<EOF
org.librarysimplified.drm.enabled=true
org.lyrasis.aws.access_key_id=${LYRASIS_AWS_ACCESS_ID}
org.lyrasis.aws.secret_access_key=${LYRASIS_AWS_SECRET_KEY}
org.librarysimplified.app.assets.raybooks=${SIMPLYE_CREDENTIALS}
EOF

#------------------------------------------------------------------------
# Adding slack webhook to environment (currently disabled).
#SLACK_WEBHOOK_URL=$(<.ci/credentials/SimplyE/slack-webhook.url) ||
# fatal "Slack Webhook url not found."
#cat >> ".env" <<EOF
#SLACK_WEBHOOK_URL="${SLACK_WEBHOOK_URL}"
#EOF
|
<reponame>newleaders/minitest-rails-capybara
namespace :test do
  # The per-version tasks are identical apart from the version string, so
  # generate them from a single table: reset the lockfile, pin the Rails
  # version via the environment, run the suite, then reset again.
  {
    "5.0"  => "Run tests for Rails 5.0",
    "head" => "Run tests for Rails head",
  }.each do |rails_version, description|
    desc description
    task rails_version do
      sh "rm -f Gemfile.lock"
      ENV["RAILS_VERSION"] = rails_version
      sh "bundle && bundle exec rake test"
      sh "rm -f Gemfile.lock"
    end
  end

  desc "Run tests for all Rails versions"
  task "all" do
    sh "rake test:5.0"
    sh "rake test:head"
  end
end
|
#!/bin/bash
# This script locks in Swarm at the below version and provisions a Consul
# server, two Swarm managers and three Swarm agents across the ceph VMs.
SWARM_VERSION=1.2.0

read -p "This script will remove any existing Swarm config, are you sure? [Yy] " -n 1 -r
echo # move to a new line after the single-character read
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
  exit 1
fi

echo "Installing Consul on ceph1"
vagrant ssh ceph1 -c "sudo docker rm -f consul-server"
# FIX: the original wrapped -p "8500:8500" and -h "consul" in their own
# double quotes INSIDE the double-quoted -c string, which terminated the
# outer quoting early; it only worked because the values contain no
# whitespace. The inner quotes are unnecessary — drop them.
vagrant ssh ceph1 -c "sudo docker run -d \
  -p 8500:8500 \
  -h consul \
  --name=consul-server \
  progrium/consul -server -bootstrap"

# Wait for Consul to come up before managers try to register.
sleep 5

echo "Installing Swarm Manager on ceph1"
vagrant ssh ceph1 -c "sudo docker rm -f swarm-manager1"
vagrant ssh ceph1 -c "sudo docker run -d \
  --name=swarm-manager1 \
  -p 3375:3375 swarm:$SWARM_VERSION -experimental manage \
  --host=0.0.0.0:3375 \
  --replication --advertise 192.168.5.2:3375 \
  consul://192.168.5.2:8500"

echo "Installing Swarm Manager on ceph2"
vagrant ssh ceph2 -c "sudo docker rm -f swarm-manager2"
vagrant ssh ceph2 -c "sudo docker run -d \
  --name=swarm-manager2 \
  -p 3375:3375 swarm:$SWARM_VERSION -experimental manage \
  --host=0.0.0.0:3375 \
  --replication --advertise 192.168.5.3:3375 \
  consul://192.168.5.2:8500"

echo "Installing Swarm Agent on ceph2"
vagrant ssh ceph2 -c "sudo docker rm -f swarm-agent1"
vagrant ssh ceph2 -c "sudo docker run -d \
  --name=swarm-agent1 \
  --restart=always swarm:$SWARM_VERSION -experimental join \
  --advertise=192.168.5.3:2375 \
  consul://192.168.5.2:8500"

echo "Installing Swarm Agent on ceph3"
vagrant ssh ceph3 -c "sudo docker rm -f swarm-agent2"
vagrant ssh ceph3 -c "sudo docker run -d \
  --name=swarm-agent2 \
  --restart=always swarm:$SWARM_VERSION -experimental join \
  --advertise=192.168.5.4:2375 \
  consul://192.168.5.2:8500"

echo "Installing Swarm Agent on ceph4"
vagrant ssh ceph4 -c "sudo docker rm -f swarm-agent3"
vagrant ssh ceph4 -c "sudo docker run -d \
  --name=swarm-agent3 \
  --restart=always swarm:$SWARM_VERSION -experimental join \
  --advertise=192.168.5.5:2375 \
  consul://192.168.5.2:8500"

echo "Done: Swarm available at tcp://192.168.5.2:3375"
|
'use strict';

// Toolbar-button handler: open a Twitter live search for the page's URL.
chrome.browserAction.onClicked.addListener((tab) => {
    // Ask the content script for the page's canonical URL; fall back to
    // the tab URL when none is reported.
    chrome.tabs.sendMessage(tab.id, "canonicalUrl", (canonicalUrl) => {
        // Strip the scheme so the query matches both http and https.
        const url = (canonicalUrl || tab.url).replace(/^https?:\/\//, '');
        chrome.tabs.create({
            url: "https://twitter.com/search?f=live&q=url%3A" + encodeURIComponent(url)
        });
    });
});

// Keep the button enabled only on http(s) pages (disable it on chrome://,
// file://, etc., where the content script cannot run).
chrome.tabs.onUpdated.addListener((tabId, changeInfo, tab) => {
    if (!changeInfo.url) return;
    // Idiomatic prefix test; equivalent to lastIndexOf("http", 0) === 0.
    if (changeInfo.url.startsWith("http")) {
        chrome.browserAction.enable(tabId);
    } else {
        chrome.browserAction.disable(tabId);
    }
});
|
#!/usr/bin/env bash
# Package the Alfred workflow: archive everything under parallels/src
# (excluding macOS Finder .DS_Store metadata) into
# parallels/parallels.alfredworkflow. The && guard skips zipping if cd fails.
cd parallels/src && zip -r ../parallels.alfredworkflow ./ -x "*.DS_Store"
/**
 * Immutable value object describing a restaurant reservation.
 *
 * NOTE(review): date and time are stored as raw strings and no format or
 * range validation is performed here — confirm expected formats ("same day
 * or later" per the original author's note) are enforced by the caller.
 */
class Reservation {
    private String name;            // name the reservation is held under
    private int numberOfPeople;     // party size
    // Business rule stated by the original author (not enforced in this
    // class): a reservation can only be made for the same day or later.
    private String date;
    private String time;

    public Reservation(String name, int numberOfPeople, String date, String time) {
        this.name = name;
        this.numberOfPeople = numberOfPeople;
        this.date = date;
        this.time = time;
    }

    public String getName() {
        return name;
    }

    public int getNumberOfPeople() {
        return numberOfPeople;
    }

    public String getDate() {
        return date;
    }

    public String getTime() {
        return time;
    }
}
#!/bin/bash
# Report upgradeable and kept-back apt packages as a JSON object of the
# shape {"packages":[{"name":...,"status":"upgradeable"|"kept-back"},...]}.
#
# Pipeline overview:
#   1. `apt-get upgrade -s -q` simulates the upgrade (no changes made).
#   2. First sed: tags the "kept back"/"will be upgraded" header lines with
#      #kept-back / #upgradeable labels and explodes the indented package
#      lists into one package name per line.
#   3. Second sed: carries the most recent label in the hold space and
#      emits one escaped JSON object per package.
#   4. tr + final sed: join objects with commas, dropping the trailing one.
#   5. xargs printf: wrap the array in the outer JSON object.
#
# NOTE(review): the sed programs below (including the # comments inside
# them, which are sed comments) are single-quoted program text — left
# byte-identical.
apt-get upgrade -s -q |
sed -n -e '
# make label for kept back packages
s/\(.*kept.*\)/#kept-back/
# make label for upgradeable packages
s/\(.*be upgraded.*\)/#upgradeable/
# remove all lines which are not labels or do not hold package names
/\(#kept-back\|#upgradeable\|\s\{2\}\)/!d
# for lines having package names (having 2 spaces in first position)
/\s\{2\}/{
# copy line to hold buffer
h
# switch buffers
x
# remove new line from buffer
s/\n//
# remove all two sequential empty spaces
s/\s\{2\}//
# put each word into a new line with two empty spaces in front
s/\s\?\(\S*\)/ \1\n/g
# print
p
}
# print line for all lines not holding package names
/\s\{2\}/!p' |
sed -e '
/^\s*$/d
/^#/{
# put label into holdspace
h
# delete to start another cycle
d
}
# append package name into holdspace
G
# make json string from package name and package status
s/^\s\s\(.*\)\n#\(.*\)/{\\"name\\":\\"\1\\",\\"status\\":\\"\2\\"}/' |
# replace all new line by ,
tr '\n' ',' |
sed '
# make json array from line and wrap it into a json object
s/\(.*\),/\1/' |
xargs printf '{"packages": [%b]}'
|
<reponame>filcloud/filecoin-specs
package interpreter
import "errors"
import actor "github.com/filecoin-project/specs/systems/filecoin_vm/actor"
import addr "github.com/filecoin-project/specs/systems/filecoin_vm/actor/address"
import vmr "github.com/filecoin-project/specs/systems/filecoin_vm/runtime"
import sysactors "github.com/filecoin-project/specs/systems/filecoin_vm/sysactors"
// ErrActorNotFound is returned by LoadActor when no code is registered
// for the requested CodeCID.
var (
	ErrActorNotFound = errors.New("Actor Not Found")
)

// staticActorCodeRegistry holds the built-in system actors; it is
// populated by init() via registerBuiltinActors.
// NOTE(review): constructed as a zero value, so its internal map starts
// nil — see the note on actorCodeRegistry.
var staticActorCodeRegistry = &actorCodeRegistry{}

// CodeCIDs for system actors
var (
	InitActorCodeCID           = actor.CodeCID("filecoin/1.0/InitActor")
	CronActorCodeCID           = actor.CodeCID("filecoin/1.0/CronActor")
	AccountActorCodeCID        = actor.CodeCID("filecoin/1.0/AccountActor")
	StoragePowerActorCodeCID   = actor.CodeCID("filecoin/1.0/StoragePowerActor")
	StorageMinerActorCodeCID   = actor.CodeCID("filecoin/1.0/StorageMinerActor")
	StorageMarketActorCodeCID  = actor.CodeCID("filecoin/1.0/StorageMarketActor")
	PaymentChannelActorCodeCID = actor.CodeCID("filecoin/1.0/PaymentChannelActor")
)

// Addresses for singleton system actors
var (
	InitActorAddr           = &addr.Address_I{} // TODO
	CronActorAddr           = &addr.Address_I{} // TODO
	StoragePowerActorAddr   = &addr.Address_I{} // TODO
	StorageMarketActorAddr  = &addr.Address_I{} // TODO
	PaymentChannelActorAddr = &addr.Address_I{} // TODO
)
// init is called in Go during initialization of a program.
// this is an idiomatic way to do this. Implementations should approach this
// however they wish. The point is to initialize a static registry with
// built in pure types that have the code for each actor. Once we have
// a way to load code from the StateTree, use that instead.
func init() {
	registerBuiltinActors(staticActorCodeRegistry)
}

// registerBuiltinActors installs the built-in system actors into r and
// wires the cron actor's invocation targets.
func registerBuiltinActors(r *actorCodeRegistry) {
	// TODO
	cron := &sysactors.CronActorCode_I{}
	r.RegisterActor(InitActorCodeCID, &sysactors.InitActorCode_I{})
	r.RegisterActor(CronActorCodeCID, cron)
	// wire in CRON actions.
	// TODO: there's probably a better place to put this, but for now, do it here.
	cron.Actors_ = append(cron.Actors_, StoragePowerActorAddr)
	cron.Actors_ = append(cron.Actors_, StorageMarketActorAddr)
}
// ActorCode is the interface that all actor code types should satisfy.
// It is merely a method dispatch interface.
type ActorCode interface {
	InvokeMethod(input vmr.InvocInput, method actor.MethodNum, params actor.MethodParams) vmr.InvocOutput
}

// actorCodeRegistry maps actor CodeCIDs to their runnable code.
// NOTE(review): a zero-value registry has a nil code map — it must be
// initialized before any write, otherwise RegisterActor as written will
// panic with "assignment to entry in nil map".
type actorCodeRegistry struct {
	code map[actor.CodeCID]ActorCode
}
// RegisterActor binds actor code to its CodeCID so it can later be
// resolved with LoadActor.
//
// Fixes two defects in the original:
//   - the registry is constructed as a zero value (&actorCodeRegistry{}),
//     so writing to r.code without initializing it panics with
//     "assignment to entry in nil map"; the map is now created lazily.
//   - the parameter was named `actor`, shadowing the imported actor
//     package inside the body; renamed to `code`.
func (r *actorCodeRegistry) RegisterActor(cid actor.CodeCID, code ActorCode) {
	if r.code == nil {
		r.code = make(map[actor.CodeCID]ActorCode)
	}
	r.code[cid] = code
}
// LoadActor resolves the actor code registered for cid, or
// ErrActorNotFound when no code has been registered under that CID.
func (r *actorCodeRegistry) LoadActor(cid actor.CodeCID) (ActorCode, error) {
	if code, ok := r.code[cid]; ok {
		return code, nil
	}
	return nil, ErrActorNotFound
}
|
<filename>Example.cpp
// Just a small state machine (3 states) as demonstration.
//
// Compile like this:
// g++ Macho.cpp Example.cpp
#include "Macho.hpp"
#include <iostream>
using namespace std;
// Demonstration state machine built on the Macho framework's macros
// (TOPSTATE / SUBSTATE / STATE / HISTORY). Hierarchy: Top -> Super ->
// {StateA, StateB}. Each state may carry a "Box" of state-local data
// managed by the framework.
// NOTE(review): semantics of the macros and of setState/box() come from
// Macho.hpp, which is not visible here — behavior is documented only as
// far as the expected-output comment at the end of this file confirms it.
namespace Example {

    ////////////////////////////////////////////////////////
    // State declarations

    // Machine's top state
    TOPSTATE(Top) {
        // Top state variables (visible to all substates)
        struct Box {
            Box() : data(0) {}
            long data;
        };

        STATE(Top)

        // Machine's event protocol: default handlers ignore events;
        // substates override the ones they care about.
        virtual void event1(int i) {}
        virtual void event2(long l) {}

    private:
        // special actions
        void entry();
        void exit();
        void init();
        void init(int);
    };

    // A superstate
    SUBSTATE(Super, Top) {
        STATE(Super)

        // This state has history
        HISTORY()

    private:
        // Entry and exit actions of state
        void entry();
        void exit();
    };

    // A substate
    SUBSTATE(StateA, Super) {
        // State variables (distinct from Top's Box; int rather than long)
        struct Box {
            Box() : data(0) {}
            int data;
        };

        STATE(StateA)

        // Event handler
        void event1(int i);

    private:
        void entry();
        void init(int);
        void exit();
    };

    // A substate
    SUBSTATE(StateB, Super) {
        STATE(StateB)

        void event2(long l);

    private:
        void entry();
        void exit();
    };

    ////////////////////////////////////////////////////////
    // Event handler implementations

    // Top state: trace entry/exit, and start in StateA with box value 44.
    void Top::entry() { cout << "Top::entry" << endl; }
    void Top::exit() { cout << "Top::exit" << endl; }
    void Top::init() {
        // Initialize state with box
        setState<StateA>(44);
    }
    void Top::init(int i) {
        box().data = i;
        init();
    }

    // Super state
    void Super::entry() { cout << "Super::entry" << endl; }
    void Super::exit() { cout << "Super::exit" << endl; }

    // StateA state: on event1, record the value then transition to StateB.
    void StateA::entry() { cout << "StateA::entry" << endl; }
    void StateA::init(int i) { cout << "StateA::init " << i << endl; }
    void StateA::exit() { cout << "StateA::exit" << endl; }
    void StateA::event1(int i) {
        box().data = i;
        cout << "StateA::box().data: " << box().data << endl;
        setState<StateB>();
    }

    // StateB state: on event2, write into Top's box (note the explicit
    // Top:: qualification) then transition back to StateA.
    void StateB::entry() { cout << "StateB::entry" << endl; }
    void StateB::exit() { cout << "StateB::exit" << endl; }
    void StateB::event2(long l) {
        Top::box().data = l;
        cout << "Top::box().data: " << Top::box().data << endl;
        setState<StateA>(42);
    }

} // namespace Example
//////////////////////////////////////////////////////////////////////
// Test run
// Test run: build the machine, feed it two events, then read back the
// top state's data. The expected trace is listed in the comment that
// follows this function in the original file.
int main() {
    using namespace Macho;

    // Initialize state machine with some data; the argument reaches
    // Top::init(int), which stores it in Top's Box before entering StateA.
    Machine<Example::Top> m(State<Example::Top>(11));

    // Dispatch some events
    m->event1(42);
    m->event2(43);

    // Inspect state machine: event2 wrote 43 into Top's box.
    cout << "m.box().data: " << m.box().data << endl;
    return 0;
}
/*
Output is:
Top::entry
Super::entry
StateA::entry
StateA::init 44
StateA::box().data: 42
StateA::exit
StateB::entry
Top::box().data: 43
StateB::exit
StateA::entry
StateA::init 42
m.box().data: 43
StateA::exit
Super::exit
Top::exit
*/
|
#!/bin/bash
# Walk the enterprise OID subtree 1.3.6.1.4.1.1457.4.1.1.8 on the local
# SNMP agent.
#   -Os         print only the last symbolic element of each OID
#   -c private  community string
#               NOTE(review): 'private' is a well-known default community —
#               confirm this is intended outside a lab environment.
#   -v1         use SNMP protocol version 1
snmpwalk -Os -c private -v1 localhost 1.3.6.1.4.1.1457.4.1.1.8
exit 0
|
#!/bin/bash
# Build the service archetype and generate a new service project from it.
#
# Parse options. FIX: the original comment promised "will exit if
# parameters are malformed", but getopt's exit status was never checked
# (and backticks were used); abort explicitly on a parse failure.
temp=$(getopt -n 'setup' -o g:s:v: --long group:,serviceName:,version: -- "$@") || exit 1
eval set -- "$temp"

# default parameters
group="org.jazzcommunity.example"
serviceName="ExampleService"
version="1.0.0"

# handle optional parameters
while true; do
  case "$1" in
    -g | --group ) group="$2"; shift; shift;;
    -s | --serviceName ) serviceName="$2"; shift; shift;;
    -v | --version ) version="$2"; shift; shift;;
    -- ) shift; break;;
    * ) break;;
  esac
done

mvn clean install
if [ $? -ne 0 ]; then
  echo "Maven failed, check log"
  exit 1
fi

# Guard the directory change so the generate step cannot run in the
# wrong place if the build produced no target directory.
cd target || exit 1

mvn archetype:generate -B \
  "-DarchetypeCatalog=local" \
  "-DarchetypeGroupId=org.jazzcommunity.service.archetype" \
  "-DarchetypeArtifactId=org.jazzcommunity.service.archetype" \
  "-Dversion=$version" \
  "-DgroupId=$group" \
  "-DartifactId=$group.parent" \
  "-Dpackage=$group" \
  "-DserviceName=$serviceName"
if [ $? -ne 0 ]; then
  echo "Maven failed, check log"
  exit 1
fi

cd "$group.parent"
|
#!/bin/bash
# One-shot initialization: stage synapse configs, initialize postgres,
# then stop the containers so the user can bring the stack up cleanly.
#
# FIX: the original ignored failures of each step and printed
# "Initialization complete" regardless; abort on the first failing step.
set -euo pipefail

setup_dir=$(dirname "${BASH_SOURCE[0]}")

# copy synapse configs
"${setup_dir}/copy_synapse_configs.sh"

# initialize postgres
"${setup_dir}/init_postgres.sh"

# tear down containers
docker-compose down

echo
echo "Initialization complete. When ready, run: docker-compose up -d"
echo
|
<filename>src/si/modri/WorkLogger/TabWorkspaceController.java
package si.modri.WorkLogger;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.event.ActionEvent;
import javafx.event.EventType;
import javafx.fxml.FXML;
import javafx.fxml.Initializable;
import javafx.scene.canvas.Canvas;
import javafx.scene.control.*;
import javafx.scene.control.cell.PropertyValueFactory;
import javafx.scene.control.cell.TextFieldTableCell;
import javafx.scene.layout.AnchorPane;
import javafx.scene.layout.VBox;
import javafx.stage.FileChooser;
import org.controlsfx.control.Notifications;
import org.controlsfx.dialog.Dialogs;
import org.controlsfx.validation.ValidationResult;
import org.controlsfx.validation.ValidationSupport;
import org.joda.time.DateTime;
import org.joda.time.Minutes;
import java.io.File;
import java.net.URL;
import java.time.LocalDate;
import java.time.ZoneId;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.ResourceBundle;
/**
 * JavaFX controller for a single workspace tab: shows the workspace's time
 * entries in a table, draws a week-based graph on a canvas, supports manual
 * entry add/remove/edit, *.ics import, and computes total time/earning
 * statistics over a selectable date range.
 *
 * NOTE(review): persistence is delegated to WorkspaceManager (one
 * "&lt;name&gt;.bin" file per workspace); its entry map appears to be keyed by
 * start-time millis rendered as a String — see the note in
 * calculateStatistics().
 */
public class TabWorkspaceController implements Initializable {
    // FXML-injected controls (names match fx:id values in the FXML layout).
    @FXML public TableView tv_entries;
    @FXML public TableColumn tc_start;
    @FXML public TableColumn tc_end;
    @FXML public TableColumn tc_duration;
    @FXML public Canvas c_week_graph;
    @FXML public DatePicker dp_from_date;
    @FXML public DatePicker dp_to_date;
    @FXML public Button btn_save;
    @FXML public TextField tf_earning;
    @FXML public Button btn_add_entry;
    @FXML public Label l_totalTime;
    @FXML public Label l_totalEarning;
    @FXML public VBox vb_add_entry;
    @FXML public DatePicker dp_add_start_date;
    @FXML public TextField tf_add_start_h;
    @FXML public TextField tf_add_start_min;
    @FXML public DatePicker dp_add_end_date;
    @FXML public TextField tf_add_end_h;
    @FXML public TextField tf_add_end_min;
    @FXML public AnchorPane ap_week_graph_container;
    //@FXML public Label l_avgPerDay;
    //@FXML public Label l_maxDay;
    //@FXML public Label l_minDay;

    // Last known size of the graph container, used to detect resizes.
    private double ap_week_graph_container_width;
    private double ap_week_graph_container_height;

    Tab ownerTab;                    // tab hosting this controller's view
    WorkspaceManager workspaceManager;
    CanvasWeekDraw canvasWeekDraw;   // renders entries onto c_week_graph
    ValidationSupport validationSupport = new ValidationSupport();

    private String workspaceName;
    private double earning;          // hourly rate for this workspace

    /**
     * Load data for passed workspace.
     * @param workspaceName
     */
    public void loadWorkspace(String workspaceName, Tab ownerTab, SettingsManager settingsManager){
        this.workspaceName = workspaceName;
        workspaceManager = new WorkspaceManager(workspaceName + ".bin", true);
        // Pick up this workspace's hourly rate from the settings.
        for(SettingsManager.Workspace w: settingsManager.getWorkspaces()){
            if(w.name.equals(workspaceName)) {
                tf_earning.setText(w.earning + "");
                earning = w.earning;
            }
        }
        fillEntriesTable();
        canvasWeekDraw.setData(workspaceManager.entries);
        canvasWeekDraw.draw();
        this.ownerTab = ownerTab;
        calculateStatistics();
    }

    @Override
    public void initialize(URL url, ResourceBundle resourceBundle) {
        //init table view: all three columns use editable text cells backed
        //by TimeEntry's tStartTime/tEndTime/tDuration properties.
        tc_start.setCellFactory(TextFieldTableCell.forTableColumn());
        tc_start.setCellValueFactory(
                new PropertyValueFactory<WorkspaceManager.TimeEntry, String>("tStartTime")
        );
        tc_end.setCellFactory(TextFieldTableCell.forTableColumn());
        tc_end.setCellValueFactory(
                new PropertyValueFactory<WorkspaceManager.TimeEntry, String>("tEndTime")
        );
        tc_duration.setCellFactory(TextFieldTableCell.forTableColumn());
        tc_duration.setCellValueFactory(
                new PropertyValueFactory<WorkspaceManager.TimeEntry, String>("tDuration")
        );
        canvasWeekDraw = new CanvasWeekDraw(c_week_graph);
        onResetRangeClick(false);
        // Track container resizes and keep the canvas 10px smaller.
        // NOTE(review): listening on EventType.ROOT fires for every event on
        // the container, not just layout changes — works, but is broad.
        ap_week_graph_container_width = ap_week_graph_container.getWidth();
        ap_week_graph_container_height = ap_week_graph_container.getHeight();
        ap_week_graph_container.addEventHandler(EventType.ROOT, event -> {
            if(ap_week_graph_container_width != ap_week_graph_container.getWidth() || ap_week_graph_container_height != ap_week_graph_container.getHeight()){
                ap_week_graph_container_width = ap_week_graph_container.getWidth();
                ap_week_graph_container_height = ap_week_graph_container.getHeight();
                c_week_graph.setWidth(ap_week_graph_container_width - 10);
                c_week_graph.setHeight(ap_week_graph_container_height - 10);
            }
        });
        // Range pickers warn (not block) when from/to are out of order.
        validationSupport.registerValidator(dp_from_date, false, (Control c, LocalDate newValue) ->
                ValidationResult.fromWarningIf( dp_from_date, "'From date' should be before 'to date'.", !dp_from_date.getValue().isBefore(dp_to_date.getValue())));
        validationSupport.registerValidator(dp_to_date, false, (Control c, LocalDate newValue) ->
                ValidationResult.fromWarningIf( dp_to_date, "'To date' should be after 'from date'", !dp_from_date.getValue().isBefore(dp_to_date.getValue())));
        // Same warning for the add-entry dialog's start/end pickers, with
        // null guards because those pickers start empty.
        validationSupport.registerValidator(dp_add_start_date, false, (Control c, LocalDate newValue) -> {
            boolean a = false;
            if(dp_add_start_date.getValue() != null && dp_add_end_date.getValue() != null)
                a = dp_add_end_date.getValue().isBefore(dp_add_start_date.getValue());
            return ValidationResult.fromWarningIf(dp_add_start_date, "'Start date' should be before 'end date'.", a);
        });
        validationSupport.registerValidator(dp_add_end_date, false, (Control c, LocalDate newValue) -> {
            boolean a = false;
            if(dp_add_start_date.getValue() != null && dp_add_end_date.getValue() != null)
                a = dp_add_end_date.getValue().isBefore(dp_add_start_date.getValue());
            return ValidationResult.fromWarningIf(dp_add_end_date, "'End date' should be after 'start date'", a);
        });
        // Collapse the add-entry pane entirely (not just invisible) when hidden.
        vb_add_entry.managedProperty().bind(vb_add_entry.visibleProperty());
        vb_add_entry.setVisible(false);
    }

    /**
     * Refresh displayed data for currently selected workspace.
     */
    public void onRefreshWorkspaceClick(){
        workspaceManager.read();
        fillEntriesTable();
    }

    /**
     * Put workspace data into table 'tv_entries'.
     */
    public void fillEntriesTable() {
        ObservableList<WorkspaceManager.TimeEntry> data = FXCollections.observableArrayList();
        for(Map.Entry<String, WorkspaceManager.TimeEntry> e: workspaceManager.entries.entrySet()){
            data.add(e.getValue());
        }
        tv_entries.setItems(data);
    }

    /**
     * Edit entry: commits an inline table edit. The entry's map key is
     * derived from its times, so the entry is re-keyed after the change.
     * @param stCellEditEvent
     */
    public void onEntriesColumnEditCommit(TableColumn.CellEditEvent<WorkspaceManager.TimeEntry, String> stCellEditEvent) {
        String colId = stCellEditEvent.getTableColumn().getId();
        String oldKey = stCellEditEvent.getRowValue().getKey();
        if(colId.equals("tc_start")){
            stCellEditEvent.getRowValue().settStartTime(stCellEditEvent.getNewValue());
        } else if(colId.equals("tc_end")){
            stCellEditEvent.getRowValue().settEndTime(stCellEditEvent.getNewValue());
        }
        String newKey = stCellEditEvent.getRowValue().getKey();
        workspaceManager.entries.put(newKey, workspaceManager.entries.get(oldKey));
        workspaceManager.entries.remove(oldKey);
        refreshTableEntriesView();
        btn_save.setDisable(false);
    }

    /**
     * Trick for refreshing table (redraw): toggling a column's visibility
     * forces the TableView to re-render its cells.
     */
    private void refreshTableEntriesView(){
        tc_duration.setVisible(false);
        tc_duration.setVisible(true);
        canvasWeekDraw.draw();
    }

    /**
     * Import ics file(s) chosen by the user; each file is merged into the
     * workspace and a notification reports per-file success/failure.
     */
    public void importFile(){
        FileChooser fileChooser = new FileChooser();
        fileChooser.setTitle("Select *.ics files");
        fileChooser.getExtensionFilters().add(new FileChooser.ExtensionFilter("*.ics","*.ics"));
        List<File> list = fileChooser.showOpenMultipleDialog(null);
        if (list != null) {
            for (File file : list) {
                if(workspaceManager.loadFromIcsFile(file)){
                    Notifications.create()
                            .text("Imported.")
                            .showInformation();
                } else {
                    Notifications.create()
                            .title("Error")
                            .text("Failed to import.")
                            .showWarning();
                }
            }
            btn_save.setDisable(false);
            refreshAll();
        }
    }

    /**
     * Save the workspace to disk; warn on failure, confirm and disable the
     * save button on success.
     */
    public void onSaveClick() {
        if(!workspaceManager.save()){
            Dialogs.create()
                    .lightweight()
                    .owner(ownerTab)
                    .title("Error")
                    .message("Couldn't save.")
                    .showWarning();
            /*Alert dlg = new Alert(Alert.AlertType.WARNING, "");
            dlg.initModality(Modality.NONE);
            dlg.initOwner(null);
            dlg.setTitle("Error");
            dlg.getDialogPane().setContentText("Couldn't save.");
            dlg.getDialogPane().setHeaderText("Damaged config file.");
            dlg.show();*/
        } else {
            Notifications.create()
                    .text("Workspace " + workspaceName + " saved.")
                    .showInformation();
            btn_save.setDisable(true);
        }
    }

    // Recompute stats only if the canvas accepted the new range bound.
    public void onFromDateSelect() {
        if(canvasWeekDraw.setStart(dp_from_date.getValue())){
            calculateStatistics();
        }
    }

    public void onToDateSelect() {
        if(canvasWeekDraw.setEnd(dp_to_date.getValue())) {
            calculateStatistics();
        }
    }

    /**
     * Reset range from to
     */
    public void onResetRangeClick() {
        onResetRangeClick(false);
    }

    /**
     * Reset range from to, syncing the date pickers to the canvas's
     * default range.
     * @param cancelDraw DEPRECATED
     */
    public void onResetRangeClick(boolean cancelDraw) {
        if(cancelDraw)
            canvasWeekDraw.setForceCancelDraw(true); //do not redraw // FIXME CURRENTLY NOT USED
        canvasWeekDraw.resetRange();
        LocalDate date1 = canvasWeekDraw.getStart().toDate().toInstant().atZone(ZoneId.systemDefault()).toLocalDate();
        dp_from_date.setValue(date1);
        LocalDate date2 = canvasWeekDraw.getEnd().toDate().toInstant().atZone(ZoneId.systemDefault()).toLocalDate();
        dp_to_date.setValue(date2);
        calculateStatistics();
    }

    /**
     * Calculates new statistics: sums minutes and earnings over entries in
     * the currently selected [start, end] range.
     *
     * NOTE(review): tailMap() is called with the start millis rendered as a
     * String, so the range scan relies on lexicographic ordering of the
     * string keys matching numeric ordering of the millis — verify that key
     * format (e.g. fixed width) guarantees this in WorkspaceManager.
     */
    public void calculateStatistics() {
        if(workspaceManager == null)
            return;
        long totalTime = 0;
        double totalEarning = 0;
        double earningPerMinute = earning / 60;
        long endTime = canvasWeekDraw.getEnd().getMillis();
        for(Map.Entry<String, WorkspaceManager.TimeEntry> entry : workspaceManager.entries.tailMap( canvasWeekDraw.getStart().getMillis() + "" ).entrySet()) {
            WorkspaceManager.TimeEntry timeEntry = entry.getValue();
            if(timeEntry.startTime.getMillis() > endTime) {
                break;
            }
            int minuteDuration = Minutes.minutesBetween(timeEntry.startTime, timeEntry.endTime).getMinutes();
            totalTime += minuteDuration;
            totalEarning += minuteDuration * earningPerMinute;
        }
        l_totalTime.setText(WorkspaceManager.getDuration(totalTime * 1000 * 60));
        l_totalEarning.setText(String.format("%.2f", totalEarning) + " \u20ac");
        //l_avgPerDay.setText("");
        //l_maxDay.setText("");
        //l_minDay.setText("");
    }

    /**
     * Recalculate statistics data after the user edits the hourly rate.
     */
    public void reCalculateStatistics() {
        try {
            earning = Double.parseDouble(tf_earning.getText());
            calculateStatistics();
        } catch (Exception e){} // NOTE(review): silently ignores a non-numeric rate; the previous rate stays in effect
    }

    /**
     * Show entry dialog.
     * @param actionEvent
     */
    public void addEntryDialog(ActionEvent actionEvent) {
        hideShowAddEntryDialog(true);
    }

    /**
     * Remove value from table: deletes the selected entries from both the
     * workspace map and the table's item list.
     * @param actionEvent
     */
    public void onRemoveEntryClick(ActionEvent actionEvent) {
        ObservableList<WorkspaceManager.TimeEntry> selectedItems = tv_entries.getSelectionModel().getSelectedItems();
        selectedItems.forEach(timeEntry -> {
            workspaceManager.entries.remove(timeEntry.getKey());
        });
        selectedItems.forEach(tv_entries.getItems()::remove);
        btn_save.setDisable(false);
    }

    /**
     * On Cancel button click, inside new entry add dialog.
     * @param actionEvent
     */
    public void cancel_add_entry_dialog(ActionEvent actionEvent) {
        hideShowAddEntryDialog(false);
    }

    /**
     * Hide/show dialog for adding new entry; always resets the time fields
     * to 00:00 and both date pickers to today.
     * @param show
     */
    private void hideShowAddEntryDialog(boolean show){
        btn_add_entry.setDisable(show);
        vb_add_entry.setVisible(show);
        tf_add_start_h.setText("00");
        tf_add_start_min.setText("00");
        tf_add_end_h.setText("00");
        tf_add_end_min.setText("00");
        dp_add_start_date.setValue(LocalDate.now());
        dp_add_end_date.setValue(LocalDate.now());
    }

    /**
     * On add entry button click, add values to table and close dialog.
     * Hours/minutes are added onto the start-of-day of the picked dates.
     * @param actionEvent
     */
    public void add_entry_from_dialog(ActionEvent actionEvent) {
        try{
            DateTime startDate = new DateTime(Date.from(dp_add_start_date.getValue().atStartOfDay(ZoneId.systemDefault()).toInstant()));
            DateTime endDate = new DateTime(Date.from(dp_add_end_date.getValue().atStartOfDay(ZoneId.systemDefault()).toInstant()));
            int num_start_h = Integer.parseInt(tf_add_start_h.getText());
            int num_start_min = Integer.parseInt(tf_add_start_min.getText());
            int num_end_h = Integer.parseInt(tf_add_end_h.getText());
            int num_end_min = Integer.parseInt(tf_add_end_min.getText());
            startDate = startDate.plusHours(num_start_h).plusMinutes(num_start_min);
            endDate = endDate.plusHours(num_end_h).plusMinutes(num_end_min);
            WorkspaceManager.TimeEntry timeEntry = new WorkspaceManager(). new TimeEntry(startDate, endDate);
            workspaceManager.entries.put(timeEntry.getKey(), timeEntry);
            btn_save.setDisable(false);
            refreshAll();
            hideShowAddEntryDialog(false);
        } catch (Exception e){
            // NOTE(review): invalid hour/minute input is silently dropped —
            // the dialog stays open with no feedback to the user.
        }
    }

    // Re-sync table, graph and statistics after any data change.
    public void refreshAll(){
        fillEntriesTable();
        canvasWeekDraw.draw();
        calculateStatistics();
    }
}
<gh_stars>0
package cn.stylefeng.roses.kernel.config.api;
import cn.hutool.db.Entity;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
/**
 * API for reading system configuration metadata.
 *
 * @author fengshuonan
 * @date 2021/3/27 21:15
 */
public interface SysConfigDataApi {

    /**
     * Fetch every record from the system configuration table.
     *
     * @param conn raw database connection
     * @return list of all configuration records
     * @throws SQLException if querying over the connection fails
     * @author fengshuonan
     * @date 2021/3/27 21:15
     */
    List<Entity> getConfigs(Connection conn) throws SQLException;

    /**
     * SQL statement that selects the full configuration list.
     *
     * @return the SQL string used by implementations to load all configs
     * @author fengshuonan
     * @date 2021/3/27 21:19
     */
    String getConfigListSql();
}
|
<reponame>Purlemon/oatpp
/***************************************************************************
*
* Project _____ __ ____ _ _
* ( _ ) /__\ (_ _)_| |_ _| |_
* )(_)( /(__)\ )( (_ _)(_ _)
* (_____)(__)(__)(__) |_| |_|
*
*
* Copyright 2018-present, <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***************************************************************************/
#ifndef oatpp_utils_ConversionUtils_hpp
#define oatpp_utils_ConversionUtils_hpp
#include "oatpp/core/data/mapping/type/Primitive.hpp"
#include "oatpp/core/Types.hpp"
#include "oatpp/core/base/Countable.hpp"
#include "oatpp/core/base/Environment.hpp"
#include <string>
namespace oatpp { namespace utils { namespace conversion {
/**
* String to 32-bit integer.
* @param str - string as `const char*`.
* @return - 32-bit integer value.
*/
v_int32 strToInt32(const char* str);
/**
* String to 32-bit integer.
* @param str - string as `oatpp::String`.
* @param success - out parameter. `true` if operation was successful. `false` otherwise.
* @return - 32-bit integer value.
*/
v_int32 strToInt32(const oatpp::String& str, bool& success);
/**
* String to 32-bit unsigned integer.
* @param str - string as `const char*`.
* @return - 32-bit unsigned integer value.
*/
v_uint32 strToUInt32(const char* str);
/**
* String to 32-bit unsigned integer.
* @param str - string as `oatpp::String`.
* @param success - out parameter. `true` if operation was successful. `false` otherwise.
* @return - 32-bit unsigned integer value.
*/
v_uint32 strToUInt32(const oatpp::String& str, bool& success);
/**
* String to 64-bit integer.
* @param str - string as `const char*`.
* @return - 64-bit integer value.
*/
v_int64 strToInt64(const char* str);
/**
* String to 64-bit integer.
* @param str - string as `oatpp::String`.
* @param success - out parameter. `true` if operation was successful. `false` otherwise.
* @return - 64-bit integer value.
*/
v_int64 strToInt64(const oatpp::String& str, bool& success);
/**
* String to 64-bit unsigned integer.
* @param str - string as `const char*`.
* @return - 64-bit unsigned integer value.
*/
v_uint64 strToUInt64(const char* str);
/**
* String to 64-bit unsigned integer.
* @param str - string as `oatpp::String`.
* @param success - out parameter. `true` if operation was successful. `false` otherwise.
* @return - 64-bit unsigned integer value.
*/
v_uint64 strToUInt64(const oatpp::String& str, bool& success);
/**
* Convert 32-bit integer to it's string representation.
* @param value - 32-bit integer value.
* @param data - buffer to write data to.
* @param n - buffer size.
* @return - length of the resultant string.
*/
v_buff_size int32ToCharSequence(v_int32 value, p_char8 data, v_buff_size n);
/**
* Convert 32-bit unsigned integer to it's string representation.
* @param value - 32-bit unsigned integer value.
* @param data - buffer to write data to.
* @param n - buffer size.
* @return - length of the resultant string.
*/
v_buff_size uint32ToCharSequence(v_uint32 value, p_char8 data, v_buff_size n);
/**
* Convert 64-bit integer to it's string representation.
* @param value - 64-bit integer value.
* @param data - buffer to write data to.
* @param n - buffer size.
* @return - length of the resultant string.
*/
v_buff_size int64ToCharSequence(v_int64 value, p_char8 data, v_buff_size n);
/**
* Convert 64-bit unsigned integer to it's string representation.
* @param value - 64-bit unsigned integer value.
* @param data - buffer to write data to.
* @param n - buffer size.
* @return - length of the resultant string.
*/
v_buff_size uint64ToCharSequence(v_uint64 value, p_char8 data, v_buff_size n);
/**
* Convert 32-bit integer to it's string representation.
* @param value - 32-bit integer value.
* @return - value as `oatpp::String`
*/
oatpp::String int32ToStr(v_int32 value);
/**
* Convert 32-bit unsigned integer to it's string representation.
* @param value - 32-bit unsigned integer value.
* @return - value as `oatpp::String`
*/
oatpp::String uint32ToStr(v_uint32 value);
/**
* Convert 64-bit integer to it's string representation.
* @param value - 64-bit integer value.
* @return - value as `oatpp::String`
*/
oatpp::String int64ToStr(v_int64 value);
/**
* Convert 64-bit unsigned integer to it's string representation.
* @param value - 64-bit unsigned integer value.
* @return - value as `oatpp::String`
*/
oatpp::String uint64ToStr(v_uint64 value);
/**
* Convert 32-bit integer to it's string representation.
* @param value - 32-bit integer value.
* @return - value as `std::string`
*/
std::string int32ToStdStr(v_int32 value);
/**
* Convert 32-bit unsigned integer to it's string representation.
* @param value - 32-bit unsigned integer value.
* @return - value as `std::string`
*/
std::string uint32ToStdStr(v_uint32 value);
/**
* Convert 64-bit integer to it's string representation.
* @param value - 64-bit integer value.
* @return - value as `std::string`
*/
std::string int64ToStdStr(v_int64 value);
/**
* Convert 64-bit unsigned integer to it's string representation.
* @param value - 64-bit unsigned integer value.
* @return - value as `std::string`
*/
std::string uint64ToStdStr(v_uint64 value);
/**
 * Render a primitive value (int, float, etc.) into `data` using an
 * snprintf-style format `pattern`.
 * @tparam T - primitive value type (int, float, etc.).
 * @param value - value to format.
 * @param data - destination buffer.
 * @param n - destination buffer size.
 * @param pattern - format string as accepted by `snprintf`.
 * @return - the `snprintf` result: the number of characters written, or the
 *           number that would have been written had the buffer been large
 *           enough (i.e. may exceed `n` on truncation).
 */
template<typename T>
v_buff_size primitiveToCharSequence(T value, p_char8 data, v_buff_size n, const char *pattern) {
  const auto written = snprintf(reinterpret_cast<char*>(data), n, pattern, value);
  return written;
}
/**
 * Convert a primitive value (int, float, etc.) to its string representation
 * using a format pattern.
 * @tparam T - primitive value type (int, float, etc.).
 * @param value - actual value.
 * @param pattern - format string as for `snprintf` (the previous doc said
 *                  `sprintf`; formatting actually goes through `snprintf`).
 * @return - formatted value as `oatpp::String`, or `nullptr` if formatting
 *           failed or the result did not fit the internal buffer.
 */
template<typename T>
oatpp::String primitiveToStr(T value, const char* pattern){
  v_char8 buff [100];
  // sizeof(buff) keeps the size argument tied to the declaration above
  // (previously the literal 100 was repeated in two places).
  auto size = primitiveToCharSequence(value, &buff[0], sizeof(buff), pattern);
  // snprintf reports the WOULD-BE length on truncation; the previous code
  // passed that length straight to the String constructor, reading past the
  // end of `buff`. Treat truncation as failure instead.
  if(size > 0 && size < (v_buff_size) sizeof(buff)) {
    return oatpp::String((const char*)&buff[0], size);
  }
  return nullptr;
}
/**
* String to 32-bit float.
* @param str - string as `const char*`.
* @return - 32-bit float value.
*/
v_float32 strToFloat32(const char* str);
/**
* String to 32-bit float.
* @param str - string as `oatpp::String`.
* @param success - out parameter. `true` if operation was successful. `false` otherwise.
* @return - 32-bit float value.
*/
v_float32 strToFloat32(const oatpp::String& str, bool& success);
/**
* String to 64-bit float.
* @param str - string as `const char*`.
* @return - 64-bit float value.
*/
v_float64 strToFloat64(const char* str);
/**
* String to 64-bit float.
* @param str - string as `oatpp::String`.
* @param success - out parameter. `true` if operation was successful. `false` otherwise.
* @return - 64-bit float value.
*/
v_float64 strToFloat64(const oatpp::String& str, bool& success);
/**
* Convert 32-bit float to it's string representation.
* @param value - 32-bit float value.
* @param data - buffer to write data to.
* @param n - buffer size.
* @return - length of the resultant string.
*/
v_buff_size float32ToCharSequence(v_float32 value, p_char8 data, v_buff_size n, const char* format = OATPP_FLOAT_STRING_FORMAT);
/**
* Convert 64-bit float to it's string representation.
* @param value - 64-bit float value.
* @param data - buffer to write data to.
* @param n - buffer size.
* @return - length of the resultant string.
*/
v_buff_size float64ToCharSequence(v_float64 value, p_char8 data, v_buff_size n, const char* format = OATPP_FLOAT_STRING_FORMAT);
/**
* Convert 32-bit float to it's string representation.
* @param value - 32-bit float value.
* @return - value as `oatpp::String`
*/
oatpp::String float32ToStr(v_float32 value, const char* format = OATPP_FLOAT_STRING_FORMAT);
/**
* Convert 64-bit float to it's string representation.
* @param value - 64-bit float value.
* @return - value as `oatpp::String`
*/
oatpp::String float64ToStr(v_float64 value, const char* format = OATPP_FLOAT_STRING_FORMAT);
/**
* Convert boolean to it's string representation.
* @param value - boolean value.
* @return - value as `oatpp::String`;
*/
oatpp::String boolToStr(bool value);
/**
* parse string to boolean value.
* @param str - string to parse.
* @param success - out parameter. `true` if operation was successful. `false` otherwise.
* @return - boolean value.
*/
bool strToBool(const oatpp::String& str, bool& success);
}}}
#endif /* oatpp_utils_ConversionUtils_hpp */
|
/// Returns the sum of `a` and `b` (standard `i32` overflow semantics:
/// panics in debug builds, wraps in release builds).
pub fn add(a: i32, b: i32) -> i32 {
    a + b
}

/// Returns `a` minus `b`.
pub fn subtract(a: i32, b: i32) -> i32 {
    a - b
}

/// Returns the product of `a` and `b`.
pub fn multiply(a: i32, b: i32) -> i32 {
    a * b
}

/// Returns the integer quotient `a / b`, truncating toward zero.
///
/// # Panics
/// Panics if `b` is zero, or on the `i32::MIN / -1` overflow case.
pub fn divide(a: i32, b: i32) -> i32 {
    a / b
}

/// Returns the remainder of `a / b`; the result's sign follows `a`.
///
/// # Panics
/// Panics if `b` is zero, or on the `i32::MIN % -1` overflow case.
pub fn modulo(a: i32, b: i32) -> i32 {
    a % b
}
import java.util.ArrayList;
import java.util.HashMap;
/**
 * Generic binary heap backed by an {@link ArrayList}, with a {@link HashMap}
 * index that enables O(log n) removal of arbitrary items.
 *
 * <p>By default this is a MAX-heap; pass {@code true} to the constructor for
 * a MIN-heap. NOTE: the index map keeps one position per item, so duplicate
 * items (by equals/hashCode) are not supported reliably.
 *
 * @param <T> element type; must be comparable to itself
 */
public class HeapGeneric<T extends Comparable<T>> {

    private ArrayList<T> data;        // heap storage in level order
    private boolean isMin;            // true => min-heap, false => max-heap
    private HashMap<T, Integer> map;  // item -> its current index in data

    /** Creates a max-heap. */
    public HeapGeneric() {
        this(false);
    }

    /** Creates a min-heap when {@code isMin} is true, otherwise a max-heap. */
    public HeapGeneric(boolean isMin) {
        this.data = new ArrayList<>();
        this.isMin = isMin;
        this.map = new HashMap<>();
    }

    /** Inserts an item and restores the heap property in O(log n). */
    public void add(T item) {
        data.add(item);
        map.put(item, data.size() - 1);
        heapifyUp(data.size() - 1);
    }

    /** True when {@code a} belongs above {@code b} for this heap's ordering. */
    private boolean hasPriority(T a, T b) {
        int cmp = a.compareTo(b);
        return isMin ? cmp < 0 : cmp > 0;
    }

    /** Bubbles the item at {@code index} up while it outranks its parent. */
    private void heapifyUp(int index) {
        T item = data.get(index);
        while (index > 0) {
            int parentIndex = (index - 1) / 2;
            T parent = data.get(parentIndex);
            if (hasPriority(item, parent)) {
                data.set(index, parent);
                data.set(parentIndex, item);
                map.put(parent, index);
                map.put(item, parentIndex);
                index = parentIndex;
            } else {
                break;
            }
        }
    }

    /** Sinks the item at {@code index} down while a child outranks it. */
    private void heapifyDown(int index) {
        int size = data.size();
        T item = data.get(index);
        while (index < size / 2) {
            int leftChildIdx = 2 * index + 1;
            int rightChildIdx = 2 * index + 2;
            int preferred = leftChildIdx;
            // BUG FIX: the original condition was
            //   rightChildIdx < size && (minClause) || (maxClause)
            // so for a max-heap with rightChildIdx == size the maxClause still
            // ran and data.get(rightChildIdx) threw IndexOutOfBoundsException.
            // The bounds check must guard BOTH comparison clauses.
            if (rightChildIdx < size
                    && hasPriority(data.get(rightChildIdx), data.get(leftChildIdx))) {
                preferred = rightChildIdx;
            }
            if (hasPriority(data.get(preferred), item)) {
                data.set(index, data.get(preferred));
                data.set(preferred, item);
                map.put(data.get(index), index);
                map.put(item, preferred);
                index = preferred;
            } else {
                break;
            }
        }
    }

    /**
     * Removes {@code item} if present: moves the last element into its slot,
     * then re-heapifies from the vacated index. No-op if the item is absent.
     */
    public void remove(T item) {
        if (map.containsKey(item)) {
            int index = map.get(item);
            int size = data.size();
            T lastItem = data.get(size - 1);
            data.set(index, lastItem);
            map.put(lastItem, index);
            data.remove(size - 1);
            map.remove(item);
            if (index < size - 1) {
                // The relocated element may need to travel either direction.
                heapifyUp(index);
                heapifyDown(index);
            }
        }
    }
}
package com.avalon.caverns.core.init;
/**
 * Registry holder for the mod's tile-entity types.
 * Currently empty — presumably a placeholder mirroring the other *Init
 * classes in this package; TODO confirm the intended registration approach.
 */
public class TileEntityTypeInit {
}
|
<gh_stars>0
import React, { useEffect, useMemo } from "react"
import { Divider, Typography } from '@material-ui/core'
import { VideoTileState } from "amazon-chime-sdk-js";
import { useAppState } from "../../../../providers/AppStateProvider";
import { RendererForRecorder } from "./helper/RendererForRecorder";
// Layout focus choice for the recorder: shared screen content vs. the active
// speaker. NOTE(review): the "Focust" typo is part of the exported API;
// renaming it would break importers.
export type FocustTarget = "SharedContent" | "Speaker"

type Props = {
    width: number
    height: number
    // Hands the composited recorder canvas to the parent (null on teardown).
    setRecorderCanvas?: (c:HTMLCanvasElement|null)=>void
};

/**
 * Composites meeting video into the fixed-size #recorderCanvas (1920x1080)
 * for recording. Shared-content tiles take priority; otherwise the active
 * speaker's tile is rendered. The small <video> elements at the bottom are
 * the decode surfaces the renderer copies frames from.
 */
export const RecorderView = ({ width, height, setRecorderCanvas }: Props) => {
    const { videoTileStates, activeSpeakerId, meetingSession } = useAppState()
    // One renderer per mount (deps deliberately empty).
    const renderer = useMemo(()=>{return new RendererForRecorder(meetingSession!)},[]) // eslint-disable-line
    const contentsTiles = Object.values(videoTileStates).filter(tile=>{return tile.isContent})
    const activeSpekerTile = activeSpeakerId && videoTileStates[activeSpeakerId] ? videoTileStates[activeSpeakerId] : null
    // Prefer shared content; fall back to the active speaker (set may be empty).
    const targetTiles = (contentsTiles.length > 0 ? contentsTiles : [activeSpekerTile]).filter(tile=>{return tile!==null}) as VideoTileState[]
    // Concatenated attendee ids: a cheap change-detection key for the rebind effect.
    const targetTilesId = targetTiles.reduce<string>((sum,cur)=>{return `${sum}-${cur.boundAttendeeId}`},"")

    //// setup renderer (runs once; stopped and destroyed on unmount)
    useEffect(() => {
        const dstCanvas = document.getElementById("recorderCanvas") as HTMLCanvasElement
        renderer.init(dstCanvas)
        renderer.start()
        return () => {
            console.log("destroy renderer", renderer)
            renderer.destroy()
        }
    }, []) // eslint-disable-line

    //// setTargetTileNum — rebind the source <video> elements whenever the tile set changes
    useEffect(()=>{
        console.log("TARGET CHANGE!", targetTilesId)
        const videoElems = [...Array(targetTiles.length)].map((v,i)=>{return document.getElementById(`video${i}`) as HTMLVideoElement})
        console.log(videoElems)
        targetTiles.forEach((tile,index)=>{
            if(tile.tileId){
                meetingSession?.audioVideo.bindVideoElement(tile.tileId, videoElems[index])
            }
        })
        renderer.setSrcVideoElements(videoElems)
    },[targetTilesId]) // eslint-disable-line

    // notify recorder canvas to parent (to access from sidebar pane)
    useEffect(() => {
        console.log("set recorder canvas")
        const dstCanvas = document.getElementById("recorderCanvas") as HTMLCanvasElement
        setRecorderCanvas!(dstCanvas)
        return () => {
            console.log("remove recorder canvas")
            setRecorderCanvas!(null)
        }
    }, []) // eslint-disable-line

    return (
        <div style={{ width: width, height: height }}>
            <div style={{ width: "100%", height: "70%", objectFit: "contain", background:"#bbbbbb"}}>
                <canvas width="1920" height="1080" id="recorderCanvas" style={{ width: "100%", height: "100%", border: "medium solid #ffaaaa"}} />
            </div>
            <div style={{ width: "100%", height: "20%", objectFit: "contain" }}>
                <Divider />
                <Typography variant="body2" color="textSecondary">
                    resources
                </Typography>
                {/* Fixed pool of 10 decode surfaces; ids must match `video${i}` lookups above. */}
                <video id="video0" style={{width:50, height:50}}/>
                <video id="video1" style={{width:50, height:50}}/>
                <video id="video2" style={{width:50, height:50}}/>
                <video id="video3" style={{width:50, height:50}}/>
                <video id="video4" style={{width:50, height:50}}/>
                <video id="video5" style={{width:50, height:50}}/>
                <video id="video6" style={{width:50, height:50}}/>
                <video id="video7" style={{width:50, height:50}}/>
                <video id="video8" style={{width:50, height:50}}/>
                <video id="video9" style={{width:50, height:50}}/>
            </div>
        </div>
    )
}
|
use std::thread;
const NTHREADS: usize = 5;

fn main() {
    // Launch NTHREADS workers, collecting their join handles up front.
    let handles: Vec<_> = (0..NTHREADS)
        .map(|id| {
            thread::spawn(move || {
                println!("this is thread number {}", id);
            })
        })
        .collect();

    // Barrier: block until every worker has finished, ignoring join results.
    for handle in handles {
        let _ = handle.join();
    }
}
#!/bin/bash
# Repast Simphony Model Starter
# By Michael J. North and Jonathan Ozik
# 11/12/2007
#
# Builds the Repast Simphony classpath and launches the Heatbugs model.
# REPAST_VERSION and Groovy_All_Jar are expected to be provided by the
# environment (e.g. a generated launcher) -- TODO confirm.

# Resolve the directory containing this script and work from there, so the
# relative paths below are stable regardless of invocation directory.
# (The original assigned to the special variable PWD and broke when $0
# contained no slash.)
script_dir="$(cd "$(dirname "$0")" && pwd)" || exit 1
cd "$script_dir" || exit 1

# Note the Repast Simphony directories.
REPAST_SIMPHONY_ROOT="$script_dir/repast.simphony/repast.simphony.runtime_${REPAST_VERSION}"
REPAST_SIMPHONY_LIB="$REPAST_SIMPHONY_ROOT/lib"

# Define the core Repast Simphony directories and JARs.
CP="$CP:$REPAST_SIMPHONY_ROOT/bin"
CP="$CP:$REPAST_SIMPHONY_LIB/saf.core.runtime.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/commons-logging-1.1.2.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/javassist-3.17.1-GA.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/jpf.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/jpf-boot.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/log4j-1.2.16.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/xpp3_min-1.1.4c.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/xstream-1.4.7.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/xmlpull-1.1.3.1.jar"
CP="$CP:$REPAST_SIMPHONY_LIB/commons-cli-1.2.jar"
CP="$CP:$script_dir/groovylib/$Groovy_All_Jar"

# Change to the default model directory and start the model (as in the
# original, a failed cd is left to surface via the java launch).
cd Heatbugs
java -Xss10M -Xmx400M -cp "$CP" repast.simphony.runtime.RepastMain ./Heatbugs.rs
|
def group_by_property(array, property):
    """Group a sequence of mappings by the value of a given key.

    Args:
        array: iterable of dict-like objects, each containing ``property``.
        property: key whose value determines each object's group.

    Returns:
        dict mapping each distinct property value to the list of objects
        carrying it, preserving first-seen order of keys and members.

    Raises:
        KeyError: if an object lacks ``property``.
    """
    results = {}
    for obj in array:
        # setdefault creates the bucket on first sight of a value and
        # returns the existing list thereafter (replaces the manual
        # "if value not in results" dance).
        results.setdefault(obj[property], []).append(obj)
    return results
<reponame>lgoldstein/communitychest
/*
*
*/
package net.community.chest.jfree.jcommon.util;
import java.util.NoSuchElementException;
import org.jfree.util.Rotation;
import net.community.chest.dom.AbstractXmlValueStringInstantiator;
import net.community.chest.lang.StringUtil;
/**
 * <P>Copyright 2008 as per GPLv2</P>
 *
 * Maps JFree {@link Rotation} values to/from their {@link RotationType}
 * string names for XML (de)serialization.
 *
 * @author <NAME>.
 * @since Feb 1, 2009 2:52:58 PM
 */
public class RotationValueStringInstantiator extends AbstractXmlValueStringInstantiator<Rotation> {
    public RotationValueStringInstantiator ()
    {
        super(Rotation.class);
    }
    /*
     * @see net.community.chest.convert.ValueStringInstantiator#convertInstance(java.lang.Object)
     */
    @Override
    public String convertInstance (Rotation inst) throws Exception
    {
        if (null == inst)
            return null;

        final RotationType rt=RotationType.fromRotation(inst);
        if (null == rt)    // fixed message typo: "uknown" -> "unknown"
            throw new NoSuchElementException("convertInstance(" + inst + ") unknown value");
        return rt.toString();
    }
    /*
     * @see net.community.chest.convert.ValueStringInstantiator#newInstance(java.lang.String)
     */
    @Override
    public Rotation newInstance (String v) throws Exception
    {
        final String s=StringUtil.getCleanStringValue(v);
        if ((null == s) || (s.length() <= 0))
            return null;    // blank input maps to "no value", not an error

        final RotationType rt=RotationType.fromString(s);
        if (null == rt)    // fixed message typo: "uknown" -> "unknown"
            throw new NoSuchElementException("newInstance(" + s + ") unknown value");
        return rt.getRotation();
    }

    /** Shared stateless instance. */
    public static final RotationValueStringInstantiator DEFAULT=new RotationValueStringInstantiator();
}
|
<gh_stars>0
package com.udacity.jdnd.course3.critter.controllers;
import com.udacity.jdnd.course3.critter.dto.CustomerDTO;
import com.udacity.jdnd.course3.critter.dto.EmployeeDTO;
import com.udacity.jdnd.course3.critter.dto.EmployeeRequestDTO;
import com.udacity.jdnd.course3.critter.entities.Customer;
import com.udacity.jdnd.course3.critter.entities.Employee;
import com.udacity.jdnd.course3.critter.entities.Pet;
import com.udacity.jdnd.course3.critter.exceptions.PetNotFoundException;
import com.udacity.jdnd.course3.critter.services.CustomerService;
import com.udacity.jdnd.course3.critter.services.EmployeeService;
import com.udacity.jdnd.course3.critter.services.PetService;
import com.udacity.jdnd.course3.critter.enums.EmployeeSkill;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import java.time.DayOfWeek;
import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Handles web requests related to Users.
 *
 * Includes requests for both customers and employees. Splitting this into separate user and customer controllers
 * would be fine too, though that is not part of the required scope for this class.
 */
@RestController
@RequestMapping("/user")
public class UserController {

    @Autowired
    private CustomerService customerService;

    @Autowired
    private EmployeeService employeeService;

    @Autowired
    private PetService petService;

    /** Persists a customer (resolving any referenced pet ids) and echoes the saved entity. */
    @PostMapping("/customer")
    public CustomerDTO saveCustomer(@RequestBody CustomerDTO customerDTO){
        Customer customer = convertToCustomer(customerDTO);
        Long savedId = customerService.saveCustomer(customer);
        return convertToCustomerDTO(customerService.getOne(savedId));
    }

    /** Lists all customers as DTOs. */
    @GetMapping("/customer")
    public List<CustomerDTO> getAllCustomers(){
        return this.customerService.getAllCustomers().stream()
                .map(this::convertToCustomerDTO)
                .collect(Collectors.toList());
    }

    /** Looks up the owner of the given pet. */
    @GetMapping("/customer/pet/{petId}")
    public CustomerDTO getOwnerByPet(@PathVariable long petId){
        Pet pet = this.petService.findPetById(petId);
        return this.convertToCustomerDTO(pet.getOwner());
    }

    /** Persists an employee and echoes the saved entity. */
    @PostMapping("/employee")
    public EmployeeDTO saveEmployee(@RequestBody EmployeeDTO employeeDTO) {
        Employee employee = convertToEmployee(employeeDTO);
        Long saved = employeeService.saveEmployee(employee);
        return convertToEmployeeDTO(this.employeeService.findById(saved));
    }

    /** Fetches a single employee by id. */
    @GetMapping("/employee/{employeeId}")
    public EmployeeDTO getEmployee(@PathVariable long employeeId) {
        return this.convertToEmployeeDTO(this.employeeService.findById(employeeId));
    }

    /** Replaces an employee's weekly availability. */
    @PutMapping("/employee/{employeeId}")
    public void setAvailability(@RequestBody Set<DayOfWeek> daysAvailable, @PathVariable long employeeId) {
        Employee employee = this.employeeService.findById(employeeId);
        employee.setDaysAvailable(daysAvailable);
        this.employeeService.saveEmployee(employee);
    }

    /** Finds employees with the requested skills who are available on the requested date. */
    @GetMapping("/employee/availability")
    public List<EmployeeDTO> findEmployeesForService(@RequestBody EmployeeRequestDTO employeeDTO) {
        // Fixed: the original extracted these locals and then ignored them,
        // invoking the DTO getters a second time in the service call.
        Set<EmployeeSkill> skills = employeeDTO.getSkills();
        LocalDate availableDay = employeeDTO.getDate();
        List<Employee> employees = employeeService.findEmployeesForService(skills, availableDay);
        return employees.stream().map(this::convertToEmployeeDTO).collect(Collectors.toList());
    }

    /** DTO -> entity; resolves pet ids to Pet references when present. */
    private Customer convertToCustomer(CustomerDTO customerDTO){
        Customer customer = new Customer();
        BeanUtils.copyProperties(customerDTO, customer);
        List<Long> petIds = customerDTO.getPetIds();
        if (petIds != null) {
            customer.setPets(petIds.stream().map(id -> petService.getOne(id)).collect(Collectors.toList()));
        }
        return customer;
    }

    /** DTO -> entity (flat property copy). */
    private Employee convertToEmployee(EmployeeDTO employeeDTO){
        Employee employee = new Employee();
        BeanUtils.copyProperties(employeeDTO, employee);
        return employee;
    }

    /** Entity -> DTO; flattens the customer's pets to their ids. */
    private CustomerDTO convertToCustomerDTO(Customer customer){
        CustomerDTO dto = new CustomerDTO();
        BeanUtils.copyProperties(customer, dto);
        List<Pet> pets = customer.getPets();
        if (pets != null) {
            dto.setPetIds(pets.stream().map(Pet::getId).collect(Collectors.toList()));
        }
        return dto;
    }

    /** Entity -> DTO (flat property copy). */
    private EmployeeDTO convertToEmployeeDTO(Employee employee){
        EmployeeDTO dto = new EmployeeDTO();
        BeanUtils.copyProperties(employee, dto);
        return dto;
    }
}
|
<reponame>NithinBiliya/ipo-calculator
var app = angular.module('ipo-calculator', []);

app.controller('MainCtrl', function($scope) {
    // Defaults / constants.
    $scope.hniMinInvestmentLimit = 200000;  // HNI category minimum application value (INR)
    $scope.daysInYear = 365;
    $scope.loanInvestmentAmount = 0;
    $scope.loanInterestRate = 7;            // % per annum -- default, user-editable
    $scope.daysForCashRedemption = 5;       // days until unallotted cash is refunded
    $scope.daysForListing = 11;             // days from application to listing

    // Computes allotted shares, interest cost on the borrowed portion, and
    // the net listing-day profit.
    $scope.calculate = function() {
        $scope.sharesApplied = $scope.investmentAmount / $scope.issuePrice;
        // Oversubscription (>1x) scales the allocation down proportionally.
        if ($scope.niiSubscription > 1)
            $scope.sharesAllocated = Math.floor($scope.sharesApplied / $scope.niiSubscription);
        else
            $scope.sharesAllocated = $scope.sharesApplied;
        // Interest on the full loan until unallotted cash comes back...
        $scope.interestTillCashRedemption = $scope.loanInvestmentAmount * ($scope.loanInterestRate * $scope.daysForCashRedemption / $scope.daysInYear) / 100;
        // ...then on the remaining (allotted) loan portion until listing.
        $scope.interestTillListing = ($scope.loanInvestmentAmount - (($scope.sharesApplied - $scope.sharesAllocated) * $scope.issuePrice)) * ($scope.loanInterestRate * ($scope.daysForListing - $scope.daysForCashRedemption) / $scope.daysInYear) / 100;
        if ($scope.interestTillListing < 0) {
            $scope.interestTillListing = 0;
        }
        $scope.totalProfit = $scope.sharesAllocated * $scope.listingPrice - $scope.sharesAllocated * $scope.issuePrice - $scope.interestTillCashRedemption - $scope.interestTillListing;
    };

    // Toggles between "apply exactly the HNI minimum" and manual entry.
    $scope.changeSelect = function() {
        if ($scope.selectMinValue === true) {
            // Fixed: reuse calculateMinAmount() instead of duplicating its
            // three formulas inline (the original repeated them verbatim).
            $scope.calculateMinAmount();
            $scope.investmentAmount = $scope.minInvestmentAmount;
            $scope.loanInvestmentAmount = 0;
            $scope.personalInvestmentAmount = $scope.investmentAmount;
        } else {
            $scope.investmentAmount = "";
        }
    };

    // Smallest whole-lot application that satisfies the HNI minimum.
    $scope.calculateMinAmount = function() {
        $scope.minLotsToApply = Math.ceil($scope.hniMinInvestmentLimit / ($scope.issuePrice * $scope.lotSize));
        $scope.minSharesApplied = $scope.minLotsToApply * $scope.lotSize;
        $scope.minInvestmentAmount = $scope.minSharesApplied * $scope.issuePrice;
    };

    // Keeps loan + personal contributions summing to the total investment.
    $scope.investmentCalculation = function(field) {
        if (field === "loan") {
            $scope.personalInvestmentAmount = $scope.investmentAmount - $scope.loanInvestmentAmount;
        } else {
            $scope.loanInvestmentAmount = $scope.investmentAmount - $scope.personalInvestmentAmount;
        }
    };
});
|
const parseCsv = require("../src/parseCsv");
const readCsv = require("../src/readCsv");
// Happy path: a minimal two-column CSV parses with no errors reported.
test("Parses a CSV string", async () => {
  const parsed = await parseCsv("name,age\nJohn,30");
  expect(parsed.errors.length).toBe(0);
});

// End-to-end: read a fixture from disk, then parse its contents.
test("Parses a read file", async () => {
  const data = await readCsv(`${__dirname}/../sample-data/simple.csv`);
  const parsed = await parseCsv(data);
  expect(parsed.errors.length).toBe(0);
});

// Field-count mismatches are surfaced as rejections, not silent rows.
test("Throws an error on row with too few fields.", async () => {
  await expect(parseCsv("name,age,salary\nJohn,30")).rejects.toThrow(
    "Parse error: Row 2: Too few fields: expected 3 fields but parsed 2"
  );
});

test("Throws an error on row with too many fields", async () => {
  await expect(parseCsv("name,age\nJohn,30,75000")).rejects.toThrow(
    "Parse error: Row 2: Too many fields: expected 2 fields but parsed 3"
  );
});

// Empty and space-separated input cannot yield a delimiter guess.
test("Throws an error when delimiter cannot be detected", async () => {
  await expect(parseCsv("")).rejects.toThrow(
    "Parse error: Unable to auto-detect delimiting character; defaulted to ','"
  );
  await expect(parseCsv("name age\nJohn 30")).rejects.toThrow(
    "Parse error: Unable to auto-detect delimiting character; defaulted to ','"
  );
});

// Unbalanced or misplaced quotes produce descriptive (possibly multiple) errors.
test("Throws an error when quoted fields are malformed", async () => {
  await expect(parseCsv('a,"b,c\nd,e,f')).rejects.toThrow(
    "Parse error: Row 2: Quoted field unterminated"
  );
  await expect(parseCsv('a,"b,"c\nd,e,f')).rejects.toThrow(
    "Parse errors:\n - Row 2: Trailing quote on quoted field is malformed\n - Row 2: Quoted field unterminated"
  );
  await expect(parseCsv('a,"b"c,d\ne,f,g')).rejects.toThrow(
    "Parse errors:\n - Row 2: Trailing quote on quoted field is malformed\n - Row 2: Quoted field unterminated"
  );
  await expect(parseCsv('a,"b,c\nd"e,f,g')).rejects.toThrow(
    "Parse errors:\n - Row 2: Trailing quote on quoted field is malformed\n - Row 2: Quoted field unterminated"
  );
});
|
#!/usr/bin/env zsh
#
# zsh-async
#
# version: 1.5.2
# author: Mathias Fredriksson
# url: https://github.com/mafredri/zsh-async
#
# Produce debug output from zsh-async when set to 1.
typeset -g ASYNC_DEBUG=${ASYNC_DEBUG:-0}
# _async_job <job_name> [args...]
# Wrapper for jobs executed by the async worker; gives output in a parseable
# format with execution time: "<job_name> <ret> <stdout> <duration> <stderr>\0".
# A single-byte token read from the worker's coproc acts as a write mutex so
# concurrent jobs cannot interleave their output.
_async_job() {
	# Disable xtrace as it would mangle the output.
	setopt localoptions noxtrace

	# Store start time for job.
	float -F duration=$EPOCHREALTIME

	# Run the command and capture both stdout (`eval`) and stderr (`cat`) in
	# separate subshells. When the command is complete, we grab write lock
	# (mutex token) and output everything except stderr inside the command
	# block, after the command block has completed, the stdin for `cat` is
	# closed, causing stderr to be appended with a $'\0' at the end to mark the
	# end of output from this job.
	local stdout stderr ret tok
	{
		stdout=$(eval "$@")
		ret=$?
		duration=$(( EPOCHREALTIME - duration )) # Calculate duration.

		# Grab mutex lock, stalls until token is available.
		read -r -k 1 -p tok || exit 1

		# Return output (<job_name> <return_code> <stdout> <duration> <stderr>).
		print -r -n - ${(q)1} $ret ${(q)stdout} $duration
	} 2> >(stderr=$(cat) && print -r -n - " "${(q)stderr}$'\0')

	# Unlock mutex by inserting a token (frees the next waiting job's read).
	print -n -p $tok
}
# The background worker manages all tasks and runs them without interfering
# with other processes. Runs inside a zpty created by async_start_worker and
# reads NULL-terminated job requests from its stdin.
_async_worker() {
	# Reset all options to defaults inside async worker.
	emulate -R zsh

	# Make sure monitor is unset to avoid printing the
	# pids of child processes.
	unsetopt monitor

	# Redirect stderr to `/dev/null` in case unforeseen errors produced by the
	# worker. For example: `fork failed: resource temporarily unavailable`.
	# Some older versions of zsh might also print malloc errors (known to happen
	# on at least zsh 5.0.2 and 5.0.8) likely due to kill signals.
	exec 2>/dev/null

	# When a zpty is deleted (using -d) all the zpty instances created before
	# the one being deleted receive a SIGHUP, unless we catch it, the async
	# worker would simply exit (stop working) even though visible in the list
	# of zpty's (zpty -L).
	TRAPHUP() {
		return 0  # Return 0, indicating signal was handled.
	}

	local -A storage
	local unique=0
	local notify_parent=0
	local parent_pid=0
	local coproc_pid=0
	local processing=0

	local -a zsh_hooks zsh_hook_functions
	zsh_hooks=(chpwd periodic precmd preexec zshexit zshaddhistory)
	zsh_hook_functions=(${^zsh_hooks}_functions)
	unfunction $zsh_hooks &>/dev/null   # Deactivate all zsh hooks inside the worker.
	unset $zsh_hook_functions           # And hooks with registered functions.
	unset zsh_hooks zsh_hook_functions  # Cleanup.

	# SIGCHLD handler: closes the idle coproc when it is the only remaining
	# child and optionally notifies the parent shell that results are ready.
	child_exit() {
		local -a pids
		pids=(${${(v)jobstates##*:*:}%\=*})

		# If coproc (cat) is the only child running, we close it to avoid
		# leaving it running indefinitely and cluttering the process tree.
		if (( ! processing )) && [[ $#pids = 1 ]] && [[ $coproc_pid = $pids[1] ]]; then
			coproc :
			coproc_pid=0
		fi

		# On older version of zsh (pre 5.2) we notify the parent through a
		# SIGWINCH signal because `zpty` did not return a file descriptor (fd)
		# prior to that.
		if (( notify_parent )); then
			# We use SIGWINCH for compatibility with older versions of zsh
			# (pre 5.1.1) where other signals (INFO, ALRM, USR1, etc.) could
			# cause a deadlock in the shell under certain circumstances.
			kill -WINCH $parent_pid
		fi
	}

	# Register a SIGCHLD trap to handle the completion of child processes.
	trap child_exit CHLD

	# Process option parameters passed to worker.
	while getopts "np:u" opt; do
		case $opt in
			n) notify_parent=1;;
			p) parent_pid=$OPTARG;;
			u) unique=1;;
		esac
	done

	# Terminate all running jobs (triggered by the _killjobs request) while
	# holding the output mutex so no half-written result is left behind.
	killjobs() {
		local tok
		local -a pids
		pids=(${${(v)jobstates##*:*:}%\=*})

		# No need to send SIGHUP if no jobs are running.
		(( $#pids == 0 )) && continue
		(( $#pids == 1 )) && [[ $coproc_pid = $pids[1] ]] && continue

		# Grab lock to prevent half-written output in case a child
		# process is in the middle of writing to stdin during kill.
		(( coproc_pid )) && read -r -k 1 -p tok

		kill -HUP -$$  # Send to entire process group.
		coproc :       # Quit coproc.
		coproc_pid=0   # Reset pid.
	}

	local request
	local -a cmd
	while :; do
		# Wait for jobs sent by async_job.
		read -r -d $'\0' request || {
			# Since we handle SIGHUP above (and thus do not know when `zpty -d`)
			# occurs, a failure to read probably indicates that stdin has
			# closed. This is why we propagate the signal to all children and
			# exit manually.
			kill -HUP -$$  # Send SIGHUP to all jobs.
			exit 0
		}

		# Check for non-job commands sent to worker.
		case $request in
			_unset_trap) notify_parent=0; continue;;
			_killjobs) killjobs; continue;;
		esac

		# Parse the request using shell parsing (z) to allow commands
		# to be parsed from single strings and multi-args alike.
		cmd=("${(z)request}")

		# Name of the job (first argument).
		local job=$cmd[1]

		# If worker should perform unique jobs.
		if (( unique )); then
			# Check if a previous job is still running, if yes, let it finish.
			for pid in ${${(v)jobstates##*:*:}%\=*}; do
				if [[ ${storage[$job]} == $pid ]]; then
					continue 2
				fi
			done
		fi

		# Guard against closing coproc from trap before command has started.
		processing=1

		# Because we close the coproc after the last job has completed, we must
		# recreate it when there are no other jobs running.
		if (( ! coproc_pid )); then
			# Use coproc as a mutex for synchronized output between children.
			coproc cat
			coproc_pid="$!"
			# Insert token into coproc.
			print -n -p "t"
		fi

		# Run job in background, completed jobs are printed to stdout.
		_async_job $cmd &
		# Store pid because zsh job manager is extremely inflexible (shows jobname as non-unique '$job')...
		storage[$job]="$!"

		processing=0  # Disable guard.
	done
}
#
# Get results from finished jobs and pass them to the callback function. This is the only way to reliably return the
# job name, return code, output and execution time and with minimal effort.
#
# usage:
# 	async_process_results <worker_name> <callback_function>
#
# callback_function is called with the following parameters:
# 	$1 = job name, e.g. the function passed to async_job
# 	$2 = return code
# 	$3 = resulting stdout from execution
# 	$4 = execution time, floating point e.g. 2.05 seconds
# 	$5 = resulting stderr from execution
#
async_process_results() {
	setopt localoptions unset noshwordsplit noksharrays noposixidentifiers noposixstrings

	local worker=$1
	local callback=$2
	local caller=$3
	local -a items
	local null=$'\0' data
	integer -l len pos num_processed

	# Per-worker buffer of raw zpty output awaiting a NULL delimiter.
	typeset -gA ASYNC_PROCESS_BUFFER

	# Read output from zpty and parse it if available.
	while zpty -r -t $worker data 2>/dev/null; do
		ASYNC_PROCESS_BUFFER[$worker]+=$data
		len=${#ASYNC_PROCESS_BUFFER[$worker]}
		pos=${ASYNC_PROCESS_BUFFER[$worker][(i)$null]}  # Get index of NULL-character (delimiter).

		# Keep going until we find a NULL-character.
		if (( ! len )) || (( pos > len )); then
			continue
		fi

		while (( pos <= len )); do
			# Take the content from the beginning, until the NULL-character and
			# perform shell parsing (z) and unquoting (Q) as an array (@).
			items=("${(@Q)${(z)ASYNC_PROCESS_BUFFER[$worker][1,$pos-1]}}")

			# Remove the extracted items from the buffer.
			ASYNC_PROCESS_BUFFER[$worker]=${ASYNC_PROCESS_BUFFER[$worker][$pos+1,$len]}

			if (( $#items == 5 )); then
				$callback "${(@)items}"  # Send all parsed items to the callback.
			else
				# In case of corrupt data, invoke callback with *async* as job
				# name, non-zero exit status and an error message on stderr.
				$callback "async" 1 "" 0 "$0:$LINENO: error: bad format, got ${#items} items (${(@q)items})"
			fi

			(( num_processed++ ))

			len=${#ASYNC_PROCESS_BUFFER[$worker]}
			if (( len > 1 )); then
				pos=${ASYNC_PROCESS_BUFFER[$worker][(i)$null]}  # Get index of NULL-character (delimiter).
			fi
		done
	done

	(( num_processed )) && return 0

	# Avoid printing exit value when `setopt printexitvalue` is active.
	[[ $caller = trap || $caller = watcher ]] && return 0

	# No results were processed.
	return 1
}
# Watch worker for output. Registered via `zle -F <fd>` by async_start_worker;
# $1 is the file descriptor that became readable, which maps to a worker.
_async_zle_watcher() {
	setopt localoptions noshwordsplit
	typeset -gA ASYNC_PTYS ASYNC_CALLBACKS
	local worker=$ASYNC_PTYS[$1]
	local callback=$ASYNC_CALLBACKS[$worker]

	# Only process results when a callback has been registered for the worker.
	if [[ -n $callback ]]; then
		async_process_results $worker $callback watcher
	fi
}
#
# Start a new asynchronous job on specified worker, assumes the worker is running.
#
# usage:
# 	async_job <worker_name> <my_function> [<function_params>]
#
async_job() {
	setopt localoptions noshwordsplit noksharrays noposixidentifiers noposixstrings

	local worker=$1; shift

	local -a cmd
	cmd=("$@")
	if (( $#cmd > 1 )); then
		cmd=(${(q)cmd})  # Quote special characters in multi argument commands.
	fi

	# Quote the cmd in case RC_EXPAND_PARAM is set; the trailing NULL marks
	# the end of the request for the worker's read loop.
	zpty -w $worker "$cmd"$'\0'
}
# This function traps notification signals (SIGWINCH) and calls all
# registered callbacks; used when the ZLE watcher is unavailable.
_async_notify_trap() {
	setopt localoptions noshwordsplit

	local k
	for k in ${(k)ASYNC_CALLBACKS}; do
		async_process_results $k ${ASYNC_CALLBACKS[$k]} trap
	done
}
#
# Register a callback for completed jobs. As soon as a job is finished, async_process_results will be called with the
# specified callback function. This requires that a worker is initialized with the -n (notify) option.
#
# usage:
# 	async_register_callback <worker_name> <callback_function>
#
async_register_callback() {
	setopt localoptions noshwordsplit nolocaltraps

	typeset -gA ASYNC_CALLBACKS
	local worker=$1; shift

	ASYNC_CALLBACKS[$worker]="$*"

	# Enable trap when the ZLE watcher is unavailable, allows
	# workers to notify (via -n) when a job is done.
	if [[ ! -o interactive ]] || [[ ! -o zle ]]; then
		trap '_async_notify_trap' WINCH
	fi
}
#
# Unregister the callback for a specific worker.
#
# usage:
# 	async_unregister_callback <worker_name>
#
async_unregister_callback() {
	typeset -gA ASYNC_CALLBACKS

	unset "ASYNC_CALLBACKS[$1]"
}
#
# Flush all current jobs running on a worker. This will terminate any and all running processes under the worker, use
# with caution.
#
# usage:
# 	async_flush_jobs <worker_name>
#
async_flush_jobs() {
	setopt localoptions noshwordsplit

	local worker=$1; shift

	# Check if the worker exists.
	zpty -t $worker &>/dev/null || return 1

	# Send kill command to worker.
	async_job $worker "_killjobs"

	# Clear the zpty buffer (drain any output already produced by jobs).
	local junk
	if zpty -r -t $worker junk '*'; then
		(( ASYNC_DEBUG )) && print -n "async_flush_jobs $worker: ${(V)junk}"
		while zpty -r -t $worker junk '*'; do
			(( ASYNC_DEBUG )) && print -n "${(V)junk}"
		done
		(( ASYNC_DEBUG )) && print
	fi

	# Finally, clear the process buffer in case of partially parsed responses.
	typeset -gA ASYNC_PROCESS_BUFFER
	unset "ASYNC_PROCESS_BUFFER[$worker]"
}
#
# Start a new async worker with optional parameters, a worker can be told to only run unique tasks and to notify a
# process when tasks are complete.
#
# usage:
# 	async_start_worker <worker_name> [-u] [-n] [-p <pid>]
#
# opts:
# 	-u unique (only unique job names can run)
# 	-n notify through SIGWINCH signal
# 	-p pid to notify (defaults to current pid)
#
async_start_worker() {
	setopt localoptions noshwordsplit

	local worker=$1; shift
	# Worker already running: nothing to do.
	zpty -t $worker &>/dev/null && return

	typeset -gA ASYNC_PTYS
	typeset -h REPLY
	typeset has_xtrace=0

	# Make sure async worker is started without xtrace
	# (the trace output interferes with the worker).
	[[ -o xtrace ]] && {
		has_xtrace=1
		unsetopt xtrace
	}

	if (( ! ASYNC_ZPTY_RETURNS_FD )) && [[ -o interactive ]] && [[ -o zle ]]; then
		# When zpty doesn't return a file descriptor (on older versions of zsh)
		# we try to guess it anyway.
		integer -l zptyfd
		exec {zptyfd}>&1  # Open a new file descriptor (above 10).
		exec {zptyfd}>&-  # Close it so it's free to be used by zpty.
	fi

	zpty -b $worker _async_worker -p $$ $@ || {
		async_stop_worker $worker
		return 1
	}

	# Re-enable it if it was enabled, for debugging.
	(( has_xtrace )) && setopt xtrace

	if [[ $ZSH_VERSION < 5.0.8 ]]; then
		# For ZSH versions older than 5.0.8 we delay a bit to give
		# time for the worker to start before issuing commands,
		# otherwise it will not be ready to receive them.
		sleep 0.001
	fi

	if [[ -o interactive ]] && [[ -o zle ]]; then
		if (( ! ASYNC_ZPTY_RETURNS_FD )); then
			REPLY=$zptyfd  # Use the guessed value for the file descriptor.
		fi

		ASYNC_PTYS[$REPLY]=$worker        # Map the file descriptor to the worker.
		zle -F $REPLY _async_zle_watcher  # Register the ZLE handler.

		# Disable trap in favor of ZLE handler when notify is enabled (-n).
		async_job $worker _unset_trap
	fi
}
#
# Stop one or multiple workers that are running, all unfetched and incomplete work will be lost.
#
# usage:
# 	async_stop_worker <worker_name_1> [<worker_name_2>]
#
async_stop_worker() {
	setopt localoptions noshwordsplit

	local ret=0 worker k v
	for worker in $@; do
		# Find and unregister the zle handler for the worker.
		for k v in ${(@kv)ASYNC_PTYS}; do
			if [[ $v == $worker ]]; then
				zle -F $k
				unset "ASYNC_PTYS[$k]"
			fi
		done
		async_unregister_callback $worker
		# Delete the worker's zpty; remember the failure but keep going so the
		# remaining workers are still stopped.
		zpty -d $worker 2>/dev/null || ret=$?

		# Clear any partial buffers.
		typeset -gA ASYNC_PROCESS_BUFFER
		unset "ASYNC_PROCESS_BUFFER[$worker]"
	done

	return $ret
}
#
# Initialize the required modules for zsh-async. To be called before using the zsh-async library.
#
# usage:
# 	async_init
#
async_init() {
	(( ASYNC_INIT_DONE )) && return
	typeset -g ASYNC_INIT_DONE=1

	zmodload zsh/zpty      # Pseudo-terminals for the background workers.
	zmodload zsh/datetime  # Provides EPOCHREALTIME for job timing.

	# Check if zsh/zpty returns a file descriptor or not,
	# shell must also be interactive with zle enabled.
	typeset -g ASYNC_ZPTY_RETURNS_FD=0
	[[ -o interactive ]] && [[ -o zle ]] && {
		typeset -h REPLY
		zpty _async_test :
		(( REPLY )) && ASYNC_ZPTY_RETURNS_FD=1
		zpty -d _async_test
	}
}
# Entry point when the file is sourced or executed directly: just initialize.
async() {
	async_init
}

async "$@"
|
<filename>tapestry-core/src/test/java/org/apache/tapestry5/integration/app1/base/ParameterBaseClass.java
package org.apache.tapestry5.integration.app1.base;
import org.apache.tapestry5.annotations.Parameter;
/**
 * Base class declaring a {@link Parameter} field; part of the integration
 * test app (package {@code integration.app1.base}), presumably used to
 * verify parameter inheritance in subclasses — confirm against the tests.
 */
public abstract class ParameterBaseClass
{
    // Bound by Tapestry via the @Parameter annotation; not accessed directly here.
    @Parameter
    private String value;
}
|
# Re-create the test database and load a SQL fixture into it.
#
# Usage: <script> <fixture-name>
#   Loads tests/_data/<fixture-name>.sql into a freshly re-created
#   codecepty-symphonycms-db database.
#
# Fixes: usage errors now go to stderr and exit non-zero (the original used a
# bare `exit`, which reported success); $1 is quoted; the fixture file is
# checked before the database is dropped.
if [ "$#" -lt 1 ]; then
  echo "No file specified" >&2
  exit 1
fi
if [ ! -f "tests/_data/$1.sql" ]; then
  echo "Fixture tests/_data/$1.sql not found" >&2
  exit 1
fi
mysqladmin -u root drop codecepty-symphonycms-db -f
mysqladmin -u root create codecepty-symphonycms-db
mysql -u root codecepty-symphonycms-db < "tests/_data/$1.sql"
|
<reponame>lananh265/social-network
"use strict";

// Generated icon definition (react-icons-kit style) for the Material
// "backup table" glyph: an SVG viewBox plus a nested element tree.
// NOTE(review): each rect/path carries an identical nested duplicate —
// this looks like generator output; do not hand-edit the data below.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.ic_backup_table = void 0;
var ic_backup_table = {
  "viewBox": "0 0 24 24",
  "children": [{
    "name": "g",
    "attribs": {},
    "children": [{
      "name": "rect",
      "attribs": {
        "fill": "none",
        "height": "24",
        "width": "24"
      },
      "children": [{
        "name": "rect",
        "attribs": {
          "fill": "none",
          "height": "24",
          "width": "24"
        },
        "children": []
      }]
    }]
  }, {
    "name": "g",
    "attribs": {},
    "children": [{
      "name": "g",
      "attribs": {},
      "children": [{
        "name": "g",
        "attribs": {},
        "children": [{
          "name": "path",
          "attribs": {
            "d": "M20,6v14H6v2h14c1.1,0,2-0.9,2-2V6H20z"
          },
          "children": [{
            "name": "path",
            "attribs": {
              "d": "M20,6v14H6v2h14c1.1,0,2-0.9,2-2V6H20z"
            },
            "children": []
          }]
        }, {
          "name": "path",
          "attribs": {
            "d": "M16,2H4C2.9,2,2,2.9,2,4v12c0,1.1,0.9,2,2,2h12c1.1,0,2-0.9,2-2V4C18,2.9,17.1,2,16,2z M9,16H4v-5h5V16z M16,16h-5v-5h5 V16z M16,9H4V4h12V9z"
          },
          "children": [{
            "name": "path",
            "attribs": {
              "d": "M16,2H4C2.9,2,2,2.9,2,4v12c0,1.1,0.9,2,2,2h12c1.1,0,2-0.9,2-2V4C18,2.9,17.1,2,16,2z M9,16H4v-5h5V16z M16,16h-5v-5h5 V16z M16,9H4V4h12V9z"
            },
            "children": []
          }]
        }]
      }]
    }]
  }]
};
exports.ic_backup_table = ic_backup_table; |
package com.emc.mongoose.base.storage.driver;
import static com.emc.mongoose.base.Constants.KEY_CLASS_NAME;
import static com.emc.mongoose.base.Constants.KEY_STEP_ID;
import com.emc.mongoose.base.concurrent.DaemonBase;
import com.emc.mongoose.base.data.DataInput;
import com.emc.mongoose.base.config.IllegalConfigurationException;
import com.emc.mongoose.base.item.Item;
import com.emc.mongoose.base.item.op.Operation;
import com.emc.mongoose.base.item.op.data.DataOperation;
import com.emc.mongoose.base.logging.Loggers;
import com.emc.mongoose.base.storage.Credential;
import com.github.akurilov.commons.concurrent.ThreadUtil;
import com.github.akurilov.commons.io.Input;
import com.github.akurilov.confuse.Config;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import org.apache.logging.log4j.CloseableThreadContext;
/** Created by kurila on 11.07.16. */
/**
 * Created by kurila on 11.07.16.
 *
 * Common base for storage drivers: reads driver/auth configuration, sizes the
 * I/O worker pool, prepares operations (payload input, credentials, paths)
 * and buffers completed operation results until the consumer drains them.
 */
public abstract class StorageDriverBase<I extends Item, O extends Operation<I>> extends DaemonBase
    implements StorageDriver<I, O> {

  // Payload data source attached to data operations in prepare(...).
  private final DataInput itemDataInput;
  protected final String stepId;
  // Completed operation results buffered until drained via get(...)/skip(...).
  private final BlockingQueue<O> opsResultsQueue;
  protected final int concurrencyLimit;
  protected final int ioWorkerCount;
  protected final String namespace;
  protected final Credential credential;
  protected final boolean verifyFlag;
  // Destination path -> credential used for that path.
  protected final ConcurrentMap<String, Credential> pathToCredMap = new ConcurrentHashMap<>(1);
  // Destination paths already resolved via requestNewPath(...).
  private final ConcurrentMap<String, String> pathMap = new ConcurrentHashMap<>(1);
  protected Function<String, String> requestNewPathFunc = this::requestNewPath;
  // Credential -> cached auth token, populated lazily in prepare(...).
  protected final ConcurrentMap<Credential, String> authTokens = new ConcurrentHashMap<>(1);
  protected Function<Credential, String> requestAuthTokenFunc = this::requestNewAuthToken;

  /**
   * Reads the "driver", "driver.limit" and "auth" sections of the storage
   * config, sizes the results queue and determines the I/O worker count.
   *
   * @param stepId load step identifier, used as logging context
   * @param itemDataInput payload source attached to data operations
   * @param storageConfig storage subtree of the configuration
   * @param verifyFlag whether read content should be verified
   * @throws IllegalConfigurationException on invalid configuration values
   */
  protected StorageDriverBase(
      final String stepId,
      final DataInput itemDataInput,
      final Config storageConfig,
      final boolean verifyFlag)
      throws IllegalConfigurationException {
    this.itemDataInput = itemDataInput;
    final Config driverConfig = storageConfig.configVal("driver");
    final Config limitConfig = driverConfig.configVal("limit");
    final int outputQueueCapacity = limitConfig.intVal("queue-output");
    this.opsResultsQueue = new ArrayBlockingQueue<>(outputQueueCapacity);
    this.stepId = stepId;
    this.namespace = storageConfig.stringVal("namespace");
    final Config authConfig = storageConfig.configVal("auth");
    this.credential = Credential.getInstance(authConfig.stringVal("uid"), authConfig.stringVal("secret"));
    final String authToken = authConfig.stringVal("token");
    if (authToken != null) {
      // A token configured without a credential is stored under Credential.NONE.
      if (this.credential == null) {
        this.authTokens.put(Credential.NONE, authToken);
      } else {
        this.authTokens.put(credential, authToken);
      }
    }
    this.concurrencyLimit = limitConfig.intVal("concurrency");
    this.verifyFlag = verifyFlag;
    // Worker count: explicit "threads" setting wins; otherwise bounded by the
    // concurrency limit; otherwise one worker per hardware thread.
    final int confWorkerCount = driverConfig.intVal("threads");
    if (confWorkerCount > 0) {
      ioWorkerCount = confWorkerCount;
    } else if (concurrencyLimit > 0) {
      ioWorkerCount = Math.min(concurrencyLimit, ThreadUtil.getHardwareThreadCount());
    } else {
      ioWorkerCount = ThreadUtil.getHardwareThreadCount();
    }
  }

  /** Resolves/creates the given destination path on the storage side. */
  protected abstract String requestNewPath(final String path);

  /** Obtains an auth token for the given credential from the storage side. */
  protected abstract String requestNewAuthToken(final Credential credential);

  /**
   * Resets the operation and lazily resolves its auth token and destination
   * path before execution.
   *
   * @return always true; note that a failed path resolution only marks the
   * operation with FAIL_UNKNOWN — the "return false" is deliberately
   * commented out.
   */
  protected boolean prepare(final O op) {
    op.reset();
    if (op instanceof DataOperation) {
      ((DataOperation) op).item().dataInput(itemDataInput);
    }
    final String dstPath = op.dstPath();
    final Credential credential = op.credential();
    if (credential != null) {
      pathToCredMap.putIfAbsent(dstPath == null ? "" : dstPath, credential);
      // Materialize the auth token once per credential.
      if (requestAuthTokenFunc != null) {
        authTokens.computeIfAbsent(credential, requestAuthTokenFunc);
      }
    }
    if (requestNewPathFunc != null) {
      // NOTE: in the distributed mode null dstPath becomes empty one
      if (dstPath != null && !dstPath.isEmpty()) {
        if (null == pathMap.computeIfAbsent(dstPath, requestNewPathFunc)) {
          Loggers.ERR.debug("Failed to compute the destination path for the operation: {}", op);
          op.status(Operation.Status.FAIL_UNKNOWN);
          // return false;
        }
      }
    }
    return true;
  }

  /**
   * Buffers the completed operation's result for the consumer.
   *
   * @return false when the driver is stopped or the results queue is full
   * (the result is dropped and an error is logged), true otherwise
   */
  protected boolean handleCompleted(final O op) {
    if (isStopped()) {
      return false;
    } else {
      if (Loggers.MSG.isTraceEnabled()) {
        Loggers.MSG.trace("{}: Load operation completed", op);
      }
      final O opResult = op.result();
      if (opsResultsQueue.offer(opResult)) {
        return true;
      } else {
        Loggers.ERR.error(
            "{}: Load operations results queue overflow, dropping the result", toString());
        return false;
      }
    }
  }

  @Override
  public final int concurrencyLimit() {
    return concurrencyLimit;
  }

  /** @return the next buffered result, or null when none is available. */
  @Override
  public final O get() {
    return opsResultsQueue.poll();
  }

  /** Drains up to {@code limit} buffered results into {@code buffer}. */
  @Override
  public final int get(final List<O> buffer, final int limit) {
    return opsResultsQueue.drainTo(buffer, limit);
  }

  /**
   * Discards up to {@code count} buffered results by draining them into a
   * throw-away list; returns the number actually discarded.
   */
  @Override
  public final long skip(final long count) {
    int n = (int) Math.min(count, Integer.MAX_VALUE);
    final List<O> tmpBuff = new ArrayList<>(n);
    n = opsResultsQueue.drainTo(tmpBuff, n);
    tmpBuff.clear();
    return n;
  }

  @Override
  public final boolean hasRemainingResults() {
    return !opsResultsQueue.isEmpty();
  }

  @Override
  public Input<O> getInput() {
    return this;
  }

  /**
   * Closes the payload input and clears all internal state, warning about
   * any results that were never fetched by the consumer.
   */
  @Override
  protected void doClose() throws IOException, IllegalStateException {
    try (final CloseableThreadContext.Instance logCtx = CloseableThreadContext.put(KEY_STEP_ID, stepId)
        .put(KEY_CLASS_NAME, StorageDriverBase.class.getSimpleName())) {
      itemDataInput.close();
      final int opResultsQueueSize = opsResultsQueue.size();
      if (opResultsQueueSize > 0) {
        Loggers.ERR.warn(
            "{}: Load operations results queue contains {} unhandled elements",
            toString(),
            opResultsQueueSize);
      }
      opsResultsQueue.clear();
      authTokens.clear();
      pathToCredMap.clear();
      pathMap.clear();
      super.doClose();
      Loggers.MSG.debug("{}: closed", toString());
    }
  }

  @Override
  public String toString() {
    // NOTE(review): contains a literal %s placeholder, presumably filled in
    // by subclasses/callers via String.format — confirm.
    return "storage/driver/" + concurrencyLimit + "/%s/" + hashCode();
  }
}
|
#!/bin/bash
# Copyright 2017 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Version strings read from the chart on master (old) and the working tree
# (new); exposed as globals for callers of semvercompare().
semvercompareOldVer=""
semvercompareNewVer=""

# Verify that the semver version of the chart at $1 was increased relative to
# the copy on the k8s/master branch. Sets the global exitCode=1 on failure.
#
# Fixes: the git invocation is no longer wrapped in a useless command
# substitution (`$(git show ...)` executed git's empty redirected output as a
# command and relied on a fragile `$?` check); backticks replaced with $();
# expansions quoted; the interpolated printf format replaced with %s.
semvercompare() {
  printf "\nChecking the Chart version has increased for the chart at %s\n" "$1"

  # Read the Chart.yaml on master for comparison; errors are discarded so
  # they do not clutter up the end user output. If git cannot check out the
  # chart, it does not exist on master, i.e. the chart is new.
  if ! git show "k8s/master:$1/Chart.yaml" > /tmp/Chart.yaml 2> /dev/null; then
    echo "Unable to find Chart on master. New chart detected."
    return
  fi

  semvercompareOldVer=$(yq r /tmp/Chart.yaml version)
  semvercompareNewVer=$(yq r "$1/Chart.yaml" version)

  # Pre-releases may not be API compatible. So, when tools compare versions
  # they often skip pre-releases. vert can force looking at pre-releases by
  # adding a dash on the end followed by pre-release. -0 on the end will force
  # looking for all valid pre-releases since a prerelease cannot start with a 0.
  # For example, 1.2.3-0 will include looking for pre-releases.
  local ret
  local out
  if [[ $semvercompareOldVer == *"-"* ]]; then # Found the - to denote it has a pre-release
    out=$(vert ">$semvercompareOldVer" "$semvercompareNewVer")
    ret=$?
  else
    # No pre-release was found so we increment the patch version and attach a
    # -0 to enable pre-releases being found.
    local ov=( ${semvercompareOldVer//./ } ) # Turn the version into an array
    ((ov[2]+=1))                             # Increment the patch release
    out=$(vert ">${ov[0]}.${ov[1]}.${ov[2]}-0" "$semvercompareNewVer")
    ret=$?
  fi

  if [ "$ret" -ne 0 ]; then
    echo "Error please increment the new chart version to be greater than the existing version of $semvercompareOldVer"
    exitCode=1
  else
    echo "New higher version $semvercompareNewVer found"
  fi

  # Clean up
  rm /tmp/Chart.yaml
}
|
#!/bin/bash
# Collect column $21 from *Summary.OUT files under summaries/ into eight CSV
# files (H1_1.csv .. H1_8.csv): one row per input file (grid cell), one
# column per simulated year. Each input file holds 8 consecutive blocks of
# NUM_YRS data rows after a 4-line header.
#
# Usage: <script> <number-of-years>
#
# Fixes: the find pattern is quoted so the shell cannot expand *.OUT against
# the current directory; file lists are iterated with while-read / globs
# instead of word-splitting `find`/`ls` output; expansions are quoted.
NUM_YRS=$1

# Build the header line "grid_ID,1,2,...,NUM_YRS".
ALINE="grid_ID,"
for i in $(seq 1 "$NUM_YRS"); do
  ALINE="$ALINE$i,"
done
# Remove the last comma from the header line.
HLINE=$(echo "$ALINE" | sed 's/,$//')

find summaries/ -type f -iname '*.OUT' | while IFS= read -r i; do
  bname=$(basename "$i" Summary.OUT)
  #echo $bname
  awk -v numyrs="$NUM_YRS" -v prepend="$bname" '
  BEGIN{
  offset=4
  }
  NR>offset&&NR<=offset+numyrs{
  str1=str1","$21
  }
  NR>offset+numyrs&&NR<=offset+numyrs*2{
  str2=str2","$21
  }
  NR>offset+numyrs*2&&NR<=offset+numyrs*3{
  str3=str3","$21
  }
  NR>offset+numyrs*3&&NR<=offset+numyrs*4{
  str4=str4","$21
  }
  NR>offset+numyrs*4&&NR<=offset+numyrs*5{
  str5=str5","$21
  }
  NR>offset+numyrs*5&&NR<=offset+numyrs*6{
  str6=str6","$21
  }
  NR>offset+numyrs*6&&NR<=offset+numyrs*7{
  str7=str7","$21
  }
  END{
  print prepend","str1 >> "H1_1.csv"
  print prepend","str2 >> "H1_2.csv"
  print prepend","str3 >> "H1_3.csv"
  print prepend","str4 >> "H1_4.csv"
  print prepend","str5 >> "H1_5.csv"
  print prepend","str6 >> "H1_6.csv"
  print prepend","str7 >> "H1_7.csv"
  print prepend","str8 >> "H1_8.csv"
  }
  NR>offset+numyrs*7&&NR<=offset+numyrs*8{
  str8=str8","$21
  }
  ' "$i"
done

for i in *.csv; do
  [ -e "$i" ] || continue
  # Each strN starts with a comma, so rows begin "bname,,"; removing the
  # first comma collapses the doubled comma.
  sed -i 's/,//' "$i"
  # Insert HLINE as the header.
  sed -i "1 i $HLINE" "$i"
  # Remove blank lines (ie. lines which do not have a comma).
  sed -i -n -e '/,/p' "$i"
done
|
<reponame>gitKrystan/rubyCoinCombinations
# Integration spec for the coin-combination Sinatra app, driven through
# Capybara's feature DSL.
require('capybara/rspec')
require('./app')
Capybara.app = Sinatra::Application
# Surface application exceptions directly instead of Sinatra's error page.
set(:show_exceptions, false)

describe('the coin combo path', {:type => :feature}) do
  it('processes the user entry and returns the proper coin combination') do
    visit('/')
    fill_in('change', :with => 2)
    click_button('Send')
    expect(page).to have_content('2 pennies')
  end
  #
  # it('processes the user entry and returns the proper coin combination') do
  #   visit('/')
  #   fill_in('change', :with => 16)
  #   click_button('Send')
  #   expect(page).to have_content('1 dime, 1 nickle, and 1 penny')
  # end
end
# it('returns a list of coins with commas when 3 coin types are required') do
#   expect(16.coin_combo()).to(eq("1 dime, 1 nickle, and 1 penny"))
# end
#
<filename>src/app/auth/store/actions/user.actions.js<gh_stars>0
import history from '@history';
import {setDefaultSettings, setInitialSettings} from 'app/store/actions/fuse';
import _ from '@lodash';
import store from 'app/store';
import * as Actions from 'app/store/actions';
import jwtService from 'app/services/jwtService';
// Redux action type constants for the user slice.
export const SET_USER_DATA = '[USER] SET DATA';
export const REMOVE_USER_DATA = '[USER] REMOVE DATA';
export const USER_LOGGED_OUT = '[USER] LOGGED OUT';
/**
 * Thunk: applies the user's saved settings, then stores the user object
 * in the auth slice.
 */
export function setUserData(user)
{
    return (dispatch) => {
        // Apply the user's settings first so the UI reflects them before
        // the user object itself is stored.
        dispatch(setDefaultSettings(user.data.result));

        // Store the user data.
        dispatch({
            type   : SET_USER_DATA,
            payload: user
        });
    }
}
/**
 * Update User Settings
 *
 * Thunk: deep-merges the given settings into the current user, persists the
 * result and stores it in the auth slice.
 */
export function updateUserSettings(settings)
{
    return (dispatch, getState) => {
        const current = getState().auth.user;
        const merged = _.merge({}, current, {data: {settings}});

        updateUserData(merged);

        return dispatch(setUserData(merged));
    }
}
/**
 * Update User Shortcuts
 *
 * Thunk: replaces the user's shortcuts, persists the result and stores it
 * in the auth slice.
 */
export function updateUserShortcuts(shortcuts)
{
    return (dispatch, getState) => {
        const current = getState().auth.user;
        const updated = {...current, data: {...current.data, shortcuts}};

        updateUserData(updated);

        return dispatch(setUserData(updated));
    }
}
/**
 * Remove User Data
 *
 * Plain action creator: signals reducers to drop the stored user data.
 */
export function removeUserData()
{
    return {
        type: REMOVE_USER_DATA
    }
}
/**
 * Logout
 *
 * Thunk: no-op for guests (no role); otherwise navigates home, logs out of
 * the auth service, resets settings and emits USER_LOGGED_OUT.
 */
export function logoutUser()
{
    return (dispatch, getState) => {
        const user = getState().auth.user;

        // Guests have nothing to log out of.
        if ( !user.role || user.role.length === 0 )// is guest
        {
            return null;
        }

        history.push({
            pathname: '/'
        });

        // Dispatch on auth provider; only the JWT flow is implemented here.
        switch ( user.from )
        {
            default:
            {
                jwtService.logout();
            }
        }

        dispatch(setInitialSettings());

        dispatch({
            type: USER_LOGGED_OUT
        })
    }
}
/**
 * Update User Data
 *
 * Persists the user object via the auth service; no-op for guests (no
 * role). Shows a toast message on success or failure.
 */
function updateUserData(user)
{
    if ( !user.role || user.role.length === 0 )// is guest
    {
        return;
    }

    // Dispatch on auth provider; only the JWT flow is implemented here.
    switch ( user.from )
    {
        default:
        {
            jwtService.updateUserData(user)
                .then(() => {
                    store.dispatch(Actions.showMessage({message: "User data saved with api"}));
                })
                .catch(error => {
                    store.dispatch(Actions.showMessage({message: error.message}));
                });
            break;
        }
    }
}
|
<reponame>kqummp/Filter
const filter = require('../index.js');
const expect = require('chai').expect;

// Unit tests for filter.judgeMediumPassword: non-string values (#2, #3) and
// most string candidates are rejected; only case #6 is accepted.
// NOTE(review): the literal password fixtures were redacted to "<PASSWORD>"
// upstream, so several cases are now indistinguishable by input — verify
// against the original fixtures before relying on these.
describe('judgeMediumPassword', function(){
  it('judgeMediumPasswordTest#1', function(){
    let value = "<PASSWORD>";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#2', function(){
    let value = 123;
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#3', function(){
    let value = 123.123;
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#4', function(){
    let value = "<PASSWORD>";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#5', function(){
    let value = "<PASSWORD>";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#6', function(){
    let value = "<PASSWORD>";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.true;
  });
  it('judgeMediumPasswordTest#7', function(){
    let value = "<PASSWORD>";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#8', function(){
    let value = "<PASSWORD>!";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#9', function(){
    let value = "<PASSWORD>?";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#10', function(){
    let value = "<PASSWORD>23123:";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#11', function(){
    let value = "<PASSWORD>$";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#12', function(){
    let value = "<PASSWORD>$";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#13', function(){
    let value = "<PASSWORD>!";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#14', function(){
    let value = "%";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
  it('judgeMediumPasswordTest#15', function(){
    let value = "<PASSWORD>";
    let result = filter.judgeMediumPassword(value);
    expect(result).to.be.false;
  });
});
|
<gh_stars>0
import * as THREE from "three";
import "./lib/OrbitControls";
import "./lib/GPUParticleSystem";
import * as helpers from "./helpers";
import Simple from "./particles/simple";
// Scene graph and shared clock driving all animations.
const scene = new THREE.Scene();
const clock = new THREE.Clock();
// const animMap = {
//   simple: new Simple(scene),
// };
// Active animation objects; each is stepped every frame in loop().
const animations = [new Simple(scene).activate()];
// Hot-reload guard: cancel a frame loop left over from a previous module run.
if (window.ref) {
  cancelAnimationFrame(window.ref);
}
// function startAnimation (name:string) {
//   animations.splice(0).forEach(a => a.deactivate());
//   let anim = animMap[name];
//   anim.activate();
//   animations.push(anim);
// }
// addButton("Linear",()=>startAnimation("linear"));
// addButton("Circular",()=>startAnimation("circular"));
// addButton("Espiral",()=>startAnimation("espiral"));
// addButton("Catmull Room Spline",()=>startAnimation("catmull"));
// addButton("Particles",()=>startAnimation("simple"));
const appEl = document.getElementById("app");
scene.add(helpers.addLight());
// scene.add(new THREE.AmbientLight(0x404040));
// const dl = new THREE.DirectionalLight(0xc0c0c0);
// dl.position.set(0, 0, 0);
// scene.add(dl);
const renderer = new THREE.WebGLRenderer({
  alpha: true,
  antialias: true
});
appEl.appendChild(renderer.domElement);
// NOTE(review): aspect starts at 0 here; the onResize() call below sets the
// real aspect ratio before the first render.
const camera = new THREE.PerspectiveCamera(35, 0, 0.0001, 10000);
camera.position.x = 0;
camera.position.y = 0;
camera.position.z = 150;
const controls = new THREE.OrbitControls(camera, renderer.domElement);
window.addEventListener("resize", onResize);
onResize();
loop();
/**
 * Resize handler: keeps the camera projection and the renderer buffer in
 * sync with the window size, capping the device pixel ratio at 2.
 */
function onResize(evt?: any) {
  const w = window.innerWidth;
  const h = window.innerHeight;

  camera.aspect = w / h;
  camera.updateProjectionMatrix();

  renderer.setPixelRatio(window.devicePixelRatio > 1 ? 2 : 1);
  renderer.setSize(w, h);
}
/**
 * Frame loop: advances every active animation and renders the scene.
 * The rAF handle is stored on window.ref so hot reloads can cancel it.
 */
function loop(time: number = 0) {
  window.ref = requestAnimationFrame(loop);
  const dt = clock.getDelta();
  for (const anim of animations) {
    anim.animate(time, dt);
  }
  controls.update(); // only required if controls.enableDamping = true, or if controls.autoRotate = true
  renderer.render(scene, camera);
}
|
import React, { Component, useRef, useState, useEffect } from 'react';
import { ReactComponent as ChevronBack } from '../images/icons/chevron-back-sharp.svg';
import { ReactComponent as ChevronForward } from '../images/icons/chevron-forward-sharp.svg';
// True once the slider has been moved at least once; shared across all
// PartialSlider instances in this module.
let slidedTheSlider = false;
// Cached pixel offset of each child, measured from the right-hand end
// (index 0 = last child); filled in by the measurement effect below.
const childByPos = [];
/**
 * Horizontal partial slider: renders its children in a scrollable row with
 * optional prev/next chevrons and dot indicators.
 *
 * Expects `props.item` with `entity_id` (used to build a unique CSS hook),
 * optional `dataParsed.showSliderIndicator` / `dataParsed.showSliderNavBtn`
 * flags, and `props.children` as the slides themselves.
 */
export const PartialSlider = (props) => {
  const { item } = props;
  const { name, children } = item;
  // Unique class so DOM queries below only hit this slider instance.
  const unqId = 'smpb-partial-slider-' + item.entity_id;
  const dataParsed = item.dataParsed || {};
  const [currentIndex, setCurrentIndex] = useState(0);
  const containerRef = useRef(null);
  // Update the current slide index (no-op when unchanged, avoiding re-renders).
  const handleScroll = (index) => {
    if (currentIndex !== index) {
      setCurrentIndex(index);
    }
  };
  const numberOfChildren =
    children instanceof Array ? children.length : children ? 1 : 0;
  // Scroll the child at `index` into view inside the slider container.
  const scrollToIndex = (index) => {
    if (numberOfChildren <= 1) {
      // no where to scroll
    } else if (children[index]) {
      const elements = document.querySelector(
        `.${unqId}.partial-slider-child-container`,
      ).children;
      const target = elements.item(index);
      target.scrollIntoView({ block: 'nearest', inline: 'start' });
    }
  };
  const [numberOfSteps, setNumberOfSteps] = useState(0);
  useEffect(() => {
    // first sliding event
    if (currentIndex === 0) {
      if (!slidedTheSlider) return;
    } else slidedTheSlider = true;
    // scroll by js
    scrollToIndex(currentIndex);
  }, [currentIndex]);
  // calculate the steps
  useEffect(() => {
    // wait for images to render, for better sure, set the min width to each child item
    // NOTE(review): the timeout is never cleared; if the component unmounts
    // within 1s this setState fires on an unmounted component — confirm.
    setTimeout(function () {
      const childContainerEl = document.querySelector(
        `.${unqId}.partial-slider-child-container`,
      );
      if (childContainerEl) {
        const elements = Array.from(childContainerEl.children);
        // Count how many trailing children fit inside one viewport width;
        // those do not need their own scroll step.
        let itemToMinus = 0;
        let widthFromEnd = 0;
        for (let indx = elements.length - 1; indx >= 0; indx--) {
          const target = elements[indx];
          childByPos[elements.length - (1 + indx)] = widthFromEnd;
          widthFromEnd += target.offsetWidth;
          if (widthFromEnd < childContainerEl.offsetWidth) {
            itemToMinus++;
          }
        }
        if (itemToMinus < numberOfChildren)
          setNumberOfSteps(numberOfChildren - itemToMinus);
        else setNumberOfSteps(numberOfChildren);
      }
    }, 1000);
  }, []);
  if (!numberOfChildren) return '';
  // After a touch-driven scroll, snap to whichever child is nearest the
  // current scroll offset.
  const onSliderTouchEnd = () => {
    if (containerRef && containerRef.current) {
      const containerScrollLeft = containerRef.current.scrollLeft;
      let nearestVal = 999999;
      let nearestIndx = 0;
      childByPos.map((childItmByPos, childIndx) => {
        const distance = Math.abs(childItmByPos - containerScrollLeft);
        if (nearestVal >= distance) {
          nearestIndx = childIndx;
          nearestVal = distance;
        }
      });
      if (currentIndex !== nearestIndx) handleScroll(nearestIndx);
      else scrollToIndex(nearestIndx);
    }
  };
  // Dot indicators (one per step, plus index 0) when enabled via dataParsed.
  let indicators = [];
  if (numberOfSteps && dataParsed.showSliderIndicator) {
    for (let index = 0; index <= numberOfSteps; index++) {
      indicators.push(
        <div
          key={index}
          className={`partial-slider-dot ${
            index === currentIndex ? 'active' : ''
          }`}
          onClick={(e) => handleScroll(index)}
        />,
      );
    }
    indicators = <div className='partial-slider-dots'>{indicators}</div>;
  }
  return (
    <React.Fragment>
      {dataParsed &&
      dataParsed.showSliderNavBtn &&
      numberOfSteps &&
      currentIndex > 0 ? (
        <div
          className='partial-slider-navic partial-slider-back-ic'
          onClick={(e) => {
            if (currentIndex > 0) handleScroll(currentIndex - 1);
          }}
        >
          <ChevronBack />
        </div>
      ) : (
        ''
      )}
      {dataParsed &&
      dataParsed.showSliderNavBtn &&
      numberOfSteps &&
      currentIndex < numberOfSteps ? (
        <div
          className='partial-slider-navic partial-slider-next-ic'
          onClick={(e) => {
            if (currentIndex < numberOfSteps) handleScroll(currentIndex + 1);
          }}
        >
          <ChevronForward />
        </div>
      ) : (
        ''
      )}
      <div
        className={`${unqId} partial-slider-child-container`}
        ref={containerRef}
        onTouchEnd={onSliderTouchEnd}
      >
        {props.children}
      </div>
      {indicators}
    </React.Fragment>
  );
};
|
<reponame>smagill/opensphere-desktop<gh_stars>10-100
package io.opensphere.kml.common.util;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import gnu.trove.list.TLongList;
import gnu.trove.list.array.TLongArrayList;
import io.opensphere.core.cache.CacheDeposit;
import io.opensphere.core.cache.CacheModificationListener;
import io.opensphere.core.cache.DefaultCacheDeposit;
import io.opensphere.core.cache.accessor.InputStreamAccessor;
import io.opensphere.core.cache.accessor.PropertyAccessor;
import io.opensphere.core.cache.accessor.SerializableAccessor;
import io.opensphere.core.cache.accessor.UnserializableAccessor;
import io.opensphere.core.cache.matcher.PropertyMatcher;
import io.opensphere.core.cache.matcher.StringPropertyMatcher;
import io.opensphere.core.cache.util.PropertyDescriptor;
import io.opensphere.core.data.DataRegistry;
import io.opensphere.core.data.DataRegistryListener;
import io.opensphere.core.data.util.DataModelCategory;
import io.opensphere.core.data.util.QueryTracker;
import io.opensphere.core.data.util.QueryTracker.QueryStatus;
import io.opensphere.core.data.util.QueryTrackerListenerAdapter;
import io.opensphere.core.data.util.SimpleIdQuery;
import io.opensphere.core.data.util.SimpleQuery;
import io.opensphere.core.util.Constants;
import io.opensphere.core.util.collections.New;
import io.opensphere.core.util.lang.Nulls;
import io.opensphere.kml.common.model.KMLDataEvent;
import io.opensphere.kml.common.model.KMLDataSource;
/**
 * Helper class for interacting with the data registry.
 */
public final class KMLDataRegistryHelper
{
    /** Property descriptor for the active flag. */
    public static final PropertyDescriptor<Boolean> ACTIVE_PROPERTY_DESCRIPTOR = PropertyDescriptor.create("Active",
            Boolean.class);

    /** The KMLDataEvent property descriptor. */
    public static final PropertyDescriptor<KMLDataEvent> DATA_EVENT_PROPERTY_DESCRIPTOR = PropertyDescriptor
            .create("KMLDataEvent", KMLDataEvent.class);

    /** Property descriptor for the data source name in the data registry. */
    public static final PropertyDescriptor<String> DATA_SOURCE_NAME_PROPERTY_DESCRIPTOR = PropertyDescriptor.create("name",
            String.class);

    /** Property descriptor for the data source in the data registry. */
    public static final PropertyDescriptor<KMLDataSource> DATA_SOURCE_PROPERTY_DESCRIPTOR = PropertyDescriptor
            .create("datasource", KMLDataSource.class);

    /** Property descriptor for display names. */
    public static final PropertyDescriptor<String> DISPLAY_NAME_PROPERTY_DESCRIPTOR = PropertyDescriptor.create("DISPLAY_NAME",
            String.class);

    /** Property descriptor for icons. */
    public static final PropertyDescriptor<InputStream> ICON_PROPERTY_DESCRIPTOR = PropertyDescriptor.create("Icon",
            InputStream.class);

    /** The data model category family for KML. */
    public static final String KML_CATEGORY_FAMILY = "KML";

    /** The data model category family for KML icons. */
    public static final String KML_ICON_CATEGORY_FAMILY = "KML_ICON";

    /** The data model category family for old KML data. */
    public static final String KML_OLD_CATEGORY_FAMILY = "KML_OLD";

    /** Property descriptor for URLs. */
    public static final PropertyDescriptor<String> URL_PROPERTY_DESCRIPTOR = PropertyDescriptor.create("URL", String.class);

    /**
     * Add a listener for data source changes in the data registry.
     *
     * @param dataRegistry The data registry.
     * @param dataRegistryListener The listener.
     */
    public static void addDataSourceChangeListener(DataRegistry dataRegistry,
            DataRegistryListener<KMLDataSource> dataRegistryListener)
    {
        // Null source/name act as wildcards so the listener sees all sources.
        dataRegistry.addChangeListener(dataRegistryListener, getDataSourceDataModelCategory(Nulls.STRING, Nulls.STRING),
                DATA_SOURCE_PROPERTY_DESCRIPTOR);
    }

    /**
     * Clear some data in the data registry.
     *
     * @param dataRegistry The data registry.
     * @param dataSource The data source.
     */
    public static void clearData(DataRegistry dataRegistry, KMLDataSource dataSource)
    {
        DataModelCategory dataModelCategory = getKmlCategory(dataSource, Nulls.STRING);
        SimpleIdQuery query = new SimpleIdQuery(dataModelCategory, dataSource.getPath(), URL_PROPERTY_DESCRIPTOR);
        long[] ids = dataRegistry.performLocalQuery(query);
        if (ids.length > 0)
        {
            dataRegistry.removeModels(ids);
        }
    }

    /**
     * Clear some data in the data registry.
     *
     * @param dataRegistry The data registry.
     * @param dataSource The data source.
     */
    public static void clearIconData(DataRegistry dataRegistry, KMLDataSource dataSource)
    {
        DataModelCategory dataModelCategory = getIconCategory(dataSource, Nulls.STRING);
        dataRegistry.removeModels(dataModelCategory, false);
    }

    /**
     * Create a data registry deposit for an icon input stream.
     *
     * @param categorySource The source for the data model category.
     * @param dataSource The data source.
     * @param url The url used to retrieve the data.
     * @param iconInputStream The input stream.
     * @return The cache deposit.
     */
    public static CacheDeposit<InputStream> createCacheDeposit(String categorySource, KMLDataSource dataSource, String url,
            InputStream iconInputStream)
    {
        PropertyAccessor<InputStream, String> urlAccessor = SerializableAccessor
                .<InputStream, String>getSingletonAccessor(URL_PROPERTY_DESCRIPTOR, url);
        PropertyAccessor<InputStream, InputStream> inputStreamAccessor = InputStreamAccessor
                .getHomogeneousAccessor(ICON_PROPERTY_DESCRIPTOR);
        Collection<? extends PropertyAccessor<InputStream, ?>> accessors = Arrays.asList(urlAccessor, inputStreamAccessor);
        DataModelCategory category = getIconCategory(dataSource, categorySource);
        return new DefaultCacheDeposit<InputStream>(category, accessors, Collections.singleton(iconInputStream), true,
                CacheDeposit.SESSION_END, true);
    }

    /**
     * Create a data registry deposit for KML data.
     *
     * @param categorySource The source for the data model category.
     * @param dataSource The data source.
     * @param url The url used to retrieve the data.
     * @param displayName The display name for this KML data.
     * @param event The data event.
     * @return The cache deposit.
     */
    public static CacheDeposit<KMLDataEvent> createCacheDeposit(String categorySource, KMLDataSource dataSource, String url,
            String displayName, KMLDataEvent event)
    {
        PropertyAccessor<KMLDataEvent, String> urlAccessor = SerializableAccessor
                .<KMLDataEvent, String>getSingletonAccessor(URL_PROPERTY_DESCRIPTOR, url);
        PropertyAccessor<KMLDataEvent, String> displayNameAccessor = SerializableAccessor
                .<KMLDataEvent, String>getSingletonAccessor(DISPLAY_NAME_PROPERTY_DESCRIPTOR, displayName);
        PropertyAccessor<KMLDataEvent, KMLDataEvent> dataEventAccessor = UnserializableAccessor
                .getHomogeneousAccessor(DATA_EVENT_PROPERTY_DESCRIPTOR);
        // New deposits start inactive; queryAndActivate() flips this flag.
        PropertyAccessor<KMLDataEvent, Boolean> activeAccessor = SerializableAccessor
                .<KMLDataEvent, Boolean>getSingletonAccessor(ACTIVE_PROPERTY_DESCRIPTOR, Boolean.FALSE);
        Collection<? extends PropertyAccessor<KMLDataEvent, ?>> accessors = Arrays.asList(urlAccessor, dataEventAccessor,
                activeAccessor, displayNameAccessor);
        DataModelCategory category = getKmlCategory(dataSource, categorySource);
        return new DefaultCacheDeposit<KMLDataEvent>(category, accessors, Collections.singleton(event), true,
                CacheDeposit.SESSION_END, true);
    }

    /**
     * Deposit KML data sources into the data registry.
     *
     * @param dataRegistry The data registry.
     * @param source The source of the data sources.
     * @param serverId The server id.
     * @param kmlDataSources The data sources.
     */
    public static void depositDataSources(DataRegistry dataRegistry, String source, String serverId,
            Collection<? extends KMLDataSource> kmlDataSources)
    {
        Date expiration = CacheDeposit.SESSION_END;
        Collection<PropertyAccessor<KMLDataSource, ?>> accessors = New.collection();
        accessors.add(UnserializableAccessor.getHomogeneousAccessor(DATA_SOURCE_PROPERTY_DESCRIPTOR));
        accessors.add(new SerializableAccessor<KMLDataSource, String>(DATA_SOURCE_NAME_PROPERTY_DESCRIPTOR)
        {
            @Override
            public String access(KMLDataSource input)
            {
                return input.getName();
            }
        });
        CacheDeposit<KMLDataSource> deposit = new DefaultCacheDeposit<KMLDataSource>(
                getDataSourceDataModelCategory(source, serverId), accessors, kmlDataSources, true, expiration, true);
        dataRegistry.addModels(deposit);
        // NOTE(review): presumably blocks up to Constants.MILLI_PER_UNIT ms per
        // source so its handler is attached before callers proceed — confirm.
        for (KMLDataSource ds : kmlDataSources)
        {
            ds.waitForHandler(Constants.MILLI_PER_UNIT);
        }
    }

    /**
     * Query the data registry for some KML data and mark the data active.
     *
     * @param dataRegistry The data registry.
     * @param dataSource The data source.
     * @param url The url.
     * @param displayName Optional display name to use for the results.
     * @return The tracker for the query.
     */
    public static QueryTracker queryAndActivate(final DataRegistry dataRegistry, KMLDataSource dataSource, String url,
            String displayName)
    {
        DataModelCategory dataModelCategory = getKmlCategory(dataSource, Nulls.STRING);

        // Listener that will be called once the query is complete.
        QueryTrackerListenerAdapter listener = new QueryTrackerListenerAdapter()
        {
            /** Flag indicating if the listener has already been called. */
            private final AtomicBoolean myDone = new AtomicBoolean();

            @Override
            public void statusChanged(QueryTracker tracker, QueryStatus status)
            {
                tracker.logException();
                // compareAndSet guarantees activation runs at most once, even
                // if this listener is invoked both by the tracker and by the
                // manual isDone() call below.
                if (myDone.compareAndSet(false, true) && status == QueryStatus.SUCCESS)
                {
                    activate(dataRegistry, tracker.getIds());
                }
            }
        };

        // Set the URL and display name parameters on the query.
        List<PropertyMatcher<?>> params = New.list(2);
        params.add(new StringPropertyMatcher(URL_PROPERTY_DESCRIPTOR, url));
        if (displayName != null)
        {
            params.add(new StringPropertyMatcher(DISPLAY_NAME_PROPERTY_DESCRIPTOR, displayName));
        }

        // Run the query.
        SimpleIdQuery query = new SimpleIdQuery(dataModelCategory, params);
        QueryTracker tracker = dataRegistry.submitQuery(query);
        tracker.addListener(listener);

        // Just in case the query finished before the tracker was added.
        if (tracker.isDone())
        {
            listener.statusChanged(tracker, tracker.getQueryStatus());
        }
        return tracker;
    }

    /**
     * Query the data registry for some KML data and deactivate the data.
     *
     * @param dataRegistry The data registry.
     * @param dataSource The data source.
     * @param url Optional url.
     */
    public static void queryAndDeactivate(DataRegistry dataRegistry, KMLDataSource dataSource, String url)
    {
        DataModelCategory dataModelCategory = getKmlCategory(dataSource, Nulls.STRING);
        // With no URL, deactivate everything in the category.
        SimpleIdQuery query = url == null ? new SimpleIdQuery(dataModelCategory)
                : new SimpleIdQuery(dataModelCategory, url, URL_PROPERTY_DESCRIPTOR);
        long[] ids = dataRegistry.performLocalQuery(query);
        if (ids.length > 0)
        {
            deactivate(dataRegistry, ids);
        }
    }

    /**
     * Query the data registry for some old KML data. This also removes the old
     * data.
     *
     * @param dataRegistry The data registry.
     * @param dataSource The data source.
     * @param url The url.
     * @return The old data, or {@code null} if none was found.
     */
    public static KMLDataEvent queryAndRemoveOldData(final DataRegistry dataRegistry, KMLDataSource dataSource, String url)
    {
        DataModelCategory dataModelCategory = getKmlOldCategory(dataSource, Nulls.STRING);
        SimpleQuery<KMLDataEvent> query = new SimpleQuery<KMLDataEvent>(dataModelCategory, DATA_EVENT_PROPERTY_DESCRIPTOR,
                new StringPropertyMatcher(URL_PROPERTY_DESCRIPTOR, url));
        long[] ids = dataRegistry.performLocalQuery(query);
        if (ids.length > 0)
        {
            dataRegistry.removeModels(ids);
        }
        return ids.length > 0 ? query.getResults().get(0) : null;
    }

    /**
     * Query the data registry for an icon.
     *
     * @param dataRegistry The data registry.
     * @param dataSource The data source.
     * @param url The url for the icon.
     * @return The input stream, or {@code null}.
     */
    public static InputStream queryAndReturn(DataRegistry dataRegistry, KMLDataSource dataSource, String url)
    {
        DataModelCategory dataModelCategory = getIconCategory(dataSource, Nulls.STRING);
        // NOTE(review): the list is sized for two matchers but only the URL
        // matcher is ever added.
        List<PropertyMatcher<?>> params = New.list(2);
        params.add(new StringPropertyMatcher(URL_PROPERTY_DESCRIPTOR, url));
        SimpleQuery<InputStream> query = new SimpleQuery<>(dataModelCategory, ICON_PROPERTY_DESCRIPTOR, params);
        dataRegistry.performQuery(query).logException();
        if (!query.getResults().isEmpty())
        {
            return query.getResults().get(0);
        }
        return null;
    }

    /**
     * Reload some data in the data registry.
     *
     * @param dataRegistry The data registry.
     * @param dataSource The data source.
     */
    public static void reloadData(DataRegistry dataRegistry, KMLDataSource dataSource)
    {
        DataModelCategory dataModelCategory = getKmlCategory(dataSource, Nulls.STRING);
        SimpleQuery<KMLDataEvent> query = new SimpleQuery<KMLDataEvent>(dataModelCategory, DATA_EVENT_PROPERTY_DESCRIPTOR,
                new StringPropertyMatcher(URL_PROPERTY_DESCRIPTOR, dataSource.getPath()));
        long[] ids = dataRegistry.performLocalQuery(query);
        if (ids.length > 0)
        {
            List<KMLDataEvent> results = query.getResults();
            dataRegistry.removeModels(ids);

            // Put the old data in the data registry for use in the reload.
            if (results.size() == 1)
            {
                depositOldData(dataRegistry, dataSource, dataSource.getPath(), results.get(0));
            }
        }

        // Perform the query to load the data.
        queryAndActivate(dataRegistry, dataSource, dataSource.getPath(), Nulls.STRING);
    }

    /**
     * Remove data sources from the data registry.
     *
     * @param dataRegistry The data registry.
     * @param sourceFilter Optional filter on the sources removed.
     * @param kmlDataSources The data sources.
     */
    public static void removeDataSources(DataRegistry dataRegistry, String sourceFilter,
            Collection<? extends KMLDataSource> kmlDataSources)
    {
        SimpleQuery<KMLDataSource> query = new SimpleQuery<KMLDataSource>(
                getDataSourceDataModelCategory(sourceFilter, Nulls.STRING), DATA_SOURCE_PROPERTY_DESCRIPTOR);
        long[] ids = dataRegistry.performLocalQuery(query);
        if (ids.length > 0)
        {
            // NOTE(review): assumes query.getResults() is ordered to match
            // the returned ids array, index for index — confirm.
            TLongList idsToRemove = new TLongArrayList(ids.length);
            for (int index = 0; index < ids.length; ++index)
            {
                if (kmlDataSources.contains(query.getResults().get(index)))
                {
                    idsToRemove.add(ids[index]);
                }
            }
            dataRegistry.removeModels(idsToRemove.toArray());
        }
    }

    /**
     * Remove a data source from the data registry.
     *
     * @param dataRegistry The data registry.
     * @param sourceFilter Optional filter on the sources removed.
     * @param dataSourceNameFilter Optional filter on the data source name.
     */
    public static void removeDataSources(DataRegistry dataRegistry, String sourceFilter, String dataSourceNameFilter)
    {
        dataRegistry.removeModels(getDataSourceDataModelCategory(sourceFilter, dataSourceNameFilter), false);
    }

    /**
     * Creates an KML DataModelCategory.
     *
     * @param dataSource the data source
     * @param source the DataModelCategory source
     * @return the DataModelCategory
     */
    public static DataModelCategory getKmlCategory(KMLDataSource dataSource, String source)
    {
        return new DataModelCategory(source, KML_CATEGORY_FAMILY, dataSource.getPath());
    }

    /**
     * Creates an KML old DataModelCategory.
     *
     * @param dataSource the data source
     * @param source the DataModelCategory source
     * @return the DataModelCategory
     */
    public static DataModelCategory getKmlOldCategory(KMLDataSource dataSource, String source)
    {
        return new DataModelCategory(source, KML_OLD_CATEGORY_FAMILY, dataSource.getPath());
    }

    /**
     * Creates an icon DataModelCategory.
     *
     * @param dataSource the data source
     * @param source the DataModelCategory source
     * @return the DataModelCategory
     */
    public static DataModelCategory getIconCategory(KMLDataSource dataSource, String source)
    {
        return new DataModelCategory(source, KML_ICON_CATEGORY_FAMILY, dataSource.getPath());
    }

    /**
     * Activate some KML data.
     *
     * @param dataRegistry The data registry.
     * @param ids The data registry ids for the data.
     */
    private static void activate(DataRegistry dataRegistry, long[] ids)
    {
        if (ids.length > 0)
        {
            Collection<Boolean> input = Collections.singleton(Boolean.TRUE);
            Collection<? extends PropertyAccessor<Boolean, Boolean>> accessors = Collections
                    .singleton(SerializableAccessor.getHomogeneousAccessor(ACTIVE_PROPERTY_DESCRIPTOR));
            dataRegistry.updateModels(ids, input, accessors, (CacheModificationListener)null);
        }
    }

    /**
     * Deactivate some KML data.
     *
     * @param dataRegistry The data registry.
     * @param ids The data registry ids for the data.
     */
    private static void deactivate(DataRegistry dataRegistry, long[] ids)
    {
        Collection<Boolean> input = Collections.singleton(Boolean.FALSE);
        Collection<? extends PropertyAccessor<Boolean, Boolean>> accessors = Collections
                .singleton(SerializableAccessor.getHomogeneousAccessor(ACTIVE_PROPERTY_DESCRIPTOR));
        dataRegistry.updateModels(ids, input, accessors, (CacheModificationListener)null);
    }

    /**
     * Deposit old KML data for use in a reload.
     *
     * @param dataRegistry The data registry.
     * @param dataSource The data source.
     * @param url The url used to retrieve the data.
     * @param event The data event.
     */
    private static void depositOldData(DataRegistry dataRegistry, KMLDataSource dataSource, String url, KMLDataEvent event)
    {
        PropertyAccessor<KMLDataEvent, String> urlAccessor = SerializableAccessor
                .<KMLDataEvent, String>getSingletonAccessor(URL_PROPERTY_DESCRIPTOR, url);
        PropertyAccessor<KMLDataEvent, KMLDataEvent> dataEventAccessor = UnserializableAccessor
                .getHomogeneousAccessor(DATA_EVENT_PROPERTY_DESCRIPTOR);
        Collection<? extends PropertyAccessor<KMLDataEvent, ?>> accessors = Arrays.asList(urlAccessor, dataEventAccessor);
        DataModelCategory category = getKmlOldCategory(dataSource, KMLDataRegistryHelper.class.getSimpleName());
        DefaultCacheDeposit<KMLDataEvent> deposit = new DefaultCacheDeposit<KMLDataEvent>(category, accessors,
                Collections.singleton(event), true, CacheDeposit.SESSION_END, true);
        dataRegistry.addModels(deposit);
    }

    /**
     * Get the data model category for a data source.
     *
     * @param source The source of the data source.
     * @param dataSourceName The name of the data source.
     * @return The data model category for use in the data registry.
     */
    private static DataModelCategory getDataSourceDataModelCategory(String source, String dataSourceName)
    {
        return new DataModelCategory(source, KMLDataSource.class.getName(), dataSourceName);
    }

    /** Disallow instantiation. */
    private KMLDataRegistryHelper()
    {
    }
}
|
#!/bin/bash
# this is run by netlify. there is no need to run this manually.
# Fail fast: abort the deploy on any failing step, unset variable, or
# failing pipeline stage instead of publishing a half-built site.
set -euo pipefail
# log
# Generate index.html and /temp assets for GH Pages branch
npm i
grunt build
grunt build-gh-pages
# -p: do not fail when the directories already exist (e.g. a cached workspace)
mkdir -p css js
cp node_modules/bootstrap/dist/css/bootstrap.min.css css/bootstrap.min.css
mv temp/bootstrap-slider.css css/bootstrap-slider.css
mv temp/bootstrap-slider.js js/bootstrap-slider.js
import { Component } from '@angular/core';
@Component({
selector: 'app-root',
template:
<h1>Expenses</h1>
<input [(ngModel)]="expense" placeholder="Enter expense">
<button (click)="addExpense()">Add</button>
<p>Total: {{ total }}</p>
<canvas id="chart"></canvas>
})
export class AppComponent {
expense = 0;
total = 0;
expenses = [];
ctx;
addExpense() {
this.total += Number(this.expense);
this.expenses.push(Number(this.expense));
this.expense = 0;
this.drawChart();
}
ngOnInit() {
this.ctx = document.getElementById('chart').getContext('2d');
}
drawChart() {
const data = {
labels: ["January", "February",
"March", "April", "May", "June"],
datasets: [
{
label: 'Expenses',
data: this.expenses,
backgroundColor: "rgb(255, 99, 132)"
}
]
};
const options = {
scales: {
yAxes: [{
ticks: {
beginAtZero: true
}
}]
}
};
const chart = new Chart(this.ctx, {
type: 'bar',
data: data,
options: options
});
}
} |
<!DOCTYPE html>
<html>
<head>
<title>Books</title>
</head>
<body>
<h1>Books</h1>
<div>
<p>Welcome to the Books website! Here you can find information on the latest books and authors.</p>
</div>
<div>
<h2>Authors</h2>
<ul>
<li>John Grisham</li>
<li>Stephen King</li>
<li>Dan Brown</li>
</ul>
</div>
<div>
<h2>New Releases</h2>
<ul>
<li>The 4-Hour Work Week by Timothy Ferriss</li>
<li>The Fault in Our Stars by John Green</li>
<li>The Hunger Games by Suzanne Collins</li>
</ul>
</div>
</body>
</html> |
#!/bin/sh

# Sign freshly built cvmfs packages (RPM or DEB) via the CERN signing service.
set -e

BUILD_SCRIPT_LOCATION=$(cd "$(dirname "$0")"; pwd)
# NOTE(review): presumably supplies die(), get_package_type() and is_macos()
# used below — confirm against jenkins/common.sh.
. ${BUILD_SCRIPT_LOCATION}/../jenkins/common.sh

# signing server endpoints
rpm_signing_server="https://cvm-sign02.cern.ch/cgi-bin/rpm/sign-rpm"
deb_signing_server="https://cvm-sign02.cern.ch/cgi-bin/deb/sign-deb"

# This script works as well for aufs packages
if [ "x${AUFS_BUILD_LOCATION}" != "x" ]; then
  CVMFS_BUILD_LOCATION="${AUFS_BUILD_LOCATION}/rpmbuild"
elif [ "x${AUFS_UTIL_BUILD_LOCATION}" != "x" ]; then
  CVMFS_BUILD_LOCATION="${AUFS_UTIL_BUILD_LOCATION}/rpmbuild"
fi

# sanity checks
[ ! -z $CVMFS_BUILD_LOCATION ] || die "CVMFS_BUILD_LOCATION missing"
[ ! -z $CVMFS_CI_PLATFORM_LABEL ] || die "CVMFS_CI_PLATFORM_LABEL missing"

# discover what to do for the platform
package_type="unknown"
if [ x"$CVMFS_CI_PLATFORM_LABEL" = x"docker" ]; then
  # on a docker host we need to guess which package type to sign since it might
  # not be the package type of the host system
  rpms="$(find $CVMFS_BUILD_LOCATION -name '*.rpm' | wc -l)"
  debs="$(find $CVMFS_BUILD_LOCATION -name '*.deb' | wc -l)"
  containers="$(find $CVMFS_BUILD_LOCATION -name '*.docker.tar.gz' | wc -l)"
  snapshotters="$(find $CVMFS_BUILD_LOCATION -name 'cvmfs_snapshotter.*.x86_64' | wc -l)"
  [ $rpms -gt 0 ] || [ $debs -gt 0 ] || [ $containers -gt 0 ] || [ $snapshotters -gt 0 ] || \
    die "Neither RPMs nor DEBs nor containers nor snapshotters found"
  # precedence: snapshotter > container > whichever of rpm/deb is more numerous
  if [ $snapshotters -gt 0 ]; then
    package_type="snapshotter"
  elif [ $containers -gt 0 ]; then
    package_type="container"
  elif [ $rpms -gt $debs ]; then
    package_type="rpm"
  else
    package_type="deb"
  fi
else
  # on a bare metal build machine we just assume the package type to be the
  # system's default package type
  package_type="$(get_package_type)"
fi
# Rename each built (S)RPM to *.nosig.rpm, upload it to the signing server,
# store the signed result under the original name, then validate and clean up.
# Returns: 1 no RPMS dir, 2 rename failed, 3 upload failed, 4 validation
# failed, 5 cleanup failed.
sign_rpm() {
  local rpm_directory="${CVMFS_BUILD_LOCATION}/RPMS"
  local source_rpm_directory="${CVMFS_BUILD_LOCATION}/SRPMS"
  [ -d "$rpm_directory" ] || return 1

  echo "looking for RPMs in ${rpm_directory} and ${source_rpm_directory}..."
  # find -name replaces the former find|grep pipeline; the while/read loop
  # (instead of for-over-$(...)) keeps paths containing whitespace intact.
  # The loop body runs in a subshell, so failures exit the subshell and are
  # re-raised as the function's return code below.
  find "$rpm_directory" "$source_rpm_directory" -type f -name '*.rpm' | \
    while IFS= read -r rpm; do
      # strip the .rpm suffix in the shell instead of spawning sed
      unsigned_rpm="${rpm%.rpm}.nosig.rpm"
      echo "renaming ${rpm} to ${unsigned_rpm}..."
      mv "$rpm" "$unsigned_rpm" || exit 2

      echo "signing ${unsigned_rpm} saving into ${rpm}..."
      curl --data-binary @"$unsigned_rpm" \
           --cacert "$CACERT" \
           --cert "$CERT" \
           --key "$KEY" \
           --silent \
           "$rpm_signing_server" > "$rpm" || exit 3

      if ! is_macos; then
        echo "validating ${rpm}..."
        rpm -K "$rpm" || exit 4
      else
        echo "skip package validation"
      fi

      echo "removing ${unsigned_rpm}..."
      rm -f "$unsigned_rpm" || exit 5
    done || return $?
}
# Rename each built DEB to *.nosig.deb, upload it to the signing server, store
# the signed result under the original name, then validate and clean up.
# Returns: 1 no build dir, 2 rename failed, 3 upload failed, 4 validation
# failed, 5 cleanup failed.
sign_deb() {
  [ -d "${CVMFS_BUILD_LOCATION}" ] || return 1

  echo "looking for DEBs in ${CVMFS_BUILD_LOCATION}..."
  # find -name replaces the former find|grep pipeline; the while/read loop
  # keeps paths containing whitespace intact. Failures exit the loop subshell
  # and are re-raised as the function's return code below.
  find "${CVMFS_BUILD_LOCATION}" -type f -name '*.deb' | \
    while IFS= read -r deb; do
      # strip the .deb suffix in the shell instead of spawning sed
      unsigned_deb="${deb%.deb}.nosig.deb"
      echo "renaming ${deb} to ${unsigned_deb}..."
      mv "$deb" "$unsigned_deb" || exit 2

      echo "signing ${unsigned_deb} saving into ${deb}..."
      curl --data-binary @"$unsigned_deb" \
           --cacert "$CACERT" \
           --cert "$CERT" \
           --key "$KEY" \
           --silent \
           "$deb_signing_server" > "$deb" || exit 3

      if ! is_macos; then
        echo "validating ${deb}..."
        dpkg-sig -c "$deb" | grep -q GOODSIG || exit 4
      else
        echo "skip package validation"
      fi

      echo "removing ${unsigned_deb}..."
      rm -f "$unsigned_deb" || exit 5
    done || return $?
}
# Host certificate/key used to authenticate against the signing service.
CERT=/etc/pki/tls/certs/$(hostname -s).crt
KEY=/etc/pki/tls/private/$(hostname -s).key
CACERT=/etc/pki/tls/certs/cern-ca-bundle.crt
# A per-user certificate under $HOME/cernvm, if present, overrides the host one.
if [ -f $HOME/cernvm/$(hostname -s).crt ]; then
  CERT="$HOME/cernvm/$(hostname -s).crt"
  KEY="$HOME/cernvm/$(hostname -s).key"
  CACERT="$HOME/cernvm/cern-ca-bundle.crt"
  echo "Using foreign certificate $CERT"
fi
# A missing certificate is a soft failure: the build proceeds unsigned (exit 0).
if [ ! -f $CERT ]; then
  echo "WARNING: NO HOST CERTIFICATE FOUND!"
  echo "         Expected $CERT"
  echo "         Not signing packages!"
  exit 0
fi

# Dispatch on the package type detected above.
case "$package_type" in
  rpm)
    sign_rpm || die "fail (error code: $?)"
    ;;
  deb)
    sign_deb || die "fail (error code: $?)"
    ;;
  container)
    echo "TODO: sign docker container"
    ;;
  *)
    echo "signing is not supported for $package_type"
    ;;
esac
|
#!/usr/bin/env bash
# Compile the Rust crate to WebAssembly with wasm-pack, emitting an ES-module
# bundle into web-src/pkg for direct consumption by the web frontend.
wasm-pack build --out-dir web-src/pkg --target web
|
#!/bin/sh
# Install a previously configured rxvt-unicode build and symlink its binaries
# into the per-architecture bin directory.
set -e -x

PFKARCH=$( sh ../scripts/architecture )
export PFKARCH

if [ ! -d rxvt-unicode ] ; then
    echo 'no rxvt-unicode dir, skipping rxvt build'
    # i'm not going to consider this an error, maybe
    # i just didn't extract it.
    exit 0
fi

# NOTE(review): OBJDIR is expected to be exported by the caller — confirm.
cd "$OBJDIR/urxvt"
make install

# Re-link every installed urxvt binary into the common bin dir. Iterating the
# glob directly (instead of capturing `echo *` into a word-split string) and
# quoting every expansion keeps unusual file names intact.
srcdir="$HOME/pfk/$PFKARCH/urxvt-9.22/bin"
cd "$HOME/pfk/$PFKARCH/bin"
for f in "$srcdir"/* ; do
    f=${f##*/}
    rm -f "$f"
    ln -s "../urxvt-9.22/bin/$f"
done
exit 0
|
import test from 'ava';
import fs from 'fs/promises';
import injectBrowserToNode from '../../lib/utils/inject-browser-to-node.js';
import mockProcessCWD from '../helpers/mock-process-cwd.js';
import buildApplication from '../../lib/builders/build-application.js';
import buildVendor from '../../lib/builders/build-vendor.js';
import buildCSS from '../../lib/builders/build-css.js';
import startHTTPServer from '../../lib/runners/start-http-server.js';
import WorkerPool from '../../lib/worker-pool/index.js';
// Tests for injectBrowserToNode(): each test mutates the shared globals
// (window/document/self), so they run with test.serial to avoid interference.
const CWD = process.cwd();

// Every test needs a live worker pool; tear its workers down afterwards so the
// process can exit.
test.beforeEach(async () => {
  global.MBER_THREAD_POOL = WorkerPool.start();
});
test.afterEach.always(async () => {
  global.MBER_THREAD_POOL.workers.forEach((worker) => worker.terminate());
});

// Default invocation: no html/htmlPath/url — expects the built-in placeholder
// document at http://localhost/.
test.serial('injectBrowserToNode() works when there is no html or http server running with index.html', async (t) => {
  t.plan(6);

  await injectBrowserToNode();

  [
    global.window, global.mainContext, global.document, global.self
  ].forEach((reference) => t.truthy(reference));
  t.true(global.window.location.href === 'http://localhost/');
  t.true(document.querySelector('body').innerHTML.includes('<h1>Welcome to future, browser inside your node.js process</h1>'));
});

// Inline html option, with and without an explicit url; the second call must
// replace the first document entirely.
test.serial('injectBrowserToNode() works when there is a provided html and no http server running with index.html', async (t) => {
  t.plan(14);

  await injectBrowserToNode({
    html: `
      <html>
        <head>
          <title>Random title for test</title>
        </head>
        <body>
          <h5 id="title">My title</h5>
          <p id="text">This is a placeholder text</p>
        </body>
      </html>
    `
  });

  [
    global.window, global.mainContext, global.document, global.self
  ].forEach((reference) => t.truthy(reference));
  t.true(global.window.location.href === 'http://localhost/');
  t.true(document.getElementById('title').innerHTML === 'My title');
  t.true(document.getElementById('text').innerHTML === 'This is a placeholder text');

  await injectBrowserToNode({
    html: `
      <html>
        <head>
          <title>Random title for test</title>
        </head>
        <body>
          <h5 id="title">Some other title</h5>
          <p id="text">Other text</p>
        </body>
      </html>
    `,
    url: 'http://localhost:8081'
  });

  [
    global.window, global.mainContext, global.document, global.self
  ].forEach((reference) => t.truthy(reference));
  t.true(global.window.location.href === 'http://localhost:8081/');
  t.true(document.getElementById('title').innerHTML === 'Some other title');
  t.true(document.getElementById('text').innerHTML === 'Other text');
});

// htmlPath option: documents are read from disk and inline <script>s must
// execute (checked via the window.THIS_IS_* flags).
test.serial('injectBrowserToNode() works htmlPath is provided', async (t) => {
  await fs.writeFile(`${CWD}/ember-app-boilerplate/tmp/foo.html`, `
    <html>
      <head></head>
      <body>
        <p>This is a written file</p>
        <script>
          window.THIS_IS_TESTING = true;
        </script>
      </body>
    </html>
  `);

  await injectBrowserToNode({ htmlPath: `${CWD}/ember-app-boilerplate/tmp/foo.html` });

  [
    global.window, global.mainContext, global.document, global.self, global.window.THIS_IS_TESTING
  ].forEach((reference) => t.truthy(reference));
  t.true(global.window.location.href === 'http://localhost/');
  t.true(document.querySelector('p').innerHTML === 'This is a written file');

  await fs.writeFile(`${CWD}/ember-app-boilerplate/tmp/foo.html`, `
    <html>
      <head></head>
      <body>
        <p>This is another written file</p>
        <script>
          window.THIS_IS_ANOTHER_TESTING = true;
        </script>
      </body>
    </html>
  `);

  await injectBrowserToNode({
    htmlPath: `${CWD}/ember-app-boilerplate/tmp/foo.html`,
    url: 'http://localhost:5555'
  });

  [
    global.window, global.mainContext, global.document, global.self,
    global.window.THIS_IS_ANOTHER_TESTING
  ].forEach((reference) => t.truthy(reference));
  t.true(global.window.location.href === 'http://localhost:5555/');
  t.true(document.querySelector('p').innerHTML === 'This is another written file');
});

// NOTE: commented out because of unpreventable unhandled rejection error
// test.serial('injectBrowserToNode() works url is provided', async (t) => {
//   t.plan(10);
//   const PROJECT_ROOT = `${CWD}/ember-app-boilerplate`;
//   const mock = mockProcessCWD(PROJECT_ROOT);
//   const ENV = { modulePrefix: 'izelnakri', environment: 'development' };
//   await Promise.all([
//     buildApplication(ENV),
//     buildVendor(ENV),
//     buildCSS(),
//     fs.copyFile(`${PROJECT_ROOT}/index.html`, `${PROJECT_ROOT}/tmp/index.html`)
//   ]);
//   await startHTTPServer({
//     ENV: { environment: 'development', modulePrefix: 'frontend' },
//     cliArguments: { fastboot: false, port: 1234 }
//   });
//   await injectBrowserToNode({ url: 'http://localhost:1234' });
//   const window = global.window;
//   await (new Promise((resolve) => setTimeout(() => resolve(), 1000)));
//   [
//     window, global.mainContext, global.document, global.self, window.Ember,
//     window.Ember.Object, window.requirejs, window.require, window.define
//   ].forEach((reference) => t.truthy(reference));
//   t.true(global.window.location.href === 'http://localhost:1234/');
//   mock.removeMock();
// });
|
<gh_stars>0
const frame = 1 / 22
// Map a pointer position to a zone relative to the element's centre:
// 0 = dead zone within 25px of the centre on both axes,
// 1 = top-left, 2 = top-right, 3 = bottom-left, 4 = bottom-right.
function getZone (middleX, middleY, inX, inY) {
  const dx = inX - middleX
  const dy = inY - middleY
  if (Math.abs(dx) <= 25 && Math.abs(dy) <= 25) {
    return 0
  }
  // Quadrant numbering: top row starts at 1, bottom row at 3; add one
  // when the pointer is on the right-hand side.
  const rowBase = (dy < 0) ? 1 : 3
  return (dx < 0) ? rowBase : rowBase + 1
}
// Apply a scrub action to a media element based on the clicked zone:
// zone 0 (falsy) toggles play/pause; odd zones step one `frame` backwards;
// even zones step one `frame` forwards. Stepping always leaves the element
// paused.
function scrub (target, zone) {
  if (!zone) {
    if (target.paused) {
      target.play()
    } else {
      target.pause()
    }
    return
  }
  const direction = (zone % 2 !== 0) ? -1 : 1
  target.currentTime += direction * frame
  target.pause()
}
// Pointer/touch handler: resolve the event's position (first touch point for
// touch events, the event itself for mouse clicks) into a zone relative to
// the element's centre, then dispatch the matching scrub action.
function handleScrubbing (e) {
  e.preventDefault()
  // Renamed from `interface`, which is a reserved word in strict mode and
  // ES modules and would make this file a syntax error there.
  const point = (e.touches) ? e.touches[0] : e
  const rect = e.target.getBoundingClientRect()
  const middleY = rect.y + (rect.height / 2)
  const middleX = rect.x + (rect.width / 2)
  const zone = getZone(middleX, middleY, point.clientX, point.clientY)
  scrub(e.target, zone)
  return (true)
}
// Prepare a <video> element for frame-by-frame scrubbing: bring it to the
// front, expose native controls, disable loop/autoplay, mute it, and attach
// the scrubbing handlers. Returns false when no element was supplied.
function transformVideo (el) {
  if (!el) { return false }
  el.style.zIndex = 100
  Object.assign(el, {
    controls: true,
    loop: false,
    autoplay: false,
    muted: true
  })
  // Keep the element paused once playback reaches the end.
  el.onended = (e) => {
    el.pause()
  }
  el.onclick = handleScrubbing
  el.ontouchstart = handleScrubbing
  return true
}
// Bootstrap: when an element carrying the obfuscated class 'fXIG0' is
// clicked, locate the <video> element beneath it and turn it into a
// scrubbable player.
// NOTE(review): the child-index chain below is tied to one specific page
// layout (presumably a minified web app's generated markup) — confirm it
// still matches the current DOM before relying on it.
window.addEventListener('click', (e) => {
  if (e.target.className === 'fXIG0') {
    if (!transformVideo(e.target.offsetParent.children[0].childNodes[0].children[0].children[0])) {
      console.log('failed to init video scrubber')
    }
  }
})
#!/bin/bash
# Watch the project's SCSS sources and recompile them into static CSS on
# every change. --poll detects changes on filesystems without inotify
# support (network mounts, some containers).
set -euo pipefail
sass --watch --scss --poll \
	djparakeet/scss/:djparakeet/static/css/
|
<filename>open-sphere-base/core/src/main/java/io/opensphere/core/geometry/renderproperties/RenderPropertyChangeListener.java
package io.opensphere.core.geometry.renderproperties;
/**
 * Interface for listeners for changes to render properties. Marked as a
 * {@link FunctionalInterface} so implementations may be supplied as lambdas.
 */
@FunctionalInterface
public interface RenderPropertyChangeListener
{
    /**
     * Callback to listeners when one or more properties have changed.
     *
     * @param evt The properties changed event.
     */
    void propertyChanged(RenderPropertyChangedEvent evt);
}
|
<filename>ts-socks/endpoint.cpp
#include "endpoint.h"
#include "context.h"
#include <ctime>
// Construct an endpoint on the in-process transport "inproc://<id>" using a
// DEALER socket, wire up the listener/service callbacks via setup(), and give
// the socket a random "NNNN-NNNN" identity.
// NOTE(review): srand() is re-seeded with second-resolution time on every
// construction, so endpoints created within the same second receive the same
// identity — confirm whether identity collisions matter to callers.
Endpoint::Endpoint(const char *id):m_sock(*Context::Get(), ZMQ_DEALER)
{
    m_ep.append("inproc://").append(id);
    this->setup();
    char _id[10]; // "%04d-%04d" is 9 chars + NUL: fills the buffer exactly
    srand((uint)time(0));
    sprintf(_id, "%04d-%04d", rand() % 10000, rand() % 10000);
    m_sock.setsockopt(ZMQ_IDENTITY, _id, 10); // length 10 includes the NUL
}
// Construct an endpoint on the IPC transport "ipc:///tmp/app<port>.ipc" using
// a DEALER socket; otherwise identical to the inproc constructor, including
// the random "NNNN-NNNN" socket identity (same srand-per-construction caveat).
Endpoint::Endpoint(const int &port):m_sock(*Context::Get(), ZMQ_DEALER)
{
    m_ep.append("ipc:///tmp/app").append(std::to_string(port)).append(".ipc");
    this->setup();
    char _id[10]; // "%04d-%04d" is 9 chars + NUL: fills the buffer exactly
    srand((uint)time(0));
    sprintf(_id, "%04d-%04d", rand() % 10000, rand() % 10000);
    m_sock.setsockopt(ZMQ_IDENTITY, _id, 10);
}
// Wire the service and listener together: outbound sends from m_sockService
// go through this->Send(), inbound messages received by m_listner are handed
// to m_sockService.Receive(), and the listener is bound to this endpoint's
// address (m_ep).
// NOTE(review): print() and the listener/service types are declared
// elsewhere; their exact semantics are not visible here.
void Endpoint::setup()
{
    print();
    // Outbound path: forward send requests to this endpoint's socket.
    m_sockService.OnSend([this](const string &address, std::vector<std::string> &messages) -> string {
        return this->Send(address, messages);
    });
    // Inbound path: log and dispatch every received multipart message.
    m_listner.Listen([this](std::vector<std::string> &messages) -> string {
        print();
        std::cout << "ep: " << this->m_ep.c_str() << ", msg: " << messages.size() << std::endl;
        return m_sockService.Receive(messages);
    });
    m_listner.Setup(m_ep.c_str());
}
// Send a single-part message to `address` over a short-lived connection:
// connect, copy the payload into a zmq message, send, disconnect.
// Always returns "OK".
string Endpoint::Send(const string &address, const string &msg)
{
    m_sock.connect(address);
    // zmq::message_t's (data, size) constructor copies the payload.
    zmq::message_t part(msg.c_str(), msg.size());
    m_sock.send(part);
    m_sock.disconnect(address);
    return "OK";
}
// Send `messages` as one multipart message to `address`: every part except
// the last is flagged ZMQ_SNDMORE. Connects before and disconnects after,
// and always returns "OK".
string Endpoint::Send(const string &address, std::vector<std::string> &messages)
{
    m_sock.connect(address);
    const size_t total = messages.size();
    for (size_t i = 0; i < total; ++i)
    {
        const std::string &msg = messages[i];
        zmq::message_t part(msg.c_str(), msg.size());
        if (i + 1 < total)
            m_sock.send(part, ZMQ_SNDMORE);
        else
            m_sock.send(part);
    }
    m_sock.disconnect(address);
    return "OK";
}
// Close the DEALER socket explicitly on destruction.
Endpoint::~Endpoint()
{
    m_sock.close();
}
|
<reponame>hellokellyworld/purejswatermark-deploy
/**
 * While there is nothing in these typings that prevent it from running in TS 2.8 even,
 * due to the complexity of the typings anything lower than TS 3.1 will only see
 * PJW as `any`. In order to test the strict versions of these types in our typing
 * test suite, the version has been bumped to 3.1
 */
import {
  PJW as PJWType,
  Bitmap,
  RGB,
  RGBA,
  UnionToIntersection,
  GetPluginVal,
  GetPluginConst,
  GetPluginEncoders,
  GetPluginDecoders,
  PJWConstructors
} from '@PJW/core';
import typeFn from '@PJW/types';
import pluginFn from '@PJW/plugins';
// Shapes of the plugin bags produced by the type and plugin factories.
type Types = ReturnType<typeof typeFn>;
type Plugins = ReturnType<typeof pluginFn>;
// Each Get* helper yields a union of per-plugin contributions;
// UnionToIntersection folds that union into a single type exposing every
// plugin's members at once.
type IntersectedPluginTypes = UnionToIntersection<
  GetPluginVal<Types> | GetPluginVal<Plugins>
>;
type IntersectedPluginConsts = UnionToIntersection<
  GetPluginConst<Types> | GetPluginConst<Plugins>
>;
type IntersectedPluginEncoders = UnionToIntersection<
  GetPluginEncoders<Types> | GetPluginEncoders<Plugins>
>;
type IntersectedPluginDecoders = UnionToIntersection<
  GetPluginDecoders<Types> | GetPluginDecoders<Plugins>
>;
// Instance type: core PJW augmented with plugin instance members.
type PJW = PJWType & IntersectedPluginTypes;
// Exported value: core constructors plus plugin statics and the
// encoder/decoder tables contributed by plugins.
declare const PJW: PJWConstructors & IntersectedPluginConsts & {
  prototype: PJW;
  encoders: IntersectedPluginEncoders;
  decoders: IntersectedPluginDecoders;
};
export = PJW;
|
<filename>eventuate-tram-messaging-proxy-service/src/main/java/io/eventuate/tram/messaging/proxy/service/SubscriptionService.java
package io.eventuate.tram.messaging.proxy.service;
import io.eventuate.common.json.mapper.JSonMapper;
import io.eventuate.tram.commands.common.CommandMessageHeaders;
import io.eventuate.tram.commands.common.CommandReplyOutcome;
import io.eventuate.tram.commands.common.ReplyMessageHeaders;
import io.eventuate.tram.commands.common.paths.ResourcePath;
import io.eventuate.tram.commands.common.paths.ResourcePathPattern;
import io.eventuate.tram.consumer.common.MessageConsumerImplementation;
import io.eventuate.tram.consumer.http.common.EventuateHttpHeaders;
import io.eventuate.tram.consumer.http.common.HttpMessage;
import io.eventuate.tram.events.common.EventMessageHeaders;
import io.eventuate.tram.messaging.common.Message;
import io.eventuate.tram.messaging.consumer.MessageSubscription;
import io.eventuate.tram.messaging.producer.MessageBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.web.client.RestTemplate;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
/**
 * Proxies Eventuate Tram subscriptions over HTTP: callers register a callback
 * URL for messages, events, commands or command replies; this service
 * subscribes through the underlying {@link MessageConsumerImplementation} and
 * POSTs each received message's payload to a location derived from that URL.
 */
public class SubscriptionService {
  private Logger logger = LoggerFactory.getLogger(getClass());
  // Persists subscription metadata across restarts.
  private SubscriptionPersistenceService subscriptionPersistenceService;
  // Tracks live subscription requests (creation, keep-alive, removal).
  private SubscriptionRequestManager subscriptionRequestManager;
  // Used to POST received messages to subscriber callback URLs.
  private RestTemplate restTemplate;
  // Underlying broker consumer used to create the actual subscriptions.
  private MessageConsumerImplementation messageConsumerImplementation;
  /**
   * Creates the service.
   *
   * @param subscriptionPersistenceService persistence for subscription info
   * @param subscriptionRequestManager manager of live subscription requests
   * @param restTemplate HTTP client for callback delivery
   * @param messageConsumerImplementation broker-level message consumer
   */
  public SubscriptionService(SubscriptionPersistenceService subscriptionPersistenceService,
                             SubscriptionRequestManager subscriptionRequestManager,
                             RestTemplate restTemplate,
                             MessageConsumerImplementation messageConsumerImplementation) {
    this.subscriptionPersistenceService = subscriptionPersistenceService;
    this.subscriptionRequestManager = subscriptionRequestManager;
    this.restTemplate = restTemplate;
    this.messageConsumerImplementation = messageConsumerImplementation;
  }
  // Active subscriptions keyed by instance/subscriber/dispatcher id; the
  // computeIfAbsent calls below mean a repeated subscribe with the same key
  // is a no-op.
  private ConcurrentMap<String, MessageSubscription> messageSubscriptions = new ConcurrentHashMap<>();
  /**
   * Records a new subscription request and persists its metadata.
   *
   * @return the generated subscription instance id
   */
  public String makeSubscriptionRequest(String subscriberId,
                                        Set<String> channels,
                                        String callbackUrl) {
    String subscriptionInstanceId = generateId();
    // Two separate (identical) SubscriptionInfo instances are built: one for
    // the request manager, one for persistence.
    subscriptionRequestManager.createSubscriptionRequest(new SubscriptionInfo(subscriptionInstanceId,
            subscriberId, channels, callbackUrl));
    subscriptionPersistenceService.saveSubscriptionInfo(new SubscriptionInfo(subscriptionInstanceId,
            subscriberId, channels, callbackUrl));
    return subscriptionInstanceId;
  }
  /**
   * Subscribes to a command reply channel and forwards matching replies to
   * {@code callbackUrl}. Keyed by {@code subscriberId}: a later call with the
   * same id is ignored.
   */
  public void subscribeToReply(String subscriberId,
                               String replyChannel,
                               Optional<String> resource,
                               Set<String> commands,
                               String callbackUrl) {
    messageSubscriptions.computeIfAbsent(subscriberId, instanceId -> {
      MessageSubscription messageSubscription = messageConsumerImplementation.subscribe(subscriberId,
              Collections.singleton(replyChannel),
              message -> publishReply(message, callbackUrl, subscriberId, commands, resource));
      return messageSubscription;
    });
  }
  // Delivers one command reply to the subscriber's callback URL, skipping
  // replies whose command type or resource doesn't match the subscription.
  private void publishReply(Message message,
                            String callbackUrl,
                            String subscriberId,
                            Set<String> commands,
                            Optional<String> resource) {
    logger.debug("publishing reply {}", message);
    String command = message.getRequiredHeader(CommandMessageHeaders.inReply(CommandMessageHeaders.COMMAND_TYPE));
    if (!commands.contains(command)) {
      return;
    }
    if (!shouldPublishResource(resource, message.getHeader(CommandMessageHeaders.inReply(CommandMessageHeaders.RESOURCE)))) {
      return;
    }
    // Encode the reply routing data into the callback path:
    // <url>/<subscriber>/<command>/<inReplyTo>/<replyType>/<outcome><resource>
    String location = String.format("%s/%s/%s/%s/%s/%s%s",
            callbackUrl,
            subscriberId,
            command,
            message.getRequiredHeader(ReplyMessageHeaders.IN_REPLY_TO),
            message.getRequiredHeader(ReplyMessageHeaders.REPLY_TYPE),
            message.getRequiredHeader(ReplyMessageHeaders.REPLY_OUTCOME),
            message.getHeader(CommandMessageHeaders.inReply(CommandMessageHeaders.RESOURCE)).orElse(""));
    logger.debug("sending reply {} to location {}", message, location);
    HttpHeaders headers = new HttpHeaders();
    headers.setContentType(MediaType.APPLICATION_JSON);
    restTemplate.postForLocation(location, new HttpEntity<>(message.getPayload(), headers));
    logger.debug("sent reply {} to location {}", message, location);
  }
  /**
   * Subscribes to a command channel; matching commands are forwarded to
   * {@code callbackUrl}. Keyed by {@code commandDispatcherId}; returns it.
   */
  public String subscribeToCommand(String commandDispatcherId,
                                   String channel,
                                   Optional<String> resource,
                                   Set<String> commands,
                                   String callbackUrl) {
    messageSubscriptions.computeIfAbsent(commandDispatcherId, instanceId -> {
      MessageSubscription messageSubscription = messageConsumerImplementation.subscribe(commandDispatcherId,
              Collections.singleton(channel),
              message -> publishCommand(message, commandDispatcherId, resource, commands, callbackUrl));
      return messageSubscription;
    });
    return commandDispatcherId;
  }
  /**
   * Subscribes to an aggregate's event channel; matching events are forwarded
   * to {@code callbackUrl}. Keyed by {@code subscriberId}; returns it.
   */
  public String subscribeToEvent(String subscriberId,
                                 String aggregate,
                                 Set<String> events,
                                 String callbackUrl) {
    messageSubscriptions.computeIfAbsent(subscriberId, instanceId -> {
      MessageSubscription messageSubscription = messageConsumerImplementation.subscribe(subscriberId,
              Collections.singleton(aggregate),
              message -> publishEvent(message, aggregate, events, callbackUrl, subscriberId));
      return messageSubscription;
    });
    return subscriberId;
  }
  /**
   * Subscribes to raw messages on {@code channels}; each message is forwarded
   * to {@code callbackUrl}. Keyed by {@code subscriptionInstanceId};
   * returns it.
   */
  public String subscribeToMessage(String subscriberId,
                                   Set<String> channels,
                                   String callbackUrl,
                                   String subscriptionInstanceId) {
    messageSubscriptions.computeIfAbsent(subscriptionInstanceId, instanceId -> {
      MessageSubscription messageSubscription = messageConsumerImplementation.subscribe(subscriberId,
              channels,
              message -> publishMessage(message, callbackUrl, subscriberId, subscriptionInstanceId));
      return messageSubscription;
    });
    return subscriptionInstanceId;
  }
  // POSTs a raw message (id + headers + payload wrapped in HttpMessage) to
  // <callbackUrl>/<subscriptionInstanceId>.
  private void publishMessage(Message message,
                              String callbackUrl,
                              String subscriberId,
                              String subscriptionInstanceId) {
    String location = callbackUrl + "/" + subscriptionInstanceId;
    logger.debug("sending message {} to location {}", message, location);
    HttpHeaders headers = new HttpHeaders();
    headers.setContentType(MediaType.APPLICATION_JSON);
    addCommonHeaders(headers, subscriberId, message.getId());
    HttpMessage httpMessage = new HttpMessage(message.getId(), message.getHeaders(), message.getPayload());
    restTemplate.postForLocation(location, new HttpEntity<>(httpMessage, headers));
    logger.debug("sent message {} to location {}", message, location);
  }
  // Delivers one event to the subscriber's callback URL, skipping event types
  // the subscription didn't ask for.
  private void publishEvent(Message message,
                            String aggregate,
                            Set<String> events,
                            String callbackUrl,
                            String subscriberId) {
    logger.debug("publishing event {}", message);
    String event = message.getRequiredHeader(EventMessageHeaders.EVENT_TYPE);
    if (!events.contains(event)) {
      return;
    }
    // <url>/<subscriber>/<aggregate>/<aggregateId>/<eventType>/<messageId>
    String location = String.format("%s/%s/%s/%s/%s/%s",
            callbackUrl,
            subscriberId,
            aggregate,
            message.getRequiredHeader(EventMessageHeaders.AGGREGATE_ID),
            event,
            message.getRequiredHeader(Message.ID));
    logger.debug("sending event {} to location {}", message, location);
    HttpHeaders headers = new HttpHeaders();
    headers.setContentType(MediaType.APPLICATION_JSON);
    addCommonHeaders(headers, subscriberId, message.getId());
    restTemplate.postForLocation(location, new HttpEntity<>(message.getPayload(), headers));
    logger.debug("sent event {} to location {}", message, location);
  }
  // Delivers one command to the dispatcher's callback URL, skipping commands
  // whose type or resource doesn't match; correlation headers travel in an
  // HTTP header so the receiver can build the reply.
  private void publishCommand(Message message,
                              String commandDispatcherId,
                              Optional<String> resource,
                              Set<String> commands,
                              String callbackUrl) {
    logger.debug("publishing command {}", message);
    String command = message.getRequiredHeader(CommandMessageHeaders.COMMAND_TYPE);
    if (!commands.contains(command)) {
      return;
    }
    if (!shouldPublishResource(resource, message.getHeader(CommandMessageHeaders.RESOURCE))) {
      return;
    }
    String replyChannel = message.getRequiredHeader(CommandMessageHeaders.REPLY_TO);
    // <url>/<dispatcher>/<messageId>/<commandType>/<replyChannel><resource>
    String location =
            String.format("%s/%s/%s/%s/%s%s",
                    callbackUrl,
                    commandDispatcherId,
                    message.getId(),
                    command,
                    replyChannel,
                    resource.isPresent() ? message.getRequiredHeader(CommandMessageHeaders.RESOURCE) : "");
    logger.debug("sending command {} to location {}", message, location);
    HttpHeaders headers = new HttpHeaders();
    headers.setContentType(MediaType.APPLICATION_JSON);
    Map<String, String> correlationHeaders = correlationHeaders(message.getHeaders());
    headers.add(EventuateHttpHeaders.COMMAND_REPLY_HEADERS, JSonMapper.toJson(correlationHeaders));
    addCommonHeaders(headers, commandDispatcherId, message.getId());
    restTemplate.postForLocation(location, new HttpEntity<>(message.getPayload(), headers));
    logger.debug("sent command {} to location {}", message, location);
  }
  // A subscription with no resource filter accepts everything; otherwise the
  // message must carry a resource that satisfies the subscription's pattern.
  private boolean shouldPublishResource(Optional<String> resource, Optional<String> messageResource) {
    if (resource.isPresent()) {
      return messageResource
              .map(mr -> {
                ResourcePathPattern resourcePathPattern = ResourcePathPattern.parse(resource.get());
                ResourcePath resourcePath = ResourcePath.parse(mr);
                return resourcePathPattern.isSatisfiedBy(resourcePath);
              })
              .orElse(false);
    }
    return true;
  }
  // Builds the reply-side correlation headers: every command_ header is
  // renamed via inReply(), plus IN_REPLY_TO set to the original message id.
  private Map<String, String> correlationHeaders(Map<String, String> headers) {
    Map<String, String> m = headers.entrySet()
            .stream()
            .filter(e -> e.getKey().startsWith(CommandMessageHeaders.COMMAND_HEADER_PREFIX))
            .collect(Collectors.toMap(e -> CommandMessageHeaders.inReply(e.getKey()),
                    Map.Entry::getValue));
    m.put(ReplyMessageHeaders.IN_REPLY_TO, headers.get(Message.ID));
    return m;
  }
  /** Keep-alive: refreshes the request for a known subscription instance. */
  public void updateSubscription(String subscriptionInstanceId) {
    Optional
            .ofNullable(messageSubscriptions.get(subscriptionInstanceId))
            .ifPresent(subscription -> subscriptionRequestManager.touch(subscriptionInstanceId));
  }
  /** Removes the subscription request (does not tear down the consumer). */
  public void makeUnsubscriptionRequest(String subscriptionInstanceId) {
    subscriptionRequestManager.removeSubscriptionRequest(subscriptionInstanceId);
  }
  /** Tears down the broker subscription and deletes its persisted info. */
  public void unsubscribe(String subscriptionInstanceId) {
    Optional
            .ofNullable(messageSubscriptions.remove(subscriptionInstanceId))
            .ifPresent(MessageSubscription::unsubscribe);
    subscriptionPersistenceService.deleteSubscriptionInfo(subscriptionInstanceId);
  }
  // Random, globally unique id for a subscription instance.
  private String generateId() {
    return UUID.randomUUID().toString();
  }
  // Headers attached to every callback POST so the receiver can correlate.
  private void addCommonHeaders(HttpHeaders headers, String subscriberId, String messageId) {
    headers.add(EventuateHttpHeaders.SUBSCRIBER_ID, subscriberId);
    headers.add(EventuateHttpHeaders.MESSAGE_ID, messageId);
  }
}
|
// Find and print the largest value in the list.
let list = [1, 2, 4, 10, 8];
let highest = list.reduce((best, value) => (value > best ? value : best));
console.log(highest); // 10
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# Build the Ballista base and application docker images, tagged with the
# version exported by dev/build-set-env.sh.
set -e
. ./dev/build-set-env.sh
# Fail fast (and loudly) if the sourced script did not set the version;
# quoting the tag prevents word-splitting on odd values (shellcheck SC2086).
: "${BALLISTA_VERSION:?BALLISTA_VERSION not set by dev/build-set-env.sh}"
docker build -t "ballista-base:${BALLISTA_VERSION}" -f dev/docker/ballista-base.dockerfile .
docker build -t "ballista:${BALLISTA_VERSION}" -f dev/docker/ballista.dockerfile .
<reponame>smvv21/corteza-webapp-messaging
// Mutation type identifiers used by this store module.
const types = {
  pending: 'pending',
  completed: 'completed',
  updatePermissions: 'updatePermissions',
}

// Return the `allow` flag of the permission rule matching `operation`,
// or undefined when no such rule exists in state.
const findPermission = (state, operation) => {
  const rule = state.permissions.find(s => s.operation === operation)
  return rule ? rule.allow : undefined
}
// Vuex module factory for effective-permission state. `Messaging` is the API
// client used to fetch permissions.
export default function (Messaging) {
  return {
    namespaced: true,
    state: {
      permissions: [],
      pending: false,
    },
    getters: {
      pending: (state) => state.pending,
      canAccess: (state) => (findPermission(state, 'access')),
      canGrant: (state) => (findPermission(state, 'grant')),
      canCreateGroupChannel: (state) => (findPermission(state, 'channel.group.create')),
      canCreatePublicChannel: (state) => (findPermission(state, 'channel.public.create')),
      canCreatePrivateChannel: (state) => (findPermission(state, 'channel.private.create')),
    },
    actions: {
      // Fetch effective permissions. Previously the promise was neither
      // awaited nor error-handled, so the action resolved before the fetch
      // finished and a failed fetch left `pending` stuck at true. Awaiting
      // lets dispatch('load') callers wait for completion, and the finally
      // block always clears the pending flag.
      async load ({ commit }) {
        commit(types.pending)
        try {
          const permissions = await Messaging.permissionsEffective()
          commit(types.updatePermissions, permissions)
        } finally {
          commit(types.completed)
        }
      },
    },
    mutations: {
      [types.pending] (state) {
        state.pending = true
      },
      [types.completed] (state) {
        state.pending = false
      },
      [types.updatePermissions] (state, permissions) {
        if (state.permissions.length === 0) {
          state.permissions = permissions
        } else {
          permissions.forEach(per => {
            // Replaces given cmd due to an update
            const n = state.permissions.findIndex(p => p.operation === per.operation)
            // Doesn't yet exist -- add it
            if (n < 0) {
              state.permissions.push(per)
            } else {
              state.permissions.splice(n, 1, per)
            }
          })
        }
      },
    },
  }
}
|
#!/bin/bash
# Secure OpenVPN server installer for Debian, Ubuntu, CentOS, Amazon Linux 2, Fedora and Arch Linux
# https://github.com/angristan/openvpn-install
# Succeed (status 0) only when the script runs with effective UID 0.
function isRoot () {
	[ "$EUID" -eq 0 ]
}
# Succeed (status 0) only when the TUN device node exists.
function tunAvailable () {
	[ -e /dev/net/tun ]
}
# Detect the host distribution and set $OS to one of: debian, ubuntu, fedora,
# centos, amzn, arch. Exits (or asks the user to continue) on unsupported
# versions.
# NOTE(review): the version regexes below ("(8|9|10)", "(16.04|18.04|19.04)")
# are unanchored and use unescaped dots, so e.g. VERSION_ID=28 matches "8" —
# confirm whether stricter matching is wanted.
function checkOS () {
	if [[ -e /etc/debian_version ]]; then
		OS="debian"
		# shellcheck disable=SC1091
		source /etc/os-release
		if [[ "$ID" == "debian" || "$ID" == "raspbian" ]]; then
			if [[ ! $VERSION_ID =~ (8|9|10) ]]; then
				echo "⚠️ Your version of Debian is not supported."
				echo ""
				echo "However, if you're using Debian >= 9 or unstable/testing then you can continue."
				echo "Keep in mind they are not supported, though."
				echo ""
				# Let the user proceed at their own risk.
				until [[ $CONTINUE =~ (y|n) ]]; do
					read -rp "Continue? [y/n]: " -e CONTINUE
				done
				if [[ "$CONTINUE" = "n" ]]; then
					exit 1
				fi
			fi
		elif [[ "$ID" == "ubuntu" ]];then
			OS="ubuntu"
			if [[ ! $VERSION_ID =~ (16.04|18.04|19.04) ]]; then
				echo "⚠️ Your version of Ubuntu is not supported."
				echo ""
				echo "However, if you're using Ubuntu > 17 or beta, then you can continue."
				echo "Keep in mind they are not supported, though."
				echo ""
				until [[ $CONTINUE =~ (y|n) ]]; do
					read -rp "Continue? [y/n]: " -e CONTINUE
				done
				if [[ "$CONTINUE" = "n" ]]; then
					exit 1
				fi
			fi
		fi
	elif [[ -e /etc/system-release ]]; then
		# RPM-based family: fedora / centos / amazon linux.
		# shellcheck disable=SC1091
		source /etc/os-release
		if [[ "$ID" = "fedora" ]]; then
			OS="fedora"
		fi
		if [[ "$ID" = "centos" ]]; then
			OS="centos"
			if [[ ! $VERSION_ID =~ (7|8) ]]; then
				echo "⚠️ Your version of CentOS is not supported."
				echo ""
				echo "The script only support CentOS 7."
				echo ""
				exit 1
			fi
		fi
		if [[ "$ID" = "amzn" ]]; then
			OS="amzn"
			if [[ ! $VERSION_ID == "2" ]]; then
				echo "⚠️ Your version of Amazon Linux is not supported."
				echo ""
				echo "The script only support Amazon Linux 2."
				echo ""
				exit 1
			fi
		fi
	elif [[ -e /etc/arch-release ]]; then
		OS=arch
	else
		echo "Looks like you aren't running this installer on a Debian, Ubuntu, Fedora, CentOS, Amazon Linux 2 or Arch Linux system"
		exit 1
	fi
}
# Preconditions for installation: must be root, TUN must be available, and
# the distribution must be one we know (checkOS sets $OS or exits).
function initialCheck () {
	isRoot || { echo "Sorry, you need to run this as root"; exit 1; }
	tunAvailable || { echo "TUN is not available"; exit 1; }
	checkOS
}
# Install and configure the Unbound DNS resolver for VPN clients, listening
# on the OpenVPN gateway 10.8.0.1 and answering the 10.8.0.0/24 subnet.
# If Unbound is already installed, its config is left alone and a dedicated
# server block for the VPN subnet is added via an include file instead.
function installUnbound () {
	if [[ ! -e /etc/unbound/unbound.conf ]]; then
		if [[ "$OS" =~ (debian|ubuntu) ]]; then
			apt-get install -y unbound
			# Configuration
			echo 'interface: 10.8.0.1
access-control: 10.8.0.1/24 allow
hide-identity: yes
hide-version: yes
use-caps-for-id: yes
prefetch: yes' >> /etc/unbound/unbound.conf
		elif [[ "$OS" =~ (centos|amzn) ]]; then
			yum install -y unbound
			# Configuration: flip the distro's commented-out defaults in place.
			sed -i 's|# interface: 0.0.0.0$|interface: 10.8.0.1|' /etc/unbound/unbound.conf
			sed -i 's|# access-control: 127.0.0.0/8 allow|access-control: 10.8.0.1/24 allow|' /etc/unbound/unbound.conf
			sed -i 's|# hide-identity: no|hide-identity: yes|' /etc/unbound/unbound.conf
			sed -i 's|# hide-version: no|hide-version: yes|' /etc/unbound/unbound.conf
			sed -i 's|use-caps-for-id: no|use-caps-for-id: yes|' /etc/unbound/unbound.conf
		elif [[ "$OS" = "fedora" ]]; then
			dnf install -y unbound
			# Configuration
			sed -i 's|# interface: 0.0.0.0$|interface: 10.8.0.1|' /etc/unbound/unbound.conf
			sed -i 's|# access-control: 127.0.0.0/8 allow|access-control: 10.8.0.1/24 allow|' /etc/unbound/unbound.conf
			sed -i 's|# hide-identity: no|hide-identity: yes|' /etc/unbound/unbound.conf
			sed -i 's|# hide-version: no|hide-version: yes|' /etc/unbound/unbound.conf
			sed -i 's|# use-caps-for-id: no|use-caps-for-id: yes|' /etc/unbound/unbound.conf
		elif [[ "$OS" = "arch" ]]; then
			pacman -Syu --noconfirm unbound
			# Get root servers list
			curl -o /etc/unbound/root.hints https://www.internic.net/domain/named.cache
			# Arch ships no usable default config; replace it wholesale.
			mv /etc/unbound/unbound.conf /etc/unbound/unbound.conf.old
			echo 'server:
use-syslog: yes
do-daemonize: no
username: "unbound"
directory: "/etc/unbound"
trust-anchor-file: trusted-key.key
root-hints: root.hints
interface: 10.8.0.1
access-control: 10.8.0.1/24 allow
port: 53
num-threads: 2
use-caps-for-id: yes
harden-glue: yes
hide-identity: yes
hide-version: yes
qname-minimisation: yes
prefetch: yes' > /etc/unbound/unbound.conf
		fi
		# On RPM-based systems the packaged config already carries the
		# private-address entries, so only append them elsewhere.
		if [[ ! "$OS" =~ (fedora|centos|amzn) ]];then
			# DNS Rebinding fix
			echo "private-address: 10.0.0.0/8
private-address: 172.16.0.0/12
private-address: 192.168.0.0/16
private-address: 169.254.0.0/16
private-address: fd00::/8
private-address: fe80::/10
private-address: 127.0.0.0/8
private-address: ::ffff:0:0/96" >> /etc/unbound/unbound.conf
		fi
	else # Unbound is already installed
		echo 'include: /etc/unbound/openvpn.conf' >> /etc/unbound/unbound.conf
		# Add Unbound 'server' for the OpenVPN subnet
		echo 'server:
interface: 10.8.0.1
access-control: 10.8.0.1/24 allow
hide-identity: yes
hide-version: yes
use-caps-for-id: yes
prefetch: yes
private-address: 10.0.0.0/8
private-address: 172.16.0.0/12
private-address: 192.168.0.0/16
private-address: 169.254.0.0/16
private-address: fd00::/8
private-address: fe80::/10
private-address: 127.0.0.0/8
private-address: ::ffff:0:0/96' > /etc/unbound/openvpn.conf
	fi
	systemctl enable unbound
	systemctl restart unbound
}
# Interactive setup questionnaire. Populates the global variables consumed by
# the installer: IP/ENDPOINT, IPV6_SUPPORT, PORT, PROTOCOL, DNS (+DNS1/DNS2),
# COMPRESSION_ENABLED/COMPRESSION_ALG, and the encryption settings (CIPHER,
# CERT_TYPE/CERT_CURVE/RSA_KEY_SIZE, CC_CIPHER, DH_TYPE/DH_CURVE/DH_KEY_SIZE,
# HMAC_ALG, TLS_SIG). Pre-seeded environment variables skip the questions.
# Fixes: the DNS prompt said "[1-12]" although the menu has 13 entries, and
# the compression menu labelled the lzo algorithm "LZ0" (digit zero).
function installQuestions () {
	echo "Welcome to the OpenVPN installer!"
	echo "The git repository is available at: https://github.com/angristan/openvpn-install"
	echo ""
	echo "I need to ask you a few questions before starting the setup."
	echo "You can leave the default options and just press enter if you are ok with them."
	echo ""
	echo "I need to know the IPv4 address of the network interface you want OpenVPN listening to."
	echo "Unless your server is behind NAT, it should be your public IPv4 address."
	# Detect public IPv4 address and pre-fill for the user
	IP=$(ip addr | grep 'inet' | grep -v inet6 | grep -vE '127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | head -1)
	APPROVE_IP=${APPROVE_IP:-n}
	if [[ $APPROVE_IP =~ n ]]; then
		read -rp "IP address: " -e -i "$IP" IP
	fi
	# If $IP is a private IP address, the server must be behind NAT
	if echo "$IP" | grep -qE '^(10\.|172\.1[6789]\.|172\.2[0-9]\.|172\.3[01]\.|192\.168)'; then
		echo ""
		echo "It seems this server is behind NAT. What is its public IPv4 address or hostname?"
		echo "We need it for the clients to connect to the server."
		until [[ "$ENDPOINT" != "" ]]; do
			read -rp "Public IPv4 address or hostname: " -e ENDPOINT
		done
	fi
	echo ""
	echo "Checking for IPv6 connectivity..."
	echo ""
	# "ping6" and "ping -6" availability varies depending on the distribution
	if type ping6 > /dev/null 2>&1; then
		PING6="ping6 -c3 ipv6.google.com > /dev/null 2>&1"
	else
		PING6="ping -6 -c3 ipv6.google.com > /dev/null 2>&1"
	fi
	if eval "$PING6"; then
		echo "Your host appears to have IPv6 connectivity."
		SUGGESTION="y"
	else
		echo "Your host does not appear to have IPv6 connectivity."
		SUGGESTION="n"
	fi
	echo ""
	# Ask the user if they want to enable IPv6 regardless its availability.
	until [[ $IPV6_SUPPORT =~ (y|n) ]]; do
		read -rp "Do you want to enable IPv6 support (NAT)? [y/n]: " -e -i $SUGGESTION IPV6_SUPPORT
	done
	echo ""
	echo "What port do you want OpenVPN to listen to?"
	echo "   1) Default: 1194"
	echo "   2) Custom"
	echo "   3) Random [49152-65535]"
	until [[ "$PORT_CHOICE" =~ ^[1-3]$ ]]; do
		read -rp "Port choice [1-3]: " -e -i 1 PORT_CHOICE
	done
	case $PORT_CHOICE in
		1)
			PORT="1194"
		;;
		2)
			until [[ "$PORT" =~ ^[0-9]+$ ]] && [ "$PORT" -ge 1 ] && [ "$PORT" -le 65535 ]; do
				read -rp "Custom port [1-65535]: " -e -i 1194 PORT
			done
		;;
		3)
			# Generate random number within private ports range
			PORT=$(shuf -i49152-65535 -n1)
			echo "Random Port: $PORT"
		;;
	esac
	echo ""
	echo "What protocol do you want OpenVPN to use?"
	echo "UDP is faster. Unless it is not available, you shouldn't use TCP."
	echo "   1) UDP"
	echo "   2) TCP"
	until [[ "$PROTOCOL_CHOICE" =~ ^[1-2]$ ]]; do
		read -rp "Protocol [1-2]: " -e -i 1 PROTOCOL_CHOICE
	done
	case $PROTOCOL_CHOICE in
		1)
			PROTOCOL="udp"
		;;
		2)
			PROTOCOL="tcp"
		;;
	esac
	echo ""
	echo "What DNS resolvers do you want to use with the VPN?"
	echo "   1) Current system resolvers (from /etc/resolv.conf)"
	echo "   2) Self-hosted DNS Resolver (Unbound)"
	echo "   3) Cloudflare (Anycast: worldwide)"
	echo "   4) Quad9 (Anycast: worldwide)"
	echo "   5) Quad9 uncensored (Anycast: worldwide)"
	echo "   6) FDN (France)"
	echo "   7) DNS.WATCH (Germany)"
	echo "   8) OpenDNS (Anycast: worldwide)"
	echo "   9) Google (Anycast: worldwide)"
	echo "   10) Yandex Basic (Russia)"
	echo "   11) AdGuard DNS (Russia)"
	echo "   12) NextDNS (Worldwide)"
	echo "   13) Custom"
	until [[ "$DNS" =~ ^[0-9]+$ ]] && [ "$DNS" -ge 1 ] && [ "$DNS" -le 13 ]; do
		# Prompt fixed: the menu has 13 entries and 13 is accepted above.
		read -rp "DNS [1-13]: " -e -i 3 DNS
		if [[ $DNS == 2 ]] && [[ -e /etc/unbound/unbound.conf ]]; then
			echo ""
			echo "Unbound is already installed."
			echo "You can allow the script to configure it in order to use it from your OpenVPN clients"
			echo "We will simply add a second server to /etc/unbound/unbound.conf for the OpenVPN subnet."
			echo "No changes are made to the current configuration."
			echo ""
			until [[ $CONTINUE =~ (y|n) ]]; do
				read -rp "Apply configuration changes to Unbound? [y/n]: " -e CONTINUE
			done
			if [[ $CONTINUE = "n" ]];then
				# Break the loop and cleanup
				unset DNS
				unset CONTINUE
			fi
		elif [[ $DNS == "13" ]]; then
			until [[ "$DNS1" =~ ^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$ ]]; do
				read -rp "Primary DNS: " -e DNS1
			done
			until [[ "$DNS2" =~ ^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$ ]]; do
				read -rp "Secondary DNS (optional): " -e DNS2
				if [[ "$DNS2" == "" ]]; then
					break
				fi
			done
		fi
	done
	echo ""
	echo "Do you want to use compression? It is not recommended since the VORACLE attack make use of it."
	until [[ $COMPRESSION_ENABLED =~ (y|n) ]]; do
		read -rp"Enable compression? [y/n]: " -e -i n COMPRESSION_ENABLED
	done
	if [[ $COMPRESSION_ENABLED == "y" ]];then
		echo "Choose which compression algorithm you want to use: (they are ordered by efficiency)"
		echo "   1) LZ4-v2"
		echo "   2) LZ4"
		echo "   3) LZO"
		until [[ $COMPRESSION_CHOICE =~ ^[1-3]$ ]]; do
			read -rp"Compression algorithm [1-3]: " -e -i 1 COMPRESSION_CHOICE
		done
		case $COMPRESSION_CHOICE in
			1)
				COMPRESSION_ALG="lz4-v2"
			;;
			2)
				COMPRESSION_ALG="lz4"
			;;
			3)
				COMPRESSION_ALG="lzo"
			;;
		esac
	fi
	echo ""
	echo "Do you want to customize encryption settings?"
	echo "Unless you know what you're doing, you should stick with the default parameters provided by the script."
	echo "Note that whatever you choose, all the choices presented in the script are safe. (Unlike OpenVPN's defaults)"
	echo "See https://github.com/angristan/openvpn-install#security-and-encryption to learn more."
	echo ""
	until [[ $CUSTOMIZE_ENC =~ (y|n) ]]; do
		read -rp "Customize encryption settings? [y/n]: " -e -i n CUSTOMIZE_ENC
	done
	if [[ $CUSTOMIZE_ENC == "n" ]];then
		# Use default, sane and fast parameters
		CIPHER="AES-128-GCM"
		CERT_TYPE="1" # ECDSA
		CERT_CURVE="prime256v1"
		CC_CIPHER="TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256"
		DH_TYPE="1" # ECDH
		DH_CURVE="prime256v1"
		HMAC_ALG="SHA256"
		TLS_SIG="2" # tls-auth
	else
		echo ""
		echo "Choose which cipher you want to use for the data channel:"
		echo "   1) AES-128-GCM (recommended)"
		echo "   2) AES-192-GCM"
		echo "   3) AES-256-GCM"
		echo "   4) AES-128-CBC"
		echo "   5) AES-192-CBC"
		echo "   6) AES-256-CBC"
		until [[ "$CIPHER_CHOICE" =~ ^[1-6]$ ]]; do
			read -rp "Cipher [1-6]: " -e -i 1 CIPHER_CHOICE
		done
		case $CIPHER_CHOICE in
			1)
				CIPHER="AES-128-GCM"
			;;
			2)
				CIPHER="AES-192-GCM"
			;;
			3)
				CIPHER="AES-256-GCM"
			;;
			4)
				CIPHER="AES-128-CBC"
			;;
			5)
				CIPHER="AES-192-CBC"
			;;
			6)
				CIPHER="AES-256-CBC"
			;;
		esac
		echo ""
		echo "Choose what kind of certificate you want to use:"
		echo "   1) ECDSA (recommended)"
		echo "   2) RSA"
		until [[ $CERT_TYPE =~ ^[1-2]$ ]]; do
			read -rp"Certificate key type [1-2]: " -e -i 1 CERT_TYPE
		done
		case $CERT_TYPE in
			1)
				echo ""
				echo "Choose which curve you want to use for the certificate's key:"
				echo "   1) prime256v1 (recommended)"
				echo "   2) secp384r1"
				echo "   3) secp521r1"
				until [[ $CERT_CURVE_CHOICE =~ ^[1-3]$ ]]; do
					read -rp"Curve [1-3]: " -e -i 1 CERT_CURVE_CHOICE
				done
				case $CERT_CURVE_CHOICE in
					1)
						CERT_CURVE="prime256v1"
					;;
					2)
						CERT_CURVE="secp384r1"
					;;
					3)
						CERT_CURVE="secp521r1"
					;;
				esac
			;;
			2)
				echo ""
				echo "Choose which size you want to use for the certificate's RSA key:"
				echo "   1) 2048 bits (recommended)"
				echo "   2) 3072 bits"
				echo "   3) 4096 bits"
				until [[ "$RSA_KEY_SIZE_CHOICE" =~ ^[1-3]$ ]]; do
					read -rp "RSA key size [1-3]: " -e -i 1 RSA_KEY_SIZE_CHOICE
				done
				case $RSA_KEY_SIZE_CHOICE in
					1)
						RSA_KEY_SIZE="2048"
					;;
					2)
						RSA_KEY_SIZE="3072"
					;;
					3)
						RSA_KEY_SIZE="4096"
					;;
				esac
			;;
		esac
		echo ""
		echo "Choose which cipher you want to use for the control channel:"
		# The control-channel cipher family must match the certificate type.
		case $CERT_TYPE in
			1)
				echo "   1) ECDHE-ECDSA-AES-128-GCM-SHA256 (recommended)"
				echo "   2) ECDHE-ECDSA-AES-256-GCM-SHA384"
				until [[ $CC_CIPHER_CHOICE =~ ^[1-2]$ ]]; do
					read -rp"Control channel cipher [1-2]: " -e -i 1 CC_CIPHER_CHOICE
				done
				case $CC_CIPHER_CHOICE in
					1)
						CC_CIPHER="TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256"
					;;
					2)
						CC_CIPHER="TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384"
					;;
				esac
			;;
			2)
				echo "   1) ECDHE-RSA-AES-128-GCM-SHA256 (recommended)"
				echo "   2) ECDHE-RSA-AES-256-GCM-SHA384"
				until [[ $CC_CIPHER_CHOICE =~ ^[1-2]$ ]]; do
					read -rp"Control channel cipher [1-2]: " -e -i 1 CC_CIPHER_CHOICE
				done
				case $CC_CIPHER_CHOICE in
					1)
						CC_CIPHER="TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256"
					;;
					2)
						CC_CIPHER="TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384"
					;;
				esac
			;;
		esac
		echo ""
		echo "Choose what kind of Diffie-Hellman key you want to use:"
		echo "   1) ECDH (recommended)"
		echo "   2) DH"
		until [[ $DH_TYPE =~ [1-2] ]]; do
			read -rp"DH key type [1-2]: " -e -i 1 DH_TYPE
		done
		case $DH_TYPE in
			1)
				echo ""
				echo "Choose which curve you want to use for the ECDH key:"
				echo "   1) prime256v1 (recommended)"
				echo "   2) secp384r1"
				echo "   3) secp521r1"
				while [[ $DH_CURVE_CHOICE != "1" && $DH_CURVE_CHOICE != "2" && $DH_CURVE_CHOICE != "3" ]]; do
					read -rp"Curve [1-3]: " -e -i 1 DH_CURVE_CHOICE
				done
				case $DH_CURVE_CHOICE in
					1)
						DH_CURVE="prime256v1"
					;;
					2)
						DH_CURVE="secp384r1"
					;;
					3)
						DH_CURVE="secp521r1"
					;;
				esac
			;;
			2)
				echo ""
				echo "Choose what size of Diffie-Hellman key you want to use:"
				echo "   1) 2048 bits (recommended)"
				echo "   2) 3072 bits"
				echo "   3) 4096 bits"
				until [[ "$DH_KEY_SIZE_CHOICE" =~ ^[1-3]$ ]]; do
					read -rp "DH key size [1-3]: " -e -i 1 DH_KEY_SIZE_CHOICE
				done
				case $DH_KEY_SIZE_CHOICE in
					1)
						DH_KEY_SIZE="2048"
					;;
					2)
						DH_KEY_SIZE="3072"
					;;
					3)
						DH_KEY_SIZE="4096"
					;;
				esac
			;;
		esac
		echo ""
		# The "auth" options behaves differently with AEAD ciphers
		if [[ "$CIPHER" =~ CBC$ ]]; then
			echo "The digest algorithm authenticates data channel packets and tls-auth packets from the control channel."
		elif [[ "$CIPHER" =~ GCM$ ]]; then
			echo "The digest algorithm authenticates tls-auth packets from the control channel."
		fi
		echo "Which digest algorithm do you want to use for HMAC?"
		echo "   1) SHA-256 (recommended)"
		echo "   2) SHA-384"
		echo "   3) SHA-512"
		until [[ $HMAC_ALG_CHOICE =~ ^[1-3]$ ]]; do
			read -rp "Digest algorithm [1-3]: " -e -i 1 HMAC_ALG_CHOICE
		done
		case $HMAC_ALG_CHOICE in
			1)
				HMAC_ALG="SHA256"
			;;
			2)
				HMAC_ALG="SHA384"
			;;
			3)
				HMAC_ALG="SHA512"
			;;
		esac
		echo ""
		echo "You can add an additional layer of security to the control channel with tls-auth and tls-crypt"
		echo "tls-auth authenticates the packets, while tls-crypt authenticate and encrypt them."
		echo "   1) tls-crypt (recommended)"
		echo "   2) tls-auth"
		until [[ $TLS_SIG =~ [1-2] ]]; do
			read -rp "Control channel additional security mechanism [1-2]: " -e -i 1 TLS_SIG
		done
	fi
	echo ""
	echo "Okay, that was all I needed. We are ready to setup your OpenVPN server now."
	echo "You will be able to generate a client at the end of the installation."
	APPROVE_INSTALL=${APPROVE_INSTALL:-n}
	if [[ $APPROVE_INSTALL =~ n ]]; then
		read -n1 -r -p "Press any key to continue..."
	fi
}
# Performs the full server installation: applies headless defaults (or asks
# the setup questions), installs distro packages, builds an easy-rsa PKI,
# writes /etc/openvpn/server.conf, configures routing/iptables/systemd and
# finally generates the first client configuration via newClient.
function installOpenVPN () {
if [[ $AUTO_INSTALL == "y" ]]; then
# Set default choices so that no questions will be asked.
APPROVE_INSTALL=${APPROVE_INSTALL:-y}
APPROVE_IP=${APPROVE_IP:-y}
IPV6_SUPPORT=${IPV6_SUPPORT:-n}
PORT_CHOICE=${PORT_CHOICE:-1}
PROTOCOL_CHOICE=${PROTOCOL_CHOICE:-1}
DNS=${DNS:-1}
COMPRESSION_ENABLED=${COMPRESSION_ENABLED:-n}
CUSTOMIZE_ENC=${CUSTOMIZE_ENC:-n}
CLIENT=${CLIENT:-client}
PASS=${PASS:-1}
CONTINUE=${CONTINUE:-y}
# Behind NAT, we'll default to the publicly reachable IPv4.
# NOTE(review): bare "curl ifconfig.co" has no -s/-4/timeout flags and may
# return an IPv6 address on dual-stack hosts — confirm.
PUBLIC_IPV4=$(curl ifconfig.co)
ENDPOINT=${ENDPOINT:-$PUBLIC_IPV4}
fi
# Run setup questions first, and set other variables if auto-install
installQuestions
# Get the "public" interface from the default route
NIC=$(ip -4 route ls | grep default | grep -Po '(?<=dev )(\S+)' | head -1)
# Package installation, per supported distribution family.
if [[ "$OS" =~ (debian|ubuntu) ]]; then
apt-get update
apt-get -y install ca-certificates gnupg
# We add the OpenVPN repo to get the latest version.
if [[ "$VERSION_ID" = "8" ]]; then
echo "deb http://build.openvpn.net/debian/openvpn/stable jessie main" > /etc/apt/sources.list.d/openvpn.list
wget -O - https://swupdate.openvpn.net/repos/repo-public.gpg | apt-key add -
apt-get update
fi
if [[ "$VERSION_ID" = "16.04" ]]; then
echo "deb http://build.openvpn.net/debian/openvpn/stable xenial main" > /etc/apt/sources.list.d/openvpn.list
wget -O - https://swupdate.openvpn.net/repos/repo-public.gpg | apt-key add -
apt-get update
fi
# Ubuntu > 16.04 and Debian > 8 have OpenVPN >= 2.4 without the need of a third party repository.
apt-get install -y openvpn iptables openssl wget ca-certificates curl
elif [[ "$OS" = 'centos' ]]; then
yum install -y epel-release
yum install -y openvpn iptables openssl wget ca-certificates curl tar 'policycoreutils-python*'
elif [[ "$OS" = 'amzn' ]]; then
amazon-linux-extras install -y epel
yum install -y openvpn iptables openssl wget ca-certificates curl
elif [[ "$OS" = 'fedora' ]]; then
dnf install -y openvpn iptables openssl wget ca-certificates curl
elif [[ "$OS" = 'arch' ]]; then
# Install required dependencies and upgrade the system
pacman --needed --noconfirm -Syu openvpn iptables openssl wget ca-certificates curl
fi
# Find out if the machine uses nogroup or nobody for the permissionless group
if grep -qs "^nogroup:" /etc/group; then
NOGROUP=nogroup
else
NOGROUP=nobody
fi
# An old version of easy-rsa was available by default in some openvpn packages
if [[ -d /etc/openvpn/easy-rsa/ ]]; then
rm -rf /etc/openvpn/easy-rsa/
fi
# Install the latest version of easy-rsa from source
local version="3.0.6"
wget -O ~/EasyRSA-unix-v${version}.tgz https://github.com/OpenVPN/easy-rsa/releases/download/v${version}/EasyRSA-unix-v${version}.tgz
tar xzf ~/EasyRSA-unix-v${version}.tgz -C ~/
mv ~/EasyRSA-v${version} /etc/openvpn/easy-rsa
chown -R root:root /etc/openvpn/easy-rsa/
rm -f ~/EasyRSA-unix-v${version}.tgz
cd /etc/openvpn/easy-rsa/ || return
# Write the easy-rsa "vars" file according to the chosen certificate type.
case $CERT_TYPE in
1)
echo "set_var EASYRSA_ALGO ec" > vars
echo "set_var EASYRSA_CURVE $CERT_CURVE" >> vars
;;
2)
echo "set_var EASYRSA_KEY_SIZE $RSA_KEY_SIZE" > vars
;;
esac
# Generate a random, alphanumeric identifier of 16 characters for CN and one for server name
SERVER_CN="cn_$(head /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)"
SERVER_NAME="server_$(head /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)"
echo "set_var EASYRSA_REQ_CN $SERVER_CN" >> vars
# Create the PKI, set up the CA, the DH params and the server certificate
./easyrsa init-pki
# Workaround to remove unharmful error until easy-rsa 3.0.7
# https://github.com/OpenVPN/easy-rsa/issues/261
sed -i 's/^RANDFILE/#RANDFILE/g' pki/openssl-easyrsa.cnf
./easyrsa --batch build-ca nopass
if [[ $DH_TYPE == "2" ]]; then
# ECDH keys are generated on-the-fly so we don't need to generate them beforehand
openssl dhparam -out dh.pem $DH_KEY_SIZE
fi
./easyrsa build-server-full "$SERVER_NAME" nopass
EASYRSA_CRL_DAYS=3650 ./easyrsa gen-crl
case $TLS_SIG in
1)
# Generate tls-crypt key
openvpn --genkey --secret /etc/openvpn/tls-crypt.key
;;
2)
# Generate tls-auth key
openvpn --genkey --secret /etc/openvpn/tls-auth.key
;;
esac
# Move all the generated files
cp pki/ca.crt pki/private/ca.key "pki/issued/$SERVER_NAME.crt" "pki/private/$SERVER_NAME.key" /etc/openvpn/easy-rsa/pki/crl.pem /etc/openvpn
if [[ $DH_TYPE == "2" ]]; then
cp dh.pem /etc/openvpn
fi
# Make cert revocation list readable for non-root
chmod 644 /etc/openvpn/crl.pem
# Generate server.conf
echo "port $PORT" > /etc/openvpn/server.conf
if [[ "$IPV6_SUPPORT" = 'n' ]]; then
echo "proto $PROTOCOL" >> /etc/openvpn/server.conf
elif [[ "$IPV6_SUPPORT" = 'y' ]]; then
echo "proto ${PROTOCOL}6" >> /etc/openvpn/server.conf
fi
echo "dev tun
user nobody
group $NOGROUP
persist-key
persist-tun
keepalive 10 120
topology subnet
server 10.8.0.0 255.255.255.0
ifconfig-pool-persist ipp.txt" >> /etc/openvpn/server.conf
# DNS resolvers
case $DNS in
1)
# Locate the proper resolv.conf
# Needed for systems running systemd-resolved
if grep -q "127.0.0.53" "/etc/resolv.conf"; then
RESOLVCONF='/run/systemd/resolve/resolv.conf'
else
RESOLVCONF='/etc/resolv.conf'
fi
# Obtain the resolvers from resolv.conf and use them for OpenVPN
grep -v '#' $RESOLVCONF | grep 'nameserver' | grep -E -o '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | while read -r line; do
echo "push \"dhcp-option DNS $line\"" >> /etc/openvpn/server.conf
done
;;
2)
# Self-hosted resolver on the VPN gateway address (Unbound, installed below)
echo 'push "dhcp-option DNS 10.8.0.1"' >> /etc/openvpn/server.conf
;;
3) # Cloudflare
echo 'push "dhcp-option DNS 1.0.0.1"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 1.1.1.1"' >> /etc/openvpn/server.conf
;;
4) # Quad9
echo 'push "dhcp-option DNS 9.9.9.9"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 149.112.112.112"' >> /etc/openvpn/server.conf
;;
5) # Quad9 uncensored
echo 'push "dhcp-option DNS 9.9.9.10"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 149.112.112.10"' >> /etc/openvpn/server.conf
;;
6) # FDN
echo 'push "dhcp-option DNS 80.67.169.40"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 80.67.169.12"' >> /etc/openvpn/server.conf
;;
7) # DNS.WATCH
echo 'push "dhcp-option DNS 84.200.69.80"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 84.200.70.40"' >> /etc/openvpn/server.conf
;;
8) # OpenDNS
echo 'push "dhcp-option DNS 208.67.222.222"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 208.67.220.220"' >> /etc/openvpn/server.conf
;;
9) # Google
echo 'push "dhcp-option DNS 8.8.8.8"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 8.8.4.4"' >> /etc/openvpn/server.conf
;;
10) # Yandex Basic
echo 'push "dhcp-option DNS 77.88.8.8"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 77.88.8.1"' >> /etc/openvpn/server.conf
;;
11) # AdGuard DNS
echo 'push "dhcp-option DNS 176.103.130.130"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 176.103.130.131"' >> /etc/openvpn/server.conf
;;
12) # NextDNS
echo 'push "dhcp-option DNS 45.90.28.167"' >> /etc/openvpn/server.conf
echo 'push "dhcp-option DNS 45.90.30.167"' >> /etc/openvpn/server.conf
;;
13) # Custom DNS
echo "push \"dhcp-option DNS $DNS1\"" >> /etc/openvpn/server.conf
if [[ "$DNS2" != "" ]]; then
echo "push \"dhcp-option DNS $DNS2\"" >> /etc/openvpn/server.conf
fi
;;
esac
echo 'push "redirect-gateway def1 bypass-dhcp"' >> /etc/openvpn/server.conf
# IPv6 network settings if needed
if [[ "$IPV6_SUPPORT" = 'y' ]]; then
echo 'server-ipv6 fd42:42:42:42::/112
tun-ipv6
push tun-ipv6
push "route-ipv6 2000::/3"
push "redirect-gateway ipv6"' >> /etc/openvpn/server.conf
fi
if [[ $COMPRESSION_ENABLED == "y" ]]; then
echo "compress $COMPRESSION_ALG" >> /etc/openvpn/server.conf
fi
# DH_TYPE 1 = ECDH (no dh.pem needed), 2 = classic DH parameters
if [[ $DH_TYPE == "1" ]]; then
echo "dh none" >> /etc/openvpn/server.conf
echo "ecdh-curve $DH_CURVE" >> /etc/openvpn/server.conf
elif [[ $DH_TYPE == "2" ]]; then
echo "dh dh.pem" >> /etc/openvpn/server.conf
fi
case $TLS_SIG in
1)
echo "tls-crypt tls-crypt.key 0" >> /etc/openvpn/server.conf
;;
2)
echo "tls-auth tls-auth.key 0" >> /etc/openvpn/server.conf
;;
esac
echo "crl-verify crl.pem
ca ca.crt
cert $SERVER_NAME.crt
key $SERVER_NAME.key
auth $HMAC_ALG
cipher $CIPHER
ncp-ciphers $CIPHER
tls-server
tls-version-min 1.2
tls-cipher $CC_CIPHER
status /var/log/openvpn/status.log
verb 3" >> /etc/openvpn/server.conf
# Create log dir
mkdir -p /var/log/openvpn
# Enable routing
echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.d/20-openvpn.conf
if [[ "$IPV6_SUPPORT" = 'y' ]]; then
echo 'net.ipv6.conf.all.forwarding=1' >> /etc/sysctl.d/20-openvpn.conf
fi
# Apply sysctl rules
sysctl --system
# If SELinux is enabled and a custom port was selected, we need this
if hash sestatus 2>/dev/null; then
if sestatus | grep "Current mode" | grep -qs "enforcing"; then
if [[ "$PORT" != '1194' ]]; then
semanage port -a -t openvpn_port_t -p "$PROTOCOL" "$PORT"
fi
fi
fi
# Finally, restart and enable OpenVPN
if [[ "$OS" = 'arch' || "$OS" = 'fedora' || "$OS" = 'centos' ]]; then
# Don't modify package-provided service
cp /usr/lib/systemd/system/openvpn-server@.service /etc/systemd/system/openvpn-server@.service
# Workaround to fix OpenVPN service on OpenVZ
sed -i 's|LimitNPROC|#LimitNPROC|' /etc/systemd/system/openvpn-server@.service
# Another workaround to keep using /etc/openvpn/
sed -i 's|/etc/openvpn/server|/etc/openvpn|' /etc/systemd/system/openvpn-server@.service
# On fedora, the service hardcodes the ciphers. We want to manage the cipher ourselves, so we remove it from the service
if [[ "$OS" == "fedora" ]];then
sed -i 's|--cipher AES-256-GCM --ncp-ciphers AES-256-GCM:AES-128-GCM:AES-256-CBC:AES-128-CBC:BF-CBC||' /etc/systemd/system/openvpn-server@.service
fi
systemctl daemon-reload
systemctl restart openvpn-server@server
systemctl enable openvpn-server@server
elif [[ "$OS" == "ubuntu" ]] && [[ "$VERSION_ID" == "16.04" ]]; then
# On Ubuntu 16.04, we use the package from the OpenVPN repo
# This package uses a sysvinit service
systemctl enable openvpn
systemctl start openvpn
else
# Don't modify package-provided service
cp /lib/systemd/system/openvpn\@.service /etc/systemd/system/openvpn\@.service
# Workaround to fix OpenVPN service on OpenVZ
sed -i 's|LimitNPROC|#LimitNPROC|' /etc/systemd/system/openvpn\@.service
# Another workaround to keep using /etc/openvpn/
sed -i 's|/etc/openvpn/server|/etc/openvpn|' /etc/systemd/system/openvpn\@.service
systemctl daemon-reload
systemctl restart openvpn@server
systemctl enable openvpn@server
fi
# DNS choice 2 is the self-hosted Unbound resolver
if [[ $DNS == 2 ]];then
installUnbound
fi
# Add iptables rules in two scripts
mkdir /etc/iptables
# Script to add rules
echo "#!/bin/sh
iptables -t nat -I POSTROUTING 1 -s 10.8.0.0/24 -o $NIC -j MASQUERADE
iptables -I INPUT 1 -i tun0 -j ACCEPT
iptables -I FORWARD 1 -i $NIC -o tun0 -j ACCEPT
iptables -I FORWARD 1 -i tun0 -o $NIC -j ACCEPT
iptables -I INPUT 1 -i $NIC -p $PROTOCOL --dport $PORT -j ACCEPT" > /etc/iptables/add-openvpn-rules.sh
if [[ "$IPV6_SUPPORT" = 'y' ]]; then
echo "ip6tables -t nat -I POSTROUTING 1 -s fd42:42:42:42::/112 -o $NIC -j MASQUERADE
ip6tables -I INPUT 1 -i tun0 -j ACCEPT
ip6tables -I FORWARD 1 -i $NIC -o tun0 -j ACCEPT
ip6tables -I FORWARD 1 -i tun0 -o $NIC -j ACCEPT" >> /etc/iptables/add-openvpn-rules.sh
fi
# Script to remove rules
echo "#!/bin/sh
iptables -t nat -D POSTROUTING -s 10.8.0.0/24 -o $NIC -j MASQUERADE
iptables -D INPUT -i tun0 -j ACCEPT
iptables -D FORWARD -i $NIC -o tun0 -j ACCEPT
iptables -D FORWARD -i tun0 -o $NIC -j ACCEPT
iptables -D INPUT -i $NIC -p $PROTOCOL --dport $PORT -j ACCEPT" > /etc/iptables/rm-openvpn-rules.sh
if [[ "$IPV6_SUPPORT" = 'y' ]]; then
echo "ip6tables -t nat -D POSTROUTING -s fd42:42:42:42::/112 -o $NIC -j MASQUERADE
ip6tables -D INPUT -i tun0 -j ACCEPT
ip6tables -D FORWARD -i $NIC -o tun0 -j ACCEPT
ip6tables -D FORWARD -i tun0 -o $NIC -j ACCEPT" >> /etc/iptables/rm-openvpn-rules.sh
fi
chmod +x /etc/iptables/add-openvpn-rules.sh
chmod +x /etc/iptables/rm-openvpn-rules.sh
# Handle the rules via a systemd script
echo "[Unit]
Description=iptables rules for OpenVPN
Before=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/etc/iptables/add-openvpn-rules.sh
ExecStop=/etc/iptables/rm-openvpn-rules.sh
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target" > /etc/systemd/system/iptables-openvpn.service
# Enable service and apply rules
systemctl daemon-reload
systemctl enable iptables-openvpn
systemctl start iptables-openvpn
# If the server is behind a NAT, use the correct IP address for the clients to connect to
if [[ "$ENDPOINT" != "" ]]; then
IP=$ENDPOINT
fi
# client-template.txt is created so we have a template to add further users later
echo "client" > /etc/openvpn/client-template.txt
if [[ "$PROTOCOL" = 'udp' ]]; then
echo "proto udp" >> /etc/openvpn/client-template.txt
elif [[ "$PROTOCOL" = 'tcp' ]]; then
echo "proto tcp-client" >> /etc/openvpn/client-template.txt
fi
echo "remote $IP $PORT
dev tun
resolv-retry infinite
nobind
persist-key
persist-tun
remote-cert-tls server
verify-x509-name $SERVER_NAME name
auth $HMAC_ALG
auth-nocache
cipher $CIPHER
tls-client
tls-version-min 1.2
tls-cipher $CC_CIPHER
setenv opt block-outside-dns # Prevent Windows 10 DNS leak
verb 3" >> /etc/openvpn/client-template.txt
if [[ $COMPRESSION_ENABLED == "y" ]]; then
echo "compress $COMPRESSION_ALG" >> /etc/openvpn/client-template.txt
fi
# Generate the custom client.ovpn
# NOTE(review): newClient ends with "exit 0", so the message below is never
# printed when reached through this path — confirm intended.
newClient
echo "If you want to add more clients, you simply need to run this script another time!"
}
# Creates a new client certificate/key pair with easy-rsa and writes a
# ready-to-import .ovpn file — with inlined CA, cert, key and the
# tls-crypt/tls-auth material — into the appropriate home directory.
# NOTE: terminates the whole script with "exit 0" on success.
function newClient () {
echo ""
echo "Tell me a name for the client."
echo "Use one word only, no special characters."
until [[ "$CLIENT" =~ ^[a-zA-Z0-9_]+$ ]]; do
read -rp "Client name: " -e CLIENT
done
echo ""
echo "Do you want to protect the configuration file with a password?"
echo "(e.g. encrypt the private key with a password)"
echo " 1) Add a passwordless client"
echo " 2) Use a password for the client"
until [[ "$PASS" =~ ^[1-2]$ ]]; do
read -rp "Select an option [1-2]: " -e -i 1 PASS
done
cd /etc/openvpn/easy-rsa/ || return
case $PASS in
1)
./easyrsa build-client-full "$CLIENT" nopass
;;
2)
echo "⚠️ You will be asked for the client password below ⚠️"
./easyrsa build-client-full "$CLIENT"
;;
esac
# Home directory of the user, where the client configuration (.ovpn) will be written
if [ -e "/home/$CLIENT" ]; then # if a local user matching $CLIENT exists, use their home
homeDir="/home/$CLIENT"
elif [ "${SUDO_USER}" ]; then # if not, use SUDO_USER
homeDir="/home/${SUDO_USER}"
else # if not SUDO_USER, use /root
homeDir="/root"
fi
# Determine if we use tls-auth or tls-crypt by inspecting the live server.conf
if grep -qs "^tls-crypt" /etc/openvpn/server.conf; then
TLS_SIG="1"
elif grep -qs "^tls-auth" /etc/openvpn/server.conf; then
TLS_SIG="2"
fi
# Generates the custom client.ovpn: start from the template, then append
# the inline <ca>/<cert>/<key> blocks and control-channel key material.
cp /etc/openvpn/client-template.txt "$homeDir/$CLIENT.ovpn"
{
echo "<ca>"
cat "/etc/openvpn/easy-rsa/pki/ca.crt"
echo "</ca>"
echo "<cert>"
# awk extracts only the PEM block, dropping easy-rsa's human-readable preamble
awk '/BEGIN/,/END/' "/etc/openvpn/easy-rsa/pki/issued/$CLIENT.crt"
echo "</cert>"
echo "<key>"
cat "/etc/openvpn/easy-rsa/pki/private/$CLIENT.key"
echo "</key>"
case $TLS_SIG in
1)
echo "<tls-crypt>"
cat /etc/openvpn/tls-crypt.key
echo "</tls-crypt>"
;;
2)
echo "key-direction 1"
echo "<tls-auth>"
cat /etc/openvpn/tls-auth.key
echo "</tls-auth>"
;;
esac
} >> "$homeDir/$CLIENT.ovpn"
echo ""
echo "Client $CLIENT added, the configuration file is available at $homeDir/$CLIENT.ovpn."
echo "Download the .ovpn file and import it in your OpenVPN client."
exit 0
}
# Revokes an existing client certificate: updates the CRL, removes the
# client's key/cert/req files and deletes any generated .ovpn configs.
# Reads the selection interactively; writes a fresh crl.pem for the server.
function revokeClient () {
    NUMBEROFCLIENTS=$(tail -n +2 /etc/openvpn/easy-rsa/pki/index.txt | grep -c "^V")
    if [[ "$NUMBEROFCLIENTS" = '0' ]]; then
        echo ""
        echo "You have no existing clients!"
        exit 1
    fi
    echo ""
    echo "Select the existing client certificate you want to revoke"
    tail -n +2 /etc/openvpn/easy-rsa/pki/index.txt | grep "^V" | cut -d '=' -f 2 | nl -s ') '
    # Validate the selection: must be a number within [1, NUMBEROFCLIENTS].
    # (The original accepted any input and silently selected nothing.)
    until [[ "$CLIENTNUMBER" =~ ^[0-9]+$ ]] && (( CLIENTNUMBER >= 1 && CLIENTNUMBER <= NUMBEROFCLIENTS )); do
        if [[ "$NUMBEROFCLIENTS" = '1' ]]; then
            read -rp "Select one client [1]: " CLIENTNUMBER
        else
            read -rp "Select one client [1-$NUMBEROFCLIENTS]: " CLIENTNUMBER
        fi
    done
    # Map the 1-based selection back to the matching index.txt entry.
    CLIENT=$(tail -n +2 /etc/openvpn/easy-rsa/pki/index.txt | grep "^V" | cut -d '=' -f 2 | sed -n "$CLIENTNUMBER"p)
    cd /etc/openvpn/easy-rsa/ || return
    ./easyrsa --batch revoke "$CLIENT"
    EASYRSA_CRL_DAYS=3650 ./easyrsa gen-crl
    # Cleanup: remove the revoked client's PKI artifacts and publish the new CRL.
    rm -f "pki/reqs/$CLIENT.req"
    rm -f "pki/private/$CLIENT.key"
    rm -f "pki/issued/$CLIENT.crt"
    rm -f /etc/openvpn/crl.pem
    cp /etc/openvpn/easy-rsa/pki/crl.pem /etc/openvpn/crl.pem
    # Make cert revocation list readable for non-root (the OpenVPN runtime user)
    chmod 644 /etc/openvpn/crl.pem
    # Delete any distributed client configs and the static-IP lease entry.
    find /home/ -maxdepth 2 -name "$CLIENT.ovpn" -delete
    rm -f "/root/$CLIENT.ovpn"
    # Delete the whole line (the original blanked it, leaving an empty line behind).
    sed -i "/^$CLIENT,/d" /etc/openvpn/ipp.txt
    echo ""
    echo "Certificate for client $CLIENT revoked."
}
# Removes the OpenVPN-specific Unbound configuration, then optionally
# uninstalls Unbound entirely (asks the user first, honoring a preset
# REMOVE_UNBOUND value). Uses the distro's package manager per $OS.
function removeUnbound () {
# Remove OpenVPN-related config
sed -i 's|include: \/etc\/unbound\/openvpn.conf||' /etc/unbound/unbound.conf
rm /etc/unbound/openvpn.conf
systemctl restart unbound
# Ask whether to remove Unbound completely; loop until a y/n answer.
until [[ $REMOVE_UNBOUND =~ (y|n) ]]; do
echo ""
echo "If you were already using Unbound before installing OpenVPN, I removed the configuration related to OpenVPN."
read -rp "Do you want to completely remove Unbound? [y/n]: " -e REMOVE_UNBOUND
done
if [[ "$REMOVE_UNBOUND" = 'y' ]]; then
# Stop Unbound
systemctl stop unbound
if [[ "$OS" =~ (debian|ubuntu) ]]; then
apt-get autoremove --purge -y unbound
elif [[ "$OS" = 'arch' ]]; then
pacman --noconfirm -R unbound
elif [[ "$OS" =~ (centos|amzn) ]]; then
yum remove -y unbound
elif [[ "$OS" = 'fedora' ]]; then
dnf remove -y unbound
fi
rm -rf /etc/unbound/
echo ""
echo "Unbound removed!"
else
echo ""
echo "Unbound wasn't removed."
fi
}
# Completely uninstalls OpenVPN after confirmation: stops/disables services,
# removes iptables units and scripts, the SELinux port mapping, packages,
# all generated client configs and the /etc/openvpn tree. Delegates Unbound
# cleanup to removeUnbound when our config is present.
function removeOpenVPN () {
    echo ""
    # shellcheck disable=SC2034
    read -rp "Do you really want to remove OpenVPN? [y/n]: " -e -i n REMOVE
    if [[ "$REMOVE" = 'y' ]]; then
        # Get OpenVPN port from the configuration
        PORT=$(grep '^port ' /etc/openvpn/server.conf | cut -d " " -f 2)
        # Get the protocol as well: installation added the SELinux port rule
        # with the selected protocol ("$PROTOCOL"), so removal must use the
        # same one — the original hard-coded udp, which failed for TCP setups.
        # server.conf may contain "udp6"/"tcp6" when IPv6 is enabled; strip the 6.
        PROTOCOL=$(grep '^proto ' /etc/openvpn/server.conf | cut -d " " -f 2)
        PROTOCOL=${PROTOCOL%6}
        # Stop OpenVPN
        if [[ "$OS" =~ (fedora|arch|centos) ]]; then
            systemctl disable openvpn-server@server
            systemctl stop openvpn-server@server
            # Remove customised service
            rm /etc/systemd/system/openvpn-server@.service
        elif [[ "$OS" == "ubuntu" ]] && [[ "$VERSION_ID" == "16.04" ]]; then
            systemctl disable openvpn
            systemctl stop openvpn
        else
            systemctl disable openvpn@server
            systemctl stop openvpn@server
            # Remove customised service
            rm /etc/systemd/system/openvpn\@.service
        fi
        # Remove the iptables rules related to the script
        systemctl stop iptables-openvpn
        # Cleanup
        systemctl disable iptables-openvpn
        rm /etc/systemd/system/iptables-openvpn.service
        systemctl daemon-reload
        rm /etc/iptables/add-openvpn-rules.sh
        rm /etc/iptables/rm-openvpn-rules.sh
        # SELinux: drop the custom port mapping added during installation
        if hash sestatus 2>/dev/null; then
            if sestatus | grep "Current mode" | grep -qs "enforcing"; then
                if [[ "$PORT" != '1194' ]]; then
                    semanage port -d -t openvpn_port_t -p "$PROTOCOL" "$PORT"
                fi
            fi
        fi
        if [[ "$OS" =~ (debian|ubuntu) ]]; then
            apt-get autoremove --purge -y openvpn
            if [[ -e /etc/apt/sources.list.d/openvpn.list ]];then
                rm /etc/apt/sources.list.d/openvpn.list
                apt-get update
            fi
        elif [[ "$OS" = 'arch' ]]; then
            pacman --noconfirm -R openvpn
        elif [[ "$OS" =~ (centos|amzn) ]]; then
            yum remove -y openvpn
        elif [[ "$OS" = 'fedora' ]]; then
            dnf remove -y openvpn
        fi
        # Cleanup: client configs, server state, logs
        find /home/ -maxdepth 2 -name "*.ovpn" -delete
        find /root/ -maxdepth 1 -name "*.ovpn" -delete
        rm -rf /etc/openvpn
        rm -rf /usr/share/doc/openvpn*
        rm -f /etc/sysctl.d/20-openvpn.conf
        rm -rf /var/log/openvpn
        # Unbound
        if [[ -e /etc/unbound/openvpn.conf ]]; then
            removeUnbound
        fi
        echo ""
        echo "OpenVPN removed!"
    else
        echo ""
        echo "Removal aborted!"
    fi
}
# Interactive menu shown when an existing installation is detected.
# Prompts until a valid choice is entered, then dispatches to
# newClient / revokeClient / removeOpenVPN, or exits.
function manageMenu () {
    clear
    echo "Welcome to OpenVPN-install!"
    echo "The git repository is available at: https://github.com/angristan/openvpn-install"
    echo ""
    echo "It looks like OpenVPN is already installed."
    echo ""
    echo "What do you want to do?"
    echo "   1) Add a new user"
    echo "   2) Revoke existing user"
    echo "   3) Remove OpenVPN"
    echo "   4) Exit"
    # Keep prompting while the answer is not a single digit 1-4.
    while ! [[ "$MENU_OPTION" =~ ^[1-4]$ ]]; do
        read -rp "Select an option [1-4]: " MENU_OPTION
    done
    case "$MENU_OPTION" in
        1) newClient ;;
        2) revokeClient ;;
        3) removeOpenVPN ;;
        4) exit 0 ;;
    esac
}
# ---- Script entry point ----
# Check for root, TUN, OS... (initialCheck aborts on unsupported systems)
initialCheck
# Check if OpenVPN is already installed: an existing server.conf means we
# show the management menu instead of running a fresh installation.
if [[ -e /etc/openvpn/server.conf ]]; then
manageMenu
else
installOpenVPN
fi
|
#!/bin/bash
# Abort the whole build as soon as any command fails.
set -e
# Host tools required by Android, WebGL, and iOS builds
MOBILE_HOST_TOOLS="matc resgen cmgen filamesh"
# WebGL additionally needs mipgen (filamesh is listed twice; harmless)
WEB_HOST_TOOLS="${MOBILE_HOST_TOOLS} mipgen filamesh"
# Apple's open-source CMake toolchain file, fetched on demand for iOS cross-builds
IOS_TOOLCHAIN_URL="https://opensource.apple.com/source/clang/clang-800.0.38/src/cmake/platforms/iOS.cmake"
# Prints the usage/help text for this build script to stdout.
# Uses $0 to show the invoked script name in the examples.
function print_help {
    # $(…) instead of backticks, and "$0" quoted (paths may contain spaces).
    local self_name
    self_name=$(basename "$0")
    echo "Usage:"
    echo "    $self_name [options] <build_type1> [<build_type2> ...] [targets]"
    echo ""
    echo "Options:"
    echo "    -h"
    echo "        Print this help message."
    echo "    -a"
    echo "        Generate .tgz build archives, implies -i."
    echo "    -c"
    echo "        Clean build directories."
    echo "    -f"
    echo "        Always invoke CMake before incremental builds."
    echo "    -i"
    echo "        Install build output"
    echo "    -j"
    echo "        Do not compile desktop Java projects"
    echo "    -m"
    echo "        Compile with make instead of ninja."
    echo "    -p platform1,platform2,..."
    echo "        Where platformN is [desktop|android|ios|webgl|all]."
    echo "        Platform(s) to build, defaults to desktop."
    echo "        Building for iOS will automatically generate / download"
    echo "        the toolchains if needed and perform a partial desktop build."
    echo "    -u"
    echo "        Run all unit tests, will trigger a debug build if needed."
    echo "    -v"
    echo "        Add Vulkan support to the Android build."
    echo "    -s"
    echo "        Add iOS simulator support to the iOS build."
    echo "    -w"
    echo "        Build Web documents (compiles .md.html files to .html)."
    echo ""
    echo "Build types:"
    echo "    release"
    echo "        Release build only"
    echo "    debug"
    echo "        Debug build only"
    echo ""
    echo "Targets:"
    echo "    Any target supported by the underlying build system"
    echo ""
    echo "Examples:"
    echo "    Desktop release build:"
    echo "        \$ ./$self_name release"
    echo ""
    echo "    Desktop debug and release builds:"
    echo "        \$ ./$self_name debug release"
    echo ""
    echo "    Clean, desktop debug build and create archive of build artifacts:"
    echo "        \$ ./$self_name -c -a debug"
    echo ""
    echo "    Android release build type:"
    echo "        \$ ./$self_name -p android release"
    echo ""
    echo "    Desktop and Android release builds, with installation:"
    echo "        \$ ./$self_name -p desktop,android -i release"
    echo ""
    echo "    Desktop matc target, release build:"
    echo "        \$ ./$self_name release matc"
    echo ""
    echo "    Build gltf_viewer then immediately run it with no arguments:"
    echo "        \$ ./$self_name release run_gltf_viewer"
    echo ""
}
# Requirements
CMAKE_MAJOR=3
CMAKE_MINOR=10
ANDROID_NDK_VERSION=20
# Internal variables: defaults for all command-line switches, adjusted by
# the option-parsing code further down.
TARGET=release
ISSUE_CLEAN=false
ISSUE_DEBUG_BUILD=false
ISSUE_RELEASE_BUILD=false
# Default: build desktop only
ISSUE_ANDROID_BUILD=false
ISSUE_IOS_BUILD=false
ISSUE_DESKTOP_BUILD=true
ISSUE_WEBGL_BUILD=false
ISSUE_ARCHIVES=false
BUILD_JS_DOCS=false
ISSUE_CMAKE_ALWAYS=false
ISSUE_WEB_DOCS=false
RUN_TESTS=false
JS_DOCS_OPTION="-DGENERATE_JS_DOCS=OFF"
ENABLE_JAVA=ON
INSTALL_COMMAND=
VULKAN_ANDROID_OPTION="-DFILAMENT_SUPPORTS_VULKAN=OFF"
IOS_BUILD_SIMULATOR=false
BUILD_GENERATOR=Ninja
BUILD_COMMAND=ninja
BUILD_CUSTOM_TARGETS=
# Host OS name, lowercased for use in archive file names.
# (Was `UNAME=\`echo $(uname)\``: useless echo, backticks, unquoted expansion.)
UNAME=$(uname)
LC_UNAME=$(echo "${UNAME}" | tr '[:upper:]' '[:lower:]')
# Functions
# Remove all build output: the CMake out/ tree plus the Gradle build
# directories of every Android library module.
function build_clean {
    echo "Cleaning build directories..."
    rm -Rf out
    local module
    for module in filament-android filamat-android gltfio-android filament-utils-android; do
        rm -Rf "android/${module}/build" \
               "android/${module}/.externalNativeBuild" \
               "android/${module}/.cxx"
    done
}
# Configures (if needed) and builds one desktop configuration in
# out/cmake-<target>/, optionally installing and archiving the result.
# $1 - CMake build type ("Debug" or "Release")
# $2 - space-separated build targets; falls back to BUILD_CUSTOM_TARGETS
function build_desktop_target {
local lc_target=`echo $1 | tr '[:upper:]' '[:lower:]'`
local build_targets=$2
if [[ ! "$build_targets" ]]; then
build_targets=${BUILD_CUSTOM_TARGETS}
fi
echo "Building $lc_target in out/cmake-${lc_target}..."
mkdir -p out/cmake-${lc_target}
cd out/cmake-${lc_target}
# On macOS, set the deployment target to 10.14.
local name=`echo $(uname)`
local lc_name=`echo $name | tr '[:upper:]' '[:lower:]'`
if [[ "$lc_name" == "darwin" ]]; then
local deployment_target="-DCMAKE_OSX_DEPLOYMENT_TARGET=10.14"
fi
# Re-run CMake only on a fresh build tree or when -f was given.
if [[ ! -d "CMakeFiles" ]] || [[ "$ISSUE_CMAKE_ALWAYS" == "true" ]]; then
cmake \
-G "$BUILD_GENERATOR" \
-DIMPORT_EXECUTABLES_DIR=out \
-DCMAKE_BUILD_TYPE=$1 \
-DCMAKE_INSTALL_PREFIX=../${lc_target}/filament \
-DENABLE_JAVA=${ENABLE_JAVA} \
${deployment_target} \
../..
fi
${BUILD_COMMAND} ${build_targets}
if [[ "$INSTALL_COMMAND" ]]; then
echo "Installing ${lc_target} in out/${lc_target}/filament..."
${BUILD_COMMAND} ${INSTALL_COMMAND}
fi
# Archive the installed tree when -a was given and an install happened.
if [[ -d "../${lc_target}/filament" ]]; then
if [[ "$ISSUE_ARCHIVES" == "true" ]]; then
echo "Generating out/filament-${lc_target}-${LC_UNAME}.tgz..."
cd ../${lc_target}
tar -czvf ../filament-${lc_target}-${LC_UNAME}.tgz filament
fi
fi
# Return to the repository root.
cd ../..
}
# Run a desktop build for each configuration requested on the command line.
# $1 - optional space-separated list of build targets, forwarded as-is.
function build_desktop {
    local requested_targets=$1
    if [[ "$ISSUE_DEBUG_BUILD" == "true" ]]; then
        build_desktop_target "Debug" "$requested_targets"
    fi
    if [[ "$ISSUE_RELEASE_BUILD" == "true" ]]; then
        build_desktop_target "Release" "$requested_targets"
    fi
}
# Builds one WebGL configuration under the Emscripten toolchain in
# out/cmake-webgl-<target>/, then optionally generates JS docs and a
# web archive.
# $1 - CMake build type ("Debug" or "Release")
# NOTE: BUILD_TARGETS / ISSUE_CMAKE_ALWAYS are intentionally global here.
function build_webgl_with_target {
local lc_target=`echo $1 | tr '[:upper:]' '[:lower:]'`
echo "Building WebGL $lc_target..."
mkdir -p out/cmake-webgl-${lc_target}
cd out/cmake-webgl-${lc_target}
if [[ ! "$BUILD_TARGETS" ]]; then
BUILD_TARGETS=${BUILD_CUSTOM_TARGETS}
ISSUE_CMAKE_ALWAYS=true
fi
if [[ ! -d "CMakeFiles" ]] || [[ "$ISSUE_CMAKE_ALWAYS" == "true" ]]; then
# Apply the emscripten environment within a subshell.
(
source ${EMSDK}/emsdk_env.sh
cmake \
-G "$BUILD_GENERATOR" \
-DIMPORT_EXECUTABLES_DIR=out \
-DCMAKE_TOOLCHAIN_FILE=${EMSDK}/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake \
-DCMAKE_BUILD_TYPE=$1 \
-DCMAKE_INSTALL_PREFIX=../webgl-${lc_target}/filament \
-DWEBGL=1 \
${JS_DOCS_OPTION} \
../..
${BUILD_COMMAND} ${BUILD_TARGETS}
)
fi
if [[ -d "web/filament-js" ]]; then
if [[ "$BUILD_JS_DOCS" == "true" ]]; then
echo "Generating JavaScript documentation..."
local DOCS_FOLDER="web/docs"
local DOCS_SCRIPT="../../web/docs/build.py"
python3 ${DOCS_SCRIPT} --disable-demo \
--output-folder ${DOCS_FOLDER} \
--build-folder ${PWD}
fi
if [[ "$ISSUE_ARCHIVES" == "true" ]]; then
echo "Generating out/filament-${lc_target}-web.tgz..."
# The web archive has the following subfolders:
# dist...core WASM module and accompanying JS file.
# docs...HTML tutorials for the JS API, accompanying demos, and a reference page.
# NOTE(review): "tar -s" (path substitution) is a bsdtar/macOS option — this
# branch presumably only runs on macOS hosts; GNU tar would need --transform.
cd web
tar -cvf ../../filament-${lc_target}-web.tar -s /^filament-js/dist/ \
filament-js/filament.js
tar -rvf ../../filament-${lc_target}-web.tar -s /^filament-js/dist/ \
filament-js/filament.wasm
cd -
gzip -c ../filament-${lc_target}-web.tar > ../filament-${lc_target}-web.tgz
rm ../filament-${lc_target}-web.tar
fi
fi
cd ../..
}
# Builds the WebGL platform: first a Release desktop build of the host
# tools (with install suppressed), then the WebGL build(s) proper.
function build_webgl {
# For the host tools, suppress install and always use Release.
# Save the flags so they can be restored after the host-tool build.
local old_install_command=${INSTALL_COMMAND}; INSTALL_COMMAND=
local old_issue_debug_build=${ISSUE_DEBUG_BUILD}; ISSUE_DEBUG_BUILD=false
local old_issue_release_build=${ISSUE_RELEASE_BUILD}; ISSUE_RELEASE_BUILD=true
build_desktop "${WEB_HOST_TOOLS}"
INSTALL_COMMAND=${old_install_command}
ISSUE_DEBUG_BUILD=${old_issue_debug_build}
ISSUE_RELEASE_BUILD=${old_issue_release_build}
if [[ "$ISSUE_DEBUG_BUILD" == "true" ]]; then
build_webgl_with_target "Debug"
fi
if [[ "$ISSUE_RELEASE_BUILD" == "true" ]]; then
build_webgl_with_target "Release"
fi
}
# Configures (if needed) and builds one Android configuration for one ABI
# in out/cmake-android-<target>-<arch>/, always installing the result.
# $1 - CMake build type ("Debug" or "Release")
# $2 - architecture name used to pick the toolchain file (e.g. aarch64)
function build_android_target {
local LC_TARGET=`echo $1 | tr '[:upper:]' '[:lower:]'`
local ARCH=$2
echo "Building Android $LC_TARGET ($ARCH)..."
mkdir -p out/cmake-android-${LC_TARGET}-${ARCH}
cd out/cmake-android-${LC_TARGET}-${ARCH}
if [[ ! -d "CMakeFiles" ]] || [[ "$ISSUE_CMAKE_ALWAYS" == "true" ]]; then
cmake \
-G "$BUILD_GENERATOR" \
-DIMPORT_EXECUTABLES_DIR=out \
-DCMAKE_BUILD_TYPE=$1 \
-DCMAKE_INSTALL_PREFIX=../android-${LC_TARGET}/filament \
-DCMAKE_TOOLCHAIN_FILE=../../build/toolchain-${ARCH}-linux-android.cmake \
${VULKAN_ANDROID_OPTION} \
../..
fi
# We must always install Android libraries to build the AAR
${BUILD_COMMAND} install
cd ../..
}
# Builds one Android ABI for every requested configuration.
# $1 - toolchain architecture name (aarch64, arm7, x86_64, x86)
# $2 - Android target triple; unused here (the original bound it to an
#      unused local, SC2034) but kept in the signature for call sites.
function build_android_arch {
    local arch=$1
    if [[ "$ISSUE_DEBUG_BUILD" == "true" ]]; then
        build_android_target "Debug" "$arch"
    fi
    if [[ "$ISSUE_RELEASE_BUILD" == "true" ]]; then
        build_android_target "Release" "$arch"
    fi
}
# Creates out/filament-android-<target>-<os>.tgz from the installed Android
# files, but only when archiving was requested and an install happened.
# $1 - build type ("Debug" or "Release"), lowercased for the file name.
function archive_android {
    local tgt
    tgt=$(echo "$1" | tr '[:upper:]' '[:lower:]')
    [[ "$ISSUE_ARCHIVES" == "true" ]] || return 0
    [[ -d "out/android-${tgt}/filament" ]] || return 0
    echo "Generating out/filament-android-${tgt}-${LC_UNAME}.tgz..."
    # Subshell keeps the caller's working directory untouched.
    (
        cd "out/android-${tgt}"
        tar -czvf "../filament-android-${tgt}-${LC_UNAME}.tgz" filament
    )
}
# Validates the Android build environment: ANDROID_HOME must point at an SDK
# with a side-by-side NDK >= ANDROID_NDK_VERSION, and CMake must be at least
# CMAKE_MAJOR.CMAKE_MINOR. Exits the script with a message otherwise.
function ensure_android_build {
    if [[ "$ANDROID_HOME" == "" ]]; then
        echo "Error: ANDROID_HOME is not set, exiting"
        exit 1
    fi
    # NDK "side by side" layout: $ANDROID_HOME/ndk/<version>/
    local ndk_side_by_side="${ANDROID_HOME}/ndk/"
    if [[ -d $ndk_side_by_side ]]; then
        # Pick the newest installed NDK and compare its major version.
        local ndk_version
        ndk_version=$(ls "${ndk_side_by_side}" | sort -V | tail -n 1 | cut -f 1 -d ".")
        if [[ ${ndk_version} -lt ${ANDROID_NDK_VERSION} ]]; then
            echo "Error: Android NDK side-by-side version ${ANDROID_NDK_VERSION} or higher must be installed, exiting"
            exit 1
        fi
    else
        echo "Error: Android NDK side-by-side version ${ANDROID_NDK_VERSION} or higher must be installed, exiting"
        exit 1
    fi
    local cmake_version
    cmake_version=$(cmake --version)
    if [[ "$cmake_version" =~ ([0-9]+)\.([0-9]+)\.[0-9]+ ]]; then
        local major=${BASH_REMATCH[1]}
        local minor=${BASH_REMATCH[2]}
        # Proper (major, minor) comparison. The original required BOTH
        # major >= CMAKE_MAJOR and minor >= CMAKE_MINOR independently, so a
        # newer major with a small minor (e.g. 4.0 vs required 3.10) was
        # wrongly rejected.
        if (( major < CMAKE_MAJOR )) || { (( major == CMAKE_MAJOR )) && (( minor < CMAKE_MINOR )); }; then
            echo "Error: cmake version ${CMAKE_MAJOR}.${CMAKE_MINOR}+ is required," \
                "${major}.${minor} installed, exiting"
            exit 1
        fi
    fi
}
# Builds the Android platform end-to-end: validates the environment, builds
# the host tools for the desktop, builds every ABI via CMake, optionally
# archives the installed tree, then assembles the AARs with Gradle.
function build_android {
ensure_android_build
# Suppress intermediate desktop tools install
local old_install_command=${INSTALL_COMMAND}
INSTALL_COMMAND=
build_desktop "${MOBILE_HOST_TOOLS}"
INSTALL_COMMAND=${old_install_command}
# One native build per supported ABI.
build_android_arch "aarch64" "aarch64-linux-android"
build_android_arch "arm7" "arm-linux-androideabi"
build_android_arch "x86_64" "x86_64-linux-android"
build_android_arch "x86" "i686-linux-android"
if [[ "$ISSUE_DEBUG_BUILD" == "true" ]]; then
archive_android "Debug"
fi
if [[ "$ISSUE_RELEASE_BUILD" == "true" ]]; then
archive_android "Release"
fi
# Gradle AAR assembly, driven from the android/ subproject.
cd android
if [[ "$ISSUE_DEBUG_BUILD" == "true" ]]; then
./gradlew \
-Pfilament_dist_dir=../out/android-debug/filament \
-Pextra_cmake_args=${VULKAN_ANDROID_OPTION} \
:filament-android:assembleDebug \
:gltfio-android:assembleDebug \
:filament-utils-android:assembleDebug
./gradlew \
-Pfilament_dist_dir=../out/android-debug/filament \
:filamat-android:assembleDebug
if [[ "$INSTALL_COMMAND" ]]; then
echo "Installing out/filamat-android-debug.aar..."
cp filamat-android/build/outputs/aar/filamat-android-full-debug.aar ../out/
cp filamat-android/build/outputs/aar/filamat-android-lite-debug.aar ../out/
echo "Installing out/filament-android-debug.aar..."
cp filament-android/build/outputs/aar/filament-android-debug.aar ../out/
echo "Installing out/gltfio-android-debug.aar..."
cp gltfio-android/build/outputs/aar/gltfio-android-debug.aar ../out/
echo "Installing out/filament-utils-android-debug.aar..."
cp filament-utils-android/build/outputs/aar/filament-utils-android-debug.aar ../out/
fi
fi
if [[ "$ISSUE_RELEASE_BUILD" == "true" ]]; then
./gradlew \
-Pfilament_dist_dir=../out/android-release/filament \
-Pextra_cmake_args=${VULKAN_ANDROID_OPTION} \
:filament-android:assembleRelease \
:gltfio-android:assembleRelease \
:filament-utils-android:assembleRelease
./gradlew \
-Pfilament_dist_dir=../out/android-release/filament \
:filamat-android:assembleRelease
if [[ "$INSTALL_COMMAND" ]]; then
echo "Installing out/filamat-android-release.aar..."
cp filamat-android/build/outputs/aar/filamat-android-full-release.aar ../out/
cp filamat-android/build/outputs/aar/filamat-android-lite-release.aar ../out/
echo "Installing out/filament-android-release.aar..."
cp filament-android/build/outputs/aar/filament-android-release.aar ../out/
echo "Installing out/gltfio-android-release.aar..."
cp gltfio-android/build/outputs/aar/gltfio-android-release.aar ../out/
echo "Installing out/filament-utils-android-release.aar..."
cp filament-utils-android/build/outputs/aar/filament-utils-android-release.aar ../out/
fi
fi
cd ..
}
# Ensure the CMake iOS toolchain file exists, downloading and patching it from
# opensource.apple.com on first use.
# Globals read: GITHUB_WORKFLOW (auto-confirms the prompt on CI),
#               IOS_TOOLCHAIN_URL (download location).
# Exits the script when the user declines or the download fails.
function ensure_ios_toolchain {
    local toolchain_path="build/toolchain-mac-ios.cmake"
    if [[ -e ${toolchain_path} ]]; then
        echo "iOS toolchain file exists."
        return 0
    fi
    echo
    echo "iOS toolchain file does not exist."
    echo "It will automatically be downloaded from http://opensource.apple.com."
    if [[ "$GITHUB_WORKFLOW" ]]; then
        # Non-interactive CI run: skip the prompt.
        REPLY=y
    else
        read -p "Continue? (y/n) " -n 1 -r
        echo
    fi
    if [[ ! "$REPLY" =~ ^[Yy]$ ]]; then
        echo "Toolchain file must be downloaded to continue."
        exit 1
    fi
    curl -o "${toolchain_path}" "${IOS_TOOLCHAIN_URL}" || {
        echo "Error downloading iOS toolchain file."
        exit 1
    }
    # Apple's toolchain hard-codes the PLATFORM_NAME into the toolchain file. Instead, make this a
    # CACHE variable that can be overridden on the command line.
    local FIND='SET(PLATFORM_NAME iphoneos)'
    local REPLACE='SET(PLATFORM_NAME "iphoneos" CACHE STRING "iOS platform to build for")'
    # NOTE: `sed -i ''` is the BSD/macOS in-place form; GNU sed would reject it.
    sed -i '' "s/${FIND}/${REPLACE}/g" ./${toolchain_path}
    # Apple's toolchain specifies isysroot based on an environment variable, which we don't set.
    # The toolchain doesn't need to do this, however, as isysroot is implicitly set in the toolchain
    # via CMAKE_OSX_SYSROOT.
    local FIND='SET(IOS_COMMON_FLAGS "-isysroot $ENV{SDKROOT} '
    local REPLACE='SET(IOS_COMMON_FLAGS "'
    sed -i '' "s/${FIND}/${REPLACE}/g" ./${toolchain_path}
    # Prepend Filament-specific settings.
    (cat build/toolchain-mac-ios.filament.cmake; cat ${toolchain_path}) > tmp && mv tmp ${toolchain_path}
    echo "Successfully downloaded iOS toolchain file and prepended Filament-specific settings."
}
# Configure and build Filament for one iOS target.
# Arguments:
#   $1 - CMake build type ("Debug" or "Release"); its lowercased form names
#        the build and install directories
#   $2 - target architecture (e.g. arm64, x86_64)
#   $3 - Apple platform name (iphoneos or iphonesimulator)
# Globals read: BUILD_GENERATOR, BUILD_COMMAND, INSTALL_COMMAND,
#               ISSUE_CMAKE_ALWAYS, ISSUE_ARCHIVES.
function build_ios_target {
    local lc_target=`echo $1 | tr '[:upper:]' '[:lower:]'`
    local arch=$2
    local platform=$3
    echo "Building iOS $lc_target ($arch) for $platform..."
    mkdir -p out/cmake-ios-${lc_target}-${arch}
    cd out/cmake-ios-${lc_target}-${arch}
    # Only (re)run CMake on a fresh build tree, or when -f forces it.
    if [[ ! -d "CMakeFiles" ]] || [[ "$ISSUE_CMAKE_ALWAYS" == "true" ]]; then
        cmake \
            -G "$BUILD_GENERATOR" \
            -DIMPORT_EXECUTABLES_DIR=out \
            -DCMAKE_BUILD_TYPE=$1 \
            -DCMAKE_INSTALL_PREFIX=../ios-${lc_target}/filament \
            -DIOS_ARCH=${arch} \
            -DPLATFORM_NAME=${platform} \
            -DIOS_MIN_TARGET=12.0 \
            -DIOS=1 \
            -DCMAKE_TOOLCHAIN_FILE=../../build/toolchain-mac-ios.cmake \
            ../..
    fi
    ${BUILD_COMMAND}
    if [[ "$INSTALL_COMMAND" ]]; then
        echo "Installing ${lc_target} in out/${lc_target}/filament..."
        ${BUILD_COMMAND} ${INSTALL_COMMAND}
    fi
    if [[ -d "../ios-${lc_target}/filament" ]]; then
        if [[ "$ISSUE_ARCHIVES" == "true" ]]; then
            echo "Generating out/filament-${lc_target}-ios.tgz..."
            # This cd moves into a sibling directory under out/, so the final
            # `cd ../..` below still returns to the repository root.
            cd ../ios-${lc_target}
            tar -czvf ../filament-${lc_target}-ios.tgz filament
        fi
    fi
    cd ../..
}
# Build Filament for iOS: host tools first, then each requested configuration
# for device (and optionally simulator) via build_ios_target.
# Globals read: INSTALL_COMMAND, MOBILE_HOST_TOOLS, ISSUE_DEBUG_BUILD,
#               ISSUE_RELEASE_BUILD, IOS_BUILD_SIMULATOR.
function build_ios {
    # Suppress intermediate desktop tools install
    local old_install_command=${INSTALL_COMMAND}
    INSTALL_COMMAND=
    build_desktop "${MOBILE_HOST_TOOLS}"
    INSTALL_COMMAND=${old_install_command}
    ensure_ios_toolchain
    # In theory, we could support iPhone architectures older than arm64, but
    # only arm64 devices support OpenGL 3.0 / Metal
    if [[ "$ISSUE_DEBUG_BUILD" == "true" ]]; then
        build_ios_target "Debug" "arm64" "iphoneos"
        if [[ "$IOS_BUILD_SIMULATOR" == "true" ]]; then
            build_ios_target "Debug" "x86_64" "iphonesimulator"
        fi
    fi
    if [[ "$ISSUE_RELEASE_BUILD" == "true" ]]; then
        build_ios_target "Release" "arm64" "iphoneos"
        if [[ "$IOS_BUILD_SIMULATOR" == "true" ]]; then
            build_ios_target "Release" "x86_64" "iphonesimulator"
        fi
    fi
}
# Render the Markdeep documentation to static HTML under out/web-docs.
# Requires node/npm/npx; availability is checked in validate_build_command.
function build_web_docs {
    echo "Building Web documents..."
    mkdir -p out/web-docs
    cd out/web-docs
    # Create an empty npm package to link markdeep-rasterizer into
    npm list | grep web-docs@1.0.0 > /dev/null || npm init --yes > /dev/null
    npm list | grep markdeep-rasterizer > /dev/null || npm install ../../third_party/markdeep-rasterizer > /dev/null
    # Generate documents
    npx markdeep-rasterizer ../../docs/Filament.md.html ../../docs/Materials.md.html ../../docs/
    cd ../..
}
# Verify that the tools required by the requested build are available.
# Globals read:   BUILD_COMMAND, JAVA_HOME, EMSDK, ISSUE_WEBGL_BUILD,
#                 ISSUE_WEB_DOCS.
# Globals written: BUILD_GENERATOR, BUILD_COMMAND (ninja -> make fallback),
#                 ENABLE_JAVA (disabled when no JDK is found).
# Exits the script when a hard requirement is missing; soft requirements
# only downgrade the configuration with a warning.
function validate_build_command {
    set +e
    # Make sure CMake is installed
    local cmake_binary
    cmake_binary=$(command -v cmake)
    if [[ ! "$cmake_binary" ]]; then
        echo "Error: could not find cmake, exiting"
        exit 1
    fi
    # Make sure Ninja is installed; otherwise fall back to make
    if [[ "$BUILD_COMMAND" == "ninja" ]]; then
        local ninja_binary
        ninja_binary=$(command -v ninja)
        if [[ ! "$ninja_binary" ]]; then
            echo "Warning: could not find ninja, using make instead"
            BUILD_GENERATOR="Unix Makefiles"
            BUILD_COMMAND="make"
        fi
    fi
    # Make sure Make is installed
    if [[ "$BUILD_COMMAND" == "make" ]]; then
        local make_binary
        make_binary=$(command -v make)
        if [[ ! "$make_binary" ]]; then
            echo "Error: could not find make, exiting"
            exit 1
        fi
    fi
    # Make sure we have Java; otherwise skip the Java projects
    local javac_binary
    javac_binary=$(command -v javac)
    if [[ "$JAVA_HOME" == "" ]] || [[ ! "$javac_binary" ]]; then
        echo "Warning: JAVA_HOME is not set, skipping Java projects"
        ENABLE_JAVA=OFF
    fi
    # If building a WebAssembly module, ensure we know where Emscripten lives.
    if [[ "$EMSDK" == "" ]] && [[ "$ISSUE_WEBGL_BUILD" == "true" ]]; then
        echo "Error: EMSDK is not set, exiting"
        exit 1
    fi
    # Web documents require node and npm for processing
    if [[ "$ISSUE_WEB_DOCS" == "true" ]]; then
        local node_binary npm_binary npx_binary
        node_binary=$(command -v node)
        npm_binary=$(command -v npm)
        npx_binary=$(command -v npx)
        if [[ ! "$node_binary" ]] || [[ ! "$npm_binary" ]] || [[ ! "$npx_binary" ]]; then
            echo "Error: Web documents require node, npm and npx to be installed"
            exit 1
        fi
    fi
    set -e
}
# Run a single gtest binary under out/cmake-debug, writing its XML report to
# out/test-results/<binary-name>/sponge_log.xml.
# $1 is a whitespace-separated spec: the binary path optionally followed by
# extra arguments.
function run_test {
    local test_spec=$1
    # The spec may carry arguments, so re-split it (unquoted on purpose) to
    # recover the bare executable path as $1 for basename.
    set -- ${test_spec}
    local test_name
    test_name=$(basename "$1")
    ./out/cmake-debug/${test_spec} --gtest_output="xml:out/test-results/${test_name}/sponge_log.xml"
}
# Run the test suite.
# For WebGL builds, type-check the TypeScript declarations; otherwise run
# every gtest binary listed in build/common/test_list.txt via run_test.
#
# Fix: the previous code guarded the type-check with
#   if ! echo "TypeScript `tsc --version`"; then
# echo always succeeds, so the negated condition was always false and the
# tsc --noEmit check never ran. Gate on tsc's availability instead.
function run_tests {
    if [[ "$ISSUE_WEBGL_BUILD" == "true" ]]; then
        if command -v tsc >/dev/null; then
            echo "TypeScript $(tsc --version)"
            tsc --noEmit \
                third_party/gl-matrix/gl-matrix.d.ts \
                web/filament-js/filament.d.ts \
                web/filament-js/test.ts
        fi
    else
        while read test; do
            run_test "$test"
        done < build/common/test_list.txt
    fi
}
# Beginning of the script
# Run from the script's own directory so all relative paths resolve.
pushd `dirname $0` > /dev/null

# Parse single-letter flags; each flag only sets globals that the dispatch
# section at the bottom of the script acts on.
# NOTE(review): the optstring lists 't' and 'l' but no case arm handles
# them — confirm whether these flags are dead or missing their handlers.
while getopts ":hacfijmp:tuvslw" opt; do
    case ${opt} in
        h)
            print_help
            exit 1
            ;;
        a)
            # Archiving implies installing, so there is something to tar up.
            ISSUE_ARCHIVES=true
            INSTALL_COMMAND=install
            ;;
        c)
            ISSUE_CLEAN=true
            ;;
        f)
            # Force re-running CMake even when a build tree already exists.
            ISSUE_CMAKE_ALWAYS=true
            ;;
        i)
            INSTALL_COMMAND=install
            ;;
        j)
            ENABLE_JAVA=OFF
            ;;
        m)
            BUILD_GENERATOR="Unix Makefiles"
            BUILD_COMMAND="make"
            ;;
        p)
            # -p takes a comma-separated platform list; selecting any
            # platform disables the implicit desktop build unless 'desktop'
            # is listed explicitly.
            ISSUE_DESKTOP_BUILD=false
            platforms=$(echo "$OPTARG" | tr ',' '\n')
            for platform in ${platforms}
            do
                case ${platform} in
                    desktop)
                        ISSUE_DESKTOP_BUILD=true
                        ;;
                    android)
                        ISSUE_ANDROID_BUILD=true
                        ;;
                    ios)
                        ISSUE_IOS_BUILD=true
                        ;;
                    webgl)
                        ISSUE_WEBGL_BUILD=true
                        ;;
                    all)
                        # 'all' leaves WebGL off — presumably because it needs
                        # the EMSDK toolchain (see validate_build_command);
                        # it must be requested explicitly. TODO confirm.
                        ISSUE_ANDROID_BUILD=true
                        ISSUE_IOS_BUILD=true
                        ISSUE_DESKTOP_BUILD=true
                        ISSUE_WEBGL_BUILD=false
                        ;;
                esac
            done
            ;;
        u)
            ISSUE_DEBUG_BUILD=true
            RUN_TESTS=true
            ;;
        v)
            VULKAN_ANDROID_OPTION="-DFILAMENT_SUPPORTS_VULKAN=ON"
            echo "Enabling support for Vulkan in the core Filament library."
            echo ""
            # NOTE(review): "Executation Deployment" below looks like a typo
            # for "Execution, Deployment" in the user-facing help text.
            echo "To switch your application to Vulkan, in Android Studio go to Preferences > "
            echo "Build, Executation Deployment > Compiler. In the command-line options field, "
            echo "add -Pextra_cmake_args=-DFILAMENT_SUPPORTS_VULKAN=ON."
            echo "Also be sure to pass Engine.Backend.VULKAN to Engine.create."
            echo ""
            ;;
        s)
            IOS_BUILD_SIMULATOR=true
            echo "iOS simulator support enabled."
            ;;
        w)
            ISSUE_WEB_DOCS=true
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            echo ""
            print_help
            exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2
            echo ""
            print_help
            exit 1
            ;;
    esac
done
# No arguments at all means there is nothing to build; show usage and bail.
if [[ "$#" == "0" ]]; then
    print_help
    exit 1
fi
shift $(($OPTIND - 1))

# Remaining positional arguments select build variants; anything else is
# accumulated and forwarded verbatim as extra build targets.
for arg; do
    if [[ "$arg" == "release" ]]; then
        ISSUE_RELEASE_BUILD=true
    elif [[ "$arg" == "debug" ]]; then
        ISSUE_DEBUG_BUILD=true
    else
        BUILD_CUSTOM_TARGETS="$BUILD_CUSTOM_TARGETS $arg"
    fi
done

validate_build_command

# Dispatch the requested actions in a fixed order: clean first, then each
# platform build, docs, and finally tests.
if [[ "$ISSUE_CLEAN" == "true" ]]; then
    build_clean
fi
if [[ "$ISSUE_DESKTOP_BUILD" == "true" ]]; then
    build_desktop
fi
if [[ "$ISSUE_ANDROID_BUILD" == "true" ]]; then
    build_android
fi
if [[ "$ISSUE_IOS_BUILD" == "true" ]]; then
    build_ios
fi
if [[ "$ISSUE_WEBGL_BUILD" == "true" ]]; then
    build_webgl
fi
if [[ "$ISSUE_WEB_DOCS" == "true" ]]; then
    build_web_docs
fi
if [[ "$RUN_TESTS" == "true" ]]; then
    run_tests
fi
|
A lightweight AI model can be implemented in a language such as Python or JavaScript. It contains code to preprocess text data, extract features, apply a suitable machine-learning algorithm, and train a model that classifies and categorizes text.
package com.alianza.sip.impl;
import com.alianza.sip.SipContact;
import gov.nist.javax.sdp.fields.SDPField;
import gov.nist.javax.sdp.parser.SDPParser;
import javax.inject.Inject;
import javax.sdp.SdpEncoder;
import javax.sip.InvalidArgumentException;
import javax.sip.address.Address;
import javax.sip.address.AddressFactory;
import javax.sip.address.SipURI;
import javax.sip.header.*;
import javax.sip.message.MessageFactory;
import javax.sip.message.Request;
import java.text.ParseException;
import java.util.Collections;
/**
 * Builds JAIN-SIP {@link Request} objects from higher-level {@link SipContact}
 * descriptions, hiding the individual header-factory calls.
 */
public class RequestFactory {
    private final MessageFactory messageFactory;
    private final HeaderFactory headerFactory;
    private final AddressFactory addressFactory;

    @Inject
    public RequestFactory(final MessageFactory messageFactory,
                          final HeaderFactory headerFactory,
                          final AddressFactory addressFactory) {
        this.messageFactory = messageFactory;
        this.headerFactory = headerFactory;
        this.addressFactory = addressFactory;
    }

    /**
     * Creates an initial INVITE (CSeq 1, Max-Forwards 70) addressed to {@code to}.
     *
     * @param to     callee; provides the request URI and To header
     * @param from   caller; provides the From and Contact headers
     * @param callId value for the Call-ID header
     * @param via    transport details for the single Via header
     * @return the populated INVITE request, or {@code null} when header
     *         construction fails (the exception is only printed —
     *         NOTE(review): consider propagating instead of returning null)
     */
    public Request createInitialInviteRequest(final SipContact to, final SipContact from,
                                              final String callId, final ViaDetails via) {
        try {
            SipURI requestUri = addressFactory.createSipURI(to.getContact(), to.getSipAddress());
            FromHeader fromHeader = createFromHeader(from);
            ToHeader toHeader = createToHeader(to);
            CallIdHeader callIdHeader = headerFactory.createCallIdHeader(callId);
            CSeqHeader cSeqHeader = headerFactory.createCSeqHeader(1L, Request.INVITE);
            MaxForwardsHeader maxForwardsHeader = headerFactory.createMaxForwardsHeader(70);
            // NOTE(review): contentTypeHeader is created but never attached to
            // the request, and no SDP body is set — confirm this is intended.
            ContentTypeHeader contentTypeHeader = headerFactory.createContentTypeHeader("application", "sdp");
            ViaHeader viaHeader = headerFactory.createViaHeader(via.getIpAddress(), via.getPort(), via.getProtocol(), via.getBranch());
            Request request = messageFactory.createRequest(requestUri, Request.INVITE, callIdHeader, cSeqHeader,
                    fromHeader, toHeader, Collections.singletonList(viaHeader), maxForwardsHeader);
            request.addHeader(createContactHeader(from));
            return request;
        } catch (InvalidArgumentException e) {
            e.printStackTrace();
        } catch (ParseException e) {
            e.printStackTrace();
        }
        return null;
    }

    /** Builds a Contact header for the given contact. */
    private ContactHeader createContactHeader(SipContact contact) throws ParseException {
        return headerFactory.createContactHeader(createContactAddress(contact));
    }

    /** Builds a From header carrying the contact's tag. */
    private FromHeader createFromHeader(SipContact contact) throws ParseException {
        return headerFactory.createFromHeader(createContactAddress(contact), contact.getTag());
    }

    /** Builds a To header carrying the contact's tag. */
    private ToHeader createToHeader(SipContact contact) throws ParseException {
        return headerFactory.createToHeader(createContactAddress(contact), contact.getTag());
    }

    /** Converts a SipContact into a display-named SIP address. */
    private Address createContactAddress(SipContact contact) throws ParseException {
        SipURI sipUri = addressFactory.createSipURI(contact.getContact(), contact.getSipAddress());
        Address sipAddress = addressFactory.createAddress(sipUri);
        sipAddress.setDisplayName(contact.getDisplayName());
        return sipAddress;
    }
}
|
# Pairwise sequence-similarity tables produced with dashing2.
# Earlier covid19 experiments are kept commented out for reference.
#../dashing2/dashing2 sketch -k15 --topk 10 --parse-by-seq --edit-distance --compute-edit-distance ./covid19/toy.fasta --cmpout ./knnGraph_covid.txt -F table_covid.txt
#../dashing2/dashing2 sketch -k15 --parse-by-seq --square ./covid19/toy.fasta --cmpout table_covid.txt
#../dashing2/dashing2 sketch -k15 --parse-by-seq --square --edit-distance --compute-edit-distance ./covid19/toy.fasta --cmpout table_covid_OMH.txt
# All-vs-all table for the E. coli set (k=25), sketch-based distance.
../dashing2/dashing2 sketch -k25 --parse-by-seq --square ./ecoli/ecoli_merge.fasta --cmpout table_ecoli.txt
# All-vs-all table in edit-distance mode (k=25).
# NOTE(review): this reads ecoli_2.fasta while the line above reads
# ecoli_merge.fasta — confirm the differing inputs are intentional.
../dashing2/dashing2 sketch -k25 --parse-by-seq --square --edit-distance --compute-edit-distance ./ecoli/ecoli_2.fasta --cmpout table_ecoli_OMH.txt
#../dashing2/dashing2 sketch -k50 --topk 10 --parse-by-seq --edit-distance --compute-edit-distance ./ecoli/ecoli.fasta --cmpout ./knnGraph_ecoli.txt
|
const { task, src, dest } = require('gulp');
const babel = require('gulp-babel');
const uglify = require('gulp-uglify');
const aliases = require('gulp-wechat-weapp-src-alisa');
const exit = require('exit');
// Gulp task: compile project JavaScript — resolve WeChat mini-program path
// aliases, transpile with Babel, and write the result to dist/.
// (uglify minification is wired up but currently disabled.)
task('js', callback => {
    console.log('处理js文件');
    src(['src/**/*.js', '*.js', '!gulpfile.js'])
        // Replace path aliases with real paths
        .pipe(aliases(require('../config/alias').default))
        // Babel: syntax transpilation; errors are logged but do not stop the stream
        .pipe(babel().on('error', function(error) {
            if (error) {
                console.log(error);
                // exit(0)
            }
        }))
        // Code minification (disabled)
        // .pipe(uglify({}))
        // Write output
        .pipe(dest('dist'));
    callback();
});
<gh_stars>1-10
from typing import Dict, Any
from solo import http_defaults, http_endpoint
from solo.apps.accounts.service import UserService
from solo.apps.accounts.model import User, Guest
from solo.apps.accounts import get_user
from solo.server.db import SQLEngine
from solo.server.request import Request
from solo.server.definitions import HttpMethod
from solo.server.statuses import Forbidden
from solo.vendor.old_session.old_session import SessionStore
@http_defaults(route_name='/users', permission='users:view', renderer='json')
class AccountsListHandler:
    """Collection endpoint for /users.

    Currently a placeholder: GET returns an empty JSON object.
    """

    def __init__(self, request: Request, context: Dict[str, Any]):
        self.request = request

    @http_endpoint(request_method=HttpMethod.GET)
    async def get(self):
        # Placeholder response; no listing is implemented yet.
        return {}
@http_defaults(route_name='/users/me', authenticated=True, renderer='json')
class FrontendAuthenticationHandler:
    """Endpoint for /users/me: returns the calling user's own record."""

    def __init__(self, request: Request, context: Dict[str, Any]):
        self.request = request
        self.context = context

    @http_endpoint(request_method=HttpMethod.GET)
    async def authenticate_frontend(self,
                                    store: SessionStore,
                                    db: SQLEngine):
        """Resolve the session user and serialize it as a JSON:API-style
        resource object; unauthenticated guests get 403 Forbidden.
        """
        user = await get_user(store, db, self.request)
        if user is Guest:
            raise Forbidden()
        return {
            'id': str(user.id),
            'type': 'users',
            'attributes': user.as_dict(),
        }
@http_defaults(route_name='/users/{userId}', authenticated=True, renderer='json')
class AccountDetailsHandler:
    """Detail endpoint for a single user, addressed by the userId route match."""

    def __init__(self, request: Request, context: Dict[str, Any]):
        self.request = request
        self.context = context

    @http_endpoint(request_method=HttpMethod.GET)
    async def get(self):
        """Serialize the requested user as a JSON:API-style resource object."""
        user_service = UserService(self.request.app)
        user = await user_service.get(User.id, self.context['userId'])
        if not user:
            # NOTE(review): a missing user responds 403 rather than 404 —
            # confirm this is the intended status.
            raise Forbidden()
        return {
            'id': str(user.id),
            'type': 'users',
            'attributes': user.as_dict(),
        }
|
/// <reference types="./my-module.rt" />
/* GENERATED STUB, remove this comment and take over development of this code. */
import { session, Entity } from '@frusal/library-for-browser';
// Placeholder model classes; each class must be registered with the session
// factory so the library can instantiate persisted objects of that type.
export class NamedEntity extends Entity {
    // nothing yet
}
session.factory.registerUserClass(NamedEntity);

export class Product extends NamedEntity {
    // nothing yet
}
session.factory.registerUserClass(Product);

export class Order extends NamedEntity {
    // nothing yet
}
session.factory.registerUserClass(Order);

export class OrderLine extends NamedEntity {
    // nothing yet
}
session.factory.registerUserClass(OrderLine);
|
def play_game(n, obstacles, treasures):
    """Return True when (n-1, n-1) is reachable from (0, 0) on an n x n grid.

    Movement is 4-directional. Obstacle cells are impassable; treasure cells
    also block traversal, except that reaching the goal cell itself always
    counts as a win regardless of its contents (the goal check happens before
    any cell-content check, matching the original behavior).

    obstacles / treasures are iterables of (x, y) pairs.
    """
    board = [[0] * n for _ in range(n)]
    for ox, oy in obstacles:
        board[oy][ox] = -1  # -1 marks blocked (also reused for "visited")
    for tx, ty in treasures:
        board[ty][tx] = 1   # 1 marks a treasure cell

    goal = (n - 1, n - 1)
    # Iterative depth-first search; the stack starts at the origin.
    stack = [(0, 0)]
    while stack:
        cx, cy = stack.pop()
        if (cx, cy) == goal:
            return True
        if board[cy][cx] != 0:
            # Treasure, obstacle, or already visited — cannot pass through.
            continue
        board[cy][cx] = -1  # mark visited
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx, ny = cx + dx, cy + dy
            # Obstacles/visited cells are never entered; treasure cells are
            # pushed (they may be the goal) but rejected above otherwise.
            if 0 <= nx < n and 0 <= ny < n and board[ny][nx] != -1:
                stack.append((nx, ny))
    return False
REM callFunction.sql
REM Chapter 9, Oracle9i PL/SQL Programming by <NAME>
REM This script shows how to call a stored function.
set serveroutput on

-- Anonymous block: iterate over every class and report the nearly-full ones,
-- as determined by the stored function AlmostFull.
DECLARE
  -- All (department, course) pairs to probe.
  CURSOR c_Classes IS
    SELECT department, course
      FROM classes;
BEGIN
  FOR v_ClassRecord IN c_Classes LOOP
    -- Output all the classes which don't have very much room
    IF AlmostFull(v_ClassRecord.department,
                  v_ClassRecord.course) THEN
      DBMS_OUTPUT.PUT_LINE(
        v_ClassRecord.department || ' ' ||
        v_ClassRecord.course || ' is almost full!');
    END IF;
  END LOOP;
END;
/
|
/**
 * Swaps the first and last elements of [arr] in place and returns the same array.
 *
 * Fix: the original indexed arr[0] unconditionally, throwing
 * ArrayIndexOutOfBoundsException for an empty array. Arrays with fewer than
 * two elements are now returned unchanged (there is nothing to swap).
 */
fun swapArray(arr: Array<Int>): Array<Int> {
    if (arr.size < 2) return arr
    val temp = arr[0]
    arr[0] = arr[arr.size - 1]
    arr[arr.size - 1] = temp
    return arr
}
fun main(args: Array<String>) {
    // Demonstrate swapArray: first and last elements trade places.
    val numbers = arrayOf(1, 2, 3, 4)
    // Output: [4, 2, 3, 1]
    println(swapArray(numbers).contentToString())
}
from typing import List
def get_config_file_path(args: List[str]) -> str:
    """Extract a config-file path from command-line-style arguments.

    Scans for "--config-file <value>" pairs. The first absolute value
    (leading "/") wins immediately; otherwise the first relative value seen
    is kept. When no value is found, "default_config.yaml" is returned.
    """
    chosen = ""
    for idx, token in enumerate(args):
        # Only a "--config-file" flag that actually has a following value counts.
        if token != "--config-file" or idx + 1 >= len(args):
            continue
        candidate = args[idx + 1]
        if candidate.startswith("/"):
            # Absolute paths take precedence and stop the scan.
            chosen = candidate
            break
        if not chosen:
            chosen = candidate
    return chosen or "default_config.yaml"
<reponame>BBK-PiJ-2015-07/FinalProject
package prefuse.util.collections;
import java.util.Iterator;
/**
 * Iterator exposing primitive-typed accessors alongside capability checks.
 * Each {@code nextX()} accessor is paired with an {@code isXSupported()}
 * method; presumably callers should consult the check before using the
 * corresponding primitive accessor (TODO confirm against implementations).
 * @author <a href="http://jheer.org"><NAME></a>
 */
public interface LiteralIterator extends Iterator {

    /** Returns the next value as a primitive int. */
    int nextInt();
    /** Indicates whether int-typed iteration is supported. */
    boolean isIntSupported();

    /** Returns the next value as a primitive long. */
    long nextLong();
    /** Indicates whether long-typed iteration is supported. */
    boolean isLongSupported();

    /** Returns the next value as a primitive float. */
    float nextFloat();
    /** Indicates whether float-typed iteration is supported. */
    boolean isFloatSupported();

    /** Returns the next value as a primitive double. */
    double nextDouble();
    /** Indicates whether double-typed iteration is supported. */
    boolean isDoubleSupported();

    /** Returns the next value as a primitive boolean. */
    boolean nextBoolean();
    /** Indicates whether boolean-typed iteration is supported. */
    boolean isBooleanSupported();

} // end of interface LiteralIterator
|
import numpy as np
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
# Create a feature vector consisting of the Color, Weight, and Size of the fruits.
# NOTE(review): the stated column names ("Color, Weight, Size") and the three
# numeric columns below are not obviously aligned — confirm column semantics.
features = np.array([
    [125, 0.7, 9],   # Orange
    [90, 0.5, 6],    # Orange
    [150, 0.75, 10], # Apple
    [88, 0.7, 8]     # Apple
])

# Create a label vector containing the labels of the fruits.
# 0 = Orange, 1 = Apple (matching the row comments above).
labels = np.array([0, 0, 1, 1])

# Standardize the data.
scaler = StandardScaler()
scaled_features = scaler.fit_transform(features)

# Train the model.
model = SVC(kernel='linear')
model.fit(scaled_features, labels)
import { Sequelize } from 'sequelize';
import { loadModules } from '../util/common.util';
import { dbConfig } from '../config';
// SQLite connection options; the database file path comes from app config.
const options = {
    dialect: 'sqlite',
    storage: dbConfig.dbPath
};

const sequelize = new Sequelize(options);

// Connect and sync every model eagerly at module load time.
// NOTE(review): this is a fire-and-forget async IIFE — importers get the
// sequelize instance before authentication/sync have finished.
(async () => {
    // Initialize database
    try {
        await sequelize.authenticate();
        console.log('Connection has been established successfully.');
    } catch (error) {
        console.error('Unable to connect to the database:', error);
    }
    // Load models
    const models = loadModules(dbConfig.modelPath, 'default');
    for (let model of Object.values(models)) {
        await model.sync();
    }
})();

export default sequelize;
|
<reponame>dimostoilov/hcp-portal-service-for-pcm
// Register the Formatter module path so sap.ui.define below can resolve it
// relative to the application's registered prefix.
jQuery.sap.registerModulePath("sap.ui.fiori.util.Formatter", registerPrefix + "/pcmcpapps/Invite/util/Formatter");
sap.ui.define(["sap/ui/fiori/util/Formatter"], function(Formatter) {
    var formatter;
    // QUnit module: reset the formatter reference before every test.
    module("pcmcpapps --> Invite Formatter", {
        setup: function() {
            formatter = Formatter;
        },
        teardown : function() {
        }
    });
    // Verify the status-text and status-icon mappings, including the
    // fall-through default ("None") for unknown or missing values.
    test("test status format", function(){
        ok(formatter.status(formatter.statusStateValues.SUCCESS) === "Success");
        ok(formatter.status(formatter.statusStateValues.FAILURE) === "Error");
        ok(formatter.status(formatter.statusStateValues.IN_PROCESS) === "None");
        ok(formatter.status("-1") === "None");
        ok(formatter.status() === "None");
        ok(formatter.statusIcon(formatter.statusStateValues.SUCCESS) === "accept");
        ok(formatter.statusIcon(formatter.statusStateValues.FAILURE) === "notification");
        ok(formatter.statusIcon(formatter.statusStateValues.IN_PROCESS) === "pending");
    });
});
|
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
## Variables with defaults (if not overwritten by environment)
##
MVN=${MVN:-mvn}

##
## Required variables
##
OLD_VERSION=${OLD_VERSION}
NEW_VERSION=${NEW_VERSION}

# Fix: the OLD_VERSION check previously printed "NEW_VERSION was not set.",
# pointing the user at the wrong variable.
if [ -z "${OLD_VERSION}" ]; then
    echo "OLD_VERSION was not set."
    exit 1
fi

if [ -z "${NEW_VERSION}" ]; then
    echo "NEW_VERSION was not set."
    exit 1
fi
# fail immediately
set -o errexit
set -o nounset

CURR_DIR=`pwd`
BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
PROJECT_ROOT="${BASE_DIR}/../../"

# Sanity check to ensure that resolved paths are valid; a LICENSE file should aways exist in project root
if [ ! -f ${PROJECT_ROOT}/LICENSE ]; then
    echo "Project root path ${PROJECT_ROOT} is not valid; script may be in the wrong directory."
    exit 1
fi

###########################

cd ${PROJECT_ROOT}

# change version in all pom files
# NOTE(review): invokes 'mvn' directly instead of the overridable ${MVN}
# defined above — confirm which is intended.
mvn versions:set -DgenerateBackupPoms=false -DnewVersion=${NEW_VERSION}

# change version in Python SDK's setup.py file
perl -pi -e "s#version=\'$OLD_VERSION\'#version=\'$NEW_VERSION\'#" statefun-sdk-python/setup.py

# change version in Javascript SDK's package.json file
# NOTE(review): package.json is JSON ("version": "x.y.z"); the version='…'
# pattern below looks like it cannot match that file — verify.
perl -pi -e "s#version=\'$OLD_VERSION\'#version=\'$NEW_VERSION\'#" statefun-sdk-js/package.json

# change version strings in README
perl -pi -e "s#<version>(.*)$OLD_VERSION(.*)</version>#<version>$NEW_VERSION</version>#" README.md
perl -pi -e "s#-DarchetypeVersion=$OLD_VERSION#-DarchetypeVersion=$NEW_VERSION#" README.md

# change version strings in tools directory
perl -pi -e "s#version: $OLD_VERSION#version: $NEW_VERSION#" tools/k8s/Chart.yaml

# change version strings in docs config
perl -pi -e "s#version: \"$OLD_VERSION\"#version: \"$NEW_VERSION\"#" docs/_config.yml
perl -pi -e "s#version_title: \"$OLD_VERSION\"#version_title: \"$NEW_VERSION\"#" docs/_config.yml

# change Stateful Functions image version tags in all Dockerfiles and image build script
find . -name 'Dockerfile*' -type f -exec perl -pi -e "s#FROM flink-statefun:$OLD_VERSION#FROM flink-statefun:$NEW_VERSION#" {} \;
perl -pi -e "s#VERSION_TAG=$OLD_VERSION#VERSION_TAG=$NEW_VERSION#" tools/docker/build-stateful-functions.sh

git commit -am "[release] Update version to ${NEW_VERSION}"
NEW_VERSION_COMMIT_HASH=`git rev-parse HEAD`

echo "Done. Created a new commit for the new version ${NEW_VERSION}, with hash ${NEW_VERSION_COMMIT_HASH}"
echo "If this is a new version to be released (or a candidate to be voted on), don't forget to create a signed release tag on GitHub and push the changes."
echo "e.g., git tag -s -m \"Apache Flink Stateful Functions, release 1.1 candidate #2\" release-1.1-rc2 ${NEW_VERSION_COMMIT_HASH}"

cd ${CURR_DIR}
|
import { useEffect } from "react";
import { connect } from "react-redux";
import { refreshToken } from "../../redux/actions";
// Headless component: keeps the access token fresh for the app's lifetime.
// Renders nothing; it only schedules refreshes.
const Auth = ({ refreshToken }) => {
    useEffect(() => {
        // Refresh immediately on mount.
        refreshToken();
        // A timer that refreshes the token automatically
        // every 14 minutes (1 minute less than the expiry of the access token)
        const timer = setInterval(() => {
            refreshToken();
        }, 14 * 60 * 1000);
        return () => {
            clearInterval(timer);
        }
        // NOTE(review): refreshToken is omitted from the dependency array so
        // the interval is created exactly once — confirm the bound action
        // creator is stable across renders.
    }, []);
    return null;
};

export default connect(null, { refreshToken })(Auth);
/*
 * jqeury-CcUi 0.1
 * Copyright (c) 2012 Chuchur http://www.Chuchur.com/
 * Date: 2012-8-3
 * QQ :455105775
 * Dialog popups for Bootstrap.
 */
(function () {
    // $.fn.alert renders a Bootstrap-styled alert dialog with configurable
    // type, title, content and buttons. Legacy code — documentation only.
    // User-facing default strings are intentionally Chinese.
    $.fn.alert = function (options) {
        var defaults = {
            type: 'success',
            title: '提示',
            content: '恭喜,操作成功!',
            buttons: [{
                id: 'chur',
                name: '确定',
                callback: ''
            }],
            modal: true,
            draggabled: false,
            tourl: '',
        }
        /*
        Option reference (translated):
        defaults = {
            type: '',         // one of five types: info, success, warning, primary, danger
            title: '',        // dialog title
            content: '',      // message text
            btntext: '',      // button label
            modal: false,     // modal mode (dims the background)
            draggable: false  // whether the dialog can be dragged
            even: 'click',    // triggering event
            backbtn: true,    // whether to enable a back button
            tourl:''          // redirect URL
        }
        */
        // Default titles per alert type (user-facing text).
        var titles = {
            'success': '成功提示',
            'info': '询问提示',
            'warning': '警告提示',
            'primary': '权限提示',
            'danger': '出错提示'
        };
        // Default message bodies per alert type (user-facing text).
        var contents = {
            'success': '恭喜,操作成功!',
            'info': '你确定要删除这条数据吗?',
            'warning': '警告!数据无价,请谨慎操作!',
            'primary': '对不起您没有此项操作权限!',
            'danger': '抱歉,操作失败!'
        };
        // Fall back to per-type defaults when title/content are not supplied.
        if (!options['title'] || options['title'] == "") {
            options['title'] = titles[options['type']];
        }
        if (!options['content'] || options['content'] == "") {
            options['content'] = contents[options['type']];
        }
        var o = $.extend(defaults, options);
        // Full-window iframe used as the modal backdrop.
        var _modal = '<iframe class="alert-modal"></iframe>'
        // Remove the dialog and backdrop, and drop all button callbacks.
        function closed() {
            $('.chur-alert').remove();
            $('.alert-modal').remove();
            $.each(o.buttons, function (i, row) { row["callback"] = null;})
        }
        // Build the button markup and bind each button's callback.
        // NOTE(review): jQuery's .live() was removed in jQuery 1.9 — this
        // plugin requires a legacy jQuery build.
        var _button = "";
        $.each(o.buttons, function (i, row) {
            _button += '<input type="button" class="btn closed btn-' + o.type + '" id="' + row["id"] + '" value="' + row["name"] + '"/> ';
            $('#' + row["id"]).live('click', function () { try { row["callback"](); } catch (err) { /*alert('出错啦:' + err.message + row["callback"])*/ } finally { row["callback"] = null; } })
        });
        // Dialog markup: heading, message body, and right-aligned buttons.
        var _html = '<div class="chur-alert"><div class="alert alert-' + o.type + '">' +
            '<a class="close closed" href="#">×</a>' +
            '<h4 class="alert-heading">' + o.title + '</h4>' +
            '<div class="context">' + o.content + '</div>' +
            '<div class="rightbtn">' + _button + '</div></div></div>'
        // Only show one dialog of a given type at a time.
        if ($('.alert-' + o.type).length < 1) {
            $('body').append(_html);
            $('.alert-' + o.type).show('fast')
                .find('.closed').live('click', closed)
            if (o.modal) {
                $('body').append(_modal);
            }
            if (o.draggabled) {
                $('.alert').draggable({ 'containment': 'body' });
            }
        };
    };
})();
#!/bin/bash
# Copyright Johns Hopkins University (Author: Daniel Povey) 2012. Apache 2.0.
# begin configuration section.
cmd=run.pl
min_lmwt=5
max_lmwt=17
#end configuration section.

[ -f ./path.sh ] && . ./path.sh
. parse_options.sh || exit 1;

if [ $# -ne 3 ]; then
  echo "Usage: $0 [--cmd (run.pl|queue.pl...)] <data-dir> <lang-dir|graph-dir> <decode-dir>"
  echo " Options:"
  echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes."
  echo " --min_lmwt <int> # minumum LM-weight for lattice rescoring "
  echo " --max_lmwt <int> # maximum LM-weight for lattice rescoring "
  exit 1;
fi

data=$1
lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied.
dir=$3
model=$dir/../final.mdl # assume model one level up from decoding dir.

hubscr=$KALDI_ROOT/tools/sctk/bin/hubscr.pl
[ ! -f $hubscr ] && echo "Cannot find scoring program at $hubscr" && exit 1;
hubdir=`dirname $hubscr`

# All inputs required for scoring must already exist.
for f in $data/text $lang/words.txt $dir/lat.1.gz; do
  [ ! -f $f ] && echo "$0: expecting file $f to exist" && exit 1;
done

name=`basename $data`; # e.g. eval2000

mkdir -p $dir/scoring/log

# Strip non-lexical tokens (noise/laughter/unk/hesitation markers) from
# "<utt-id> word word ..." transcripts read on stdin.
function filter_text {
  perl -e 'foreach $w (@ARGV) { $bad{$w} = 1; }
  while(<STDIN>) { @A = split(" ", $_); $id = shift @A; print "$id ";
  foreach $a (@A) { if (!defined $bad{$a}) { print "$a "; }} print "\n"; }' \
  '[noise]' '[laughter]' '[vocalized-noise]' '<unk>' '%hesitation'
}

# Alternate (uppercase-token) variant kept for reference:
#function filter_text {
# perl -e 'foreach $w (@ARGV) { $bad{$w} = 1; }
# while(<STDIN>) { @A = split(" ", $_); $id = shift @A; print "$id ";
# foreach $a (@A) { if (!defined $bad{$a}) { print "$a "; }} print "\n"; }' \
# '[NOISE]' '[LAUGHTER]' '[VOCALIZED-NOISE]' '<UNK>' '%HESITATION'
#}

# Extract the best path from the lattices once per LM weight.
$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/best_path.LMWT.log \
  lattice-best-path --lm-scale=LMWT --word-symbol-table=$lang/words.txt \
  "ark:gunzip -c $dir/lat.*.gz|" ark,t:$dir/scoring/LMWT.tra || exit 1;

# Map integer transcripts to words and drop non-scored tokens.
for lmwt in `seq $min_lmwt $max_lmwt`; do
  utils/int2sym.pl -f 2- $lang/words.txt <$dir/scoring/$lmwt.tra | \
  filter_text > $dir/scoring/$lmwt.txt || exit 1;
done

# Reference transcript, filtered the same way as the hypotheses.
filter_text <$data/text >$dir/scoring/text.filt

# Compute WER for each LM weight; results land in $dir/wer_<LMWT>.
$cmd LMWT=$min_lmwt:$max_lmwt $dir/scoring/log/score.LMWT.log \
  compute-wer --text --mode=present \
  ark:$dir/scoring/text.filt ark:$dir/scoring/LMWT.txt ">&" $dir/wer_LMWT || exit 1;

exit 0
|
<reponame>bankscrap/bankscrap-openbank
require_relative 'utils.rb'
module Bankscrap
  module Openbank
    # Card account for the Openbank API: pages through the card-movements
    # endpoint and converts each raw movement into a Transaction.
    class Card < ::Bankscrap::Card
      include Utils

      # Contract identifier sent alongside the card id and PAN when querying
      # movements.
      attr_accessor :contract_id

      CARD_ENDPOINT = '/my-money/tarjetas/movimientosCategoria'.freeze

      # Fetch transactions for the given account.
      # By default it fetches transactions for the last month,
      #
      # Returns an array of BankScrap::Transaction objects
      def fetch_transactions_for(connection, start_date: Date.today - 1.month, end_date: Date.today)
        transactions = []
        fields = { producto: contract_id,
                   numeroContrato: id,
                   pan: pan,
                   fechaDesde: format_date(start_date),
                   fechaHasta: format_date(end_date)
        }
        # Loop over pagination
        until fields.empty?
          data = connection.get(CARD_ENDPOINT, fields: fields)
          # NOTE(review): the block parameter below shadows the outer `data`
          # response variable; harmless (block-local), but renaming it would
          # be clearer.
          transactions += data['lista']['movimientos'].map { |data| build_transaction(data) }.compact
          fields = next_page_fields(data)
        end
        transactions
      end

      # Convenience wrapper: fetch a two-year window using this card's bank
      # connection.
      def fetch_transactions(start_date: Date.today - 2.years, end_date: Date.today)
        fetch_transactions_for(bank, start_date: start_date, end_date: end_date)
      end

      # Build a transaction object from API data
      def build_transaction(data)
        # Skip movements whose request state is 'L' — the meaning of 'L' is
        # not documented here; presumably pending. TODO confirm.
        return if data['estadoPeticion'] == 'L'
        Transaction.new(
          account: self,
          id: data['numeroMovimintoEnDia'],
          amount: money(data['impOperacion']),
          description: data['txtCajero'],
          effective_date: parse_date(data['fechaAnotacionMovimiento']),
          operation_date: parse_date(data['fechaMovimiento']),
          balance: Money.new(0, 'EUR') # TODO: Prepaid/debit cards don't have a Balance - maybe Credit ones do.
        )
      end
    end
  end
end
|
package main
import "testing"
// TestScumHasBasicMembers verifies that a Scum value can be constructed
// positionally and that NumAttempts is assignable afterwards.
//
// Fix: the original test constructed the value but asserted nothing, so it
// could never fail; assert the field write actually sticks.
func TestScumHasBasicMembers(t *testing.T) {
	// New up a scum
	scum := Scum{"1.2.3.4", 4, false, false}
	scum.NumAttempts = 3
	if scum.NumAttempts != 3 {
		t.Errorf("NumAttempts = %d, want 3", scum.NumAttempts)
	}
}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package servlets;
import beans.GearBean;
import beans.ProfileBean;
import entities.Skioprema;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.List;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import javax.persistence.Query;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
/**
*
* @author Nikola
*/
/**
 * Servlet that searches ski equipment (Skioprema) by optional name and/or
 * type filters and publishes the results onto the session's ProfileBean.
 */
public class SearchGear extends HttpServlet {

    /**
     * Processes requests for both HTTP <code>GET</code> and <code>POST</code>
     * methods.
     *
     * Builds the narrowest JPQL query for the supplied "name"/"type" request
     * parameters (both empty returns the full catalogue), stores the results
     * on the session-scoped ProfileBean, and redirects to profile.jsp.
     *
     * @param request servlet request
     * @param response servlet response
     * @throws ServletException if a servlet-specific error occurs
     * @throws IOException if an I/O error occurs
     */
    protected void processRequest(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        String name = request.getParameter("name");
        String type = request.getParameter("type");
        HttpSession session = request.getSession();
        // NOTE(review): creating an EntityManagerFactory per request is
        // expensive; it is closed in the finally block below, but a shared
        // factory would be preferable.
        EntityManagerFactory emf = Persistence.createEntityManagerFactory("Februar2017PU");
        EntityManager em = emf.createEntityManager();
        try {
            ProfileBean profileBean = (ProfileBean)session.getAttribute("profileBean");
            // Only logged-in sessions (with a ProfileBean) get results.
            if (profileBean != null) {
                Query query;
                // Pick the query matching whichever filters were provided.
                if (!name.equals("") && !type.equals("")) {
                    query = em.createQuery("SELECT s FROM Skioprema s WHERE s.naziv = :name AND s.vrsta = :type");
                    query.setParameter("name", name);
                    query.setParameter("type", type);
                } else if (!name.equals("")) {
                    query = em.createQuery("SELECT s FROM Skioprema s WHERE s.naziv = :name");
                    query.setParameter("name", name);
                } else if (!type.equals("")) {
                    query = em.createQuery("SELECT s FROM Skioprema s WHERE s.vrsta = :type");
                    query.setParameter("type", type);
                } else {
                    query = em.createQuery("SELECT s FROM Skioprema s");
                }
                List<Skioprema> results = query.getResultList();
                // Replace any previous search results on the bean.
                profileBean.resetGear();
                results.forEach(result -> profileBean.add(new GearBean(result)));
                profileBean.setShouldShowPaid(false);
                session.setAttribute("profileBean", profileBean);
            }
            response.sendRedirect("profile.jsp");
        } finally {
            em.close();
            emf.close();
        }
    }

    // <editor-fold defaultstate="collapsed" desc="HttpServlet methods. Click on the + sign on the left to edit the code.">
    /**
     * Handles the HTTP <code>GET</code> method.
     *
     * @param request servlet request
     * @param response servlet response
     * @throws ServletException if a servlet-specific error occurs
     * @throws IOException if an I/O error occurs
     */
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        processRequest(request, response);
    }

    /**
     * Handles the HTTP <code>POST</code> method.
     *
     * @param request servlet request
     * @param response servlet response
     * @throws ServletException if a servlet-specific error occurs
     * @throws IOException if an I/O error occurs
     */
    @Override
    protected void doPost(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        processRequest(request, response);
    }

    /**
     * Returns a short description of the servlet.
     *
     * @return a String containing servlet description
     */
    @Override
    public String getServletInfo() {
        return "Short description";
    }// </editor-fold>

}
|
#!/bin/sh
# Build script (conda-style recipe): configure, build and install mutt into
# $PREFIX with SSL, header cache, IMAP and SMTP support.
# Abort on the first failing command and on any unset variable (e.g. a
# missing $PREFIX), instead of silently continuing with a broken build.
set -eu

export CFLAGS="-I$PREFIX/include"
export LDFLAGS="-L$PREFIX/lib"

# Quote $PREFIX so paths containing spaces survive word splitting.
./configure --prefix="$PREFIX" \
            --with-ssl="$PREFIX" \
            --enable-hcache \
            --enable-imap \
            --enable-smtp \
            --with-homespool=.mailbox
make
make install
|
<gh_stars>100-1000
/**
* @jest-environment ./prisma/prisma-test-environment.js
*/
import { v4 as uuid } from 'uuid'
import { prisma } from '@infra/prisma/client'
import { redisConnection } from '@infra/redis/connection'
import { makeDeleteUserHandler } from '../factories/DeleteUserHandlerFactory'
// Handler under test, built through its factory so its real dependencies
// (Prisma, Redis) are wired exactly as in production.
const deleteUserHandler = makeDeleteUserHandler()

describe('Delete User Handler (Kafka)', () => {
  afterAll(async () => {
    // Release external connections so the Jest process can exit cleanly.
    redisConnection.disconnect()
    await prisma.$disconnect()
  })

  it('should be able to delete the user', async () => {
    // Arrange: seed a contact row keyed by the integration id the handler
    // will receive.
    await prisma.contact.create({
      data: {
        id: uuid(),
        name: '<NAME>',
        email: '<EMAIL>',
        integration_id: 'user-integration-id',
      },
    })

    // Act: process a delete-user message for that integration id.
    await deleteUserHandler.handle({
      userId: 'user-integration-id',
    })

    // Assert: the contact row must no longer exist.
    const contactInDatabase = await prisma.contact.findUnique({
      where: {
        integration_id: 'user-integration-id',
      },
    })
    expect(contactInDatabase).toBeFalsy()
  })
})
|
#!/bin/bash
# Build the file server's Docker image from the Dockerfile in this directory.
image_tag="barryto/file-server"
docker build --tag "$image_tag" .
|
# Make lib/ loadable so the version constant below can be required.
$:.push File.expand_path("../lib", __FILE__)

# Maintain your gem's version:
require "tail/version"

# Describe your gem and declare its dependencies:
Gem::Specification.new do |s|
  s.name        = "tail"
  s.version     = Tail::VERSION
  s.authors     = ["<NAME>"]
  s.email       = ["<EMAIL>"]
  s.homepage    = "http://github.com/k2m30/tail"
  s.summary     = "Tail command for your .log files via browser."
  # Fixed typo in the published description: "eamacs" -> "emacs".
  s.description = "In order to have a quick access to your .log files this gem provides *nix `tail` command functionality to your Rails application. If something goes wrong you don't have to ssh to your server anymore. Now you have normal scroll and search in browser instead of `nano`, `emacs`, `vim`, `mcedit` - name it."

  s.files = Dir["{app,config,db,lib}/**/*", "MIT-LICENSE", "Rakefile", "README.md"]
  s.test_files = Dir["test/**/*"]
  s.post_install_message = "Don't forget to mount gem (routes.rb) like this: mount Tail::Engine, at: \"/tail\" "

  s.add_dependency "rails", ">= 3.2"
  s.required_ruby_version = ">= 1.9.3"
end
|
#!/bin/bash
# BUG FIX: the first line was '!/bin/bash' (missing '#'), which is not a
# shebang and would be executed as a command.
#
# Creates two OSPF test containers and wires three veth point-to-point links
# between their network namespaces.
echo "####Creating Containers####"
docker run -dt --cap-add=ALL --name ospf1 --ip 192.168.0.2 --net=clos-oob-network -P ubuntu_flex:v2
docker run -dt --cap-add=ALL --name ospf2 --ip 192.168.0.3 --net=clos-oob-network -P ubuntu_flex:v2

# Expose each container's network namespace under /var/run/netns so that
# 'ip netns exec' can operate on it.
ospf1_pid=$(docker inspect -f '{{.State.Pid}}' ospf1)
ospf2_pid=$(docker inspect -f '{{.State.Pid}}' ospf2)
mkdir -p /var/run/netns
ln -s "/proc/$ospf1_pid/ns/net" "/var/run/netns/$ospf1_pid"
ln -s "/proc/$ospf2_pid/ns/net" "/var/run/netns/$ospf2_pid"
echo -e "done!\n"

# Create the "peer" interfaces and hand them out
echo "###Creating P2P interfaces###"
# Three veth pairs: ethN (ospf1 side) <-> eth1N (ospf2 side).
sudo ip link add eth1 type veth peer name eth11
sudo ip link add eth2 type veth peer name eth12
sudo ip link add eth3 type veth peer name eth13

# ospf1 interfaces: move into the namespace, then bring up.
sudo ip link set eth1 netns "$ospf1_pid"
sudo ip netns exec "$ospf1_pid" ip link set eth1 up
sudo ip link set eth2 netns "$ospf1_pid"
sudo ip netns exec "$ospf1_pid" ip link set eth2 up
sudo ip link set eth3 netns "$ospf1_pid"
sudo ip netns exec "$ospf1_pid" ip link set eth3 up

# ospf2 interfaces
sudo ip link set eth11 netns "$ospf2_pid"
sudo ip netns exec "$ospf2_pid" ip link set eth11 up
sudo ip link set eth12 netns "$ospf2_pid"
sudo ip netns exec "$ospf2_pid" ip link set eth12 up
sudo ip link set eth13 netns "$ospf2_pid"
sudo ip netns exec "$ospf2_pid" ip link set eth13 up
echo -e "done!\n"
|
import java.net.HttpURLConnection;
import javax.net.ssl.HttpsURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import org.openqa.selenium.By;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.Keys;
import org.openqa.selenium.StaleElementReferenceException;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;
/**
 * Opens a page with Selenium, collects every anchor element and probes each
 * href over HTTP to report its response code (broken-link detection).
 */
public class BrokenLinkFinder {

    /**
     * Checks whether a given hyperlink is broken by opening an HTTP
     * connection and printing the response code (200 means the link is valid).
     *
     * @param hyperLink the URL to probe
     * @throws Exception if the connection cannot be created at all
     */
    public static void brokenLinkChecker(URL hyperLink) throws Exception {
        String acknowledge = null;
        int code = 0;
        HttpURLConnection linkConnection = (HttpURLConnection) (hyperLink.openConnection());
        try {
            System.out.println("*** Checking link " + hyperLink.toString());
            // Initiate an HTTP connection
            linkConnection.connect();
            // Check whether the connection is responding
            acknowledge = linkConnection.getResponseMessage();
            code = linkConnection.getResponseCode(); // 200 when the link exists
            System.out.println("*** The link " + "returned " + code);
        } catch (Exception e) {
            System.out.println("*** Throws exception " + e.toString());
            System.out.println("*** The link "
                    + "is not HTTP or requires certificate validation, message = " + acknowledge);
        } finally {
            // Always release the connection; the original leaked it when
            // connect()/getResponseCode() threw.
            linkConnection.disconnect();
        }
    }

    public static void main(String[] args) throws Exception {
        // Swap in ChromeDriver (plus webdriver.chrome.driver) to test with Chrome.
        WebDriver driver = new FirefoxDriver();
        try {
            driver.get("https://statcounter.com/");
            // Get all the links
            java.util.List<WebElement> links = driver.findElements(By.tagName("a"));
            System.out.println(links.size());
            // Print all the links
            for (int i = 0; i < links.size(); i = i + 1) {
                System.out.println(i + " " + links.get(i).getText());
                System.out.println(i + " " + links.get(i).getAttribute("href"));
            }
            // Call broken link checker for all the links found.
            // NOTE(review): the start index 50 looks like debug residue — it
            // skips the first 50 links. Confirm whether 0 was intended.
            for (int i = 50; i < links.size(); i = i + 1) {
                try {
                    brokenLinkChecker(new URL(links.get(i).getAttribute("href")));
                } catch (Exception e) {
                    System.out
                            .println("This is not a proper HTTP URL or requires certificate validation "
                                    + links.get(i).getAttribute("href"));
                }
            }
        } finally {
            // The original never shut the browser down, leaking the driver
            // process on every run.
            driver.quit();
        }
    }
}
|
/*
* MIT License
*
* Copyright (c) 2021 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package net.jamsimulator.jams.task;
import javafx.concurrent.Task;
import net.jamsimulator.jams.Jams;
import net.jamsimulator.jams.event.Listener;
import net.jamsimulator.jams.event.general.JAMSShutdownEvent;
import net.jamsimulator.jams.utils.Validate;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
/**
* Instances of this class allows developers to execute asynchronous tasks easily.
* <p>
* To execute a net.jamsimulator.jams.task, use the method {@link #execute(String, String, Runnable)},
* {@link #execute(String, String, Callable)} or {@link #execute(String, Task)}. If you use the last one
* you can configure your task to implement a progression bar.
* <p>
* The runnables will be wrapped in a {@link Task} instance. You can access to all tasks being executed using
* {@link #getTasks()}. This method creates a copy of the tasks' list.
* <p>
* If you only want to get the first {@link Task} you can use {@link #getFirstTask()}.
* This way you won't create any new list.
* <p>
* The executor inside this class will be shut down when the JAMS application is closed
* or when the method {@link #shutdown()} or {@link #shutdownNow()} is invoked.
* <p>
* Avoid creating tasks that invoke blocking methods: the thread used by the task won't be released while the thread
* is waiting.
*
* @see Task
* @see LanguageTask
*/
public class TaskExecutor {

    private final ExecutorService executor;
    // Tasks are appended on submission and pruned lazily (removeIf(isDone))
    // on every public access, so this list may briefly contain finished tasks.
    private final LinkedList<Task<?>> tasks;

    /**
     * Creates the net.jamsimulator.jams.task executor.
     */
    public TaskExecutor() {
        this.executor = Executors.newCachedThreadPool();
        tasks = new LinkedList<>();
        // Registered so onShutdown() below kills the pool when JAMS closes.
        Jams.getGeneralEventBroadcast().registerListeners(this, true);
    }

    /**
     * Returns a new list with all tasks being executed in this executor.
     *
     * @return the new list.
     */
    public synchronized List<Task<?>> getTasks() {
        tasks.removeIf(FutureTask::isDone);
        return new LinkedList<>(tasks);
    }

    /**
     * Returns the first {@link Task} of the tasks' list if present.
     *
     * @return the first {@link Task} if present.
     */
    public synchronized Optional<Task<?>> getFirstTask() {
        tasks.removeIf(FutureTask::isDone);
        return Optional.ofNullable(tasks.isEmpty() ? null : tasks.getFirst());
    }

    /**
     * Executes the given {@link Runnable} in this executor.
     * You must provide a name to the task.
     *
     * @param name     the name of the task.
     * @param title    the title of the task. It may be null.
     * @param runnable the code to execute.
     * @return the {@link Task} being executed.
     * @see ExecutorService#submit(Runnable)
     */
    public synchronized Task<?> execute(String name, String title, Runnable runnable) {
        // NOTE(review): name is validated but not stored or displayed anywhere
        // in this class — confirm whether it should be attached to the task.
        Validate.notNull(name, "Name cannot be null!");
        Validate.notNull(runnable, "Runnable cannot be null!");
        tasks.removeIf(FutureTask::isDone);
        // Wrap the runnable in a JavaFX Task; the instance initializer sets
        // the title before the task can be scheduled.
        var task = new Task<>() {
            {
                updateTitle(title);
            }

            @Override
            protected Object call() throws Exception {
                runnable.run();
                return null;
            }
        };
        executor.submit(task);
        tasks.add(task);
        return task;
    }

    /**
     * Executes the given {@link Callable} in this executor.
     * You must provide a name to the task
     *
     * @param name     the name of the task.
     * @param title    the title of the task. It may be null.
     * @param callable the code to execute.
     * @return the {@link Task} being executed.
     * @see ExecutorService#submit(Runnable)
     */
    public synchronized <T> Task<T> execute(String name, String title, Callable<T> callable) {
        Validate.notNull(name, "Name cannot be null!");
        Validate.notNull(callable, "Callable cannot be null!");
        tasks.removeIf(FutureTask::isDone);
        var task = new Task<T>() {
            {
                updateTitle(title);
            }

            @Override
            protected T call() throws Exception {
                return callable.call();
            }
        };
        executor.submit(task);
        tasks.add(task);
        return task;
    }

    /**
     * Executes the given runnable in this executor.
     * You must provide a name to the task
     *
     * @param name the name of the task.
     * @param task the task to execute.
     * @see ExecutorService#submit(Runnable)
     */
    public synchronized <T> void execute(String name, Task<T> task) {
        Validate.notNull(name, "Name cannot be null!");
        Validate.notNull(task, "Task cannot be null!");
        tasks.removeIf(FutureTask::isDone);
        executor.submit(task);
        tasks.add(task);
    }

    /**
     * Returns whether this executor is shut down.
     *
     * @return whether this executor is shut down.
     * @see ExecutorService#isShutdown()
     */
    public boolean isShutdown() {
        return executor.isShutdown();
    }

    /**
     * Returns whether this executor is shut down and all its tasks have finished.
     *
     * @return whether this executor is shut down and all its tasks have finished.
     * @see ExecutorService#isTerminated()
     */
    public boolean isTerminated() {
        return executor.isTerminated();
    }

    /**
     * Shutdowns this executor, avoiding new tasks to be accepted.
     * Running tasks will still be executed.
     *
     * @see ExecutorService#shutdown()
     */
    public void shutdown() {
        executor.shutdown();
    }

    /**
     * Shutdowns this executor, killing all running tasks.
     *
     * @see ExecutorService#shutdownNow()
     */
    public void shutdownNow() {
        executor.shutdownNow();
    }

    // Invoked by the JAMS event bus just before application shutdown;
    // kills any still-running tasks so the JVM can exit.
    @Listener
    private void onShutdown(JAMSShutdownEvent.Before event) {
        executor.shutdownNow();
    }
}
|
#!/bin/bash
# Container entrypoint: configures and starts the Hadoop/Hive/Spark daemons
# for this node's role, then blocks tailing log directories until the
# container stops, at which point the daemons are stopped again.

# Optionally override the datanode web UI port (default 9864) in hdfs-site.xml.
if [ -n "${HADOOP_DATANODE_UI_PORT}" ]; then
  echo "Replacing default datanode UI port 9864 with ${HADOOP_DATANODE_UI_PORT}"
  sed -i "$ i\<property><name>dfs.datanode.http.address</name><value>0.0.0.0:${HADOOP_DATANODE_UI_PORT}</value></property>" "${HADOOP_CONF_DIR}/hdfs-site.xml"
fi

if [ "${HADOOP_NODE}" == "namenode" ]; then
  echo "Starting Hadoop name node..."
  # Formatting at startup is only safe because this container boots with an
  # empty metadata directory; 'yes' auto-confirms the prompt.
  yes | hdfs namenode -format
  hdfs --daemon start namenode
  hdfs --daemon start secondarynamenode
  yarn --daemon start resourcemanager
  mapred --daemon start historyserver
fi

if [ "${HADOOP_NODE}" == "datanode" ]; then
  echo "Starting Hadoop data node..."
  hdfs --daemon start datanode
  yarn --daemon start nodemanager
fi

if [ -n "${HIVE_CONFIGURE}" ]; then
  echo "Configuring Hive..."
  schematool -dbType postgres -initSchema
  # Start metastore service.
  hive --service metastore &
  # JDBC Server.
  hiveserver2 &
fi

if [ -z "${SPARK_MASTER_ADDRESS}" ]; then
  echo "Starting Spark master node..."
  # Create directory for Spark logs
  SPARK_LOGS_HDFS_PATH=/log/spark
  if ! hadoop fs -test -d "${SPARK_LOGS_HDFS_PATH}"; then
    hadoop fs -mkdir -p "${SPARK_LOGS_HDFS_PATH}"
    # chmod the directory itself: the original's ".../*" glob failed because
    # the freshly created directory is empty.
    hadoop fs -chmod -R 755 "${SPARK_LOGS_HDFS_PATH}"
  fi
  # Spark on YARN: stage Spark's jars in HDFS once.
  SPARK_JARS_HDFS_PATH=/spark-jars
  if ! hadoop fs -test -d "${SPARK_JARS_HDFS_PATH}"; then
    # 'hadoop fs' replaces the long-deprecated 'hadoop dfs'.
    hadoop fs -copyFromLocal "${SPARK_HOME}/jars" "${SPARK_JARS_HDFS_PATH}"
  fi
  "${SPARK_HOME}/sbin/start-master.sh" -h master &
  "${SPARK_HOME}/sbin/start-history-server.sh" &
else
  echo "Starting Spark slave node..."
  "${SPARK_HOME}/sbin/start-slave.sh" "${SPARK_MASTER_ADDRESS}" &
fi

echo "All initializations finished!"

# Blocking call to view all logs. This is what won't let container exit right away.
/scripts/parallel_commands.sh "scripts/watchdir ${HADOOP_LOG_DIR}" "scripts/watchdir ${SPARK_LOG_DIR}"

# Stop all
if [ "${HADOOP_NODE}" == "namenode" ]; then
  # BUG FIX: the original ran 'hdfs namenode -format' here, which would wipe
  # the namenode metadata on every shutdown. Removed.
  hdfs --daemon stop namenode
  hdfs --daemon stop secondarynamenode
  yarn --daemon stop resourcemanager
  mapred --daemon stop historyserver
fi

if [ "${HADOOP_NODE}" == "datanode" ]; then
  hdfs --daemon stop datanode
  yarn --daemon stop nodemanager
fi
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
class Libdrm(Package):
    """A userspace library for accessing the DRM, direct rendering manager,
    on Linux, BSD and other systems supporting the ioctl interface."""

    homepage = "http://dri.freedesktop.org/libdrm/"
    url = "http://dri.freedesktop.org/libdrm/libdrm-2.4.59.tar.gz"

    # Known releases with their MD5 checksums.
    version('2.4.81', 'dc575dd661a082390e9f1366ca5734b0')
    version('2.4.75', '743c16109d91a2539dfc9cc56130d695')
    version('2.4.70', 'a8c275bce5f3d71a5ca25e8fb60df084')
    version('2.4.59', '105ac7af1afcd742d402ca7b4eb168b6')
    version('2.4.33', '86e4e3debe7087d5404461e0032231c8')

    # pkg-config is only needed while configuring/building.
    depends_on('pkgconfig', type='build')
    # libpciaccess is skipped on macOS (not available/needed there).
    depends_on('libpciaccess@0.10:', when=(sys.platform != 'darwin'))
    depends_on('libpthread-stubs')

    def install(self, spec, prefix):
        # Classic autotools flow: configure, build, run the test suite, install.
        configure('--prefix={0}'.format(prefix),
                  '--enable-static',
                  'LIBS=-lrt')  # This fixes a bug with `make check`
        make()
        make('check')
        make('install')
|
<filename>19-React/01-Activities/05-Stu_HelloBootstrap/Solved/Bonus/src/components/Jumbotron.js<gh_stars>10-100
import React from "react";
function Jumbotron() {
return (
<div className="jumbotron">
<h1>Your Project</h1>
<p>
Enim adipisicing enim reprehenderit ex ullamco consectetur Lorem laboris magna exercitation.
Aliquip reprehenderit magna elit cillum adipisicing dolore aliquip velit. Ipsum ullamco
nostrud tempor eu deserunt ipsum incididunt fugiat esse ipsum. Exercitation nostrud
exercitation sit ex nostrud aliqua officia magna nostrud deserunt et esse eu deserunt. Non
dolor consequat qui ea culpa tempor magna nulla consectetur est occaecat.
</p>
</div>
);
}
export default Jumbotron;
|
#!/usr/bin/env bash
# Copyright Materialize, Inc. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

set -e

# Append the TLS certificate/key/CA settings to the server configuration.
{
  printf "%s\n" "ssl_cert_file = '/share/secrets/postgres.crt'"
  printf "%s\n" "ssl_key_file = '/share/secrets/postgres.key'"
  printf "%s\n" "ssl_ca_file = '/share/secrets/ca.crt'"
} >> "$PGDATA/postgresql.conf"
|
/**
 * Demonstrates accumulating a running sum inside a loop and printing it.
 */
public class Example {
    public static void main(String[] args) {
        int first = 1;
        int second = 2;
        int total = 0;
        int iterations = 0;
        // Nine passes: each adds (first + second), then advances first.
        while (iterations < 9) {
            total += first + second;
            first++;
            iterations++;
        }
        System.out.println("c = " + total);
    }
}
<filename>SourceCode/Go/holer/IntraServerHandler.go
/*
* Copyright 2018-present, Yudong (<NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main

import (
	"fmt"
)

// IntraServerMsgHandler bridges a raw TCP connection to the intranet server
// with a pooled holer (tunnel) connection identified by Uri/AccessKey.
type IntraServerMsgHandler struct {
	Uri       string
	AccessKey string
	Pooler    *PoolHandler
	HolerConn *ConnHandler
}

// Encode passes raw bytes through unchanged; a nil message becomes an empty slice.
func (msgHandler *IntraServerMsgHandler) Encode(msg interface{}) []byte {
	if msg == nil {
		return []byte{}
	}
	return msg.([]byte)
}

// Decode passes the whole buffer through unchanged and reports it fully consumed.
func (msgHandler *IntraServerMsgHandler) Decode(buf []byte) (interface{}, int) {
	return buf, len(buf)
}

// Receive forwards bytes read from the intranet server to the paired holer
// connection as a TRANSFER message. Data arriving before a pair exists is dropped.
func (msgHandler *IntraServerMsgHandler) Receive(connHandler *ConnHandler, msgData interface{}) {
	if connHandler.NextConn == nil {
		return
	}
	data := msgData.([]byte)
	msg := Message{Type: TYPE_TRANSFER}
	msg.Data = data
	connHandler.NextConn.Write(msg)
}

// Success runs once the intranet-server connection is established: it pulls a
// holer connection from the pool, pairs the two (NextConn in both directions)
// and notifies the holer server with a CONNECT message of "uri@accessKey".
// If no pooled connection is available, the holer server is told to
// disconnect and the local connection is closed.
func (msgHandler *IntraServerMsgHandler) Success(connHandler *ConnHandler) {
	holerHandler, err := msgHandler.Pooler.Pull()
	if err != nil {
		fmt.Println("Get holer connection error:", err, "Uri:", msgHandler.Uri)
		msg := Message{Type: TYPE_DISCONNECT}
		msg.Uri = msgHandler.Uri
		msgHandler.HolerConn.Write(msg)
		connHandler.Conn.Close()
	} else {
		holerHandler.NextConn = connHandler
		connHandler.NextConn = holerHandler
		msg := Message{Type: TYPE_CONNECT}
		msg.Uri = msgHandler.Uri + "@" + msgHandler.AccessKey
		holerHandler.Write(msg)
		//fmt.Println("Intranet server connect success, notify holer server:", message.Uri)
	}
}

// Error tears down the pairing when the intranet connection fails: the paired
// connection (if any) is sent a DISCONNECT message and unlinked.
func (msgHandler *IntraServerMsgHandler) Error(connHandler *ConnHandler) {
	conn := connHandler.NextConn
	if conn != nil {
		msg := Message{Type: TYPE_DISCONNECT}
		msg.Uri = msgHandler.Uri
		conn.Write(msg)
		conn.NextConn = nil
	}
	connHandler.MsgHandler = nil
}

// Failure notifies the holer server that the intranet server could not be
// reached at all.
func (msgHandler *IntraServerMsgHandler) Failure() {
	msg := Message{Type: TYPE_DISCONNECT}
	msg.Uri = msgHandler.Uri
	msgHandler.HolerConn.Write(msg)
}
|
// Gulp build file: wires up the local dev services (mongod), the app server
// (nodemon), the mocha test run and CSS minification.
var
  gulp = require('gulp'),
  child_process = require('child_process'),
  exec = child_process.exec, // reuse the module above instead of requiring it twice
  minifyCss = require('gulp-minify-css'),
  nodemon = require('gulp-nodemon');

// startup required services to run the app server
gulp.task('mongod', function() {
  // spawn mongodb in a child process; log whatever it prints
  exec('mongod', function(err, stdout, stderr) {
    console.log(stdout);
  });
});

// Run app.js with nodemon, restarting whenever a .js file changes
gulp.task('dev', function () {
  nodemon({ script: 'app.js'
  , ext: 'js' }).on('restart', function () {
    console.log('restarted!')
  });
});

// Run mocha tests in a child process and print their output
gulp.task('test', function () {
  exec('mocha', function(err, stdout, stderr) {
    console.log(stdout);
  });
});

// Minify Css files in place
gulp.task('minify-css', function() {
  return gulp.src('public/css/*.css')
    .pipe(minifyCss({compatibility: 'ie8'}))
    .pipe(gulp.dest('public/css'));
});

// start dev environment (gulp 3 style task-dependency array)
gulp.task('startup', ['mongod', 'dev']);
|
#!/usr/bin/env bash
# Copyright 2021 The Cockroach Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Update vendor and bazel rules to match go.mod
#
# Usage:
#   update-deps.sh [--patch|--minor] [packages]

set -o nounset
set -o errexit
set -o pipefail

# When invoked directly, re-launch ourselves through bazel so the pinned
# toolchain binaries are passed in as positional arguments.
if [[ -n "${BUILD_WORKSPACE_DIRECTORY:-}" ]]; then # Running inside bazel
  echo "Updating modules..." >&2
elif ! command -v bazel &>/dev/null; then
  echo "Install bazel at https://bazel.build" >&2
  exit 1
else
  (
    set -o xtrace
    bazel run //hack:update-deps -- "$@"
  )
  exit 0
fi

# Resolve the bazel-provided tool paths ($1..$5) to absolute locations.
go=$(realpath "$1")
# Quote the command substitution: 'export' is a regular builtin, so an
# unquoted value would undergo word splitting on paths with spaces.
export PATH="$(dirname "$go"):$PATH"
gazelle=$(realpath "$2")
kazel=$(realpath "$3")
jq=$(realpath "$4")  # currently unused; consumer is commented out below
update_bazel=(
  "$(realpath "$5")"
  "$gazelle"
  "$kazel"
)
#update_deps_licenses=(
#  $(realpath "$6")
#  "$go"
#  "$jq"
#)
shift 5

cd "$BUILD_WORKSPACE_DIRECTORY"
trap 'echo "FAILED" >&2' ERR
export GO111MODULE=on
export GOPROXY=https://proxy.golang.org
export GOSUMDB=sum.golang.org
mode="${1:-}"
shift || true
#case "$mode" in
#--minor)
#  if [[ -z "$@" ]]; then
#    "$go" get -u ./...
#  else
#    "$go" get -u "$@"
#  fi
#  ;;
#--patch)
#  if [[ -z "$@" ]]; then
#    "$go" get -u=patch ./...
#  else
#    "$go" get -u=patch "$@"
#  fi
#  ;;
#"")
#  # Just validate, or maybe manual go.mod edit
#  ;;
#*)
#  echo "Usage: $(basename "$0") [--patch|--minor] [packages]" >&2
#  exit 1
#  ;;
#esac

# rm -rf vendor
"$go" mod tidy
unset GOROOT
# Regenerate the bazel repository macros from go.mod.
"$gazelle" update-repos \
  --from_file=go.mod --to_macro=hack/build/repos.bzl%_go_dependencies \
  --build_file_generation=on --build_file_proto_mode=disable
#"${update_bazel[@]}" # TODO(fejta): do we still need to do this?
#"${update_deps_licenses[@]}"
echo "SUCCESS: updated modules"
|
<reponame>MoozLee/RebateBot
package main

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/546669204/RebateBot/common"
	"github.com/gin-gonic/gin"
)

// initWebApi registers the admin HTTP API routes and the static front-end,
// then blocks serving on port 1778.
func initWebApi() {
	router := gin.New()
	api := router.Group("api")
	api.GET("/getService", getService)
	api.GET("/getUserData", getUserData)
	api.GET("/getOrderData", getOrderData)
	api.GET("/getWithdrawData", getWithdrawData)
	api.POST("/withdrawPay", withdrawPay)
	api.GET("/getTemplateData", getTemplateData)
	api.POST("/setTemplateData", setTemplateData)
	api.POST("/tblogin", tblogin)
	api.POST("/tbchecklogin", tbchecklogin)
	api.POST("/reboot", reboot)
	router.GET("/", func(c *gin.Context) {
		c.File("./fanliadmin/dist/index.html")
	})
	router.Static("/static", "./fanliadmin/dist/static")
	router.Run(":1778")
}

// getService forwards a "getservice" request over the bot connection and
// relays the raw JSON reply to the caller.
func getService(c *gin.Context) {
	var m common.Msg
	m.Data = ""
	m.Method = "getservice"
	resp := Client.ConnWriteReturn(m)
	c.Data(200, "application/json", []byte(resp.Data))
}

// tblogin forwards a Taobao login request for the target given in the "data"
// form field. NOTE(review): ShouldBind errors are discarded throughout this
// file, so a malformed body silently yields zero values.
func tblogin(c *gin.Context) {
	type parmModel struct {
		Data string `form:"data"`
	}
	var parm parmModel
	c.ShouldBind(&parm)
	var m common.Msg
	m.Data = ""
	m.To = parm.Data
	m.Method = "tblogin"
	resp := Client.ConnWriteReturn(m)
	c.Data(200, "application/json", []byte(resp.Data))
}

// tbchecklogin forwards a Taobao login-status check for the given target.
func tbchecklogin(c *gin.Context) {
	type parmModel struct {
		Data string `form:"data"`
	}
	var parm parmModel
	c.ShouldBind(&parm)
	var m common.Msg
	m.Data = ""
	m.To = parm.Data
	m.Method = "tbchecklogin"
	resp := Client.ConnWriteReturn(m)
	c.Data(200, "application/json", []byte(resp.Data))
}

// reboot forwards a reboot command with the supplied payload.
func reboot(c *gin.Context) {
	type parmModel struct {
		Data string `form:"data"`
	}
	var parm parmModel
	c.ShouldBind(&parm)
	var m common.Msg
	m.Data = parm.Data
	m.Method = "reboot"
	resp := Client.ConnWriteReturn(m)
	c.Data(200, "application/json", []byte(resp.Data))
}

// getUserData returns one page of xm_user rows plus the total row count.
func getUserData(c *gin.Context) {
	type parmModel struct {
		Page     uint `form:"page"`
		PageSize uint `form:"pageSize"`
	}
	var parm parmModel
	c.ShouldBind(&parm)
	var j []MysqlUser
	err := sess.Collection("xm_user").Find().Page(parm.Page).Paginate(parm.PageSize).All(&j)
	count, _ := sess.Collection("xm_user").Find().Count()
	if err != nil {
		log.Println("getuserdata", err)
		c.JSON(200, gin.H{"code": -1, "msg": err.Error()})
		return
	}
	c.JSON(200, gin.H{"code": 0, "msg": "", "data": j, "count": count})
}

// getOrderData returns one page of xm_orders rows, newest first.
func getOrderData(c *gin.Context) {
	type parmModel struct {
		Page     uint `form:"page"`
		PageSize uint `form:"pageSize"`
	}
	var parm parmModel
	c.ShouldBind(&parm)
	var j []MysqlOrders
	err := sess.Collection("xm_orders").Find().Page(parm.Page).Paginate(parm.PageSize).OrderBy(`create_time desc`).All(&j)
	count, _ := sess.Collection("xm_orders").Find().Count()
	if err != nil {
		c.JSON(200, gin.H{"code": -1, "msg": err.Error()})
		return
	}
	c.JSON(200, gin.H{"code": 0, "msg": "", "data": j, "count": count})
}

// getWithdrawData returns one page of pending (status = 0) withdraw requests.
// NOTE(review): the rows are decoded into []MysqlOrders even though they come
// from xm_withdraw — confirm a dedicated withdraw model isn't intended.
func getWithdrawData(c *gin.Context) {
	type parmModel struct {
		Page     uint `form:"page"`
		PageSize uint `form:"pageSize"`
	}
	var parm parmModel
	c.ShouldBind(&parm)
	var j []MysqlOrders
	err := sess.Collection("xm_withdraw").Find().Where(`status = 0`).Page(parm.Page).Paginate(parm.PageSize).OrderBy(`create_time desc`).All(&j)
	count, _ := sess.Collection("xm_withdraw").Find().Where(`status = 0`).Count()
	if err != nil {
		c.JSON(200, gin.H{"code": -1, "msg": err.Error()})
		return
	}
	c.JSON(200, gin.H{"code": 0, "msg": "", "data": j, "count": count})
}

// withdrawPay marks the given withdraw request as paid (status = 1).
func withdrawPay(c *gin.Context) {
	type parmModel struct {
		ID uint `form:"id"`
	}
	var parm parmModel
	c.ShouldBind(&parm)
	_, err := sess.Update("xm_withdraw").Where(`id = ?`, parm.ID).Set(`status = 1`).Exec()
	if err != nil {
		c.JSON(200, gin.H{"code": -1, "msg": err.Error()})
		return
	}
	c.JSON(200, gin.H{"code": 0, "msg": "", "data": ""})
}

// getTemplateData returns the in-memory message template list.
func getTemplateData(c *gin.Context) {
	c.JSON(200, gin.H{"code": 0, "msg": "", "data": TemplateList})
}

// setTemplateData persists the posted template JSON next to the executable.
func setTemplateData(c *gin.Context) {
	type parmModel struct {
		Data string `form:"data"`
	}
	var parm parmModel
	c.ShouldBind(&parm)
	err := ioutil.WriteFile(filepath.Join(filepath.Dir(os.Args[0]), "template.json"), []byte(parm.Data), 0666)
	if err != nil {
		c.JSON(200, gin.H{"code": -1, "msg": err.Error()})
		return
	}
	c.JSON(200, gin.H{"code": 0, "msg": ""})
}
|
# Execute the configured health command and report success/failure.
#
# BUG FIX: the original function body contained only comments, which is a
# syntax error in shell (a function body must contain at least one command).
# A success placeholder is provided so the snippet parses; replace it with
# the real health check, e.g.:
#   health_execute_command() { check_health_command; }
health_execute_command() {
  # TODO: run the real health command here and propagate its exit status
  # (return 0 on success, non-zero on failure).
  return 0
}

# Check the result of executing the health command.
# This snippet uses top-level `return`, so it is meant to be sourced; fall
# back to `exit` so it also works when executed as a standalone script.
if ! health_execute_command; then
  return 1 2>/dev/null || exit 1
fi
return 0 2>/dev/null || exit 0
return 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.